language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | modin-project__modin | asv_bench/benchmarks/benchmarks.py | {
"start": 24962,
"end": 25254
} | class ____:
param_names = ["shape"]
params = [
get_benchmark_shapes("TimeDescribe"),
]
def setup(self, shape):
self.df = generate_dataframe("int", *shape, RAND_LOW, RAND_HIGH)
def time_describe(self, shape):
execute(self.df.describe())
| TimeDescribe |
python | pytorch__pytorch | test/dynamo/test_guard_serialization.py | {
"start": 2641,
"end": 2725
} | class ____(torch.nn.Module):
def forward(self, x):
return x + 2
| FlatModule |
python | huggingface__transformers | src/transformers/models/t5gemma/modular_t5gemma.py | {
"start": 2008,
"end": 9932
} | class ____(Gemma2Config):
r"""
This is the configuration class to store the configuration of a [`T5GemmaModuleModel`]. It is used to instantiate an T5GemmaModule
model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
defaults will yield a similar configuration to that of the T5GemmaModule-7B.
e.g. [google/t5_gemma_module-7b](https://huggingface.co/google/t5_gemma_module-7b)
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 256000):
Vocabulary size of the T5GemmaModule model. Defines the number of different tokens that can be represented by the
`inputs_ids` passed when calling [`T5GemmaModuleModel`]
hidden_size (`int`, *optional*, defaults to 2304):
Dimension of the hidden representations.
intermediate_size (`int`, *optional*, defaults to 9216):
Dimension of the MLP representations.
num_hidden_layers (`int`, *optional*, defaults to 26):
Number of hidden layers in the Transformer decoder.
num_attention_heads (`int`, *optional*, defaults to 8):
Number of attention heads for each attention layer in the Transformer decoder.
num_key_value_heads (`int`, *optional*, defaults to 4):
This is the number of key_value heads that should be used to implement Grouped Query Attention. If
`num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
`num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When
converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
by meanpooling all the original heads within that group. For more details, check out [this
paper](https://huggingface.co/papers/2305.13245). If it is not specified, will default to
`num_attention_heads`.
head_dim (`int`, *optional*, defaults to 256):
The attention head dimension.
hidden_activation (`str` or `function`, *optional*, defaults to `"gelu_pytorch_tanh"`):
The non-linear activation function (function or string) in the decoder. Will default to `"gelu_pytorch_tanh"`
if not specified. `"gelu_pytorch_tanh"` uses an approximation of the `"gelu"` activation function.
max_position_embeddings (`int`, *optional*, defaults to 8192):
The maximum sequence length that this model might ever be used with.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
rms_norm_eps (`float`, *optional*, defaults to 1e-06):
The epsilon used by the rms normalization layers.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models). Only
relevant if `config.is_decoder=True`.
pad_token_id (`int`, *optional*, defaults to 0):
Padding token id.
eos_token_id (`int`, *optional*, defaults to 1):
End of stream token id.
bos_token_id (`int`, *optional*, defaults to 2):
Beginning of stream token id.
tie_word_embeddings (`bool`, *optional*, defaults to `True`):
Whether to tie weight embeddings
rope_parameters (`RopeParameters`, *optional*):
Dictionary containing the configuration parameters for the RoPE embeddings. The dictionary should contain
a value for `rope_theta` and optionally parameters used for scaling in case you want to use RoPE
with longer `max_position_embeddings`.
attention_bias (`bool`, defaults to `False`, *optional*, defaults to `False`):
Whether to use a bias in the query, key, value and output projection layers during self-attention.
attention_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
query_pre_attn_scalar (`float`, *optional*, defaults to 256):
scaling factor used on the attention scores
sliding_window (`int`, *optional*, defaults to 4096):
in T5GemmaModule, every other layer uses sliding window attention. This is the size of the sliding window.
layer_types (`list`, *optional*):
Attention pattern for each layer.
final_logit_softcapping (`float`, *optional*, defaults to 30.0):
scaling factor when applying tanh softcapping on the logits.
attn_logit_softcapping (`float`, *optional*, defaults to 50.0):
scaling factor when applying tanh softcapping on the attention scores.
```python
>>> from transformers import T5GemmaModuleModel, T5GemmaModuleConfig
>>> # Initializing a T5GemmaModule t5_gemma_module-7b style configuration
>>> configuration = T5GemmaModuleConfig()
>>> # Initializing a model from the t5_gemma_module-7b style configuration
>>> model = T5GemmaModuleModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
def __init__(
self,
vocab_size: Optional[int] = 256000,
hidden_size: Optional[int] = 2304,
intermediate_size: Optional[int] = 9216,
num_hidden_layers: Optional[int] = 26,
num_attention_heads: Optional[int] = 8,
num_key_value_heads: Optional[int] = 4,
head_dim: Optional[int] = 256,
hidden_activation: Optional[str] = "gelu_pytorch_tanh",
max_position_embeddings: Optional[int] = 8192,
initializer_range: Optional[float] = 0.02,
rms_norm_eps: Optional[int] = 1e-6,
use_cache: Optional[bool] = True,
pad_token_id: Optional[int] = 0,
eos_token_id: Optional[int] = 1,
bos_token_id: Optional[int] = 2,
tie_word_embeddings: Optional[bool] = True,
rope_parameters: Optional[RopeParameters | dict[str, RopeParameters]] = None,
attention_bias: Optional[bool] = False,
attention_dropout: Optional[float] = 0.0,
query_pre_attn_scalar: Optional[int] = 256,
sliding_window: Optional[int] = 4096,
layer_types: Optional[list[str]] = None,
final_logit_softcapping: Optional[float] = 30.0,
attn_logit_softcapping: Optional[float] = 50.0,
**kwargs,
):
super().__init__(
vocab_size=vocab_size,
hidden_size=hidden_size,
intermediate_size=intermediate_size,
num_hidden_layers=num_hidden_layers,
num_attention_heads=num_attention_heads,
num_key_value_heads=num_key_value_heads,
head_dim=head_dim,
hidden_activation=hidden_activation,
max_position_embeddings=max_position_embeddings,
initializer_range=initializer_range,
rms_norm_eps=rms_norm_eps,
use_cache=use_cache,
pad_token_id=pad_token_id,
eos_token_id=eos_token_id,
bos_token_id=bos_token_id,
tie_word_embeddings=tie_word_embeddings,
rope_parameters=rope_parameters,
attention_bias=attention_bias,
attention_dropout=attention_dropout,
query_pre_attn_scalar=query_pre_attn_scalar,
sliding_window=sliding_window,
layer_types=layer_types,
final_logit_softcapping=final_logit_softcapping,
attn_logit_softcapping=attn_logit_softcapping,
**kwargs,
)
del self.use_bidirectional_attention
| T5GemmaModuleConfig |
python | justquick__django-activity-stream | actstream/drf/serializers.py | {
"start": 249,
"end": 2274
} | class ____(serializers.RelatedField):
"""
Expands related fields to use other Serializer. Similar to the AS1 JSON spec
"""
def to_representation(self, value):
return registered_serializers[value.__class__](value).data
DEFAULT_SERIALIZER = serializers.ModelSerializer
def serializer_factory(model_class):
"""
Returns a subclass of `ModelSerializer` for each model_class in the registry
"""
model_label = label(model_class).lower()
if model_label in DRF_SETTINGS['SERIALIZERS']:
return import_obj(DRF_SETTINGS['SERIALIZERS'][model_label])
model_fields = DRF_SETTINGS['MODEL_FIELDS'].get(model_label, '__all__')
meta_class = type('Meta', (), {'model': model_class, 'fields': model_fields})
return type(f'{model_class.__name__}Serializer', (DEFAULT_SERIALIZER,), {'Meta': meta_class})
def related_field_factory(model_class, queryset=None):
"""
Returns a subclass of `RelatedField` for each model_class in the registry
"""
if queryset is None:
queryset = model_class.objects.all()
related_field_class = serializers.PrimaryKeyRelatedField
kwargs = {'queryset': queryset}
if DRF_SETTINGS['HYPERLINK_FIELDS']:
related_field_class = serializers.HyperlinkedRelatedField
kwargs['view_name'] = f'{label(model_class)}-detail'
elif DRF_SETTINGS['EXPAND_FIELDS']:
related_field_class = ExpandRelatedField
field = type(f'{model_class.__name__}RelatedField', (related_field_class,), {})
return field(**kwargs)
def registry_factory(factory):
"""
Returns a mapping of the registry's model_class applied with the factory function
"""
return {model_class: factory(model_class) for model_class in registry}
def get_grf():
"""
Get a new `GenericRelatedField` instance for each use of the related field
"""
return GenericRelatedField(registry_factory(related_field_factory), read_only=True)
registered_serializers = registry_factory(serializer_factory)
| ExpandRelatedField |
python | python__mypy | mypy/checker.py | {
"start": 403043,
"end": 403932
} | class ____(BoolTypeQuery):
"""Find type components that are not valid for an inferred type.
These include <Erased> type, and any uninhabited types resulting from failed
(ambiguous) type inference.
"""
def __init__(self) -> None:
super().__init__(ANY_STRATEGY)
def visit_uninhabited_type(self, t: UninhabitedType) -> bool:
return t.ambiguous
def visit_erased_type(self, t: ErasedType) -> bool:
# This can happen inside a lambda.
return True
def visit_type_var(self, t: TypeVarType) -> bool:
# This is needed to prevent leaking into partial types during
# multi-step type inference.
return t.id.is_meta_var()
def visit_tuple_type(self, t: TupleType, /) -> bool:
# Exclude fallback to avoid bogus "need type annotation" errors
return self.query_types(t.items)
| InvalidInferredTypes |
python | great-expectations__great_expectations | contrib/experimental/great_expectations_experimental/expectations/expect_day_sum_to_be_close_to_equivalent_week_day_mean.py | {
"start": 624,
"end": 9442
} | class ____(QueryExpectation):
"""Expect the daily sums of the given column to be close to the average sums calculated 4 weeks back.
This metric expects daily sums of the given column, to be close to the average sums calculated 4 weeks back,
respective to the specific day of the week.
The expectation fails if the difference in percentage ((current_sum - average_sum) / average_sum) is more than the
threshold given by user (default value is 25%).
The threshold parameter should be given in fraction and not percent, i.e. for 25% define threshold = 0.25.
Keyword args:
- threshold (float; default = 0.25): threshold of difference between current and past weeks over which expectation fails
- weeks_back (int; default = 4): how many weeks back to compare the current metric with
"""
FOUR_PREVIOUS_WEEKS = [7, 14, 21, 28]
DAYS_AGO = {
3: TODAY - timedelta(days=3),
7: TODAY - timedelta(days=7),
14: TODAY - timedelta(days=14),
21: TODAY - timedelta(days=21),
28: TODAY - timedelta(days=28),
}
query = """
SELECT {date_column} as date_column, SUM({summed_column}) as column_sum_over_date
FROM {batch}
GROUP BY {date_column}
"""
run_date = TODAY
# Default values
default_kwarg_values = {
"result_format": "BASIC",
"catch_exceptions": False,
"meta": None,
"threshold": 0.25,
"query": query,
"weeks_back": 4,
}
examples = [
{
# INFO: column a - good counts - 3 rows for every day
"data": {
"date_column_a": generate_data_sample(
{
TODAY: 3,
DAYS_AGO[FOUR_PREVIOUS_WEEKS[0]]: 3,
DAYS_AGO[FOUR_PREVIOUS_WEEKS[1]]: 3,
DAYS_AGO[FOUR_PREVIOUS_WEEKS[2]]: 3,
DAYS_AGO[FOUR_PREVIOUS_WEEKS[3]]: 3,
}
),
"summed_column_a": generate_data_sample({1: 15}),
"date_column_b": generate_data_sample(
{
TODAY: 2,
DAYS_AGO[FOUR_PREVIOUS_WEEKS[0]]: 4,
DAYS_AGO[FOUR_PREVIOUS_WEEKS[1]]: 3,
DAYS_AGO[FOUR_PREVIOUS_WEEKS[2]]: 3,
DAYS_AGO[FOUR_PREVIOUS_WEEKS[3]]: 3,
}
),
"summed_column_b": generate_data_sample({1: 15}),
"summed_column_zero_avg": generate_data_sample({1: 3, 0: 12}),
"summed_column_zero_current": generate_data_sample({1: 3, 0: 12}),
"summed_column_zero_both": generate_data_sample({1: 3, 0: 12}),
},
# INFO: "column_b": [today, yesterday, yesterday, two_days_ago]},
"suppress_test_for": ["bigquery"],
"tests": [
{
"title": "positive_test",
"exact_match_out": False,
"include_in_gallery": True,
"in": {
"template_dict": {
"summed_column": "summed_column_a",
"date_column": "date_column_a",
},
"run_date": TODAY_STR,
"threshold": default_kwarg_values["threshold"],
"weeks_back": default_kwarg_values["weeks_back"],
},
"out": {"success": True},
},
{
"title": "negative test",
"exact_match_out": False,
"include_in_gallery": False,
"in": {
"template_dict": {
"summed_column": "summed_column_b",
"date_column": "date_column_b",
},
"run_date": TODAY_STR,
"threshold": default_kwarg_values["threshold"],
},
"out": {"success": False},
},
{
"title": "negative test",
"exact_match_out": False,
"include_in_gallery": False,
"in": {
"template_dict": {
"summed_column": "summed_column_zero_avg",
"date_column": "date_column_a",
},
"run_date": TODAY_STR,
"threshold": default_kwarg_values["threshold"],
},
"out": {"success": False},
},
{
"title": "negative test",
"exact_match_out": False,
"include_in_gallery": False,
"in": {
"template_dict": {
"summed_column": "summed_column_zero_current",
"date_column": "date_column_a",
},
"run_date": TODAY_STR,
"threshold": default_kwarg_values["threshold"],
},
"out": {"success": False},
},
{
"title": "negative test",
"exact_match_out": False,
"include_in_gallery": False,
"in": {
"template_dict": {
"summed_column": "summed_column_zero_both",
"date_column": "date_column_a",
},
"run_date": TODAY_STR,
"threshold": default_kwarg_values["threshold"],
},
"out": {"success": False},
},
],
}
]
metric_dependencies = ("query.template_values",)
success_keys = ("template_dict", "threshold", "query", "run_date", "weeks_back")
domain_keys = (
"template_dict",
"query",
)
library_metadata = {"tags": ["query-based"], "contributors": ["@itaise", "@hadasm"]}
def _validate(
self,
metrics: Dict,
runtime_configuration: dict = None,
execution_engine: ExecutionEngine = None,
):
success_kwargs = self._get_success_kwargs()
run_date: str = success_kwargs.get("run_date")
threshold: float = float(success_kwargs.get("threshold"))
weeks_back = success_kwargs.get("weeks_back")
days_back_list = [DAYS_IN_WEEK * week_index for week_index in range(1, weeks_back + 1)]
result_dict = get_results_dict(metrics)
yesterday_sum: int = result_dict[run_date]
diff_fraction = get_diff_fraction(yesterday_sum, result_dict, days_back_list)
if diff_fraction > threshold:
msg = (
f"The diff between yesterday's count and the avg. count ({diff_fraction:.0%}) exceeds the defined "
f"threshold ({threshold:.0%})"
)
success = False
else:
msg = (
f"The diff between yesterday's count ({yesterday_sum}) and the avg. count ({diff_fraction:.0%}) "
f"is below threshold"
)
success = True
return {"success": success, "result": {"details": msg}}
def get_results_dict(metrics: dict) -> dict:
metrics = convert_to_json_serializable(data=metrics)
result_list = metrics.get("query.template_values")
result_dict = {}
result_dict.update({i["date_column"]: i["column_sum_over_date"] for i in result_list})
return result_dict
def average_if_nonempty(list_: list):
return sum(list_) / len(list_) if len(list_) > 0 else 0
def get_diff_fraction(yesterday_sum: int, result_dict: dict, days_back_list: List[int]):
days_ago_dict = {days_ago: TODAY - timedelta(days=days_ago) for days_ago in days_back_list}
equivalent_previous_days: List[date] = list(days_ago_dict.values())
equivalent_previous_days_str: List[str] = [
datetime.strftime(i, date_format) for i in equivalent_previous_days
]
previous_days_sums: List[int] = [
result_dict[equiv_day] for equiv_day in equivalent_previous_days_str
]
avg_equivalent_previous_days_sum = average_if_nonempty(previous_days_sums)
absolute_diff = abs(yesterday_sum - avg_equivalent_previous_days_sum)
return (absolute_diff + 1) / (avg_equivalent_previous_days_sum + 1)
if __name__ == "__main__":
ExpectDaySumToBeCloseToEquivalentWeekDayMean().print_diagnostic_checklist()
| ExpectDaySumToBeCloseToEquivalentWeekDayMean |
python | numba__numba | numba/cuda/tests/cudapy/test_dispatcher.py | {
"start": 3652,
"end": 16983
} | class ____(CUDATestCase):
"""Most tests based on those in numba.tests.test_dispatcher."""
def test_coerce_input_types(self):
# Do not allow unsafe conversions if we can still compile other
# specializations.
c_add = cuda.jit(add_kernel)
# Using a complex128 allows us to represent any result produced by the
# test
r = np.zeros(1, dtype=np.complex128)
c_add[1, 1](r, 123, 456)
self.assertEqual(r[0], add(123, 456))
c_add[1, 1](r, 12.3, 45.6)
self.assertEqual(r[0], add(12.3, 45.6))
c_add[1, 1](r, 12.3, 45.6j)
self.assertEqual(r[0], add(12.3, 45.6j))
c_add[1, 1](r, 12300000000, 456)
self.assertEqual(r[0], add(12300000000, 456))
# Now force compilation of only a single specialization
c_add = cuda.jit('(i4[::1], i4, i4)')(add_kernel)
r = np.zeros(1, dtype=np.int32)
c_add[1, 1](r, 123, 456)
self.assertPreciseEqual(r[0], add(123, 456))
@skip_on_cudasim('Simulator ignores signature')
@unittest.expectedFailure
def test_coerce_input_types_unsafe(self):
# Implicit (unsafe) conversion of float to int, originally from
# test_coerce_input_types. This test presently fails with the CUDA
# Dispatcher because argument preparation is done by
# _Kernel._prepare_args, which is currently inflexible with respect to
# the types it can accept when preparing.
#
# This test is marked as xfail until future changes enable this
# behavior.
c_add = cuda.jit('(i4[::1], i4, i4)')(add_kernel)
r = np.zeros(1, dtype=np.int32)
c_add[1, 1](r, 12.3, 45.6)
self.assertPreciseEqual(r[0], add(12, 45))
@skip_on_cudasim('Simulator ignores signature')
def test_coerce_input_types_unsafe_complex(self):
# Implicit conversion of complex to int disallowed
c_add = cuda.jit('(i4[::1], i4, i4)')(add_kernel)
r = np.zeros(1, dtype=np.int32)
with self.assertRaises(TypeError):
c_add[1, 1](r, 12.3, 45.6j)
@skip_on_cudasim('Simulator does not track overloads')
def test_ambiguous_new_version(self):
"""Test compiling new version in an ambiguous case
"""
c_add = cuda.jit(add_kernel)
r = np.zeros(1, dtype=np.float64)
INT = 1
FLT = 1.5
c_add[1, 1](r, INT, FLT)
self.assertAlmostEqual(r[0], INT + FLT)
self.assertEqual(len(c_add.overloads), 1)
c_add[1, 1](r, FLT, INT)
self.assertAlmostEqual(r[0], FLT + INT)
self.assertEqual(len(c_add.overloads), 2)
c_add[1, 1](r, FLT, FLT)
self.assertAlmostEqual(r[0], FLT + FLT)
self.assertEqual(len(c_add.overloads), 3)
# The following call is ambiguous because (int, int) can resolve
# to (float, int) or (int, float) with equal weight.
c_add[1, 1](r, 1, 1)
self.assertAlmostEqual(r[0], INT + INT)
self.assertEqual(len(c_add.overloads), 4, "didn't compile a new "
"version")
@skip_on_cudasim("Simulator doesn't support concurrent kernels")
def test_lock(self):
"""
Test that (lazy) compiling from several threads at once doesn't
produce errors (see issue #908).
"""
errors = []
@cuda.jit
def foo(r, x):
r[0] = x + 1
def wrapper():
try:
r = np.zeros(1, dtype=np.int64)
foo[1, 1](r, 1)
self.assertEqual(r[0], 2)
except Exception as e:
errors.append(e)
threads = [threading.Thread(target=wrapper) for i in range(16)]
for t in threads:
t.start()
for t in threads:
t.join()
self.assertFalse(errors)
def _test_explicit_signatures(self, sigs):
f = cuda.jit(sigs)(add_kernel)
# Exact signature matches
r = np.zeros(1, dtype=np.int64)
f[1, 1](r, 1, 2)
self.assertPreciseEqual(r[0], 3)
r = np.zeros(1, dtype=np.float64)
f[1, 1](r, 1.5, 2.5)
self.assertPreciseEqual(r[0], 4.0)
if config.ENABLE_CUDASIM:
# Pass - we can't check for no conversion on the simulator.
return
# No conversion
with self.assertRaises(TypeError) as cm:
r = np.zeros(1, dtype=np.complex128)
f[1, 1](r, 1j, 1j)
self.assertIn("No matching definition", str(cm.exception))
self.assertEqual(len(f.overloads), 2, f.overloads)
def test_explicit_signatures_strings(self):
# Check with a list of strings for signatures
sigs = ["(int64[::1], int64, int64)",
"(float64[::1], float64, float64)"]
self._test_explicit_signatures(sigs)
def test_explicit_signatures_tuples(self):
# Check with a list of tuples of argument types for signatures
sigs = [(int64[::1], int64, int64), (float64[::1], float64, float64)]
self._test_explicit_signatures(sigs)
def test_explicit_signatures_signatures(self):
# Check with a list of Signature objects for signatures
sigs = [void(int64[::1], int64, int64),
void(float64[::1], float64, float64)]
self._test_explicit_signatures(sigs)
def test_explicit_signatures_mixed(self):
# Check when we mix types of signature objects in a list of signatures
# Tuple and string
sigs = [(int64[::1], int64, int64),
"(float64[::1], float64, float64)"]
self._test_explicit_signatures(sigs)
# Tuple and Signature object
sigs = [(int64[::1], int64, int64),
void(float64[::1], float64, float64)]
self._test_explicit_signatures(sigs)
# Signature object and string
sigs = [void(int64[::1], int64, int64),
"(float64[::1], float64, float64)"]
self._test_explicit_signatures(sigs)
def test_explicit_signatures_same_type_class(self):
# A more interesting one...
# (Note that the type of r is deliberately float64 in both cases so
# that dispatch is differentiated on the types of x and y only, to
# closely preserve the intent of the original test from
# numba.tests.test_dispatcher)
sigs = ["(float64[::1], float32, float32)",
"(float64[::1], float64, float64)"]
f = cuda.jit(sigs)(add_kernel)
r = np.zeros(1, dtype=np.float64)
f[1, 1](r, np.float32(1), np.float32(2**-25))
self.assertPreciseEqual(r[0], 1.0)
r = np.zeros(1, dtype=np.float64)
f[1, 1](r, 1, 2**-25)
self.assertPreciseEqual(r[0], 1.0000000298023224)
@skip_on_cudasim('No overload resolution in the simulator')
def test_explicit_signatures_ambiguous_resolution(self):
# Fail to resolve ambiguity between the two best overloads
# (Also deliberate float64[::1] for the first argument in all cases)
f = cuda.jit(["(float64[::1], float32, float64)",
"(float64[::1], float64, float32)",
"(float64[::1], int64, int64)"])(add_kernel)
with self.assertRaises(TypeError) as cm:
r = np.zeros(1, dtype=np.float64)
f[1, 1](r, 1.0, 2.0)
# The two best matches are output in the error message, as well
# as the actual argument types.
self.assertRegex(
str(cm.exception),
r"Ambiguous overloading for <function add_kernel [^>]*> "
r"\(Array\(float64, 1, 'C', False, aligned=True\), float64,"
r" float64\):\n"
r"\(Array\(float64, 1, 'C', False, aligned=True\), float32,"
r" float64\) -> none\n"
r"\(Array\(float64, 1, 'C', False, aligned=True\), float64,"
r" float32\) -> none"
)
# The integer signature is not part of the best matches
self.assertNotIn("int64", str(cm.exception))
@skip_on_cudasim('Simulator does not use _prepare_args')
@unittest.expectedFailure
def test_explicit_signatures_unsafe(self):
# These tests are from test_explicit_signatures, but have to be xfail
# at present because _prepare_args in the CUDA target cannot handle
# unsafe conversions of arguments.
f = cuda.jit("(int64[::1], int64, int64)")(add_kernel)
r = np.zeros(1, dtype=np.int64)
# Approximate match (unsafe conversion)
f[1, 1](r, 1.5, 2.5)
self.assertPreciseEqual(r[0], 3)
self.assertEqual(len(f.overloads), 1, f.overloads)
sigs = ["(int64[::1], int64, int64)",
"(float64[::1], float64, float64)"]
f = cuda.jit(sigs)(add_kernel)
r = np.zeros(1, dtype=np.float64)
# Approximate match (int32 -> float64 is a safe conversion)
f[1, 1](r, np.int32(1), 2.5)
self.assertPreciseEqual(r[0], 3.5)
def add_device_usecase(self, sigs):
# Generate a kernel that calls the add device function compiled with a
# given set of signatures
add_device = cuda.jit(sigs, device=True)(add)
@cuda.jit
def f(r, x, y):
r[0] = add_device(x, y)
return f
def test_explicit_signatures_device(self):
# Tests similar to test_explicit_signatures, but on a device function
# instead of a kernel
sigs = ["(int64, int64)", "(float64, float64)"]
f = self.add_device_usecase(sigs)
# Exact signature matches
r = np.zeros(1, dtype=np.int64)
f[1, 1](r, 1, 2)
self.assertPreciseEqual(r[0], 3)
r = np.zeros(1, dtype=np.float64)
f[1, 1](r, 1.5, 2.5)
self.assertPreciseEqual(r[0], 4.0)
if config.ENABLE_CUDASIM:
# Pass - we can't check for no conversion on the simulator.
return
# No conversion
with self.assertRaises(TypingError) as cm:
r = np.zeros(1, dtype=np.complex128)
f[1, 1](r, 1j, 1j)
msg = str(cm.exception)
self.assertIn("Invalid use of type", msg)
self.assertIn("with parameters (complex128, complex128)", msg)
self.assertEqual(len(f.overloads), 2, f.overloads)
def test_explicit_signatures_device_same_type_class(self):
# A more interesting one...
# (Note that the type of r is deliberately float64 in both cases so
# that dispatch is differentiated on the types of x and y only, to
# closely preserve the intent of the original test from
# numba.tests.test_dispatcher)
sigs = ["(float32, float32)", "(float64, float64)"]
f = self.add_device_usecase(sigs)
r = np.zeros(1, dtype=np.float64)
f[1, 1](r, np.float32(1), np.float32(2**-25))
self.assertPreciseEqual(r[0], 1.0)
r = np.zeros(1, dtype=np.float64)
f[1, 1](r, 1, 2**-25)
self.assertPreciseEqual(r[0], 1.0000000298023224)
def test_explicit_signatures_device_ambiguous(self):
# Ambiguity between the two best overloads resolves. This is somewhat
# surprising given that ambiguity is not permitted for dispatching
# overloads when launching a kernel, but seems to be the general
# behaviour of Numba (See Issue #8307:
# https://github.com/numba/numba/issues/8307).
sigs = ["(float32, float64)", "(float64, float32)", "(int64, int64)"]
f = self.add_device_usecase(sigs)
r = np.zeros(1, dtype=np.float64)
f[1, 1](r, 1.5, 2.5)
self.assertPreciseEqual(r[0], 4.0)
@skip_on_cudasim('CUDA Simulator does not force casting')
def test_explicit_signatures_device_unsafe(self):
# These tests are from test_explicit_signatures. The device function
# variant of these tests can succeed on CUDA because the compilation
# can handle unsafe casting (c.f. test_explicit_signatures_unsafe which
# has to xfail due to _prepare_args not supporting unsafe casting).
sigs = ["(int64, int64)"]
f = self.add_device_usecase(sigs)
# Approximate match (unsafe conversion)
r = np.zeros(1, dtype=np.int64)
f[1, 1](r, 1.5, 2.5)
self.assertPreciseEqual(r[0], 3)
self.assertEqual(len(f.overloads), 1, f.overloads)
sigs = ["(int64, int64)", "(float64, float64)"]
f = self.add_device_usecase(sigs)
# Approximate match (int32 -> float64 is a safe conversion)
r = np.zeros(1, dtype=np.float64)
f[1, 1](r, np.int32(1), 2.5)
self.assertPreciseEqual(r[0], 3.5)
def test_dispatcher_docstring(self):
# Ensure that CUDA-jitting a function preserves its docstring. See
# Issue #5902: https://github.com/numba/numba/issues/5902
@cuda.jit
def add_kernel(a, b):
"""Add two integers, kernel version"""
@cuda.jit(device=True)
def add_device(a, b):
"""Add two integers, device version"""
self.assertEqual("Add two integers, kernel version", add_kernel.__doc__)
self.assertEqual("Add two integers, device version", add_device.__doc__)
@skip_on_cudasim("CUDA simulator doesn't implement kernel properties")
| TestDispatcher |
python | great-expectations__great_expectations | great_expectations/execution_engine/sqlalchemy_batch_data.py | {
"start": 549,
"end": 19233
} | class ____(BatchData):
"""A class which represents a SQL alchemy batch, with properties including the construction of the batch itself
and several getters used to access various properties.""" # noqa: E501 # FIXME CoP
# Instantiating SqlAlchemyBatchData with table_name and schema_name
@overload
def __init__(
self,
execution_engine,
# Option 1
schema_name: Optional[str] = ...,
table_name: str = ...,
# Option 2
query: None = ...,
# Option 3
selectable: None = ...,
create_temp_table: bool = ...,
temp_table_schema_name: None = ...,
use_quoted_name: bool = ...,
source_schema_name: None = ...,
source_table_name: None = ...,
) -> None: ...
@overload
def __init__(
self,
execution_engine,
# Option 1
schema_name: None = ...,
table_name: None = ...,
# Option 2
query: str = ...,
# Option 3
selectable: None = ...,
create_temp_table: bool = ...,
temp_table_schema_name: Optional[str] = ...,
use_quoted_name: bool = ...,
source_schema_name: None = ...,
source_table_name: None = ...,
) -> None: ...
@overload
def __init__(
self,
execution_engine,
# Option 1
schema_name: None = ...,
table_name: None = ...,
# Option 2
query: None = ...,
# Option 3
selectable: Selectable = ...,
create_temp_table: bool = ...,
temp_table_schema_name: Optional[str] = ...,
use_quoted_name: bool = ...,
source_schema_name: Optional[str] = ...,
source_table_name: Optional[str] = ...,
) -> None: ...
def __init__( # noqa: PLR0913 # FIXME CoP
self,
execution_engine,
# Option 1
schema_name: Optional[str] = None,
table_name: Optional[str] = None,
# Option 2
query: Optional[str] = None,
# Option 3
selectable: Optional[Selectable] = None,
create_temp_table: bool = True,
temp_table_schema_name: Optional[str] = None,
use_quoted_name: bool = False,
source_schema_name: Optional[str] = None,
source_table_name: Optional[str] = None,
) -> None:
"""A Constructor used to initialize and SqlAlchemy Batch, create an id for it, and verify that all necessary
parameters have been provided. Builds a temporary table for the `query` if `create_temp_table=True`.
Args:
engine (SqlAlchemy Engine): \
A SqlAlchemy Engine or connection that will be used to access the data
schema_name (string or None): \
The name of the schema_name in which the databases lie
table_name (string or None): \
The name of the table that will be accessed. Either this parameter or the query parameter must be
specified. Default is 'None'.
query (string or None): \
A query string representing a domain, which will be used to create a temporary table
selectable (Sqlalchemy Selectable or None): \
A SqlAlchemy selectable representing a domain, which will be used to create a temporary table
create_temp_table (bool): \
When building the batch data object from a query, this flag determines whether a temporary table should
be created against which to validate data from the query. If False, a subselect statement will be used
in each validation.
temp_table_schema_name (str or None): \
The name of the schema in which a temporary table should be created. If None, the default schema will be
used if a temporary table is requested.
use_quoted_name (bool): \
If true, names should be quoted to preserve case sensitivity on databases that usually normalize them
source_table_name (str): \
For SqlAlchemyBatchData based on selectables, source_table_name provides the name of the table on which
the selectable is based. This is required for most kinds of table introspection (e.g. looking up column types)
source_schema_name (str): \
For SqlAlchemyBatchData based on selectables, source_schema_name provides the name of the schema on which
the selectable is based. This is required for most kinds of table introspection (e.g. looking up column types)
The query that will be executed against the DB can be determined in any of three ways:
1. Specify a `schema_name` and `table_name`. This will query the whole table as a record_set. If schema_name is None, then the default schema will be used.
2. Specify a `query`, which will be executed as-is to fetch the record_set. NOTE Abe 20201118 : This functionality is currently untested.
3. Specify a `selectable`, which will be to fetch the record_set. This is the primary path used by DataConnectors.
In the case of (2) and (3) you have the option to execute the query either as a temporary table, or as a subselect statement.
In general, temporary tables invite more optimization from the query engine itself. Subselect statements may sometimes be preferred, because they do not require write access on the database.
""" # noqa: E501 # FIXME CoP
super().__init__(execution_engine=execution_engine)
engine = execution_engine.engine
self._engine = engine
self._schema_name = schema_name
self._use_quoted_name = use_quoted_name
self._source_table_name = source_table_name
self._source_schema_name = source_schema_name
if sum(bool(x) for x in [table_name, query, selectable is not None]) != 1:
raise ValueError("Exactly one of table_name, query, or selectable must be specified") # noqa: TRY003 # FIXME CoP
elif (query and schema_name) or (selectable is not None and schema_name):
raise ValueError( # noqa: TRY003 # FIXME CoP
"schema_name can only be used with table_name. Use temp_table_schema_name to provide a target schema for creating a temporary table." # noqa: E501 # FIXME CoP
)
dialect_name: str = engine.dialect.name.lower()
try:
dialect = GXSqlDialect(dialect_name)
except ValueError:
dialect = GXSqlDialect.OTHER
self._dialect = dialect
if table_name:
self._selectable = self._generate_selectable_from_schema_name_and_table_name(
dialect=dialect,
use_quoted_name=use_quoted_name,
table_name=table_name,
schema_name=schema_name,
)
elif query:
self._selectable = self._generate_selectable_from_query( # type: ignore[call-overload] # https://github.com/python/mypy/issues/14764
query, dialect, create_temp_table, temp_table_schema_name
)
else:
self._selectable = self._generate_selectable_from_selectable(
selectable, dialect, create_temp_table, temp_table_schema_name
)
@property
def dialect(self) -> GXSqlDialect:
return self._dialect
@property
def sql_engine_dialect(self) -> sqlalchemy.DefaultDialect:
"""Returns the Batches' current engine dialect"""
return self._engine.dialect
@property
def source_table_name(self):
return self._source_table_name
@property
def source_schema_name(self):
return self._source_schema_name
@property
def selectable(self):
return self._selectable
@property
def use_quoted_name(self):
return self._use_quoted_name
def _create_temporary_table( # noqa: C901, PLR0912 # FIXME CoP
self,
dialect: GXSqlDialect,
query: str,
temp_table_schema_name: str | None = None,
) -> Tuple[str, str]:
"""
Create Temporary table based on sql query. This will be used as a basis for executing expectations.
:param query:
""" # noqa: E501 # FIXME CoP
temp_table_name = generate_temporary_table_name()
# mssql expects all temporary table names to have a prefix '#'
if dialect == GXSqlDialect.MSSQL:
temp_table_name = f"#{temp_table_name}"
dialect = self.dialect
stmt: str = ""
# dialects that support temp schemas
if temp_table_schema_name is not None and dialect in [
GXSqlDialect.BIGQUERY,
GXSqlDialect.SNOWFLAKE,
GXSqlDialect.VERTICA,
]:
temp_table_name = f"{temp_table_schema_name}.{temp_table_name}"
if dialect == GXSqlDialect.BIGQUERY:
# BigQuery Table is created using with an expiration of 24 hours using Google's Data Definition Language # noqa: E501 # FIXME CoP
# https://stackoverflow.com/questions/20673986/how-to-create-temporary-table-in-google-bigquery
stmt = f"""CREATE OR REPLACE TABLE `{temp_table_name}`
OPTIONS(
expiration_timestamp=TIMESTAMP_ADD(
CURRENT_TIMESTAMP(), INTERVAL 24 HOUR)
)
AS {query}"""
elif dialect == GXSqlDialect.DATABRICKS:
stmt = f"CREATE TEMPORARY VIEW `{temp_table_name}` AS {query}"
elif dialect == GXSqlDialect.DREMIO:
stmt = f"CREATE OR REPLACE VDS {temp_table_name} AS {query}"
elif dialect == GXSqlDialect.SNOWFLAKE:
stmt = f"CREATE OR REPLACE TEMPORARY TABLE {temp_table_name} AS {query}"
elif dialect == GXSqlDialect.MYSQL:
stmt = f"CREATE TEMPORARY TABLE {temp_table_name} AS {query}"
elif dialect == GXSqlDialect.HIVE:
stmt = f"CREATE TEMPORARY TABLE `{temp_table_name}` AS {query}"
elif dialect == GXSqlDialect.MSSQL:
# Insert "into #{temp_table_name}" in the custom sql query right before the "from" clause # noqa: E501 # FIXME CoP
# Partition is case-sensitive so detect case.
# Note: transforming query to uppercase/lowercase has unintended consequences (i.e.,
# changing column names), so this is not an option!
# noinspection PyUnresolvedReferences
if isinstance(query, sa.dialects.mssql.base.MSSQLCompiler):
query = query.string # extracting string from MSSQLCompiler object
if "from" in query:
strsep = "from"
else:
strsep = "FROM"
querymod = query.split(strsep, maxsplit=1)
stmt = f"{querymod[0]}into {{temp_table_name}} from{querymod[1]}".format(
temp_table_name=temp_table_name
)
# TODO: <WILL> logger.warning is emitted in situations where a permanent TABLE is created in _create_temporary_table() # noqa: E501 # FIXME CoP
# Similar message may be needed in the future for Trino backend.
elif dialect in (GXSqlDialect.TRINO, GXSqlDialect.CLICKHOUSE):
logger.warning(
f"GX has created permanent view {temp_table_name}"
" as part of processing SqlAlchemyBatchData, which usually creates a TEMP TABLE."
)
stmt = f"CREATE TABLE {temp_table_name} AS {query}"
elif dialect == GXSqlDialect.AWSATHENA:
logger.warning(
f"GX has created permanent TABLE {temp_table_name}"
" as part of processing SqlAlchemyBatchData, which usually creates a TEMP TABLE."
)
stmt = f"CREATE TABLE {temp_table_name} AS {query}"
# Please note that Teradata is currently experimental (as of 0.13.43)
elif dialect == GXSqlDialect.TERADATASQL:
stmt = (
f'CREATE VOLATILE TABLE "{temp_table_name}" AS ({query})'
" WITH DATA NO PRIMARY INDEX ON COMMIT PRESERVE ROWS"
)
elif dialect == GXSqlDialect.VERTICA:
stmt = f"CREATE TEMPORARY TABLE {temp_table_name} ON COMMIT PRESERVE ROWS AS {query}"
else:
stmt = f'CREATE TEMPORARY TABLE "{temp_table_name}" AS {query}'
if dialect == GXSqlDialect.ORACLE:
# oracle 18c introduced PRIVATE temp tables which are transient objects
stmt_1 = (
f"CREATE PRIVATE TEMPORARY TABLE {temp_table_name}"
f" ON COMMIT PRESERVE DEFINITION AS {query}"
)
# prior to oracle 18c only GLOBAL temp tables existed and only the data is transient
# this means an empty table will persist after the db session
stmt_2 = (
f"CREATE GLOBAL TEMPORARY TABLE {temp_table_name}"
f" ON COMMIT PRESERVE ROWS AS {query}"
)
try:
self.execution_engine.execute_query_in_transaction(sa.text(stmt_1))
except sqlalchemy.DatabaseError:
self.execution_engine.execute_query_in_transaction(sa.text(stmt_2))
else:
self.execution_engine.execute_query_in_transaction(sa.text(stmt))
return (stmt, temp_table_name)
def _generate_selectable_from_schema_name_and_table_name(
self,
dialect: GXSqlDialect,
use_quoted_name: bool,
table_name: str,
schema_name: Optional[str] = None,
) -> sqlalchemy.Table:
"""Helper method to generate selectable using schema and table name
Args:
dialect (GXSqlDialect): Needed to check for BigQuery, which needs to be handled differently.
use_quoted_name (bool): To be passed to sqlalchemy.
table_name (str): Table name to build selectable from.
schema_name (Optional[str], optional): Optional schema name. Defaults to None.
Returns:
sqlalchemy.Table: SqlAlchemy Table that is Selectable.
""" # noqa: E501 # FIXME CoP
if use_quoted_name:
table_name = sqlalchemy.quoted_name(table_name, quote=True)
if dialect == GXSqlDialect.BIGQUERY:
if schema_name is not None:
logger.warning(
"schema_name should not be used when passing a table_name for biquery. Instead, include the schema name in the table_name string." # noqa: E501 # FIXME CoP
)
# In BigQuery the table name is already qualified with its schema name
return sa.Table(
table_name,
sa.MetaData(),
schema=None,
)
return sa.Table(
table_name,
sa.MetaData(),
schema=schema_name,
)
@overload
def _generate_selectable_from_query(
self,
query: str,
dialect: GXSqlDialect,
create_temp_table: Literal[True],
temp_table_schema_name: Optional[str] = ...,
) -> sqlalchemy.Table: ...
@overload
def _generate_selectable_from_query(
self,
query: str,
dialect: GXSqlDialect,
create_temp_table: Literal[False],
temp_table_schema_name: Optional[str] = ...,
) -> sqlalchemy.TextClause: ...
def _generate_selectable_from_query(
self,
query: str,
dialect: GXSqlDialect,
create_temp_table: bool,
temp_table_schema_name: Optional[str] = None,
) -> sqlalchemy.Table | sqlalchemy.TextClause:
"""Helper method to generate Selectable from query string.
Args:
query (str): query passed in as RuntimeBatchRequest.
dialect (GXSqlDialect): Needed for _create_temporary_table, since different backends name temp_tables differently.
create_temp_table (bool): Should we create a temp_table? If not a `TextClause` will be returned instead of a Table.
temp_table_schema_name (Optional[str], optional): Optional string for temp_table schema. Defaults to None.
Returns:
sqlalchemy.Table: SqlAlchemy Table that is Selectable or a TextClause.
""" # noqa: E501 # FIXME CoP
if not create_temp_table:
return sa.text(query)
_, temp_table_name = self._create_temporary_table(
dialect=dialect,
query=query,
temp_table_schema_name=temp_table_schema_name,
)
return sa.Table(
temp_table_name,
sa.MetaData(),
schema=temp_table_schema_name,
)
def _generate_selectable_from_selectable(
self,
selectable,
dialect: GXSqlDialect,
create_temp_table: bool,
temp_table_schema_name: Optional[str] = None,
) -> sqlalchemy.Table:
"""Helper method to generate Selectable from Selectable that is passed into __init__.
This method is needed to either turn the passed-in Selectable as an alias, or to create a temp_table that refers to it.
Args:
selectable: selectable that is passed into SqlAlchemyBatchData's init method. It may contain sampling and partitioning directives.
dialect (GXSqlDialect): Needed for _create_temporary_table, since different backends name temp_tables differently.
create_temp_table (bool): Should we create a temp_table?
temp_table_schema_name (Optional[str], optional): Optional string for temp_table schema. Defaults to None.
Returns:
sqlalchemy.Table: SqlAlchemy Table that is Selectable.
""" # noqa: E501 # FIXME CoP
if not create_temp_table:
return selectable.alias()
if dialect in [GXSqlDialect.ORACLE, GXSqlDialect.MSSQL] and isinstance(selectable, str):
# oracle, mssql query could already be passed as a string
query = selectable
else:
# compile selectable to sql statement
query = selectable.compile(
dialect=self.sql_engine_dialect,
compile_kwargs={"literal_binds": True},
)
_, temp_table_name = self._create_temporary_table(
dialect=dialect,
query=query,
temp_table_schema_name=temp_table_schema_name,
)
return sa.Table(
temp_table_name,
sa.MetaData(),
schema=temp_table_schema_name,
)
| SqlAlchemyBatchData |
python | google__pytype | pytype/tests/test_pickle1.py | {
"start": 197,
"end": 7187
} | class ____(test_base.BaseTest):
"""Tests for loading and saving pickled files."""
def _verifyDeps(self, module, immediate_deps, late_deps):
if isinstance(module, bytes):
data = pickle_utils.DecodeAst(module)
self.assertCountEqual(dict(data.dependencies), immediate_deps)
self.assertCountEqual(dict(data.late_dependencies), late_deps)
else:
c = visitors.CollectDependencies()
module.Visit(c)
self.assertCountEqual(c.dependencies, immediate_deps)
self.assertCountEqual(c.late_dependencies, late_deps)
def test_type(self):
pickled = self.Infer(
"""
x = type
""",
pickle=True,
module_name="foo",
)
with test_utils.Tempdir() as d:
u = d.create_file("u.pickled", pickled)
ty = self.Infer(
"""
import u
r = u.x
""",
pythonpath=[""],
imports_map={"u": u},
)
self.assertTypesMatchPytd(
ty,
"""
import u
from typing import Type
r = ... # type: Type[type]
""",
)
def test_copy_class_into_output(self):
pickled_foo = self.Infer(
"""
import datetime
a = 42
timedelta = datetime.timedelta # copy class
""",
pickle=True,
module_name="foo",
)
self._verifyDeps(pickled_foo, ["builtins"], ["datetime"])
with test_utils.Tempdir() as d:
foo = d.create_file("foo.pickled", pickled_foo)
pickled_bar = self.Infer(
"""
import foo
timedelta = foo.timedelta # copy class
""",
pickle=True,
pythonpath=[""],
imports_map={"foo": foo},
module_name="bar",
)
self._verifyDeps(pickled_bar, ["builtins"], ["datetime"])
bar = d.create_file("bar.pickled", pickled_bar)
ty = self.Infer(
"""
import bar
r = bar.timedelta(0)
""",
pythonpath=[""],
imports_map={"foo": foo, "bar": bar},
)
self._verifyDeps(ty, ["datetime"], [])
self.assertTypesMatchPytd(
ty,
"""
import datetime
import bar
r = ... # type: datetime.timedelta
""",
)
def test_optimize_on_late_types(self):
with test_utils.Tempdir() as d:
pickled_foo = self.Infer(
"""
class X: pass
""",
pickle=True,
module_name="foo",
)
self._verifyDeps(pickled_foo, ["builtins"], [])
foo = d.create_file("foo.pickled", pickled_foo)
pickled_bar = self.Infer(
"""
import foo
def f():
return foo.X()
""",
pickle=True,
pythonpath=[""],
imports_map={"foo": foo},
module_name="bar",
)
bar = d.create_file("bar.pickled", pickled_bar)
self._verifyDeps(pickled_bar, [], ["foo"])
self.Infer(
"""
import bar
f = bar.f
""",
imports_map={"foo": foo, "bar": bar},
)
def test_file_change(self):
with test_utils.Tempdir() as d:
pickled_xy = self.Infer(
"""
class X: pass
class Y: pass
""",
pickle=True,
module_name="foo",
)
pickled_x = self.Infer(
"""
class X: pass
""",
pickle=True,
module_name="foo",
)
foo = d.create_file("foo.pickled", pickled_xy)
pickled_bar = self.Infer(
"""
import foo
class A(foo.X): pass
class B(foo.Y): pass
""",
pickle=True,
imports_map={"foo": foo},
module_name="bar",
)
self._verifyDeps(pickled_bar, [], ["foo"])
bar = d.create_file("bar.pickled", pickled_bar)
# Now, replace the old foo.pickled with a version that doesn't have Y
# anymore.
foo = d.create_file("foo.pickled", pickled_x)
self.Infer(
"""
import bar
a = bar.A()
b = bar.B()
""",
imports_map={"foo": foo, "bar": bar},
)
# Also try deleting the file.
d.delete_file("foo.pickled")
self.Infer(
"""
import bar
a = bar.A()
b = bar.B()
""",
imports_map={"foo": foo, "bar": bar},
)
def test_file_rename(self):
with test_utils.Tempdir() as d:
pickled_other_foo = self.Infer(
"""
class Foo: pass
""",
pickle=True,
module_name="bar",
)
other_foo = d.create_file("empty.pickled", pickled_other_foo)
pickled_foo = self.Infer(
"""
class Foo:
def __init__(self): pass
x = Foo()
""",
pickle=True,
module_name="foo",
)
foo = d.create_file("foo.pickled", pickled_foo)
self.Infer(
"""
import bar
bar.Foo()
""",
pickle=True,
imports_map={"bar": foo, "foo": other_foo}, # rename to "bar"
module_name="baz",
)
def test_optimize(self):
with test_utils.Tempdir() as d:
pickled_foo = self._PickleSource(
"""
import UserDict
class Foo: ...
@overload
def f(self, x: Foo, y: UserDict.UserDict): ...
@overload
def f(self, x: UserDict.UserDict, y: Foo): ...
""",
module_name="foo",
)
self._verifyDeps(pickled_foo, ["builtins", "foo"], ["UserDict"])
foo = d.create_file("foo.pickled", pickled_foo)
self.assertNoCrash(
self.Infer,
"""
import foo
class Bar:
f = foo.f
""",
imports_map={"foo": foo},
module_name="bar",
)
def test_function_type(self):
self.ConfigureOptions(
module_name="bar", pythonpath=[""], use_pickled_files=True
)
pickled_foo = self._PickleSource(
"""
import UserDict
def f(x: UserDict.UserDict) -> None: ...
""",
module_name="foo",
)
with test_utils.Tempdir() as d:
foo = d.create_file("foo.pickled", pickled_foo)
self.options.tweak(imports_map={"foo": foo})
pickled_bar = self._PickleSource(
"""
from foo import f # Alias(name="f", type=Function("foo.f", ...))
""",
module_name="bar",
)
bar = d.create_file("bar.pickled", pickled_bar)
self.assertNoCrash(
self.Infer,
"""
import bar
bar.f(42)
""",
imports_map={"foo": foo, "bar": bar},
module_name="baz",
)
def test_class_decorator(self):
foo = """
from typing_extensions import final
@final
class A:
def f(self): ...
"""
with self.DepTree([("foo.py", foo, {"pickle": True})]):
self.CheckWithErrors("""
import foo
class B(foo.A): # final-error
pass
""")
if __name__ == "__main__":
test_base.main()
| PickleTest |
python | pytorch__pytorch | test/inductor/test_ordered_set.py | {
"start": 43867,
"end": 45309
} | class ____(TestCase):
case2method = {
"<=": "issubset",
">=": "issuperset",
}
reverse = {
"==": "==",
"!=": "!=",
"<": ">",
">": "<",
"<=": ">=",
">=": "<=",
}
def test_issubset(self):
if type(self) is TestSubsets:
raise unittest.SkipTest("Only meant to be run as subclass")
x = self.left
y = self.right
for case in "!=", "==", "<", "<=", ">", ">=":
expected = case in self.cases
# Test the binary infix spelling.
result = eval("x" + case + "y", locals())
self.assertEqual(result, expected)
# Test the "friendly" method-name spelling, if one exists.
if case in TestSubsets.case2method:
method = getattr(x, TestSubsets.case2method[case])
result = method(y)
self.assertEqual(result, expected)
# Now do the same for the operands reversed.
rcase = TestSubsets.reverse[case]
result = eval("y" + rcase + "x", locals())
self.assertEqual(result, expected)
if rcase in TestSubsets.case2method:
method = getattr(y, TestSubsets.case2method[rcase])
result = method(x)
self.assertEqual(result, expected)
# ------------------------------------------------------------------------------
| TestSubsets |
python | bokeh__bokeh | src/bokeh/models/tickers.py | {
"start": 6179,
"end": 7194
} | class ____(ContinuousTicker):
''' Generate "nice" round ticks at any magnitude.
Creates ticks that are "base" multiples of a set of given
mantissas. For example, with ``base=10`` and
``mantissas=[1, 2, 5]``, the ticker will generate the sequence::
..., 0.1, 0.2, 0.5, 1, 2, 5, 10, 20, 50, 100, ...
'''
# explicit __init__ to support Init signatures
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
base = Float(10.0, help="""
The multiplier to use for scaling mantissas.
""")
mantissas = Seq(Float, default=[1, 2, 5], help="""
The acceptable list numbers to generate multiples of.
""")
min_interval = Float(0.0, help="""
The smallest allowable interval between two adjacent ticks.
""")
max_interval = Nullable(Float, help="""
The largest allowable interval between two adjacent ticks.
.. note::
To specify an unbounded interval, set to ``None``.
""")
| AdaptiveTicker |
python | apache__airflow | providers/keycloak/tests/unit/keycloak/auth_manager/routes/test_token.py | {
"start": 978,
"end": 2251
} | class ____:
token = "token"
token_body_dict = {"username": "username", "password": "password"}
@conf_vars(
{
("api_auth", "jwt_expiration_time"): "10",
}
)
@patch("airflow.providers.keycloak.auth_manager.routes.token.create_token_for")
def test_create_token(self, mock_create_token_for, client):
mock_create_token_for.return_value = self.token
response = client.post(
AUTH_MANAGER_FASTAPI_APP_PREFIX + "/token",
json=self.token_body_dict,
)
assert response.status_code == 201
assert response.json() == {"access_token": self.token}
@conf_vars(
{
("api_auth", "jwt_cli_expiration_time"): "10",
("api_auth", "jwt_expiration_time"): "10",
}
)
@patch("airflow.providers.keycloak.auth_manager.routes.token.create_token_for")
def test_create_token_cli(self, mock_create_token_for, client):
mock_create_token_for.return_value = self.token
response = client.post(
AUTH_MANAGER_FASTAPI_APP_PREFIX + "/token/cli",
json=self.token_body_dict,
)
assert response.status_code == 201
assert response.json() == {"access_token": self.token}
| TestTokenRouter |
python | run-llama__llama_index | llama-index-integrations/tools/llama-index-tools-artifact-editor/tests/test_artifact_editor.py | {
"start": 603,
"end": 16483
} | class ____(BaseModel):
"""Simple model for basic testing."""
value: str
number: Optional[int] = None
optional_number: Optional[int] = None
@pytest.fixture
def editor():
return ArtifactEditorToolSpec(Person)
@pytest.fixture
def simple_editor():
return ArtifactEditorToolSpec(SimpleModel)
def test_create_artifact(editor: ArtifactEditorToolSpec):
"""Test creating an initial artifact."""
result = editor.create_artifact(
name="John Doe", age=30, email="john@example.com", tags=["developer", "python"]
)
expected = {
"name": "John Doe",
"age": 30,
"email": "john@example.com",
"tags": ["developer", "python"],
"address": None,
}
assert result == expected
assert editor.get_current_artifact() == expected
def test_create_artifact_with_nested_object(editor: ArtifactEditorToolSpec):
"""Test creating artifact with nested objects."""
address_data = {
"street": "123 Main St",
"city": "Springfield",
"zipcode": "12345",
"country": None,
}
result = editor.create_artifact(name="Jane Doe", age=25, address=address_data)
assert result["address"] == address_data
assert isinstance(editor.current_artifact.address, Address)
def test_get_current_artifact(editor: ArtifactEditorToolSpec):
"""Test getting the current artifact."""
# Test when no artifact exists
assert editor.get_current_artifact() is None
# Create an artifact and test retrieval
editor.create_artifact(name="Test User", age=20)
result = editor.get_current_artifact()
expected = {
"name": "Test User",
"age": 20,
"email": None,
"tags": [],
"address": None,
}
assert result == expected
assert editor.get_current_artifact() == expected
def test_apply_patch_replace_operation(editor: ArtifactEditorToolSpec):
"""Test applying replace operations."""
editor.create_artifact(name="John", age=30)
patch = JsonPatch(
operations=[
PatchOperation(op="replace", path="/name", value="Jane"),
PatchOperation(op="replace", path="/age", value=25),
]
)
result = editor.apply_patch(patch)
assert result["name"] == "Jane"
assert result["age"] == 25
assert editor.get_current_artifact() == result
def test_apply_patch_add_operation(editor: ArtifactEditorToolSpec):
"""Test applying add operations."""
editor.create_artifact(name="John", age=30, tags=["python"])
patch = JsonPatch(
operations=[
PatchOperation(op="add", path="/email", value="john@example.com"),
PatchOperation(op="add", path="/tags/1", value="developer"),
PatchOperation(op="add", path="/tags/-", value="expert"), # Append to array
]
)
result = editor.apply_patch(patch)
assert result["email"] == "john@example.com"
assert result["tags"] == ["python", "developer", "expert"]
assert editor.get_current_artifact() == result
def test_apply_patch_remove_operation(editor: ArtifactEditorToolSpec):
"""Test applying remove operations."""
editor.create_artifact(
name="John",
age=30,
email="john@example.com",
tags=["python", "developer", "expert"],
)
patch = JsonPatch(
operations=[
PatchOperation(op="remove", path="/email"),
PatchOperation(op="remove", path="/tags/1"), # Remove "developer"
]
)
result = editor.apply_patch(patch)
assert result["email"] is None
assert result["tags"] == ["python", "expert"]
def test_apply_patch_move_operation(simple_editor: ArtifactEditorToolSpec):
"""Test applying move operations."""
simple_editor.create_artifact(value="test", number=42)
patch = JsonPatch(
operations=[
PatchOperation(op="move", path="/optional_number", from_path="/number")
]
)
result = simple_editor.apply_patch(patch)
# Note: This test assumes we're moving the value, not the key
# The actual behavior depends on the implementation details
assert result["number"] is None
assert result["optional_number"] == 42
def test_apply_patch_copy_operation(editor: ArtifactEditorToolSpec):
"""Test applying copy operations."""
editor.create_artifact(name="John", age=30)
patch = JsonPatch(
operations=[PatchOperation(op="copy", path="/email", from_path="/name")]
)
result = editor.apply_patch(patch)
assert result["email"] == "John"
assert result["name"] == "John" # Original should still exist
def test_apply_patch_nested_paths(editor: ArtifactEditorToolSpec):
"""Test operations on nested object paths."""
address_data = {"street": "123 Main St", "city": "Springfield", "zipcode": "12345"}
editor.create_artifact(name="John", age=30, address=address_data)
patch = JsonPatch(
operations=[
PatchOperation(op="replace", path="/address/city", value="New York"),
PatchOperation(op="add", path="/address/country", value="USA"),
]
)
result = editor.apply_patch(patch)
assert result["address"]["city"] == "New York"
assert result["address"]["country"] == "USA"
def test_apply_patch_array_operations(editor: ArtifactEditorToolSpec):
"""Test various array operations."""
editor.create_artifact(name="John", age=30, tags=["python", "java", "go"])
patch = JsonPatch(
operations=[
PatchOperation(op="replace", path="/tags/1", value="javascript"),
PatchOperation(op="add", path="/tags/0", value="rust"),
PatchOperation(
op="remove", path="/tags/3"
), # Remove "java" (now at index 3)
]
)
result = editor.apply_patch(patch)
# Expected: ["rust", "python", "javascript", "go"]
assert "rust" in result["tags"]
assert "javascript" in result["tags"]
def test_path_parsing():
"""Test path parsing functionality."""
editor = ArtifactEditorToolSpec(Person)
# Test basic path parsing
assert editor._parse_path("/") == []
assert editor._parse_path("/name") == ["name"]
assert editor._parse_path("/tags/0") == ["tags", 0]
assert editor._parse_path("/address/street") == ["address", "street"]
# Test escaped characters
assert editor._parse_path("/field~0name") == ["field~name"]
assert editor._parse_path("/field~1name") == ["field/name"]
def test_invalid_path_format(editor: ArtifactEditorToolSpec):
"""Test error handling for invalid path formats."""
editor.create_artifact(name="John", age=30)
patch = JsonPatch(
operations=[PatchOperation(op="replace", path="invalid_path", value="test")]
)
with pytest.raises(ValueError, match="Path must start with"):
editor.apply_patch(patch)
def test_nonexistent_path(editor: ArtifactEditorToolSpec):
"""Test error handling for nonexistent paths."""
editor.create_artifact(name="John", age=30)
patch = JsonPatch(
operations=[PatchOperation(op="replace", path="/nonexistent", value="test")]
)
with pytest.raises(ValueError):
editor.apply_patch(patch)
def test_array_index_out_of_range(editor: ArtifactEditorToolSpec):
"""Test error handling for array index out of range."""
editor.create_artifact(name="John", age=30, tags=["python"])
patch = JsonPatch(
operations=[PatchOperation(op="replace", path="/tags/5", value="test")]
)
with pytest.raises(ValueError, match="Failed to apply operation"):
editor.apply_patch(patch)
def test_invalid_operation_type(editor: ArtifactEditorToolSpec):
"""Test error handling for invalid operation types."""
editor.create_artifact(name="John", age=30)
patch = JsonPatch(
operations=[PatchOperation(op="invalid_op", path="/name", value="test")]
)
with pytest.raises(ValueError, match="Unknown operation"):
editor.apply_patch(patch)
def test_move_without_from_path(editor: ArtifactEditorToolSpec):
"""Test error handling for move operation without from_path."""
editor.create_artifact(name="John", age=30)
patch = JsonPatch(
operations=[
PatchOperation(op="move", path="/name", value="test") # Missing from_path
]
)
with pytest.raises(ValueError, match="'move' operation requires 'from_path'"):
editor.apply_patch(patch)
def test_copy_without_from_path(editor: ArtifactEditorToolSpec):
"""Test error handling for copy operation without from_path."""
editor.create_artifact(name="John", age=30)
patch = JsonPatch(
operations=[
PatchOperation(op="copy", path="/email", value="test") # Missing from_path
]
)
with pytest.raises(ValueError, match="'copy' operation requires 'from_path'"):
editor.apply_patch(patch)
def test_patch_validation_error(editor: ArtifactEditorToolSpec):
"""Test error handling when patch results in invalid model."""
editor.create_artifact(name="John", age=30)
# Try to set age to a string, which should violate the model
patch = JsonPatch(
operations=[
PatchOperation(op="replace", path="/name", value=None) # Required field
]
)
with pytest.raises(ValueError, match="Patch resulted in invalid"):
editor.apply_patch(patch)
def test_patch_from_dict(editor: ArtifactEditorToolSpec):
"""Test applying patch from dictionary format."""
editor.create_artifact(name="John", age=30)
patch_dict = {"operations": [{"op": "replace", "path": "/name", "value": "Jane"}]}
result = editor.apply_patch(patch_dict)
assert result["name"] == "Jane"
def test_patch_from_json_string(editor: ArtifactEditorToolSpec):
"""Test applying patch from JSON string format."""
editor.create_artifact(name="John", age=30)
patch_json = '{"operations": [{"op": "replace", "path": "/name", "value": "Jane"}]}'
result = editor.apply_patch(patch_json)
assert result["name"] == "Jane"
def test_to_tool_list(editor: ArtifactEditorToolSpec):
"""Test converting to tool list includes all expected tools."""
tools = editor.to_tool_list()
# Should have 3 tools: apply_patch, get_current_artifact, create_artifact
assert len(tools) == 3
tool_names = [tool.metadata.name for tool in tools]
assert "apply_patch" in tool_names
assert "get_current_artifact" in tool_names
assert "create_artifact" in tool_names
def test_no_current_artifact_apply_patch(editor: ArtifactEditorToolSpec):
"""Test error when trying to apply patch without current artifact."""
patch = JsonPatch(
operations=[PatchOperation(op="replace", path="/name", value="Jane")]
)
with pytest.raises(AttributeError):
editor.apply_patch(patch)
def test_complex_nested_operations(editor: ArtifactEditorToolSpec):
"""Test complex operations on deeply nested structures."""
complex_data = {
"name": "John",
"age": 30,
"address": {"street": "123 Main St", "city": "Springfield", "zipcode": "12345"},
"tags": ["python", "developer"],
}
editor.create_artifact(**complex_data)
patch = JsonPatch(
operations=[
PatchOperation(op="replace", path="/address/street", value="456 Oak Ave"),
PatchOperation(op="add", path="/tags/-", value="senior"),
PatchOperation(op="remove", path="/tags/0"), # Remove "python"
]
)
result = editor.apply_patch(patch)
assert result["address"]["street"] == "456 Oak Ave"
assert "senior" in result["tags"]
assert "python" not in result["tags"]
assert result["age"] == 30
def test_set_invalid_field_path(editor: ArtifactEditorToolSpec):
"""Test setting a field that doesn't exist in the Pydantic model schema."""
editor.create_artifact(name="John", age=30)
# Try to add a field that doesn't exist in the Person model
patch = JsonPatch(
operations=[PatchOperation(op="add", path="/invalid_field", value="test")]
)
# This should raise an error since invalid_field is not in the Person model
with pytest.raises(ValueError, match="Invalid field 'invalid_field'"):
editor.apply_patch(patch)
def test_set_invalid_nested_field_path(editor: ArtifactEditorToolSpec):
"""Test setting a nested field that doesn't exist in the Pydantic model schema."""
address_data = {"street": "123 Main St", "city": "Springfield", "zipcode": "12345"}
editor.create_artifact(name="John", age=30, address=address_data)
# Try to add a field that doesn't exist in the Address model
patch = JsonPatch(
operations=[
PatchOperation(op="add", path="/address/invalid_nested_field", value="test")
]
)
# This should raise an error since invalid_nested_field is not in the Address model
with pytest.raises(ValueError, match="Invalid field 'invalid_nested_field'"):
editor.apply_patch(patch)
def test_valid_nested_field_addition(editor: ArtifactEditorToolSpec):
"""Test adding a valid nested field that exists in the model schema."""
address_data = {"street": "123 Main St", "city": "Springfield", "zipcode": "12345"}
editor.create_artifact(name="John", age=30, address=address_data)
# Add the country field which exists in the Address model
patch = JsonPatch(
operations=[PatchOperation(op="add", path="/address/country", value="USA")]
)
result = editor.apply_patch(patch)
assert result["address"]["country"] == "USA"
def test_validation_with_array_access(editor: ArtifactEditorToolSpec):
"""Test validation works correctly with array access patterns."""
editor.create_artifact(name="John", age=30, tags=["python", "developer"])
# Valid array operations should work
patch = JsonPatch(
operations=[
PatchOperation(op="replace", path="/tags/0", value="rust"),
PatchOperation(op="add", path="/tags/-", value="expert"),
]
)
result = editor.apply_patch(patch)
assert result["tags"] == ["rust", "developer", "expert"]
def test_validation_does_not_affect_existing_operations(editor: ArtifactEditorToolSpec):
"""Test that validation doesn't break existing valid operations."""
editor.create_artifact(name="John", age=30, email="john@example.com")
# All these operations should still work
patch = JsonPatch(
operations=[
PatchOperation(op="replace", path="/name", value="Jane"),
PatchOperation(op="replace", path="/age", value=25),
PatchOperation(op="remove", path="/email"),
]
)
result = editor.apply_patch(patch)
assert result["name"] == "Jane"
assert result["age"] == 25
assert result["email"] is None
def test_move_operation_validates_target_path(editor: ArtifactEditorToolSpec):
"""Test that move operations validate the target path."""
editor.create_artifact(name="John", age=30, email="john@example.com")
# Try to move to an invalid field
patch = JsonPatch(
operations=[
PatchOperation(op="move", path="/invalid_field", from_path="/email")
]
)
with pytest.raises(ValueError, match="Invalid field 'invalid_field'"):
editor.apply_patch(patch)
def test_copy_operation_validates_target_path(editor: ArtifactEditorToolSpec):
"""Test that copy operations validate the target path."""
editor.create_artifact(name="John", age=30, email="john@example.com")
# Try to copy to an invalid field
patch = JsonPatch(
operations=[
PatchOperation(op="copy", path="/invalid_field", from_path="/email")
]
)
with pytest.raises(ValueError, match="Invalid field 'invalid_field'"):
editor.apply_patch(patch)
| SimpleModel |
python | mkdocstrings__mkdocstrings | src/mkdocstrings/_internal/handlers/base.py | {
"start": 19539,
"end": 27207
} | class ____:
"""A collection of handlers.
Do not instantiate this directly. [The plugin][mkdocstrings.MkdocstringsPlugin] will keep one instance of
this for the purpose of caching. Use [mkdocstrings.MkdocstringsPlugin.get_handler][] for convenient access.
"""
def __init__(
self,
*,
theme: str,
default: str,
inventory_project: str,
inventory_version: str = "0.0.0",
handlers_config: dict[str, HandlerConfig] | None = None,
custom_templates: str | None = None,
mdx: Sequence[str | Extension] | None = None,
mdx_config: Mapping[str, Any] | None = None,
locale: str = "en",
tool_config: Any,
) -> None:
"""Initialize the object.
Arguments:
theme: The theme to use.
default: The default handler to use.
inventory_project: The project name to use in the inventory.
inventory_version: The project version to use in the inventory.
handlers_config: The handlers configuration.
custom_templates: The path to custom templates.
mdx: A list of Markdown extensions to use.
mdx_config: Configuration for the Markdown extensions.
locale: The locale to use for translations.
tool_config: Tool configuration to pass down to handlers.
"""
self._theme = theme
self._default = default
self._handlers_config = handlers_config or {}
self._custom_templates = custom_templates
self._mdx = mdx or []
self._mdx_config = mdx_config or {}
self._handlers: dict[str, BaseHandler] = {}
self._locale = locale
self._tool_config = tool_config
self.inventory: Inventory = Inventory(project=inventory_project, version=inventory_version)
"""The objects inventory."""
self._inv_futures: dict[futures.Future, tuple[BaseHandler, str, Any]] = {}
def get_handler_name(self, config: dict) -> str:
"""Return the handler name defined in an "autodoc" instruction YAML configuration, or the global default handler.
Arguments:
config: A configuration dictionary, obtained from YAML below the "autodoc" instruction.
Returns:
The name of the handler to use.
"""
return config.get("handler", self._default)
def get_handler_config(self, name: str) -> dict:
"""Return the global configuration of the given handler.
Arguments:
name: The name of the handler to get the global configuration of.
Returns:
The global configuration of the given handler. It can be an empty dictionary.
"""
return self._handlers_config.get(name, None) or {}
def get_handler(self, name: str, handler_config: dict | None = None) -> BaseHandler:
"""Get a handler thanks to its name.
This function dynamically imports a module named "mkdocstrings.handlers.NAME", calls its
`get_handler` method to get an instance of a handler, and caches it in dictionary.
It means that during one run (for each reload when serving, or once when building),
a handler is instantiated only once, and reused for each "autodoc" instruction asking for it.
Arguments:
name: The name of the handler. Really, it's the name of the Python module holding it.
handler_config: Configuration passed to the handler.
Returns:
An instance of a subclass of [`BaseHandler`][mkdocstrings.BaseHandler],
as instantiated by the `get_handler` method of the handler's module.
"""
if name not in self._handlers:
if handler_config is None:
handler_config = self._handlers_config.get(name, {})
module = importlib.import_module(f"mkdocstrings_handlers.{name}")
self._handlers[name] = module.get_handler(
theme=self._theme,
custom_templates=self._custom_templates,
mdx=self._mdx,
mdx_config=self._mdx_config,
handler_config=handler_config,
tool_config=self._tool_config,
)
return self._handlers[name]
def _download_inventories(self) -> None:
"""Download an inventory file from an URL.
Arguments:
url: The URL of the inventory.
"""
to_download: list[tuple[BaseHandler, str, Any]] = []
for handler_name, conf in self._handlers_config.items():
handler = self.get_handler(handler_name)
if handler.get_inventory_urls.__func__ is BaseHandler.get_inventory_urls: # type: ignore[attr-defined]
if inv_configs := conf.pop("import", ()):
warn(
"mkdocstrings v1 will stop handling 'import' in handlers configuration. "
"Instead your handler must define a `get_inventory_urls` method "
"that returns a list of URLs to download. ",
DeprecationWarning,
stacklevel=1,
)
inv_configs = [{"url": inv} if isinstance(inv, str) else inv for inv in inv_configs]
inv_configs = [(inv.pop("url"), inv) for inv in inv_configs]
else:
inv_configs = handler.get_inventory_urls()
to_download.extend((handler, url, conf) for url, conf in inv_configs)
if to_download:
# YORE: EOL 3.12: Remove block.
# NOTE: Create context in main thread to fix issue
# https://github.com/mkdocstrings/mkdocstrings/issues/796.
_ = ssl.create_default_context()
thread_pool = futures.ThreadPoolExecutor(4)
for handler, url, conf in to_download:
_logger.debug("Downloading inventory from %s", url)
future = thread_pool.submit(
download_and_cache_url,
url,
datetime.timedelta(days=1),
download=_download_url_with_gz,
)
self._inv_futures[future] = (handler, url, conf)
thread_pool.shutdown(wait=False)
def _yield_inventory_items(self) -> Iterator[tuple[str, str]]:
if self._inv_futures:
_logger.debug("Waiting for %s inventory download(s)", len(self._inv_futures))
futures.wait(self._inv_futures, timeout=30)
# Reversed order so that pages from first futures take precedence:
for fut, (handler, url, conf) in reversed(self._inv_futures.items()):
try:
yield from handler.load_inventory(BytesIO(fut.result()), url, **conf)
except Exception as error: # noqa: BLE001
_logger.error("Couldn't load inventory %s through handler '%s': %s", url, handler.name, error) # noqa: TRY400
self._inv_futures = {}
@property
def seen_handlers(self) -> Iterable[BaseHandler]:
"""Get the handlers that were encountered so far throughout the build.
Returns:
An iterable of instances of [`BaseHandler`][mkdocstrings.BaseHandler]
(usable only to loop through it).
"""
return self._handlers.values()
def teardown(self) -> None:
"""Teardown all cached handlers and clear the cache."""
for future in self._inv_futures:
future.cancel()
for handler in self.seen_handlers:
handler.teardown()
self._handlers.clear()
| Handlers |
python | sqlalchemy__sqlalchemy | test/dialect/postgresql/test_dialect.py | {
"start": 35081,
"end": 56754
} | class ____(
fixtures.TestBase, AssertsExecutionResults, AssertsCompiledSQL
):
__only_on__ = "postgresql"
__backend__ = True
@testing.fails_on(["+psycopg2"])
def test_empty_sql_string(self, connection):
result = connection.exec_driver_sql("")
assert result._soft_closed
@testing.provide_metadata
def test_date_reflection(self):
metadata = self.metadata
Table(
"pgdate",
metadata,
Column("date1", DateTime(timezone=True)),
Column("date2", DateTime(timezone=False)),
)
metadata.create_all(testing.db)
m2 = MetaData()
t2 = Table("pgdate", m2, autoload_with=testing.db)
assert t2.c.date1.type.timezone is True
assert t2.c.date2.type.timezone is False
@testing.requires.psycopg2_compatibility
def test_psycopg2_version(self):
v = testing.db.dialect.psycopg2_version
assert testing.db.dialect.dbapi.__version__.startswith(
".".join(str(x) for x in v)
)
@testing.only_on("postgresql+psycopg")
def test_psycopg_version(self):
v = testing.db.dialect.psycopg_version
assert testing.db.dialect.dbapi.__version__.startswith(
".".join(str(x) for x in v)
)
@testing.combinations(
(True, False),
(False, True),
)
def test_backslash_escapes_detection(self, explicit_setting, expected):
engine = engines.testing_engine()
if explicit_setting is not None:
@event.listens_for(engine, "connect", insert=True)
@event.listens_for(engine, "first_connect", insert=True)
def connect(dbapi_connection, connection_record):
cursor = dbapi_connection.cursor()
cursor.execute(
"SET SESSION standard_conforming_strings = %s"
% ("off" if not explicit_setting else "on")
)
dbapi_connection.commit()
with engine.connect():
eq_(engine.dialect._backslash_escapes, expected)
def test_dbapi_autocommit_attribute(self):
"""all the supported DBAPIs have an .autocommit attribute. make
sure it works and preserves isolation level.
This is added in particular to support the asyncpg dialect that
has a DBAPI compatibility layer.
"""
with testing.db.connect().execution_options(
isolation_level="SERIALIZABLE"
) as conn:
dbapi_conn = conn.connection.dbapi_connection
is_false(dbapi_conn.autocommit)
with conn.begin():
existing_isolation = conn.exec_driver_sql(
"show transaction isolation level"
).scalar()
eq_(existing_isolation.upper(), "SERIALIZABLE")
txid1 = conn.exec_driver_sql("select txid_current()").scalar()
txid2 = conn.exec_driver_sql("select txid_current()").scalar()
eq_(txid1, txid2)
dbapi_conn.autocommit = True
with conn.begin():
# magic way to see if we are in autocommit mode from
# the server's perspective
txid1 = conn.exec_driver_sql("select txid_current()").scalar()
txid2 = conn.exec_driver_sql("select txid_current()").scalar()
ne_(txid1, txid2)
dbapi_conn.autocommit = False
with conn.begin():
existing_isolation = conn.exec_driver_sql(
"show transaction isolation level"
).scalar()
eq_(existing_isolation.upper(), "SERIALIZABLE")
txid1 = conn.exec_driver_sql("select txid_current()").scalar()
txid2 = conn.exec_driver_sql("select txid_current()").scalar()
eq_(txid1, txid2)
@testing.combinations((True,), (False,), argnames="pre_ping")
def test_readonly_flag_connection(self, testing_engine, pre_ping):
if pre_ping:
engine = testing_engine(options={"pool_pre_ping": True})
else:
engine = testing_engine()
for i in range(2):
with engine.connect() as conn:
# asyncpg requires serializable for readonly..
conn = conn.execution_options(
isolation_level="SERIALIZABLE", postgresql_readonly=True
)
conn.execute(text("select 1")).scalar()
dbapi_conn = conn.connection.dbapi_connection
cursor = dbapi_conn.cursor()
cursor.execute("show transaction_read_only")
val = cursor.fetchone()[0]
cursor.close()
eq_(val, "on")
is_true(testing.db.dialect.get_readonly(dbapi_conn))
cursor = dbapi_conn.cursor()
try:
cursor.execute("show transaction_read_only")
val = cursor.fetchone()[0]
finally:
cursor.close()
dbapi_conn.rollback()
eq_(val, "off")
@testing.combinations((True,), (False,), argnames="pre_ping")
def test_deferrable_flag_connection(self, testing_engine, pre_ping):
if pre_ping:
engine = testing_engine(options={"pool_pre_ping": True})
else:
engine = testing_engine()
for i in range(2):
with engine.connect() as conn:
# asyncpg but not for deferrable? which the PG docs actually
# state. weird
conn = conn.execution_options(
isolation_level="SERIALIZABLE", postgresql_deferrable=True
)
conn.execute(text("Select 1")).scalar()
dbapi_conn = conn.connection.dbapi_connection
cursor = dbapi_conn.cursor()
cursor.execute("show transaction_deferrable")
val = cursor.fetchone()[0]
cursor.close()
eq_(val, "on")
is_true(testing.db.dialect.get_deferrable(dbapi_conn))
cursor = dbapi_conn.cursor()
try:
cursor.execute("show transaction_deferrable")
val = cursor.fetchone()[0]
finally:
cursor.close()
dbapi_conn.rollback()
eq_(val, "off")
@testing.combinations((True,), (False,), argnames="pre_ping")
def test_readonly_flag_engine(self, testing_engine, pre_ping):
engine = testing_engine(
options={
"execution_options": dict(
isolation_level="SERIALIZABLE", postgresql_readonly=True
),
"pool_pre_ping": pre_ping,
}
)
for i in range(2):
with engine.connect() as conn:
conn.execute(text("select 1")).scalar()
dbapi_conn = conn.connection.dbapi_connection
cursor = dbapi_conn.cursor()
cursor.execute("show transaction_read_only")
val = cursor.fetchone()[0]
cursor.close()
eq_(val, "on")
cursor = dbapi_conn.cursor()
try:
cursor.execute("show transaction_read_only")
val = cursor.fetchone()[0]
finally:
cursor.close()
dbapi_conn.rollback()
eq_(val, "off")
@testing.combinations((True,), (False,), argnames="autocommit")
def test_autocommit_pre_ping(self, testing_engine, autocommit):
engine = testing_engine(
options={
"isolation_level": (
"AUTOCOMMIT" if autocommit else "SERIALIZABLE"
),
"pool_pre_ping": True,
}
)
for i in range(4):
with engine.connect() as conn:
conn.execute(text("select 1")).scalar()
dbapi_conn = conn.connection.dbapi_connection
eq_(dbapi_conn.autocommit, autocommit)
@testing.only_on("+asyncpg")
@testing.combinations((True,), (False,), argnames="autocommit")
def test_asyncpg_transactional_ping(self, testing_engine, autocommit):
"""test #10226"""
engine = testing_engine(
options={
"isolation_level": (
"AUTOCOMMIT" if autocommit else "SERIALIZABLE"
),
"pool_pre_ping": True,
}
)
conn = engine.connect()
dbapi_conn = conn.connection.dbapi_connection
conn.close()
future = asyncio.Future()
future.set_result(None)
rollback = mock.Mock(return_value=future)
transaction = mock.Mock(
return_value=mock.Mock(
start=mock.Mock(return_value=future),
rollback=rollback,
)
)
mock_asyncpg_connection = mock.Mock(
fetchrow=mock.Mock(return_value=future), transaction=transaction
)
with mock.patch.object(
dbapi_conn, "_connection", mock_asyncpg_connection
):
conn = engine.connect()
conn.close()
if autocommit:
eq_(transaction.mock_calls, [])
eq_(rollback.mock_calls, [])
else:
eq_(transaction.mock_calls, [mock.call()])
eq_(rollback.mock_calls, [mock.call()])
def test_deferrable_flag_engine(self):
engine = engines.testing_engine(
options={
"execution_options": dict(
isolation_level="SERIALIZABLE", postgresql_deferrable=True
)
}
)
for i in range(2):
with engine.connect() as conn:
# asyncpg but not for deferrable? which the PG docs actually
# state. weird
dbapi_conn = conn.connection.dbapi_connection
cursor = dbapi_conn.cursor()
cursor.execute("show transaction_deferrable")
val = cursor.fetchone()[0]
cursor.close()
eq_(val, "on")
cursor = dbapi_conn.cursor()
try:
cursor.execute("show transaction_deferrable")
val = cursor.fetchone()[0]
finally:
cursor.close()
dbapi_conn.rollback()
eq_(val, "off")
@testing.requires.any_psycopg_compatibility
def test_psycopg_non_standard_err(self):
# note that psycopg2 is sometimes called psycopg2cffi
# depending on platform
psycopg = testing.db.dialect.dbapi
if psycopg.__version__.startswith("3"):
TransactionRollbackError = __import__(
"%s.errors" % psycopg.__name__
).errors.TransactionRollback
else:
TransactionRollbackError = __import__(
"%s.extensions" % psycopg.__name__
).extensions.TransactionRollbackError
exception = exc.DBAPIError.instance(
"some statement",
{},
TransactionRollbackError("foo"),
psycopg.Error,
)
assert isinstance(exception, exc.OperationalError)
@testing.requires.no_coverage
@testing.requires.any_psycopg_compatibility
def test_notice_logging(self):
log = logging.getLogger("sqlalchemy.dialects.postgresql")
buf = logging.handlers.BufferingHandler(100)
lev = log.level
log.addHandler(buf)
log.setLevel(logging.INFO)
try:
conn = testing.db.connect()
trans = conn.begin()
try:
conn.exec_driver_sql(
"""
CREATE OR REPLACE FUNCTION note(message varchar) RETURNS integer AS $$
BEGIN
RAISE NOTICE 'notice: %%', message;
RETURN NULL;
END;
$$ LANGUAGE plpgsql;
"""
)
conn.exec_driver_sql("SELECT note('hi there')")
conn.exec_driver_sql("SELECT note('another note')")
finally:
trans.rollback()
conn.close()
finally:
log.removeHandler(buf)
log.setLevel(lev)
msgs = " ".join(b.getMessage() for b in buf.buffer)
eq_regex(
msgs,
"NOTICE: [ ]?notice: hi there(\nCONTEXT: .*?)? "
"NOTICE: [ ]?notice: another note(\nCONTEXT: .*?)?",
)
@testing.requires.psycopg_or_pg8000_compatibility
@engines.close_open_connections
def test_client_encoding(self):
c = testing.db.connect()
current_encoding = c.exec_driver_sql(
"show client_encoding"
).fetchone()[0]
c.close()
# attempt to use an encoding that's not
# already set
if current_encoding == "UTF8":
test_encoding = "LATIN1"
else:
test_encoding = "UTF8"
e = engines.testing_engine(options={"client_encoding": test_encoding})
c = e.connect()
new_encoding = c.exec_driver_sql("show client_encoding").fetchone()[0]
eq_(new_encoding, test_encoding)
@testing.requires.psycopg_or_pg8000_compatibility
@engines.close_open_connections
def test_autocommit_isolation_level(self):
c = testing.db.connect().execution_options(
isolation_level="AUTOCOMMIT"
)
# If we're really in autocommit mode then we'll get an error saying
# that the prepared transaction doesn't exist. Otherwise, we'd
# get an error saying that the command can't be run within a
# transaction.
assert_raises_message(
exc.ProgrammingError,
'prepared transaction with identifier "gilberte" does not exist',
c.exec_driver_sql,
"commit prepared 'gilberte'",
)
def test_extract(self, connection):
fivedaysago = connection.execute(
select(func.now().op("at time zone")("UTC"))
).scalar() - datetime.timedelta(days=5)
for field, exp in (
("year", fivedaysago.year),
("month", fivedaysago.month),
("day", fivedaysago.day),
):
r = connection.execute(
select(
extract(
field,
func.now().op("at time zone")("UTC")
+ datetime.timedelta(days=-5),
)
)
).scalar()
eq_(r, exp)
@testing.provide_metadata
def test_checksfor_sequence(self, connection):
meta1 = self.metadata
seq = Sequence("fooseq")
t = Table("mytable", meta1, Column("col1", Integer, seq))
seq.drop(connection)
connection.execute(text("CREATE SEQUENCE fooseq"))
t.create(connection, checkfirst=True)
@testing.combinations(True, False, argnames="implicit_returning")
def test_sequence_detection_tricky_names(
self, metadata, connection, implicit_returning
):
for tname, cname in [
("tb1" * 30, "abc"),
("tb2", "abc" * 30),
("tb3" * 30, "abc" * 30),
("tb4", "abc"),
]:
t = Table(
tname[:57],
metadata,
Column(cname[:57], Integer, primary_key=True),
implicit_returning=implicit_returning,
)
t.create(connection)
r = connection.execute(t.insert())
eq_(r.inserted_primary_key, (1,))
@testing.provide_metadata
def test_schema_roundtrips(self, connection):
meta = self.metadata
users = Table(
"users",
meta,
Column("id", Integer, primary_key=True),
Column("name", String(50)),
schema="test_schema",
)
users.create(connection)
connection.execute(users.insert(), dict(id=1, name="name1"))
connection.execute(users.insert(), dict(id=2, name="name2"))
connection.execute(users.insert(), dict(id=3, name="name3"))
connection.execute(users.insert(), dict(id=4, name="name4"))
eq_(
connection.execute(
users.select().where(users.c.name == "name2")
).fetchall(),
[(2, "name2")],
)
eq_(
connection.execute(
users.select()
.set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL)
.where(users.c.name == "name2")
).fetchall(),
[(2, "name2")],
)
connection.execute(users.delete().where(users.c.id == 3))
eq_(
connection.execute(
users.select().where(users.c.name == "name3")
).fetchall(),
[],
)
connection.execute(
users.update().where(users.c.name == "name4"), dict(name="newname")
)
eq_(
connection.execute(
users.select()
.set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL)
.where(users.c.id == 4)
).fetchall(),
[(4, "newname")],
)
def test_quoted_name_bindparam_ok(self):
from sqlalchemy.sql.elements import quoted_name
with testing.db.connect() as conn:
eq_(
conn.scalar(
select(
cast(
literal(quoted_name("some_name", False)),
String,
)
)
),
"some_name",
)
@testing.provide_metadata
def test_preexecute_passivedefault(self, connection):
"""test that when we get a primary key column back from
reflecting a table which has a default value on it, we pre-
execute that DefaultClause upon insert."""
meta = self.metadata
connection.execute(
text(
"""
CREATE TABLE speedy_users
(
speedy_user_id SERIAL PRIMARY KEY,
user_name VARCHAR NOT NULL,
user_password VARCHAR NOT NULL
);
"""
)
)
t = Table("speedy_users", meta, autoload_with=connection)
r = connection.execute(
t.insert(), dict(user_name="user", user_password="lala")
)
eq_(r.inserted_primary_key, (1,))
result = connection.execute(t.select()).fetchall()
assert result == [(1, "user", "lala")]
connection.execute(text("DROP TABLE speedy_users"))
@testing.requires.psycopg_or_pg8000_compatibility
def test_numeric_raise(self, connection):
stmt = text("select cast('hi' as char) as hi").columns(hi=Numeric)
assert_raises(exc.InvalidRequestError, connection.execute, stmt)
@testing.combinations(
(None, Integer, "SERIAL"),
(None, BigInteger, "BIGSERIAL"),
((9, 1), SmallInteger, "SMALLINT"),
((9, 2), SmallInteger, "SMALLSERIAL"),
(None, SmallInteger, "SMALLSERIAL"),
(None, postgresql.INTEGER, "SERIAL"),
(None, postgresql.BIGINT, "BIGSERIAL"),
(
None,
Integer().with_variant(BigInteger(), "postgresql"),
"BIGSERIAL",
),
(
None,
Integer().with_variant(postgresql.BIGINT, "postgresql"),
"BIGSERIAL",
),
(
(9, 2),
Integer().with_variant(SmallInteger, "postgresql"),
"SMALLSERIAL",
),
(None, "BITD()", "BIGSERIAL"),
argnames="version, type_, expected",
)
def test_serial_integer(self, version, type_, expected, testing_engine):
if type_ == "BITD()":
class BITD(TypeDecorator):
impl = Integer
cache_ok = True
def load_dialect_impl(self, dialect):
if dialect.name == "postgresql":
return BigInteger()
else:
return Integer()
type_ = BITD()
t = Table("t", MetaData(), Column("c", type_, primary_key=True))
if version:
engine = testing_engine()
dialect = engine.dialect
dialect._get_server_version_info = mock.Mock(return_value=version)
engine.connect().close() # initialize the dialect
else:
dialect = testing.db.dialect
ddl_compiler = dialect.ddl_compiler(dialect, schema.CreateTable(t))
eq_(
ddl_compiler.get_column_specification(t.c.c),
"c %s NOT NULL" % expected,
)
@testing.requires.psycopg2_compatibility
def test_initial_transaction_state_psycopg2(self):
from psycopg2.extensions import STATUS_IN_TRANSACTION
engine = engines.testing_engine()
with engine.connect() as conn:
ne_(conn.connection.status, STATUS_IN_TRANSACTION)
@testing.only_on("postgresql+psycopg")
def test_initial_transaction_state_psycopg(self):
from psycopg.pq import TransactionStatus
engine = engines.testing_engine()
with engine.connect() as conn:
ne_(
conn.connection.dbapi_connection.info.transaction_status,
TransactionStatus.INTRANS,
)
def test_select_rowcount(self):
conn = testing.db.connect()
cursor = conn.exec_driver_sql("SELECT 1")
eq_(cursor.rowcount, 1)
| MiscBackendTest |
python | ethereum__web3.py | web3/contract/base_contract.py | {
"start": 53629,
"end": 53911
} | class ____:
@staticmethod
def _raise_exception() -> NoReturn:
raise ABIFallbackNotFound("No fallback function was found in the contract ABI.")
def __getattr__(self, attr: Any) -> Callable[[], None]:
return self._raise_exception
| NonExistentFallbackFunction |
python | altair-viz__altair | altair/vegalite/v6/schema/core.py | {
"start": 1149816,
"end": 1150237
} | class ____(ScaleInvalidDataShowAsxOffset):
"""
ScaleInvalidDataShowAsValuexOffset schema wrapper.
Parameters
----------
value : float
Offset for x-position.
"""
_schema = {"$ref": '#/definitions/ScaleInvalidDataShowAsValue<"xOffset">'}
def __init__(self, value: Optional[float] = Undefined, **kwds):
super().__init__(value=value, **kwds)
| ScaleInvalidDataShowAsValuexOffset |
python | doocs__leetcode | solution/2500-2599/2565.Subsequence With the Minimum Score/Solution.py | {
"start": 0,
"end": 764
} | class ____:
def minimumScore(self, s: str, t: str) -> int:
def check(x):
for k in range(n):
i, j = k - 1, k + x
l = f[i] if i >= 0 else -1
r = g[j] if j < n else m + 1
if l < r:
return True
return False
m, n = len(s), len(t)
f = [inf] * n
g = [-1] * n
i, j = 0, 0
while i < m and j < n:
if s[i] == t[j]:
f[j] = i
j += 1
i += 1
i, j = m - 1, n - 1
while i >= 0 and j >= 0:
if s[i] == t[j]:
g[j] = i
j -= 1
i -= 1
return bisect_left(range(n + 1), True, key=check)
| Solution |
python | pydantic__pydantic | tests/test_forward_ref.py | {
"start": 39907,
"end": 40261
} | class ____[T](BaseModel):
t: 'T'
"""
)
assert mod_1.Model[int].model_fields['t'].annotation is int
@pytest.mark.skipif(sys.version_info < (3, 12), reason='Test related to PEP 695 syntax.')
def test_pep695_generics_syntax_arbitrary_class(create_module) -> None:
mod_1 = create_module(
"""
from typing import TypedDict
| Model |
python | facebook__pyre-check | source/interprocedural_analyses/taint/test/integration/iterators.py | {
"start": 1231,
"end": 1459
} | class ____:
def __iter__(self) -> Iterator[str]:
return iter([_test_source()])
def test_custom_iter():
# TODO(T137627339): False negative with custom `__iter__`
_test_sink(next(iter(CustomIter())))
| CustomIter |
python | google__pytype | pytype/pytd/pytd.py | {
"start": 3963,
"end": 4092
} | class ____(Node):
"""A module imported into the current module, possibly with an alias."""
name: str
module_name: str
| Module |
python | sphinx-doc__sphinx | sphinx/errors.py | {
"start": 2882,
"end": 3131
} | class ____(Exception):
"""Pycode Python source code analyser error."""
def __str__(self) -> str:
res = self.args[0]
if len(self.args) > 1:
res += ' (exception was: %r)' % self.args[1]
return res
| PycodeError |
python | getsentry__sentry | tests/sentry/dynamic_sampling/tasks/test_common.py | {
"start": 585,
"end": 2274
} | class ____(BaseMetricsLayerTestCase, TestCase, SnubaTestCase):
def setUp(self) -> None:
# create 10 orgs each with 10 transactions
for i in range(10):
org = self.create_organization(f"org-{i}")
for i in range(10):
project = self.create_project(organization=org)
self.store_performance_metric(
name=TransactionMRI.COUNT_PER_ROOT_PROJECT.value,
tags={"transaction": "foo_transaction", "decision": "keep"},
minutes_before_now=30,
value=1,
project_id=project.id,
org_id=org.id,
)
@property
def now(self):
return MOCK_DATETIME
def test_get_active_orgs_no_max_projects(self) -> None:
total_orgs = 0
for idx, orgs in enumerate(GetActiveOrgs(3)):
num_orgs = len(orgs)
total_orgs += num_orgs
if idx in [0, 1, 2]:
assert num_orgs == 3 # first batch should be full
else:
assert num_orgs == 1 # second should contain the remaining 3
assert total_orgs == 10
def test_get_active_orgs_with_max_projects(self) -> None:
total_orgs = 0
for orgs in GetActiveOrgs(3, 18):
# we ask for max 18 proj (that's 2 org per request since one org has 10 )
num_orgs = len(orgs)
total_orgs += num_orgs
assert num_orgs == 2 # only 2 orgs since we limit the number of projects
assert total_orgs == 10
NOW_ISH = timezone.now().replace(second=0, microsecond=0)
@freeze_time(MOCK_DATETIME)
| TestGetActiveOrgs |
python | patrick-kidger__equinox | equinox/nn/_pool.py | {
"start": 13679,
"end": 16239
} | class ____(Pool):
"""Three-dimensional downsample using the maximum over a sliding window."""
def __init__(
self,
kernel_size: int | Sequence[int],
stride: int | Sequence[int] = 1,
padding: int | Sequence[int] | Sequence[tuple[int, int]] = 0,
use_ceil: bool = False,
):
"""**Arguments:**
- `kernel_size`: The size of the convolutional kernel.
- `stride`: The stride of the convolution.
- `padding`: The amount of padding to apply before and after each
spatial dimension.
- `use_ceil`: If `True`, then `ceil` is used to compute the final output
shape instead of `floor`. For `ceil`, if required, extra padding is added.
Defaults to `False`.
"""
super().__init__(
init=-jnp.inf,
operation=lax.max,
num_spatial_dims=3,
kernel_size=kernel_size,
stride=stride,
padding=padding,
use_ceil=use_ceil,
)
@named_scope("eqx.nn.MaxPool3d")
def __call__(self, x: Array, *, key: PRNGKeyArray | None = None) -> Array:
"""**Arguments:**
- `x`: The input. Should be a JAX array of shape
`(channels, dim_1, dim_2, dim_3)`.
- `key`: Ignored; provided for compatibility with the rest of the Equinox API.
(Keyword only argument.)
**Returns:**
A JAX array of shape `(channels, new_dim_1, new_dim_2, new_dim_3)`.
"""
return super().__call__(x)
def _adaptive_pool1d(
x: Array, target_size: int, operation: Callable[[Array], Array]
) -> Array:
"""**Arguments:**
- `x`: The input. Should be a JAX array of shape `(dim,)`.
- `target_size`: The shape of the output after the pooling operation
`(target_size,)`.
- `operation`: The pooling operation to be performed on the input array.
**Returns:**
A JAX array of shape `(1, target_shape)`.
"""
dims = jnp.size(x)
num_head_arrays = dims % target_size
if num_head_arrays != 0:
head_end_index = num_head_arrays * (dims // target_size + 1)
head_op = jax.vmap(operation)(x[:head_end_index].reshape(num_head_arrays, -1))
tail_op = jax.vmap(operation)(
x[head_end_index:].reshape(-1, dims // target_size)
)
outputs = jnp.concatenate([head_op, tail_op])
else:
outputs = jax.vmap(operation)(
jax.vmap(operation)(x.reshape(-1, dims // target_size))
)
return outputs
| MaxPool3d |
python | huggingface__transformers | tests/test_tokenization_common.py | {
"start": 11335,
"end": 127070
} | class ____:
tokenizer_class = None
space_between_special_tokens = False
from_pretrained_kwargs = None
from_pretrained_filter = None
from_pretrained_id = None
from_pretrained_vocab_key = "vocab_file"
test_seq2seq = True
test_tokenizer_from_extractor = True
# set to True to test a sentencepiece tokenizer
test_sentencepiece = False
# set to True to ignore casing when testing a sentencepiece tokenizer
# test_sentencepiece must also be set to True
test_sentencepiece_ignore_case = False
# Integration test data - can be optionally set by subclasses
# Default comprehensive test string covering various edge cases
integration_test_input_string = """This is a test 😊
I was born in 92000, and this is falsé.
生活的真谛是
Hi Hello
Hi Hello
Hello
<s>
hi<s>there
The following string should be properly encoded: Hello.
But ird and ปี ird ด
Hey how are you doing""" # noqa: W293
integration_expected_tokens = None
integration_expected_token_ids = None
@classmethod
def setUpClass(cls) -> None:
# Tokenizer.filter makes it possible to filter which Tokenizer to case based on all the
# information available in Tokenizer (name, tokenizer class, vocab key name)
if cls.from_pretrained_id is None:
cls.from_pretrained_id = []
elif isinstance(cls.from_pretrained_id, str):
cls.from_pretrained_id = [cls.from_pretrained_id]
cls.tokenizers_list = []
if cls.tokenizer_class is not None:
cls.tokenizers_list = [
(
cls.tokenizer_class,
pretrained_id,
cls.from_pretrained_kwargs if cls.from_pretrained_kwargs is not None else {},
)
for pretrained_id in cls.from_pretrained_id
]
with open(f"{get_tests_dir()}/fixtures/sample_text.txt", encoding="utf-8") as f_data:
cls._data = f_data.read().replace("\n\n", "\n").strip()
cls.tmpdirname = tempfile.mkdtemp()
# save the first pretrained tokenizer to tmpdirname for tests to use
if cls.from_pretrained_id and cls.tokenizer_class is not None:
try:
tokenizer = AutoTokenizer.from_pretrained(
cls.from_pretrained_id[0],
**(cls.from_pretrained_kwargs if cls.from_pretrained_kwargs is not None else {}),
)
tokenizer.save_pretrained(cls.tmpdirname)
except Exception:
pass
@classmethod
def tearDownClass(cls):
shutil.rmtree(cls.tmpdirname, ignore_errors=True)
def get_input_output_texts(self, tokenizer):
input_txt = self.get_clean_sequence(tokenizer)[0]
return input_txt, input_txt
def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5) -> tuple[str, list]:
# the length of the tokenizer does not always represent the tokens that it can encode: what if there are holes?
toks = [
(i, tokenizer.decode([i], clean_up_tokenization_spaces=False)) for i in set(tokenizer.get_vocab().values())
]
toks = list(filter(lambda t: re.match(r"^[ a-zA-Z]+$", t[1]), toks))
toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1], add_special_tokens=False), toks))
if max_length is not None and len(toks) > max_length:
toks = toks[:max_length]
if min_length is not None and len(toks) < min_length and len(toks) > 0:
while len(toks) < min_length:
toks = toks + toks
# toks_str = [t[1] for t in toks]
toks_ids = [t[0] for t in toks]
# Ensure consistency
output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False)
if " " not in output_txt and len(toks_ids) > 1:
output_txt = (
tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False)
+ " "
+ tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False)
)
if with_prefix_space:
output_txt = " " + output_txt
output_ids = tokenizer.encode(output_txt, add_special_tokens=False)
return output_txt, output_ids
def get_tokenizers(self, **kwargs) -> list[PreTrainedTokenizerBase]:
"""
Returns a list containing a single tokenizer from get_tokenizer().
Subclasses can override this method to return multiple tokenizers for testing.
"""
return [self.get_tokenizer(**kwargs)]
@classmethod
def get_tokenizer(cls, pretrained_name=None, **kwargs) -> PreTrainedTokenizer:
"""Get a tokenizer instance from pretrained."""
pretrained_name = pretrained_name or cls.tmpdirname
return cls.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
def get_extracted_tokenizer(self, reference_tokenizer=None):
    """
    Build a tokenizer from extracted vocab/merges using TokenizersExtractor.

    Args:
        reference_tokenizer: Optional tokenizer to copy special tokens from.
            If None, uses get_tokenizer().

    Returns:
        Tokenizer built from extracted vocab/merges, or None when there is no
        tokenizer.json file to extract from.

    Raises:
        Any exception raised during extraction or construction is propagated.
    """
    if reference_tokenizer is None:
        reference_tokenizer = self.get_tokenizer()
    # The previous try/except (TypeError, Exception): raise was dead code: the handler
    # unconditionally re-raised, and TypeError is already an Exception subclass.
    tokenizer_json_path = os.path.join(self.tmpdirname, "tokenizer.json")
    if not os.path.exists(tokenizer_json_path):
        return None
    extractor = TokenizersExtractor(tokenizer_json_path)
    # vocab ids are not needed here: the tokenizer is rebuilt from scores/merges.
    _vocab_ids, vocab_scores, merges, added_tokens_decoder = extractor.extract()
    # added_tokens_decoder matches the format used by from_pretrained() from tokenizer_config.json.
    return self.tokenizer_class(
        vocab=vocab_scores,
        merges=merges,
        do_lower_case=False,
        keep_accents=True,
        added_tokens_decoder=added_tokens_decoder,
        **(self.from_pretrained_kwargs if self.from_pretrained_kwargs is not None else {}),
    )
def get_extracted_tokenizer_from_sentencepiece(self, reference_tokenizer=None):
    """
    Build a tokenizer from extracted vocab/merges using SentencePieceExtractor.

    Args:
        reference_tokenizer: Unused; kept for signature parity with
            `get_extracted_tokenizer`.

    Returns:
        A tokenizer built from the extracted vocab/merges, or None when no
        tokenizer.model file exists or extraction/construction fails.
    """
    from transformers.tokenization_utils_sentencepiece import SentencePieceExtractor

    try:
        sentencepiece_model_path = os.path.join(self.tmpdirname, "tokenizer.model")
        if not os.path.exists(sentencepiece_model_path):
            return None
        extractor = SentencePieceExtractor(sentencepiece_model_path)
        # vocab scores are not needed to rebuild this tokenizer.
        vocab_ids, _vocab_scores, merges = extractor.extract()
        return self.tokenizer_class(vocab=vocab_ids, merges=merges)
    except Exception:
        # Best-effort helper: extraction is optional, so any failure yields None.
        # (The old `except (TypeError, Exception)` tuple was redundant.)
        return None
def tokenizer_integration_test_util(
    self,
    expected_encoding: dict,
    model_name: str,
    revision: Optional[str] = None,
    sequences: Optional[list[str]] = None,
    decode_kwargs: Optional[dict[str, Any]] = None,
    padding: bool = True,
):
    """
    Util for integration test.

    Text is tokenized and then reverted back to text. Both results are then checked.

    Args:
        expected_encoding:
            The expected result of the tokenizer output.
        model_name:
            The model name of the tokenizer to load and use.
        revision:
            The full git revision number of the model. This is to pin the
            tokenizer config and to avoid that tests start to fail if the
            config gets changed upstream.
        sequences:
            Can overwrite the texts that are used to check the tokenizer.
            This is useful if the tokenizer supports non english languages
            like france.
        decode_kwargs:
            Additional args for the ``decode`` function which reverts the
            tokenized text back to a string.
        padding:
            Activates and controls padding of the tokenizer.
    """
    decode_kwargs = {} if decode_kwargs is None else decode_kwargs

    if sequences is None:
        # Default English sequences exercising long text, punctuation and casing.
        sequences = [
            "Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides "
            "general-purpose architectures (BERT, GPT-2, RoBERTa, XLM, DistilBert, XLNet...) for Natural "
            "Language Understanding (NLU) and Natural Language Generation (NLG) with over 32+ pretrained "
            "models in 100+ languages and deep interoperability between Jax, PyTorch and TensorFlow.",
            "BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly "
            "conditioning on both left and right context in all layers.",
            "The quick brown fox jumps over the lazy dog.",
        ]

    if self.test_sentencepiece_ignore_case:
        sequences = [sequence.lower() for sequence in sequences]

    tokenizer_classes = [self.tokenizer_class]
    for tokenizer_class in tokenizer_classes:
        tokenizer = tokenizer_class.from_pretrained(
            model_name,
            revision=revision,  # to pin the tokenizer version
        )

        encoding = tokenizer(sequences, padding=padding)
        decoded_sequences = [
            tokenizer.decode(seq, skip_special_tokens=True, **decode_kwargs) for seq in encoding["input_ids"]
        ]

        # The raw encoding data must match the pinned expected dict exactly.
        encoding_data = encoding.data
        self.assertDictEqual(encoding_data, expected_encoding)

        # Decoding must round-trip back to the input text (lowercased if required).
        for expected, decoded in zip(sequences, decoded_sequences):
            if self.test_sentencepiece_ignore_case:
                expected = expected.lower()
            self.assertEqual(expected, decoded)
def assert_padded_input_match(self, input_r: list, input_p: list, max_length: int, pad_token_id: int):
    """Check both encodings are padded to `max_length` with the same amount of trailing padding."""
    # Both sequences must be exactly max_length long.
    for sequence in (input_r, input_p):
        self.assertEqual(len(sequence), max_length)

    # Collect the trailing pad tokens of each sequence and compare the runs.
    trailing_r = list(takewhile(lambda tok: tok == pad_token_id, reversed(input_r)))
    trailing_p = list(takewhile(lambda tok: tok == pad_token_id, reversed(input_p)))
    self.assertSequenceEqual(trailing_r, trailing_p)
def assert_batch_padded_input_match(
    self,
    input_r: dict,
    input_p: dict,
    max_length: int,
    pad_token_id: int,
    model_main_input_name: str = "input_ids",
):
    """Check two batched encodings (dicts of 2-example batches) are padded identically.

    Every value in both dicts must hold exactly 2 sequences of length `max_length`;
    the main input sequences must share the same trailing padding, and the
    attention masks must be identical.
    """
    # Validate the shape of BOTH encodings. (Bug fix: the second loop previously
    # re-checked input_r, leaving input_p's shape unvalidated.)
    for i_r in input_r.values():
        self.assertEqual(len(i_r), 2)
        self.assertEqual(len(i_r[0]), max_length)
        self.assertEqual(len(i_r[1]), max_length)
    for i_p in input_p.values():
        self.assertEqual(len(i_p), 2)
        self.assertEqual(len(i_p[0]), max_length)
        self.assertEqual(len(i_p[1]), max_length)

    # Main inputs must be padded the same way, example by example.
    for i_r, i_p in zip(input_r[model_main_input_name], input_p[model_main_input_name]):
        self.assert_padded_input_match(i_r, i_p, max_length, pad_token_id)

    # Attention masks must match exactly.
    for i_r, i_p in zip(input_r["attention_mask"], input_p["attention_mask"]):
        self.assertSequenceEqual(i_r, i_p)
@staticmethod
def convert_batch_to_list_format(batch_encode_plus_sequences):
# Switch from batch_encode_plus format: {'input_ids': [[...], [...]], ...}
# to the list of examples/ encode_plus format: [{'input_ids': [...], ...}, {'input_ids': [...], ...}]
return [
{value: batch_encode_plus_sequences[value][i] for value in batch_encode_plus_sequences}
for i in range(len(batch_encode_plus_sequences["input_ids"]))
]
# TODO: this test can be combined with `test_sentencepiece_tokenize_and_convert_tokens_to_string` after the latter is extended to all tokenizers.
def test_tokenize_special_tokens(self):
    """Test `tokenize` with special tokens."""
    tokenizer = self.get_tokenizer(do_lower_case=True)
    SPECIAL_TOKEN_1 = "[SPECIAL_TOKEN_1]"
    SPECIAL_TOKEN_2 = "[SPECIAL_TOKEN_2]"

    # Both methods should add the token to `_extra_special_tokens` and `added_tokens_decoder`
    tokenizer.add_tokens([SPECIAL_TOKEN_1], special_tokens=True)
    tokenizer.add_special_tokens({"extra_special_tokens": [SPECIAL_TOKEN_2]}, replace_extra_special_tokens=False)

    token_1 = tokenizer.tokenize(SPECIAL_TOKEN_1)
    token_2 = tokenizer.tokenize(SPECIAL_TOKEN_2)

    # Special tokens must stay atomic: tokenize() must not split them.
    self.assertEqual(len(token_1), 1)
    self.assertEqual(len(token_2), 1)
    self.assertEqual(token_1[0], SPECIAL_TOKEN_1)
    # next is failing for almost all the Fast tokenizers now.
    # self.assertEqual(token_2[0], SPECIAL_TOKEN_2)
def test_model_input_names_signature(self):
    """The first entry of model_input_names must be a recognized main input name, so `pad()` works."""
    accepted_model_main_input_names = [
        "input_ids",  # nlp models
        "input_values",  # speech models
    ]

    tokenizer = self.get_tokenizer()
    # first name of model_input_names has to correspond to main model input name
    # to make sure `tokenizer.pad(...)` works correctly
    main_input_name = tokenizer.model_input_names[0]
    self.assertTrue(main_input_name in accepted_model_main_input_names)
def test_tokenizer_store_full_signature(self):
    """Every defaulted __init__ parameter (except file/backend arguments) must be kept in init_kwargs."""
    excluded_params = {
        "vocab_file",
        "merges_file",
        "tokenizer_file",
        "vocab",
        "merges",
        "legacy",
    }
    signature = inspect.signature(self.tokenizer_class.__init__)
    tokenizer = self.get_tokenizer()

    for parameter_name, parameter in signature.parameters.items():
        # Only parameters with defaults are expected to be stored for re-instantiation.
        if parameter.default == inspect.Parameter.empty or parameter_name in excluded_params:
            continue
        self.assertIn(parameter_name, tokenizer.init_kwargs)
def test_tokenizers_common_properties(self):
    """All tokenizers must expose the standard special-token attributes/ids and bookkeeping attributes."""
    tokenizer = self.get_tokenizer()

    special_token_attrs = [
        "bos_token",
        "eos_token",
        "unk_token",
        "sep_token",
        "pad_token",
        "cls_token",
        "mask_token",
    ]
    for attr in special_token_attrs:
        # Both the token itself and its `<attr>_id` accessor must exist.
        self.assertTrue(hasattr(tokenizer, attr))
        self.assertTrue(hasattr(tokenizer, attr + "_id"))

    self.assertTrue(hasattr(tokenizer, "extra_special_tokens"))
    self.assertTrue(hasattr(tokenizer, "extra_special_tokens_ids"))

    common_attrs = [
        "model_max_length",
        "init_inputs",
        "init_kwargs",
    ]
    if not isinstance(tokenizer, TokenizersBackend):
        # Slow tokenizers additionally track their added-token maps directly.
        common_attrs += [
            "added_tokens_encoder",
            "added_tokens_decoder",
        ]
    for attr in common_attrs:
        self.assertTrue(hasattr(tokenizer, attr))
def test_tokenizers_common_ids_setters(self):
    """Setting `<attr>_id` must update both the id and the matching token string (and clear with None)."""
    tokenizer = self.get_tokenizer()
    attributes_list = [
        "bos_token",
        "eos_token",
        "unk_token",
        "sep_token",
        "pad_token",
        "cls_token",
        "mask_token",
    ]

    # Pick an arbitrary real (token, id) pair from the vocab to exercise the setters.
    vocab = tokenizer.get_vocab()
    token_id_to_test_setters = next(iter(vocab.values()))
    token_to_test_setters = tokenizer.convert_ids_to_tokens(token_id_to_test_setters, skip_special_tokens=False)

    for attr in attributes_list:
        # Clearing the id clears the token attribute too.
        setattr(tokenizer, attr + "_id", None)
        self.assertEqual(getattr(tokenizer, attr), None)
        self.assertEqual(getattr(tokenizer, attr + "_id"), None)

        # Setting the id resolves the corresponding token string.
        setattr(tokenizer, attr + "_id", token_id_to_test_setters)
        self.assertEqual(getattr(tokenizer, attr), token_to_test_setters)
        self.assertEqual(getattr(tokenizer, attr + "_id"), token_id_to_test_setters)

    # extra_special_tokens_ids follows the same contract, but with lists.
    setattr(tokenizer, "extra_special_tokens_ids", [])
    self.assertListEqual(getattr(tokenizer, "extra_special_tokens"), [])
    self.assertListEqual(getattr(tokenizer, "extra_special_tokens_ids"), [])

    setattr(tokenizer, "extra_special_tokens_ids", [token_id_to_test_setters])
    self.assertListEqual(getattr(tokenizer, "extra_special_tokens"), [token_to_test_setters])
    self.assertListEqual(getattr(tokenizer, "extra_special_tokens_ids"), [token_id_to_test_setters])
def test_save_and_load_tokenizer(self):
    """Vocab, added tokens, extra special tokens and model_max_length must survive save/reload."""
    # safety check on max_len default value so we are sure the test works
    tokenizer = self.get_tokenizer()
    self.assertNotEqual(tokenizer.model_max_length, 42)

    # Now let's start the test
    tokenizer = self.get_tokenizer()
    # Isolate this from the other tests because we save additional tokens/etc
    tmpdirname = tempfile.mkdtemp()

    sample_text = " He is very happy, UNwant\u00e9d,running"
    before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
    before_vocab = tokenizer.get_vocab()
    tokenizer.save_pretrained(tmpdirname)

    after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
    after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
    after_vocab = after_tokenizer.get_vocab()
    # Round 1: plain save/reload must preserve encoding and vocab exactly.
    self.assertListEqual(before_tokens, after_tokens)
    self.assertDictEqual(before_vocab, after_vocab)

    shutil.rmtree(tmpdirname)

    # Round 2: save/reload with added regular tokens and an extra special token.
    tokenizer = self.get_tokenizer(model_max_length=42)
    # Isolate this from the other tests because we save additional tokens/etc
    tmpdirname = tempfile.mkdtemp()

    sample_text = " He is very happy, UNwant\u00e9d,running"
    tokenizer.add_tokens(["bim", "bambam"])
    extra_special_tokens = tokenizer.extra_special_tokens
    extra_special_tokens.append("new_extra_special_token")
    tokenizer.add_special_tokens(
        {"extra_special_tokens": extra_special_tokens}, replace_extra_special_tokens=False
    )
    before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
    before_vocab = tokenizer.get_vocab()
    tokenizer.save_pretrained(tmpdirname)

    after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
    after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
    after_vocab = after_tokenizer.get_vocab()
    self.assertListEqual(before_tokens, after_tokens)
    self.assertDictEqual(before_vocab, after_vocab)
    self.assertIn("bim", after_vocab)
    self.assertIn("bambam", after_vocab)
    self.assertIn("new_extra_special_token", after_tokenizer.extra_special_tokens)
    self.assertEqual(after_tokenizer.model_max_length, 42)
    # kwargs passed to from_pretrained must override the saved value.
    tokenizer = tokenizer.__class__.from_pretrained(tmpdirname, model_max_length=43)
    self.assertEqual(tokenizer.model_max_length, 43)

    shutil.rmtree(tmpdirname)

    # Test that we can also use the non-legacy saving format for fast tokenizers
    # NOTE(review): this round is currently code-identical to the previous one — presumably a
    # save-format flag (e.g. legacy_format) is meant to differ here; confirm against
    # save_pretrained's options.
    tokenizer = self.get_tokenizer(model_max_length=42)
    # Isolate this from the other tests because we save additional tokens/etc
    tmpdirname = tempfile.mkdtemp()

    sample_text = " He is very happy, UNwant\u00e9d,running"
    tokenizer.add_tokens(["bim", "bambam"])
    extra_special_tokens = tokenizer.extra_special_tokens
    extra_special_tokens.append("new_extra_special_token")
    tokenizer.add_special_tokens(
        {"extra_special_tokens": extra_special_tokens}, replace_extra_special_tokens=False
    )
    before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
    before_vocab = tokenizer.get_vocab()
    tokenizer.save_pretrained(tmpdirname)

    after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
    after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
    after_vocab = after_tokenizer.get_vocab()
    self.assertListEqual(before_tokens, after_tokens)
    self.assertDictEqual(before_vocab, after_vocab)
    self.assertIn("bim", after_vocab)
    self.assertIn("bambam", after_vocab)
    self.assertIn("new_extra_special_token", after_tokenizer.extra_special_tokens)
    self.assertEqual(after_tokenizer.model_max_length, 42)
    tokenizer = tokenizer.__class__.from_pretrained(tmpdirname, model_max_length=43)
    self.assertEqual(tokenizer.model_max_length, 43)

    shutil.rmtree(tmpdirname)
def _run_integration_checks(self, tokenizer, tokenizer_type):
    """Run the three integration assertions (tokenize, encode, decode) against `tokenizer`."""
    name = tokenizer.__class__.__name__

    # 1) tokenize() must reproduce the expected token strings.
    self.assertEqual(
        tokenizer.tokenize(self.integration_test_input_string),
        self.integration_expected_tokens,
        f"Tokenized tokens don't match expected for {name} ({tokenizer_type})",
    )

    # 2) encode() without special tokens must reproduce the expected ids.
    self.assertEqual(
        tokenizer.encode(self.integration_test_input_string, add_special_tokens=False),
        self.integration_expected_token_ids,
        f"Encoded IDs don't match expected for {name} ({tokenizer_type})",
    )

    # 3) decoding the expected ids must round-trip to the expected text.
    self.assertEqual(
        tokenizer.decode(self.integration_expected_token_ids, clean_up_tokenization_spaces=False),
        self.integration_expected_decoded_text,
        f"Decoded text doesn't match expected for {name} ({tokenizer_type})",
    )
def test_integration(self):
    """
    Integration checks for the original tokenizer only.
    """
    # Skip unless the subclass provides all integration fixtures.
    required_fixtures = [
        ("integration_test_input_string", "No integration test input string provided"),
        ("integration_expected_tokens", "No integration expected tokens provided"),
        ("integration_expected_token_ids", "No integration expected token IDs provided"),
        ("integration_expected_decoded_text", "No integration expected decoded text provided"),
    ]
    for attr, message in required_fixtures:
        if getattr(self, attr, None) is None:
            self.skipTest(message)

    tokenizer_original = self.tokenizer_class.from_pretrained(
        self.from_pretrained_id[0],
        do_lower_case=False,
        keep_accents=True,
        **(self.from_pretrained_kwargs if self.from_pretrained_kwargs is not None else {}),
    )
    self._run_integration_checks(tokenizer_original, "original")
def test_integration_from_extractor(self):
    """
    Integration checks for a tokenizer built via TokenizersExtractor.
    """
    # Skip if tokenizer-from-extractor path is not enabled for this class.
    if not getattr(self, "test_tokenizer_from_extractor", False):
        self.skipTest("Tokenizer from TokenizersExtractor not enabled for this tokenizer")

    # Skip unless the subclass provides all integration fixtures.
    required_fixtures = [
        ("integration_test_input_string", "No integration test input string provided"),
        ("integration_expected_tokens", "No integration expected tokens provided"),
        ("integration_expected_token_ids", "No integration expected token IDs provided"),
        ("integration_expected_decoded_text", "No integration expected decoded text provided"),
    ]
    for attr, message in required_fixtures:
        if getattr(self, attr, None) is None:
            self.skipTest(message)

    tokenizer_original = self.tokenizer_class.from_pretrained(
        self.from_pretrained_id[0],
        do_lower_case=False,
        keep_accents=True,
        **(self.from_pretrained_kwargs if self.from_pretrained_kwargs is not None else {}),
    )
    tokenizer_from_extractor = self.get_extracted_tokenizer(reference_tokenizer=tokenizer_original)
    if tokenizer_from_extractor is None:
        self.fail("No tokenizer from TokenizersExtractor provided")
    self._run_integration_checks(tokenizer_from_extractor, "from_extractor")
def test_internal_consistency(self):
    """tokenize->convert_tokens_to_ids must agree with encode, and decode must round-trip."""
    tokenizer = self.get_tokenizer()
    input_text, output_text = self.get_input_output_texts(tokenizer)

    tokens = tokenizer.tokenize(input_text)
    ids_via_tokens = tokenizer.convert_tokens_to_ids(tokens)
    ids_via_encode = tokenizer.encode(input_text, add_special_tokens=False)
    self.assertListEqual(ids_via_tokens, ids_via_encode)

    round_trip_tokens = tokenizer.convert_ids_to_tokens(ids_via_tokens)
    self.assertNotEqual(len(round_trip_tokens), 0)

    decoded_text = tokenizer.decode(ids_via_tokens)
    self.assertIsInstance(decoded_text, str)
    self.assertEqual(decoded_text, output_text)
def test_mask_output(self):
    """token_type_ids must have the same length as input_ids for a sequence pair."""
    tokenizer = self.get_tokenizer(do_lower_case=False)

    first_sequence = "Test this method."
    second_sequence = "With these inputs."
    encoded = tokenizer(first_sequence, second_sequence, add_special_tokens=True, return_token_type_ids=True)
    self.assertEqual(len(encoded["input_ids"]), len(encoded["token_type_ids"]))
def test_token_type_ids(self):
    """A single sequence must be tagged with token_type_id 0."""
    tokenizer = self.get_tokenizer()

    # We want to have sequence 0 and sequence 1 are tagged
    # respectively with 0 and 1 token_ids
    # (regardless of whether the model use token type ids)
    # We use this assumption in the QA pipeline among other place
    encoded = tokenizer("Test this method.", return_token_type_ids=True)
    self.assertIn(0, encoded["token_type_ids"])
def test_sequence_ids(self):
    """sequence_ids() must tag segments 0/1 (and None for special tokens) on fast tokenizers."""
    tokenizer = self.get_tokenizer()
    if tokenizer.backend != "tokenizers":
        self.skipTest(reason="Tokenizers backend tokenizer")

    first_sequence = "Test this method."
    second_sequence = "With these inputs."

    # We want to have sequence 0 and sequence 1 are tagged
    # respectively with 0 and 1 token_ids\
    # (regardless of whether the model use token type ids)
    # We use this assumption in the QA pipeline among other place
    single_output = tokenizer(first_sequence)
    self.assertIn(0, single_output.sequence_ids())

    pair_output = tokenizer(first_sequence, second_sequence)
    self.assertIn(0, pair_output.sequence_ids())
    self.assertIn(1, pair_output.sequence_ids())

    if tokenizer.num_special_tokens_to_add(pair=True):
        self.assertIn(None, pair_output.sequence_ids())
@require_jinja
def test_chat_template(self):
    """apply_chat_template: explicit template arg, tokenize/return_dict combinations, the
    chat_template attribute, and persistence across save_pretrained/from_pretrained (with
    and without a separate chat_template.jinja file)."""
    dummy_template = "{% for message in messages %}{{message['role'] + message['content']}}{% endfor %}"
    dummy_conversation = [
        {"role": "system", "content": "system message"},
        {"role": "user", "content": "user message"},
        {"role": "assistant", "content": "assistant message"},
    ]
    expected_output = "systemsystem messageuseruser messageassistantassistant message"
    tokenizer = self.get_tokenizer()
    output = tokenizer.apply_chat_template(
        dummy_conversation, chat_template=dummy_template, tokenize=False, return_dict=False
    )
    self.assertEqual(output, expected_output)  # Test we can pass chat_template arg

    # Check that no error raised when tokenize=True
    output = tokenizer.apply_chat_template(
        dummy_conversation, chat_template=dummy_template, tokenize=True, return_dict=False
    )
    dict_output = tokenizer.apply_chat_template(
        dummy_conversation, chat_template=dummy_template, tokenize=True, return_dict=True
    )
    self.assertEqual(dict_output["input_ids"], output)  # Test return_dict behaviour matches

    tokenizer.chat_template = dummy_template
    self.assertEqual(tokenizer.chat_template, dummy_template)  # Test property setter
    output = tokenizer.apply_chat_template(dummy_conversation, tokenize=False, return_dict=False)
    self.assertEqual(output, expected_output)  # Test chat_template attribute is used if no arg is passed
    # Check that no error raised
    tokenizer.apply_chat_template(dummy_conversation, tokenize=True, return_dict=False)

    with tempfile.TemporaryDirectory() as tmp_dir_name:
        save_files = tokenizer.save_pretrained(tmp_dir_name, save_jinja_files=False)
        # Check we aren't saving a chat_template.jinja file
        self.assertFalse(any(file.endswith("chat_template.jinja") for file in save_files))
        new_tokenizer = tokenizer.from_pretrained(tmp_dir_name)

    self.assertEqual(new_tokenizer.chat_template, dummy_template)  # Test template has persisted
    output = new_tokenizer.apply_chat_template(dummy_conversation, tokenize=False, return_dict=False)
    self.assertEqual(output, expected_output)  # Test output is the same after reloading
    # Check that no error raised
    new_tokenizer.apply_chat_template(dummy_conversation, tokenize=True, return_dict=False)

    with tempfile.TemporaryDirectory() as tmp_dir_name:
        save_files = tokenizer.save_pretrained(tmp_dir_name)
        # Check we are saving a chat_template.jinja file
        self.assertTrue(any(file.endswith("chat_template.jinja") for file in save_files))
        chat_template_file = Path(tmp_dir_name) / "chat_template.jinja"
        self.assertTrue(chat_template_file.is_file())
        self.assertEqual(chat_template_file.read_text(), dummy_template)
        config_dict = json.loads((Path(tmp_dir_name) / "tokenizer_config.json").read_text())
        # Assert the chat template is not in the config when it's saved as a separate file
        self.assertNotIn("chat_template", config_dict)
        new_tokenizer = tokenizer.from_pretrained(tmp_dir_name)

    self.assertEqual(new_tokenizer.chat_template, dummy_template)  # Test template has persisted
    output = new_tokenizer.apply_chat_template(dummy_conversation, tokenize=False, return_dict=False)
    self.assertEqual(output, expected_output)  # Test output is the same after reloading
    # Check that no error raised
    new_tokenizer.apply_chat_template(dummy_conversation, tokenize=True, return_dict=False)
@require_jinja
def test_chat_template_save_loading(self):
    """Single and multi-template chat templates must survive save/reload, honoring save_jinja_files."""
    tokenizer = self.get_tokenizer()
    signature = inspect.signature(tokenizer.__init__)
    if "chat_template" not in {*signature.parameters.keys()}:
        self.skipTest("tokenizer doesn't accept chat templates at input")
    tokenizer.chat_template = "test template"
    with tempfile.TemporaryDirectory() as tmpdirname:
        tokenizer.save_pretrained(tmpdirname)
        # Single template -> one chat_template.jinja file, no JSON file or extra directory.
        self.assertTrue(Path(tmpdirname, "chat_template.jinja").is_file())
        self.assertFalse(Path(tmpdirname, "chat_template.json").is_file())
        self.assertFalse(Path(tmpdirname, "additional_chat_templates").is_dir())
        reloaded_tokenizer = self.tokenizer_class.from_pretrained(tmpdirname)
        self.assertEqual(tokenizer.chat_template, reloaded_tokenizer.chat_template)
        # When we save as single files, tokenizers and tokenizers share a chat template, which means
        # the reloaded tokenizer should get the chat template as well
        self.assertEqual(reloaded_tokenizer.chat_template, reloaded_tokenizer.tokenizer.chat_template)

    with tempfile.TemporaryDirectory() as tmpdirname:
        tokenizer.chat_template = {"default": "a", "secondary": "b"}
        tokenizer.save_pretrained(tmpdirname)
        # Dict of templates -> default in chat_template.jinja plus an additional_chat_templates dir.
        self.assertTrue(Path(tmpdirname, "chat_template.jinja").is_file())
        self.assertFalse(Path(tmpdirname, "chat_template.json").is_file())
        self.assertTrue(Path(tmpdirname, "additional_chat_templates").is_dir())
        reloaded_tokenizer = self.tokenizer_class.from_pretrained(tmpdirname)
        self.assertEqual(tokenizer.chat_template, reloaded_tokenizer.chat_template)
        # When we save as single files, tokenizers and tokenizers share a chat template, which means
        # the reloaded tokenizer should get the chat template as well
        self.assertEqual(reloaded_tokenizer.chat_template, reloaded_tokenizer.tokenizer.chat_template)

    with tempfile.TemporaryDirectory() as tmpdirname:
        tokenizer.chat_template = {"default": "a", "secondary": "b"}
        tokenizer.save_pretrained(tmpdirname, save_jinja_files=False)
        # save_jinja_files=False -> no jinja artifacts on disk; template still round-trips.
        self.assertFalse(Path(tmpdirname, "chat_template.jinja").is_file())
        self.assertFalse(Path(tmpdirname, "chat_template.json").is_file())
        self.assertFalse(Path(tmpdirname, "additional_chat_templates").is_dir())
        reloaded_tokenizer = self.tokenizer_class.from_pretrained(tmpdirname)
        self.assertEqual(tokenizer.chat_template, reloaded_tokenizer.chat_template)
        # When we save as single files, tokenizers and tokenizers share a chat template, which means
        # the reloaded tokenizer should get the chat template as well
        self.assertEqual(reloaded_tokenizer.chat_template, reloaded_tokenizer.tokenizer.chat_template)
@require_jinja
def test_chat_template_batched(self):
    """apply_chat_template on a list of conversations returns one rendered string per conversation,
    keeping list structure even for a single element."""
    dummy_template = "{% for message in messages %}{{message['role'] + message['content']}}{% endfor %}"
    dummy_conversations = [
        [
            {"role": "system", "content": "system message"},
            {"role": "user", "content": "user message"},
            {"role": "assistant", "content": "assistant message"},
        ],
        [
            {"role": "system", "content": "system message 2"},
            {"role": "user", "content": "user message 2"},
            {"role": "assistant", "content": "assistant message 2"},
        ],
    ]
    tokenizer = self.get_tokenizer()
    output = tokenizer.apply_chat_template(dummy_conversations, chat_template=dummy_template, tokenize=False)
    self.assertEqual(
        output,
        [
            "systemsystem messageuseruser messageassistantassistant message",
            "systemsystem message 2useruser message 2assistantassistant message 2",
        ],
    )
    one_element_output = tokenizer.apply_chat_template(
        dummy_conversations[:1], chat_template=dummy_template, tokenize=False
    )
    self.assertEqual(
        one_element_output, ["systemsystem messageuseruser messageassistantassistant message"]
    )  # Assert that list structure is retained even with one element
    tokenizer.apply_chat_template(
        dummy_conversations, chat_template=dummy_template, tokenize=True
    )  # Check that no error raised
@require_jinja
def test_jinja_loopcontrols(self):
    """Jinja loop controls ({% break %}) must be enabled in the chat-template environment."""
    # `{%-`/`{{-` trim surrounding whitespace, so only "system 1" should be emitted
    # before the break fires on the first iteration.
    break_template = """
    {%- for message in messages %}
        {{- message.role + " " + message.content }}
        {%- if loop.first %}
            {%- break %}
        {%- endif %}
    {%- endfor %}""".strip()

    dummy_conversation = [
        {"role": "system", "content": "1"},
        {"role": "user", "content": "2"},
        {"role": "assistant", "content": "3"},
    ]
    tokenizer = self.get_tokenizer()
    break_output = tokenizer.apply_chat_template(dummy_conversation, chat_template=break_template, tokenize=False)
    self.assertEqual(break_output, "system 1")  # Loop should break after first iter
@require_jinja
def test_jinja_strftime(self):
    """Chat templates can call strftime_now(); the result should be a YYYY-MM-DD date string."""
    strftime_template = """{{- strftime_now("%Y-%m-%d") }}""".strip()
    conversation = [
        {"role": "system", "content": "1"},
        {"role": "user", "content": "2"},
        {"role": "assistant", "content": "3"},
    ]
    tokenizer = self.get_tokenizer()
    rendered = tokenizer.apply_chat_template(conversation, chat_template=strftime_template, tokenize=False)
    # A %Y-%m-%d date is 10 characters, with two dashes splitting it into three fields.
    self.assertEqual(len(rendered), 10)
    self.assertEqual(len(rendered.split("-")), 3)
@require_torch
@require_jinja
def test_chat_template_return_assistant_tokens_mask(self):
    """return_assistant_tokens_mask must mark exactly the {% generation %} spans with 1s
    (0s elsewhere), for batched and unbatched calls, as lists and as torch tensors."""
    dummy_template = (
        "{% for message in messages %}"
        "{% if (message['role'] != 'assistant') %}"
        "{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}"
        "{% elif (message['role'] == 'assistant')%}"
        "{{'<|im_start|>' + message['role'] + '\n'}}"
        "{% generation %}"
        "{{message['content'] + '<|im_end|>'}}"
        "{% endgeneration %}"
        "{{'\n'}}"
        "{% endif %}"
        "{% endfor %}"
    )
    conversations = [
        [
            {"role": "system", "content": "system message"},
            {"role": "user", "content": "user message"},
            {"role": "assistant", "content": "start turn 1 assistant message. end turn 1"},
            {"role": "user", "content": "user message 2"},
            {"role": "assistant", "content": "start turn 2 assistant message. end turn 2"},
        ],
        [
            {"role": "system", "content": "system message 3"},
            {"role": "user", "content": "user message 3"},
            {"role": "assistant", "content": "start turn 3 assistant message. end turn 3"},
            {"role": "user", "content": "user message 4"},
            {"role": "assistant", "content": "start turn 4 assistant message. end turn 4"},
        ],
    ]

    # These are the prefix and suffix strings of all the assistant messages. Used to find the assistant substring
    # in the entire chat string, and then find the corresponding tokens in the tokenized output.
    assistant_prefix_suffix = [
        [("start turn 1", "end turn 1<|im_end|>"), ("start turn 2", "end turn 2<|im_end|>")],
        [("start turn 3", "end turn 3<|im_end|>"), ("start turn 4", "end turn 4<|im_end|>")],
    ]
    for tokenizer, pretrained_name, _ in self.tokenizers_list:
        with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
            tokenizer_r = self.get_tokenizer(pretrained_name)
            if tokenizer_r.backend != "tokenizers":
                self.skipTest(reason="Custom backend tokenizer")
            self._check_no_pad_token_padding(tokenizer_r, conversations)

            tokenizer_r.padding_side = "right"

            # check batched
            output = tokenizer_r.apply_chat_template(
                conversations,
                chat_template=dummy_template,
                tokenize=True,
                return_assistant_tokens_mask=True,
                return_dict=True,
            )
            output_pt = tokenizer_r.apply_chat_template(
                conversations,
                chat_template=dummy_template,
                tokenize=True,
                padding=True,
                return_assistant_tokens_mask=True,
                return_dict=True,
                return_tensors="pt",
            )

            self.assertEqual(type(output_pt["assistant_masks"]), torch.Tensor)
            self.assertEqual(output_pt["assistant_masks"].shape, output_pt["input_ids"].shape)

            for i, conv in enumerate(conversations):
                chat_string = tokenizer_r.apply_chat_template(conv, tokenize=False, chat_template=dummy_template)
                # Locate the token spans of the two assistant turns via char-to-token offsets.
                assistant_start = output.char_to_token(i, chat_string.index(assistant_prefix_suffix[i][0][0]))
                assistant_end = output.char_to_token(
                    i,
                    chat_string.index(assistant_prefix_suffix[i][0][1])
                    + len(assistant_prefix_suffix[i][0][1])
                    - 1,
                )

                assistant_start2 = output.char_to_token(i, chat_string.index(assistant_prefix_suffix[i][1][0]))
                assistant_end2 = output.char_to_token(
                    i,
                    chat_string.index(assistant_prefix_suffix[i][1][1])
                    + len(assistant_prefix_suffix[i][1][1])
                    - 1,
                )
                # char_to_token may return None when a char has no token; skip such conversations.
                if (
                    assistant_start is None
                    or assistant_end is None
                    or assistant_start2 is None
                    or assistant_end2 is None
                ):
                    continue

                # assert 1 in first assistant message
                self.assertEqual(
                    output["assistant_masks"][i][assistant_start : assistant_end + 1],
                    [1] * (assistant_end - assistant_start + 1),
                )
                self.assertTrue(
                    (output_pt["assistant_masks"][i, assistant_start : assistant_end + 1] == 1).all(),
                )

                # assert 1 second assistant message
                self.assertEqual(
                    output["assistant_masks"][i][assistant_start2 : assistant_end2 + 1],
                    [1] * (assistant_end2 - assistant_start2 + 1),
                )
                self.assertTrue(
                    (output_pt["assistant_masks"][i, assistant_start2 : assistant_end2 + 1] == 1).all(),
                )

                # assert 0 in user/system indices
                self.assertEqual(output["assistant_masks"][i][:assistant_start], [0] * assistant_start)
                self.assertTrue((output_pt["assistant_masks"][i, :assistant_start] == 0).all())
                self.assertEqual(
                    output["assistant_masks"][i][assistant_end + 1 : assistant_start2],
                    [0] * (assistant_start2 - assistant_end - 1),
                )
                self.assertTrue(
                    (output_pt["assistant_masks"][i, assistant_end + 1 : assistant_start2] == 0).all(),
                )

            # check not batched
            output = tokenizer_r.apply_chat_template(
                conversations[0],
                chat_template=dummy_template,
                tokenize=True,
                return_assistant_tokens_mask=True,
                return_dict=True,
            )
            output_pt = tokenizer_r.apply_chat_template(
                conversations[0],
                chat_template=dummy_template,
                tokenize=True,
                return_assistant_tokens_mask=True,
                return_dict=True,
                return_tensors="pt",
            )

            self.assertEqual(type(output_pt["assistant_masks"]), torch.Tensor)
            self.assertEqual(output_pt["assistant_masks"].shape, output_pt["input_ids"].shape)

            chat_string = tokenizer_r.apply_chat_template(
                conversations[0], tokenize=False, chat_template=dummy_template
            )
            assistant_start = output.char_to_token(0, chat_string.index(assistant_prefix_suffix[0][0][0]))
            assistant_end = output.char_to_token(
                0, chat_string.index(assistant_prefix_suffix[0][0][1]) + len(assistant_prefix_suffix[0][0][1]) - 1
            )
            assistant_start2 = output.char_to_token(0, chat_string.index(assistant_prefix_suffix[0][1][0]))
            assistant_end2 = output.char_to_token(
                0, chat_string.index(assistant_prefix_suffix[0][1][1]) + len(assistant_prefix_suffix[0][1][1]) - 1
            )
            if (
                assistant_start is None
                or assistant_end is None
                or assistant_start2 is None
                or assistant_end2 is None
            ):
                return

            # assert 1 in assistant indices
            self.assertEqual(
                output["assistant_masks"][assistant_start : assistant_end + 1],
                [1] * (assistant_end - assistant_start + 1),
            )
            self.assertTrue(
                (output_pt["assistant_masks"][assistant_start : assistant_end + 1] == 1).all(),
            )
            self.assertEqual(
                output["assistant_masks"][assistant_start2 : assistant_end2 + 1],
                [1] * (assistant_end2 - assistant_start2 + 1),
            )
            self.assertTrue(
                (output_pt["assistant_masks"][assistant_start2 : assistant_end2 + 1] == 1).all(),
            )

            # assert 0 in user/system indices
            self.assertEqual(output["assistant_masks"][:assistant_start], [0] * assistant_start)
            self.assertTrue((output_pt["assistant_masks"][0, :assistant_start] == 0).all())
            self.assertEqual(
                output["assistant_masks"][assistant_end + 1 : assistant_start2],
                [0] * (assistant_start2 - assistant_end - 1),
            )
            self.assertTrue(
                (output_pt["assistant_masks"][0, assistant_end + 1 : assistant_start2] == 0).all(),
            )
    @require_jinja
    def test_chat_template_return_assistant_tokens_mask_truncated(self):
        """Check that the assistant tokens mask stays correct when truncation cuts through an assistant turn.

        Uses a template with {% generation %} markers and an assistant message long enough that
        truncation lands inside it; positions after the truncated assistant start must all be masked 1.
        """
        dummy_template = (
            "{% for message in messages %}"
            "{% if (message['role'] != 'assistant') %}"
            "{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}"
            "{% elif (message['role'] == 'assistant')%}"
            "{{'<|im_start|>' + message['role'] + '\n'}}"
            "{% generation %}"
            "{{message['content'] + '<|im_end|>'}}"
            "{% endgeneration %}"
            "{{'\n'}}"
            "{% endif %}"
            "{% endfor %}"
        )
        conversations = [
            [
                {"role": "system", "content": "system message"},
                {"role": "user", "content": "user message"},
                {
                    "role": "assistant",
                    "content": (
                        "start turn assistant. long string to be truncated, long string to be truncated, "
                        "long string to be truncated, long string to be truncated, long string to be truncated"
                    ),
                },
                {"role": "user", "content": "another user message"},
            ],
            [
                {"role": "system", "content": "system message"},
                {"role": "user", "content": "user message"},
                {
                    "role": "assistant",
                    "content": (
                        "start turn assistant. long string to be truncated, long string to be truncated, "
                        "long string to be truncated, long string to be truncated, long string to be truncated"
                    ),
                },
                {"role": "user", "content": "another user message"},
            ],
        ]
        for tokenizer, pretrained_name, _ in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.get_tokenizer(pretrained_name)
                if tokenizer_r.backend != "tokenizers":
                    self.skipTest(reason="Custom backend tokenizer")
                # Find where to truncate, as the amount of tokens is different for different tokenizers and I want the
                # truncation to happen in the middle of the assistant content.
                full_encoding = tokenizer_r.apply_chat_template(
                    conversations[0],
                    chat_template=dummy_template,
                    tokenize=True,
                    return_dict=True,
                )
                chat_string = tokenizer_r.apply_chat_template(
                    conversations[0], tokenize=False, chat_template=dummy_template
                )
                truncation_position = full_encoding.char_to_token(chat_string.index(", long string to be truncated,"))
                if truncation_position is None:
                    self.skipTest("char_to_token returned None, cannot determine truncation position")
                # check batched
                output = tokenizer_r.apply_chat_template(
                    conversations,
                    chat_template=dummy_template,
                    tokenize=True,
                    return_assistant_tokens_mask=True,
                    max_length=truncation_position,
                    truncation=True,
                    return_dict=True,
                )
                for i, conv in enumerate(conversations):
                    chat_string = tokenizer_r.apply_chat_template(conv, tokenize=False, chat_template=dummy_template)
                    assistant_start = output.char_to_token(i, chat_string.index("start turn assistant"))
                    if assistant_start is None:
                        # Tokenization may not map this char to a token for every tokenizer; skip the row.
                        continue
                    # assert 1 from assistant_start to the end because the rest is truncated.
                    self.assertEqual(
                        output["assistant_masks"][i][assistant_start:],
                        [1] * (len(output["assistant_masks"][i]) - assistant_start),
                    )
                # check not batched
                output = tokenizer_r.apply_chat_template(
                    conversations[0],
                    chat_template=dummy_template,
                    tokenize=True,
                    return_assistant_tokens_mask=True,
                    return_dict=True,
                    max_length=truncation_position,
                    truncation=True,
                )
                chat_string = tokenizer_r.apply_chat_template(
                    conversations[0], tokenize=False, chat_template=dummy_template
                )
                assistant_start = output.char_to_token(0, chat_string.index("start turn assistant"))
                if assistant_start is None:
                    return
                # assert 1 from assistant_start to the end because the rest is truncated.
                self.assertEqual(
                    output["assistant_masks"][assistant_start:],
                    [1] * (len(output["assistant_masks"]) - assistant_start),
                )
@require_jinja
def test_continue_final_message(self):
dummy_template = """
{%- for message in messages %}
{{- "<|im_start|>" + message['role'] + "\n" + message['content'] + "<|im_end|>" + "\n"}}
{%- endfor %}"""
dummy_conversation = [
{"role": "system", "content": "system message"},
{"role": "user", "content": "user message"},
{"role": "assistant", "content": "assistant message"},
]
tokenizer = self.get_tokenizer()
output = tokenizer.apply_chat_template(
dummy_conversation, chat_template=dummy_template, tokenize=False, continue_final_message=False
)
self.assertEqual(
output,
"<|im_start|>system\nsystem message<|im_end|>\n<|im_start|>user\nuser message<|im_end|>\n<|im_start|>assistant\nassistant message<|im_end|>\n",
)
prefill_output = tokenizer.apply_chat_template(
dummy_conversation, chat_template=dummy_template, tokenize=False, continue_final_message=True
)
# Assert that the final message is unterminated
self.assertEqual(
prefill_output,
"<|im_start|>system\nsystem message<|im_end|>\n<|im_start|>user\nuser message<|im_end|>\n<|im_start|>assistant\nassistant message",
)
@require_jinja
def test_continue_final_message_with_trim(self):
"""Regression test for chat templates with trimming: https://github.com/huggingface/transformers/pull/34214"""
dummy_template = """
{%- for message in messages %}
{{- "<|im_start|>" + message['role'] + "\n" + message['content'] | trim + "<|im_end|>" + "\n"}}
{%- endfor %}"""
dummy_conversation = [
{"role": "system", "content": "system message"},
{"role": "user", "content": "user message"},
{"role": "assistant", "content": "assistant message "}, # Note the trailing whitespace
]
tokenizer = self.get_tokenizer()
output = tokenizer.apply_chat_template(
dummy_conversation, chat_template=dummy_template, tokenize=False, continue_final_message=False
)
self.assertEqual(
output,
"<|im_start|>system\nsystem message<|im_end|>\n<|im_start|>user\nuser message<|im_end|>\n<|im_start|>assistant\nassistant message<|im_end|>\n",
)
prefill_output = tokenizer.apply_chat_template(
dummy_conversation, chat_template=dummy_template, tokenize=False, continue_final_message=True
)
# Assert that the final message is unterminated
self.assertEqual(
prefill_output,
"<|im_start|>system\nsystem message<|im_end|>\n<|im_start|>user\nuser message<|im_end|>\n<|im_start|>assistant\nassistant message",
)
    @require_jinja
    def test_continue_final_message_with_decoy_earlier_message(self):
        """Regression test for chat templates where an earlier message has similar content to the final message
        https://github.com/huggingface/transformers/issues/35433"""
        dummy_template = """
        {%- for message in messages %}
            {{- "<|im_start|>" + message['role'] + "\n" + message['content'] | trim + "<|im_end|>" + "\n"}}
        {%- endfor %}"""
        # The second turn ("bye: 0") shares the "bye:" prefix with the final turn, so a naive
        # search for the final content could match the wrong (earlier) occurrence.
        dummy_conversation = [
            {"role": "user", "content": "hi 0"},
            {"role": "assistant", "content": "bye: 0"},
            {"role": "user", "content": "hi 1"},
            {"role": "assistant", "content": "bye: "},
        ]
        tokenizer = self.get_tokenizer()
        prefill_output = tokenizer.apply_chat_template(
            dummy_conversation, chat_template=dummy_template, tokenize=False, continue_final_message=True
        )
        # Assert that the final message is unterminated (and that only the LAST "bye:" was left open)
        self.assertEqual(
            prefill_output,
            "<|im_start|>user\nhi 0<|im_end|>\n<|im_start|>assistant\nbye: 0<|im_end|>\n<|im_start|>user\nhi 1<|im_end|>\n<|im_start|>assistant\nbye:",
        )
@require_jinja
def test_chat_template_dict(self):
dummy_template_1 = "{{'a'}}"
dummy_template_2 = "{{'b'}}"
dummy_conversation = [
{"role": "user", "content": "user message"},
]
tokenizer = self.get_tokenizer()
tokenizer.chat_template = {"template1": dummy_template_1, "template2": dummy_template_2}
output1 = tokenizer.apply_chat_template(dummy_conversation, chat_template=dummy_template_1, tokenize=False)
output1_via_dict = tokenizer.apply_chat_template(dummy_conversation, chat_template="template1", tokenize=False)
self.assertEqual(output1, output1_via_dict)
output2 = tokenizer.apply_chat_template(dummy_conversation, chat_template=dummy_template_2, tokenize=False)
output2_via_dict = tokenizer.apply_chat_template(dummy_conversation, chat_template="template2", tokenize=False)
self.assertEqual(output2, output2_via_dict)
@require_jinja
def test_chat_template_dict_saving(self):
dummy_template_1 = "{{'a'}}"
dummy_template_2 = "{{'b'}}"
tokenizer = self.get_tokenizer()
for save_jinja_files in (True, False):
tokenizer.chat_template = {"default": dummy_template_1, "template2": dummy_template_2}
with tempfile.TemporaryDirectory() as tmp_dir_name:
# Test that save_jinja_files is ignored when there's a dict of multiple templates
tokenizer.save_pretrained(tmp_dir_name, save_jinja_files=save_jinja_files)
if save_jinja_files:
config_dict = json.load(open(os.path.join(tmp_dir_name, "tokenizer_config.json")))
self.assertNotIn("chat_template", config_dict)
self.assertTrue(os.path.exists(os.path.join(tmp_dir_name, "chat_template.jinja")))
self.assertTrue(
os.path.exists(os.path.join(tmp_dir_name, "additional_chat_templates/template2.jinja"))
)
else:
config_dict = json.load(open(os.path.join(tmp_dir_name, "tokenizer_config.json")))
# Assert that chat templates are correctly serialized as lists of dictionaries
self.assertEqual(
config_dict["chat_template"],
[
{"name": "default", "template": "{{'a'}}"},
{"name": "template2", "template": "{{'b'}}"},
],
)
self.assertFalse(os.path.exists(os.path.join(tmp_dir_name, "chat_template.jinja")))
new_tokenizer = tokenizer.from_pretrained(tmp_dir_name)
# Assert that the serialized list is correctly reconstructed as a single dict
self.assertEqual(new_tokenizer.chat_template, tokenizer.chat_template)
@require_jinja
def test_chat_template_file_priority(self):
dummy_template1 = "a"
dummy_template2 = "b"
tokenizer = self.get_tokenizer()
with tempfile.TemporaryDirectory() as tmp_dir_name:
tokenizer.chat_template = dummy_template1
tokenizer.save_pretrained(tmp_dir_name, save_jinja_files=False)
with Path(tmp_dir_name, "chat_template.jinja").open("w") as f:
f.write(dummy_template2)
new_tokenizer = tokenizer.from_pretrained(tmp_dir_name)
# Assert the file template clobbers any template in the config
self.assertEqual(new_tokenizer.chat_template, dummy_template2)
def test_number_of_added_tokens(self):
tokenizer = self.get_tokenizer(do_lower_case=False)
seq_0 = "Test this method."
seq_1 = "With these inputs."
sequences = tokenizer.encode(seq_0, seq_1, add_special_tokens=False)
attached_sequences = tokenizer.encode(seq_0, seq_1, add_special_tokens=True)
# Method is implemented (e.g. not GPT-2)
if len(attached_sequences) != 2:
self.assertEqual(tokenizer.num_special_tokens_to_add(pair=True), len(attached_sequences) - len(sequences))
    def test_maximum_encoding_length_single_input(self):
        """Single-sequence truncation behavior.

        Checks that over-long inputs are capped at model_max_length for every truncation strategy,
        that a warning is logged when truncation is disabled, and that return_overflowing_tokens
        yields the cut-off tokens plus the stride.
        """
        tokenizer = self.get_tokenizer(do_lower_case=False, model_max_length=100)
        seq_0, ids = self.get_clean_sequence(tokenizer, max_length=20)
        sequence = tokenizer.encode(seq_0, add_special_tokens=False)
        total_length = len(sequence)
        self.assertGreater(total_length, 4, "Issue with the testing sequence, please update it, it's too short")
        # Test with max model input length
        model_max_length = tokenizer.model_max_length
        self.assertEqual(model_max_length, 100)
        seq_1 = seq_0 * model_max_length
        sequence1 = tokenizer(seq_1, add_special_tokens=False)
        total_length1 = len(sequence1["input_ids"])
        self.assertGreater(
            total_length1,
            model_max_length,
            "Issue with the testing sequence, please update it, it's too short",
        )
        # Simple
        padding_strategies = (
            [False, True, "longest"] if tokenizer.pad_token and tokenizer.pad_token_id >= 0 else [False]
        )
        for padding_state in padding_strategies:
            with self.subTest(f"Padding: {padding_state}"):
                for truncation_state in [True, "longest_first", "only_first"]:
                    with self.subTest(f"Truncation: {truncation_state}"):
                        # Every truncation strategy caps the output at model_max_length.
                        output = tokenizer(seq_1, padding=padding_state, truncation=truncation_state)
                        self.assertEqual(len(output["input_ids"]), model_max_length)
                        output = tokenizer([seq_1], padding=padding_state, truncation=truncation_state)
                        self.assertEqual(len(output["input_ids"][0]), model_max_length)
                # Simple with no truncation
                # Reset warnings
                tokenizer.deprecation_warnings = {}
                with self.assertLogs("transformers", level="WARNING") as cm:
                    output = tokenizer(seq_1, padding=padding_state, truncation=False)
                    self.assertNotEqual(len(output["input_ids"]), model_max_length)
                self.assertEqual(len(cm.records), 1)
                self.assertTrue(
                    cm.records[0].message.startswith(
                        "Token indices sequence length is longer than the specified maximum sequence length"
                        " for this model"
                    )
                )
                tokenizer.deprecation_warnings = {}
                with self.assertLogs("transformers", level="WARNING") as cm:
                    output = tokenizer([seq_1], padding=padding_state, truncation=False)
                    self.assertNotEqual(len(output["input_ids"][0]), model_max_length)
                self.assertEqual(len(cm.records), 1)
                self.assertTrue(
                    cm.records[0].message.startswith(
                        "Token indices sequence length is longer than the specified maximum sequence length"
                        " for this model"
                    )
                )
        # Overflowing tokens
        stride = 2
        information = tokenizer(
            seq_0,
            max_length=total_length - 2,
            add_special_tokens=False,
            stride=stride,
            truncation="longest_first",
            return_overflowing_tokens=True,
            # add_prefix_space=False,
        )
        # Overflowing tokens are handled quite differently in slow and fast tokenizers
        if isinstance(tokenizer, TokenizersBackend):
            # Fast tokenizers return the overflow as a second row of input_ids.
            truncated_sequence = information["input_ids"][0]
            overflowing_tokens = information["input_ids"][1]
            self.assertEqual(len(information["input_ids"]), 2)
            self.assertEqual(len(truncated_sequence), total_length - 2)
            self.assertEqual(truncated_sequence, sequence[:-2])
            self.assertEqual(len(overflowing_tokens), 2 + stride)
            self.assertEqual(overflowing_tokens, sequence[-(2 + stride) :])
        else:
            # Slow tokenizers expose the overflow under a dedicated key.
            truncated_sequence = information["input_ids"]
            overflowing_tokens = information["overflowing_tokens"]
            self.assertEqual(len(truncated_sequence), total_length - 2)
            self.assertEqual(truncated_sequence, sequence[:-2])
            self.assertEqual(len(overflowing_tokens), 2 + stride)
            self.assertEqual(overflowing_tokens, sequence[-(2 + stride) :])
    def test_maximum_encoding_length_pair_input(self):
        """Pair-sequence truncation behavior.

        Exercises all truncation strategies (longest_first / only_first / only_second) on a sequence
        pair, checks the truncation warnings, and validates overflowing-token handling — which
        differs between fast tokenizers (second row of input_ids) and slow ones
        (separate "overflowing_tokens" key; 'longest_first' with overflow raises).
        """
        tokenizer = self.get_tokenizer(do_lower_case=False, model_max_length=100)
        # Build a sequence from our model's vocabulary
        stride = 2
        seq_0, ids = self.get_clean_sequence(tokenizer, max_length=20)
        if len(ids) <= 2 + stride:
            # Too short for the stride tests; lengthen by repetition.
            seq_0 = (seq_0 + " ") * (2 + stride)
            ids = None
        seq0_tokens = tokenizer.encode(seq_0, add_special_tokens=False)
        self.assertGreater(len(seq0_tokens), 2 + stride)
        seq_1 = "This is another sentence to be encoded."
        seq1_tokens = tokenizer.encode(seq_1, add_special_tokens=False)
        if abs(len(seq0_tokens) - len(seq1_tokens)) <= 2:
            # Ensure the two sequences differ in length by more than 2 so 'longest_first'
            # truncation has an unambiguous target.
            seq1_tokens = seq1_tokens + seq1_tokens
            seq_1 = tokenizer.decode(seq1_tokens, clean_up_tokenization_spaces=False)
        seq1_tokens = tokenizer.encode(seq_1, add_special_tokens=False)
        self.assertGreater(len(seq1_tokens), 2 + stride)
        smallest = seq1_tokens if len(seq0_tokens) > len(seq1_tokens) else seq0_tokens
        # We are not using the special tokens - a bit too hard to test all the tokenizers with this
        # TODO try this again later
        sequence = tokenizer.encode(seq_0, seq_1, add_special_tokens=False)  # , add_prefix_space=False)
        # Test with max model input length
        model_max_length = tokenizer.model_max_length
        self.assertEqual(model_max_length, 100)
        seq_2 = seq_0 * model_max_length
        self.assertGreater(len(seq_2), model_max_length)
        sequence1 = tokenizer(seq_1, add_special_tokens=False)
        total_length1 = len(sequence1["input_ids"])
        sequence2 = tokenizer(seq_2, seq_1, add_special_tokens=False)
        total_length2 = len(sequence2["input_ids"])
        self.assertLess(total_length1, model_max_length - 10, "Issue with the testing sequence, please update it.")
        self.assertGreater(total_length2, model_max_length, "Issue with the testing sequence, please update it.")
        # Simple
        padding_strategies = (
            [False, True, "longest"] if tokenizer.pad_token and tokenizer.pad_token_id >= 0 else [False]
        )
        for padding_state in padding_strategies:
            with self.subTest(f"{tokenizer.__class__.__name__} Padding: {padding_state}"):
                for truncation_state in [True, "longest_first", "only_first"]:
                    with self.subTest(f"{tokenizer.__class__.__name__} Truncation: {truncation_state}"):
                        output = tokenizer(seq_2, seq_1, padding=padding_state, truncation=truncation_state)
                        self.assertEqual(len(output["input_ids"]), model_max_length)
                        output = tokenizer([seq_2], [seq_1], padding=padding_state, truncation=truncation_state)
                        self.assertEqual(len(output["input_ids"][0]), model_max_length)
                # Simple
                output = tokenizer(seq_1, seq_2, padding=padding_state, truncation="only_second")
                self.assertEqual(len(output["input_ids"]), model_max_length)
                output = tokenizer([seq_1], [seq_2], padding=padding_state, truncation="only_second")
                self.assertEqual(len(output["input_ids"][0]), model_max_length)
                # Simple with no truncation
                # Reset warnings
                tokenizer.deprecation_warnings = {}
                with self.assertLogs("transformers", level="WARNING") as cm:
                    output = tokenizer(seq_1, seq_2, padding=padding_state, truncation=False)
                    self.assertNotEqual(len(output["input_ids"]), model_max_length)
                self.assertEqual(len(cm.records), 1)
                self.assertTrue(
                    cm.records[0].message.startswith(
                        "Token indices sequence length is longer than the specified maximum sequence length"
                        " for this model"
                    )
                )
                tokenizer.deprecation_warnings = {}
                with self.assertLogs("transformers", level="WARNING") as cm:
                    output = tokenizer([seq_1], [seq_2], padding=padding_state, truncation=False)
                    self.assertNotEqual(len(output["input_ids"][0]), model_max_length)
                self.assertEqual(len(cm.records), 1)
                self.assertTrue(
                    cm.records[0].message.startswith(
                        "Token indices sequence length is longer than the specified maximum sequence length"
                        " for this model"
                    )
                )
        # Expected results for each truncation strategy (2 tokens removed from one side).
        truncated_first_sequence = tokenizer.encode(seq_0, add_special_tokens=False)[:-2] + tokenizer.encode(
            seq_1, add_special_tokens=False
        )
        truncated_second_sequence = (
            tokenizer.encode(seq_0, add_special_tokens=False) + tokenizer.encode(seq_1, add_special_tokens=False)[:-2]
        )
        truncated_longest_sequence = (
            truncated_first_sequence if len(seq0_tokens) > len(seq1_tokens) else truncated_second_sequence
        )
        overflow_first_sequence = tokenizer.encode(seq_0, add_special_tokens=False)[
            -(2 + stride) :
        ] + tokenizer.encode(seq_1, add_special_tokens=False)
        overflow_second_sequence = (
            tokenizer.encode(seq_0, add_special_tokens=False)
            + tokenizer.encode(seq_1, add_special_tokens=False)[-(2 + stride) :]
        )
        overflow_longest_sequence = (
            overflow_first_sequence if len(seq0_tokens) > len(seq1_tokens) else overflow_second_sequence
        )
        # Overflowing tokens are handled quite differently in slow and fast tokenizers
        if isinstance(tokenizer, TokenizersBackend):
            information = tokenizer(
                seq_0,
                seq_1,
                max_length=len(sequence) - 2,
                add_special_tokens=False,
                stride=stride,
                truncation="longest_first",
                return_overflowing_tokens=True,
                # add_prefix_space=False,
            )
            truncated_sequence = information["input_ids"][0]
            overflowing_tokens = information["input_ids"][1]
            self.assertEqual(len(information["input_ids"]), 2)
            self.assertEqual(len(truncated_sequence), len(sequence) - 2)
            self.assertEqual(truncated_sequence, truncated_longest_sequence)
            self.assertEqual(len(overflowing_tokens), 2 + stride + len(smallest))
            self.assertEqual(overflowing_tokens, overflow_longest_sequence)
        else:
            # No overflowing tokens when using 'longest' in python tokenizers
            with self.assertRaises(ValueError) as context:
                information = tokenizer(
                    seq_0,
                    seq_1,
                    max_length=len(sequence) - 2,
                    add_special_tokens=False,
                    stride=stride,
                    truncation="longest_first",
                    return_overflowing_tokens=True,
                    # add_prefix_space=False,
                )
            self.assertTrue(
                context.exception.args[0].startswith(
                    "Not possible to return overflowing tokens for pair of sequences with the "
                    "`longest_first`. Please select another truncation strategy than `longest_first`, "
                    "for instance `only_second` or `only_first`."
                )
            )
        # Overflowing tokens are handled quite differently in slow and fast tokenizers
        # (truncation=True resolves to 'longest_first', so this mirrors the previous check)
        if isinstance(tokenizer, TokenizersBackend):
            information = tokenizer(
                seq_0,
                seq_1,
                max_length=len(sequence) - 2,
                add_special_tokens=False,
                stride=stride,
                truncation=True,
                return_overflowing_tokens=True,
                # add_prefix_space=False,
            )
            truncated_sequence = information["input_ids"][0]
            overflowing_tokens = information["input_ids"][1]
            self.assertEqual(len(information["input_ids"]), 2)
            self.assertEqual(len(truncated_sequence), len(sequence) - 2)
            self.assertEqual(truncated_sequence, truncated_longest_sequence)
            self.assertEqual(len(overflowing_tokens), 2 + stride + len(smallest))
            self.assertEqual(overflowing_tokens, overflow_longest_sequence)
        else:
            # No overflowing tokens when using 'longest' in python tokenizers
            with self.assertRaises(ValueError) as context:
                information = tokenizer(
                    seq_0,
                    seq_1,
                    max_length=len(sequence) - 2,
                    add_special_tokens=False,
                    stride=stride,
                    truncation=True,
                    return_overflowing_tokens=True,
                    # add_prefix_space=False,
                )
            self.assertTrue(
                context.exception.args[0].startswith(
                    "Not possible to return overflowing tokens for pair of sequences with the "
                    "`longest_first`. Please select another truncation strategy than `longest_first`, "
                    "for instance `only_second` or `only_first`."
                )
            )
        information_first_truncated = tokenizer(
            seq_0,
            seq_1,
            max_length=len(sequence) - 2,
            add_special_tokens=False,
            stride=stride,
            truncation="only_first",
            return_overflowing_tokens=True,
            # add_prefix_space=False,
        )
        # Overflowing tokens are handled quite differently in slow and fast tokenizers
        if isinstance(tokenizer, TokenizersBackend):
            truncated_sequence = information_first_truncated["input_ids"][0]
            overflowing_tokens = information_first_truncated["input_ids"][1]
            self.assertEqual(len(information_first_truncated["input_ids"]), 2)
            self.assertEqual(len(truncated_sequence), len(sequence) - 2)
            self.assertEqual(truncated_sequence, truncated_first_sequence)
            self.assertEqual(len(overflowing_tokens), 2 + stride + len(seq1_tokens))
            self.assertEqual(overflowing_tokens, overflow_first_sequence)
        else:
            truncated_sequence = information_first_truncated["input_ids"]
            overflowing_tokens = information_first_truncated["overflowing_tokens"]
            self.assertEqual(len(truncated_sequence), len(sequence) - 2)
            self.assertEqual(truncated_sequence, truncated_first_sequence)
            self.assertEqual(len(overflowing_tokens), 2 + stride)
            self.assertEqual(overflowing_tokens, seq0_tokens[-(2 + stride) :])
        information_second_truncated = tokenizer(
            seq_0,
            seq_1,
            max_length=len(sequence) - 2,
            add_special_tokens=False,
            stride=stride,
            truncation="only_second",
            return_overflowing_tokens=True,
            # add_prefix_space=False,
        )
        # Overflowing tokens are handled quite differently in slow and fast tokenizers
        if isinstance(tokenizer, TokenizersBackend):
            truncated_sequence = information_second_truncated["input_ids"][0]
            overflowing_tokens = information_second_truncated["input_ids"][1]
            self.assertEqual(len(information_second_truncated["input_ids"]), 2)
            self.assertEqual(len(truncated_sequence), len(sequence) - 2)
            self.assertEqual(truncated_sequence, truncated_second_sequence)
            self.assertEqual(len(overflowing_tokens), 2 + stride + len(seq0_tokens))
            self.assertEqual(overflowing_tokens, overflow_second_sequence)
        else:
            truncated_sequence = information_second_truncated["input_ids"]
            overflowing_tokens = information_second_truncated["overflowing_tokens"]
            self.assertEqual(len(truncated_sequence), len(sequence) - 2)
            self.assertEqual(truncated_sequence, truncated_second_sequence)
            self.assertEqual(len(overflowing_tokens), 2 + stride)
            self.assertEqual(overflowing_tokens, seq1_tokens[-(2 + stride) :])
def test_special_tokens_mask(self):
tokenizer = self.get_tokenizer(do_lower_case=False)
sequence_0 = "Encode this."
# Testing single inputs
encoded_sequence = tokenizer.encode(sequence_0, add_special_tokens=False)
encoded_sequence_dict = tokenizer(
sequence_0,
add_special_tokens=True,
return_special_tokens_mask=True, # , add_prefix_space=False
)
encoded_sequence_w_special = encoded_sequence_dict["input_ids"]
special_tokens_mask = encoded_sequence_dict["special_tokens_mask"]
self.assertEqual(len(special_tokens_mask), len(encoded_sequence_w_special))
filtered_sequence = [x for i, x in enumerate(encoded_sequence_w_special) if not special_tokens_mask[i]]
self.assertEqual(encoded_sequence, filtered_sequence)
def test_special_tokens_mask_input_pairs(self):
tokenizer = self.get_tokenizer(do_lower_case=False)
sequence_0 = "Encode this."
sequence_1 = "This one too please."
encoded_sequence = tokenizer.encode(sequence_0, add_special_tokens=False)
encoded_sequence += tokenizer.encode(sequence_1, add_special_tokens=False)
encoded_sequence_dict = tokenizer(
sequence_0,
sequence_1,
add_special_tokens=True,
return_special_tokens_mask=True,
# add_prefix_space=False,
)
encoded_sequence_w_special = encoded_sequence_dict["input_ids"]
special_tokens_mask = encoded_sequence_dict["special_tokens_mask"]
self.assertEqual(len(special_tokens_mask), len(encoded_sequence_w_special))
filtered_sequence = [
(x if not special_tokens_mask[i] else None) for i, x in enumerate(encoded_sequence_w_special)
]
filtered_sequence = [x for x in filtered_sequence if x is not None]
self.assertEqual(encoded_sequence, filtered_sequence)
def test_padding_side_in_kwargs(self):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
tokenizer_r = self.get_tokenizer(pretrained_name, padding_side="left", **kwargs)
self.assertEqual(tokenizer_r.padding_side, "left")
tokenizer_r = self.get_tokenizer(pretrained_name, padding_side="right", **kwargs)
self.assertEqual(tokenizer_r.padding_side, "right")
self.assertRaises(
ValueError,
self.tokenizer_class.from_pretrained,
pretrained_name,
padding_side="unauthorized",
**kwargs,
)
    def test_truncation_side_in_kwargs(self):
        """truncation_side passed at load time is honored; invalid values raise ValueError."""
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.get_tokenizer(pretrained_name, truncation_side="left", **kwargs)
                self.assertEqual(tokenizer_r.truncation_side, "left")
                tokenizer_r = self.get_tokenizer(pretrained_name, truncation_side="right", **kwargs)
                self.assertEqual(tokenizer_r.truncation_side, "right")
                # Anything other than left/right must be rejected at construction time.
                self.assertRaises(
                    ValueError,
                    self.tokenizer_class.from_pretrained,
                    pretrained_name,
                    truncation_side="unauthorized",
                    **kwargs,
                )
    def test_encode_basic_padding(self):
        """Test basic left/right padding behavior using encode() method with max_length strategy."""
        tokenizer = self.get_tokenizer(do_lower_case=False)
        sequence = "Sequence"
        padding_size = 10
        # check correct behaviour if no pad_token_id exists and add it eventually
        self._check_no_pad_token_padding(tokenizer, sequence)
        # Padding positions are filled with pad_token_id.
        padding_idx = tokenizer.pad_token_id
        # RIGHT PADDING - Check that it correctly pads when a maximum length is specified along with the padding flag set to True
        tokenizer.padding_side = "right"
        encoded_sequence = tokenizer.encode(sequence)
        sequence_length = len(encoded_sequence)
        padded_sequence = tokenizer.encode(sequence, max_length=sequence_length + padding_size, padding="max_length")
        padded_sequence_length = len(padded_sequence)
        self.assertEqual(sequence_length + padding_size, padded_sequence_length)
        # Pad ids are appended after the original encoding.
        self.assertEqual(encoded_sequence + [padding_idx] * padding_size, padded_sequence)
        # LEFT PADDING - Check that it correctly pads when a maximum length is specified along with the padding flag set to True
        tokenizer.padding_side = "left"
        encoded_sequence = tokenizer.encode(sequence)
        sequence_length = len(encoded_sequence)
        padded_sequence = tokenizer.encode(sequence, max_length=sequence_length + padding_size, padding="max_length")
        padded_sequence_length = len(padded_sequence)
        self.assertEqual(sequence_length + padding_size, padded_sequence_length)
        # Pad ids are prepended before the original encoding.
        self.assertEqual([padding_idx] * padding_size + encoded_sequence, padded_sequence)
    def test_right_and_left_truncation(self):
        """Test that truncation_side='right'/'left' drops tokens from the correct end,
        and that nothing is dropped when no max_length is given."""
        tokenizer = self.get_tokenizer(do_lower_case=False)
        sequence = "This is a test sequence"
        # RIGHT TRUNCATION - Check that it correctly truncates from the end when a maximum length is specified along with the truncation flag set to True
        truncation_size = 3
        tokenizer.truncation_side = "right"
        encoded_sequence = tokenizer.encode(sequence, add_special_tokens=False)
        sequence_length = len(encoded_sequence)
        # Remove EOS/BOS tokens
        truncated_sequence = tokenizer.encode(
            sequence, max_length=sequence_length - truncation_size, truncation=True, add_special_tokens=False
        )
        truncated_sequence_length = len(truncated_sequence)
        self.assertEqual(sequence_length, truncated_sequence_length + truncation_size)
        self.assertEqual(encoded_sequence[:-truncation_size], truncated_sequence)
        # LEFT TRUNCATION - Check that it correctly truncates from the beginning when a maximum length is specified along with the truncation flag set to True
        tokenizer.truncation_side = "left"
        sequence_length = len(encoded_sequence)
        truncated_sequence = tokenizer.encode(
            sequence, max_length=sequence_length - truncation_size, truncation=True, add_special_tokens=False
        )
        truncated_sequence_length = len(truncated_sequence)
        self.assertEqual(sequence_length, truncated_sequence_length + truncation_size)
        self.assertEqual(encoded_sequence[truncation_size:], truncated_sequence)
        # RIGHT & LEFT TRUNCATION - Check that nothing is truncated for 'longest_first' and 'no_truncation' when no max_length is given
        sequence_length = len(encoded_sequence)
        tokenizer.truncation_side = "right"
        truncated_sequence_right = tokenizer.encode(sequence, truncation=True, add_special_tokens=False)
        truncated_sequence_right_length = len(truncated_sequence_right)
        self.assertEqual(sequence_length, truncated_sequence_right_length)
        self.assertEqual(encoded_sequence, truncated_sequence_right)
        tokenizer.truncation_side = "left"
        truncated_sequence_left = tokenizer.encode(sequence, truncation="longest_first", add_special_tokens=False)
        truncated_sequence_left_length = len(truncated_sequence_left)
        self.assertEqual(sequence_length, truncated_sequence_left_length)
        self.assertEqual(encoded_sequence, truncated_sequence_left)
        tokenizer.truncation_side = "right"
        truncated_sequence_right = tokenizer.encode(sequence, add_special_tokens=False)
        truncated_sequence_right_length = len(truncated_sequence_right)
        self.assertEqual(sequence_length, truncated_sequence_right_length)
        self.assertEqual(encoded_sequence, truncated_sequence_right)
        tokenizer.truncation_side = "left"
        truncated_sequence_left = tokenizer.encode(sequence, truncation=False, add_special_tokens=False)
        truncated_sequence_left_length = len(truncated_sequence_left)
        self.assertEqual(sequence_length, truncated_sequence_left_length)
        self.assertEqual(encoded_sequence, truncated_sequence_left)
def test_padding_to_multiple_of(self):
tokenizer = self.get_tokenizer()
if tokenizer.pad_token is None:
self.skipTest(reason="No padding token.")
else:
empty_tokens = tokenizer("", padding=True, pad_to_multiple_of=8)
normal_tokens = tokenizer("This is a sample input", padding=True, pad_to_multiple_of=8)
for key, value in empty_tokens.items():
self.assertEqual(len(value) % 8, 0, f"BatchEncoding.{key} is not multiple of 8")
for key, value in normal_tokens.items():
self.assertEqual(len(value) % 8, 0, f"BatchEncoding.{key} is not multiple of 8")
normal_tokens = tokenizer("This", pad_to_multiple_of=8)
for key, value in normal_tokens.items():
self.assertNotEqual(len(value) % 8, 0, f"BatchEncoding.{key} is not multiple of 8")
# Should also work with truncation
normal_tokens = tokenizer("This", padding=True, truncation=True, pad_to_multiple_of=8)
for key, value in normal_tokens.items():
self.assertEqual(len(value) % 8, 0, f"BatchEncoding.{key} is not multiple of 8")
# truncation to something which is not a multiple of pad_to_multiple_of raises an error
self.assertRaises(
ValueError,
tokenizer.__call__,
"This",
padding=True,
truncation=True,
max_length=12,
pad_to_multiple_of=8,
)
def test_padding_with_attention_mask(self):
tokenizer = self.get_tokenizer()
if tokenizer.pad_token is None:
self.skipTest(reason="No padding token.")
if "attention_mask" not in tokenizer.model_input_names:
self.skipTest(reason="This model does not use attention mask.")
features = [
{"input_ids": [1, 2, 3, 4, 5, 6], "attention_mask": [1, 1, 1, 1, 1, 0]},
{"input_ids": [1, 2, 3], "attention_mask": [1, 1, 0]},
]
padded_features = tokenizer.pad(features)
if tokenizer.padding_side == "right":
self.assertListEqual(padded_features["attention_mask"], [[1, 1, 1, 1, 1, 0], [1, 1, 0, 0, 0, 0]])
else:
self.assertListEqual(padded_features["attention_mask"], [[1, 1, 1, 1, 1, 0], [0, 0, 0, 1, 1, 0]])
@parameterized.expand([(True,), (False,)])
def test_encode_plus_with_padding(self, use_padding_as_call_kwarg: bool):
"""
This test checks that padding works as expected when tokenizing a sequence.
Padding is expected to have no effect when the input is a single sequence and
the padding-strategy is not `max_length`. Otherwise it pads to the specified max-length
using tokenizer classes `padding_side` attribute. Also, we check that passing `padding_side`
as call time kwarg works same way as when one sets `tokenizer.padding_side` attribute.
"""
tokenizer = self.get_tokenizer(do_lower_case=False)
sequence = "Sequence"
# check correct behaviour if no pad_token_id exists and add it eventually
self._check_no_pad_token_padding(tokenizer, sequence)
padding_size = 10
padding_idx = tokenizer.pad_token_id
token_type_padding_idx = tokenizer.pad_token_type_id
encoded_sequence = tokenizer(sequence, return_special_tokens_mask=True)
input_ids = encoded_sequence["input_ids"]
special_tokens_mask = encoded_sequence["special_tokens_mask"]
sequence_length = len(input_ids)
# Test 'longest' and 'no_padding' don't do anything
not_padded_sequence = tokenizer(
sequence,
padding=True,
return_special_tokens_mask=True,
)
not_padded_input_ids = not_padded_sequence["input_ids"]
not_padded_special_tokens_mask = not_padded_sequence["special_tokens_mask"]
not_padded_sequence_length = len(not_padded_input_ids)
self.assertEqual(sequence_length, not_padded_sequence_length)
self.assertEqual(input_ids, not_padded_input_ids)
self.assertEqual(special_tokens_mask, not_padded_special_tokens_mask)
not_padded_sequence = tokenizer(
sequence,
padding=False,
return_special_tokens_mask=True,
)
not_padded_input_ids = not_padded_sequence["input_ids"]
not_padded_special_tokens_mask = not_padded_sequence["special_tokens_mask"]
not_padded_sequence_length = len(not_padded_input_ids)
self.assertEqual(sequence_length, not_padded_sequence_length)
self.assertEqual(input_ids, not_padded_input_ids)
self.assertEqual(special_tokens_mask, not_padded_special_tokens_mask)
# Test right padding
tokenizer_kwargs_right = {
"max_length": sequence_length + padding_size,
"padding": "max_length",
"return_special_tokens_mask": True,
}
if not use_padding_as_call_kwarg:
tokenizer.padding_side = "right"
else:
tokenizer_kwargs_right["padding_side"] = "right"
right_padded_sequence = tokenizer(sequence, **tokenizer_kwargs_right)
right_padded_input_ids = right_padded_sequence["input_ids"]
right_padded_special_tokens_mask = right_padded_sequence["special_tokens_mask"]
right_padded_sequence_length = len(right_padded_input_ids)
self.assertEqual(sequence_length + padding_size, right_padded_sequence_length)
self.assertEqual(input_ids + [padding_idx] * padding_size, right_padded_input_ids)
self.assertEqual(special_tokens_mask + [1] * padding_size, right_padded_special_tokens_mask)
# Test left padding
tokenizer_kwargs_left = {
"max_length": sequence_length + padding_size,
"padding": "max_length",
"return_special_tokens_mask": True,
}
if not use_padding_as_call_kwarg:
tokenizer.padding_side = "left"
else:
tokenizer_kwargs_left["padding_side"] = "left"
left_padded_sequence = tokenizer(sequence, **tokenizer_kwargs_left)
left_padded_input_ids = left_padded_sequence["input_ids"]
left_padded_special_tokens_mask = left_padded_sequence["special_tokens_mask"]
left_padded_sequence_length = len(left_padded_input_ids)
self.assertEqual(sequence_length + padding_size, left_padded_sequence_length)
self.assertEqual([padding_idx] * padding_size + input_ids, left_padded_input_ids)
self.assertEqual([1] * padding_size + special_tokens_mask, left_padded_special_tokens_mask)
if "token_type_ids" in tokenizer.model_input_names:
token_type_ids = encoded_sequence["token_type_ids"]
left_padded_token_type_ids = left_padded_sequence["token_type_ids"]
right_padded_token_type_ids = right_padded_sequence["token_type_ids"]
self.assertEqual(token_type_ids + [token_type_padding_idx] * padding_size, right_padded_token_type_ids)
self.assertEqual([token_type_padding_idx] * padding_size + token_type_ids, left_padded_token_type_ids)
if "attention_mask" in tokenizer.model_input_names:
attention_mask = encoded_sequence["attention_mask"]
right_padded_attention_mask = right_padded_sequence["attention_mask"]
left_padded_attention_mask = left_padded_sequence["attention_mask"]
self.assertEqual(attention_mask + [0] * padding_size, right_padded_attention_mask)
self.assertEqual([0] * padding_size + attention_mask, left_padded_attention_mask)
def test_get_vocab(self):
tokenizer = self.get_tokenizer(do_lower_case=False)
vocab_dict = tokenizer.get_vocab()
self.assertIsInstance(vocab_dict, dict)
self.assertGreaterEqual(len(tokenizer), len(vocab_dict))
vocab = [tokenizer.convert_ids_to_tokens(i) for i in range(len(tokenizer))]
self.assertEqual(len(vocab), len(tokenizer))
tokenizer.add_tokens(["asdfasdfasdfasdf"])
vocab = [tokenizer.convert_ids_to_tokens(i) for i in range(len(tokenizer))]
self.assertEqual(len(vocab), len(tokenizer))
@slow
def test_conversion_reversible(self):
tokenizer = self.get_tokenizer(do_lower_case=False)
vocab = tokenizer.get_vocab()
for word, ind in vocab.items():
if word == tokenizer.unk_token:
continue
self.assertEqual(tokenizer.convert_tokens_to_ids(word), ind)
self.assertEqual(tokenizer.convert_ids_to_tokens(ind), word)
def test_call(self):
# Tests that all call wrap to encode_plus
tokenizer = self.get_tokenizer(do_lower_case=False)
sequences = [
"Testing batch encode plus",
"Testing batch encode plus with different sequence lengths",
"Testing batch encode plus with different sequence lengths correctly pads",
]
# Test not batched
encoded_sequences_1 = tokenizer(sequences[0])
encoded_sequences_2 = tokenizer(sequences[0])
self.assertEqual(encoded_sequences_1, encoded_sequences_2)
# Test not batched pairs
encoded_sequences_1 = tokenizer(sequences[0], sequences[1])
encoded_sequences_2 = tokenizer(sequences[0], sequences[1])
self.assertEqual(encoded_sequences_1, encoded_sequences_2)
# Test batched
encoded_sequences_1 = tokenizer(sequences)
encoded_sequences_2 = tokenizer(sequences)
self.assertEqual(encoded_sequences_1, encoded_sequences_2)
# Test batched pairs
encoded_sequences_1 = tokenizer(list(zip(sequences, sequences)))
encoded_sequences_2 = tokenizer(sequences, sequences)
self.assertEqual(encoded_sequences_1, encoded_sequences_2)
def test_batch_encode_plus_batch_sequence_length(self):
# Tests that all encoded values have the correct size
tokenizer = self.get_tokenizer(do_lower_case=False)
sequences = [
"Testing batch encode plus",
"Testing batch encode plus with different sequence lengths",
"Testing batch encode plus with different sequence lengths correctly pads",
]
encoded_sequences = [tokenizer(sequence) for sequence in sequences]
encoded_sequences_batch = tokenizer(sequences, padding=False)
self.assertListEqual(encoded_sequences, self.convert_batch_to_list_format(encoded_sequences_batch))
maximum_length = len(max([encoded_sequence["input_ids"] for encoded_sequence in encoded_sequences], key=len))
# check correct behaviour if no pad_token_id exists and add it eventually
self._check_no_pad_token_padding(tokenizer, sequences)
encoded_sequences_padded = [
tokenizer(sequence, max_length=maximum_length, padding="max_length") for sequence in sequences
]
encoded_sequences_batch_padded = tokenizer(sequences, padding=True)
self.assertListEqual(
encoded_sequences_padded,
self.convert_batch_to_list_format(encoded_sequences_batch_padded),
)
# check 'longest' is unsensitive to a max length
encoded_sequences_batch_padded_1 = tokenizer(sequences, padding=True)
encoded_sequences_batch_padded_2 = tokenizer(sequences, max_length=maximum_length + 10, padding="longest")
for key in encoded_sequences_batch_padded_1:
self.assertListEqual(
encoded_sequences_batch_padded_1[key],
encoded_sequences_batch_padded_2[key],
)
# check 'no_padding' is unsensitive to a max length
encoded_sequences_batch_padded_1 = tokenizer(sequences, padding=False)
encoded_sequences_batch_padded_2 = tokenizer(sequences, max_length=maximum_length + 10, padding=False)
for key in encoded_sequences_batch_padded_1:
self.assertListEqual(
encoded_sequences_batch_padded_1[key],
encoded_sequences_batch_padded_2[key],
)
def test_batch_encode_plus_padding(self):
# Test that padded sequences are equivalent between batch and individual encoding
# Right padding tests
tokenizer = self.get_tokenizer(do_lower_case=False)
sequences = [
"Testing batch encode plus",
"Testing batch encode plus with different sequence lengths",
"Testing batch encode plus with different sequence lengths correctly pads",
]
max_length = 100
# check correct behaviour if no pad_token_id exists and add it eventually
self._check_no_pad_token_padding(tokenizer, sequences)
encoded_sequences = [
tokenizer(sequence, max_length=max_length, padding="max_length") for sequence in sequences
]
encoded_sequences_batch = tokenizer(sequences, max_length=max_length, padding="max_length")
self.assertListEqual(encoded_sequences, self.convert_batch_to_list_format(encoded_sequences_batch))
# Left padding tests
tokenizer = self.get_tokenizer(do_lower_case=False)
tokenizer.padding_side = "left"
sequences = [
"Testing batch encode plus",
"Testing batch encode plus with different sequence lengths",
"Testing batch encode plus with different sequence lengths correctly pads",
]
max_length = 100
# check correct behaviour if no pad_token_id exists and add it eventually
self._check_no_pad_token_padding(tokenizer, sequences)
encoded_sequences = [
tokenizer(sequence, max_length=max_length, padding="max_length") for sequence in sequences
]
encoded_sequences_batch = tokenizer(sequences, max_length=max_length, padding="max_length")
self.assertListEqual(encoded_sequences, self.convert_batch_to_list_format(encoded_sequences_batch))
def test_pretokenized_inputs(self):
# Test when inputs are pretokenized
# All methods (encode, encode_plus, __call__) go through the same code path,
# so we only test __call__
tokenizer = self.get_tokenizer(do_lower_case=False)
if hasattr(tokenizer, "add_prefix_space") and not tokenizer.add_prefix_space:
return
# Prepare a sequence from our tokenizer vocabulary
sequence, ids = self.get_clean_sequence(tokenizer, with_prefix_space=True, max_length=20)
token_sequence = sequence.split()
# Test single sequence
output = tokenizer(token_sequence, is_split_into_words=True, add_special_tokens=False)
output_sequence = tokenizer(sequence, add_special_tokens=False)
for key in output:
self.assertEqual(output[key], output_sequence[key])
output = tokenizer(token_sequence, is_split_into_words=True, add_special_tokens=True)
output_sequence = tokenizer(sequence, add_special_tokens=True)
for key in output:
self.assertEqual(output[key], output_sequence[key])
# Test sequence pairs
output = tokenizer(token_sequence, token_sequence, is_split_into_words=True, add_special_tokens=False)
output_sequence = tokenizer(sequence, sequence, add_special_tokens=False)
for key in output:
self.assertEqual(output[key], output_sequence[key])
output = tokenizer(token_sequence, token_sequence, is_split_into_words=True, add_special_tokens=True)
output_sequence = tokenizer(sequence, sequence, add_special_tokens=True)
for key in output:
self.assertEqual(output[key], output_sequence[key])
# Test batched inputs
sequence_batch = [sequence.strip()] * 2 + [sequence.strip() + " " + sequence.strip()]
token_sequence_batch = [s.split() for s in sequence_batch]
sequence_batch_cleaned_up_spaces = [" " + " ".join(s) for s in token_sequence_batch]
output = tokenizer(token_sequence_batch, is_split_into_words=True, add_special_tokens=False)
output_sequence = tokenizer(sequence_batch_cleaned_up_spaces, add_special_tokens=False)
for key in output:
self.assertEqual(output[key], output_sequence[key])
output = tokenizer(token_sequence_batch, is_split_into_words=True, add_special_tokens=True)
output_sequence = tokenizer(sequence_batch_cleaned_up_spaces, add_special_tokens=True)
for key in output:
self.assertEqual(output[key], output_sequence[key])
# Test batch_encode_plus for pretokenized inputs pairs
sequence_pair_batch = [(sequence.strip(), sequence.strip())] * 2 + [
(sequence.strip() + " " + sequence.strip(), sequence.strip())
]
token_sequence_pair_batch = [tuple(s.split() for s in pair) for pair in sequence_pair_batch]
sequence_pair_batch_cleaned_up_spaces = [
tuple(" " + " ".join(s) for s in pair) for pair in token_sequence_pair_batch
]
output = tokenizer(token_sequence_pair_batch, is_split_into_words=True, add_special_tokens=False)
output_sequence = tokenizer(sequence_pair_batch_cleaned_up_spaces, add_special_tokens=False)
for key in output:
self.assertEqual(output[key], output_sequence[key])
output = tokenizer(token_sequence_pair_batch, is_split_into_words=True, add_special_tokens=True)
output_sequence = tokenizer(sequence_pair_batch_cleaned_up_spaces, add_special_tokens=True)
for key in output:
self.assertEqual(output[key], output_sequence[key])
def _check_no_pad_token_padding(self, tokenizer, sequences):
# if tokenizer does v have pad_token_id, an error should be thrown
if tokenizer.pad_token_id is None:
with self.assertRaises(ValueError):
if isinstance(sequences, list):
tokenizer(sequences, padding="longest")
else:
tokenizer(sequences, padding=True)
# add pad_token_id to pass subsequent tests
tokenizer.add_special_tokens({"pad_token": "<PAD>"})
@require_torch
def test_prepare_seq2seq_batch(self):
if not self.test_seq2seq:
self.skipTest(reason="test_seq2seq is set to False")
tokenizer = self.get_tokenizer()
# Longer text that will definitely require truncation.
src_text = [
" UN Chief Says There Is No Military Solution in Syria",
" Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for"
" Syria is that 'there is no military solution' to the nearly five-year conflict and more weapons"
" will only worsen the violence and misery for millions of people.",
]
tgt_text = [
"Şeful ONU declară că nu există o soluţie militară în Siria",
"Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al"
' Rusiei pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi'
" că noi arme nu vor face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
]
try:
batch = tokenizer(
src_text,
text_target=tgt_text,
max_length=3,
max_target_length=10,
return_tensors="pt",
src_lang="en_XX", # this should be ignored (for all but mbart) but not cause an error
)
except NotImplementedError:
self.skipTest(reason="Encountered NotImplementedError calling prepare_seq2seq_batch")
self.assertEqual(batch.input_ids.shape[1], 3)
self.assertEqual(batch.labels.shape[1], 10)
# max_target_length will default to max_length if not specified
batch = tokenizer(src_text, text_target=tgt_text, max_length=3, return_tensors="pt")
self.assertEqual(batch.input_ids.shape[1], 3)
self.assertEqual(batch.labels.shape[1], 3)
batch_encoder_only = tokenizer(src_text, max_length=3, max_target_length=10, return_tensors="pt")
self.assertEqual(batch_encoder_only.input_ids.shape[1], 3)
self.assertEqual(batch_encoder_only.attention_mask.shape[1], 3)
self.assertNotIn("decoder_input_ids", batch_encoder_only)
def test_batch_encode_dynamic_overflowing(self):
"""
When calling batch_encode with multiple sequence it can returns different number of
overflowing encoding for each sequence:
[
Sequence 1: [Encoding 1, Encoding 2],
Sequence 2: [Encoding 1],
Sequence 3: [Encoding 1, Encoding 2, ... Encoding N]
]
This needs to be padded so that it can represented as a tensor
"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
tokenizer = self.get_tokenizer(pretrained_name, **kwargs)
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
if is_torch_available():
returned_tensor = "pt"
else:
self.skipTest(reason="No expected framework (PT) found")
if not tokenizer.pad_token or tokenizer.pad_token_id < 0:
self.skipTest(reason="This tokenizer has no padding token set, or pad_token_id < 0")
tokens = tokenizer(
"HuggingFace is solving NLP one commit at a time",
max_length=6,
padding=True,
truncation=True,
return_tensors=returned_tensor,
return_overflowing_tokens=True,
)
for key in filter(lambda x: "overflow_to_sample_mapping" not in x, tokens.keys()):
self.assertEqual(len(tokens[key].shape), 2)
# Mono sample
tokens = tokenizer(
["HuggingFace is solving NLP one commit at a time"],
max_length=6,
padding=True,
truncation="only_first",
return_tensors=returned_tensor,
return_overflowing_tokens=True,
)
for key in filter(lambda x: "overflow_to_sample_mapping" not in x, tokens.keys()):
self.assertEqual(len(tokens[key].shape), 2)
self.assertEqual(tokens[key].shape[-1], 6)
# Multi sample
tokens = tokenizer(
["HuggingFace is solving NLP one commit at a time", "Very tiny input"],
max_length=6,
padding=True,
truncation="only_first",
return_tensors=returned_tensor,
return_overflowing_tokens=True,
)
for key in filter(lambda x: "overflow_to_sample_mapping" not in x, tokens.keys()):
self.assertEqual(len(tokens[key].shape), 2)
self.assertEqual(tokens[key].shape[-1], 6)
def test_added_tokens_serialization(self):
new_eos = AddedToken("[NEW_EOS]", rstrip=False, lstrip=True, normalized=False, special=True)
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
# Test loading a tokenizer from the hub with a new eos token
tokenizer_r = self.get_tokenizer(pretrained_name, eos_token=new_eos)
self.assertEqual(tokenizer_r._special_tokens_map["eos_token"], new_eos)
# Check that the token content is present (may not preserve all AddedToken attributes)
self.assertIn(str(new_eos), [str(t) for t in tokenizer_r.added_tokens_decoder.values()])
EXPECTED_ADDED_TOKENS_DECODER = tokenizer_r.added_tokens_decoder
# Test saving and reloading the tokenizer
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_r.save_pretrained(tmp_dir)
with self.subTest("Saving tokenizer locally and reloading"):
tokenizer = self.tokenizer_class.from_pretrained(tmp_dir)
self.assertTrue(str(new_eos) not in tokenizer.extra_special_tokens)
# Check that the token content is present (may not preserve all AddedToken attributes)
self.assertIn(str(new_eos), [str(t) for t in tokenizer.added_tokens_decoder.values()])
self.assertEqual(str(tokenizer.added_tokens_decoder[tokenizer.eos_token_id]), str(new_eos))
# Check that all original tokens are still present (by string representation)
expected_tokens = {str(t) for t in EXPECTED_ADDED_TOKENS_DECODER.values()}
actual_tokens = {str(t) for t in tokenizer.added_tokens_decoder.values()}
self.assertTrue(expected_tokens.issubset(actual_tokens))
def test_tokenizer_initialization_with_conflicting_key(self):
with self.assertRaises(AttributeError, msg="conflicts with the method"):
self.get_tokenizer(add_special_tokens=True)
with self.assertRaises(AttributeError, msg="conflicts with the method"):
self.get_tokenizer(get_vocab=True)
def test_empty_input_string(self):
empty_input_string = ""
tokenizer_return_type = []
output_tensor_type = []
if is_torch_available():
import numpy as np
import torch
tokenizer_return_type.append("pt")
output_tensor_type.append(torch.int64)
tokenizer_return_type.append("np")
output_tensor_type.append(np.int64)
if is_mlx_available():
import mlx.core as mx
tokenizer_return_type.append("mlx")
output_tensor_type.append(mx.int32)
if len(tokenizer_return_type) == 0:
self.skipTest(reason="No expected framework from PT, or MLX found")
tokenizer = self.get_tokenizer()
for return_type, target_type in zip(tokenizer_return_type, output_tensor_type):
output = tokenizer(empty_input_string, return_tensors=return_type)
self.assertEqual(output.input_ids.dtype, target_type)
def test_pad_token_initialization(self):
"""Test that passing pad_token when creating a tokenizer works correctly."""
tokenizer = self.get_tokenizer(pad_token="[PAD]")
# Verify the pad_token was set correctly
self.assertEqual(tokenizer.pad_token, "[PAD]")
self.assertIsNotNone(tokenizer.pad_token_id)
# Test with two sequences of different lengths to trigger padding
seq_0 = "Test this method."
seq_1 = "With these inputs and some extra tokens here."
# Test padding works with the custom pad_token
output_with_padding = tokenizer(
[seq_0, seq_1],
padding=True,
return_attention_mask=True,
)
# Check that sequences were padded to the same length
self.assertEqual(
len(output_with_padding["input_ids"][0]),
len(output_with_padding["input_ids"][1]),
)
# Check that attention mask has 0s where padding was added (on the shorter sequence)
# Find the shorter sequence
unpadded_lengths = [
len(tokenizer(seq_0, add_special_tokens=True)["input_ids"]),
len(tokenizer(seq_1, add_special_tokens=True)["input_ids"]),
]
shorter_idx = 0 if unpadded_lengths[0] < unpadded_lengths[1] else 1
self.assertIn(0, output_with_padding["attention_mask"][shorter_idx])
def test_bos_token_with_add_bos_token_true(self):
"""Test that passing bos_token with add_bos_token=True during initialization adds the BOS token."""
try:
tokenizer = self.get_tokenizer(bos_token="<BOS>", add_bos_token=True)
except TypeError:
# Some tokenizers might not support add_bos_token parameter
self.skipTest("Tokenizer does not support add_bos_token parameter")
test_string = "Hello world"
# Verify bos_token was set
self.assertEqual(tokenizer.bos_token, "<BOS>")
# Verify the tokenizer was created successfully with these parameters
output = tokenizer(test_string, add_special_tokens=False)
self.assertIsNotNone(output["input_ids"])
def test_bos_token_with_add_bos_token_false(self):
"""Test that passing bos_token with add_bos_token=False during initialization does not add the BOS token."""
try:
tokenizer = self.get_tokenizer(bos_token="<BOS>", add_bos_token=False)
except TypeError:
# Some tokenizers might not support add_bos_token parameter
self.skipTest("Tokenizer does not support add_bos_token parameter")
test_string = "Hello world"
# Verify bos_token was set
self.assertEqual(tokenizer.bos_token, "<BOS>")
# Verify the tokenizer was created successfully with these parameters
output = tokenizer(test_string, add_special_tokens=False)
self.assertIsNotNone(output["input_ids"])
def test_local_files_only(self):
from transformers import AutoTokenizer
pretrained_list = getattr(self, "from_pretrained_id", []) or []
for pretrained_name in pretrained_list:
with self.subTest(f"AutoTokenizer ({pretrained_name})"):
# First cache the tokenizer files
try:
tokenizer_cached = AutoTokenizer.from_pretrained(pretrained_name)
# Now load with local_files_only=True
tokenizer_local = AutoTokenizer.from_pretrained(pretrained_name, local_files_only=True)
# Check that the two tokenizers are identical
self.assertEqual(tokenizer_cached.get_vocab(), tokenizer_local.get_vocab())
self.assertEqual(
tokenizer_cached.all_special_tokens_extended,
tokenizer_local.all_special_tokens_extended,
)
except Exception as _:
pass # if the pretrained model is not loadable how could it pass locally :)
@require_tokenizers
| TokenizerTesterMixin |
python | getsentry__sentry | tests/sentry/api/endpoints/test_project_transaction_names.py | {
"start": 428,
"end": 2526
} | class ____(APITestCase):
def setUp(self) -> None:
super().setUp()
self.login_as(user=self.user)
self.org = self.create_organization(owner=self.user)
self.project = self.create_project(organization=self.org)
self.url = reverse(
"sentry-api-0-organization-project-cluster-transaction-names",
args=[self.org.slug, self.project.slug],
)
redis_client = get_redis_client()
for transaction in ["/a/b/c/", "/a/foo", "/a/whathever/c/d/", "/not_a/"]:
event = load_data(
"transaction",
timestamp=before_now(minutes=1),
start_timestamp=before_now(minutes=1, milliseconds=500),
)
event["transaction"] = transaction
event["transaction_info"] = {"source": "url"}
self.store_event(event, project_id=self.project.id)
redis_client.sadd(
_get_redis_key(ClustererNamespace.TRANSACTIONS, self.project), transaction
)
def _test_get(self, datasource: str) -> None:
request_data: dict[str, str | int | bool | list[int]] = {
"datasource": datasource,
"project": [self.project.id],
"statsPeriod": "1h",
"limit": 5,
"threshold": 3,
"returnAllNames": True,
}
response = self.client.get(
self.url,
data=request_data,
format="json",
)
assert response.status_code == 200, response.content
data = response.data
data["meta"]["unique_transaction_names"].sort()
assert data == {
"rules": ["/a/*/**"],
"meta": {
"rules_projectoption": {},
"rules_redis": {},
"unique_transaction_names": ["/a/b/c/", "/a/foo", "/a/whathever/c/d/", "/not_a/"],
},
}
def test_get_snuba(self) -> None:
self._test_get("snuba")
def test_get_redis(self) -> None:
self._test_get("redis")
| ProjectTransactionNamesClusterTest |
python | fluentpython__example-code-2e | 10-dp-1class-func/strategy.py | {
"start": 1260,
"end": 1431
} | class ____(NamedTuple):
product: str
quantity: int
price: Decimal
def total(self):
return self.price * self.quantity
@dataclass(frozen=True)
| LineItem |
python | google__pytype | pytype/pytd/visitors.py | {
"start": 71033,
"end": 71162
} | class ____(Visitor):
"""Set .cls pointers to 'None'."""
def EnterClassType(self, node):
node.cls = None
| ClearClassPointers |
python | networkx__networkx | networkx/classes/tests/test_coreviews.py | {
"start": 54,
"end": 1640
} | class ____:
# node->data
def setup_method(self):
self.d = {0: {"color": "blue", "weight": 1.2}, 1: {}, 2: {"color": 1}}
self.av = nx.classes.coreviews.AtlasView(self.d)
def test_pickle(self):
view = self.av
pview = pickle.loads(pickle.dumps(view, -1))
assert view == pview
assert view.__slots__ == pview.__slots__
pview = pickle.loads(pickle.dumps(view))
assert view == pview
assert view.__slots__ == pview.__slots__
def test_len(self):
assert len(self.av) == len(self.d)
def test_iter(self):
assert list(self.av) == list(self.d)
def test_getitem(self):
assert self.av[1] is self.d[1]
assert self.av[2]["color"] == 1
pytest.raises(KeyError, self.av.__getitem__, 3)
def test_copy(self):
avcopy = self.av.copy()
assert avcopy[0] == self.av[0]
assert avcopy == self.av
assert avcopy[0] is not self.av[0]
assert avcopy is not self.av
avcopy[5] = {}
assert avcopy != self.av
avcopy[0]["ht"] = 4
assert avcopy[0] != self.av[0]
self.av[0]["ht"] = 4
assert avcopy[0] == self.av[0]
del self.av[0]["ht"]
assert not hasattr(self.av, "__setitem__")
def test_items(self):
assert sorted(self.av.items()) == sorted(self.d.items())
def test_str(self):
out = str(self.d)
assert str(self.av) == out
def test_repr(self):
out = "AtlasView(" + str(self.d) + ")"
assert repr(self.av) == out
| TestAtlasView |
python | has2k1__plotnine | plotnine/geoms/geom_abline.py | {
"start": 609,
"end": 3188
} | class ____(geom):
"""
Lines specified by slope and intercept
{usage}
Parameters
----------
{common_parameters}
"""
DEFAULT_AES = {
"color": "black",
"linetype": "solid",
"alpha": 1,
"size": 0.5,
}
DEFAULT_PARAMS = {
"stat": "identity",
"position": "identity",
"na_rm": False,
"inherit_aes": False,
}
REQUIRED_AES = {"slope", "intercept"}
draw_legend = staticmethod(geom_path.draw_legend)
def __init__(
self,
mapping: aes | None = None,
data: DataLike | None = None,
**kwargs: Any,
):
data, mapping = order_as_data_mapping(data, mapping)
slope = kwargs.pop("slope", None)
intercept = kwargs.pop("intercept", None)
# If nothing is set, it defaults to y=x
if mapping is None and slope is None and intercept is None:
slope = 1
intercept = 0
if slope is not None or intercept is not None:
if mapping:
warn(
"The 'intercept' and 'slope' when specified override "
"the aes() mapping.",
PlotnineWarning,
)
if isinstance(data, Sized) and len(data):
warn(
"The 'intercept' and 'slope' when specified override "
"the data",
PlotnineWarning,
)
if slope is None:
slope = 1
if intercept is None:
intercept = 0
data = pd.DataFrame(
{"intercept": np.repeat(intercept, 1), "slope": slope}
)
mapping = aes(intercept="intercept", slope="slope")
kwargs["show_legend"] = False
geom.__init__(self, mapping, data, **kwargs)
def draw_panel(
self,
data: pd.DataFrame,
panel_params: panel_view,
coord: coord,
ax: Axes,
):
"""
Plot all groups
"""
ranges = coord.backtransform_range(panel_params)
data["x"] = ranges.x[0]
data["xend"] = ranges.x[1]
data["y"] = ranges.x[0] * data["slope"] + data["intercept"]
data["yend"] = ranges.x[1] * data["slope"] + data["intercept"]
data = data.drop_duplicates()
for _, gdata in data.groupby("group"):
gdata.reset_index(inplace=True)
geom_segment.draw_group(
gdata, panel_params, coord, ax, self.params
)
| geom_abline |
python | facebookresearch__faiss | contrib/datasets.py | {
"start": 9480,
"end": 10311
} | class ____(Dataset):
"""
get dataset from
https://github.com/stanis-morozov/ip-nsw#dataset
"""
def __init__(self):
Dataset.__init__(self)
self.d, self.nt, self.nb, self.nq = 100, 0, 10**6, 10000
self.metric = 'IP'
self.basedir = dataset_basedir + 'music-100/'
def get_queries(self):
xq = np.fromfile(self.basedir + 'query_music100.bin', dtype='float32')
xq = xq.reshape(-1, 100)
return xq
def get_database(self):
xb = np.fromfile(self.basedir + 'database_music100.bin', dtype='float32')
xb = xb.reshape(-1, 100)
return xb
def get_groundtruth(self, k=None):
gt = np.load(self.basedir + 'gt.npy')
if k is not None:
assert k <= 100
gt = gt[:, :k]
return gt
| DatasetMusic100 |
python | pyparsing__pyparsing | pyparsing/core.py | {
"start": 186926,
"end": 191629
} | class ____(ParserElement):
"""Abstract subclass of :class:`ParserElement`, for combining and
post-processing parsed tokens.
"""
def __init__(self, expr: Union[ParserElement, str], savelist: bool = False) -> None:
super().__init__(savelist)
if isinstance(expr, str_type):
expr_str = typing.cast(str, expr)
if issubclass(self._literalStringClass, Token):
expr = self._literalStringClass(expr_str) # type: ignore[call-arg]
elif issubclass(type(self), self._literalStringClass):
expr = Literal(expr_str)
else:
expr = self._literalStringClass(Literal(expr_str)) # type: ignore[assignment, call-arg]
expr = typing.cast(ParserElement, expr)
self.expr = expr
if expr is not None:
self.mayIndexError = expr.mayIndexError
self._may_return_empty = expr.mayReturnEmpty
self.set_whitespace_chars(
expr.whiteChars, copy_defaults=expr.copyDefaultWhiteChars
)
self.skipWhitespace = expr.skipWhitespace
self.saveAsList = expr.saveAsList
self.callPreparse = expr.callPreparse
self.ignoreExprs.extend(expr.ignoreExprs)
def recurse(self) -> list[ParserElement]:
return [self.expr] if self.expr is not None else []
def parseImpl(self, instring, loc, do_actions=True):
if self.expr is None:
raise ParseException(instring, loc, "No expression defined", self)
try:
return self.expr._parse(instring, loc, do_actions, callPreParse=False)
except ParseSyntaxException:
raise
except ParseBaseException as pbe:
pbe.pstr = pbe.pstr or instring
pbe.loc = pbe.loc or loc
pbe.parser_element = pbe.parser_element or self
if not isinstance(self, Forward) and self.customName is not None:
if self.errmsg:
pbe.msg = self.errmsg
raise
def leave_whitespace(self, recursive: bool = True) -> ParserElement:
"""
Extends ``leave_whitespace`` defined in base class, and also invokes ``leave_whitespace`` on
the contained expression.
"""
super().leave_whitespace(recursive)
if recursive:
if self.expr is not None:
self.expr = self.expr.copy()
self.expr.leave_whitespace(recursive)
return self
def ignore_whitespace(self, recursive: bool = True) -> ParserElement:
"""
Extends ``ignore_whitespace`` defined in base class, and also invokes ``ignore_whitespace`` on
the contained expression.
"""
super().ignore_whitespace(recursive)
if recursive:
if self.expr is not None:
self.expr = self.expr.copy()
self.expr.ignore_whitespace(recursive)
return self
def ignore(self, other) -> ParserElement:
"""
Define expression to be ignored (e.g., comments) while doing pattern
matching; may be called repeatedly, to define multiple comment or other
ignorable patterns.
"""
if not isinstance(other, Suppress) or other not in self.ignoreExprs:
super().ignore(other)
if self.expr is not None:
self.expr.ignore(self.ignoreExprs[-1])
return self
def streamline(self) -> ParserElement:
super().streamline()
if self.expr is not None:
self.expr.streamline()
return self
def _checkRecursion(self, parseElementList):
if self in parseElementList:
raise RecursiveGrammarException(parseElementList + [self])
subRecCheckList = parseElementList[:] + [self]
if self.expr is not None:
self.expr._checkRecursion(subRecCheckList)
def validate(self, validateTrace=None) -> None:
warnings.warn(
"ParserElement.validate() is deprecated, and should not be used to check for left recursion",
DeprecationWarning,
stacklevel=2,
)
if validateTrace is None:
validateTrace = []
tmp = validateTrace[:] + [self]
if self.expr is not None:
self.expr.validate(tmp)
self._checkRecursion([])
def _generateDefaultName(self) -> str:
return f"{type(self).__name__}:({self.expr})"
# Compatibility synonyms
# fmt: off
leaveWhitespace = replaced_by_pep8("leaveWhitespace", leave_whitespace)
ignoreWhitespace = replaced_by_pep8("ignoreWhitespace", ignore_whitespace)
# fmt: on
| ParseElementEnhance |
python | great-expectations__great_expectations | tests/execution_engine/test_sqlalchemy_execution_engine.py | {
"start": 45122,
"end": 45406
} | class ____:
def test_get_connection(self, sa):
execution_engine = SqlAlchemyExecutionEngine(connection_string="sqlite://")
with execution_engine.get_connection() as connection:
assert isinstance(connection, Connection)
@pytest.mark.unit
| TestGetConnection |
python | huggingface__transformers | src/transformers/models/data2vec/modeling_data2vec_vision.py | {
"start": 35574,
"end": 37954
} | class ____(Data2VecVisionPreTrainedModel):
def __init__(self, config: Data2VecVisionConfig) -> None:
super().__init__(config)
self.num_labels = config.num_labels
self.data2vec_vision = Data2VecVisionModel(config, add_pooling_layer=True)
# Classifier head
self.classifier = nn.Linear(config.hidden_size, config.num_labels) if config.num_labels > 0 else nn.Identity()
# Initialize weights and apply final processing
self.post_init()
@auto_docstring
def forward(
self,
pixel_values: Optional[torch.Tensor] = None,
labels: Optional[torch.Tensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
interpolate_pos_encoding: bool = False,
return_dict: Optional[bool] = None,
) -> Union[tuple, ImageClassifierOutput]:
r"""
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.data2vec_vision(
pixel_values,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
interpolate_pos_encoding=interpolate_pos_encoding,
return_dict=return_dict,
)
pooled_output = outputs.pooler_output if return_dict else outputs[1]
logits = self.classifier(pooled_output)
loss = None
if labels is not None:
loss = self.loss_function(labels, logits, self.config)
if not return_dict:
output = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return ImageClassifierOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
# Copied from transformers.models.beit.modeling_beit.BeitConvModule with Beit->Data2VecVision
| Data2VecVisionForImageClassification |
python | python__mypy | mypyc/ir/ops.py | {
"start": 11640,
"end": 12031
} | class ____(Op):
"""Abstract base class for control flow operations."""
def targets(self) -> Sequence[BasicBlock]:
"""Get all basic block targets of the control operation."""
return ()
def set_target(self, i: int, new: BasicBlock) -> None:
"""Update a basic block target."""
raise AssertionError(f"Invalid set_target({self}, {i})")
@final
| ControlOp |
python | pytorch__pytorch | torch/distributed/fsdp/_trace_utils.py | {
"start": 254,
"end": 1354
} | class ____:
"""
This represents a symbolic tracing configuration.
Args:
tracer (torch.fx.Tracer): An instance of :class:`torch.fx.Tracer` to
use for symbolic tracing. The default value is the native
:class:`torch.fx.Tracer` constructed with default arguments.
However, the user may want to pass a different value such as the
``HFTracer`` for models in the HuggingFace Transformers_ library.
.. _Transformers: https://huggingface.co/docs/transformers/index
concrete_args (Optional[Dict[str, Any]]): Concrete arguments that
should not be treated as ``torch.fx.Proxy`` when tracing the
module ``forward()``. Passing ``concrete_args`` allows partially
specializing the forward, e.g. to remove control flow or data
structures. This ``concrete_args`` here is the same argument used
in :meth:`~torch.fx.Tracer.trace`.
"""
tracer: torch.fx.Tracer = field(default_factory=torch.fx.Tracer)
concrete_args: Optional[dict[str, Any]] = None
| TracingConfig |
python | apache__airflow | providers/amazon/tests/unit/amazon/aws/hooks/test_neptune.py | {
"start": 1487,
"end": 2170
} | class ____:
def test_get_conn_returns_a_boto3_connection(self):
hook = NeptuneHook(aws_conn_id="aws_default")
assert hook.get_conn() is not None
def test_get_cluster_status(self, neptune_hook: NeptuneHook, neptune_cluster_id):
assert neptune_hook.get_cluster_status(neptune_cluster_id) is not None
@mock.patch.object(NeptuneHook, "get_waiter")
def test_wait_for_cluster_instance_availability(
self, mock_get_waiter, neptune_hook: NeptuneHook, neptune_cluster_id
):
neptune_hook.wait_for_cluster_instance_availability(neptune_cluster_id)
mock_get_waiter.assert_called_once_with("db_instance_available")
| TestNeptuneHook |
python | airbytehq__airbyte | airbyte-integrations/connectors/destination-qdrant/destination_qdrant/destination.py | {
"start": 807,
"end": 2680
} | class ____(Destination):
indexer: Indexer
embedder: Embedder
def _init_indexer(self, config: ConfigModel):
self.embedder = create_from_config(config.embedding, config.processing)
self.indexer = QdrantIndexer(config.indexing, self.embedder.embedding_dimensions)
def write(
self, config: Mapping[str, Any], configured_catalog: ConfiguredAirbyteCatalog, input_messages: Iterable[AirbyteMessage]
) -> Iterable[AirbyteMessage]:
config_model = ConfigModel.parse_obj(config)
self._init_indexer(config_model)
writer = Writer(
config_model.processing, self.indexer, self.embedder, batch_size=BATCH_SIZE, omit_raw_text=config_model.omit_raw_text
)
yield from writer.write(configured_catalog, input_messages)
def check(self, logger: logging.Logger, config: Mapping[str, Any]) -> AirbyteConnectionStatus:
parsed_config = ConfigModel.parse_obj(config)
self._init_indexer(parsed_config)
checks = [self.embedder.check(), self.indexer.check(), DocumentProcessor.check_config(parsed_config.processing)]
errors = [error for error in checks if error is not None]
if len(errors) > 0:
return AirbyteConnectionStatus(status=Status.FAILED, message="\n".join(errors))
else:
return AirbyteConnectionStatus(status=Status.SUCCEEDED)
def spec(self, *args: Any, **kwargs: Any) -> ConnectorSpecification:
return ConnectorSpecification(
documentationUrl="https://docs.airbyte.com/integrations/destinations/qdrant",
supportsIncremental=True,
supported_destination_sync_modes=[DestinationSyncMode.overwrite, DestinationSyncMode.append, DestinationSyncMode.append_dedup],
connectionSpecification=ConfigModel.schema(), # type: ignore[attr-defined]
)
| DestinationQdrant |
python | kamyu104__LeetCode-Solutions | Python/find-numbers-with-even-number-of-digits.py | {
"start": 107,
"end": 668
} | class ____(object):
def __init__(self):
M = 10**5
self.__lookup = [0]
i = 10
while i < M:
self.__lookup.append(i)
i *= 10
self.__lookup.append(i)
def findNumbers(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
def digit_count(n):
return bisect.bisect_right(self.__lookup, n)
return sum(digit_count(n) % 2 == 0 for n in nums)
# Time: O(nlogm), n the length of nums, m is the max value of nums
# Space: O(logm)
| Solution |
python | getsentry__sentry | tests/sentry/auth/test_access.py | {
"start": 42518,
"end": 43094
} | class ____(TestCase):
def test_no_access(self) -> None:
result = access.DEFAULT
assert result.sso_is_valid
assert not result.scopes
assert not result.has_team_access(Mock())
assert not result.has_team_scope(Mock(), "project:read")
assert not result.has_project_access(Mock())
assert not result.has_projects_access([Mock()])
assert not result.has_project_scope(Mock(), "project:read")
assert not result.has_project_membership(Mock())
assert not result.permissions
@no_silo_test
| DefaultAccessTest |
python | davidhalter__jedi | jedi/api/exceptions.py | {
"start": 361,
"end": 503
} | class ____(_JediError):
"""
This error is reserved for the future, shouldn't really be happening at the
moment.
"""
| WrongVersion |
python | spack__spack | lib/spack/spack/vendor/attr/validators.py | {
"start": 11864,
"end": 13222
} | class ____:
key_validator = attrib(validator=is_callable())
value_validator = attrib(validator=is_callable())
mapping_validator = attrib(default=None, validator=optional(is_callable()))
def __call__(self, inst, attr, value):
"""
We use a callable class to be able to change the ``__repr__``.
"""
if self.mapping_validator is not None:
self.mapping_validator(inst, attr, value)
for key in value:
self.key_validator(inst, attr, key)
self.value_validator(inst, attr, value[key])
def __repr__(self):
return (
"<deep_mapping validator for objects mapping {key!r} to {value!r}>"
).format(key=self.key_validator, value=self.value_validator)
def deep_mapping(key_validator, value_validator, mapping_validator=None):
"""
A validator that performs deep validation of a dictionary.
:param key_validator: Validator to apply to dictionary keys
:param value_validator: Validator to apply to dictionary values
:param mapping_validator: Validator to apply to top-level mapping
attribute (optional)
.. versionadded:: 19.1.0
:raises TypeError: if any sub-validators fail
"""
return _DeepMapping(key_validator, value_validator, mapping_validator)
@attrs(repr=False, frozen=True, slots=True)
| _DeepMapping |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/sensors/vertex_ai/feature_store.py | {
"start": 1253,
"end": 4486
} | class ____(BaseSensorOperator):
"""
Sensor to monitor the state of a Vertex AI Feature View sync operation.
:param feature_view_sync_name: The name of the feature view sync operation to monitor. (templated)
:param location: Required. The Cloud region in which to handle the request. (templated)
:param gcp_conn_id: The connection ID to use connecting to Google Cloud Platform.
:param wait_timeout: How many seconds to wait for sync to complete.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials.
"""
template_fields: Sequence[str] = ("location", "feature_view_sync_name")
ui_color = "#f0eee4"
def __init__(
self,
*,
feature_view_sync_name: str,
location: str,
gcp_conn_id: str = "google_cloud_default",
wait_timeout: int | None = None,
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.feature_view_sync_name = feature_view_sync_name
self.location = location
self.gcp_conn_id = gcp_conn_id
self.wait_timeout = wait_timeout
self.impersonation_chain = impersonation_chain
self.start_sensor_time: float | None = None
def execute(self, context: Context) -> None:
self.start_sensor_time = time.monotonic()
super().execute(context)
def _duration(self):
return time.monotonic() - self.start_sensor_time
def poke(self, context: Context) -> bool:
hook = FeatureStoreHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
try:
response = hook.get_feature_view_sync(
location=self.location,
feature_view_sync_name=self.feature_view_sync_name,
)
# Check if the sync has completed by verifying end_time exists
if response.get("end_time", 0) > 0:
self.log.info(
"Feature View sync %s completed. Rows synced: %d, Total slots: %d",
self.feature_view_sync_name,
int(response.get("sync_summary", "").get("row_synced", "")),
int(response.get("sync_summary", "").get("total_slot", "")),
)
return True
if self.wait_timeout and self._duration() > self.wait_timeout:
raise AirflowException(
f"Timeout: Feature View sync {self.feature_view_sync_name} "
f"not completed after {self.wait_timeout}s"
)
self.log.info("Waiting for Feature View sync %s to complete.", self.feature_view_sync_name)
return False
except Exception as e:
if self.wait_timeout and self._duration() > self.wait_timeout:
raise AirflowException(
f"Timeout: Feature View sync {self.feature_view_sync_name} "
f"not completed after {self.wait_timeout}s"
)
self.log.info("Error checking sync status, will retry: %s", str(e))
return False
| FeatureViewSyncSensor |
python | ray-project__ray | python/ray/data/_internal/execution/interfaces/op_runtime_metrics.py | {
"start": 4274,
"end": 5754
} | class ____(type):
def __init__(cls, name, bases, dict):
# NOTE: `Field.name` isn't set until the dataclass is created, so we can't
# create the metrics in `metric_field` directly.
super().__init__(name, bases, dict)
# Iterate over the attributes and methods of 'OpRuntimeMetrics'.
for name, value in dict.items():
# If an attribute is a dataclass field and has _IS_FIELD_METRIC_KEY in its
# metadata, then create a metric from the field metadata and add it to the
# list of metrics. See also the 'metric_field' function.
if isinstance(value, Field) and value.metadata.get(_IS_FIELD_METRIC_KEY):
metric = MetricDefinition(
name=name,
description=value.metadata[_METRIC_FIELD_DESCRIPTION_KEY],
metrics_group=value.metadata[_METRIC_FIELD_METRICS_GROUP_KEY],
metrics_type=value.metadata[_METRIC_FIELD_METRICS_TYPE_KEY],
metrics_args=value.metadata[_METRIC_FIELD_METRICS_ARGS_KEY],
map_only=value.metadata[_METRIC_FIELD_IS_MAP_ONLY_KEY],
)
_METRICS.append(metric)
def node_id_from_block_metadata(meta: BlockMetadata) -> str:
if meta.exec_stats is not None and meta.exec_stats.node_id is not None:
node_id = meta.exec_stats.node_id
else:
node_id = NODE_UNKNOWN
return node_id
| OpRuntimesMetricsMeta |
python | getsentry__sentry | src/sentry/uptime/types.py | {
"start": 3194,
"end": 3405
} | class ____:
"""
Represents data used for uptime summary
"""
total_checks: int
failed_checks: int
downtime_checks: int
missed_window_checks: int
avg_duration_us: float
| UptimeSummary |
python | pennersr__django-allauth | allauth/socialaccount/providers/notion/views.py | {
"start": 167,
"end": 745
} | class ____(OAuth2Adapter):
provider_id = "notion"
basic_auth = True
client_class = NotionOAuth2Client
authorize_url = "https://api.notion.com/v1/oauth/authorize"
access_token_url = "https://api.notion.com/v1/oauth/token" # nosec
def complete_login(self, request, app, token, **kwargs):
return self.get_provider().sociallogin_from_response(
request, kwargs["response"]
)
oauth2_login = OAuth2LoginView.adapter_view(NotionOAuth2Adapter)
oauth2_callback = OAuth2CallbackView.adapter_view(NotionOAuth2Adapter)
| NotionOAuth2Adapter |
python | apache__airflow | providers/amazon/tests/unit/amazon/aws/triggers/test_comprehend.py | {
"start": 1350,
"end": 1734
} | class ____:
EXPECTED_WAITER_NAME: str | None = None
JOB_ID: str | None = None
def test_setup(self):
# Ensure that all subclasses have an expected waiter name set.
if self.__class__.__name__ != "TestBaseComprehendTrigger":
assert isinstance(self.EXPECTED_WAITER_NAME, str)
assert isinstance(self.JOB_ID, str)
| TestBaseComprehendTrigger |
python | sphinx-doc__sphinx | sphinx/domains/c/_ast.py | {
"start": 24931,
"end": 25818
} | class ____(ASTTrailingTypeSpec):
def __init__(self, names: list[str]) -> None:
assert len(names) != 0
self.names = names
def __eq__(self, other: object) -> bool:
if not isinstance(other, ASTTrailingTypeSpecFundamental):
return NotImplemented
return self.names == other.names
def __hash__(self) -> int:
return hash(self.names)
def _stringify(self, transform: StringifyTransform) -> str:
return ' '.join(self.names)
def describe_signature(
self, signode: TextElement, mode: str, env: BuildEnvironment, symbol: Symbol
) -> None:
first = True
for n in self.names:
if not first:
signode += addnodes.desc_sig_space()
else:
first = False
signode += addnodes.desc_sig_keyword_type(n, n)
| ASTTrailingTypeSpecFundamental |
python | scipy__scipy | scipy/stats/tests/test_stats.py | {
"start": 272299,
"end": 273263
} | class ____:
rng = np.random.default_rng(3417115752)
x, y = rng.random((2, 10))
@pytest.mark.parametrize('alternative', ['less', 'greater', 'two-sided'])
def test_ranksums_result_attributes(self, alternative):
# ranksums pval = mannwhitneyu pval w/out continuity or tie correction
res1 = stats.ranksums(self.x, self.y,
alternative=alternative).pvalue
res2 = stats.mannwhitneyu(self.x, self.y, use_continuity=False,
alternative=alternative).pvalue
assert_allclose(res1, res2)
def test_ranksums_named_results(self):
res = stats.ranksums(self.x, self.y)
check_named_results(res, ('statistic', 'pvalue'))
def test_input_validation(self):
with assert_raises(ValueError, match="`alternative` must be 'less'"):
stats.ranksums(self.x, self.y, alternative='foobar')
@make_xp_test_case(stats.jarque_bera)
| TestRankSums |
python | pytest-dev__pytest-cov | src/pytest_cov/engine.py | {
"start": 476,
"end": 1345
} | class ____:
@staticmethod
def write(v):
pass
@contextlib.contextmanager
def _backup(obj, attr):
backup = getattr(obj, attr)
try:
setattr(obj, attr, copy.copy(backup))
yield
finally:
setattr(obj, attr, backup)
def _ensure_topdir(meth):
@functools.wraps(meth)
def ensure_topdir_wrapper(self, *args, **kwargs):
try:
original_cwd = Path.cwd()
except OSError:
# Looks like it's gone, this is non-ideal because a side-effect will
# be introduced in the tests here but we can't do anything about it.
original_cwd = None
os.chdir(self.topdir)
try:
return meth(self, *args, **kwargs)
finally:
if original_cwd is not None:
os.chdir(original_cwd)
return ensure_topdir_wrapper
| _NullFile |
python | huggingface__transformers | tests/models/siglip/test_modeling_siglip.py | {
"start": 16972,
"end": 20307
} | class ____(SiglipModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
additional_model_inputs = ["pixel_values"]
all_model_classes = (SiglipModel,) if is_torch_available() else ()
pipeline_model_mapping = {"feature-extraction": SiglipModel} if is_torch_available() else {}
test_resize_embeddings = False
test_attention_outputs = False
# MP works but offload doesn't work when the MultiheadAttention is offloaded
# TODO: One potential solution would be to add to set preload_module_classes = ["SiglipMultiheadAttentionPoolingHead"]
# in the dispatch_model function
test_cpu_offload = False
test_disk_offload_safetensors = False
test_disk_offload_bin = False
_is_composite = True
def setUp(self):
self.model_tester = SiglipModelTester(self)
self.config_tester = ConfigTester(self, config_class=SiglipConfig, has_text_modality=False)
def test_config(self):
self.config_tester.run_common_tests()
# Copied from tests.models.clip.test_modeling_clip.CLIPModelTest.test_model
def test_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs)
@unittest.skip(reason="Hidden_states is tested in individual model tests")
# Copied from tests.models.clip.test_modeling_clip.CLIPModelTest.test_hidden_states_output
def test_hidden_states_output(self):
pass
@unittest.skip(reason="Inputs_embeds is tested in individual model tests")
# Copied from tests.models.clip.test_modeling_clip.CLIPModelTest.test_inputs_embeds
def test_inputs_embeds(self):
pass
@unittest.skip(reason="Retain_grad is tested in individual model tests")
# Copied from tests.models.clip.test_modeling_clip.CLIPModelTest.test_retain_grad_hidden_states_attentions
def test_retain_grad_hidden_states_attentions(self):
pass
@unittest.skip(reason="SiglipModel does not have input/output embeddings")
# Copied from tests.models.clip.test_modeling_clip.CLIPModelTest.test_model_get_set_embeddings
def test_model_get_set_embeddings(self):
pass
# Copied from tests.models.clip.test_modeling_clip.CLIPModelTest.test_load_vision_text_config with CLIP->Siglip
def test_load_vision_text_config(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
# Save SiglipConfig and check if we can load SiglipVisionConfig from it
with tempfile.TemporaryDirectory() as tmp_dir_name:
config.save_pretrained(tmp_dir_name)
vision_config = SiglipVisionConfig.from_pretrained(tmp_dir_name)
self.assertDictEqual(config.vision_config.to_dict(), vision_config.to_dict())
# Save SiglipConfig and check if we can load SiglipTextConfig from it
with tempfile.TemporaryDirectory() as tmp_dir_name:
config.save_pretrained(tmp_dir_name)
text_config = SiglipTextConfig.from_pretrained(tmp_dir_name)
self.assertDictEqual(config.text_config.to_dict(), text_config.to_dict())
@slow
def test_model_from_pretrained(self):
model_name = "google/siglip-base-patch16-224"
model = SiglipModel.from_pretrained(model_name)
self.assertIsNotNone(model)
| SiglipModelTest |
python | huggingface__transformers | src/transformers/models/dbrx/modeling_dbrx.py | {
"start": 8392,
"end": 11872
} | class ____(nn.Module):
"""Modular DBRX attention component that can be reused across different model architectures."""
def __init__(
self,
config,
layer_idx: Optional[int] = None,
**kwargs,
):
super().__init__()
self.config = config
self.hidden_size = config.d_model
self.num_heads = config.n_heads
self.head_dim = self.hidden_size // self.num_heads
self.max_position_embeddings = config.max_seq_len
self.layer_idx = layer_idx
attn_config = config.attn_config
self.attention_dropout = attn_config.attn_pdrop
self.clip_qkv = attn_config.clip_qkv
self.num_key_value_heads = attn_config.kv_n_heads
self.num_key_value_groups = self.num_heads // self.num_key_value_heads
self.scaling = self.head_dim**-0.5
self.rope_theta = attn_config.rope_theta
self.is_causal = True
self.Wqkv = nn.Linear(
self.hidden_size, self.hidden_size + 2 * self.num_key_value_heads * self.head_dim, bias=False
)
self.out_proj = nn.Linear(self.hidden_size, self.hidden_size, bias=False)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
position_embeddings: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
cache_position: Optional[torch.LongTensor] = None,
**kwargs,
) -> tuple[torch.Tensor, torch.Tensor]:
input_shape = hidden_states.shape[:-1]
hidden_shape = (*input_shape, -1, self.head_dim)
qkv_states = self.Wqkv(hidden_states)
min_val = -self.clip_qkv if self.clip_qkv is not None else None
qkv_states = qkv_states.clamp(min=min_val, max=self.clip_qkv)
query_states, key_states, value_states = qkv_states.split(
[
self.hidden_size,
self.num_key_value_heads * self.head_dim,
self.num_key_value_heads * self.head_dim,
],
dim=2,
)
query_states = query_states.view(hidden_shape).transpose(1, 2)
key_states = key_states.view(hidden_shape).transpose(1, 2)
value_states = value_states.view(hidden_shape).transpose(1, 2)
cos, sin = position_embeddings
query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
if past_key_values is not None:
# sin and cos are specific to RoPE models; cache_position needed for the static cache
cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs)
attention_interface: Callable = eager_attention_forward
if self.config._attn_implementation != "eager":
attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
attn_output, attn_weights = attention_interface(
self,
query_states,
key_states,
value_states,
attention_mask,
dropout=0.0 if not self.training else self.attention_dropout,
scaling=self.scaling,
**kwargs,
)
attn_output = attn_output.reshape(*input_shape, -1).contiguous()
attn_output = self.out_proj(attn_output)
return attn_output, attn_weights
| DbrxAttention |
python | django__django | tests/generic_relations_regress/models.py | {
"start": 4537,
"end": 4665
} | class ____(models.Model):
nodes = GenericRelation(Node)
related_obj = models.ForeignKey("Related", models.CASCADE)
| Content |
python | has2k1__plotnine | plotnine/geoms/geom_hline.py | {
"start": 584,
"end": 2481
} | class ____(geom):
"""
Horizontal line
{usage}
Parameters
----------
{common_parameters}
"""
DEFAULT_AES = {
"color": "black",
"linetype": "solid",
"size": 0.5,
"alpha": 1,
}
REQUIRED_AES = {"yintercept"}
DEFAULT_PARAMS = {
"stat": "identity",
"position": "identity",
"na_rm": False,
"inherit_aes": False,
}
draw_legend = staticmethod(geom_path.draw_legend)
legend_key_size = staticmethod(geom_path.legend_key_size)
def __init__(
self,
mapping: aes | None = None,
data: DataLike | None = None,
**kwargs: Any,
):
data, mapping = order_as_data_mapping(data, mapping)
yintercept = kwargs.pop("yintercept", None)
if yintercept is not None:
if mapping:
warn(
"The 'yintercept' parameter has overridden "
"the aes() mapping.",
PlotnineWarning,
)
data = pd.DataFrame({"yintercept": np.repeat(yintercept, 1)})
mapping = aes(yintercept="yintercept")
kwargs["show_legend"] = False
geom.__init__(self, mapping, data, **kwargs)
def draw_panel(
self,
data: pd.DataFrame,
panel_params: panel_view,
coord: coord,
ax: Axes,
):
"""
Plot all groups
"""
ranges = coord.backtransform_range(panel_params)
data["y"] = data["yintercept"]
data["yend"] = data["yintercept"]
data["x"] = ranges.x[0]
data["xend"] = ranges.x[1]
data = data.drop_duplicates()
for _, gdata in data.groupby("group"):
gdata.reset_index(inplace=True)
geom_segment.draw_group(
gdata, panel_params, coord, ax, self.params
)
| geom_hline |
python | astropy__astropy | astropy/io/votable/converters.py | {
"start": 28925,
"end": 29658
} | class ____(VarArray):
"""
Handles a variable-length array of complex numbers.
"""
def parse(self, value, config=None, pos=None):
if value.strip() == "":
return ma.array([]), True
parts = self._splitter(value, config, pos)
parse_parts = self._base.parse_parts
result = []
result_mask = []
for i in range(0, len(parts), 2):
value = [float(x) for x in parts[i : i + 2]]
value, mask = parse_parts(value, config, pos)
result.append(value)
result_mask.append(mask)
return (
_make_masked_array(np.array(result, dtype=self._base.format), result_mask),
False,
)
| ComplexVarArray |
python | keras-team__keras | keras/src/backend/common/remat_test.py | {
"start": 351,
"end": 2966
} | class ____(testing.TestCase):
def setUp(self):
"""Reset global state before each test."""
global_state.clear_session()
def test_remat_scope_activation(self):
self.assertIsNone(
get_current_remat_mode()
) # Initially, no mode is active
with RematScope(mode="full"):
self.assertEqual(
get_current_remat_mode().mode, "full"
) # Mode is set to "full"
self.assertIsNone(
get_current_remat_mode()
) # Mode is restored to None after scope ends
def test_remat_scope_nested(self):
"""Test nested scopes with different rematerialization modes."""
with RematScope(mode="full"):
self.assertEqual(
get_current_remat_mode().mode, "full"
) # Outer scope is "full"
with RematScope(mode="activations"):
self.assertEqual(
get_current_remat_mode().mode, "activations"
) # Inner scope is "activations"
self.assertEqual(
get_current_remat_mode().mode, "full"
) # Back to outer scope
self.assertIsNone(
get_current_remat_mode()
) # Mode is restored to None after all scopes
def test_remat_scope_stack_management(self):
"""Test that the remat_scope_stack is managed correctly."""
self.assertIsNone(
global_state.get_global_attribute("remat_scope_stack")
) # No stack initially
with RematScope(mode="full"):
remat_stack = global_state.get_global_attribute("remat_scope_stack")
self.assertIsNotNone(remat_stack) # Stack is initialized
self.assertEqual(len(remat_stack), 1) # Stack contains one entry
with RematScope(mode="activations"):
remat_stack = global_state.get_global_attribute(
"remat_scope_stack"
)
self.assertEqual(
len(remat_stack), 2
) # Stack contains two entries
remat_stack = global_state.get_global_attribute("remat_scope_stack")
self.assertEqual(len(remat_stack), 1) # Back to one entry
self.assertEqual(
global_state.get_global_attribute("remat_scope_stack"), []
) # Stack is cleared
def test_invalid_mode(self):
"""Test that invalid rematerialization modes raise an error."""
with self.assertRaises(ValueError):
RematScope(mode="invalid") # Invalid mode should raise ValueError
| TestRematScope |
python | apache__airflow | dev/breeze/src/airflow_breeze/utils/provider_dependencies.py | {
"start": 7708,
"end": 17720
} | class ____(NamedTuple):
airflow_version: str
constraints_files: list[ConstraintsForPython]
def load_constraints() -> dict[str, AirflowVersionConstraints]:
get_console().print("[info]Loading constraints for all Airflow versions[/]")
all_constraints: dict[str, AirflowVersionConstraints] = {}
for filename in sorted(CONSTRAINTS_CACHE_PATH.glob("constraints-*-python-*.txt")):
filename_match = MATCH_CONSTRAINTS_FILE_REGEX.match(filename.name)
if filename_match:
airflow_version = filename_match.group(1)
python_version = filename_match.group(2)
if airflow_version not in all_constraints:
airflow_version_constraints = AirflowVersionConstraints(
airflow_version=airflow_version, constraints_files=[]
)
all_constraints[airflow_version] = airflow_version_constraints
else:
airflow_version_constraints = all_constraints[airflow_version]
package_dict: dict[str, PackageInfo] = {}
for line in filename.read_text().splitlines():
if line and not line.startswith("#"):
package_name, version = line.split("==")
package_dict[package_name] = PackageInfo(package_name=package_name, version=version)
airflow_version_constraints.constraints_files.append(
ConstraintsForPython(python_version=python_version, packages=package_dict)
)
get_console().print("[info]Constraints loaded[/]\n")
if get_verbose():
get_console().print("[info]All constraints loaded:\n")
for airflow_version, constraints in all_constraints.items():
get_console().print(f"[info]Airflow version: {airflow_version}[/]")
for constraints_file in constraints.constraints_files:
get_console().print(f" Python version: {constraints_file.python_version}")
return all_constraints
START_AIRFLOW_VERSION_FROM = "0.0.0"
def generate_providers_metadata_for_provider(
provider_id: str,
provider_version: str | None,
constraints: dict[str, AirflowVersionConstraints],
all_airflow_releases: list[str],
airflow_release_dates: dict[str, str],
current_metadata: dict[str, dict[str, dict[str, str]]],
) -> dict[str, dict[str, str]]:
get_console().print(f"[info]Generating metadata for {provider_id}")
provider_yaml_dict = get_provider_distributions_metadata().get(provider_id)
provider_metadata: dict[str, dict[str, str]] = {}
package_name = "apache-airflow-providers-" + provider_id.replace(".", "-")
provider_versions = list(reversed(provider_yaml_dict["versions"]))
provider_metadata_found = False
if get_verbose():
get_console().print(f"[info]Provider {provider_id} versions:")
get_console().print(provider_versions)
if provider_version and provider_version not in provider_versions:
get_console().print(
f"[error]Provider {provider_id} version {provider_version} is not in the list of versions: "
f"{provider_versions}. Skipping it."
)
sys.exit(1)
old_provider_metadata = current_metadata.get(provider_id, {})
for current_provider_version in provider_versions:
if provider_version and current_provider_version != provider_version:
continue
exact_provider_version_found_in_constraints = False
provider_date_released = get_tag_date(
tag="providers-" + provider_id.replace(".", "-") + "/" + current_provider_version
)
if not provider_date_released:
continue
if get_verbose():
get_console().print(
f"[info]Checking provider {provider_id} version {current_provider_version} released on {provider_date_released}"
)
airflow_date_released = airflow_release_dates[all_airflow_releases[0]]
last_airflow_version = START_AIRFLOW_VERSION_FROM
for airflow_version in all_airflow_releases:
airflow_date_released = airflow_release_dates[airflow_version]
if get_verbose():
get_console().print(
f"[info]Checking airflow_version {airflow_version} released on {airflow_date_released}"
)
for python_version_constraint_file in constraints[airflow_version].constraints_files:
if get_verbose():
get_console().print(
f"[info]Checking constraints for Python {python_version_constraint_file.python_version}"
)
package_info = python_version_constraint_file.packages.get(package_name)
if not package_info:
if get_verbose():
get_console().print(
f"[info]Package {package_name} not found in constraints for Airflow {airflow_version} "
f"and Python version {python_version_constraint_file.python_version}"
)
else:
if get_verbose():
get_console().print(
f"[info]Package {package_name} found in constraints for Airflow {airflow_version} "
f"and Python version {python_version_constraint_file.python_version}: {package_info}"
)
if package_info and package_info.version == current_provider_version:
last_airflow_version = airflow_version
exact_provider_version_found_in_constraints = True
if get_verbose():
get_console().print(
f"[success]Package {package_name} in version {current_provider_version} "
f"found in constraints for Airflow {airflow_version} and "
f"Python version {python_version_constraint_file.python_version}"
)
break
if (
airflow_date_released > provider_date_released
and last_airflow_version == START_AIRFLOW_VERSION_FROM
):
# released before first Airflow version so it should be associated with the
# first "real" Airflow version released after it - but in case it was actually
# mentioned later in constraints, we will override it later
last_airflow_version = airflow_version
if get_verbose():
get_console().print(
f"[warning]Provider {provider_id} version {current_provider_version} released on "
f"{provider_date_released} could be associated with {airflow_version} that "
f"was released on {airflow_date_released}. Setting it as candidate."
)
if exact_provider_version_found_in_constraints:
break
if last_airflow_version == START_AIRFLOW_VERSION_FROM:
# If we did not find any Airflow version that is associated with this provider version
# we will add the latest released Airflow version
last_airflow_version = all_airflow_releases[-1]
old_provider_metadata_for_version = old_provider_metadata.get(current_provider_version, {})
new_provider_metadata_for_version = {
"associated_airflow_version": last_airflow_version,
"date_released": provider_date_released,
}
provider_metadata[current_provider_version] = new_provider_metadata_for_version
provider_version_metadata_changed_or_added = False
if old_provider_metadata_for_version:
if (
old_provider_metadata_for_version["associated_airflow_version"] != last_airflow_version
or old_provider_metadata_for_version["date_released"] != provider_date_released
):
get_console().print(
f"[warning]Old provider metadata for {provider_id} version {current_provider_version} "
f"released on {provider_date_released} differs: "
f"Old metadata: {old_provider_metadata_for_version}. "
f"New metadata: {new_provider_metadata_for_version}."
)
provider_version_metadata_changed_or_added = True
else:
provider_version_metadata_changed_or_added = True
if exact_provider_version_found_in_constraints:
provider_metadata_found = True
if get_verbose() or provider_version_metadata_changed_or_added:
get_console().print(
f"[success]Provider {provider_id} version {current_provider_version} released on {provider_date_released} "
f"is associated with Airflow {last_airflow_version} released on {airflow_date_released}"
)
else:
if get_verbose() or provider_version_metadata_changed_or_added:
get_console().print(
f"[warning]Provider {provider_id} version {current_provider_version} released on {provider_date_released} "
f"was not mentioned in any Airflow version in constraints. Assuming {last_airflow_version} "
f"released on {airflow_date_released} that was released after it."
)
provider_metadata_found = True
if not provider_metadata_found:
get_console().print(
f"[warning]No constraints mention {provider_id} in any Airflow version in any Python version. "
f"Skipping it altogether."
)
return {}
if get_verbose():
get_console().print(f"[success]Metadata for {provider_id} found:\n")
get_console().print(provider_metadata)
return provider_metadata
| AirflowVersionConstraints |
python | huggingface__transformers | tests/models/wav2vec2/test_feature_extraction_wav2vec2.py | {
"start": 1309,
"end": 3099
} | class ____:
def __init__(
self,
parent,
batch_size=7,
min_seq_length=400,
max_seq_length=2000,
feature_size=1,
padding_value=0.0,
sampling_rate=16000,
return_attention_mask=True,
do_normalize=True,
):
self.parent = parent
self.batch_size = batch_size
self.min_seq_length = min_seq_length
self.max_seq_length = max_seq_length
self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
self.feature_size = feature_size
self.padding_value = padding_value
self.sampling_rate = sampling_rate
self.return_attention_mask = return_attention_mask
self.do_normalize = do_normalize
def prepare_feat_extract_dict(self):
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def prepare_inputs_for_common(self, equal_length=False, numpify=False):
def _flatten(list_of_lists):
return list(itertools.chain(*list_of_lists))
if equal_length:
speech_inputs = floats_list((self.batch_size, self.max_seq_length))
else:
# make sure that inputs increase in size
speech_inputs = [
_flatten(floats_list((x, self.feature_size)))
for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
]
if numpify:
speech_inputs = [np.asarray(x) for x in speech_inputs]
return speech_inputs
| Wav2Vec2FeatureExtractionTester |
python | fluentpython__example-code-2e | 14-inheritance/diamond.py | {
"start": 1243,
"end": 1376
} | class ____(A, B): # <4>
def ping(self):
print(f'{self}.ping() in Leaf')
super().ping()
# end::DIAMOND_CLASSES[]
| Leaf |
python | fastai__fastai | fastai/text/core.py | {
"start": 11501,
"end": 14579
} | class ____(Transform):
"Provides a consistent `Transform` interface to tokenizers operating on `DataFrame`s and folders"
input_types = (str, list, L, tuple, Path)
def __init__(self, tok, rules=None, counter=None, lengths=None, mode=None, sep=' '):
if isinstance(tok,type): tok=tok()
store_attr('tok,counter,lengths,mode,sep')
self.rules = defaults.text_proc_rules if rules is None else rules
@classmethod
@delegates(tokenize_df, keep=True)
def from_df(cls, text_cols, tok=None, rules=None, sep=' ', **kwargs):
if tok is None: tok = WordTokenizer()
res = cls(tok, rules=rules, mode='df')
res.kwargs,res.train_setup = merge({'tok': tok}, kwargs),False
res.text_cols,res.sep = text_cols,sep
default_val = inspect.signature(tokenize_df).parameters['tok_text_col'].default
res.tok_text_col = kwargs.get('tok_text_col', default_val)
return res
@classmethod
@delegates(tokenize_folder, keep=True)
def from_folder(cls, path, tok=None, rules=None, **kwargs):
path = Path(path)
if tok is None: tok = WordTokenizer()
output_dir = tokenize_folder(path, tok=tok, rules=rules, **kwargs)
res = cls(tok, counter=load_pickle(output_dir/fn_counter_pkl),
lengths=load_pickle(output_dir/fn_lengths_pkl), rules=rules, mode='folder')
res.path,res.output_dir = path,output_dir
return res
def setups(self, dsets):
if not self.mode == 'df' or not isinstance(dsets.items, pd.DataFrame): return
dsets.items,count = tokenize_df(dsets.items, self.text_cols, rules=self.rules, **self.kwargs)
if self.counter is None: self.counter = count
if self.lengths is None: self.lengths = dsets.items[f'{self.tok_text_col}_length'].values
return dsets
def encodes(self, o:Path):
if self.mode=='folder' and str(o).startswith(str(self.path)):
tok = self.output_dir/o.relative_to(self.path)
return L(tok.read_text(encoding='UTF-8').split(' '))
else: return self._tokenize1(o.read_text())
def encodes(self, o:str): return self._tokenize1(o)
def _tokenize1(self, o): return first(self.tok([compose(*self.rules)(o)]))
def get_lengths(self, items):
if self.lengths is None: return None
if self.mode == 'df':
if isinstance(items, pd.DataFrame) and f'{self.tok_text_col}_length' in items.columns:
return items[f'{self.tok_text_col}_length'].values
if self.mode == 'folder':
try:
res = [self.lengths[str(Path(i).relative_to(self.path))] for i in items]
if len(res) == len(items): return res
except: return None
def decodes(self, o): return TitledStr(self.sep.join(o))
# %% ../../nbs/30_text.core.ipynb 75
eu_langs = ["bg", "cs", "da", "de", "el", "en", "es", "et", "fi", "fr", "ga", "hr", "hu",
"it","lt","lv","mt","nl","pl","pt","ro","sk","sl","sv"] # all European langs
# %% ../../nbs/30_text.core.ipynb 76
| Tokenizer |
python | pytorch__pytorch | benchmarks/tensorexpr/benchmark.py | {
"start": 8617,
"end": 10883
} | class ____:
r"""
An Auxiliary class for dynamic shape benchmarks
Pre-computes input with random shapes and also
modifies the compute method so in each call the
fuser sees a different input tensor shape
"""
# Number of random inputs in an instance
SAMPLE_SIZE = 100
def __init__(self, dynamic_range=1.2):
self._input_samples = []
self._input_sample_index = 0
self._dynamic_range = (
1.0 / dynamic_range if dynamic_range > 1.0 else dynamic_range
)
self._enable_dynamic_shapes = True
# Returns the input test case that current index points to
@property
def inputs(self):
return self._input_samples[self._input_sample_index]
# An inputs assignment actually adds a test case in the class buffer
@inputs.setter
def inputs(self, val):
self._input_samples.append(val)
# Runs normal compute while increment test case index
def compute(self):
super().compute()
self._input_sample_index = (self._input_sample_index + 1) % self.SAMPLE_SIZE
# Defined by benchmark, the benchmark needs to specify the input
# tensor construction in this method, essentially the same way
# a benchmark creates the inputs list in the initializer
def instantiate_input(self):
raise NotImplementedError
# Instantiate random shaped inputs and start the benchmark run
def run(self, args):
# force disable dynamic shape from command line
if args.no_dynamic_shape:
self._enable_dynamic_shapes = False
self.load_inputs()
super().run(args)
# pre-compute inputs so the creations of random tensors
# do not add to the compute time
def load_inputs(self):
for i in range(self.SAMPLE_SIZE - 1):
self.instantiate_input()
# returns a randomized shape
def rand_shape(self, shape):
if not self._enable_dynamic_shapes:
return shape
ratios = np.random.uniform(self._dynamic_range, 1.0, len(shape))
dyn_shape = list(np.multiply(shape, ratios).astype(int))
return dyn_shape
benchmark_classes = []
def register_benchmark_class(benchmark_cls):
benchmark_classes.append(benchmark_cls)
| DynamicShape |
python | spack__spack | lib/spack/spack/util/windows_registry.py | {
"start": 545,
"end": 5342
} | class ____:
"""
Class wrapping a Windows registry key
"""
def __init__(self, name, handle):
self.path = name
self.name = os.path.split(name)[-1]
self._handle = handle
self._keys = []
self._values = {}
@property
def values(self):
"""Returns all subvalues of this key as RegistryValue objects in dictionary
of value name : RegistryValue object
"""
self._gather_value_info()
return self._values
@property
def subkeys(self):
"""Returns list of all subkeys of this key as RegistryKey objects"""
self._gather_subkey_info()
return self._keys
@property
def hkey(self):
return self._handle
@contextmanager
def winreg_error_handler(self, name, *args, **kwargs):
try:
yield
except OSError as err:
# Expected errors that occur on occasion, these are easily
# debug-able and have sufficiently verbose reporting and obvious cause
# [WinError 2]: the system cannot find the file specified - lookup item does
# not exist
# [WinError 5]: Access is denied - user not in key's ACL
if hasattr(err, "winerror") and err.winerror in (5, 2):
raise err
# Other OS errors are more difficult to diagnose, so we wrap them in some extra
# reporting
raise InvalidRegistryOperation(name, err, *args, **kwargs) from err
def OpenKeyEx(self, subname, **kwargs):
"""Convenience wrapper around winreg.OpenKeyEx"""
tty.debug(
f"[WINREG ACCESS] Accessing Reg Key {self.path}/{subname} with"
f" {kwargs.get('access', 'default')} access"
)
with self.winreg_error_handler("OpenKeyEx", subname, **kwargs):
return winreg.OpenKeyEx(self.hkey, subname, **kwargs)
def QueryInfoKey(self):
"""Convenience wrapper around winreg.QueryInfoKey"""
tty.debug(f"[WINREG ACCESS] Obtaining key,value information from key {self.path}")
with self.winreg_error_handler("QueryInfoKey"):
return winreg.QueryInfoKey(self.hkey)
def EnumKey(self, index):
"""Convenience wrapper around winreg.EnumKey"""
tty.debug(
"[WINREG ACCESS] Obtaining name of subkey at index "
f"{index} from registry key {self.path}"
)
with self.winreg_error_handler("EnumKey", index):
return winreg.EnumKey(self.hkey, index)
def EnumValue(self, index):
"""Convenience wrapper around winreg.EnumValue"""
tty.debug(
f"[WINREG ACCESS] Obtaining value at index {index} from registry key {self.path}"
)
with self.winreg_error_handler("EnumValue", index):
return winreg.EnumValue(self.hkey, index)
def QueryValueEx(self, name, **kwargs):
"""Convenience wrapper around winreg.QueryValueEx"""
tty.debug(f"[WINREG ACCESS] Obtaining value {name} from registry key {self.path}")
with self.winreg_error_handler("QueryValueEx", name, **kwargs):
return winreg.QueryValueEx(self.hkey, name, **kwargs)
def __str__(self):
return self.name
def _gather_subkey_info(self):
"""Composes all subkeys into a list for access"""
if self._keys:
return
sub_keys, _, _ = self.QueryInfoKey()
for i in range(sub_keys):
sub_name = self.EnumKey(i)
try:
sub_handle = self.OpenKeyEx(sub_name, access=winreg.KEY_READ)
self._keys.append(RegistryKey(os.path.join(self.path, sub_name), sub_handle))
except OSError as e:
if hasattr(e, "winerror") and e.winerror == 5:
# This is a permission error, we can't read this key
# move on
pass
else:
raise
def _gather_value_info(self):
"""Compose all values for this key into a dict of form value name: RegistryValue Object"""
if self._values:
return
_, values, _ = self.QueryInfoKey()
for i in range(values):
value_name, value_data, _ = self.EnumValue(i)
self._values[value_name] = RegistryValue(value_name, value_data, self)
def get_subkey(self, sub_key):
"""Returns subkey of name sub_key in a RegistryKey objects"""
return RegistryKey(
os.path.join(self.path, sub_key), self.OpenKeyEx(sub_key, access=winreg.KEY_READ)
)
def get_value(self, val_name):
"""Returns value associated with this key in RegistryValue object"""
return RegistryValue(val_name, self.QueryValueEx(val_name)[0], self)
| RegistryKey |
python | sphinx-doc__sphinx | tests/roots/test-ext-autodoc/target/docstring_signature.py | {
"start": 33,
"end": 122
} | class ____:
"""B(foo, bar)"""
def __init__(self):
"""B(foo, bar, baz)"""
| B |
python | kamyu104__LeetCode-Solutions | Python/minimum-distance-between-three-equal-elements-i.py | {
"start": 79,
"end": 596
} | class ____(object):
def minimumDistance(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
INF = float("inf")
result = INF
lookup = collections.defaultdict(list)
for i, x in enumerate(nums):
lookup[x].append(i)
if len(lookup[x]) < 3:
continue
result = min(result, 2*(i-lookup[x][-3])) # k, j, i = lookup[x][:-3], (i-j)+(j-k)+(i-k) = 2*(i-k)
return result if result != INF else -1
| Solution |
python | pypa__warehouse | warehouse/accounts/interfaces.py | {
"start": 390,
"end": 437
} | class ____(TokenException):
pass
| TokenExpired |
python | HypothesisWorks__hypothesis | hypothesis-python/tests/cover/test_pretty.py | {
"start": 19712,
"end": 20035
} | class ____:
x: object
def test_pretty_prints_data_classes():
assert pretty.pretty(SomeDataClass(ReprDetector())) == "SomeDataClass(x=GOOD)"
def test_handles_cycles_in_dataclass():
x = SomeDataClass(x=1)
x.x = x
assert pretty.pretty(x) == "SomeDataClass(x=SomeDataClass(...))"
@dataclass
| SomeDataClass |
python | huggingface__transformers | src/transformers/models/git/modeling_git.py | {
"start": 46673,
"end": 57445
} | class ____(GitPreTrainedModel, GenerationMixin):
_tied_weights_keys = {"output.weight": "git.embeddings.word_embeddings.weight"}
def __init__(self, config):
super().__init__(config)
self.git = GitModel(config)
self.output = nn.Linear(config.hidden_size, config.vocab_size)
# Initialize weights and apply final processing
self.post_init()
def get_output_embeddings(self):
return self.output
def set_output_embeddings(self, new_embeddings):
self.output = new_embeddings
@auto_docstring
def forward(
self,
input_ids: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.Tensor] = None,
pixel_values: Optional[torch.Tensor] = None,
inputs_embeds: Optional[torch.Tensor] = None,
labels: Optional[torch.Tensor] = None,
past_key_values: Optional[Cache] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
interpolate_pos_encoding: bool = False,
return_dict: Optional[bool] = None,
logits_to_keep: Union[int, torch.Tensor] = 0,
**kwargs,
) -> Union[tuple[torch.Tensor], CausalLMOutputWithPast]:
r"""
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in
`[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are
ignored (masked), the loss is only computed for the tokens with labels n `[0, ..., config.vocab_size]`
Examples:
Image captioning example:
```python
>>> from transformers import AutoProcessor, AutoModelForCausalLM
>>> import requests
>>> from PIL import Image
>>> processor = AutoProcessor.from_pretrained("microsoft/git-base-coco")
>>> model = AutoModelForCausalLM.from_pretrained("microsoft/git-base-coco")
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> pixel_values = processor(images=image, return_tensors="pt").pixel_values
>>> generated_ids = model.generate(pixel_values=pixel_values, max_length=50)
>>> generated_caption = processor.batch_decode(generated_ids, skip_special_tokens=True)[0]
>>> print(generated_caption)
two cats sleeping on a pink blanket next to remotes.
```
Visual question answering (VQA) example:
```python
>>> from transformers import AutoProcessor, AutoModelForCausalLM
>>> from huggingface_hub import hf_hub_download
>>> from PIL import Image
>>> processor = AutoProcessor.from_pretrained("microsoft/git-base-textvqa")
>>> model = AutoModelForCausalLM.from_pretrained("microsoft/git-base-textvqa")
>>> file_path = hf_hub_download(repo_id="nielsr/textvqa-sample", filename="bus.png", repo_type="dataset")
>>> image = Image.open(file_path).convert("RGB")
>>> pixel_values = processor(images=image, return_tensors="pt").pixel_values
>>> question = "what does the front of the bus say at the top?"
>>> input_ids = processor(text=question, add_special_tokens=False).input_ids
>>> input_ids = [processor.tokenizer.cls_token_id] + input_ids
>>> input_ids = torch.tensor(input_ids).unsqueeze(0)
>>> generated_ids = model.generate(pixel_values=pixel_values, input_ids=input_ids, max_length=50)
>>> print(processor.batch_decode(generated_ids, skip_special_tokens=True))
['what does the front of the bus say at the top? special']
```
Video captioning example:
```python
>>> import av
>>> import numpy as np
>>> from PIL import Image
>>> from huggingface_hub import hf_hub_download
>>> from transformers import AutoProcessor, AutoModelForCausalLM
>>> processor = AutoProcessor.from_pretrained("microsoft/git-base-vatex")
>>> model = AutoModelForCausalLM.from_pretrained("microsoft/git-base-vatex")
>>> # set seed for reproducibility
>>> np.random.seed(45)
>>> def read_video_pyav(container, indices):
... '''
... Decode the video with PyAV decoder.
... Args:
... container (`av.container.input.InputContainer`): PyAV container.
... indices (`list[int]`): List of frame indices to decode.
... Returns:
... result (np.ndarray): np array of decoded frames of shape (num_frames, height, width, 3).
... '''
... frames = []
... container.seek(0)
... start_index = indices[0]
... end_index = indices[-1]
... for i, frame in enumerate(container.decode(video=0)):
... if i > end_index:
... break
... if i >= start_index and i in indices:
... frames.append(frame)
... return np.stack([x.to_ndarray(format="rgb24") for x in frames])
>>> def sample_frame_indices(clip_len, frame_sample_rate, seg_len):
... '''
... Sample a given number of frame indices from the video.
... Args:
... clip_len (`int`): Total number of frames to sample.
... frame_sample_rate (`int`): Sample every n-th frame.
... seg_len (`int`): Maximum allowed index of sample's last frame.
... Returns:
... indices (`list[int]`): List of sampled frame indices
... '''
... converted_len = int(clip_len * frame_sample_rate)
... end_idx = np.random.randint(converted_len, seg_len)
... start_idx = end_idx - converted_len
... indices = np.linspace(start_idx, end_idx, num=clip_len)
... indices = np.clip(indices, start_idx, end_idx - 1).astype(np.int64)
... return indices
>>> # load video
>>> file_path = hf_hub_download(
... repo_id="nielsr/video-demo", filename="eating_spaghetti.mp4", repo_type="dataset"
... )
>>> container = av.open(file_path)
>>> # sample frames
>>> num_frames = model.config.num_image_with_embedding
>>> indices = sample_frame_indices(
... clip_len=num_frames, frame_sample_rate=4, seg_len=container.streams.video[0].frames
... )
>>> frames = read_video_pyav(container, indices)
>>> pixel_values = processor(images=list(frames), return_tensors="pt").pixel_values
>>> generated_ids = model.generate(pixel_values=pixel_values, max_length=50)
>>> print("Generated caption:", processor.batch_decode(generated_ids, skip_special_tokens=True))
Generated caption: ['a woman is sitting at a table and she is talking about the food she is holding.']
```
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if labels is not None:
use_cache = False
outputs = self.git(
input_ids,
attention_mask=attention_mask,
position_ids=position_ids,
pixel_values=pixel_values,
inputs_embeds=inputs_embeds,
past_key_values=past_key_values,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
interpolate_pos_encoding=interpolate_pos_encoding,
return_dict=return_dict,
)
hidden_states = outputs[0]
# Only compute necessary logits, and do not upcast them to float if we are not computing the loss
slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
logits = self.output(hidden_states[:, slice_indices, :])
loss = None
if labels is not None:
# we are doing next-token prediction; shift prediction scores and input ids by one
num_image_tokens = self.git.encoder.layer[0].attention.self.image_patch_tokens
shifted_logits = logits[:, num_image_tokens:-1, :].contiguous()
labels = labels[:, 1:].contiguous()
loss = self.loss_function(
shifted_logits.view(-1, self.config.vocab_size),
labels.view(-1),
vocab_size=self.config.vocab_size,
**kwargs,
)
if not return_dict:
output = (logits,) + outputs[1:]
return ((loss,) + output) if loss is not None else output
return CausalLMOutputWithPast(
loss=loss,
logits=logits,
past_key_values=outputs.past_key_values,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
def prepare_inputs_for_generation(
self, input_ids, past_key_values=None, attention_mask=None, use_cache=None, **kwargs
):
# Overwritten -- `git` has special cache handling and doesn't support generating from `inputs_embeds` atm
# cut decoder_input_ids if past_key_values is used
if past_key_values is not None:
past_length = past_key_values.get_seq_length()
# Some generation methods already pass only the last input ID
if input_ids.shape[1] > past_length:
remove_prefix_length = past_length
else:
# Default to old behavior: keep only final ID
remove_prefix_length = input_ids.shape[1] - 1
input_ids = input_ids[:, remove_prefix_length:]
# if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly
input_shape = input_ids.shape
if attention_mask is None:
attention_mask = input_ids.new_ones(input_shape)
model_inputs = {
"input_ids": input_ids,
"attention_mask": attention_mask,
"pixel_values": kwargs.get("pixel_values"),
"past_key_values": past_key_values,
"use_cache": use_cache,
}
# Forward ALL kwargs that are uninitialized (e.g. `use_cache`).
for key, value in kwargs.items():
if key not in model_inputs:
model_inputs[key] = value
return model_inputs
__all__ = ["GitForCausalLM", "GitModel", "GitPreTrainedModel", "GitVisionModel"]
| GitForCausalLM |
python | django__django | tests/model_inheritance_regress/models.py | {
"start": 3552,
"end": 3657
} | class ____(BachelorParty):
pass
# Check concrete -> abstract -> concrete inheritance
| MessyBachelorParty |
python | pypa__twine | twine/exceptions.py | {
"start": 4934,
"end": 5038
} | class ____(TwineException):
"""Raised when a distribution is invalid."""
pass
| InvalidDistribution |
python | tensorflow__tensorflow | tensorflow/python/distribute/one_device_strategy.py | {
"start": 1692,
"end": 9341
} | class ____(distribute_lib.Strategy):
"""A distribution strategy for running on a single device.
Using this strategy will place any variables created in its scope on the
specified device. Input distributed through this strategy will be
prefetched to the specified device. Moreover, any functions called via
`strategy.run` will also be placed on the specified device
as well.
Typical usage of this strategy could be testing your code with the
tf.distribute.Strategy API before switching to other strategies which
actually distribute to multiple devices/machines.
For example:
```
strategy = tf.distribute.OneDeviceStrategy(device="/gpu:0")
with strategy.scope():
v = tf.Variable(1.0)
print(v.device) # /job:localhost/replica:0/task:0/device:GPU:0
def step_fn(x):
return x * 2
result = 0
for i in range(10):
result += strategy.run(step_fn, args=(i,))
print(result) # 90
```
"""
def __init__(self, device):
"""Creates a `OneDeviceStrategy`.
Args:
device: Device string identifier for the device on which the variables
should be placed. See class docs for more details on how the device is
used. Examples: "/cpu:0", "/gpu:0", "/device:CPU:0", "/device:GPU:0"
"""
super(OneDeviceStrategy, self).__init__(OneDeviceExtended(self, device))
distribute_lib.distribution_strategy_gauge.get_cell("V2").set(
"OneDeviceStrategy")
def experimental_distribute_dataset(self, dataset, options=None): # pylint: disable=useless-super-delegation
"""Distributes a tf.data.Dataset instance provided via dataset.
In this case, there is only one device, so this is only a thin wrapper
around the input dataset. It will, however, prefetch the input data to the
specified device. The returned distributed dataset can be iterated over
similar to how regular datasets can.
NOTE: Currently, the user cannot add any more transformations to a
distributed dataset.
Example:
```
strategy = tf.distribute.OneDeviceStrategy()
dataset = tf.data.Dataset.range(10).batch(2)
dist_dataset = strategy.experimental_distribute_dataset(dataset)
for x in dist_dataset:
print(x) # [0, 1], [2, 3],...
```
Args:
dataset: `tf.data.Dataset` to be prefetched to device.
options: `tf.distribute.InputOptions` used to control options on how this
dataset is distributed.
Returns:
A "distributed `Dataset`" that the caller can iterate over.
"""
return super(OneDeviceStrategy, self).experimental_distribute_dataset(
dataset, options)
def distribute_datasets_from_function(
self,
dataset_fn, # pylint: disable=useless-super-delegation
options=None):
"""Distributes `tf.data.Dataset` instances created by calls to `dataset_fn`.
`dataset_fn` will be called once for each worker in the strategy. In this
case, we only have one worker and one device so `dataset_fn` is called
once.
The `dataset_fn` should take an `tf.distribute.InputContext` instance where
information about batching and input replication can be accessed:
```
def dataset_fn(input_context):
batch_size = input_context.get_per_replica_batch_size(global_batch_size)
d = tf.data.Dataset.from_tensors([[1.]]).repeat().batch(batch_size)
return d.shard(
input_context.num_input_pipelines, input_context.input_pipeline_id)
inputs = strategy.distribute_datasets_from_function(dataset_fn)
for batch in inputs:
replica_results = strategy.run(replica_fn, args=(batch,))
```
IMPORTANT: The `tf.data.Dataset` returned by `dataset_fn` should have a
per-replica batch size, unlike `experimental_distribute_dataset`, which uses
the global batch size. This may be computed using
`input_context.get_per_replica_batch_size`.
Args:
dataset_fn: A function taking a `tf.distribute.InputContext` instance and
returning a `tf.data.Dataset`.
options: `tf.distribute.InputOptions` used to control options on how this
dataset is distributed.
Returns:
A "distributed `Dataset`", which the caller can iterate over like regular
datasets.
"""
return super(OneDeviceStrategy,
self).distribute_datasets_from_function(dataset_fn, options)
def experimental_local_results(self, value): # pylint: disable=useless-super-delegation
"""Returns the list of all local per-replica values contained in `value`.
In `OneDeviceStrategy`, the `value` is always expected to be a single
value, so the result is just the value in a tuple.
Args:
value: A value returned by `experimental_run()`, `run()`,
`extended.call_for_each_replica()`, or a variable created in `scope`.
Returns:
A tuple of values contained in `value`. If `value` represents a single
value, this returns `(value,).`
"""
return super(OneDeviceStrategy, self).experimental_local_results(value)
def run(self, fn, args=(), kwargs=None, options=None): # pylint: disable=useless-super-delegation
"""Run `fn` on each replica, with the given arguments.
In `OneDeviceStrategy`, `fn` is simply called within a device scope for the
given device, with the provided arguments.
Args:
fn: The function to run. The output must be a `tf.nest` of `Tensor`s.
args: (Optional) Positional arguments to `fn`.
kwargs: (Optional) Keyword arguments to `fn`.
options: (Optional) An instance of `tf.distribute.RunOptions` specifying
the options to run `fn`.
Returns:
Return value from running `fn`.
"""
return super(OneDeviceStrategy, self).run(fn, args, kwargs, options)
def reduce(self, reduce_op, value, axis): # pylint: disable=useless-super-delegation
"""Reduce `value` across replicas.
In `OneDeviceStrategy`, there is only one replica, so if axis=None, value
is simply returned. If axis is specified as something other than None,
such as axis=0, value is reduced along that axis and returned.
Example:
```
t = tf.range(10)
result = strategy.reduce(tf.distribute.ReduceOp.SUM, t, axis=None).numpy()
# result: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
result = strategy.reduce(tf.distribute.ReduceOp.SUM, t, axis=0).numpy()
# result: 45
```
Args:
reduce_op: A `tf.distribute.ReduceOp` value specifying how values should
be combined.
value: A "per replica" value, e.g. returned by `run` to
be combined into a single tensor.
axis: Specifies the dimension to reduce along within each
replica's tensor. Should typically be set to the batch dimension, or
`None` to only reduce across replicas (e.g. if the tensor has no batch
dimension).
Returns:
A `Tensor`.
"""
return super(OneDeviceStrategy, self).reduce(reduce_op, value, axis)
def scope(self): # pylint: disable=useless-super-delegation
"""Returns a context manager selecting this Strategy as current.
Inside a `with strategy.scope():` code block, this thread
will use a variable creator set by `strategy`, and will
enter its "cross-replica context".
In `OneDeviceStrategy`, all variables created inside `strategy.scope()`
will be on `device` specified at strategy construction time.
See example in the docs for this class.
Returns:
A context manager to use for creating variables with this strategy.
"""
return super(OneDeviceStrategy, self).scope()
@tf_export(v1=["distribute.OneDeviceStrategy"]) # pylint: disable=empty-docstring
| OneDeviceStrategy |
python | python__mypy | mypy/test/testmodulefinder.py | {
"start": 5627,
"end": 13957
} | class ____(Suite):
def setUp(self) -> None:
self.package_dir = os.path.relpath(
os.path.join(package_path, "modulefinder-site-packages")
)
package_paths = (
os.path.join(self.package_dir, "baz"),
os.path.join(self.package_dir, "..", "not-a-directory"),
os.path.join(self.package_dir, "..", "modulefinder-src"),
self.package_dir,
)
self.search_paths = SearchPaths(
python_path=(),
mypy_path=(os.path.join(data_path, "pkg1"),),
package_path=tuple(package_paths),
typeshed_path=(),
)
options = Options()
options.namespace_packages = True
self.fmc_ns = FindModuleCache(self.search_paths, fscache=None, options=options)
options = Options()
options.namespace_packages = False
self.fmc_nons = FindModuleCache(self.search_paths, fscache=None, options=options)
def path(self, *parts: str) -> str:
return os.path.abspath(os.path.join(self.package_dir, *parts))
def test__packages_with_ns(self) -> None:
cases = [
# Namespace package with py.typed
("ns_pkg_typed", self.path("ns_pkg_typed")),
("ns_pkg_typed.a", self.path("ns_pkg_typed", "a.py")),
("ns_pkg_typed.b", self.path("ns_pkg_typed", "b")),
("ns_pkg_typed.b.c", self.path("ns_pkg_typed", "b", "c.py")),
("ns_pkg_typed.a.a_var", ModuleNotFoundReason.NOT_FOUND),
# Namespace package without py.typed
("ns_pkg_untyped", ModuleNotFoundReason.FOUND_WITHOUT_TYPE_HINTS),
("ns_pkg_untyped.a", ModuleNotFoundReason.FOUND_WITHOUT_TYPE_HINTS),
("ns_pkg_untyped.b", ModuleNotFoundReason.FOUND_WITHOUT_TYPE_HINTS),
("ns_pkg_untyped.b.c", ModuleNotFoundReason.FOUND_WITHOUT_TYPE_HINTS),
("ns_pkg_untyped.a.a_var", ModuleNotFoundReason.FOUND_WITHOUT_TYPE_HINTS),
# Namespace package without stub package
("ns_pkg_w_stubs", self.path("ns_pkg_w_stubs")),
("ns_pkg_w_stubs.typed", self.path("ns_pkg_w_stubs-stubs", "typed", "__init__.pyi")),
(
"ns_pkg_w_stubs.typed_inline",
self.path("ns_pkg_w_stubs", "typed_inline", "__init__.py"),
),
("ns_pkg_w_stubs.untyped", ModuleNotFoundReason.FOUND_WITHOUT_TYPE_HINTS),
# Regular package with py.typed
("pkg_typed", self.path("pkg_typed", "__init__.py")),
("pkg_typed.a", self.path("pkg_typed", "a.py")),
("pkg_typed.b", self.path("pkg_typed", "b", "__init__.py")),
("pkg_typed.b.c", self.path("pkg_typed", "b", "c.py")),
("pkg_typed.a.a_var", ModuleNotFoundReason.NOT_FOUND),
# Regular package with py.typed, bundled stubs, and external stubs-only package
("pkg_typed_w_stubs", self.path("pkg_typed_w_stubs-stubs", "__init__.pyi")),
("pkg_typed_w_stubs.spam", self.path("pkg_typed_w_stubs-stubs", "spam.pyi")),
# Regular package without py.typed
("pkg_untyped", ModuleNotFoundReason.FOUND_WITHOUT_TYPE_HINTS),
("pkg_untyped.a", ModuleNotFoundReason.FOUND_WITHOUT_TYPE_HINTS),
("pkg_untyped.b", ModuleNotFoundReason.FOUND_WITHOUT_TYPE_HINTS),
("pkg_untyped.b.c", ModuleNotFoundReason.FOUND_WITHOUT_TYPE_HINTS),
("pkg_untyped.a.a_var", ModuleNotFoundReason.FOUND_WITHOUT_TYPE_HINTS),
# Top-level Python file in site-packages
("standalone", ModuleNotFoundReason.FOUND_WITHOUT_TYPE_HINTS),
("standalone.standalone_var", ModuleNotFoundReason.FOUND_WITHOUT_TYPE_HINTS),
# Packages found by following .pth files
("baz_pkg", self.path("baz", "baz_pkg", "__init__.py")),
("ns_baz_pkg.a", self.path("baz", "ns_baz_pkg", "a.py")),
("neighbor_pkg", self.path("..", "modulefinder-src", "neighbor_pkg", "__init__.py")),
("ns_neighbor_pkg.a", self.path("..", "modulefinder-src", "ns_neighbor_pkg", "a.py")),
# Something that doesn't exist
("does_not_exist", ModuleNotFoundReason.NOT_FOUND),
# A regular package with an installed set of stubs
("foo.bar", self.path("foo-stubs", "bar.pyi")),
# A regular, non-site-packages module
("a", os.path.abspath(os.path.join(data_path, "pkg1", "a.py"))),
]
for module, expected in cases:
template = "Find(" + module + ") got {}; expected {}"
actual = self.fmc_ns.find_module(module)
assert_equal(actual, expected, template)
def test__packages_without_ns(self) -> None:
cases = [
# Namespace package with py.typed
("ns_pkg_typed", ModuleNotFoundReason.NOT_FOUND),
("ns_pkg_typed.a", ModuleNotFoundReason.NOT_FOUND),
("ns_pkg_typed.b", ModuleNotFoundReason.NOT_FOUND),
("ns_pkg_typed.b.c", ModuleNotFoundReason.NOT_FOUND),
("ns_pkg_typed.a.a_var", ModuleNotFoundReason.NOT_FOUND),
# Namespace package without py.typed
("ns_pkg_untyped", ModuleNotFoundReason.FOUND_WITHOUT_TYPE_HINTS),
("ns_pkg_untyped.a", ModuleNotFoundReason.FOUND_WITHOUT_TYPE_HINTS),
("ns_pkg_untyped.b", ModuleNotFoundReason.FOUND_WITHOUT_TYPE_HINTS),
("ns_pkg_untyped.b.c", ModuleNotFoundReason.FOUND_WITHOUT_TYPE_HINTS),
("ns_pkg_untyped.a.a_var", ModuleNotFoundReason.FOUND_WITHOUT_TYPE_HINTS),
# Namespace package without stub package
("ns_pkg_w_stubs", ModuleNotFoundReason.FOUND_WITHOUT_TYPE_HINTS),
("ns_pkg_w_stubs.typed", ModuleNotFoundReason.FOUND_WITHOUT_TYPE_HINTS),
(
"ns_pkg_w_stubs.typed_inline",
self.path("ns_pkg_w_stubs", "typed_inline", "__init__.py"),
),
("ns_pkg_w_stubs.untyped", ModuleNotFoundReason.FOUND_WITHOUT_TYPE_HINTS),
# Regular package with py.typed
("pkg_typed", self.path("pkg_typed", "__init__.py")),
("pkg_typed.a", self.path("pkg_typed", "a.py")),
("pkg_typed.b", self.path("pkg_typed", "b", "__init__.py")),
("pkg_typed.b.c", self.path("pkg_typed", "b", "c.py")),
("pkg_typed.a.a_var", ModuleNotFoundReason.NOT_FOUND),
# Regular package with py.typed, bundled stubs, and external stubs-only package
("pkg_typed_w_stubs", self.path("pkg_typed_w_stubs-stubs", "__init__.pyi")),
("pkg_typed_w_stubs.spam", self.path("pkg_typed_w_stubs-stubs", "spam.pyi")),
# Regular package without py.typed
("pkg_untyped", ModuleNotFoundReason.FOUND_WITHOUT_TYPE_HINTS),
("pkg_untyped.a", ModuleNotFoundReason.FOUND_WITHOUT_TYPE_HINTS),
("pkg_untyped.b", ModuleNotFoundReason.FOUND_WITHOUT_TYPE_HINTS),
("pkg_untyped.b.c", ModuleNotFoundReason.FOUND_WITHOUT_TYPE_HINTS),
("pkg_untyped.a.a_var", ModuleNotFoundReason.FOUND_WITHOUT_TYPE_HINTS),
# Top-level Python file in site-packages
("standalone", ModuleNotFoundReason.FOUND_WITHOUT_TYPE_HINTS),
("standalone.standalone_var", ModuleNotFoundReason.FOUND_WITHOUT_TYPE_HINTS),
# Packages found by following .pth files
("baz_pkg", self.path("baz", "baz_pkg", "__init__.py")),
("ns_baz_pkg.a", ModuleNotFoundReason.NOT_FOUND),
("neighbor_pkg", self.path("..", "modulefinder-src", "neighbor_pkg", "__init__.py")),
("ns_neighbor_pkg.a", ModuleNotFoundReason.NOT_FOUND),
# Something that doesn't exist
("does_not_exist", ModuleNotFoundReason.NOT_FOUND),
# A regular package with an installed set of stubs
("foo.bar", self.path("foo-stubs", "bar.pyi")),
# A regular, non-site-packages module
("a", os.path.abspath(os.path.join(data_path, "pkg1", "a.py"))),
]
for module, expected in cases:
template = "Find(" + module + ") got {}; expected {}"
actual = self.fmc_nons.find_module(module)
assert_equal(actual, expected, template)
| ModuleFinderSitePackagesSuite |
python | numba__numba | numba/cuda/cudadecl.py | {
"start": 5671,
"end": 5857
} | class ____(ConcreteTemplate):
key = cuda.cbrt
cases = [
signature(types.float32, types.float32),
signature(types.float64, types.float64),
]
@register
| Cuda_cbrt |
python | sqlalchemy__sqlalchemy | test/orm/inheritance/test_assorted_poly.py | {
"start": 10293,
"end": 15839
} | class ____(fixtures.MappedTest):
"""test self-referential relationships on polymorphic mappers"""
@classmethod
def define_tables(cls, metadata):
global people, managers, data
people = Table(
"people",
metadata,
Column(
"person_id",
Integer,
primary_key=True,
test_needs_autoincrement=True,
),
Column("colleague_id", Integer, ForeignKey("people.person_id")),
Column("name", String(50)),
Column("type", String(30)),
)
managers = Table(
"managers",
metadata,
Column(
"person_id",
Integer,
ForeignKey("people.person_id"),
primary_key=True,
),
Column("status", String(30)),
)
data = Table(
"data",
metadata,
Column(
"person_id",
Integer,
ForeignKey("people.person_id"),
primary_key=True,
),
Column("data", String(30)),
)
@classmethod
def setup_classes(cls):
class Person(cls.Comparable):
pass
class Manager(Person):
pass
class Data(cls.Comparable):
def __init__(self, data):
self.data = data
def _setup_mappings(self, jointype, usedata):
Person, Manager, Data = self.classes("Person", "Manager", "Data")
if jointype == "join1":
poly_union = polymorphic_union(
{
"manager": managers.join(
people, people.c.person_id == managers.c.person_id
),
"person": people.select()
.where(people.c.type == "person")
.subquery(),
},
None,
)
elif jointype == "join2":
poly_union = polymorphic_union(
{
"manager": join(
people,
managers,
people.c.person_id == managers.c.person_id,
),
"person": people.select()
.where(people.c.type == "person")
.subquery(),
},
None,
)
elif jointype == "join3":
poly_union = people.outerjoin(managers)
elif jointype == "join4":
poly_union = None
else:
assert False
if usedata:
self.mapper_registry.map_imperatively(Data, data)
if usedata:
self.mapper_registry.map_imperatively(
Person,
people,
with_polymorphic=("*", poly_union),
polymorphic_identity="person",
polymorphic_on=people.c.type,
properties={
"colleagues": relationship(
Person,
primaryjoin=people.c.colleague_id
== people.c.person_id,
remote_side=people.c.colleague_id,
uselist=True,
),
"data": relationship(Data, uselist=False),
},
)
else:
self.mapper_registry.map_imperatively(
Person,
people,
with_polymorphic=("*", poly_union),
polymorphic_identity="person",
polymorphic_on=people.c.type,
properties={
"colleagues": relationship(
Person,
primaryjoin=people.c.colleague_id
== people.c.person_id,
remote_side=people.c.colleague_id,
uselist=True,
)
},
)
self.mapper_registry.map_imperatively(
Manager,
managers,
inherits=Person,
inherit_condition=people.c.person_id == managers.c.person_id,
polymorphic_identity="manager",
)
@testing.combinations(
("join1",), ("join2",), ("join3",), ("join4",), argnames="jointype"
)
@testing.combinations(
("usedata", True), ("nodata", False), id_="ia", argnames="usedata"
)
def test_relationship_on_base_class(self, jointype, usedata):
self._setup_mappings(jointype, usedata)
Person, Manager, Data = self.classes("Person", "Manager", "Data")
sess = fixture_session()
p = Person(name="person1")
p2 = Person(name="person2")
p3 = Person(name="person3")
m = Manager(name="manager1")
p.colleagues.append(p2)
m.colleagues.append(p3)
if usedata:
p.data = Data("ps data")
m.data = Data("ms data")
sess.add(m)
sess.add(p)
sess.flush()
sess.expunge_all()
p = sess.get(Person, p.person_id)
p2 = sess.get(Person, p2.person_id)
p3 = sess.get(Person, p3.person_id)
m = sess.get(Person, m.person_id)
assert len(p.colleagues) == 1
assert p.colleagues == [p2]
assert m.colleagues == [p3]
if usedata:
assert p.data.data == "ps data"
assert m.data.data == "ms data"
| RelationshipTest3 |
python | matplotlib__matplotlib | lib/matplotlib/legend_handler.py | {
"start": 7540,
"end": 8491
} | class ____(HandlerNpoints):
"""
A legend handler that shows *numpoints* in the legend, and allows them to
be individually offset in the y-direction.
"""
def __init__(self, numpoints=None, yoffsets=None, **kwargs):
"""
Parameters
----------
numpoints : int
Number of points to show in legend entry.
yoffsets : array of floats
Length *numpoints* list of y offsets for each point in
legend entry.
**kwargs
Keyword arguments forwarded to `.HandlerNpoints`.
"""
super().__init__(numpoints=numpoints, **kwargs)
self._yoffsets = yoffsets
def get_ydata(self, legend, xdescent, ydescent, width, height, fontsize):
if self._yoffsets is None:
ydata = height * legend._scatteryoffsets
else:
ydata = height * np.asarray(self._yoffsets)
return ydata
| HandlerNpointsYoffsets |
python | numpy__numpy | numpy/_core/tests/test_deprecations.py | {
"start": 17100,
"end": 17480
} | class ____(_DeprecationTestCase):
# Deprecated in Numpy 2.4, 2025-08, gh-27639
message = "Passing more than 2 positional arguments to np.maximum and np.minimum "
@pytest.mark.parametrize("ufunc", [np.minimum, np.maximum])
def test_extremem_3_args(self, ufunc):
self.assert_deprecated(ufunc, args=(np.ones(1), np.zeros(1), np.empty(1)))
| TestTooManyArgsExtremum |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 930592,
"end": 930965
} | class ____(
sgqlc.types.Type,
Node,
AuditEntry,
OrganizationAuditEntryData,
RepositoryAuditEntryData,
):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("visibility",)
visibility = sgqlc.types.Field(
RepoAccessAuditEntryVisibility, graphql_name="visibility"
)
| RepoAccessAuditEntry |
python | altair-viz__altair | altair/vegalite/v6/schema/channels.py | {
"start": 7948,
"end": 37265
} | class ____(FieldChannelMixin, core.FieldOrDatumDefWithConditionMarkPropFieldDefnumber):
r"""
Angle schema wrapper.
Parameters
----------
shorthand : str, dict, Sequence[str], :class:`RepeatRef`
shorthand for field, aggregate, and type
aggregate : dict, :class:`Aggregate`, :class:`ArgmaxDef`, :class:`ArgminDef`, :class:`NonArgAggregateOp`, Literal['average', 'count', 'distinct', 'max', 'mean', 'median', 'min', 'missing', 'product', 'q1', 'q3', 'ci0', 'ci1', 'stderr', 'stdev', 'stdevp', 'sum', 'valid', 'values', 'variance', 'variancep', 'exponential', 'exponentialb']
Aggregation function for the field (e.g., ``"mean"``, ``"sum"``, ``"median"``,
``"min"``, ``"max"``, ``"count"``).
**Default value:** ``undefined`` (None)
**See also:** `aggregate <https://vega.github.io/vega-lite/docs/aggregate.html>`__
documentation.
bandPosition : float
Relative position on a band of a stacked, binned, time unit, or band scale. For
example, the marks will be positioned at the beginning of the band if set to ``0``,
and at the middle of the band if set to ``0.5``.
bin : bool, dict, :class:`BinParams`, None
A flag for binning a ``quantitative`` field, `an object defining binning parameters
<https://vega.github.io/vega-lite/docs/bin.html#bin-parameters>`__, or indicating
that the data for ``x`` or ``y`` channel are binned before they are imported into
Vega-Lite (``"binned"``).
* If ``true``, default `binning parameters
<https://vega.github.io/vega-lite/docs/bin.html#bin-parameters>`__ will be
applied.
* If ``"binned"``, this indicates that the data for the ``x`` (or ``y``) channel are
already binned. You can map the bin-start field to ``x`` (or ``y``) and the
bin-end field to ``x2`` (or ``y2``). The scale and axis will be formatted similar
to binning in Vega-Lite. To adjust the axis ticks based on the bin step, you can
also set the axis's `tickMinStep
<https://vega.github.io/vega-lite/docs/axis.html#ticks>`__ property.
**Default value:** ``false``
**See also:** `bin <https://vega.github.io/vega-lite/docs/bin.html>`__
documentation.
condition : dict, :class:`ConditionalValueDefnumberExprRef`, :class:`ConditionalParameterValueDefnumberExprRef`, :class:`ConditionalPredicateValueDefnumberExprRef`, Sequence[dict, :class:`ConditionalValueDefnumberExprRef`, :class:`ConditionalParameterValueDefnumberExprRef`, :class:`ConditionalPredicateValueDefnumberExprRef`]
One or more value definition(s) with `a parameter or a test predicate
<https://vega.github.io/vega-lite/docs/condition.html>`__.
**Note:** A field definition's ``condition`` property can only contain `conditional
value definitions <https://vega.github.io/vega-lite/docs/condition.html#value>`__
since Vega-Lite only allows at most one encoded field per encoding channel.
field : str, dict, :class:`Field`, :class:`FieldName`, :class:`RepeatRef`
**Required.** A string defining the name of the field from which to pull a data
value or an object defining iterated values from the `repeat
<https://vega.github.io/vega-lite/docs/repeat.html>`__ operator.
**See also:** `field <https://vega.github.io/vega-lite/docs/field.html>`__
documentation.
**Notes:** 1) Dots (``.``) and brackets (``[`` and ``]``) can be used to access
nested objects (e.g., ``"field": "foo.bar"`` and ``"field": "foo['bar']"``). If
field names contain dots or brackets but are not nested, you can use ``\\`` to
escape dots and brackets (e.g., ``"a\\.b"`` and ``"a\\[0\\]"``). See more details
about escaping in the `field documentation
<https://vega.github.io/vega-lite/docs/field.html>`__. 2) ``field`` is not required
if ``aggregate`` is ``count``.
legend : dict, :class:`Legend`, None
An object defining properties of the legend. If ``null``, the legend for the
encoding channel will be removed.
**Default value:** If undefined, default `legend properties
<https://vega.github.io/vega-lite/docs/legend.html>`__ are applied.
**See also:** `legend <https://vega.github.io/vega-lite/docs/legend.html>`__
documentation.
scale : dict, :class:`Scale`, None
An object defining properties of the channel's scale, which is the function that
transforms values in the data domain (numbers, dates, strings, etc) to visual values
(pixels, colors, sizes) of the encoding channels.
If ``null``, the scale will be `disabled and the data value will be directly encoded
<https://vega.github.io/vega-lite/docs/scale.html#disable>`__.
**Default value:** If undefined, default `scale properties
<https://vega.github.io/vega-lite/docs/scale.html>`__ are applied.
**See also:** `scale <https://vega.github.io/vega-lite/docs/scale.html>`__
documentation.
sort : dict, :class:`Sort`, Sequence[str], Sequence[bool], Sequence[float], :class:`SortArray`, :class:`SortOrder`, :class:`AllSortString`, :class:`SortByChannel`, :class:`SortByEncoding`, :class:`EncodingSortField`, :class:`SortByChannelDesc`, Sequence[dict, :class:`DateTime`], Literal['-x', '-y', '-color', '-fill', '-stroke', '-strokeWidth', '-size', '-shape', '-fillOpacity', '-strokeOpacity', '-opacity', '-text', 'ascending', 'descending', 'x', 'y', 'color', 'fill', 'stroke', 'strokeWidth', 'size', 'shape', 'fillOpacity', 'strokeOpacity', 'opacity', 'text'], None
Sort order for the encoded field.
For continuous fields (quantitative or temporal), ``sort`` can be either
``"ascending"`` or ``"descending"``.
For discrete fields, ``sort`` can be one of the following:
* ``"ascending"`` or ``"descending"`` -- for sorting by the values' natural order in
JavaScript.
* `A string indicating an encoding channel name to sort by
<https://vega.github.io/vega-lite/docs/sort.html#sort-by-encoding>`__ (e.g.,
``"x"`` or ``"y"``) with an optional minus prefix for descending sort (e.g.,
``"-x"`` to sort by x-field, descending). This channel string is short-form of `a
sort-by-encoding definition
<https://vega.github.io/vega-lite/docs/sort.html#sort-by-encoding>`__. For
example, ``"sort": "-x"`` is equivalent to ``"sort": {"encoding": "x", "order":
"descending"}``.
* `A sort field definition
<https://vega.github.io/vega-lite/docs/sort.html#sort-field>`__ for sorting by
another field.
* `An array specifying the field values in preferred order
<https://vega.github.io/vega-lite/docs/sort.html#sort-array>`__. In this case, the
sort order will obey the values in the array, followed by any unspecified values
in their original order. For discrete time field, values in the sort array can be
`date-time definition objects
<https://vega.github.io/vega-lite/docs/datetime.html>`__. In addition, for time
units ``"month"`` and ``"day"``, the values can be the month or day names (case
insensitive) or their 3-letter initials (e.g., ``"Mon"``, ``"Tue"``).
* ``null`` indicating no sort.
**Default value:** ``"ascending"``
**Note:** ``null`` and sorting by another channel is not supported for ``row`` and
``column``.
**See also:** `sort <https://vega.github.io/vega-lite/docs/sort.html>`__
documentation.
timeUnit : dict, :class:`TimeUnit`, :class:`MultiTimeUnit`, :class:`BinnedTimeUnit`, :class:`SingleTimeUnit`, :class:`TimeUnitParams`, :class:`UtcMultiTimeUnit`, :class:`UtcSingleTimeUnit`, :class:`LocalMultiTimeUnit`, :class:`LocalSingleTimeUnit`, Literal['binnedyear', 'binnedyearquarter', 'binnedyearquartermonth', 'binnedyearmonth', 'binnedyearmonthdate', 'binnedyearmonthdatehours', 'binnedyearmonthdatehoursminutes', 'binnedyearmonthdatehoursminutesseconds', 'binnedyearweek', 'binnedyearweekday', 'binnedyearweekdayhours', 'binnedyearweekdayhoursminutes', 'binnedyearweekdayhoursminutesseconds', 'binnedyeardayofyear', 'binnedutcyear', 'binnedutcyearquarter', 'binnedutcyearquartermonth', 'binnedutcyearmonth', 'binnedutcyearmonthdate', 'binnedutcyearmonthdatehours', 'binnedutcyearmonthdatehoursminutes', 'binnedutcyearmonthdatehoursminutesseconds', 'binnedutcyearweek', 'binnedutcyearweekday', 'binnedutcyearweekdayhours', 'binnedutcyearweekdayhoursminutes', 'binnedutcyearweekdayhoursminutesseconds', 'binnedutcyeardayofyear', 'utcyear', 'utcquarter', 'utcmonth', 'utcweek', 'utcday', 'utcdayofyear', 'utcdate', 'utchours', 'utcminutes', 'utcseconds', 'utcmilliseconds', 'year', 'quarter', 'month', 'week', 'day', 'dayofyear', 'date', 'hours', 'minutes', 'seconds', 'milliseconds', 'utcyearquarter', 'utcyearquartermonth', 'utcyearmonth', 'utcyearmonthdate', 'utcyearmonthdatehours', 'utcyearmonthdatehoursminutes', 'utcyearmonthdatehoursminutesseconds', 'utcyearweek', 'utcyearweekday', 'utcyearweekdayhours', 'utcyearweekdayhoursminutes', 'utcyearweekdayhoursminutesseconds', 'utcyeardayofyear', 'utcquartermonth', 'utcmonthdate', 'utcmonthdatehours', 'utcmonthdatehoursminutes', 'utcmonthdatehoursminutesseconds', 'utcweekday', 'utcweekdayhours', 'utcweekdayhoursminutes', 'utcweekdayhoursminutesseconds', 'utcdayhours', 'utcdayhoursminutes', 'utcdayhoursminutesseconds', 'utchoursminutes', 'utchoursminutesseconds', 'utcminutesseconds', 'utcsecondsmilliseconds', 'yearquarter', 
'yearquartermonth', 'yearmonth', 'yearmonthdate', 'yearmonthdatehours', 'yearmonthdatehoursminutes', 'yearmonthdatehoursminutesseconds', 'yearweek', 'yearweekday', 'yearweekdayhours', 'yearweekdayhoursminutes', 'yearweekdayhoursminutesseconds', 'yeardayofyear', 'quartermonth', 'monthdate', 'monthdatehours', 'monthdatehoursminutes', 'monthdatehoursminutesseconds', 'weekday', 'weekdayhours', 'weekdayhoursminutes', 'weekdayhoursminutesseconds', 'dayhours', 'dayhoursminutes', 'dayhoursminutesseconds', 'hoursminutes', 'hoursminutesseconds', 'minutesseconds', 'secondsmilliseconds']
Time unit (e.g., ``year``, ``yearmonth``, ``month``, ``hours``) for a temporal
field. or `a temporal field that gets casted as ordinal
<https://vega.github.io/vega-lite/docs/type.html#cast>`__.
**Default value:** ``undefined`` (None)
**See also:** `timeUnit <https://vega.github.io/vega-lite/docs/timeunit.html>`__
documentation.
title : str, :class:`Text`, Sequence[str], None
A title for the field. If ``null``, the title will be removed.
**Default value:** derived from the field's name and transformation function
(``aggregate``, ``bin`` and ``timeUnit``). If the field has an aggregate function,
the function is displayed as part of the title (e.g., ``"Sum of Profit"``). If the
field is binned or has a time unit applied, the applied function is shown in
parentheses (e.g., ``"Profit (binned)"``, ``"Transaction Date (year-month)"``).
Otherwise, the title is simply the field name.
**Notes**:
1) You can customize the default field title format by providing the `fieldTitle
<https://vega.github.io/vega-lite/docs/config.html#top-level-config>`__ property in
the `config <https://vega.github.io/vega-lite/docs/config.html>`__ or `fieldTitle
function via the compile function's options
<https://vega.github.io/vega-lite/usage/compile.html#field-title>`__.
2) If both field definition's ``title`` and axis, header, or legend ``title`` are
defined, axis/header/legend title will be used.
type : :class:`StandardType`, Literal['quantitative', 'ordinal', 'temporal', 'nominal']
The type of measurement (``"quantitative"``, ``"temporal"``, ``"ordinal"``, or
``"nominal"``) for the encoded field or constant value (``datum``). It can also be a
``"geojson"`` type for encoding `'geoshape'
<https://vega.github.io/vega-lite/docs/geoshape.html>`__.
Vega-Lite automatically infers data types in many cases as discussed below. However,
type is required for a field if: (1) the field is not nominal and the field encoding
has no specified ``aggregate`` (except ``argmin`` and ``argmax``), ``bin``, scale
type, custom ``sort`` order, nor ``timeUnit`` or (2) if you wish to use an ordinal
scale for a field with ``bin`` or ``timeUnit``.
**Default value:**
1) For a data ``field``, ``"nominal"`` is the default data type unless the field
encoding has ``aggregate``, ``channel``, ``bin``, scale type, ``sort``, or
``timeUnit`` that satisfies the following criteria:
* ``"quantitative"`` is the default type if (1) the encoded field contains ``bin``
or ``aggregate`` except ``"argmin"`` and ``"argmax"``, (2) the encoding channel is
``latitude`` or ``longitude`` channel or (3) if the specified scale type is `a
quantitative scale <https://vega.github.io/vega-lite/docs/scale.html#type>`__.
* ``"temporal"`` is the default type if (1) the encoded field contains ``timeUnit``
or (2) the specified scale type is a time or utc scale
* ``"ordinal"`` is the default type if (1) the encoded field contains a `custom sort
order
<https://vega.github.io/vega-lite/docs/sort.html#specifying-custom-sort-order>`__,
(2) the specified scale type is an ordinal/point/band scale, or (3) the encoding
channel is ``order``.
2) For a constant value in data domain (``datum``):
* ``"quantitative"`` if the datum is a number
* ``"nominal"`` if the datum is a string
* ``"temporal"`` if the datum is `a date time object
<https://vega.github.io/vega-lite/docs/datetime.html>`__
**Note:**
* Data ``type`` describes the semantics of the data rather than the primitive data
types (number, string, etc.). The same primitive data type can have different
types of measurement. For example, numeric data can represent quantitative,
ordinal, or nominal data.
* Data values for a temporal field can be either a date-time string (e.g.,
``"2015-03-07 12:32:17"``, ``"17:01"``, ``"2015-03-16"``. ``"2015"``) or a
timestamp number (e.g., ``1552199579097``).
* When using with `bin <https://vega.github.io/vega-lite/docs/bin.html>`__, the
``type`` property can be either ``"quantitative"`` (for using a linear bin scale)
or `"ordinal" (for using an ordinal bin scale)
<https://vega.github.io/vega-lite/docs/type.html#cast-bin>`__.
* When using with `timeUnit
<https://vega.github.io/vega-lite/docs/timeunit.html>`__, the ``type`` property
can be either ``"temporal"`` (default, for using a temporal scale) or `"ordinal"
(for using an ordinal scale)
<https://vega.github.io/vega-lite/docs/type.html#cast-bin>`__.
* When using with `aggregate
<https://vega.github.io/vega-lite/docs/aggregate.html>`__, the ``type`` property
refers to the post-aggregation data type. For example, we can calculate count
``distinct`` of a categorical field ``"cat"`` using ``{"aggregate": "distinct",
"field": "cat"}``. The ``"type"`` of the aggregate output is ``"quantitative"``.
* Secondary channels (e.g., ``x2``, ``y2``, ``xError``, ``yError``) do not have
``type`` as they must have exactly the same type as their primary channels (e.g.,
``x``, ``y``).
**See also:** `type <https://vega.github.io/vega-lite/docs/type.html>`__
documentation.
"""
_class_is_valid_at_instantiation = False
_encoding_name = "angle"
@overload
def aggregate(self, _: NonArgAggregateOp_T, /) -> Angle: ...
@overload
def aggregate(self, *, argmax: Optional[str | SchemaBase] = Undefined) -> Angle: ...
@overload
def aggregate(self, *, argmin: Optional[str | SchemaBase] = Undefined) -> Angle: ...
@overload
def bandPosition(self, _: float, /) -> Angle: ...
@overload
def bin(self, _: bool | Bin | None, /) -> Angle: ...
@overload
def bin(
self,
*,
anchor: Optional[float] = Undefined,
base: Optional[float] = Undefined,
binned: Optional[bool] = Undefined,
divide: Optional[Sequence[float]] = Undefined,
extent: Optional[Parameter | SchemaBase | Sequence[float] | Map] = Undefined,
maxbins: Optional[float] = Undefined,
minstep: Optional[float] = Undefined,
nice: Optional[bool] = Undefined,
step: Optional[float] = Undefined,
steps: Optional[Sequence[float]] = Undefined,
) -> Angle: ...
@overload
def condition(
self,
*,
test: Optional[str | SchemaBase | Map] = Undefined,
value: Optional[float | Parameter | SchemaBase | Map] = Undefined,
) -> Angle: ...
@overload
def condition(
self,
*,
empty: Optional[bool] = Undefined,
param: Optional[str | SchemaBase] = Undefined,
value: Optional[float | Parameter | SchemaBase | Map] = Undefined,
) -> Angle: ...
@overload
def condition(self, _: list[core.ConditionalValueDefnumberExprRef], /) -> Angle: ...
@overload
def field(self, _: str | RepeatRef, /) -> Angle: ...
@overload
def field(
self,
*,
repeat: Optional[Literal["row", "column", "repeat", "layer"]] = Undefined,
) -> Angle: ...
@overload
def legend(self, _: Legend | None, /) -> Angle: ...
@overload
def legend(
self,
*,
aria: Optional[bool | Parameter | SchemaBase | Map] = Undefined,
clipHeight: Optional[float | Parameter | SchemaBase | Map] = Undefined,
columnPadding: Optional[float | Parameter | SchemaBase | Map] = Undefined,
columns: Optional[float | Parameter | SchemaBase | Map] = Undefined,
cornerRadius: Optional[float | Parameter | SchemaBase | Map] = Undefined,
description: Optional[str | Parameter | SchemaBase | Map] = Undefined,
direction: Optional[SchemaBase | Orientation_T] = Undefined,
fillColor: Optional[
str | Parameter | SchemaBase | Map | ColorName_T | None
] = Undefined,
format: Optional[str | SchemaBase | Map] = Undefined,
formatType: Optional[str] = Undefined,
gradientLength: Optional[float | Parameter | SchemaBase | Map] = Undefined,
gradientOpacity: Optional[float | Parameter | SchemaBase | Map] = Undefined,
gradientStrokeColor: Optional[
str | Parameter | SchemaBase | Map | ColorName_T | None
] = Undefined,
gradientStrokeWidth: Optional[float | Parameter | SchemaBase | Map] = Undefined,
gradientThickness: Optional[float | Parameter | SchemaBase | Map] = Undefined,
gridAlign: Optional[Parameter | SchemaBase | Map | LayoutAlign_T] = Undefined,
labelAlign: Optional[Parameter | SchemaBase | Map | Align_T] = Undefined,
labelBaseline: Optional[
Parameter | SchemaBase | Map | TextBaseline_T
] = Undefined,
labelColor: Optional[
str | Parameter | SchemaBase | Map | ColorName_T | None
] = Undefined,
labelExpr: Optional[str] = Undefined,
labelFont: Optional[str | Parameter | SchemaBase | Map] = Undefined,
labelFontSize: Optional[float | Parameter | SchemaBase | Map] = Undefined,
labelFontStyle: Optional[str | Parameter | SchemaBase | Map] = Undefined,
labelFontWeight: Optional[
Parameter | SchemaBase | Map | FontWeight_T
] = Undefined,
labelLimit: Optional[float | Parameter | SchemaBase | Map] = Undefined,
labelOffset: Optional[float | Parameter | SchemaBase | Map] = Undefined,
labelOpacity: Optional[float | Parameter | SchemaBase | Map] = Undefined,
labelOverlap: Optional[
bool | Parameter | SchemaBase | Literal["greedy", "parity"] | Map
] = Undefined,
labelPadding: Optional[float | Parameter | SchemaBase | Map] = Undefined,
labelSeparation: Optional[float | Parameter | SchemaBase | Map] = Undefined,
legendX: Optional[float | Parameter | SchemaBase | Map] = Undefined,
legendY: Optional[float | Parameter | SchemaBase | Map] = Undefined,
offset: Optional[float | Parameter | SchemaBase | Map] = Undefined,
orient: Optional[SchemaBase | LegendOrient_T] = Undefined,
padding: Optional[float | Parameter | SchemaBase | Map] = Undefined,
rowPadding: Optional[float | Parameter | SchemaBase | Map] = Undefined,
strokeColor: Optional[
str | Parameter | SchemaBase | Map | ColorName_T | None
] = Undefined,
symbolDash: Optional[
Parameter | SchemaBase | Sequence[float] | Map
] = Undefined,
symbolDashOffset: Optional[float | Parameter | SchemaBase | Map] = Undefined,
symbolFillColor: Optional[
str | Parameter | SchemaBase | Map | ColorName_T | None
] = Undefined,
symbolLimit: Optional[float | Parameter | SchemaBase | Map] = Undefined,
symbolOffset: Optional[float | Parameter | SchemaBase | Map] = Undefined,
symbolOpacity: Optional[float | Parameter | SchemaBase | Map] = Undefined,
symbolSize: Optional[float | Parameter | SchemaBase | Map] = Undefined,
symbolStrokeColor: Optional[
str | Parameter | SchemaBase | Map | ColorName_T | None
] = Undefined,
symbolStrokeWidth: Optional[float | Parameter | SchemaBase | Map] = Undefined,
symbolType: Optional[str | Parameter | SchemaBase | Map] = Undefined,
tickCount: Optional[
float | Parameter | SchemaBase | Map | TimeInterval_T
] = Undefined,
tickMinStep: Optional[float | Parameter | SchemaBase | Map] = Undefined,
title: Optional[str | SchemaBase | Sequence[str] | None] = Undefined,
titleAlign: Optional[Parameter | SchemaBase | Map | Align_T] = Undefined,
titleAnchor: Optional[Parameter | SchemaBase | Map | TitleAnchor_T] = Undefined,
titleBaseline: Optional[
Parameter | SchemaBase | Map | TextBaseline_T
] = Undefined,
titleColor: Optional[
str | Parameter | SchemaBase | Map | ColorName_T | None
] = Undefined,
titleFont: Optional[str | Parameter | SchemaBase | Map] = Undefined,
titleFontSize: Optional[float | Parameter | SchemaBase | Map] = Undefined,
titleFontStyle: Optional[str | Parameter | SchemaBase | Map] = Undefined,
titleFontWeight: Optional[
Parameter | SchemaBase | Map | FontWeight_T
] = Undefined,
titleLimit: Optional[float | Parameter | SchemaBase | Map] = Undefined,
titleLineHeight: Optional[float | Parameter | SchemaBase | Map] = Undefined,
titleOpacity: Optional[float | Parameter | SchemaBase | Map] = Undefined,
titleOrient: Optional[Parameter | SchemaBase | Map | Orient_T] = Undefined,
titlePadding: Optional[float | Parameter | SchemaBase | Map] = Undefined,
type: Optional[Literal["symbol", "gradient"]] = Undefined,
values: Optional[
Parameter
| SchemaBase
| Sequence[str]
| Sequence[bool]
| Sequence[float]
| Sequence[Temporal | SchemaBase | Map]
| Map
] = Undefined,
zindex: Optional[float] = Undefined,
) -> Angle: ...
@overload
def scale(self, _: Scale | None, /) -> Angle: ...
@overload
def scale(
self,
*,
align: Optional[float | Parameter | SchemaBase | Map] = Undefined,
base: Optional[float | Parameter | SchemaBase | Map] = Undefined,
bins: Optional[SchemaBase | Sequence[float] | Map] = Undefined,
clamp: Optional[bool | Parameter | SchemaBase | Map] = Undefined,
constant: Optional[float | Parameter | SchemaBase | Map] = Undefined,
domain: Optional[
Parameter
| SchemaBase
| Literal["unaggregated"]
| Sequence[
str | bool | float | Temporal | Parameter | SchemaBase | Map | None
]
| Map
] = Undefined,
domainMax: Optional[
float | Temporal | Parameter | SchemaBase | Map
] = Undefined,
domainMid: Optional[float | Parameter | SchemaBase | Map] = Undefined,
domainMin: Optional[
float | Temporal | Parameter | SchemaBase | Map
] = Undefined,
domainRaw: Optional[Parameter | SchemaBase | Map] = Undefined,
exponent: Optional[float | Parameter | SchemaBase | Map] = Undefined,
interpolate: Optional[
Parameter | SchemaBase | Map | ScaleInterpolateEnum_T
] = Undefined,
nice: Optional[
bool | float | Parameter | SchemaBase | Map | TimeInterval_T
] = Undefined,
padding: Optional[float | Parameter | SchemaBase | Map] = Undefined,
paddingInner: Optional[float | Parameter | SchemaBase | Map] = Undefined,
paddingOuter: Optional[float | Parameter | SchemaBase | Map] = Undefined,
range: Optional[
SchemaBase
| Sequence[str | float | Parameter | SchemaBase | Sequence[float] | Map]
| Map
| RangeEnum_T
] = Undefined,
rangeMax: Optional[str | float | Parameter | SchemaBase | Map] = Undefined,
rangeMin: Optional[str | float | Parameter | SchemaBase | Map] = Undefined,
reverse: Optional[bool | Parameter | SchemaBase | Map] = Undefined,
round: Optional[bool | Parameter | SchemaBase | Map] = Undefined,
scheme: Optional[Parameter | SchemaBase | Map | ColorScheme_T] = Undefined,
type: Optional[SchemaBase | ScaleType_T] = Undefined,
zero: Optional[bool | Parameter | SchemaBase | Map] = Undefined,
) -> Angle: ...
@overload
def sort(
self,
_: Sequence[str]
| Sequence[bool]
| Sequence[float]
| Sequence[DateTime | Temporal]
| AllSortString_T
| None,
/,
) -> Angle: ...
@overload
def sort(
self,
*,
field: Optional[str | SchemaBase | Map] = Undefined,
op: Optional[SchemaBase | NonArgAggregateOp_T] = Undefined,
order: Optional[SchemaBase | SortOrder_T | None] = Undefined,
) -> Angle: ...
@overload
def sort(
self,
*,
encoding: Optional[SchemaBase | SortByChannel_T] = Undefined,
order: Optional[SchemaBase | SortOrder_T | None] = Undefined,
) -> Angle: ...
@overload
def timeUnit(
self,
_: TimeUnitParams | MultiTimeUnit_T | BinnedTimeUnit_T | SingleTimeUnit_T,
/,
) -> Angle: ...
@overload
def timeUnit(
self,
*,
binned: Optional[bool] = Undefined,
maxbins: Optional[float] = Undefined,
step: Optional[float] = Undefined,
unit: Optional[SchemaBase | MultiTimeUnit_T | SingleTimeUnit_T] = Undefined,
utc: Optional[bool] = Undefined,
) -> Angle: ...
@overload
def title(self, _: str | Sequence[str] | None, /) -> Angle: ...
@overload
def type(self, _: StandardType_T, /) -> Angle: ...
def __init__(
self,
shorthand: Optional[str | SchemaBase | Sequence[str] | Map] = Undefined,
aggregate: Optional[SchemaBase | Map | NonArgAggregateOp_T] = Undefined,
bandPosition: Optional[float] = Undefined,
bin: Optional[bool | SchemaBase | Map | None] = Undefined,
condition: Optional[SchemaBase | Sequence[SchemaBase | Map] | Map] = Undefined,
field: Optional[str | SchemaBase | Map] = Undefined,
legend: Optional[SchemaBase | Map | None] = Undefined,
scale: Optional[SchemaBase | Map | None] = Undefined,
sort: Optional[
SchemaBase
| Sequence[str]
| Sequence[bool]
| Sequence[float]
| Sequence[Temporal | SchemaBase | Map]
| Map
| AllSortString_T
| None
] = Undefined,
timeUnit: Optional[
SchemaBase | Map | MultiTimeUnit_T | BinnedTimeUnit_T | SingleTimeUnit_T
] = Undefined,
title: Optional[str | SchemaBase | Sequence[str] | None] = Undefined,
type: Optional[SchemaBase | StandardType_T] = Undefined,
**kwds,
):
super().__init__(
shorthand=shorthand,
aggregate=aggregate,
bandPosition=bandPosition,
bin=bin,
condition=condition,
field=field,
legend=legend,
scale=scale,
sort=sort,
timeUnit=timeUnit,
title=title,
type=type,
**kwds,
)
@with_property_setters
| Angle |
python | coleifer__peewee | tests/cockroachdb.py | {
"start": 348,
"end": 424
} | class ____(TestModel):
k = TextField(unique=True)
v = IntegerField()
| KV |
python | tensorflow__tensorflow | tensorflow/python/keras/saving/utils_v1/export_output.py | {
"start": 3358,
"end": 5989
} | class ____(ExportOutput):
"""Represents the output of a classification head.
Either classes or scores or both must be set.
The classes `Tensor` must provide string labels, not integer class IDs.
If only classes is set, it is interpreted as providing top-k results in
descending order.
If only scores is set, it is interpreted as providing a score for every class
in order of class ID.
If both classes and scores are set, they are interpreted as zipped, so each
score corresponds to the class at the same index. Clients should not depend
on the order of the entries.
"""
def __init__(self, scores=None, classes=None):
"""Constructor for `ClassificationOutput`.
Args:
scores: A float `Tensor` giving scores (sometimes but not always
interpretable as probabilities) for each class. May be `None`, but
only if `classes` is set. Interpretation varies-- see class doc.
classes: A string `Tensor` giving predicted class labels. May be `None`,
but only if `scores` is set. Interpretation varies-- see class doc.
Raises:
ValueError: if neither classes nor scores is set, or one of them is not a
`Tensor` with the correct dtype.
"""
if (scores is not None
and not (isinstance(scores, tensor.Tensor)
and scores.dtype.is_floating)):
raise ValueError('Classification scores must be a float32 Tensor; '
'got {}'.format(scores))
if (classes is not None
and not (isinstance(classes, tensor.Tensor)
and dtypes.as_dtype(classes.dtype) == dtypes.string)):
raise ValueError('Classification classes must be a string Tensor; '
'got {}'.format(classes))
if scores is None and classes is None:
raise ValueError('At least one of scores and classes must be set.')
self._scores = scores
self._classes = classes
@property
def scores(self):
return self._scores
@property
def classes(self):
return self._classes
def as_signature_def(self, receiver_tensors):
if len(receiver_tensors) != 1:
raise ValueError('Classification input must be a single string Tensor; '
'got {}'.format(receiver_tensors))
(_, examples), = receiver_tensors.items()
if dtypes.as_dtype(examples.dtype) != dtypes.string:
raise ValueError('Classification input must be a single string Tensor; '
'got {}'.format(receiver_tensors))
return signature_def_utils.classification_signature_def(
examples, self.classes, self.scores)
| ClassificationOutput |
python | walkccc__LeetCode | solutions/1093. Statistics from a Large Sample/1093.py | {
"start": 0,
"end": 749
} | class ____:
def sampleStats(self, count: list[int]) -> list[float]:
minimum = next((i for i, num in enumerate(count) if num), None)
maximum = next((i for i, num in reversed(
list(enumerate(count))) if num), None)
n = sum(count)
mean = sum(i * c / n for i, c in enumerate(count))
mode = count.index(max(count))
numCount = 0
leftMedian = 0
for i, c in enumerate(count):
numCount += c
if numCount >= n / 2:
leftMedian = i
break
numCount = 0
rightMedian = 0
for i, c in reversed(list(enumerate(count))):
numCount += c
if numCount >= n / 2:
rightMedian = i
break
return [minimum, maximum, mean, (leftMedian + rightMedian) / 2, mode]
| Solution |
python | jazzband__django-oauth-toolkit | tests/test_password.py | {
"start": 2708,
"end": 3656
} | class ____(BaseTest):
def test_password_resource_access_allowed(self):
token_request_data = {
"grant_type": "password",
"username": "test_user",
"password": "123456",
}
auth_headers = get_basic_auth_header(self.application.client_id, CLEARTEXT_SECRET)
response = self.client.post(reverse("oauth2_provider:token"), data=token_request_data, **auth_headers)
content = json.loads(response.content.decode("utf-8"))
access_token = content["access_token"]
# use token to access the resource
auth_headers = {
"HTTP_AUTHORIZATION": "Bearer " + access_token,
}
request = self.factory.get("/fake-resource", **auth_headers)
request.user = self.test_user
view = ResourceView.as_view()
response = view(request)
self.assertEqual(response, "This is a protected resource")
| TestPasswordProtectedResource |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/events/__init__.py | {
"start": 10269,
"end": 14641
} | class ____(Enum):
UNEXPECTED_TERMINATION = "UNEXPECTED_TERMINATION"
RUN_EXCEPTION = "RUN_EXCEPTION"
STEP_FAILURE = "STEP_FAILURE"
JOB_INITIALIZATION_FAILURE = "JOB_INITIALIZATION_FAILURE"
START_TIMEOUT = "START_TIMEOUT"
RUN_WORKER_RESTART = "RUN_WORKER_RESTART"
UNKNOWN = "UNKNOWN"
def _assert_type(
method: str,
expected_type: Union[DagsterEventType, Sequence[DagsterEventType]],
actual_type: DagsterEventType,
) -> None:
_expected_type = (
[expected_type] if isinstance(expected_type, DagsterEventType) else expected_type
)
check.invariant(
actual_type in _expected_type,
f"{method} only callable when event_type is"
f" {','.join([t.value for t in _expected_type])}, called on {actual_type}",
)
def _validate_event_specific_data(
event_type: DagsterEventType, event_specific_data: Optional["EventSpecificData"]
) -> Optional["EventSpecificData"]:
if event_type == DagsterEventType.STEP_OUTPUT:
check.inst_param(event_specific_data, "event_specific_data", StepOutputData)
elif event_type == DagsterEventType.STEP_FAILURE:
check.inst_param(event_specific_data, "event_specific_data", StepFailureData)
elif event_type == DagsterEventType.STEP_SUCCESS:
check.inst_param(event_specific_data, "event_specific_data", StepSuccessData)
elif event_type == DagsterEventType.ASSET_MATERIALIZATION:
check.inst_param(event_specific_data, "event_specific_data", StepMaterializationData)
elif event_type == DagsterEventType.STEP_EXPECTATION_RESULT:
check.inst_param(event_specific_data, "event_specific_data", StepExpectationResultData)
elif event_type == DagsterEventType.STEP_INPUT:
check.inst_param(event_specific_data, "event_specific_data", StepInputData)
elif event_type in (
DagsterEventType.ENGINE_EVENT,
DagsterEventType.STEP_WORKER_STARTING,
DagsterEventType.STEP_WORKER_STARTED,
DagsterEventType.RESOURCE_INIT_STARTED,
DagsterEventType.RESOURCE_INIT_SUCCESS,
DagsterEventType.RESOURCE_INIT_FAILURE,
):
check.inst_param(event_specific_data, "event_specific_data", EngineEventData)
elif event_type == DagsterEventType.HOOK_ERRORED:
check.inst_param(event_specific_data, "event_specific_data", HookErroredData)
elif event_type == DagsterEventType.ASSET_MATERIALIZATION_PLANNED:
check.inst_param(
event_specific_data, "event_specific_data", AssetMaterializationPlannedData
)
elif event_type == DagsterEventType.ASSET_CHECK_EVALUATION_PLANNED:
check.inst_param(event_specific_data, "event_specific_data", AssetCheckEvaluationPlanned)
elif event_type == DagsterEventType.ASSET_CHECK_EVALUATION:
check.inst_param(event_specific_data, "event_specific_data", AssetCheckEvaluation)
elif event_type == DagsterEventType.RUN_ENQUEUED:
check.opt_inst_param(event_specific_data, "event_specific_data", RunEnqueuedData)
return event_specific_data
def generate_event_batch_id():
return str(uuid.uuid4())
def log_step_event(
step_context: IStepContext,
event: "DagsterEvent",
batch_metadata: Optional["DagsterEventBatchMetadata"],
) -> None:
event_type = DagsterEventType(event.event_type_value)
log_level = logging.ERROR if event_type in FAILURE_EVENTS else logging.DEBUG
step_context.log.log_dagster_event(
level=log_level,
msg=event.message or f"{event_type} for step {step_context.step.key}",
dagster_event=event,
batch_metadata=batch_metadata,
)
def log_job_event(job_context: IPlanContext, event: "DagsterEvent") -> None:
event_type = DagsterEventType(event.event_type_value)
log_level = logging.ERROR if event_type in FAILURE_EVENTS else logging.DEBUG
job_context.log.log_dagster_event(
level=log_level,
msg=event.message or f"{event_type} for pipeline {job_context.job_name}",
dagster_event=event,
)
def log_resource_event(log_manager: DagsterLogManager, event: "DagsterEvent") -> None:
event_specific_data = cast("EngineEventData", event.event_specific_data)
log_level = logging.ERROR if event_specific_data.error else logging.DEBUG
log_manager.log_dagster_event(level=log_level, msg=event.message or "", dagster_event=event)
| RunFailureReason |
python | django__django | tests/generic_relations/models.py | {
"start": 3933,
"end": 4142
} | class ____(models.Model):
content_type = models.ForeignKey(ContentType, models.SET_NULL, null=True)
object_id = models.PositiveIntegerField(null=True)
content_object = GenericForeignKey()
| AllowsNullGFK |
python | tornadoweb__tornado | demos/tcpecho/server.py | {
"start": 307,
"end": 1118
} | class ____(TCPServer):
@gen.coroutine
def handle_stream(self, stream, address):
while True:
try:
data = yield stream.read_until(b"\n")
logger.info("Received bytes: %s", data)
if not data.endswith(b"\n"):
data = data + b"\n"
yield stream.write(data)
except StreamClosedError:
logger.warning("Lost client at host %s", address[0])
break
except Exception as e:
print(e)
async def main():
options.parse_command_line()
logger.info("Listening on TCP port %d", options.port)
server = EchoServer()
server.listen(options.port)
await asyncio.Event().wait()
if __name__ == "__main__":
asyncio.run(main())
| EchoServer |
python | ZoranPandovski__al-go-rithms | math/highest_common_factor/cpp/hcf.py | {
"start": 774,
"end": 1018
} | class ____(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def testsimple(self):
a = hcf.hcf(40,16)
self.assertEqual(a,8)
if __name__ == '__main__':
unittest.main()
| TestHCFFunction |
python | imageio__imageio | imageio/plugins/tifffile_v3.py | {
"start": 2320,
"end": 14335
} | class ____(PluginV3):
"""Support for tifffile as backend.
Parameters
----------
request : iio.Request
A request object that represents the users intent. It provides a
standard interface for a plugin to access the various ImageResources.
Check the docs for details.
kwargs : Any
Additional kwargs are forwarded to tifffile's constructor, i.e.
to ``TiffFile`` for reading or ``TiffWriter`` for writing.
"""
def __init__(self, request: Request, **kwargs) -> None:
super().__init__(request)
self._fh = None
if request.mode.io_mode == "r":
try:
self._fh = tifffile.TiffFile(request.get_file(), **kwargs)
except tifffile.tifffile.TiffFileError:
raise InitializationError("Tifffile can not read this file.")
else:
self._fh = tifffile.TiffWriter(request.get_file(), **kwargs)
# ---------------------
# Standard V3 Interface
# ---------------------
def read(self, *, index: int = None, page: int = None, **kwargs) -> np.ndarray:
"""Read a ndimage or page.
The ndimage returned depends on the value of both ``index`` and
``page``. ``index`` selects the series to read and ``page`` allows
selecting a single page from the selected series. If ``index=None``,
``page`` is understood as a flat index, i.e., the selection ignores
individual series inside the file. If both ``index`` and ``page`` are
``None``, then all the series are read and returned as a batch.
Parameters
----------
index : int
If ``int``, select the ndimage (series) located at that index inside
the file and return ``page`` from it. If ``None`` and ``page`` is
``int`` read the page located at that (flat) index inside the file.
If ``None`` and ``page=None``, read all ndimages from the file and
return them as a batch.
page : int
If ``None`` return the full selected ndimage. If ``int``, read the
page at the selected index and return it.
kwargs : Any
Additional kwargs are forwarded to TiffFile's ``as_array`` method.
Returns
-------
ndarray : np.ndarray
The decoded ndimage or page.
"""
if "key" not in kwargs:
kwargs["key"] = page
elif page is not None:
raise ValueError("Can't use `page` and `key` at the same time.")
# set plugin default for ``index``
if index is not None and "series" in kwargs:
raise ValueError("Can't use `series` and `index` at the same time.")
elif "series" in kwargs:
index = kwargs.pop("series")
elif index is not None:
pass
else:
index = 0
if index is Ellipsis and page is None:
# read all series in the file and return them as a batch
ndimage = np.stack([x for x in self.iter(**kwargs)])
else:
index = None if index is Ellipsis else index
ndimage = self._fh.asarray(series=index, **kwargs)
return ndimage
def iter(self, **kwargs) -> np.ndarray:
"""Yield ndimages from the TIFF.
Parameters
----------
kwargs : Any
Additional kwargs are forwarded to the TiffPageSeries' ``as_array``
method.
Yields
------
ndimage : np.ndarray
A decoded ndimage.
"""
for sequence in self._fh.series:
yield sequence.asarray(**kwargs)
def write(
self, ndimage: ArrayLike, *, is_batch: bool = False, **kwargs
) -> Optional[bytes]:
"""Save a ndimage as TIFF.
Parameters
----------
ndimage : ArrayLike
The ndimage to encode and write to the ImageResource.
is_batch : bool
If True, the first dimension of the given ndimage is treated as a
batch dimension and each element will create a new series.
kwargs : Any
Additional kwargs are forwarded to TiffWriter's ``write`` method.
Returns
-------
encoded_image : bytes
If the ImageResource is ``"<bytes>"``, return the encoded bytes.
Otherwise write returns None.
Notes
-----
Incremental writing is supported. Subsequent calls to ``write`` will
create new series unless ``contiguous=True`` is used, in which case the
call to write will append to the current series.
"""
if not is_batch:
ndimage = np.asarray(ndimage)[None, :]
for image in ndimage:
self._fh.write(image, **kwargs)
if self._request._uri_type == URI_BYTES:
self._fh.close()
file = cast(BytesIO, self._request.get_file())
return file.getvalue()
def metadata(
self, *, index: int = Ellipsis, page: int = None, exclude_applied: bool = True
) -> Dict[str, Any]:
"""Format-Specific TIFF metadata.
The metadata returned depends on the value of both ``index`` and
``page``. ``index`` selects a series and ``page`` allows selecting a
single page from the selected series. If ``index=Ellipsis``, ``page`` is
understood as a flat index, i.e., the selection ignores individual
series inside the file. If ``index=Ellipsis`` and ``page=None`` then
global (file-level) metadata is returned.
Parameters
----------
index : int
Select the series of which to extract metadata from. If Ellipsis, treat
page as a flat index into the file's pages.
page : int
If not None, select the page of which to extract metadata from. If
None, read series-level metadata or, if ``index=...`` global,
file-level metadata.
exclude_applied : bool
For API compatibility. Currently ignored.
Returns
-------
metadata : dict
A dictionary with information regarding the tiff flavor (file-level)
or tiff tags (page-level).
"""
if index is not Ellipsis and page is not None:
target = self._fh.series[index].pages[page]
elif index is not Ellipsis and page is None:
# This is based on my understanding that series-level metadata is
# stored in the first TIFF page.
target = self._fh.series[index].pages[0]
elif index is Ellipsis and page is not None:
target = self._fh.pages[page]
else:
target = None
metadata = {}
if target is None:
# return file-level metadata
metadata["byteorder"] = self._fh.byteorder
for flag in tifffile.TIFF.FILE_FLAGS:
flag_value = getattr(self._fh, "is_" + flag)
metadata["is_" + flag] = flag_value
if flag_value and hasattr(self._fh, flag + "_metadata"):
flavor_metadata = getattr(self._fh, flag + "_metadata")
if isinstance(flavor_metadata, tuple):
metadata.update(flavor_metadata[0])
else:
metadata.update(flavor_metadata)
else:
# tifffile may return a TiffFrame instead of a page
target = target.keyframe
metadata.update({tag.name: tag.value for tag in target.tags})
metadata.update(
{
"planar_configuration": target.planarconfig,
"compression": target.compression,
"predictor": target.predictor,
"orientation": None, # TODO
"description1": target.description1,
"description": target.description,
"software": target.software,
**_get_resolution(target),
"datetime": target.datetime,
}
)
return metadata
def properties(self, *, index: int = None, page: int = None) -> ImageProperties:
"""Standardized metadata.
The properties returned depend on the value of both ``index`` and
``page``. ``index`` selects a series and ``page`` allows selecting a
single page from the selected series. If ``index=Ellipsis``, ``page`` is
understood as a flat index, i.e., the selection ignores individual
series inside the file. If ``index=Ellipsis`` and ``page=None`` then
global (file-level) properties are returned. If ``index=Ellipsis``
and ``page=...``, file-level properties for the flattened index are
returned.
Parameters
----------
index : int
If ``int``, select the ndimage (series) located at that index inside
the file. If ``Ellipsis`` and ``page`` is ``int`` extract the
properties of the page located at that (flat) index inside the file.
If ``Ellipsis`` and ``page=None``, return the properties for the
batch of all ndimages in the file.
page : int
If ``None`` return the properties of the full ndimage. If ``...``
return the properties of the flattened index. If ``int``,
return the properties of the page at the selected index only.
Returns
-------
image_properties : ImageProperties
The standardized metadata (properties) of the selected ndimage or series.
"""
index = index or 0
page_idx = 0 if page in (None, Ellipsis) else page
if index is Ellipsis:
target_page = self._fh.pages[page_idx]
else:
target_page = self._fh.series[index].pages[page_idx]
if index is Ellipsis and page is None:
n_series = len(self._fh.series)
props = ImageProperties(
shape=(n_series, *target_page.shape),
dtype=target_page.dtype,
n_images=n_series,
is_batch=True,
spacing=_get_resolution(target_page).get("resolution"),
)
elif index is Ellipsis and page is Ellipsis:
n_pages = len(self._fh.pages)
props = ImageProperties(
shape=(n_pages, *target_page.shape),
dtype=target_page.dtype,
n_images=n_pages,
is_batch=True,
spacing=_get_resolution(target_page).get("resolution"),
)
else:
props = ImageProperties(
shape=target_page.shape,
dtype=target_page.dtype,
is_batch=False,
spacing=_get_resolution(target_page).get("resolution"),
)
return props
def close(self) -> None:
if self._fh is not None:
self._fh.close()
super().close()
# ------------------------------
# Add-on Interface inside imopen
# ------------------------------
def iter_pages(self, index=..., **kwargs):
"""Yield pages from a TIFF file.
This generator walks over the flat index of the pages inside an
ImageResource and yields them in order.
Parameters
----------
index : int
The index of the series to yield pages from. If Ellipsis, walk over
the file's flat index (and ignore individual series).
kwargs : Any
Additional kwargs are passed to TiffPage's ``as_array`` method.
Yields
------
page : np.ndarray
A page stored inside the TIFF file.
"""
if index is Ellipsis:
pages = self._fh.pages
else:
pages = self._fh.series[index]
for page in pages:
yield page.asarray(**kwargs)
| TifffilePlugin |
python | pytorch__pytorch | torch/nn/attention/varlen.py | {
"start": 546,
"end": 10101
} | class ____(NamedTuple):
"""
Request which auxiliary outputs to compute from varlen_attn.
Each field is a boolean indicating whether that auxiliary output should be computed.
"""
lse: bool = False
@torch.library.custom_op("torch_attn::_varlen_attn", mutates_args={})
def _varlen_attn(
query: torch.Tensor,
key: torch.Tensor,
value: torch.Tensor,
cu_seq_q: torch.Tensor,
cu_seq_k: torch.Tensor,
max_q: int,
max_k: int,
is_causal: bool = False,
) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
"""
Private custom op for variable-length attention.
This is the internal implementation. Users should use the public varlen_attn function instead.
"""
use_cudnn = query.is_cuda and _should_use_cudnn(query.device.index)
if use_cudnn:
log.info("Using cuDNN backend for varlen_attn")
result = torch.ops.aten._cudnn_attention_forward(
query,
key,
value,
None, # attn_bias
cu_seq_q,
cu_seq_k,
max_q,
max_k,
True, # compute_log_sumexp
0.0, # dropout_p hardcoded to 0.0
is_causal,
False, # return_debug_mask
)
# cuDNN returns: (output, logsumexp, cum_seq_q, cum_seq_k, max_q, max_k, philox_seed, philox_offset, debug_attn_mask)
output, softmax_lse, rng_state = result[0], result[1], result[6]
else:
log.info("Using Flash Attention backend for varlen_attn")
output, softmax_lse, rng_state, _, _ = torch.ops.aten._flash_attention_forward(
query,
key,
value,
cu_seq_q,
cu_seq_k,
max_q,
max_k,
0.0, # dropout_p hardcoded to 0.0
is_causal,
return_debug_mask=False,
)
rng_state_ = torch.zeros(
(2,), dtype=torch.uint64, device=query.device
) # hardcoded since dropout is hardcoded to 0
return output, softmax_lse, rng_state_
@_varlen_attn.register_fake
def _varlen_attn_fake(
query: torch.Tensor,
key: torch.Tensor,
value: torch.Tensor,
cu_seq_q: torch.Tensor,
cu_seq_k: torch.Tensor,
max_q: int,
max_k: int,
is_causal: bool = False,
) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
"""
Fake implementation for meta tensor computation and tracing.
Based on the 3D varlen path from meta__flash_attention_forward:
- query shape: (total, num_heads, head_dim)
- logsumexp shape: (num_heads, total_q)
"""
# Output has same shape as query
output = torch.empty_like(query)
# For varlen path: logsumexp shape is (num_heads, total_q)
total_q = query.size(0)
num_heads = query.size(1)
logsumexp = torch.empty(
(num_heads, total_q), dtype=torch.float, device=query.device
)
rng_state = torch.empty((2,), dtype=torch.uint64, device=query.device)
return output, logsumexp, rng_state
def varlen_attn(
    query: torch.Tensor,
    key: torch.Tensor,
    value: torch.Tensor,
    cu_seq_q: torch.Tensor,
    cu_seq_k: torch.Tensor,
    max_q: int,
    max_k: int,
    is_causal: bool = False,
    return_aux: AuxRequest | None = None,
) -> torch.Tensor | tuple[torch.Tensor, torch.Tensor]:
    """
    Compute variable-length attention using Flash Attention.
    This function is similar to scaled_dot_product_attention but optimized for
    variable-length sequences using cumulative sequence position tensors.
    Args:
        - query (Tensor): Query tensor; shape :math:`(T_q, H, D)`
        - key (Tensor): Key tensor; shape :math:`(T_k, H, D)`
        - value (Tensor): Value tensor; shape :math:`(T_k, H, D)`
        - cu_seq_q (Tensor): Cumulative sequence positions for queries; shape :math:`(N+1,)`
        - cu_seq_k (Tensor): Cumulative sequence positions for keys/values; shape :math:`(N+1,)`
        - max_q (int): Maximum query sequence length in the batch.
        - max_k (int): Maximum key/value sequence length in the batch.
        - is_causal (bool, optional): If set to True, applies causal masking (default: False).
        - return_aux (Optional[AuxRequest]): If not None and ``return_aux.lse`` is True, also returns the logsumexp tensor.
    Shape legend:
        - :math:`N`: Batch size
        - :math:`T_q`: Total number of query tokens in the batch (sum of all query sequence lengths)
        - :math:`T_k`: Total number of key/value tokens in the batch (sum of all key/value sequence lengths)
        - :math:`H`: Number of attention heads
        - :math:`D`: Head dimension
    Returns:
        - Tensor: Output tensor from attention computation
        - If ``return_aux`` is not None and ``return_aux.lse`` is True, returns a tuple of Tensors:
          (output, lse), where lse is the logsumexp
    Example::
        >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_CUDA)
        >>> batch_size, max_seq_len, embed_dim, num_heads = 2, 512, 1024, 16
        >>> head_dim = embed_dim // num_heads
        >>> seq_lengths = []
        >>> for _ in range(batch_size):
        ...     length = torch.randint(1, max_seq_len // 64 + 1, (1,)).item() * 64
        ...     seq_lengths.append(min(length, max_seq_len))
        >>> seq_lengths = torch.tensor(seq_lengths, device="cuda")
        >>> total_tokens = seq_lengths.sum().item()
        >>>
        >>> # Create packed query, key, value tensors
        >>> query = torch.randn(
        ...     total_tokens, num_heads, head_dim, dtype=torch.float16, device="cuda"
        ... )
        >>> key = torch.randn(
        ...     total_tokens, num_heads, head_dim, dtype=torch.float16, device="cuda"
        ... )
        >>> value = torch.randn(
        ...     total_tokens, num_heads, head_dim, dtype=torch.float16, device="cuda"
        ... )
        >>>
        >>> # Build cumulative sequence tensor
        >>> cu_seq = torch.zeros(batch_size + 1, device="cuda", dtype=torch.int32)
        >>> cu_seq[1:] = seq_lengths.cumsum(0)
        >>> max_len = seq_lengths.max().item()
        >>>
        >>> # Call varlen_attn
        >>> output = varlen_attn(
        ...     query, key, value, cu_seq, cu_seq, max_len, max_len, is_causal=False
        ... )
    """
    # Dispatch through the registered custom op; RNG state is an internal detail
    # (dropout is fixed at 0.0) and is never surfaced to callers.
    output, logsumexp, _rng = torch.ops.torch_attn._varlen_attn(
        query, key, value, cu_seq_q, cu_seq_k, max_q, max_k, is_causal
    )
    wants_lse = return_aux is not None and return_aux.lse
    return (output, logsumexp) if wants_lse else output
def _setup_context(ctx: Any, inputs: tuple[Any, ...], output: Any) -> None:
query, key, value, cu_seq_q, cu_seq_k, max_q, max_k, is_causal = inputs
out, lse, rng_state = output
ctx.save_for_backward(query, key, value, cu_seq_q, cu_seq_k, out, lse, rng_state)
ctx.max_q = max_q
ctx.max_k = max_k
ctx.is_causal = is_causal
@torch.library.custom_op("torch_attn::_varlen_attn_backward", mutates_args={})
def _varlen_attn_backward(
    grad_out: torch.Tensor,
    query: torch.Tensor,
    key: torch.Tensor,
    value: torch.Tensor,
    out: torch.Tensor,
    lse: torch.Tensor,
    cu_seq_q: torch.Tensor,
    cu_seq_k: torch.Tensor,
    max_q: int,
    max_k: int,
    is_causal: bool,
    rng_state: torch.Tensor,
) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
    """
    Backward for the varlen attention custom op.

    Dispatches to the cuDNN or Flash Attention aten backward kernel; both
    kernels share the same argument list, so only the callee differs.
    Returns gradients for (query, key, value).
    """
    # Trailing "unused" tensor argument expected by both aten backward kernels.
    placeholder = torch.empty(0, device=query.device)
    if query.is_cuda and _should_use_cudnn(query.device.index):
        log.info("Using cuDNN backend for varlen_attn")
        backward_kernel = torch.ops.aten._cudnn_attention_backward
    else:
        log.info("Using Flash Attention backend for varlen_attn")
        backward_kernel = torch.ops.aten._flash_attention_backward
    grad_q, grad_k, grad_v = backward_kernel(
        grad_out,
        query,
        key,
        value,
        out,
        lse,
        cu_seq_q,
        cu_seq_k,
        max_q,
        max_k,
        0.0,  # dropout_p hardcoded to 0.0
        is_causal,
        rng_state,
        placeholder,
    )
    return grad_q, grad_k, grad_v
@_varlen_attn_backward.register_fake
def _varlen_attn_backward_fake(
    grad_out: torch.Tensor,
    query: torch.Tensor,
    key: torch.Tensor,
    value: torch.Tensor,
    out: torch.Tensor,
    lse: torch.Tensor,
    cu_seq_q: torch.Tensor,
    cu_seq_k: torch.Tensor,
    max_q: int,
    max_k: int,
    is_causal: bool,
    rng_state: torch.Tensor,
) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
    """
    Fake implementation for meta tensor computation and tracing.

    Each input gradient has the same shape/dtype/device as its input.
    """
    return (
        torch.empty_like(query),
        torch.empty_like(key),
        torch.empty_like(value),
    )
def _backward(
    ctx: Any, grad_out: torch.Tensor, grad_lse: torch.Tensor, grad_rng: torch.Tensor
) -> tuple[torch.Tensor | None, ...]:
    """Autograd bridge: unpack saved state and call the backward custom op."""
    q, k, v, cu_q, cu_k, fwd_out, logsumexp, rng = ctx.saved_tensors
    dq, dk, dv = torch.ops.torch_attn._varlen_attn_backward(
        grad_out,
        q,
        k,
        v,
        fwd_out,
        logsumexp,
        cu_q,
        cu_k,
        ctx.max_q,
        ctx.max_k,
        ctx.is_causal,
        rng,
    )
    # Only query/key/value receive gradients; the remaining forward inputs
    # (cu_seq_q, cu_seq_k, max_q, max_k, is_causal, ...) get None.
    return dq, dk, dv, None, None, None, None, None, None
# Wire autograd support into the custom op: _setup_context captures the
# forward's inputs/outputs, and _backward consumes them to produce gradients.
_varlen_attn.register_autograd(_backward, setup_context=_setup_context)
| AuxRequest |
python | tensorflow__tensorflow | tensorflow/python/distribute/coordinator/get_task_states_test.py | {
"start": 1598,
"end": 6157
} | class ____(object): # pylint: disable=missing-docstring
def setUp(self, num_workers, num_ps):
super().setUp()
self._cluster = multi_worker_test_base.create_multi_process_cluster(
num_workers=num_workers, num_ps=num_ps, rpc_layer="grpc")
self._cluster_def = self._cluster.cluster_resolver.cluster_spec().as_dict()
self._cluster_def["chief"] = [
"localhost:%d" % multi_worker_test_base.pick_unused_port()
]
cluster_resolver = cluster_resolver_lib.SimpleClusterResolver(
server_lib.ClusterSpec(self._cluster_def), rpc_layer="grpc")
context.context().configure_coordination_service(
service_type="standalone",
service_leader="/job:ps/replica:0/task:0",
heartbeat_timeout_in_ms=_PULL_FREQ_IN_SEC * 1000,
allow_new_incarnation_to_reconnect=True)
self.strategy = parameter_server_strategy_v2.ParameterServerStrategyV2(
cluster_resolver)
self.cluster_coord = cluster_coordinator.ClusterCoordinator(self.strategy)
self.num_workers = num_workers
self.num_ps = num_ps
self.states = None
self.polling_thread = utils.RepeatedTimer(
interval=_PULL_FREQ_IN_SEC, function=self.get_task_states)
def tearDown(self):
super().tearDown()
self.polling_thread.stop()
self._cluster.stop()
self._cluster = None
def get_task_states(self):
self.states = context.context().get_task_states([("worker",
self.num_workers),
("ps", self.num_ps)])
def testAllTasksHealthy(self):
time.sleep(_PULL_FREQ_IN_SEC * 1.5)
self.assertLen(self.states, self.num_workers + self.num_ps)
for state in self.states:
self.assertIsNone(state)
def testWorkerPreempted(self):
self._cluster.kill_task("worker", 0)
time.sleep(_PULL_FREQ_IN_SEC * 2)
self.assertLen(self.states, self.num_workers + self.num_ps)
self.assertIsInstance(self.states[0], errors.UnavailableError)
self.assertIn("/job:worker/replica:0/task:0", self.states[0]._message)
self.assertEqual(self.states[0]._error_code, error_codes_pb2.UNAVAILABLE)
self.assertIn(_COORDINATION_ERROR_PAYLOAD_KEY,
self.states[0]._experimental_payloads)
for i in range(1, self.num_workers + self.num_ps):
self.assertIsNone(self.states[i])
self._cluster.start_task("worker", 0)
context.context().update_server_def(context.get_server_def())
time.sleep(_PULL_FREQ_IN_SEC * 2)
for state in self.states:
self.assertIsNone(state)
def testPSPreempted(self):
self._cluster.kill_task("ps", 1)
time.sleep(_PULL_FREQ_IN_SEC * 2)
self.assertLen(self.states, self.num_workers + self.num_ps)
state_ix = self.num_workers + 1
self.assertIsInstance(self.states[state_ix], errors.UnavailableError)
self.assertIn("/job:ps/replica:0/task:1", self.states[state_ix]._message)
self.assertEqual(self.states[state_ix]._error_code,
error_codes_pb2.UNAVAILABLE)
# Simulate the restart of all the tasks.
self._cluster.kill_task("ps", 0)
for index in range(2, self.num_ps):
self._cluster.kill_task("ps", index)
for index in range(self.num_workers):
self._cluster.kill_task("worker", index)
for index in range(self.num_ps):
self._cluster.start_task("ps", index)
for index in range(self.num_workers):
self._cluster.start_task("worker", index)
context.context().update_server_def(context.get_server_def())
time.sleep(_PULL_FREQ_IN_SEC * 2)
self.assertLen(self.states, self.num_workers + self.num_ps)
for state in self.states:
self.assertIsNone(state)
def testCoordinationServicePreempted(self):
self._cluster.kill_task("ps", 0)
time.sleep(_PULL_FREQ_IN_SEC * 2)
# `states` is None since Coordination Service is not available.
self.assertIsNone(self.states)
# Simulate the restart of all the tasks.
for index in range(1, self.num_ps):
self._cluster.kill_task("ps", index)
for index in range(self.num_workers):
self._cluster.kill_task("worker", index)
for index in range(self.num_ps):
self._cluster.start_task("ps", index)
for index in range(self.num_workers):
self._cluster.start_task("worker", index)
context.context().update_server_def(context.get_server_def())
time.sleep(_PULL_FREQ_IN_SEC * 2)
self.assertLen(self.states, self.num_workers + self.num_ps)
for state in self.states:
self.assertIsNone(state)
| GetTaskStatesTest |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/annotated1.py | {
"start": 396,
"end": 1247
} | class ____(struct2.Packed):
name: Annotated[str, struct2.ctype("<10s")]
serial_num: UnsignedShort
school: SignedChar
def ValueRange(a: int, b: int):
pass
T1 = Annotated[int, ValueRange(-10, 5)]
T2 = Annotated[T1, ValueRange(-20, 3)]
a: Annotated[Annotated[int, "hi"], "hi"] = 3
b: T2 = 5
TypeWithStringArg = Annotated["int", "this string should not be parsed"]
def func2(a: TypeWithStringArg):
return 3
# This should generate an error because the first type argument
# is not a valid type.
c: Annotated["this", "should generate an error"]
# This should generate an error because all Annotated types should
# include at least two type arguments.
d: Annotated[int]
# Verify that generic type aliases can be defined using Annotated.
_T = TypeVar("_T")
Param = Annotated[_T, "x"]
x1: Param[int] = 3
print(Param[int])
| Student |
python | django__django | tests/user_commands/management/commands/mutually_exclusive_required_with_same_dest.py | {
"start": 54,
"end": 488
} | class ____(BaseCommand):
def add_arguments(self, parser):
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument("--for", dest="until", action="store")
group.add_argument("--until", action="store")
def handle(self, *args, **options):
for option, value in options.items():
if value is not None:
self.stdout.write("%s=%s" % (option, value))
| Command |
python | getsentry__sentry | tests/sentry/utils/sdk_crashes/test_sdk_crash_detection_cocoa.py | {
"start": 24374,
"end": 28572
} | class ____(BaseSDKCrashDetectionMixin):
def test_hub_reported(self, mock_sdk_crash_reporter: MagicMock) -> None:
self.execute_test(
get_crash_event(function="-[SentryHub getScope]"), True, mock_sdk_crash_reporter
)
def test_sentrycrash_reported(self, mock_sdk_crash_reporter: MagicMock) -> None:
self.execute_test(
get_crash_event(function="sentrycrashdl_getBinaryImage"), True, mock_sdk_crash_reporter
)
def test_sentryisgreat_reported(self, mock_sdk_crash_reporter: MagicMock) -> None:
self.execute_test(
get_crash_event(function="-[sentryisgreat]"), True, mock_sdk_crash_reporter
)
def test_sentryswizzle_reported(self, mock_sdk_crash_reporter: MagicMock) -> None:
self.execute_test(
get_crash_event(
function="__47-[SentryBreadcrumbTracker swizzleViewDidAppear]_block_invoke_2"
),
True,
mock_sdk_crash_reporter,
)
def test_sentry_date_category_reported(self, mock_sdk_crash_reporter: MagicMock) -> None:
self.execute_test(
get_crash_event(function="+[NSDate(SentryExtras) sentry_fromIso8601String:]"),
True,
mock_sdk_crash_reporter,
)
def test_sentry_ns_data_category_reported(self, mock_sdk_crash_reporter: MagicMock) -> None:
self.execute_test(
get_crash_event(function="-[NSData(Sentry) sentry_nullTerminated:]"),
True,
mock_sdk_crash_reporter,
)
def test_sentry_swift_metric_kit_reported(self, mock_sdk_crash_reporter: MagicMock) -> None:
self.execute_test(
get_crash_event(function="SentryMXManager.didReceive"),
True,
mock_sdk_crash_reporter,
)
def test_sentry_swift_wrong_metric_kit_not_reported(
self, mock_sdk_crash_reporter: MagicMock
) -> None:
self.execute_test(
get_crash_event(function="SentryManager.didReceive"),
False,
mock_sdk_crash_reporter,
)
def test_sentrycrash_crash_reported(self, mock_sdk_crash_reporter: MagicMock) -> None:
self.execute_test(
get_crash_event(function="-[SentryCrash crash]"),
True,
mock_sdk_crash_reporter,
)
def test_senryhub_not_reported(self, mock_sdk_crash_reporter: MagicMock) -> None:
self.execute_test(
get_crash_event(function="-[SenryHub getScope]"),
False,
mock_sdk_crash_reporter,
)
def test_senryhub_no_brackets_not_reported(self, mock_sdk_crash_reporter: MagicMock) -> None:
self.execute_test(
get_crash_event(function="-SentryHub getScope]"),
False,
mock_sdk_crash_reporter,
)
def test_somesentryhub_not_reported(self, mock_sdk_crash_reporter: MagicMock) -> None:
self.execute_test(
get_crash_event(function="-[SomeSentryHub getScope]"),
False,
mock_sdk_crash_reporter,
)
# "+[SentrySDK crash]" is used for testing, so we must ignore it.
def test_sentrycrash_not_reported(self, mock_sdk_crash_reporter: MagicMock) -> None:
self.execute_test(
get_crash_event(function="+[SentrySDK crash]"),
False,
mock_sdk_crash_reporter,
)
# "+[SentrySDKInternal crash]" is used for testing, so we must ignore it.
def test_sentrySDKInternal_not_reported(self, mock_sdk_crash_reporter: MagicMock) -> None:
self.execute_test(
get_crash_event(function="+[SentrySDKInternal crash]"),
False,
mock_sdk_crash_reporter,
)
# "SentryCrashExceptionApplicationHelper _crashOnException" calls abort() intentionally, so we must ignore it.
def test_sentrycrash_exception_application_helper_not_reported(
self, mock_sdk_crash_reporter: MagicMock
) -> None:
self.execute_test(
get_crash_event(function="+[SentryCrashExceptionApplicationHelper _crashOnException:]"),
False,
mock_sdk_crash_reporter,
)
| CococaSDKFunctionTestMixin |
python | docker__docker-py | docker/types/services.py | {
"start": 15835,
"end": 18284
} | class ____(dict):
"""
Used to specify the way container updates should be performed by a service.
Args:
parallelism (int): Maximum number of tasks to be updated in one
iteration (0 means unlimited parallelism). Default: 0.
delay (int): Amount of time between updates, in nanoseconds.
failure_action (string): Action to take if an updated task fails to
run, or stops running during the update. Acceptable values are
``continue``, ``pause``, as well as ``rollback`` since API v1.28.
Default: ``continue``
monitor (int): Amount of time to monitor each updated task for
failures, in nanoseconds.
max_failure_ratio (float): The fraction of tasks that may fail during
an update before the failure action is invoked, specified as a
floating point number between 0 and 1. Default: 0
order (string): Specifies the order of operations when rolling out an
updated task. Either ``start-first`` or ``stop-first`` are accepted.
"""
def __init__(self, parallelism=0, delay=None, failure_action='continue',
monitor=None, max_failure_ratio=None, order=None):
self['Parallelism'] = parallelism
if delay is not None:
self['Delay'] = delay
if failure_action not in ('pause', 'continue', 'rollback'):
raise errors.InvalidArgument(
'failure_action must be one of `pause`, `continue`, `rollback`'
)
self['FailureAction'] = failure_action
if monitor is not None:
if not isinstance(monitor, int):
raise TypeError('monitor must be an integer')
self['Monitor'] = monitor
if max_failure_ratio is not None:
if not isinstance(max_failure_ratio, (float, int)):
raise TypeError('max_failure_ratio must be a float')
if max_failure_ratio > 1 or max_failure_ratio < 0:
raise errors.InvalidArgument(
'max_failure_ratio must be a number between 0 and 1'
)
self['MaxFailureRatio'] = max_failure_ratio
if order is not None:
if order not in ('start-first', 'stop-first'):
raise errors.InvalidArgument(
'order must be either `start-first` or `stop-first`'
)
self['Order'] = order
| UpdateConfig |
python | google__pytype | pytype/overlays/metaclass.py | {
"start": 2681,
"end": 3280
} | class ____(abstract.PyTDFunction):
"""Implements with_metaclass."""
@classmethod
def make(cls, ctx, module):
return super().make("with_metaclass", ctx, module)
def call(self, node, func, args, alias_map=None):
"""Creates an anonymous class to act as a metaclass."""
del func, alias_map # unused
self.match_args(node, args)
meta = abstract_utils.get_atomic_value(
args.posargs[0], default=self.ctx.convert.unsolvable
)
bases = args.posargs[1:]
result = WithMetaclassInstance(self.ctx, meta, bases).to_variable(node)
return node, result
| WithMetaclass |
python | neetcode-gh__leetcode | python/0338-counting-bits.py | {
"start": 0,
"end": 282
} | class ____:
def countBits(self, n: int) -> List[int]:
dp = [0] * (n + 1)
offset = 1
for i in range(1, n + 1):
if offset * 2 == i:
offset = i
dp[i] = 1 + dp[i - offset]
return dp
# Another dp solution
| Solution |
python | walkccc__LeetCode | solutions/877. Stone Game/877.py | {
"start": 0,
"end": 468
} | class ____:
def stoneGame(self, piles: list[int]) -> bool:
n = len(piles)
# dp[i][j] := the maximum stones you can get more than your opponent in piles[i..j]
dp = [[0] * n for _ in range(n)]
for i, pile in enumerate(piles):
dp[i][i] = pile
for d in range(1, n):
for i in range(n - d):
j = i + d
dp[i][j] = max(piles[i] - dp[i + 1][j],
piles[j] - dp[i][j - 1])
return dp[0][n - 1] > 0
| Solution |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 31179,
"end": 35365
} | class ____(sgqlc.types.Enum):
"""The possible item types found in a timeline.
Enumeration Choices:
* `ADDED_TO_PROJECT_EVENT`: Represents a 'added_to_project' event
on a given issue or pull request.
* `ASSIGNED_EVENT`: Represents an 'assigned' event on any
assignable object.
* `CLOSED_EVENT`: Represents a 'closed' event on any `Closable`.
* `COMMENT_DELETED_EVENT`: Represents a 'comment_deleted' event on
a given issue or pull request.
* `CONNECTED_EVENT`: Represents a 'connected' event on a given
issue or pull request.
* `CONVERTED_NOTE_TO_ISSUE_EVENT`: Represents a
'converted_note_to_issue' event on a given issue or pull
request.
* `CONVERTED_TO_DISCUSSION_EVENT`: Represents a
'converted_to_discussion' event on a given issue.
* `CROSS_REFERENCED_EVENT`: Represents a mention made by one issue
or pull request to another.
* `DEMILESTONED_EVENT`: Represents a 'demilestoned' event on a
given issue or pull request.
* `DISCONNECTED_EVENT`: Represents a 'disconnected' event on a
given issue or pull request.
* `ISSUE_COMMENT`: Represents a comment on an Issue.
* `LABELED_EVENT`: Represents a 'labeled' event on a given issue
or pull request.
* `LOCKED_EVENT`: Represents a 'locked' event on a given issue or
pull request.
* `MARKED_AS_DUPLICATE_EVENT`: Represents a 'marked_as_duplicate'
event on a given issue or pull request.
* `MENTIONED_EVENT`: Represents a 'mentioned' event on a given
issue or pull request.
* `MILESTONED_EVENT`: Represents a 'milestoned' event on a given
issue or pull request.
* `MOVED_COLUMNS_IN_PROJECT_EVENT`: Represents a
'moved_columns_in_project' event on a given issue or pull
request.
* `PINNED_EVENT`: Represents a 'pinned' event on a given issue or
pull request.
* `REFERENCED_EVENT`: Represents a 'referenced' event on a given
`ReferencedSubject`.
* `REMOVED_FROM_PROJECT_EVENT`: Represents a
'removed_from_project' event on a given issue or pull request.
* `RENAMED_TITLE_EVENT`: Represents a 'renamed' event on a given
issue or pull request
* `REOPENED_EVENT`: Represents a 'reopened' event on any
`Closable`.
* `SUBSCRIBED_EVENT`: Represents a 'subscribed' event on a given
`Subscribable`.
* `TRANSFERRED_EVENT`: Represents a 'transferred' event on a given
issue or pull request.
* `UNASSIGNED_EVENT`: Represents an 'unassigned' event on any
assignable object.
* `UNLABELED_EVENT`: Represents an 'unlabeled' event on a given
issue or pull request.
* `UNLOCKED_EVENT`: Represents an 'unlocked' event on a given
issue or pull request.
* `UNMARKED_AS_DUPLICATE_EVENT`: Represents an
'unmarked_as_duplicate' event on a given issue or pull request.
* `UNPINNED_EVENT`: Represents an 'unpinned' event on a given
issue or pull request.
* `UNSUBSCRIBED_EVENT`: Represents an 'unsubscribed' event on a
given `Subscribable`.
* `USER_BLOCKED_EVENT`: Represents a 'user_blocked' event on a
given user.
"""
__schema__ = github_schema
__choices__ = (
"ADDED_TO_PROJECT_EVENT",
"ASSIGNED_EVENT",
"CLOSED_EVENT",
"COMMENT_DELETED_EVENT",
"CONNECTED_EVENT",
"CONVERTED_NOTE_TO_ISSUE_EVENT",
"CONVERTED_TO_DISCUSSION_EVENT",
"CROSS_REFERENCED_EVENT",
"DEMILESTONED_EVENT",
"DISCONNECTED_EVENT",
"ISSUE_COMMENT",
"LABELED_EVENT",
"LOCKED_EVENT",
"MARKED_AS_DUPLICATE_EVENT",
"MENTIONED_EVENT",
"MILESTONED_EVENT",
"MOVED_COLUMNS_IN_PROJECT_EVENT",
"PINNED_EVENT",
"REFERENCED_EVENT",
"REMOVED_FROM_PROJECT_EVENT",
"RENAMED_TITLE_EVENT",
"REOPENED_EVENT",
"SUBSCRIBED_EVENT",
"TRANSFERRED_EVENT",
"UNASSIGNED_EVENT",
"UNLABELED_EVENT",
"UNLOCKED_EVENT",
"UNMARKED_AS_DUPLICATE_EVENT",
"UNPINNED_EVENT",
"UNSUBSCRIBED_EVENT",
"USER_BLOCKED_EVENT",
)
| IssueTimelineItemsItemType |
python | apache__airflow | providers/cncf/kubernetes/tests/unit/cncf/kubernetes/backcompat/test_backwards_compat_converters.py | {
"start": 1424,
"end": 9477
} | class ____:
def to_k8s_client_obj(self):
return "converted_object"
def test__convert_kube_model_object_normal_value():
obj = MockKubeModelObject()
new_class = type(obj)
result = _convert_kube_model_object(obj, new_class)
assert result == "converted_object"
def test__convert_kube_model_object_already_instance():
result = _convert_kube_model_object("obj", str)
assert result == "obj"
def test__convert_kube_model_object_invalid_type():
obj = "obj"
with pytest.raises(AirflowException) as exc_info:
_convert_kube_model_object(obj, int)
assert str(exc_info.value) == f"Expected {int}, got {type(obj)}"
# testcase of _convert_from_dict() function
@pytest.fixture
def mock_api_client():
with patch(
"airflow.providers.cncf.kubernetes.backcompat.backwards_compat_converters.ApiClient"
) as mock_class:
instance = mock_class.return_value
instance._ApiClient__deserialize_model = Mock(return_value="mocked_instance")
yield instance
def test_convert_from_dict_with_new_class_instance(mock_api_client):
obj = Mock()
result = _convert_from_dict(obj, type(obj))
assert result == obj
def test_convert_from_dict_with_dict(mock_api_client):
obj = {"key": "value"}
new_class = Mock()
result = _convert_from_dict(obj, type(new_class))
mock_api_client._ApiClient__deserialize_model.assert_called_once_with(obj, type(new_class))
assert result == "mocked_instance"
def test_convert_from_dict_with_invalid_type():
obj = "not a dict"
new_class = Mock()
with pytest.raises(AirflowException) as exc_info:
_convert_from_dict(obj, type(new_class))
assert str(exc_info.value) == "Expected dict or <class 'unittest.mock.Mock'>, got <class 'str'>"
# testcase of convert_volume() function
@patch("airflow.providers.cncf.kubernetes.backcompat.backwards_compat_converters._convert_kube_model_object")
def test_convert_volume_normal_value(mock_convert_kube_model_object):
mock_convert_kube_model_object.return_value = k8s.V1Volume(name="test_convert_volume")
volume = Mock()
result = convert_volume(volume)
mock_convert_kube_model_object.assert_called_once_with(volume, k8s.V1Volume)
assert result.name == "test_convert_volume"
# testcase of convert_volume_mount() function
@patch("airflow.providers.cncf.kubernetes.backcompat.backwards_compat_converters._convert_kube_model_object")
def test_convert_volume_mount_normal_value(mock_convert_kube_model_object):
mock_convert_kube_model_object.return_value = k8s.V1VolumeMount(
name="test_volume_mount", mount_path="/mnt/test"
)
volume = Mock()
result = convert_volume_mount(volume)
mock_convert_kube_model_object.assert_called_once_with(volume, k8s.V1VolumeMount)
assert result.name == "test_volume_mount"
assert result.mount_path == "/mnt/test"
# testcase of convert_port() function
@patch("airflow.providers.cncf.kubernetes.backcompat.backwards_compat_converters._convert_kube_model_object")
def test_convert_port_normal_value(mock_convert_kube_model_object):
mock_convert_kube_model_object.return_value = k8s.V1ContainerPort(container_port=80)
volume = Mock()
result = convert_port(volume)
mock_convert_kube_model_object.assert_called_once_with(volume, k8s.V1ContainerPort)
assert result.container_port == 80
# testcase of convert_env_vars() function
def test_convert_env_vars_with_dict():
# Normal value input case test
env_vars = {"FOO": "bar", "BAZ": "qux"}
result = convert_env_vars(env_vars)
expected_result = [k8s.V1EnvVar(name="FOO", value="bar"), k8s.V1EnvVar(name="BAZ", value="qux")]
assert isinstance(result, list)
assert len(result) == 2
assert result == expected_result
def test_convert_env_vars_with_list():
# Normal value input case test
env_vars = [k8s.V1EnvVar(name="FOO", value="bar"), k8s.V1EnvVar(name="BAZ", value="qux")]
result = convert_env_vars(env_vars)
assert result == env_vars
# testcase of convert_env_vars_or_raise_error() function
def test_convert_env_vars_or_raise_error_normal_value():
env_vars_dict = {"ENV1": "value1", "ENV2": "value2"}
result = convert_env_vars_or_raise_error(env_vars_dict)
assert isinstance(result, list)
assert len(result) == 2
assert result[0].name == "ENV1"
assert result[0].value == "value1"
assert result[1].name == "ENV2"
assert result[1].value == "value2"
def test_convert_env_vars_or_raise_error_list_value():
env_vars_list = [k8s.V1EnvVar(name="ENV1", value="value1"), k8s.V1EnvVar(name="ENV2", value="value2")]
result = convert_env_vars_or_raise_error(env_vars_list)
assert isinstance(result, list)
assert len(result) == 2
assert result[0].name == "ENV1"
assert result[0].value == "value1"
assert result[1].name == "ENV2"
assert result[1].value == "value2"
assert result == env_vars_list
def test_convert_env_vars_or_raise_error_invalid_type():
invalid_input = 123
with pytest.raises(AirflowException) as exc_info:
convert_env_vars_or_raise_error(invalid_input)
assert str(exc_info.value) == f"Expected dict or list, got {type(invalid_input)}"
# testcase of convert_pod_runtime_info_env() function
@patch("airflow.providers.cncf.kubernetes.backcompat.backwards_compat_converters._convert_kube_model_object")
def test_convert_pod_runtime_info_env_normal_value(mock_convert_kube_model_object):
mock_convert_kube_model_object.return_value = k8s.V1EnvVar(name="FOO", value="bar")
volume = Mock()
result = convert_pod_runtime_info_env(volume)
mock_convert_kube_model_object.assert_called_once_with(volume, k8s.V1EnvVar)
assert result.name == "FOO"
assert result.value == "bar"
# testcase of convert_image_pull_secrets() function
def test_convert_image_pull_secrets_normal_value():
image_pull_secrets = "secret1,secret2,secret3"
result = convert_image_pull_secrets(image_pull_secrets)
expected_result = [
k8s.V1LocalObjectReference(name="secret1"),
k8s.V1LocalObjectReference(name="secret2"),
k8s.V1LocalObjectReference(name="secret3"),
]
assert isinstance(result, list)
assert len(result) == 3
assert result == expected_result
def test_convert_image_pull_secrets_not_string():
image_pull_secrets = ["single_secret"]
result = convert_image_pull_secrets(image_pull_secrets)
assert isinstance(result, list)
assert len(result) == 1
assert result == image_pull_secrets
# testcase of convert_configmap() function
def test_convert_configmap_normal_value():
configmaps = "test-configmap"
result = convert_configmap(configmaps)
assert isinstance(result, k8s.V1EnvFromSource)
assert result.config_map_ref.name == "test-configmap"
# testcase of convert_affinity() function
@patch("airflow.providers.cncf.kubernetes.backcompat.backwards_compat_converters._convert_from_dict")
def test_convert_affinity_normal_value(mock_convert_from_dict):
affinity = {"some_key": "some_value"}
expected_result = Mock(k8s.V1Affinity)
mock_convert_from_dict.return_value = expected_result
result = convert_affinity(affinity)
mock_convert_from_dict.assert_called_once_with(affinity, k8s.V1Affinity)
assert result == expected_result
# testcase of convert_toleration() function
@patch("airflow.providers.cncf.kubernetes.backcompat.backwards_compat_converters._convert_from_dict")
def test_convert_toleration_normal_value(mock_convert_from_dict):
toleration = {
"key": "key",
"operator": "Equal",
"value": "value",
"effect": "NoExecute",
"toleration_seconds": 600,
}
expected_result = Mock(spec=k8s.V1Toleration)
mock_convert_from_dict.return_value = expected_result
result = convert_toleration(toleration)
mock_convert_from_dict.assert_called_once_with(toleration, k8s.V1Toleration)
assert isinstance(result, k8s.V1Toleration)
assert result == expected_result
| MockKubeModelObject |
python | crytic__slither | slither/tools/mutator/__main__.py | {
"start": 3932,
"end": 15074
} | class ____(argparse.Action): # pylint: disable=too-few-public-methods
def __call__(
self, parser: Any, *args: Any, **kwargs: Any
) -> None: # pylint: disable=signature-differs
checks = _get_mutators(None)
output_mutators(checks)
parser.exit()
# endregion
###################################################################################
###################################################################################
# region Main
###################################################################################
###################################################################################
def main() -> None: # pylint: disable=too-many-statements,too-many-branches,too-many-locals
    """Entry point for the mutation campaign.

    Times the test suite, derives a per-mutant timeout, then for every Solidity
    file mutates the matching contract(s) with each configured mutator and
    reports uncaught revert/comment/tweak mutants. Source files are backed up
    before mutation and restored afterwards (also on error or Ctrl+C).
    """
    args = parse_args()

    # arguments
    test_command: str = args.test_cmd
    test_directory: Optional[str] = args.test_dir
    paths_to_ignore: Optional[str] = args.ignore_dirs
    output_dir: Optional[str] = args.output_dir
    timeout: Optional[int] = args.timeout
    solc_remappings: Optional[str] = args.solc_remaps
    verbose: Optional[bool] = args.verbose
    mutators_to_run: Optional[List[str]] = args.mutators_to_run
    comprehensive_flag: Optional[bool] = args.comprehensive

    logger.info(blue(f"Starting mutation campaign in {args.codebase}"))

    # --ignore-dirs is passed as a bracketed, comma-separated string, e.g. "[a,b]"
    if paths_to_ignore:
        paths_to_ignore_list = paths_to_ignore.strip("][").split(",")
    else:
        paths_to_ignore_list = []

    contract_names: List[str] = []
    if args.contract_names:
        contract_names = args.contract_names.split(",")

    # get all the contracts as a list from given codebase
    sol_file_list: List[str] = get_sol_file_list(Path(args.codebase), paths_to_ignore_list)

    if not contract_names:
        logger.info(blue("Preparing to mutate files:\n- " + "\n- ".join(sol_file_list)))
    else:
        logger.info(blue("Preparing to mutate contracts:\n- " + "\n- ".join(contract_names)))

    # folder where backup files and uncaught mutants are saved
    if output_dir is None:
        output_dir = "./mutation_campaign"
    output_folder = Path(output_dir).resolve()
    # Start from a clean output folder so stale mutants from a prior run don't mix in.
    if output_folder.is_dir():
        shutil.rmtree(output_folder)

    # setting RR mutator as first mutator
    mutators_list = _get_mutators(mutators_to_run)

    # insert RR and CR in front of the list
    # (RR/CR run first so the lines they cover can be excluded from later mutators)
    CR_RR_list = []
    duplicate_list = mutators_list.copy()
    for M in duplicate_list:
        if M.NAME == "RR":
            mutators_list.remove(M)
            CR_RR_list.insert(0, M)
        elif M.NAME == "CR":
            mutators_list.remove(M)
            CR_RR_list.insert(1, M)
    mutators_list = CR_RR_list + mutators_list

    logger.info(blue("Timing tests.."))

    # run and time tests, abort if they're broken
    start_time = time.time()
    # no timeout or target_file during the first run, but be verbose on failure
    if not run_test_cmd(test_command, None, None, True):
        logger.error(red("Test suite fails with mutations, aborting"))
        return
    elapsed_time = round(time.time() - start_time)

    # set default timeout
    # default to twice as long as it usually takes to run the test suite
    if timeout is None:
        timeout = int(elapsed_time * 2)
    else:
        timeout = int(timeout)
        if timeout < elapsed_time:
            logger.info(
                red(
                    f"Provided timeout {timeout} is too short for tests that run in {elapsed_time} seconds"
                )
            )
            return

    logger.info(
        green(
            f"Test suite passes in {elapsed_time} seconds, commencing mutation campaign with a timeout of {timeout} seconds\n"
        )
    )

    # Keep a list of all already mutated contracts so we don't mutate them twice
    mutated_contracts: List[str] = []

    for filename in sol_file_list:  # pylint: disable=too-many-nested-blocks
        file_name = os.path.split(filename)[1].split(".sol")[0]
        # slither object
        sl = Slither(filename, **vars(args))
        # create a backup files
        files_dict = backup_source_file(sl.source_code, output_folder)
        # total revert/comment/tweak mutants that were generated and compiled
        total_mutant_counts = [0, 0, 0]
        # total uncaught revert/comment/tweak mutants
        uncaught_mutant_counts = [0, 0, 0]
        # lines those need not be mutated (taken from RR and CR)
        dont_mutate_lines = []

        # perform mutations on {target_contract} in file {file_name}
        # setup placeholder val to signal whether we need to skip if no target_contract is found
        skip_flag = "SLITHER_SKIP_MUTATIONS"
        target_contract = skip_flag if contract_names else ""

        try:
            # loop through all contracts in file_name
            for compilation_unit_of_main_file in sl.compilation_units:
                for contract in compilation_unit_of_main_file.contracts:
                    # explicit --contract-names match takes priority; otherwise match
                    # the contract whose name equals the file name (case-insensitive)
                    if contract.name in contract_names and contract.name not in mutated_contracts:
                        target_contract = contract
                        break
                    if not contract_names and contract.name.lower() == file_name.lower():
                        target_contract = contract
                        break

                # NOTE(review): these messages appear to have lost a filename
                # placeholder ("(unknown)") — confirm against upstream.
                if target_contract == "":
                    logger.info(
                        f"Cannot find contracts in file (unknown), try specifying them with --contract-names"
                    )
                    continue

                if target_contract == skip_flag:
                    continue

                if target_contract.is_interface:
                    logger.debug(f"Skipping mutations on interface (unknown)")
                    continue

                # Add our target to the mutation list
                mutated_contracts.append(target_contract.name)
                logger.info(blue(f"Mutating contract {target_contract}"))

                for M in mutators_list:
                    m = M(
                        compilation_unit_of_main_file,
                        int(timeout),
                        test_command,
                        test_directory,
                        target_contract,
                        solc_remappings,
                        verbose,
                        output_folder,
                        dont_mutate_lines,
                    )
                    # mutate() returns (total counts, uncaught counts, lines to skip later)
                    (total_counts, uncaught_counts, lines_list) = m.mutate()

                    # index 0 = revert (RR), 1 = comment (CR), 2 = everything else (tweaks)
                    if m.NAME == "RR":
                        total_mutant_counts[0] += total_counts[0]
                        uncaught_mutant_counts[0] += uncaught_counts[0]
                        if verbose:
                            logger.info(
                                magenta(
                                    f"Mutator {m.NAME} found {uncaught_counts[0]} uncaught revert mutants (out of {total_counts[0]} that compile)"
                                )
                            )
                    elif m.NAME == "CR":
                        total_mutant_counts[1] += total_counts[1]
                        uncaught_mutant_counts[1] += uncaught_counts[1]
                        if verbose:
                            logger.info(
                                magenta(
                                    f"Mutator {m.NAME} found {uncaught_counts[1]} uncaught comment mutants (out of {total_counts[1]} that compile)"
                                )
                            )
                    else:
                        total_mutant_counts[2] += total_counts[2]
                        uncaught_mutant_counts[2] += uncaught_counts[2]
                        if verbose:
                            logger.info(
                                magenta(
                                    f"Mutator {m.NAME} found {uncaught_counts[2]} uncaught tweak mutants (out of {total_counts[2]} that compile)"
                                )
                            )
                            logger.info(
                                magenta(
                                    f"Running total: found {uncaught_mutant_counts[2]} uncaught tweak mutants (out of {total_mutant_counts[2]} that compile)"
                                )
                            )

                    # lines already covered by RR/CR are skipped by later mutators,
                    # unless --comprehensive asks for every mutation anyway
                    dont_mutate_lines = lines_list
                    if comprehensive_flag:
                        dont_mutate_lines = []
        except Exception as e:  # pylint: disable=broad-except
            logger.error(e)
            transfer_and_delete(files_dict)
        except KeyboardInterrupt:
            # transfer and delete the backup files if interrupted
            logger.error("\nExecution interrupted by user (Ctrl + C). Cleaning up...")
            transfer_and_delete(files_dict)

        # transfer and delete the backup files
        transfer_and_delete(files_dict)

        if target_contract == skip_flag:
            logger.debug(f"No target contracts found in (unknown), skipping")
            continue

        # log results for this file
        logger.info(blue(f"Done mutating {target_contract}."))
        if total_mutant_counts[0] > 0:
            logger.info(
                magenta(
                    f"Revert mutants: {uncaught_mutant_counts[0]} uncaught of {total_mutant_counts[0]} ({100 * uncaught_mutant_counts[0]/total_mutant_counts[0]}%)"
                )
            )
        else:
            logger.info(magenta("Zero Revert mutants analyzed"))
        if total_mutant_counts[1] > 0:
            logger.info(
                magenta(
                    f"Comment mutants: {uncaught_mutant_counts[1]} uncaught of {total_mutant_counts[1]} ({100 * uncaught_mutant_counts[1]/total_mutant_counts[1]}%)"
                )
            )
        else:
            logger.info(magenta("Zero Comment mutants analyzed"))
        if total_mutant_counts[2] > 0:
            logger.info(
                magenta(
                    f"Tweak mutants: {uncaught_mutant_counts[2]} uncaught of {total_mutant_counts[2]} ({100 * uncaught_mutant_counts[2]/total_mutant_counts[2]}%)\n"
                )
            )
        else:
            logger.info(magenta("Zero Tweak mutants analyzed\n"))

        # Reset mutant counts before moving on to the next file
        total_mutant_counts[0] = 0
        total_mutant_counts[1] = 0
        total_mutant_counts[2] = 0
        uncaught_mutant_counts[0] = 0
        uncaught_mutant_counts[1] = 0
        uncaught_mutant_counts[2] = 0

    # Print the total time elapsed in a human-readable time format
    # (measured from just before the initial test-suite timing run)
    elapsed_time = round(time.time() - start_time)
    hours, remainder = divmod(elapsed_time, 3600)
    minutes, seconds = divmod(remainder, 60)
    if hours > 0:
        elapsed_string = f"{hours} {'hour' if hours == 1 else 'hours'}"
    elif minutes > 0:
        elapsed_string = f"{minutes} {'minute' if minutes == 1 else 'minutes'}"
    else:
        elapsed_string = f"{seconds} {'second' if seconds == 1 else 'seconds'}"

    logger.info(
        blue(f"Finished mutation testing assessment of '{args.codebase}' in {elapsed_string}\n")
    )
# endregion
| ListMutators |
python | doocs__leetcode | solution/1200-1299/1200.Minimum Absolute Difference/Solution.py | {
"start": 0,
"end": 221
class ____:
    def minimumAbsDifference(self, arr: List[int]) -> List[List[int]]:
        """Return every adjacent pair (after sorting) whose gap equals the minimum gap."""
        arr.sort()  # in-place, preserving the original's side effect on the caller's list
        gaps = [(b - a, a, b) for a, b in pairwise(arr)]
        smallest = min(g for g, _, _ in gaps)
        return [[a, b] for g, a, b in gaps if g == smallest]
| Solution |
python | python-poetry__poetry | src/poetry/console/commands/python/remove.py | {
"start": 633,
"end": 3446
class ____(Command):
    """CLI command ``poetry python remove``: delete Poetry-managed Python installations."""

    name = "python remove"

    arguments: ClassVar[list[Argument]] = [
        argument("python", "The python version to remove.", multiple=True)
    ]
    options: ClassVar[list[Option]] = [
        option(
            "free-threaded", "t", "Use free-threaded version if available.", flag=True
        ),
        option(
            "implementation",
            "i",
            "Python implementation to use. (cpython, pypy)",
            flag=False,
            default="cpython",
        ),
    ]

    description = (
        "Remove the specified Python version if managed by Poetry."
        " (<warning>experimental feature</warning>)"
    )

    @staticmethod
    def remove_python_installation(
        request: str, implementation: str, free_threaded: bool, io: IO
    ) -> int:
        """Remove a single managed installation matching ``request``.

        Returns 0 on success or when nothing is installed at the computed
        path; returns 1 for an unparsable or incomplete version, or when
        the installation directory could not be removed.
        """
        # A trailing "t" (e.g. "3.13t") selects the free-threaded build.
        if request.endswith("t"):
            free_threaded = True
            request = request[:-1]

        try:
            version = Version.parse(request)
        except (ValueError, InvalidVersionError):
            io.write_error_line(
                f"<error>Invalid Python version requested <b>{request}</></error>"
            )
            return 1

        # Removal must be unambiguous: require an exact X.Y.Z version.
        if version.minor is None or version.patch is None:
            io.write_error_line(
                f"<error>Invalid Python version requested <b>{request}</></error>\n"
            )
            io.write_error_line(
                "You need to provide an exact Python version in the format <c1>X.Y.Z</> to be removed.\n\n"
                "You can use <c1>poetry python list -m</> to list installed Poetry managed Python versions."
            )
            return 1

        request_title = get_request_title(request, implementation, free_threaded)
        path = PoetryPythonPathProvider.installation_dir(
            version, implementation, free_threaded
        )

        if path.exists():
            if io.is_verbose():
                io.write_line(f"Installation path: {path}")

            io.write(f"Removing installation {request_title} ... ")
            try:
                shutil.rmtree(path)
            except OSError as e:
                io.write("<fg=red>Failed</>\n")
                if io.is_verbose():
                    io.write_line(f"Failed to remove directory: {e}")
                # Bug fix: previously this fell through to also print "Done"
                # and return 0 even though the removal failed. Report failure.
                return 1
            io.write("<fg=green>Done</>\n")
        else:
            io.write_line(f"No installation was found at {path}.")

        return 0

    def handle(self) -> int:
        """Remove every requested version; exit code is the number of failed removals."""
        implementation = self.option("implementation").lower()
        free_threaded = self.option("free-threaded")

        result = 0
        for request in self.argument("python"):
            result += self.remove_python_installation(
                request, implementation, free_threaded, self.io
            )
        return result
| PythonRemoveCommand |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.