language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | huggingface__transformers | tests/models/wav2vec2/test_modeling_wav2vec2.py | {
"start": 26025,
"end": 42752
} | class ____(ModelTesterMixin, unittest.TestCase):
all_model_classes = (
(
Wav2Vec2ForCTC,
Wav2Vec2Model,
Wav2Vec2ForMaskedLM,
Wav2Vec2ForSequenceClassification,
Wav2Vec2ForPreTraining,
Wav2Vec2ForAudioFrameClassification,
Wav2Vec2ForXVector,
)
if is_torch_available()
else ()
)
def setUp(self):
self.model_tester = Wav2Vec2ModelTester(
self, conv_stride=(3, 3, 3), feat_extract_norm="layer", do_stable_layer_norm=True
)
self.config_tester = ConfigTester(self, config_class=Wav2Vec2Config, hidden_size=37)
def test_config(self):
self.config_tester.run_common_tests()
def test_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs)
@is_flaky(
description="The `codevector_idx` computed with `argmax()` in `Wav2Vec2GumbelVectorQuantizer.forward` is not stable."
)
def test_batching_equivalence(self):
super().test_batching_equivalence()
def test_model_with_adapter(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model_with_adapter(*config_and_inputs)
def test_model_with_adapter_proj_dim(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model_with_adapter_proj_dim(*config_and_inputs)
def test_model_with_attn_adapter(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model_with_attn_adapter(*config_and_inputs)
def test_batched_inference(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_batch_inference(*config_and_inputs)
def test_ctc_loss_inference(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_ctc_loss(*config_and_inputs)
def test_seq_classifier_loss_inference(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_seq_classifier_loss(*config_and_inputs)
def test_ctc_train(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_ctc_training(*config_and_inputs)
def test_seq_classifier_train(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_seq_classifier_training(*config_and_inputs)
def test_xvector_train(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_xvector_training(*config_and_inputs)
def test_labels_out_of_vocab(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_labels_out_of_vocab(*config_and_inputs)
@unittest.skip(reason="Model has no input_embeds")
def test_inputs_embeds(self):
pass
@unittest.skip(reason="Model has input_values instead of input_ids")
def test_forward_signature(self):
pass
@unittest.skip(reason="Model has no token embeddings")
def test_resize_tokens_embeddings(self):
pass
@unittest.skip(reason="Model has no input_embeds")
def test_model_get_set_embeddings(self):
pass
def test_retain_grad_hidden_states_attentions(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.output_hidden_states = True
config.output_attentions = True
# force eager attention to support output attentions
config._attn_implementation = "eager"
# no need to test all models as different heads yield the same functionality
model_class = self.all_model_classes[0]
model = model_class(config)
model.to(torch_device)
# set layer drop to 0
model.config.layerdrop = 0.0
input_values = inputs_dict["input_values"]
input_lengths = torch.tensor(
[input_values.shape[1] for _ in range(input_values.shape[0])], dtype=torch.long, device=torch_device
)
output_lengths = model._get_feat_extract_output_lengths(input_lengths)
labels = ids_tensor((input_values.shape[0], output_lengths[0] - 2), self.model_tester.vocab_size)
inputs_dict["attention_mask"] = torch.ones_like(inputs_dict["attention_mask"])
inputs_dict["labels"] = labels
outputs = model(**inputs_dict)
output = outputs[0]
# Encoder-/Decoder-only models
hidden_states = outputs.hidden_states[0]
attentions = outputs.attentions[0]
hidden_states.retain_grad()
attentions.retain_grad()
output.flatten()[0].backward(retain_graph=True)
self.assertIsNotNone(hidden_states.grad)
self.assertIsNotNone(attentions.grad)
# overwrite from test_modeling_common
def _mock_init_weights(self, module):
if hasattr(module, "weight") and module.weight is not None:
module.weight.fill_(3)
if hasattr(module, "weight_g") and module.weight_g is not None:
module.weight_g.data.fill_(3)
if hasattr(module, "weight_v") and module.weight_v is not None:
module.weight_v.data.fill_(3)
if hasattr(module, "bias") and module.bias is not None:
module.bias.fill_(3)
if hasattr(module, "codevectors") and module.codevectors is not None:
module.codevectors.data.fill_(3)
if hasattr(module, "masked_spec_embed") and module.masked_spec_embed is not None:
module.masked_spec_embed.data.fill_(3)
def test_model_for_pretraining(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
model = Wav2Vec2ForPreTraining(config).to(torch_device)
batch_size = inputs_dict["input_values"].shape[0]
feature_seq_length = int(model._get_feat_extract_output_lengths(inputs_dict["input_values"].shape[1]))
features_shape = (batch_size, feature_seq_length)
mask_time_indices = _compute_mask_indices(
features_shape,
model.config.mask_time_prob,
model.config.mask_time_length,
min_masks=2,
)
sampled_negative_indices = _sample_negative_indices(features_shape, 10, mask_time_indices)
mask_time_indices = torch.from_numpy(mask_time_indices).to(torch_device)
sampled_negative_indices = torch.from_numpy(sampled_negative_indices).to(torch_device)
loss = model(
inputs_dict["input_values"],
attention_mask=inputs_dict["attention_mask"],
mask_time_indices=mask_time_indices,
sampled_negative_indices=sampled_negative_indices,
).loss
# more losses
mask_time_indices[:, : mask_time_indices.shape[-1] // 2] = True
sampled_negative_indices = _sample_negative_indices(features_shape, 10, mask_time_indices.cpu().numpy())
sampled_negative_indices = torch.from_numpy(sampled_negative_indices).to(torch_device)
loss_more_masked = model(
inputs_dict["input_values"],
attention_mask=inputs_dict["attention_mask"],
mask_time_indices=mask_time_indices,
sampled_negative_indices=sampled_negative_indices,
).loss
# loss_more_masked has to be bigger or equal loss since more masked inputs have to be predicted
self.assertTrue(loss.item() <= loss_more_masked.item())
def test_mask_feature_prob_ctc(self):
model = Wav2Vec2ForCTC.from_pretrained(
"hf-internal-testing/tiny-random-wav2vec2", mask_feature_prob=0.2, mask_feature_length=2
)
model.to(torch_device).train()
processor = Wav2Vec2Processor.from_pretrained(
"hf-internal-testing/tiny-random-wav2vec2", return_attention_mask=True
)
batch_duration_in_seconds = [1, 3, 2, 6]
input_features = [np.random.random(16_000 * s) for s in batch_duration_in_seconds]
batch = processor(
input_features, padding=True, sampling_rate=processor.feature_extractor.sampling_rate, return_tensors="pt"
)
logits = model(
input_values=batch["input_values"].to(torch_device),
attention_mask=batch["attention_mask"].to(torch_device),
).logits
self.assertEqual(logits.shape, (4, 1498, 32))
def test_mask_time_prob_ctc(self):
model = Wav2Vec2ForCTC.from_pretrained(
"hf-internal-testing/tiny-random-wav2vec2", mask_time_prob=0.2, mask_time_length=2
)
model.to(torch_device).train()
processor = Wav2Vec2Processor.from_pretrained(
"hf-internal-testing/tiny-random-wav2vec2", return_attention_mask=True
)
batch_duration_in_seconds = [1, 3, 2, 6]
input_features = [np.random.random(16_000 * s) for s in batch_duration_in_seconds]
batch = processor(
input_features, padding=True, sampling_rate=processor.feature_extractor.sampling_rate, return_tensors="pt"
)
logits = model(
input_values=batch["input_values"].to(torch_device),
attention_mask=batch["attention_mask"].to(torch_device),
).logits
self.assertEqual(logits.shape, (4, 1498, 32))
def test_mask_time_feature_prob_ctc_single_batch(self):
model = Wav2Vec2ForCTC.from_pretrained(
"hf-internal-testing/tiny-random-wav2vec2",
mask_time_prob=0.2,
mask_feature_prob=0.2,
mask_time_length=2,
mask_feature_length=2,
)
model.to(torch_device).train()
processor = Wav2Vec2Processor.from_pretrained(
"hf-internal-testing/tiny-random-wav2vec2", return_attention_mask=True
)
batch_duration_in_seconds = [6]
input_features = [np.random.random(16_000 * s) for s in batch_duration_in_seconds]
batch = processor(
input_features, padding=True, sampling_rate=processor.feature_extractor.sampling_rate, return_tensors="pt"
)
logits = model(
input_values=batch["input_values"].to(torch_device),
attention_mask=batch["attention_mask"].to(torch_device),
).logits
self.assertEqual(logits.shape, (1, 1498, 32))
@unittest.skip(reason="Feed forward chunking is not implemented")
def test_feed_forward_chunking(self):
pass
def test_load_and_set_attn_adapter(self):
processor = Wav2Vec2Processor.from_pretrained(
"hf-internal-testing/tiny-random-wav2vec2", return_attention_mask=True
)
def get_logits(model, input_features):
model = model.to(torch_device)
batch = processor(
input_features,
padding=True,
sampling_rate=processor.feature_extractor.sampling_rate,
return_tensors="pt",
)
with torch.no_grad():
logits = model(
input_values=batch["input_values"].to(torch_device),
attention_mask=batch["attention_mask"].to(torch_device),
).logits
return logits
input_features = [np.random.random(16_000 * s) for s in [1, 3, 2, 6]]
model = Wav2Vec2ForCTC.from_pretrained("hf-internal-testing/tiny-random-wav2vec2-adapter", target_lang="it")
logits = get_logits(model, input_features)
model_2 = Wav2Vec2ForCTC.from_pretrained("hf-internal-testing/tiny-random-wav2vec2-adapter")
model_2.load_adapter("it")
logits_2 = get_logits(model_2, input_features)
torch.testing.assert_close(logits, logits_2, rtol=1e-3, atol=1e-3)
# test that loading adapter weights with mismatched vocab sizes can be loaded
def test_load_target_lang_with_mismatched_size(self):
processor = Wav2Vec2Processor.from_pretrained(
"hf-internal-testing/tiny-random-wav2vec2", return_attention_mask=True
)
def get_logits(model, input_features):
model = model.to(torch_device)
batch = processor(
input_features,
padding=True,
sampling_rate=processor.feature_extractor.sampling_rate,
return_tensors="pt",
)
with torch.no_grad():
logits = model(
input_values=batch["input_values"].to(torch_device),
attention_mask=batch["attention_mask"].to(torch_device),
).logits
return logits
input_features = [np.random.random(16_000 * s) for s in [1, 3, 2, 6]]
model = Wav2Vec2ForCTC.from_pretrained(
"hf-internal-testing/tiny-random-wav2vec2-adapter", target_lang="fr", ignore_mismatched_sizes=True
)
logits = get_logits(model, input_features)
model_2 = Wav2Vec2ForCTC.from_pretrained("hf-internal-testing/tiny-random-wav2vec2-adapter")
model_2.load_adapter("fr")
logits_2 = get_logits(model_2, input_features)
torch.testing.assert_close(logits, logits_2, rtol=1e-3, atol=1e-3)
def test_load_attn_adapter(self):
processor = Wav2Vec2Processor.from_pretrained(
"hf-internal-testing/tiny-random-wav2vec2", return_attention_mask=True
)
def get_logits(model, input_features):
model = model.to(torch_device)
batch = processor(
input_features,
padding=True,
sampling_rate=processor.feature_extractor.sampling_rate,
return_tensors="pt",
)
with torch.no_grad():
logits = model(
input_values=batch["input_values"].to(torch_device),
attention_mask=batch["attention_mask"].to(torch_device),
).logits
return logits
input_features = [np.random.random(16_000 * s) for s in [1, 3, 2, 6]]
model = Wav2Vec2ForCTC.from_pretrained("hf-internal-testing/tiny-random-wav2vec2", adapter_attn_dim=16)
with tempfile.TemporaryDirectory() as tempdir:
model.save_pretrained(tempdir)
model = Wav2Vec2ForCTC.from_pretrained(tempdir)
logits = get_logits(model, input_features)
adapter_weights = model._get_adapters()
# save safe weights
safe_filepath = os.path.join(tempdir, WAV2VEC2_ADAPTER_SAFE_FILE.format("eng"))
safe_save_file(adapter_weights, safe_filepath, metadata={"format": "pt"})
model.load_adapter("eng")
model.load_adapter("eng", use_safetensors=True)
with self.assertRaises(OSError):
model.load_adapter("eng", use_safetensors=False)
with self.assertRaises(Exception):
model.load_adapter("ita", use_safetensors=True)
logits_2 = get_logits(model, input_features)
torch.testing.assert_close(logits, logits_2, rtol=1e-3, atol=1e-3)
with tempfile.TemporaryDirectory() as tempdir:
model.save_pretrained(tempdir)
model = Wav2Vec2ForCTC.from_pretrained(tempdir)
logits = get_logits(model, input_features)
adapter_weights = model._get_adapters()
# save pt weights
pt_filepath = os.path.join(tempdir, WAV2VEC2_ADAPTER_PT_FILE.format("eng"))
torch.save(adapter_weights, pt_filepath)
model.load_adapter("eng")
model.load_adapter("eng", use_safetensors=False)
with self.assertRaises(OSError):
model.load_adapter("eng", use_safetensors=True)
logits_2 = get_logits(model, input_features)
torch.testing.assert_close(logits, logits_2, rtol=1e-3, atol=1e-3)
model = Wav2Vec2ForCTC.from_pretrained("hf-internal-testing/tiny-random-wav2vec2-adapter")
logits = get_logits(model, input_features)
model.load_adapter("eng")
model.load_adapter("eng", use_safetensors=False)
model.load_adapter("eng", use_safetensors=True)
logits_2 = get_logits(model, input_features)
torch.testing.assert_close(logits, logits_2, rtol=1e-3, atol=1e-3)
@slow
def test_model_from_pretrained(self):
model = Wav2Vec2Model.from_pretrained("facebook/wav2vec2-base-960h")
self.assertIsNotNone(model)
@require_torch
| Wav2Vec2RobustModelTest |
python | astropy__astropy | astropy/utils/parsing.py | {
"start": 3085,
"end": 4843
} | class ____:
"""Wrap a parser produced by ``ply.yacc.yacc``.
It provides a :meth:`parse` method that is thread-safe.
"""
def __init__(self, parser: LRParser) -> None:
self.parser = parser
self._lock = threading.RLock()
def parse(self, *args, **kwargs):
"""Run the wrapped parser, with a lock to ensure serialization."""
with self._lock:
return self.parser.parse(*args, **kwargs)
def yacc(tabmodule: str, package: str) -> ThreadSafeParser:
"""Create a parser from local variables.
It automatically compiles the parser in optimized mode, writing to
``tabmodule`` in the same directory as the calling file.
This function is thread-safe, and the returned parser is also thread-safe,
provided that it does not share a lexer with any other parser.
It is only intended to work with parsers defined within the calling
function, rather than at class or module scope.
Parameters
----------
tabmodule : str
Name for the file to write with the generated tables, if it does not
already exist (without ``.py`` suffix).
package : str
Name of a test package which should be run with pytest to regenerate
the output file. This is inserted into a comment in the generated
file.
"""
from astropy.extern.ply import yacc
caller_dir = Path(yacc.get_caller_module_dict(2)["__file__"]).parent
with _LOCK, _patch_ply_module(yacc, caller_dir / (tabmodule + ".py"), package):
parser = yacc.yacc(
tabmodule=tabmodule,
outputdir=caller_dir,
debug=False,
optimize=True,
write_tables=True,
)
return ThreadSafeParser(parser)
| ThreadSafeParser |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 1044178,
"end": 1044867
} | class ____(sgqlc.types.Type):
"""Autogenerated return type of
UpdateRepositoryWebCommitSignoffSetting
"""
__schema__ = github_schema
__field_names__ = ("client_mutation_id", "message", "repository")
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
"""A unique identifier for the client performing the mutation."""
message = sgqlc.types.Field(String, graphql_name="message")
"""A message confirming the result of updating the web commit signoff
setting.
"""
repository = sgqlc.types.Field("Repository", graphql_name="repository")
"""The updated repository."""
| UpdateRepositoryWebCommitSignoffSettingPayload |
python | pytorch__pytorch | torch/autograd/profiler_util.py | {
"start": 19030,
"end": 20089
} | class ____:
"""Helpers for FunctionEvent and FunctionEventAvg.
The subclass should define `*_time_total` and `count` attributes.
"""
cpu_time_str = _attr_formatter("cpu_time")
device_time_str = _attr_formatter("device_time")
cpu_time_total_str = _attr_formatter("cpu_time_total")
device_time_total_str = _attr_formatter("device_time_total")
self_cpu_time_total_str = _attr_formatter("self_cpu_time_total")
self_device_time_total_str = _attr_formatter("self_device_time_total")
@property
def cpu_time(self):
return 0.0 if self.count == 0 else 1.0 * self.cpu_time_total / self.count # type: ignore[attr-defined]
@property
def device_time(self):
return 0.0 if self.count == 0 else 1.0 * self.device_time_total / self.count # type: ignore[attr-defined]
@property
@deprecated(
"`cuda_time` is deprecated, please use `device_time` instead.",
category=FutureWarning,
)
def cuda_time(self): # To be deprecated
return self.device_time
| FormattedTimesMixin |
python | scipy__scipy | scipy/stats/_continuous_distns.py | {
"start": 395993,
"end": 403696
} | class ____(rv_continuous):
r"""A studentized range continuous random variable.
%(before_notes)s
See Also
--------
t: Student's t distribution
Notes
-----
The probability density function for `studentized_range` is:
.. math::
f(x; k, \nu) = \frac{k(k-1)\nu^{\nu/2}}{\Gamma(\nu/2)
2^{\nu/2-1}} \int_{0}^{\infty} \int_{-\infty}^{\infty}
s^{\nu} e^{-\nu s^2/2} \phi(z) \phi(sx + z)
[\Phi(sx + z) - \Phi(z)]^{k-2} \,dz \,ds
for :math:`x ≥ 0`, :math:`k > 1`, and :math:`\nu > 0`.
`studentized_range` takes ``k`` for :math:`k` and ``df`` for :math:`\nu`
as shape parameters.
When :math:`\nu` exceeds 100,000, an asymptotic approximation (infinite
degrees of freedom) is used to compute the cumulative distribution
function [4]_ and probability distribution function.
%(after_notes)s
References
----------
.. [1] "Studentized range distribution",
https://en.wikipedia.org/wiki/Studentized_range_distribution
.. [2] Batista, Ben Dêivide, et al. "Externally Studentized Normal Midrange
Distribution." Ciência e Agrotecnologia, vol. 41, no. 4, 2017, pp.
378-389., doi:10.1590/1413-70542017414047716.
.. [3] Harter, H. Leon. "Tables of Range and Studentized Range." The Annals
of Mathematical Statistics, vol. 31, no. 4, 1960, pp. 1122-1147.
JSTOR, www.jstor.org/stable/2237810. Accessed 18 Feb. 2021.
.. [4] Lund, R. E., and J. R. Lund. "Algorithm AS 190: Probabilities and
Upper Quantiles for the Studentized Range." Journal of the Royal
Statistical Society. Series C (Applied Statistics), vol. 32, no. 2,
1983, pp. 204-210. JSTOR, www.jstor.org/stable/2347300. Accessed 18
Feb. 2021.
Examples
--------
>>> import numpy as np
>>> from scipy.stats import studentized_range
>>> import matplotlib.pyplot as plt
>>> fig, ax = plt.subplots(1, 1)
Display the probability density function (``pdf``):
>>> k, df = 3, 10
>>> x = np.linspace(studentized_range.ppf(0.01, k, df),
... studentized_range.ppf(0.99, k, df), 100)
>>> ax.plot(x, studentized_range.pdf(x, k, df),
... 'r-', lw=5, alpha=0.6, label='studentized_range pdf')
Alternatively, the distribution object can be called (as a function)
to fix the shape, location and scale parameters. This returns a "frozen"
RV object holding the given parameters fixed.
Freeze the distribution and display the frozen ``pdf``:
>>> rv = studentized_range(k, df)
>>> ax.plot(x, rv.pdf(x), 'k-', lw=2, label='frozen pdf')
Check accuracy of ``cdf`` and ``ppf``:
>>> vals = studentized_range.ppf([0.001, 0.5, 0.999], k, df)
>>> np.allclose([0.001, 0.5, 0.999], studentized_range.cdf(vals, k, df))
True
Rather than using (``studentized_range.rvs``) to generate random variates,
which is very slow for this distribution, we can approximate the inverse
CDF using an interpolator, and then perform inverse transform sampling
with this approximate inverse CDF.
This distribution has an infinite but thin right tail, so we focus our
attention on the leftmost 99.9 percent.
>>> a, b = studentized_range.ppf([0, .999], k, df)
>>> a, b
0, 7.41058083802274
>>> from scipy.interpolate import interp1d
>>> rng = np.random.default_rng()
>>> xs = np.linspace(a, b, 50)
>>> cdf = studentized_range.cdf(xs, k, df)
# Create an interpolant of the inverse CDF
>>> ppf = interp1d(cdf, xs, fill_value='extrapolate')
# Perform inverse transform sampling using the interpolant
>>> r = ppf(rng.uniform(size=1000))
And compare the histogram:
>>> ax.hist(r, density=True, histtype='stepfilled', alpha=0.2)
>>> ax.legend(loc='best', frameon=False)
>>> plt.show()
"""
def _argcheck(self, k, df):
return (k > 1) & (df > 0)
def _shape_info(self):
ik = _ShapeInfo("k", False, (1, np.inf), (False, False))
idf = _ShapeInfo("df", False, (0, np.inf), (False, False))
return [ik, idf]
def _fitstart(self, data):
# Default is k=1, but that is not a valid value of the parameter.
return super()._fitstart(data, args=(2, 1))
def _munp(self, K, k, df):
cython_symbol = '_studentized_range_moment'
_a, _b = self._get_support()
# all three of these are used to create a numpy array so they must
# be the same shape.
def _single_moment(K, k, df):
log_const = _stats._studentized_range_pdf_logconst(k, df)
arg = [K, k, df, log_const]
usr_data = np.array(arg, float).ctypes.data_as(ctypes.c_void_p)
llc = LowLevelCallable.from_cython(_stats, cython_symbol, usr_data)
ranges = [(-np.inf, np.inf), (0, np.inf), (_a, _b)]
opts = dict(epsabs=1e-11, epsrel=1e-12)
return integrate.nquad(llc, ranges=ranges, opts=opts)[0]
ufunc = np.frompyfunc(_single_moment, 3, 1)
return np.asarray(ufunc(K, k, df), dtype=np.float64)[()]
def _pdf(self, x, k, df):
def _single_pdf(q, k, df):
# The infinite form of the PDF is derived from the infinite
# CDF.
if df < 100000:
cython_symbol = '_studentized_range_pdf'
log_const = _stats._studentized_range_pdf_logconst(k, df)
arg = [q, k, df, log_const]
usr_data = np.array(arg, float).ctypes.data_as(ctypes.c_void_p)
ranges = [(-np.inf, np.inf), (0, np.inf)]
else:
cython_symbol = '_studentized_range_pdf_asymptotic'
arg = [q, k]
usr_data = np.array(arg, float).ctypes.data_as(ctypes.c_void_p)
ranges = [(-np.inf, np.inf)]
llc = LowLevelCallable.from_cython(_stats, cython_symbol, usr_data)
opts = dict(epsabs=1e-11, epsrel=1e-12)
return integrate.nquad(llc, ranges=ranges, opts=opts)[0]
ufunc = np.frompyfunc(_single_pdf, 3, 1)
return np.asarray(ufunc(x, k, df), dtype=np.float64)[()]
def _cdf(self, x, k, df):
def _single_cdf(q, k, df):
# "When the degrees of freedom V are infinite the probability
# integral takes [on a] simpler form," and a single asymptotic
# integral is evaluated rather than the standard double integral.
# (Lund, Lund, page 205)
if df < 100000:
cython_symbol = '_studentized_range_cdf'
log_const = _stats._studentized_range_cdf_logconst(k, df)
arg = [q, k, df, log_const]
usr_data = np.array(arg, float).ctypes.data_as(ctypes.c_void_p)
ranges = [(-np.inf, np.inf), (0, np.inf)]
else:
cython_symbol = '_studentized_range_cdf_asymptotic'
arg = [q, k]
usr_data = np.array(arg, float).ctypes.data_as(ctypes.c_void_p)
ranges = [(-np.inf, np.inf)]
llc = LowLevelCallable.from_cython(_stats, cython_symbol, usr_data)
opts = dict(epsabs=1e-11, epsrel=1e-12)
return integrate.nquad(llc, ranges=ranges, opts=opts)[0]
ufunc = np.frompyfunc(_single_cdf, 3, 1)
# clip p-values to ensure they are in [0, 1].
return np.clip(np.asarray(ufunc(x, k, df), dtype=np.float64)[()], 0, 1)
studentized_range = studentized_range_gen(name='studentized_range', a=0,
b=np.inf)
| studentized_range_gen |
python | huggingface__transformers | src/transformers/models/fnet/modeling_fnet.py | {
"start": 13286,
"end": 13658
} | class ____(nn.Module):
def __init__(self, config):
super().__init__()
self.predictions = FNetLMPredictionHead(config)
def forward(self, sequence_output):
prediction_scores = self.predictions(sequence_output)
return prediction_scores
# Copied from transformers.models.bert.modeling_bert.BertOnlyNSPHead with Bert->FNet
| FNetOnlyMLMHead |
python | public-apis__public-apis | scripts/tests/test_validate_links.py | {
"start": 462,
"end": 5725
} | class ____(unittest.TestCase):
def setUp(self):
self.duplicate_links = [
'https://www.example.com',
'https://www.example.com',
'https://www.example.com',
'https://www.anotherexample.com',
]
self.no_duplicate_links = [
'https://www.firstexample.com',
'https://www.secondexample.com',
'https://www.anotherexample.com',
]
self.code_200 = 200
self.code_403 = 403
self.code_503 = 503
self.cloudflare_headers = {'Server': 'cloudflare'}
self.no_cloudflare_headers = {'Server': 'google'}
self.text_with_cloudflare_flags = '403 Forbidden Cloudflare We are checking your browser...'
self.text_without_cloudflare_flags = 'Lorem Ipsum'
def test_find_link_in_text(self):
text = """
# this is valid
http://example.com?param1=1¶m2=2#anchor
https://www.example.com?param1=1¶m2=2#anchor
https://www.example.com.br
https://www.example.com.gov.br
[Example](https://www.example.com?param1=1¶m2=2#anchor)
lorem ipsum https://www.example.com?param1=1¶m2=2#anchor
https://www.example.com?param1=1¶m2=2#anchor lorem ipsum
# this not is valid
example.com
https:example.com
https:/example.com
https//example.com
https//.com
"""
links = find_links_in_text(text)
self.assertIsInstance(links, list)
self.assertEqual(len(links), 7)
for link in links:
with self.subTest():
self.assertIsInstance(link, str)
def test_find_link_in_text_with_invalid_argument(self):
with self.assertRaises(TypeError):
find_links_in_text()
find_links_in_text(1)
find_links_in_text(True)
def test_if_check_duplicate_links_has_the_correct_return(self):
result_1 = check_duplicate_links(self.duplicate_links)
result_2 = check_duplicate_links(self.no_duplicate_links)
self.assertIsInstance(result_1, tuple)
self.assertIsInstance(result_2, tuple)
has_duplicate_links, links = result_1
no_duplicate_links, no_links = result_2
self.assertTrue(has_duplicate_links)
self.assertFalse(no_duplicate_links)
self.assertIsInstance(links, list)
self.assertIsInstance(no_links, list)
self.assertEqual(len(links), 2)
self.assertEqual(len(no_links), 0)
def test_if_fake_user_agent_has_a_str_as_return(self):
user_agent = fake_user_agent()
self.assertIsInstance(user_agent, str)
def test_get_host_from_link(self):
links = [
'example.com',
'https://example.com',
'https://www.example.com',
'https://www.example.com.br',
'https://www.example.com/route',
'https://www.example.com?p=1&q=2',
'https://www.example.com#anchor'
]
for link in links:
host = get_host_from_link(link)
with self.subTest():
self.assertIsInstance(host, str)
self.assertNotIn('://', host)
self.assertNotIn('/', host)
self.assertNotIn('?', host)
self.assertNotIn('#', host)
with self.assertRaises(TypeError):
get_host_from_link()
def test_has_cloudflare_protection_with_code_403_and_503_in_response(self):
resp_with_cloudflare_protection_code_403 = FakeResponse(
code=self.code_403,
headers=self.cloudflare_headers,
text=self.text_with_cloudflare_flags
)
resp_with_cloudflare_protection_code_503 = FakeResponse(
code=self.code_503,
headers=self.cloudflare_headers,
text=self.text_with_cloudflare_flags
)
result1 = has_cloudflare_protection(resp_with_cloudflare_protection_code_403)
result2 = has_cloudflare_protection(resp_with_cloudflare_protection_code_503)
self.assertTrue(result1)
self.assertTrue(result2)
def test_has_cloudflare_protection_when_there_is_no_protection(self):
resp_without_cloudflare_protection1 = FakeResponse(
code=self.code_200,
headers=self.no_cloudflare_headers,
text=self.text_without_cloudflare_flags
)
resp_without_cloudflare_protection2 = FakeResponse(
code=self.code_403,
headers=self.no_cloudflare_headers,
text=self.text_without_cloudflare_flags
)
resp_without_cloudflare_protection3 = FakeResponse(
code=self.code_503,
headers=self.no_cloudflare_headers,
text=self.text_without_cloudflare_flags
)
result1 = has_cloudflare_protection(resp_without_cloudflare_protection1)
result2 = has_cloudflare_protection(resp_without_cloudflare_protection2)
result3 = has_cloudflare_protection(resp_without_cloudflare_protection3)
self.assertFalse(result1)
self.assertFalse(result2)
self.assertFalse(result3)
| TestValidateLinks |
python | Textualize__textual | src/textual/css/_style_properties.py | {
"start": 23788,
"end": 26455
} | class ____:
"""Descriptor for getting and setting the offset property.
Offset consists of two values, x and y, that a widget's position
will be adjusted by before it is rendered.
"""
def __set_name__(self, owner: StylesBase, name: str) -> None:
self.name = name
def __get__(
self, obj: StylesBase, objtype: type[StylesBase] | None = None
) -> ScalarOffset:
"""Get the offset.
Args:
obj: The ``Styles`` object.
objtype: The ``Styles`` class.
Returns:
The ``ScalarOffset`` indicating the adjustment that
will be made to widget position prior to it being rendered.
"""
return obj.get_rule(self.name, NULL_SCALAR) # type: ignore[return-value]
def __set__(
self, obj: StylesBase, offset: tuple[int | str, int | str] | ScalarOffset | None
):
"""Set the offset.
Args:
obj: The ``Styles`` class.
offset: A ScalarOffset object, or a 2-tuple of the form ``(x, y)`` indicating
the x and y offsets. When the ``tuple`` form is used, x and y can be specified
as either ``int`` or ``str``. The string format allows you to also specify
any valid scalar unit e.g. ``("0.5vw", "0.5vh")``.
Raises:
ScalarParseError: If any of the string values supplied in the 2-tuple cannot
be parsed into a Scalar. For example, if you specify a non-existent unit.
"""
_rich_traceback_omit = True
if offset is None:
if obj.clear_rule(self.name):
obj.refresh(layout=True, repaint=False)
elif isinstance(offset, ScalarOffset):
if obj.set_rule(self.name, offset):
obj.refresh(layout=True, repaint=False)
else:
x, y = offset
try:
scalar_x = (
Scalar.parse(x, Unit.WIDTH)
if isinstance(x, str)
else Scalar(float(x), Unit.CELLS, Unit.WIDTH)
)
scalar_y = (
Scalar.parse(y, Unit.HEIGHT)
if isinstance(y, str)
else Scalar(float(y), Unit.CELLS, Unit.HEIGHT)
)
except ScalarParseError as error:
raise StyleValueError(
str(error), help_text=offset_property_help_text(context="inline")
)
_offset = ScalarOffset(scalar_x, scalar_y)
if obj.set_rule(self.name, _offset):
obj.refresh(layout=True, repaint=False)
| OffsetProperty |
python | python-openxml__python-docx | src/docx/oxml/table.py | {
"start": 13682,
"end": 27628
class ____(BaseOxmlElement):
    """`w:tc` table cell element."""

    # -- NOTE(review): these annotations describe attributes generated from the
    # -- ZeroOrOne/OneOrMore declarations below; confirm against BaseOxmlElement's
    # -- metaclass behavior.
    add_p: Callable[[], CT_P]
    get_or_add_tcPr: Callable[[], CT_TcPr]
    p_lst: list[CT_P]
    tbl_lst: list[CT_Tbl]
    _insert_tbl: Callable[[CT_Tbl], CT_Tbl]
    _new_p: Callable[[], CT_P]

    # -- tcPr has many successors, `._insert_tcPr()` is overridden below --
    tcPr: CT_TcPr | None = ZeroOrOne("w:tcPr")  # pyright: ignore[reportAssignmentType]
    p = OneOrMore("w:p")
    tbl = OneOrMore("w:tbl")

    @property
    def bottom(self) -> int:
        """The row index that marks the bottom extent of the vertical span of this cell.

        This is one greater than the index of the bottom-most row of the span, similar
        to how a slice of the cell's rows would be specified.
        """
        if self.vMerge is not None:
            # -- part of a vertical span: recurse downward while the cell below
            # -- continues the merge --
            tc_below = self._tc_below
            if tc_below is not None and tc_below.vMerge == ST_Merge.CONTINUE:
                return tc_below.bottom
        return self._tr_idx + 1

    def clear_content(self):
        """Remove all content elements, preserving `w:tcPr` element if present.

        Note that this leaves the `w:tc` element in an invalid state because it doesn't
        contain at least one block-level element. It's up to the caller to add a
        `w:p`child element as the last content element.
        """
        # -- remove all cell inner-content except a `w:tcPr` when present. --
        for e in self.xpath("./*[not(self::w:tcPr)]"):
            self.remove(e)

    @property
    def grid_offset(self) -> int:
        """Starting offset of `tc` in the layout-grid columns of its table.

        A cell in the leftmost grid-column has offset 0.
        """
        # -- row-level grid_before skips leading grid columns; each earlier cell
        # -- in this row then occupies `grid_span` columns --
        grid_before = self._tr.grid_before
        preceding_tc_grid_spans = sum(
            tc.grid_span for tc in self.xpath("./preceding-sibling::w:tc")
        )
        return grid_before + preceding_tc_grid_spans

    @property
    def grid_span(self) -> int:
        """The integer number of columns this cell spans.

        Determined by ./w:tcPr/w:gridSpan/@val, it defaults to 1.
        """
        tcPr = self.tcPr
        return 1 if tcPr is None else tcPr.grid_span

    @grid_span.setter
    def grid_span(self, value: int):
        tcPr = self.get_or_add_tcPr()
        tcPr.grid_span = value

    @property
    def inner_content_elements(self) -> list[CT_P | CT_Tbl]:
        """Generate all `w:p` and `w:tbl` elements in this document-body.

        Elements appear in document order. Elements shaded by nesting in a `w:ins` or
        other "wrapper" element will not be included.
        """
        return self.xpath("./w:p | ./w:tbl")

    def iter_block_items(self):
        """Generate a reference to each of the block-level content elements in this
        cell, in the order they appear."""
        block_item_tags = (qn("w:p"), qn("w:tbl"), qn("w:sdt"))
        for child in self:
            if child.tag in block_item_tags:
                yield child

    @property
    def left(self) -> int:
        """The grid column index at which this ``<w:tc>`` element appears."""
        return self.grid_offset

    def merge(self, other_tc: CT_Tc) -> CT_Tc:
        """Return top-left `w:tc` element of a new span.

        Span is formed by merging the rectangular region defined by using this tc
        element and `other_tc` as diagonal corners.
        """
        top, left, height, width = self._span_dimensions(other_tc)
        top_tc = self._tbl.tr_lst[top].tc_at_grid_offset(left)
        top_tc._grow_to(width, height)
        return top_tc

    @classmethod
    def new(cls) -> CT_Tc:
        """A new `w:tc` element, containing an empty paragraph as the required EG_BlockLevelElt."""
        return cast(CT_Tc, parse_xml("<w:tc %s><w:p/></w:tc>" % nsdecls("w")))

    @property
    def right(self) -> int:
        """The grid column index that marks the right-side extent of the horizontal span
        of this cell.

        This is one greater than the index of the right-most column of the span, similar
        to how a slice of the cell's columns would be specified.
        """
        return self.grid_offset + self.grid_span

    @property
    def top(self) -> int:
        """The top-most row index in the vertical span of this cell."""
        if self.vMerge is None or self.vMerge == ST_Merge.RESTART:
            return self._tr_idx
        # -- a CONTINUE cell: the span starts somewhere above --
        return self._tc_above.top

    @property
    def vMerge(self) -> str | None:
        """Value of ./w:tcPr/w:vMerge/@val, |None| if w:vMerge is not present."""
        tcPr = self.tcPr
        if tcPr is None:
            return None
        return tcPr.vMerge_val

    @vMerge.setter
    def vMerge(self, value: str | None):
        tcPr = self.get_or_add_tcPr()
        tcPr.vMerge_val = value

    @property
    def width(self) -> Length | None:
        """EMU length represented in `./w:tcPr/w:tcW` or |None| if not present."""
        tcPr = self.tcPr
        if tcPr is None:
            return None
        return tcPr.width

    @width.setter
    def width(self, value: Length):
        tcPr = self.get_or_add_tcPr()
        tcPr.width = value

    def _add_width_of(self, other_tc: CT_Tc):
        """Add the width of `other_tc` to this cell.

        Does nothing if either this tc or `other_tc` does not have a specified width.
        """
        if self.width and other_tc.width:
            self.width = Length(self.width + other_tc.width)

    def _grow_to(self, width: int, height: int, top_tc: CT_Tc | None = None):
        """Grow this cell to `width` grid columns and `height` rows.

        This is accomplished by expanding horizontal spans and creating continuation
        cells to form vertical spans.
        """

        def vMerge_val(top_tc: CT_Tc):
            # -- the origin cell gets RESTART (or no vMerge at all for a
            # -- one-row span); every cell below it gets CONTINUE --
            return (
                ST_Merge.CONTINUE
                if top_tc is not self
                else None
                if height == 1
                else ST_Merge.RESTART
            )

        top_tc = self if top_tc is None else top_tc
        self._span_to_width(width, top_tc, vMerge_val(top_tc))
        if height > 1:
            # -- recurse one row at a time to build the vertical span --
            tc_below = self._tc_below
            assert tc_below is not None
            tc_below._grow_to(width, height - 1, top_tc)

    def _insert_tcPr(self, tcPr: CT_TcPr) -> CT_TcPr:
        """Override default `._insert_tcPr()`."""
        # -- `tcPr`` has a large number of successors, but always comes first if it appears,
        # -- so just using insert(0, ...) rather than spelling out successors.
        self.insert(0, tcPr)
        return tcPr

    @property
    def _is_empty(self) -> bool:
        """True if this cell contains only a single empty `w:p` element."""
        block_items = list(self.iter_block_items())
        if len(block_items) > 1:
            return False
        # -- cell must include at least one block item but can be a `w:tbl`, `w:sdt`,
        # -- `w:customXml` or a `w:p`
        only_item = block_items[0]
        # -- "empty" means a paragraph with no runs --
        return isinstance(only_item, CT_P) and len(only_item.r_lst) == 0

    def _move_content_to(self, other_tc: CT_Tc):
        """Append the content of this cell to `other_tc`.

        Leaves this cell with a single empty ``<w:p>`` element.
        """
        if other_tc is self:
            return
        if self._is_empty:
            return
        other_tc._remove_trailing_empty_p()
        # -- appending moves each element from self to other_tc --
        for block_element in self.iter_block_items():
            other_tc.append(block_element)
        # -- add back the required minimum single empty <w:p> element --
        self.append(self._new_p())

    def _new_tbl(self) -> None:
        # -- NOTE(review): deliberately blocks the generated element-adder;
        # -- a new table requires explicit row/column counts --
        raise NotImplementedError(
            "use CT_Tbl.new_tbl() to add a new table, specifying rows and columns"
        )

    @property
    def _next_tc(self) -> CT_Tc | None:
        """The `w:tc` element immediately following this one in this row, or |None| if
        this is the last `w:tc` element in the row."""
        following_tcs = self.xpath("./following-sibling::w:tc")
        return following_tcs[0] if following_tcs else None

    def _remove(self):
        """Remove this `w:tc` element from the XML tree."""
        parent_element = self.getparent()
        assert parent_element is not None
        parent_element.remove(self)

    def _remove_trailing_empty_p(self):
        """Remove last content element from this cell if it's an empty `w:p` element."""
        block_items = list(self.iter_block_items())
        last_content_elm = block_items[-1]
        if not isinstance(last_content_elm, CT_P):
            return
        p = last_content_elm
        if len(p.r_lst) > 0:
            return
        self.remove(p)

    def _span_dimensions(self, other_tc: CT_Tc) -> tuple[int, int, int, int]:
        """Return a (top, left, height, width) 4-tuple specifying the extents of the
        merged cell formed by using this tc and `other_tc` as opposite corner
        extents."""

        def raise_on_inverted_L(a: CT_Tc, b: CT_Tc):
            # -- two cells sharing one edge of a span axis but not the opposite
            # -- edge would form an "L", not a rectangle --
            if a.top == b.top and a.bottom != b.bottom:
                raise InvalidSpanError("requested span not rectangular")
            if a.left == b.left and a.right != b.right:
                raise InvalidSpanError("requested span not rectangular")

        def raise_on_tee_shaped(a: CT_Tc, b: CT_Tc):
            # -- one cell's span strictly containing the other's on an axis
            # -- would form a "T", not a rectangle --
            top_most, other = (a, b) if a.top < b.top else (b, a)
            if top_most.top < other.top and top_most.bottom > other.bottom:
                raise InvalidSpanError("requested span not rectangular")
            left_most, other = (a, b) if a.left < b.left else (b, a)
            if left_most.left < other.left and left_most.right > other.right:
                raise InvalidSpanError("requested span not rectangular")

        raise_on_inverted_L(self, other_tc)
        raise_on_tee_shaped(self, other_tc)

        top = min(self.top, other_tc.top)
        left = min(self.left, other_tc.left)
        bottom = max(self.bottom, other_tc.bottom)
        right = max(self.right, other_tc.right)

        return top, left, bottom - top, right - left

    def _span_to_width(self, grid_width: int, top_tc: CT_Tc, vMerge: str | None):
        """Incorporate `w:tc` elements to the right until this cell spans `grid_width`.

        Incorporated `w:tc` elements are removed (replaced by gridSpan value).

        Raises |ValueError| if `grid_width` cannot be exactly achieved, such as when a
        merged cell would drive the span width greater than `grid_width` or if not
        enough grid columns are available to make this cell that wide. All content from
        incorporated cells is appended to `top_tc`. The val attribute of the vMerge
        element on the single remaining cell is set to `vMerge`. If `vMerge` is |None|,
        the vMerge element is removed if present.
        """
        self._move_content_to(top_tc)
        while self.grid_span < grid_width:
            self._swallow_next_tc(grid_width, top_tc)
        self.vMerge = vMerge

    def _swallow_next_tc(self, grid_width: int, top_tc: CT_Tc):
        """Extend the horizontal span of this `w:tc` element to incorporate the
        following `w:tc` element in the row and then delete that following `w:tc`
        element.

        Any content in the following `w:tc` element is appended to the content of
        `top_tc`. The width of the following `w:tc` element is added to this one, if
        present. Raises |InvalidSpanError| if the width of the resulting cell is greater
        than `grid_width` or if there is no next `<w:tc>` element in the row.
        """

        def raise_on_invalid_swallow(next_tc: CT_Tc | None):
            if next_tc is None:
                raise InvalidSpanError("not enough grid columns")
            if self.grid_span + next_tc.grid_span > grid_width:
                raise InvalidSpanError("span is not rectangular")

        next_tc = self._next_tc
        raise_on_invalid_swallow(next_tc)
        assert next_tc is not None
        next_tc._move_content_to(top_tc)
        self._add_width_of(next_tc)
        self.grid_span += next_tc.grid_span
        next_tc._remove()

    @property
    def _tbl(self) -> CT_Tbl:
        """The tbl element this tc element appears in."""
        return cast(CT_Tbl, self.xpath("./ancestor::w:tbl[position()=1]")[0])

    @property
    def _tc_above(self) -> CT_Tc:
        """The `w:tc` element immediately above this one in its grid column."""
        return self._tr_above.tc_at_grid_offset(self.grid_offset)

    @property
    def _tc_below(self) -> CT_Tc | None:
        """The tc element immediately below this one in its grid column."""
        tr_below = self._tr_below
        if tr_below is None:
            return None
        return tr_below.tc_at_grid_offset(self.grid_offset)

    @property
    def _tr(self) -> CT_Row:
        """The tr element this tc element appears in."""
        return cast(CT_Row, self.xpath("./ancestor::w:tr[position()=1]")[0])

    @property
    def _tr_above(self) -> CT_Row:
        """The tr element prior in sequence to the tr this cell appears in.

        Raises |ValueError| if called on a cell in the top-most row.
        """
        tr_aboves = self.xpath("./ancestor::w:tr[position()=1]/preceding-sibling::w:tr[1]")
        if not tr_aboves:
            raise ValueError("no tr above topmost tr in w:tbl")
        return tr_aboves[0]

    @property
    def _tr_below(self) -> CT_Row | None:
        """The tr element next in sequence after the tr this cell appears in, or |None|
        if this cell appears in the last row."""
        tr_lst = self._tbl.tr_lst
        tr_idx = tr_lst.index(self._tr)
        try:
            return tr_lst[tr_idx + 1]
        except IndexError:
            return None

    @property
    def _tr_idx(self) -> int:
        """The row index of the tr element this tc element appears in."""
        return self._tbl.tr_lst.index(self._tr)
| CT_Tc |
python | davidhalter__parso | parso/normalizer.py | {
"start": 3132,
"end": 3354
class ____:
    """Configuration object that knows how to build its own normalizer."""

    # Normalizer class instantiated by `create_normalizer`; None disables it.
    normalizer_class = Normalizer

    def create_normalizer(self, grammar):
        """Return a new normalizer bound to `grammar`, or None when disabled."""
        cls = self.normalizer_class
        return None if cls is None else cls(grammar, self)
| NormalizerConfig |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/operators/cloud_memorystore.py | {
"start": 21100,
"end": 25184
class ____(GoogleCloudBaseOperator):
    """
    Import a Redis RDB snapshot file from Cloud Storage into a Redis instance.

    Redis may stop serving during this operation. Instance state will be IMPORTING for entire operation. When
    complete, the instance will contain only data from the imported file.

    .. seealso::
        For more information on how to use this operator, take a look at the guide:
        :ref:`howto/operator:CloudMemorystoreImportOperator`

    :param location: The location of the Cloud Memorystore instance (for example europe-west1)
    :param instance: The logical name of the Redis instance in the customer project.
    :param input_config: Required. Specify data to be imported.

        If a dict is provided, it must be of the same form as the protobuf message
        :class:`~google.cloud.redis_v1.types.InputConfig`
    :param project_id: Project ID of the project that contains the instance. If set
        to None or missing, the default project_id from the Google Cloud connection is used.
    :param retry: A retry object used to retry requests. If ``None`` is specified, requests will not be
        retried.
    :param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
        ``retry`` is specified, the timeout applies to each individual attempt.
    :param metadata: Additional metadata that is provided to the method.
    :param gcp_conn_id: The connection ID to use connecting to Google Cloud.
    :param impersonation_chain: Optional service account to impersonate using short-term
        credentials, or chained list of accounts required to get the access_token
        of the last account in the list, which will be impersonated in the request.
        If set as a string, the account must grant the originating account
        the Service Account Token Creator IAM role.
        If set as a sequence, the identities from the list must grant
        Service Account Token Creator IAM role to the directly preceding identity, with first
        account from the list granting this role to the originating account (templated).
    """

    # -- every constructor argument is templated so each can be rendered with
    # -- Jinja at task-instance time --
    template_fields: Sequence[str] = (
        "location",
        "instance",
        "input_config",
        "project_id",
        "retry",
        "timeout",
        "metadata",
        "gcp_conn_id",
        "impersonation_chain",
    )
    operator_extra_links = (RedisInstanceDetailsLink(),)

    def __init__(
        self,
        *,
        location: str,
        instance: str,
        input_config: dict | InputConfig,
        project_id: str = PROVIDE_PROJECT_ID,
        retry: Retry | _MethodDefault = DEFAULT,
        timeout: float | None = None,
        metadata: Sequence[tuple[str, str]] = (),
        gcp_conn_id: str = "google_cloud_default",
        impersonation_chain: str | Sequence[str] | None = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.location = location
        self.instance = instance
        self.input_config = input_config
        self.project_id = project_id
        self.retry = retry
        self.timeout = timeout
        self.metadata = metadata
        self.gcp_conn_id = gcp_conn_id
        self.impersonation_chain = impersonation_chain

    @property
    def extra_links_params(self) -> dict[str, Any]:
        # -- NOTE(review): presumably consumed by RedisInstanceDetailsLink to
        # -- build the console URL; confirm against the link implementation --
        return {
            "instance_id": self.instance,
            "location_id": self.location,
        }

    def execute(self, context: Context) -> None:
        """Run the import via the hook, then persist the console link."""
        hook = CloudMemorystoreHook(
            gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain
        )
        hook.import_instance(
            location=self.location,
            instance=self.instance,
            input_config=self.input_config,
            project_id=self.project_id,
            retry=self.retry,
            timeout=self.timeout,
            metadata=self.metadata,
        )
        # -- fall back to the hook's (connection default) project when the
        # -- operator was not given an explicit project_id --
        RedisInstanceDetailsLink.persist(
            context=context,
            project_id=self.project_id or hook.project_id,
        )
| CloudMemorystoreImportOperator |
python | dateutil__dateutil | tests/test_tz.py | {
"start": 80718,
"end": 85384
class ____(unittest.TestCase, TzWinFoldMixin):
    """Tests for ``tzwin.tzwinlocal``, the Windows local-machine time zone.

    ``TZWinContext`` is used throughout to pin the system time zone so that
    the "local" zone is deterministic for each assertion.
    """

    def setUp(self):
        # -- hooks consumed by TzWinFoldMixin --
        self.tzclass = tzwin.tzwinlocal
        self.context = TZWinContext

    def get_args(self, tzname):
        # tzwinlocal is constructed without arguments, so the mixin gets an
        # empty argument tuple regardless of `tzname`.
        return ()

    def testLocal(self):
        # Not sure how to pin a local time zone, so for now we're just going
        # to run this and make sure it doesn't raise an error
        # See GitHub Issue #135: https://github.com/dateutil/dateutil/issues/135
        datetime.now(tzwin.tzwinlocal())

    def testTzwinLocalUTCOffset(self):
        # -- 2014-03-11 falls after the spring-forward transition, so the
        # -- expected offset is the DST offset (-4), not standard (-5) --
        with TZWinContext('Eastern Standard Time'):
            tzwl = tzwin.tzwinlocal()
            self.assertEqual(datetime(2014, 3, 11, tzinfo=tzwl).utcoffset(),
                             timedelta(hours=-4))

    def testTzwinLocalName(self):
        # https://github.com/dateutil/dateutil/issues/143
        ESTs = 'Eastern Standard Time'
        EDTs = 'Eastern Daylight Time'
        # -- datetimes straddling the 2015/2016 DST transitions, paired with
        # -- the tzname each should report --
        transition_dates = [(datetime(2015, 3, 8, 0, 59), ESTs),
                            (datetime(2015, 3, 8, 3, 1), EDTs),
                            (datetime(2015, 11, 1, 0, 59), EDTs),
                            (datetime(2015, 11, 1, 3, 1), ESTs),
                            (datetime(2016, 3, 13, 0, 59), ESTs),
                            (datetime(2016, 3, 13, 3, 1), EDTs),
                            (datetime(2016, 11, 6, 0, 59), EDTs),
                            (datetime(2016, 11, 6, 3, 1), ESTs)]

        with TZWinContext('Eastern Standard Time'):
            tw = tz.tzwinlocal()
            for t_date, expected in transition_dates:
                self.assertEqual(t_date.replace(tzinfo=tw).tzname(), expected)

    def testTzWinLocalRepr(self):
        # NOTE(review): name differs from testTzwinLocalRepr below only by
        # case -- this one checks repr(), the other checks str().
        tw = tz.tzwinlocal()
        self.assertEqual(repr(tw), 'tzwinlocal()')

    def testTzwinLocalRepr(self):
        # https://github.com/dateutil/dateutil/issues/143
        with TZWinContext('Eastern Standard Time'):
            tw = tz.tzwinlocal()
            self.assertEqual(str(tw), 'tzwinlocal(' +
                             repr('Eastern Standard Time') + ')')

        with TZWinContext('Pacific Standard Time'):
            tw = tz.tzwinlocal()
            self.assertEqual(str(tw), 'tzwinlocal(' +
                             repr('Pacific Standard Time') + ')')

    def testTzwinLocalEquality(self):
        tw_est = tz.tzwin('Eastern Standard Time')
        tw_pst = tz.tzwin('Pacific Standard Time')

        with TZWinContext('Eastern Standard Time'):
            twl1 = tz.tzwinlocal()
            twl2 = tz.tzwinlocal()

            # -- tzwinlocal instances compare equal to each other and to the
            # -- equivalently-named tzwin zone, but not to a different zone --
            self.assertEqual(twl1, twl2)
            self.assertEqual(twl1, tw_est)
            self.assertNotEqual(twl1, tw_pst)

        with TZWinContext('Pacific Standard Time'):
            twl1 = tz.tzwinlocal()
            twl2 = tz.tzwinlocal()

            tw = tz.tzwin('Pacific Standard Time')

            self.assertEqual(twl1, twl2)
            self.assertEqual(twl1, tw)
            self.assertEqual(twl1, tw_pst)
            self.assertNotEqual(twl1, tw_est)

    def testTzwinLocalTimeOnlyDST(self):
        # For zones with DST, .dst() should return None
        # (a bare time cannot disambiguate standard vs daylight)
        with TZWinContext('Eastern Standard Time'):
            twl = tz.tzwinlocal()
            self.assertIs(dt_time(14, 10, tzinfo=twl).dst(), None)

        # This zone has no DST, so .dst() can return 0
        with TZWinContext('South Africa Standard Time'):
            twl = tz.tzwinlocal()
            self.assertEqual(dt_time(14, 10, tzinfo=twl).dst(), timedelta(0))

    def testTzwinLocalTimeOnlyUTCOffset(self):
        # For zones with DST, .utcoffset() should return None
        with TZWinContext('Eastern Standard Time'):
            twl = tz.tzwinlocal()
            self.assertIs(dt_time(14, 10, tzinfo=twl).utcoffset(), None)

        # This zone has no DST, so .utcoffset() returns standard offset
        with TZWinContext('South Africa Standard Time'):
            twl = tz.tzwinlocal()
            self.assertEqual(dt_time(14, 10, tzinfo=twl).utcoffset(),
                             timedelta(hours=2))

    def testTzwinLocalTimeOnlyTZName(self):
        # For zones with DST, the name defaults to standard time
        with TZWinContext('Eastern Standard Time'):
            twl = tz.tzwinlocal()
            self.assertEqual(dt_time(14, 10, tzinfo=twl).tzname(),
                             'Eastern Standard Time')

        # For zones with no DST, this should work normally.
        with TZWinContext('South Africa Standard Time'):
            twl = tz.tzwinlocal()
            self.assertEqual(dt_time(14, 10, tzinfo=twl).tzname(),
                             'South Africa Standard Time')
| TzWinLocalTest |
python | sqlalchemy__sqlalchemy | examples/sharding/separate_tables.py | {
"start": 2959,
"end": 3354
class ____(Base):
    """A location (continent + city) that weather ``Report`` rows attach to."""

    __tablename__ = "_prefix__weather_locations"

    # NOTE(review): surrogate key; `id_generator` is defined elsewhere in this
    # sharding example -- presumably shard-aware, confirm against its definition.
    id: Mapped[int] = mapped_column(primary_key=True, default=id_generator)
    continent: Mapped[str]
    city: Mapped[str]

    # one-to-many; mirrored by Report.location via back_populates
    reports: Mapped[list[Report]] = relationship(back_populates="location")

    def __init__(self, continent: str, city: str):
        self.continent = continent
        self.city = city
| WeatherLocation |
python | apache__airflow | providers/apache/druid/src/airflow/providers/apache/druid/hooks/druid.py | {
"start": 1465,
"end": 7568
class ____(BaseHook):
    """
    Connection to Druid overlord for ingestion.

    To connect to a Druid cluster that is secured with the druid-basic-security
    extension, add the username and password to the druid ingestion connection.

    :param druid_ingest_conn_id: The connection id to the Druid overlord machine
        which accepts index jobs
    :param timeout: The interval between polling
        the Druid job for the status of the ingestion job.
        Must be greater than or equal to 1
    :param max_ingestion_time: The maximum ingestion time before assuming the job failed
    :param verify_ssl: Whether to use SSL encryption to submit indexing job. If set to False then checks
        connection information for path to a CA bundle to use. Defaults to True
    """

    def __init__(
        self,
        druid_ingest_conn_id: str = "druid_ingest_default",
        timeout: int = 1,
        max_ingestion_time: int | None = None,
        verify_ssl: bool = True,
    ) -> None:
        super().__init__()
        self.druid_ingest_conn_id = druid_ingest_conn_id
        self.timeout = timeout  # polling interval, in seconds
        self.max_ingestion_time = max_ingestion_time
        self.header = {"content-type": "application/json"}
        self.verify_ssl = verify_ssl

        if self.timeout < 1:
            raise ValueError("Druid timeout should be equal or greater than 1")

        # default path for task-status queries; can be overridden through the
        # connection extra "status_endpoint" (see get_status_url)
        self.status_endpoint = "druid/indexer/v1/task"

    @cached_property
    def conn(self) -> Connection:
        # resolved once on first access, then cached for the hook's lifetime
        return self.get_connection(self.druid_ingest_conn_id)  # type: ignore[return-value]

    @property
    def get_connection_type(self) -> str:
        """Scheme used to reach Druid; the connection's schema wins over conn_type."""
        if self.conn.schema:
            conn_type = self.conn.schema
        else:
            conn_type = self.conn.conn_type or "http"
        return conn_type

    def get_conn_url(self, ingestion_type: IngestionType = IngestionType.BATCH) -> str:
        """Get Druid connection url."""
        host = self.conn.host
        port = self.conn.port
        conn_type = self.get_connection_type
        # batch and MSQ jobs are submitted to different endpoints, both read
        # from the connection extras
        if ingestion_type == IngestionType.BATCH:
            endpoint = self.conn.extra_dejson.get("endpoint", "")
        else:
            endpoint = self.conn.extra_dejson.get("msq_endpoint", "")
        return f"{conn_type}://{host}:{port}/{endpoint}"

    def get_status_url(self, ingestion_type):
        """Return Druid status url."""
        if ingestion_type == IngestionType.MSQ:
            # MSQ status is polled on the indexer task API rather than the MSQ
            # submission endpoint
            if self.get_connection_type == "druid":
                conn_type = self.conn.extra_dejson.get("schema", "http")
            else:
                conn_type = self.get_connection_type
            status_endpoint = self.conn.extra_dejson.get("status_endpoint", self.status_endpoint)
            return f"{conn_type}://{self.conn.host}:{self.conn.port}/{status_endpoint}"
        return self.get_conn_url(ingestion_type)

    def get_auth(self) -> requests.auth.HTTPBasicAuth | None:
        """
        Return username and password from connections tab as requests.auth.HTTPBasicAuth object.

        If these details have not been set then returns None.
        """
        user = self.conn.login
        password = self.conn.password
        if user is not None and password is not None:
            return requests.auth.HTTPBasicAuth(user, password)
        return None

    def get_verify(self) -> bool | str:
        """Value for requests' `verify=`: a CA-bundle path may stand in for
        full verification when `verify_ssl` is disabled."""
        ca_bundle_path: str | None = self.conn.extra_dejson.get("ca_bundle_path", None)
        if not self.verify_ssl and ca_bundle_path:
            self.log.info("Using CA bundle to verify connection")
            return ca_bundle_path
        return self.verify_ssl

    def submit_indexing_job(
        self, json_index_spec: dict[str, Any] | str, ingestion_type: IngestionType = IngestionType.BATCH
    ) -> None:
        """Submit Druid ingestion job."""
        url = self.get_conn_url(ingestion_type)

        self.log.info("Druid ingestion spec: %s", json_index_spec)
        req_index = requests.post(
            url, data=json_index_spec, headers=self.header, auth=self.get_auth(), verify=self.get_verify()
        )

        code = req_index.status_code
        not_accepted = not (200 <= code < 300)
        # NOTE(review): the check accepts any 2xx status while the raised
        # message says "200 or 202" -- confirm which is intended.
        if not_accepted:
            self.log.error("Error submitting the Druid job to %s (%s) %s", url, code, req_index.content)
            raise AirflowException(f"Did not get 200 or 202 when submitting the Druid job to {url}")

        req_json = req_index.json()
        # Wait until the job is completed
        # batch and MSQ responses key the task id differently
        if ingestion_type == IngestionType.BATCH:
            druid_task_id = req_json["task"]
        else:
            druid_task_id = req_json["taskId"]
        druid_task_status_url = self.get_status_url(ingestion_type) + f"/{druid_task_id}/status"
        self.log.info("Druid indexing task-id: %s", druid_task_id)

        running = True
        sec = 0  # elapsed polling time, advanced by `timeout` per iteration

        while running:
            req_status = requests.get(druid_task_status_url, auth=self.get_auth(), verify=self.get_verify())

            self.log.info("Job still running for %s seconds...", sec)

            if self.max_ingestion_time and sec > self.max_ingestion_time:
                # ensure that the job gets killed if the max ingestion time is exceeded
                requests.post(
                    f"{url}/{druid_task_id}/shutdown", auth=self.get_auth(), verify=self.get_verify()
                )
                raise AirflowException(f"Druid ingestion took more than {self.max_ingestion_time} seconds")

            time.sleep(self.timeout)

            sec += self.timeout

            status = req_status.json()["status"]["status"]
            if status == "RUNNING":
                running = True
            elif status == "SUCCESS":
                running = False  # Great success!
            elif status == "FAILED":
                raise AirflowException("Druid indexing job failed, check console for more info")
            else:
                raise AirflowException(f"Could not get status of the job, got {status}")

        self.log.info("Successful index")
| DruidHook |
python | kamyu104__LeetCode-Solutions | Python/minimum-number-of-coins-for-fruits-ii.py | {
"start": 788,
"end": 1263
} | class ____(object):
def minimumCoins(self, prices):
"""
:type prices: List[int]
:rtype: int
"""
dp = [float("inf")]*(len(prices)+1)
dp[0] = 0
sl = SortedList()
j = 0
for i in xrange(len(prices)):
sl.add((dp[i]+prices[i], i))
while j+(j+1) < i:
sl.remove(((dp[j]+prices[j], j)))
j += 1
dp[i+1] = sl[0][0]
return dp[-1]
| Solution2 |
python | bokeh__bokeh | tests/unit/bokeh/application/test_application.py | {
"start": 7679,
"end": 8154
class ____:
    """SessionContext is an abstract base and must not be constructible."""

    def test_abstract(self) -> None:
        # Direct instantiation of the abstract class must raise TypeError.
        pytest.raises(TypeError, baa.SessionContext)
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
| Test_SessionContext |
python | openai__openai-python | src/openai/types/beta/realtime/response_create_event.py | {
"start": 4427,
"end": 4763
class ____(BaseModel):
    # Client event asking the Realtime API to create a model response.
    # The bare-string field docstrings below double as schema descriptions
    # and are left unchanged.
    type: Literal["response.create"]
    """The event type, must be `response.create`."""

    event_id: Optional[str] = None
    """Optional client-generated ID used to identify this event."""

    response: Optional[Response] = None
    """Create a new Realtime response with these parameters"""
| ResponseCreateEvent |
python | weaviate__weaviate-python-client | weaviate/exceptions.py | {
"start": 9147,
"end": 9732
class ____(WeaviateBaseError):
    """Raised when an object insert supplies a property that is not allowed."""

    def __init__(self, data: dict):
        # The offending payload is embedded in the message to aid debugging.
        message = f"""It is forbidden to insert `id` or `vector` inside properties: {data}. Only properties defined in your collection's config can be inserted as properties of the object, `id` is totally forbidden as it is reserved and `vector` is forbidden at this level. You should use the `DataObject` class if you wish to insert an object with a custom `vector` whilst inserting its properties."""
        super().__init__(message)
| WeaviateInsertInvalidPropertyError |
python | google__jax | jax/_src/config.py | {
"start": 34960,
"end": 35309
class ____:
    """Config entry backed by a ``config_ext.Config`` keyed as "user_context"."""

    def __init__(self, default_value):
        # Participates in both the jit key and the trace context.
        self._obj = config_ext.Config(
            "user_context",
            default_value,
            include_in_jit_key=True,
            include_in_trace_context=True,
        )

    @property
    def value(self):
        """The entry's current value."""
        return self._obj.value

    def __call__(self, new_value):
        """Build a `UserContext` that applies `new_value` to this entry."""
        return UserContext(self._obj, new_value)
| UserConfig |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/definitions/graph_definition.py | {
"start": 4828,
"end": 42026
} | class ____(NodeDefinition):
"""Defines a Dagster op graph.
An op graph is made up of
- Nodes, which can either be an op (the functional unit of computation), or another graph.
- Dependencies, which determine how the values produced by nodes as outputs flow from
one node to another. This tells Dagster how to arrange nodes into a directed, acyclic graph
(DAG) of compute.
End users should prefer the :func:`@graph <graph>` decorator. GraphDefinition is generally
intended to be used by framework authors or for programatically generated graphs.
Args:
name (str): The name of the graph. Must be unique within any :py:class:`GraphDefinition`
or :py:class:`JobDefinition` containing the graph.
description (Optional[str]): A human-readable description of the job.
node_defs (Optional[Sequence[NodeDefinition]]): The set of ops / graphs used in this graph.
dependencies (Optional[Dict[Union[str, NodeInvocation], Dict[str, DependencyDefinition]]]):
A structure that declares the dependencies of each op's inputs on the outputs of other
ops in the graph. Keys of the top level dict are either the string names of ops in the
graph or, in the case of aliased ops, :py:class:`NodeInvocations <NodeInvocation>`.
Values of the top level dict are themselves dicts, which map input names belonging to
the op or aliased op to :py:class:`DependencyDefinitions <DependencyDefinition>`.
input_mappings (Optional[Sequence[InputMapping]]): Defines the inputs to the nested graph, and
how they map to the inputs of its constituent ops.
output_mappings (Optional[Sequence[OutputMapping]]): Defines the outputs of the nested graph,
and how they map from the outputs of its constituent ops.
config (Optional[ConfigMapping]): Defines the config of the graph, and how its schema maps
to the config of its constituent ops.
tags (Optional[Dict[str, Any]]): Arbitrary metadata for any execution of the graph.
Values that are not strings will be json encoded and must meet the criteria that
`json.loads(json.dumps(value)) == value`. These tag values may be overwritten by tag
values provided at invocation time.
composition_fn (Optional[Callable]): The function that defines this graph. Used to generate
code references for this graph.
Examples:
.. code-block:: python
@op
def return_one():
return 1
@op
def add_one(num):
return num + 1
graph_def = GraphDefinition(
name='basic',
node_defs=[return_one, add_one],
dependencies={'add_one': {'num': DependencyDefinition('return_one')}},
)
"""
_node_defs: Sequence[NodeDefinition]
_dagster_type_dict: Mapping[str, DagsterType]
_dependencies: DependencyMapping[NodeInvocation]
_dependency_structure: DependencyStructure
_node_dict: Mapping[str, Node]
_input_mappings: Sequence[InputMapping]
_output_mappings: Sequence[OutputMapping]
_config_mapping: Optional[ConfigMapping]
_nodes_in_topological_order: Sequence[Node]
# (node name within the graph -> (input name -> AssetsDefinition to load that input from))
# Does NOT include keys for:
# - Inputs to the graph itself
# - Inputs to nodes within sub-graphs of the graph
_input_assets: Mapping[str, Mapping[str, "AssetsDefinition"]]
def __init__(
self,
name: str,
*,
description: Optional[str] = None,
node_defs: Optional[Sequence[NodeDefinition]] = None,
dependencies: Optional[
Union[DependencyMapping[str], DependencyMapping[NodeInvocation]]
] = None,
input_mappings: Optional[Sequence[InputMapping]] = None,
output_mappings: Optional[Sequence[OutputMapping]] = None,
config: Optional[ConfigMapping] = None,
tags: Optional[Mapping[str, str]] = None,
node_input_source_assets: Optional[Mapping[str, Mapping[str, "SourceAsset"]]] = None,
input_assets: Optional[
Mapping[str, Mapping[str, Union["AssetsDefinition", "SourceAsset"]]]
] = None,
composition_fn: Optional[Callable] = None,
**kwargs: Any,
):
from dagster._core.definitions.external_asset import create_external_asset_from_source_asset
from dagster._core.definitions.source_asset import SourceAsset
self._node_defs = _check_node_defs_arg(name, node_defs)
# `dependencies` will be converted to `dependency_structure` and `node_dict`, which may
# alternatively be passed directly (useful when copying)
self._dependencies = normalize_dependency_dict(dependencies)
self._dependency_structure, self._node_dict = create_execution_structure(
self._node_defs, self._dependencies, graph_definition=self
)
# Sequence[InputMapping]
self._input_mappings = check.opt_sequence_param(input_mappings, "input_mappings")
input_defs = _validate_in_mappings(
self._input_mappings,
self._node_dict,
self._dependency_structure,
name,
class_name=type(self).__name__,
)
# Sequence[OutputMapping]
self._output_mappings, output_defs = _validate_out_mappings(
check.opt_sequence_param(output_mappings, "output_mappings"),
self._node_dict,
name,
class_name=type(self).__name__,
)
self._config_mapping = check.opt_inst_param(config, "config", ConfigMapping)
self._composition_fn = check.opt_callable_param(composition_fn, "composition_fn")
super().__init__(
name=name,
description=description,
input_defs=input_defs,
output_defs=output_defs,
tags=tags,
**kwargs,
)
# must happen after base class construction as properties are assumed to be there
# eager computation to detect cycles
self._nodes_in_topological_order = self._get_nodes_in_topological_order()
self._dagster_type_dict = construct_dagster_type_dictionary([self])
# Backcompat: the previous API `node_input_source_assets` with a Dict[str, Dict[str,
# SourceAsset]]. The new API is `input_assets` and accepts external assets as well as
# SourceAsset.
self._input_assets = {}
input_assets = check.opt_mapping_param(
normalize_renamed_param(
new_val=input_assets,
new_arg="input_assets",
old_val=node_input_source_assets,
old_arg="node_input_source_assets",
),
"input_assets",
key_type=str,
value_type=dict,
)
for node_name, inputs in input_assets.items():
self._input_assets[node_name] = {
input_name: (
create_external_asset_from_source_asset(asset)
if isinstance(asset, SourceAsset)
else asset
)
for input_name, asset in inputs.items()
}
def _get_nodes_in_topological_order(self) -> Sequence[Node]:
_forward_edges, backward_edges = create_adjacency_lists(
self.nodes, self.dependency_structure
)
try:
order = toposort_flatten(backward_edges)
except CircularDependencyError as err:
raise DagsterInvalidDefinitionError(str(err)) from err
return [self.node_named(node_name) for node_name in order]
def get_inputs_must_be_resolved_top_level(
self, asset_layer: "AssetLayer", handle: Optional[NodeHandle] = None
) -> Sequence[InputDefinition]:
unresolveable_input_defs: list[InputDefinition] = []
for node in self.node_dict.values():
cur_handle = NodeHandle(node.name, handle)
for input_def in node.definition.get_inputs_must_be_resolved_top_level(
asset_layer, cur_handle
):
if self.dependency_structure.has_deps(NodeInput(node, input_def)):
continue
elif not node.container_maps_input(input_def.name):
raise DagsterInvalidDefinitionError(
f"Input '{input_def.name}' of {node.describe_node()} "
"has no way of being resolved. Must provide a resolution to this "
"input via another op/graph, or via a direct input value mapped from the "
"top-level graph. To "
"learn more, see the docs for unconnected inputs: "
"https://legacy-docs.dagster.io/concepts/io-management/unconnected-inputs#unconnected-inputs."
)
else:
mapped_input = node.container_mapped_input(input_def.name)
unresolveable_input_defs.append(mapped_input.get_definition())
return unresolveable_input_defs
@property
def node_type_str(self) -> str:
return "graph"
@property
def is_graph_job_op_node(self) -> bool:
return True
@property
def nodes(self) -> Sequence[Node]:
return list(set(self._node_dict.values()))
@property
def node_dict(self) -> Mapping[str, Node]:
return self._node_dict
@property
def node_defs(self) -> Sequence[NodeDefinition]:
return self._node_defs
@property
def nodes_in_topological_order(self) -> Sequence[Node]:
return self._nodes_in_topological_order
@property
def input_assets(self) -> Mapping[str, Mapping[str, "AssetsDefinition"]]:
return self._input_assets
@property
def composition_fn(self) -> Optional[Callable]:
return self._composition_fn
def has_node_named(self, name: str) -> bool:
check.str_param(name, "name")
return name in self._node_dict
def node_named(self, name: str) -> Node:
check.str_param(name, "name")
if name not in self._node_dict:
raise DagsterInvariantViolationError(f"{self._name} has no op named {name}.")
return self._node_dict[name]
def get_node(self, handle: NodeHandle) -> Node:
check.inst_param(handle, "handle", NodeHandle)
current = handle
lineage: list[str] = []
while current:
lineage.append(current.name)
current = current.parent
name = lineage.pop()
node = self.node_named(name)
while lineage:
name = lineage.pop()
# We know that this is a current node is a graph while ascending lineage
definition = cast("GraphDefinition", node.definition)
node = definition.node_named(name)
return node
def iterate_node_defs(self) -> Iterator[NodeDefinition]:
yield self
for outer_node_def in self._node_defs:
yield from outer_node_def.iterate_node_defs()
def iterate_op_defs(self) -> Iterator["OpDefinition"]:
for outer_node_def in self._node_defs:
yield from outer_node_def.iterate_op_defs()
def iterate_node_handles(
self, parent_node_handle: Optional[NodeHandle] = None
) -> Iterator[NodeHandle]:
for node in self.node_dict.values():
cur_node_handle = NodeHandle(node.name, parent_node_handle)
if isinstance(node, GraphNode):
yield from node.definition.iterate_node_handles(cur_node_handle)
yield cur_node_handle
@public
@property
def input_mappings(self) -> Sequence[InputMapping]:
"""Input mappings for the graph.
An input mapping is a mapping from an input of the graph to an input of a child node.
"""
return self._input_mappings
@public
@property
def output_mappings(self) -> Sequence[OutputMapping]:
"""Output mappings for the graph.
An output mapping is a mapping from an output of the graph to an output of a child node.
"""
return self._output_mappings
@public
@property
def config_mapping(self) -> Optional[ConfigMapping]:
"""The config mapping for the graph, if present.
By specifying a config mapping function, you can override the configuration for the child nodes contained within a graph.
"""
return self._config_mapping
@property
def has_config_mapping(self) -> bool:
return self._config_mapping is not None
def all_dagster_types(self) -> Iterable[DagsterType]:
return self._dagster_type_dict.values()
def has_dagster_type(self, name: str) -> bool:
check.str_param(name, "name")
return name in self._dagster_type_dict
def dagster_type_named(self, name: str) -> DagsterType:
check.str_param(name, "name")
return self._dagster_type_dict[name]
def get_input_mapping(self, input_name: str) -> InputMapping:
check.str_param(input_name, "input_name")
for mapping in self._input_mappings:
if mapping.graph_input_name == input_name:
return mapping
check.failed(f"Could not find input mapping {input_name}")
def input_mapping_for_pointer(
self, pointer: Union[InputPointer, FanInInputPointer]
) -> Optional[InputMapping]:
check.inst_param(pointer, "pointer", (InputPointer, FanInInputPointer))
for mapping in self._input_mappings:
if mapping.maps_to == pointer:
return mapping
return None
def get_output_mapping(self, output_name: str) -> OutputMapping:
check.str_param(output_name, "output_name")
for mapping in self._output_mappings:
if mapping.graph_output_name == output_name:
return mapping
check.failed(f"Could not find output mapping {output_name}")
def resolve_output_to_origin(
self, output_name: str, handle: Optional[NodeHandle]
) -> tuple[OutputDefinition, Optional[NodeHandle]]:
check.str_param(output_name, "output_name")
check.opt_inst_param(handle, "handle", NodeHandle)
mapping = self.get_output_mapping(output_name)
check.invariant(mapping, "Can only resolve outputs for valid output names")
mapped_node = self.node_named(mapping.maps_from.node_name)
return mapped_node.definition.resolve_output_to_origin(
mapping.maps_from.output_name,
NodeHandle(mapped_node.name, handle),
)
def resolve_output_to_origin_op_def(self, output_name: str) -> "OpDefinition":
mapping = self.get_output_mapping(output_name)
check.invariant(mapping, "Can only resolve outputs for valid output names")
return self.node_named(
mapping.maps_from.node_name
).definition.resolve_output_to_origin_op_def(output_name)
def default_value_for_input(self, input_name: str) -> object:
check.str_param(input_name, "input_name")
# base case
if self.input_def_named(input_name).has_default_value:
return self.input_def_named(input_name).default_value
mapping = self.get_input_mapping(input_name)
check.invariant(mapping, "Can only resolve inputs for valid input names")
mapped_node = self.node_named(mapping.maps_to.node_name)
return mapped_node.definition.default_value_for_input(mapping.maps_to.input_name)
def input_has_default(self, input_name: str) -> bool:
check.str_param(input_name, "input_name")
# base case
if self.input_def_named(input_name).has_default_value:
return True
mapping = self.get_input_mapping(input_name)
check.invariant(mapping, "Can only resolve inputs for valid input names")
mapped_node = self.node_named(mapping.maps_to.node_name)
return mapped_node.definition.input_has_default(mapping.maps_to.input_name)
@property
def dependencies(self) -> DependencyMapping[NodeInvocation]:
return self._dependencies
@property
def dependency_structure(self) -> DependencyStructure:
return self._dependency_structure
@property
def config_schema(self) -> Optional[IDefinitionConfigSchema]:
return self.config_mapping.config_schema if self.config_mapping is not None else None
def input_supports_dynamic_output_dep(self, input_name: str) -> bool:
mapping = self.get_input_mapping(input_name)
target_node = mapping.maps_to.node_name
# check if input mapped to node which is downstream of another dynamic output within
if self.dependency_structure.is_dynamic_mapped(target_node):
return False
# check if input mapped to node which starts new dynamic downstream
if self.dependency_structure.has_dynamic_downstreams(target_node):
return False
return self.node_named(target_node).definition.input_supports_dynamic_output_dep(
mapping.maps_to.input_name
)
def copy(
self,
name: Optional[str] = None,
description: Optional[str] = None,
input_mappings: Optional[Sequence[InputMapping]] = None,
output_mappings: Optional[Sequence[OutputMapping]] = None,
config: Optional[ConfigMapping] = None,
tags: Optional[Mapping[str, str]] = None,
input_assets: Optional[Mapping[str, Mapping[str, "AssetsDefinition"]]] = None,
) -> Self:
return self.__class__(
node_defs=self.node_defs,
dependencies=self.dependencies,
name=name or self.name,
description=description or self.description,
input_mappings=input_mappings or self._input_mappings,
output_mappings=output_mappings or self._output_mappings,
config=config or self.config_mapping,
tags=tags or self.tags,
input_assets=input_assets or self._input_assets,
)
def copy_for_configured(
self,
name: str,
description: Optional[str],
config_schema: Any,
) -> Self:
if not self.has_config_mapping:
raise DagsterInvalidDefinitionError(
"Only graphs utilizing config mapping can be pre-configured. The graph "
f'"{self.name}" does not have a config mapping, and thus has nothing to be '
"configured."
)
config_mapping = cast("ConfigMapping", self.config_mapping)
return self.copy(
name=name,
description=check.opt_str_param(description, "description", default=self.description),
config=ConfigMapping(
config_mapping.config_fn,
config_schema=config_schema,
receive_processed_config_values=config_mapping.receive_processed_config_values,
),
)
def node_names(self) -> Sequence[str]:
return list(self._node_dict.keys())
@public
@beta_param(param="owners")
def to_job(
self,
name: Optional[str] = None,
description: Optional[str] = None,
resource_defs: Optional[Mapping[str, object]] = None,
config: Optional[
Union["RunConfig", ConfigMapping, Mapping[str, object], "PartitionedConfig"]
] = None,
tags: Optional[Mapping[str, object]] = None,
metadata: Optional[Mapping[str, RawMetadataValue]] = None,
logger_defs: Optional[Mapping[str, LoggerDefinition]] = None,
executor_def: Optional["ExecutorDefinition"] = None,
hooks: Optional[AbstractSet[HookDefinition]] = None,
op_retry_policy: Optional[RetryPolicy] = None,
op_selection: Optional[Sequence[str]] = None,
partitions_def: Optional["PartitionsDefinition"] = None,
asset_layer: Optional["AssetLayer"] = None,
input_values: Optional[Mapping[str, object]] = None,
run_tags: Optional[Mapping[str, object]] = None,
_asset_selection_data: Optional[AssetSelectionData] = None,
owners: Optional[Sequence[str]] = None,
) -> "JobDefinition":
"""Make this graph in to an executable Job by providing remaining components required for execution.
Args:
name (Optional[str]):
The name for the Job. Defaults to the name of the this graph.
resource_defs (Optional[Mapping [str, object]]):
Resources that are required by this graph for execution.
If not defined, `io_manager` will default to filesystem.
config:
Describes how the job is parameterized at runtime.
If no value is provided, then the schema for the job's run config is a standard
format based on its ops and resources.
If a dictionary is provided, then it must conform to the standard config schema, and
it will be used as the job's run config for the job whenever the job is executed.
The values provided will be viewable and editable in the Dagster UI, so be
careful with secrets.
If a :py:class:`ConfigMapping` object is provided, then the schema for the job's run config is
determined by the config mapping, and the ConfigMapping, which should return
configuration in the standard format to configure the job.
If a :py:class:`PartitionedConfig` object is provided, then it defines a discrete set of config
values that can parameterize the job, as well as a function for mapping those
values to the base config. The values provided will be viewable and editable in the
Dagster UI, so be careful with secrets.
tags (Optional[Mapping[str, object]]): A set of key-value tags that annotate the job and can
be used for searching and filtering in the UI. Values that are not already strings will
be serialized as JSON. If `run_tags` is not set, then the content of `tags` will also be
automatically appended to the tags of any runs of this job.
run_tags (Optional[Mapping[str, object]]):
A set of key-value tags that will be automatically attached to runs launched by this
job. Values that are not already strings will be serialized as JSON. These tag values
may be overwritten by tag values provided at invocation time. If `run_tags` is set, then
`tags` are not automatically appended to the tags of any runs of this job.
metadata (Optional[Mapping[str, RawMetadataValue]]):
Arbitrary information that will be attached to the JobDefinition and be viewable in the Dagster UI.
Keys must be strings, and values must be python primitive types or one of the provided
MetadataValue types
logger_defs (Optional[Mapping[str, LoggerDefinition]]):
A dictionary of string logger identifiers to their implementations.
executor_def (Optional[ExecutorDefinition]):
How this Job will be executed. Defaults to :py:class:`multi_or_in_process_executor`,
which can be switched between multi-process and in-process modes of execution. The
default mode of execution is multi-process.
op_retry_policy (Optional[RetryPolicy]): The default retry policy for all ops in this job.
Only used if retry policy is not defined on the op definition or op invocation.
partitions_def (Optional[PartitionsDefinition]): Defines a discrete set of partition
keys that can parameterize the job. If this argument is supplied, the config
argument can't also be supplied.
asset_layer (Optional[AssetLayer]): Top level information about the assets this job
will produce. Generally should not be set manually.
input_values (Optional[Mapping[str, Any]]):
A dictionary that maps python objects to the top-level inputs of a job.
owners (Optional[Sequence[str]]): A sequence of strings identifying the owners of the job.
Returns:
JobDefinition
"""
from dagster._core.definitions.job_definition import JobDefinition
from dagster._core.execution.build_resources import wrap_resources_for_execution
wrapped_resource_defs = wrap_resources_for_execution(resource_defs)
return JobDefinition.dagster_internal_init(
name=name,
description=description or self.description,
graph_def=self,
resource_defs=wrapped_resource_defs,
logger_defs=logger_defs,
executor_def=executor_def,
config=config,
partitions_def=partitions_def,
tags=tags,
run_tags=run_tags,
metadata=metadata,
hook_defs=hooks,
op_retry_policy=op_retry_policy,
asset_layer=asset_layer,
input_values=input_values,
_subset_selection_data=_asset_selection_data,
_was_explicitly_provided_resources=None, # None means this is determined by whether resource_defs contains any explicitly provided resources
owners=owners,
).get_subset(op_selection=op_selection)
def coerce_to_job(self) -> "JobDefinition":
# attempt to coerce a Graph in to a Job, raising a useful error if it doesn't work
try:
return self.to_job()
except DagsterInvalidDefinitionError as err:
raise DagsterInvalidDefinitionError(
f"Failed attempting to coerce Graph {self.name} in to a Job. "
"Use to_job instead, passing the required information."
) from err
@public
def execute_in_process(
self,
run_config: Any = None,
instance: Optional["DagsterInstance"] = None,
resources: Optional[Mapping[str, object]] = None,
raise_on_error: bool = True,
op_selection: Optional[Sequence[str]] = None,
run_id: Optional[str] = None,
input_values: Optional[Mapping[str, object]] = None,
) -> "ExecuteInProcessResult":
"""Execute this graph in-process, collecting results in-memory.
Args:
run_config (Optional[Mapping[str, Any]]):
Run config to provide to execution. The configuration for the underlying graph
should exist under the "ops" key.
instance (Optional[DagsterInstance]):
The instance to execute against, an ephemeral one will be used if none provided.
resources (Optional[Mapping[str, Any]]):
The resources needed if any are required. Can provide resource instances directly,
or resource definitions.
raise_on_error (Optional[bool]): Whether or not to raise exceptions when they occur.
Defaults to ``True``.
op_selection (Optional[List[str]]): A list of op selection queries (including single op
names) to execute. For example:
* ``['some_op']``: selects ``some_op`` itself.
* ``['*some_op']``: select ``some_op`` and all its ancestors (upstream dependencies).
* ``['*some_op+++']``: select ``some_op``, all its ancestors, and its descendants
(downstream dependencies) within 3 levels down.
* ``['*some_op', 'other_op_a', 'other_op_b+']``: select ``some_op`` and all its
ancestors, ``other_op_a`` itself, and ``other_op_b`` and its direct child ops.
input_values (Optional[Mapping[str, Any]]):
A dictionary that maps python objects to the top-level inputs of the graph.
Returns:
:py:class:`~dagster.ExecuteInProcessResult`
"""
from dagster._core.definitions.executor_definition import execute_in_process_executor
from dagster._core.definitions.job_definition import JobDefinition
from dagster._core.execution.build_resources import wrap_resources_for_execution
from dagster._core.instance import DagsterInstance
instance = check.opt_inst_param(instance, "instance", DagsterInstance)
resources = check.opt_mapping_param(resources, "resources", key_type=str)
input_values = check.opt_mapping_param(input_values, "input_values")
resource_defs = wrap_resources_for_execution(resources)
ephemeral_job = JobDefinition(
name=self._name,
graph_def=self,
executor_def=execute_in_process_executor,
resource_defs=resource_defs,
input_values=input_values,
).get_subset(op_selection=op_selection)
run_config = run_config if run_config is not None else {}
op_selection = check.opt_sequence_param(op_selection, "op_selection", str)
return ephemeral_job.execute_in_process(
run_config=run_config,
instance=instance,
raise_on_error=raise_on_error,
run_id=run_id,
)
@property
def parent_graph_def(self) -> Optional["GraphDefinition"]:
return None
@property
def is_subselected(self) -> bool:
return False
def get_resource_requirements(
self,
asset_layer: Optional["AssetLayer"],
) -> Iterator[ResourceRequirement]:
for node in self.node_dict.values():
yield from node.get_resource_requirements(outer_container=self, asset_layer=asset_layer)
for dagster_type in self.all_dagster_types():
yield from dagster_type.get_resource_requirements()
@public
@property
def name(self) -> str:
"""The name of the graph."""
return super().name
@public
@property
def tags(self) -> Mapping[str, str]:
"""The tags associated with the graph."""
return super().tags
@property
def pools(self) -> Set[str]:
pools = set()
for node_def in self.node_defs:
pools.update(node_def.pools)
return pools
@public
def alias(self, name: str) -> "PendingNodeInvocation":
"""Aliases the graph with a new name.
Can only be used in the context of a :py:func:`@graph <graph>`, :py:func:`@job <job>`, or :py:func:`@asset_graph <asset_graph>` decorated function.
**Examples:**
.. code-block:: python
@job
def do_it_all():
my_graph.alias("my_graph_alias")
"""
return super().alias(name)
@public
def tag(self, tags: Optional[Mapping[str, str]]) -> "PendingNodeInvocation":
"""Attaches the provided tags to the graph immutably.
Can only be used in the context of a :py:func:`@graph <graph>`, :py:func:`@job <job>`, or :py:func:`@asset_graph <asset_graph>` decorated function.
**Examples:**
.. code-block:: python
@job
def do_it_all():
my_graph.tag({"my_tag": "my_value"})
"""
return super().tag(tags)
@public
def with_hooks(self, hook_defs: AbstractSet[HookDefinition]) -> "PendingNodeInvocation":
"""Attaches the provided hooks to the graph immutably.
Can only be used in the context of a :py:func:`@graph <graph>`, :py:func:`@job <job>`, or :py:func:`@asset_graph <asset_graph>` decorated function.
**Examples:**
.. code-block:: python
@job
def do_it_all():
my_graph.with_hooks({my_hook})
"""
return super().with_hooks(hook_defs)
@public
def with_retry_policy(self, retry_policy: RetryPolicy) -> "PendingNodeInvocation":
"""Attaches the provided retry policy to the graph immutably.
Can only be used in the context of a :py:func:`@graph <graph>`, :py:func:`@job <job>`, or :py:func:`@asset_graph <asset_graph>` decorated function.
**Examples:**
.. code-block:: python
@job
def do_it_all():
my_graph.with_retry_policy(RetryPolicy(max_retries=5))
"""
return super().with_retry_policy(retry_policy)
def resolve_input_to_destinations(
self, input_handle: NodeInputHandle
) -> Sequence[NodeInputHandle]:
all_destinations: list[NodeInputHandle] = []
for mapping in self.input_mappings:
if mapping.graph_input_name != input_handle.input_name:
continue
# recurse into graph structure
all_destinations += self.node_named(
mapping.maps_to.node_name
).definition.resolve_input_to_destinations(
NodeInputHandle(
node_handle=NodeHandle(
mapping.maps_to.node_name, parent=input_handle.node_handle
),
input_name=mapping.maps_to.input_name,
),
)
return all_destinations
def resolve_output_to_destinations(
self, output_name: str, handle: Optional[NodeHandle]
) -> Sequence[NodeInputHandle]:
all_destinations: list[NodeInputHandle] = []
for mapping in self.output_mappings:
if mapping.graph_output_name != output_name:
continue
output_pointer = mapping.maps_from
output_node = self.node_named(output_pointer.node_name)
all_destinations.extend(
output_node.definition.resolve_output_to_destinations(
output_pointer.output_name,
NodeHandle(output_pointer.node_name, parent=handle),
)
)
output_def = output_node.definition.output_def_named(output_pointer.output_name)
downstream_input_handles = (
self.dependency_structure.output_to_downstream_inputs_for_node(
output_pointer.node_name
).get(NodeOutput(output_node, output_def), [])
)
for input_handle in downstream_input_handles:
all_destinations.append(
NodeInputHandle(
node_handle=NodeHandle(input_handle.node_name, parent=handle),
input_name=input_handle.input_name,
)
)
return all_destinations
def get_op_handles(self, parent: NodeHandle) -> AbstractSet[NodeHandle]:
return {
op_handle
for node in self.nodes
for op_handle in node.definition.get_op_handles(NodeHandle(node.name, parent=parent))
}
def get_op_output_handles(self, parent: Optional[NodeHandle]) -> AbstractSet[NodeOutputHandle]:
return {
op_output_handle
for node in self.nodes
for op_output_handle in node.definition.get_op_output_handles(
NodeHandle(node.name, parent=parent)
)
}
def get_op_input_output_handle_pairs(
self, outer_handle: Optional[NodeHandle]
) -> AbstractSet[tuple[NodeOutputHandle, NodeInputHandle]]:
"""Get all pairs of op output handles and their downstream op input handles within the graph."""
result: set[tuple[NodeOutputHandle, NodeInputHandle]] = set()
for node in self.nodes:
node_handle = NodeHandle(node.name, parent=outer_handle)
if isinstance(node.definition, GraphDefinition):
result.update(node.definition.get_op_input_output_handle_pairs(node_handle))
for (
node_input,
upstream_outputs,
) in self.dependency_structure.input_to_upstream_outputs_for_node(node.name).items():
op_input_handles = node_input.node.definition.resolve_input_to_destinations(
NodeInputHandle(node_handle=node_handle, input_name=node_input.input_def.name)
)
for op_input_handle in op_input_handles:
for upstream_node_output in upstream_outputs:
origin_output_def, origin_node_handle = (
upstream_node_output.node.definition.resolve_output_to_origin(
upstream_node_output.output_def.name,
NodeHandle(upstream_node_output.node.name, parent=outer_handle),
)
)
origin_output_handle = NodeOutputHandle(
node_handle=origin_node_handle, output_name=origin_output_def.name
)
result.add((origin_output_handle, op_input_handle))
return result
| GraphDefinition |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_chart_axis07.py | {
"start": 315,
"end": 1579
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("chart_axis07.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({"type": "area"})
chart.axis_ids = [43321216, 47077248]
data = [
[1, 2, 3, 4, 5],
[8, 7, 6, 8, 10],
[3, 6, 9, 12, 15],
]
worksheet.write_column("A1", data[0])
worksheet.write_column("B1", data[1])
worksheet.write_column("C1", data[2])
chart.add_series(
{
"categories": "=Sheet1!$A$1:$A$5",
"values": "=Sheet1!$B$1:$B$5",
}
)
chart.add_series(
{
"categories": "=Sheet1!$A$1:$A$5",
"values": "=Sheet1!$C$1:$C$5",
}
)
chart.set_x_axis({"name": "XXX"})
chart.set_y_axis({"name": "YYY"})
worksheet.insert_chart("E9", chart)
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | pyinstaller__pyinstaller | PyInstaller/utils/win32/icon.py | {
"start": 2431,
"end": 2534
} | class ____(Structure):
_names_ = "idReserved", "idType", "idCount"
_format_ = "hhh"
| ICONDIRHEADER |
python | huggingface__transformers | src/transformers/models/tapas/modeling_tapas.py | {
"start": 2648,
"end": 6424
} | class ____(nn.Module):
"""
Construct the embeddings from word, position and token_type embeddings. Same as BertEmbeddings but with a number of
additional token type embeddings to encode tabular structure.
"""
def __init__(self, config):
super().__init__()
# we do not include config.disabled_features and config.disable_position_embeddings from the original implementation
# word embeddings
self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
# position embeddings
self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
# token type embeddings
for i, type_vocab_sizes in enumerate(config.type_vocab_sizes):
name = f"token_type_embeddings_{i}"
setattr(self, name, nn.Embedding(type_vocab_sizes, config.hidden_size))
self.number_of_token_type_embeddings = len(config.type_vocab_sizes)
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.config = config
def forward(self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None):
if input_ids is not None:
input_shape = input_ids.size()
else:
input_shape = inputs_embeds.size()[:-1]
seq_length = input_shape[1]
device = input_ids.device if input_ids is not None else inputs_embeds.device
if position_ids is None:
# create absolute position embeddings
position_ids = torch.arange(seq_length, dtype=torch.long, device=device)
position_ids = position_ids.unsqueeze(0).expand(input_shape)
# when self.config.reset_position_index_per_cell is set to True, create relative position embeddings
if self.config.reset_position_index_per_cell:
# shape (batch_size, seq_len)
col_index = IndexMap(token_type_ids[:, :, 1], self.config.type_vocab_sizes[1], batch_dims=1)
# shape (batch_size, seq_len)
row_index = IndexMap(token_type_ids[:, :, 2], self.config.type_vocab_sizes[2], batch_dims=1)
# shape (batch_size, seq_len)
full_index = ProductIndexMap(col_index, row_index)
# shape (max_rows * max_columns,). First absolute position for every cell
first_position_per_segment = reduce_min(position_ids, full_index)[0]
# ? shape (batch_size, seq_len). First absolute position of the cell for every token
first_position = gather(first_position_per_segment, full_index)
# shape (1, seq_len)
position = torch.arange(seq_length, dtype=torch.long, device=device).unsqueeze(0)
position_ids = torch.min(
torch.as_tensor(self.config.max_position_embeddings - 1, device=device), position - first_position
)
if token_type_ids is None:
token_type_ids = torch.zeros(
(input_shape + self.number_of_token_type_embeddings), dtype=torch.long, device=device
)
if inputs_embeds is None:
inputs_embeds = self.word_embeddings(input_ids)
position_embeddings = self.position_embeddings(position_ids)
embeddings = inputs_embeds + position_embeddings
for i in range(self.number_of_token_type_embeddings):
name = f"token_type_embeddings_{i}"
embeddings += getattr(self, name)(token_type_ids[:, :, i])
embeddings = self.LayerNorm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
| TapasEmbeddings |
python | Lightning-AI__lightning | tests/tests_pytorch/models/test_hparams.py | {
"start": 25020,
"end": 25732
} | class ____(SuperClassPositionalArgs):
"""Loading this model should accept hparams and init in the super class."""
def __init__(self, *args, **kwargs) -> None:
super().__init__(*args, **kwargs)
def test_args(tmp_path):
"""Test for inheritance: super class takes positional arg, subclass takes varargs."""
hparams = {"test": 1}
model = SubClassVarArgs(hparams)
trainer = Trainer(default_root_dir=tmp_path, max_epochs=1)
trainer.fit(model)
raw_checkpoint_path = _raw_checkpoint_path(trainer)
with pytest.raises(TypeError, match=r"__init__\(\) got an unexpected keyword argument 'test'"):
SubClassVarArgs.load_from_checkpoint(raw_checkpoint_path)
| SubClassVarArgs |
python | django__django | tests/mail/tests.py | {
"start": 8976,
"end": 82152
} | class ____(MailTestsMixin, SimpleTestCase):
"""
Non-backend specific tests.
"""
def test_ascii(self):
email = EmailMessage(
"Subject", "Content\n", "from@example.com", ["to@example.com"]
)
message = email.message()
self.assertEqual(message["Subject"], "Subject")
self.assertEqual(message.get_payload(), "Content\n")
self.assertEqual(message["From"], "from@example.com")
self.assertEqual(message["To"], "to@example.com")
# RemovedInDjango70Warning.
@ignore_warnings(category=RemovedInDjango70Warning)
@mock.patch("django.core.mail.message.MIMEText.set_payload")
def test_nonascii_as_string_with_ascii_charset(self, mock_set_payload):
"""Line length check should encode the payload supporting
`surrogateescape`.
Following https://github.com/python/cpython/issues/76511, newer
versions of Python (3.12.3 and 3.13+) ensure that a message's
payload is encoded with the provided charset and `surrogateescape` is
used as the error handling strategy.
This test is heavily based on the test from the fix for the bug above.
Line length checks in SafeMIMEText's set_payload should also use the
same error handling strategy to avoid errors such as:
UnicodeEncodeError: 'utf-8' codec can't encode <...>: surrogates not
allowed
"""
# This test is specific to Python's legacy MIMEText. This can be safely
# removed when EmailMessage.message() uses Python's modern email API.
# (Using surrogateescape for non-utf8 is covered in test_encoding().)
from django.core.mail import SafeMIMEText
def simplified_set_payload(instance, payload, charset):
instance._payload = payload
mock_set_payload.side_effect = simplified_set_payload
text = (
"Text heavily based in Python's text for non-ascii messages: Föö bär"
).encode("iso-8859-1")
body = text.decode("ascii", errors="surrogateescape")
message = SafeMIMEText(body, "plain", "ascii")
mock_set_payload.assert_called_once()
self.assertEqual(message.get_payload(decode=True), text)
def test_multiple_recipients(self):
email = EmailMessage(
"Subject",
"Content\n",
"from@example.com",
["to@example.com", "other@example.com"],
)
message = email.message()
self.assertEqual(message["Subject"], "Subject")
self.assertEqual(message.get_payload(), "Content\n")
self.assertEqual(message["From"], "from@example.com")
self.assertEqual(message["To"], "to@example.com, other@example.com")
def test_header_omitted_for_no_to_recipients(self):
message = EmailMessage(
"Subject", "Content", "from@example.com", cc=["cc@example.com"]
).message()
self.assertNotIn("To", message)
def test_recipients_with_empty_strings(self):
"""
Empty strings in various recipient arguments are always stripped
off the final recipient list.
"""
email = EmailMessage(
"Subject",
"Content",
"from@example.com",
["to@example.com", ""],
cc=["cc@example.com", ""],
bcc=["", "bcc@example.com"],
reply_to=["", None],
)
self.assertEqual(
email.recipients(), ["to@example.com", "cc@example.com", "bcc@example.com"]
)
def test_cc(self):
    """Regression test for #7722"""
    # A single CC address is emitted and included in recipients().
    email = EmailMessage(
        subject="Subject",
        body="Content",
        from_email="from@example.com",
        to=["to@example.com"],
        cc=["cc@example.com"],
    )
    self.assertEqual(email.message()["Cc"], "cc@example.com")
    self.assertEqual(email.recipients(), ["to@example.com", "cc@example.com"])
    # Multiple CC combined with multiple To.
    email = EmailMessage(
        subject="Subject",
        body="Content",
        from_email="from@example.com",
        to=["to@example.com", "other@example.com"],
        cc=["cc@example.com", "cc.other@example.com"],
    )
    self.assertEqual(email.message()["Cc"], "cc@example.com, cc.other@example.com")
    self.assertEqual(
        email.recipients(),
        [
            "to@example.com",
            "other@example.com",
            "cc@example.com",
            "cc.other@example.com",
        ],
    )
    # Bcc recipients are appended after CC in recipients().
    email = EmailMessage(
        subject="Subject",
        body="Content",
        from_email="from@example.com",
        to=["to@example.com", "other@example.com"],
        cc=["cc@example.com", "cc.other@example.com"],
        bcc=["bcc@example.com"],
    )
    self.assertEqual(email.message()["Cc"], "cc@example.com, cc.other@example.com")
    self.assertEqual(
        email.recipients(),
        [
            "to@example.com",
            "other@example.com",
            "cc@example.com",
            "cc.other@example.com",
            "bcc@example.com",
        ],
    )
def test_cc_headers(self):
    """An explicit Cc header overrides the cc argument in the message."""
    email = EmailMessage(
        subject="Subject",
        body="Content",
        from_email="bounce@example.com",
        to=["to@example.com"],
        cc=["foo@example.com"],
        headers={"Cc": "override@example.com"},
    )
    self.assertEqual(email.message().get_all("Cc"), ["override@example.com"])
def test_cc_in_headers_only(self):
    """A Cc given only via headers still appears in the message."""
    email = EmailMessage(
        subject="Subject",
        body="Content",
        from_email="bounce@example.com",
        to=["to@example.com"],
        headers={"Cc": "foo@example.com"},
    )
    self.assertEqual(email.message().get_all("Cc"), ["foo@example.com"])
def test_bcc_not_in_headers(self):
    """
    A bcc address should be in the recipients,
    but not in the (visible) message headers.
    """
    email = EmailMessage(to=["to@example.com"], bcc=["bcc@example.com"])
    generated = email.message()
    self.assertNotIn("Bcc", generated)
    self.assertNotIn("bcc@example.com", generated.as_string())
    self.assertEqual(email.recipients(), ["to@example.com", "bcc@example.com"])
def test_reply_to(self):
    """reply_to addresses are joined into a single Reply-To header."""
    cases = [
        (["reply_to@example.com"], "reply_to@example.com"),
        (
            ["reply_to1@example.com", "reply_to2@example.com"],
            "reply_to1@example.com, reply_to2@example.com",
        ),
    ]
    for reply_to, expected in cases:
        with self.subTest(reply_to=reply_to):
            email = EmailMessage(
                subject="Subject",
                body="Content",
                from_email="from@example.com",
                to=["to@example.com"],
                reply_to=reply_to,
            )
            self.assertEqual(email.message()["Reply-To"], expected)
def test_recipients_as_tuple(self):
    """Tuples are accepted anywhere a recipient list is expected."""
    email = EmailMessage(
        subject="Subject",
        body="Content",
        from_email="from@example.com",
        to=("to@example.com", "other@example.com"),
        cc=("cc@example.com", "cc.other@example.com"),
        bcc=("bcc@example.com",),
    )
    self.assertEqual(email.message()["Cc"], "cc@example.com, cc.other@example.com")
    self.assertEqual(
        email.recipients(),
        [
            "to@example.com",
            "other@example.com",
            "cc@example.com",
            "cc.other@example.com",
            "bcc@example.com",
        ],
    )
def test_recipients_as_string(self):
    """Passing a bare string where a list is required raises TypeError."""
    cases = [
        ("to", "foo@example.com"),
        ("cc", "foo@example.com"),
        ("bcc", "foo@example.com"),
        ("reply_to", "reply_to@example.com"),
    ]
    for argname, value in cases:
        with self.subTest(argname=argname):
            expected = '"%s" argument must be a list or tuple' % argname
            with self.assertRaisesMessage(TypeError, expected):
                EmailMessage(**{argname: value})
def test_header_injection(self):
    """Newlines embedded in headers or addresses are rejected."""
    msg = "Header values may not contain linefeed or carriage return characters"
    cases = [
        {"subject": "Subject\nInjection Test"},
        {"subject": gettext_lazy("Lazy Subject\nInjection Test")},
        {"to": ["Name\nInjection test <to@example.com>"]},
    ]
    for kwargs in cases:
        with self.subTest(case=kwargs):
            with self.assertRaisesMessage(ValueError, msg):
                EmailMessage(**kwargs).message()
def test_folding_white_space(self):
    """
    Test for correct use of "folding white space" in long headers (#7747)
    """
    subject = (
        "Long subject lines that get wrapped should contain a space continuation "
        "character to comply with RFC 822"
    )
    raw = EmailMessage(subject).message().as_bytes()
    self.assertIn(
        b"Subject: Long subject lines that get wrapped should contain a space\n"
        b" continuation character to comply with RFC 822",
        raw,
    )
def test_message_header_overrides(self):
    """
    Specifying dates or message-ids in the extra headers overrides the
    default values (#9233)
    """
    custom_headers = {
        "date": "Fri, 09 Nov 2001 01:08:47 -0000",
        "Message-ID": "foo",
    }
    email = EmailMessage(headers=custom_headers)
    self.assertMessageHasHeaders(
        email.message(),
        {
            ("date", "Fri, 09 Nov 2001 01:08:47 -0000"),
            ("Message-ID", "foo"),
        },
    )
def test_datetime_in_date_header(self):
    """
    A datetime in headers should be passed through to Python email intact,
    so that it uses the email header date format.
    """
    date = datetime(2001, 11, 9, 1, 8, 47, tzinfo=timezone.utc)
    generated = EmailMessage(headers={"Date": date}).message()
    self.assertEqual(generated["Date"], "Fri, 09 Nov 2001 01:08:47 +0000")
    # Not the default ISO format from force_str(strings_only=False).
    self.assertNotEqual(generated["Date"], "2001-11-09 01:08:47+00:00")
def test_from_header(self):
    """A manually-set From header overrides from_email (#9214)."""
    email = EmailMessage(
        from_email="bounce@example.com",
        headers={"From": "from@example.com"},
    )
    self.assertEqual(email.message().get_all("From"), ["from@example.com"])
def test_to_header(self):
    """A manually-set To header overrides the `to` argument (#17444)."""
    recipients = ["list-subscriber@example.com", "list-subscriber2@example.com"]
    email = EmailMessage(
        to=recipients,
        headers={"To": "mailing-list@example.com"},
    )
    generated = email.message()
    self.assertEqual(generated.get_all("To"), ["mailing-list@example.com"])
    self.assertEqual(email.to, recipients)
    # Without a manual To header, it defaults to the `to` argument of the
    # constructor.
    email = EmailMessage(to=recipients)
    generated = email.message()
    self.assertEqual(
        generated.get_all("To"),
        ["list-subscriber@example.com, list-subscriber2@example.com"],
    )
    self.assertEqual(email.to, recipients)
def test_to_in_headers_only(self):
    """A To given only via headers still appears in the message."""
    email = EmailMessage(headers={"To": "to@example.com"})
    self.assertEqual(email.message().get_all("To"), ["to@example.com"])
def test_reply_to_header(self):
    """
    Specifying 'Reply-To' in headers should override reply_to.
    """
    email = EmailMessage(
        reply_to=["foo@example.com"],
        headers={"Reply-To": "override@example.com"},
    )
    self.assertEqual(email.message().get_all("Reply-To"), ["override@example.com"])
def test_reply_to_in_headers_only(self):
    """A Reply-To given only via headers still appears in the message."""
    email = EmailMessage(headers={"Reply-To": "reply_to@example.com"})
    self.assertEqual(email.message().get_all("Reply-To"), ["reply_to@example.com"])
def test_lazy_headers(self):
    """Lazy strings are resolved when used as subject or header values."""
    generated = EmailMessage(
        subject=gettext_lazy("subject"),
        headers={"List-Unsubscribe": gettext_lazy("list-unsubscribe")},
    ).message()
    self.assertEqual(generated.get_all("Subject"), ["subject"])
    self.assertEqual(generated.get_all("List-Unsubscribe"), ["list-unsubscribe"])
def test_multiple_message_call(self):
    """
    Regression for #13259 - Make sure that headers are not changed when
    calling EmailMessage.message()
    """
    email = EmailMessage(
        from_email="bounce@example.com",
        headers={"From": "from@example.com"},
    )
    # Generating the message twice must yield the same From header.
    for _ in range(2):
        self.assertEqual(email.message().get_all("From"), ["from@example.com"])
def test_unicode_address_header(self):
    """
    Regression for #11144 - When a to/from/cc header contains Unicode,
    make sure the email addresses are parsed correctly (especially with
    regards to commas)
    """
    cases = [
        ("Firstname Sürname", '"Firstname Sürname" <to@example.com>'),
        ("Sürname, Firstname", '"Sürname, Firstname" <to@example.com>'),
    ]
    for display_name, to_address in cases:
        with self.subTest(to_address=to_address):
            email = EmailMessage(to=[to_address, "other@example.com"])
            parsed = message_from_bytes(email.message().as_bytes())
            self.assertEqual(
                parsed["To"].addresses,
                (
                    Address(
                        display_name=display_name, addr_spec="to@example.com"
                    ),
                    Address(addr_spec="other@example.com"),
                ),
            )
def test_unicode_headers(self):
    """Non-ASCII header values survive encoding and parse back intact."""
    email = EmailMessage(
        subject="Gżegżółka",
        to=["to@example.com"],
        headers={
            "Sender": '"Firstname Sürname" <sender@example.com>',
            "Comments": "My Sürname is non-ASCII",
        },
    )
    # Verify sent headers use RFC 2047 encoded-words (not raw utf-8).
    # The exact encoding details don't matter so long as the result parses
    # to the original values.
    raw = email.message().as_bytes()
    self.assertTrue(raw.isascii())  # not unencoded utf-8.
    parsed = message_from_bytes(raw)
    self.assertEqual(parsed["Subject"], "Gżegżółka")
    self.assertEqual(parsed["Comments"], "My Sürname is non-ASCII")
    self.assertEqual(
        parsed["Sender"].address,
        Address(display_name="Firstname Sürname", addr_spec="sender@example.com"),
    )
def test_non_utf8_headers_multipart(self):
    """
    Make sure headers can be set with a different encoding than utf-8 in
    EmailMultiAlternatives as well.
    """
    to = '"Sürname, Firstname" <to@example.com>'
    email = EmailMultiAlternatives(
        "Message from Firstname Sürname",
        "This is an important message.",
        "from@example.com",
        [to],
        headers={"Date": "Fri, 09 Nov 2001 01:08:47 -0000", "Message-ID": "foo"},
    )
    email.attach_alternative(
        "<p>This is an <strong>important</strong> message.</p>", "text/html"
    )
    email.encoding = "iso-8859-1"
    # Verify sent headers use RFC 2047 encoded-words, not raw utf-8.
    raw = email.message().as_bytes()
    self.assertTrue(raw.isascii())
    # Verify sent headers parse to original values.
    parsed = message_from_bytes(raw)
    self.assertEqual(parsed["Subject"], "Message from Firstname Sürname")
    self.assertEqual(
        parsed["To"].addresses,
        (Address(display_name="Sürname, Firstname", addr_spec="to@example.com"),),
    )
def test_multipart_with_attachments(self):
    """
    EmailMultiAlternatives includes alternatives if the body is empty and
    it has attachments.
    """
    html_content = "<p>This is <strong>html</strong></p>"
    email = EmailMultiAlternatives(body="")
    email.attach_alternative(html_content, "text/html")
    email.attach("example.txt", "Text file content", "text/plain")
    self.assertIn(html_content, email.message().as_string())
def test_alternatives(self):
    """attach_alternative() stores an EmailAlternative and renders it."""
    html_content = "<p>This is <strong>html</strong></p>"
    mime_type = "text/html"
    email = EmailMultiAlternatives()
    email.attach_alternative(html_content, mime_type)
    alt = email.alternatives[0]
    self.assertIsInstance(alt, EmailAlternative)
    # Tuple-style and named access must agree.
    self.assertEqual((alt[0], alt[1]), (html_content, mime_type))
    self.assertEqual((alt.content, alt.mimetype), (html_content, mime_type))
    self.assertIn(html_content, email.message().as_string())
def test_alternatives_constructor(self):
    """Alternatives may be passed to the constructor as EmailAlternative."""
    html_content = "<p>This is <strong>html</strong></p>"
    mime_type = "text/html"
    email = EmailMultiAlternatives(
        alternatives=[EmailAlternative(html_content, mime_type)]
    )
    alt = email.alternatives[0]
    self.assertIsInstance(alt, EmailAlternative)
    # Tuple-style and named access must agree.
    self.assertEqual((alt[0], alt[1]), (html_content, mime_type))
    self.assertEqual((alt.content, alt.mimetype), (html_content, mime_type))
    self.assertIn(html_content, email.message().as_string())
def test_alternatives_constructor_from_tuple(self):
    """Plain (content, mimetype) tuples are promoted to EmailAlternative."""
    html_content = "<p>This is <strong>html</strong></p>"
    mime_type = "text/html"
    email = EmailMultiAlternatives(alternatives=[(html_content, mime_type)])
    alt = email.alternatives[0]
    self.assertIsInstance(alt, EmailAlternative)
    # Tuple-style and named access must agree.
    self.assertEqual((alt[0], alt[1]), (html_content, mime_type))
    self.assertEqual((alt.content, alt.mimetype), (html_content, mime_type))
    self.assertIn(html_content, email.message().as_string())
def test_alternative_alternatives(self):
    """
    Alternatives can be attached as either string or bytes
    and need not use a text/* mimetype.
    """
    cases = [
        # (mimetype, content, expected decoded payload)
        ("application/x-ccmail-rtf", b"non-text\x07bytes", b"non-text\x07bytes"),
        ("application/x-ccmail-rtf", "non-text\x07string", b"non-text\x07string"),
        ("text/x-amp-html", b"text bytes\n", b"text bytes\n"),
        ("text/x-amp-html", "text string\n", b"text string\n"),
    ]
    for mimetype, content, expected in cases:
        with self.subTest(case=(mimetype, content)):
            email = EmailMultiAlternatives()
            email.attach_alternative(content, mimetype)
            generated = email.message()
            self.assertEqual(generated.get_content_type(), "multipart/alternative")
            part = generated.get_payload()[0]
            self.assertEqual(part.get_content_type(), mimetype)
            self.assertEqual(part.get_payload(decode=True), expected)
def test_alternatives_and_attachment_serializable(self):
    """
    A message with alternatives and attachments round-trips through
    pickle with all of its parts intact.
    """
    html_content = "<p>This is <strong>html</strong></p>"
    mime_type = "text/html"
    msg = EmailMultiAlternatives(alternatives=[(html_content, mime_type)])
    # Fixed: the attachment MIME type was reversed ("plain/text").
    msg.attach("test.txt", "This is plain text.", "text/plain")
    # Alternatives and attachments can be serialized.
    restored = pickle.loads(pickle.dumps(msg))
    self.assertEqual(restored.subject, msg.subject)
    self.assertEqual(restored.body, msg.body)
    self.assertEqual(restored.from_email, msg.from_email)
    self.assertEqual(restored.to, msg.to)
    self.assertEqual(restored.alternatives, msg.alternatives)
    self.assertEqual(restored.attachments, msg.attachments)
def test_none_body(self):
    """A None body is normalized to an empty string."""
    email = EmailMessage("subject", None, "from@example.com", ["to@example.com"])
    self.assertEqual(email.body, "")
    # The modern email API forces trailing newlines on all text/* parts,
    # even an empty body.
    self.assertEqual(email.message().get_payload(), "\n")
@mock.patch("socket.getfqdn", return_value="漢字")
def test_non_ascii_dns_non_unicode_email(self, mocked_getfqdn):
    """A non-ASCII hostname appears punycoded in the Message-ID domain."""
    # Drop the cached hostname so the mocked getfqdn() is consulted.
    delattr(DNS_NAME, "_fqdn")
    email = EmailMessage()
    email.encoding = "iso-8859-1"
    self.assertIn("@xn--p8s937b>", email.message()["Message-ID"])
def test_encoding(self):
    """
    Regression for #12791 - Encode body correctly with other encodings
    than utf-8
    """
    email = EmailMessage(body="Firstname Sürname is a great guy.\n")
    email.encoding = "iso-8859-1"
    generated = email.message()
    self.assertEqual(generated["Content-Type"], 'text/plain; charset="iso-8859-1"')
    self.assertEqual(generated["Content-Transfer-Encoding"], "8bit")
    # The body is actually encoded with iso-8859-1 on the wire.
    raw = generated.as_bytes()
    self.assertIn(b"Firstname S\xfc", raw)
    parsed = message_from_bytes(raw)
    self.assertEqual(parsed.get_content(), "Firstname Sürname is a great guy.\n")
def test_encoding_alternatives(self):
    """
    Encode alternatives correctly with other encodings than utf-8.
    """
    text_content = "Firstname Sürname is a great guy.\n"
    html_content = "<p>Firstname Sürname is a <strong>great</strong> guy.</p>\n"
    email = EmailMultiAlternatives(body=text_content)
    email.encoding = "iso-8859-1"
    email.attach_alternative(html_content, "text/html")
    generated = email.message()
    # Both parts are sent using the specified encoding.
    self.assertEqual(
        generated.get_payload(0)["Content-Type"],
        'text/plain; charset="iso-8859-1"',
    )
    self.assertEqual(
        generated.get_payload(1)["Content-Type"],
        'text/html; charset="iso-8859-1"',
    )
    # Both parts decode to the original content at the receiving end.
    parsed = message_from_bytes(generated.as_bytes())
    self.assertEqual(parsed.get_body(("plain",)).get_content(), text_content)
    self.assertEqual(parsed.get_body(("html",)).get_content(), html_content)
def test_attachments(self):
    """attach() records the attachment and includes it in the message."""
    email = EmailMessage()
    file_name = "example.txt"
    file_content = "Text file content\n"
    mime_type = "text/plain"
    email.attach(file_name, file_content, mime_type)
    att = email.attachments[0]
    # Tuple-style and named access must agree.
    self.assertEqual((att[0], att[1], att[2]), (file_name, file_content, mime_type))
    self.assertEqual(
        (att.filename, att.content, att.mimetype),
        (file_name, file_content, mime_type),
    )
    decoded = self.get_decoded_attachments(email)
    self.assertEqual(decoded[0], (file_name, file_content, mime_type))
def test_attachments_constructor(self):
    """Attachments may be passed to the constructor as EmailAttachment."""
    file_name = "example.txt"
    file_content = "Text file content\n"
    mime_type = "text/plain"
    email = EmailMessage(
        attachments=[EmailAttachment(file_name, file_content, mime_type)]
    )
    att = email.attachments[0]
    self.assertIsInstance(att, EmailAttachment)
    # Tuple-style and named access must agree.
    self.assertEqual((att[0], att[1], att[2]), (file_name, file_content, mime_type))
    self.assertEqual(
        (att.filename, att.content, att.mimetype),
        (file_name, file_content, mime_type),
    )
    decoded = self.get_decoded_attachments(email)
    self.assertEqual(decoded[0], (file_name, file_content, mime_type))
def test_attachments_constructor_from_tuple(self):
    """Plain attachment tuples are promoted to EmailAttachment."""
    file_name = "example.txt"
    file_content = "Text file content\n"
    mime_type = "text/plain"
    email = EmailMessage(attachments=[(file_name, file_content, mime_type)])
    att = email.attachments[0]
    self.assertIsInstance(att, EmailAttachment)
    # Tuple-style and named access must agree.
    self.assertEqual((att[0], att[1], att[2]), (file_name, file_content, mime_type))
    self.assertEqual(
        (att.filename, att.content, att.mimetype),
        (file_name, file_content, mime_type),
    )
    decoded = self.get_decoded_attachments(email)
    self.assertEqual(decoded[0], (file_name, file_content, mime_type))
def test_attachments_constructor_omit_mimetype(self):
    """
    The mimetype can be omitted from an attachment tuple.
    """
    email = EmailMessage(attachments=[("filename1", "content1")])
    filename, content, mimetype = self.get_decoded_attachments(email)[0]
    self.assertEqual(
        (filename, content, mimetype),
        ("filename1", b"content1", "application/octet-stream"),
    )
def test_attachments_with_alternative_parts(self):
    """
    Message with attachment and alternative has correct structure (#9367).
    """
    email = EmailMultiAlternatives(body="This is an important message.")
    email.attach_alternative(
        "<p>This is an <strong>important</strong> message.</p>", "text/html"
    )
    email.attach("an attachment.pdf", b"%PDF-1.4.%...", mimetype="application/pdf")
    parsed = message_from_bytes(email.message().as_bytes())
    self.assertTrue(parsed.is_multipart())
    self.assertEqual(parsed.get_content_type(), "multipart/mixed")
    self.assertEqual(parsed.get_default_type(), "text/plain")
    parts = parsed.get_payload()
    self.assertEqual(parts[0].get_content_type(), "multipart/alternative")
    self.assertEqual(parts[1].get_content_type(), "application/pdf")
def test_decoded_attachment_text_MIMEPart(self):
    """A MIMEPart attachment is placed into the message as-is."""
    # See also test_attach_mime_part() and
    # test_attach_mime_part_in_constructor().
    part = MIMEPart()
    part.set_content("content1")
    email = EmailMessage(attachments=[part])
    self.assertEqual(email.message().get_payload()[0], part)
def test_non_ascii_attachment_filename(self):
    """Regression test for #14964"""
    email = EmailMessage(body="Content")
    # Unicode in file name
    email.attach("une pièce jointe.pdf", b"%PDF-1.4.%...", mimetype="application/pdf")
    decoded = self.get_decoded_attachments(email)[0]
    self.assertEqual(decoded.filename, "une pièce jointe.pdf")
def test_attach_file(self):
    """
    Test attaching a file against different mimetypes and make sure that
    a file will be attached and sent in some form even if a mismatched
    mimetype is specified.
    """
    files = (
        # filename, actual mimetype
        ("file.txt", "text/plain"),
        ("file.png", "image/png"),
        ("file_txt", None),
        ("file_png", None),
        ("file_txt.png", "image/png"),
        ("file_png.txt", "text/plain"),
        ("file.eml", "message/rfc822"),
    )
    test_mimetypes = ["text/plain", "image/png", None]
    for basename, real_mimetype in files:
        for mimetype in test_mimetypes:
            with self.subTest(
                basename=basename, real_mimetype=real_mimetype, mimetype=mimetype
            ):
                # Sanity-check the fixture table against Python's guesser.
                self.assertEqual(mimetypes.guess_type(basename)[0], real_mimetype)
                # An explicit mimetype wins; otherwise the guessed type,
                # then the generic binary default.
                expected_mimetype = (
                    mimetype or real_mimetype or "application/octet-stream"
                )
                file_path = Path(__file__).parent / "attachments" / basename
                expected_content = file_path.read_bytes()
                if expected_mimetype.startswith("text/"):
                    try:
                        expected_content = expected_content.decode()
                    except UnicodeDecodeError:
                        # Undecodable "text" falls back to binary.
                        expected_mimetype = "application/octet-stream"
                email = EmailMessage()
                email.attach_file(file_path, mimetype=mimetype)
                # Check EmailMessage.attachments.
                self.assertEqual(len(email.attachments), 1)
                self.assertEqual(email.attachments[0].filename, basename)
                self.assertEqual(email.attachments[0].mimetype, expected_mimetype)
                self.assertEqual(email.attachments[0].content, expected_content)
                # Check attachments in the generated message.
                # (The actual content is not checked as variations in
                # platform line endings and rfc822 refolding complicate the
                # logic.)
                attachments = self.get_decoded_attachments(email)
                self.assertEqual(len(attachments), 1)
                actual = attachments[0]
                self.assertEqual(actual.filename, basename)
                self.assertEqual(actual.mimetype, expected_mimetype)
def test_attach_text_as_bytes(self):
    """
    For text/* attachments, EmailMessage.attach() decodes bytes as UTF-8
    if possible and changes to DEFAULT_ATTACHMENT_MIME_TYPE if not.
    """
    email = EmailMessage()
    # Mimetype guessing identifies these as text/plain from the .txt
    # extensions.
    email.attach("utf8.txt", "ütƒ-8\n".encode())
    email.attach("not-utf8.txt", b"\x86unknown-encoding\n")
    decoded = self.get_decoded_attachments(email)
    self.assertEqual(decoded[0], ("utf8.txt", "ütƒ-8\n", "text/plain"))
    self.assertEqual(
        decoded[1],
        ("not-utf8.txt", b"\x86unknown-encoding\n", "application/octet-stream"),
    )
def test_attach_text_as_bytes_using_property(self):
    """
    The logic described in test_attach_text_as_bytes() also applies
    when directly setting the EmailMessage.attachments property.
    """
    email = EmailMessage()
    email.attachments = [
        ("utf8.txt", "ütƒ-8\n".encode(), "text/plain"),
        ("not-utf8.txt", b"\x86unknown-encoding\n", "text/plain"),
    ]
    # Decode once (the original fetched the attachments twice).
    attachments = self.get_decoded_attachments(email)
    self.assertEqual(len(attachments), 2)
    self.assertEqual(attachments[0], ("utf8.txt", "ütƒ-8\n", "text/plain"))
    self.assertEqual(
        attachments[1],
        ("not-utf8.txt", b"\x86unknown-encoding\n", "application/octet-stream"),
    )
def test_attach_utf8_text_as_bytes(self):
    """
    Non-ASCII characters encoded as valid UTF-8 are correctly transported
    in a form that can be decoded at the receiving end.
    """
    email = EmailMessage()
    email.attach("file.txt", b"\xc3\xa4\n")  # UTF-8 encoded a-umlaut.
    filename, content, mimetype = self.get_decoded_attachments(email)[0]
    # The payload arrives decoded back to text.
    self.assertEqual(
        (filename, content, mimetype), ("file.txt", "ä\n", "text/plain")
    )
def test_attach_non_utf8_text_as_bytes(self):
    """
    Binary data that can't be decoded as UTF-8 overrides the MIME type
    instead of decoding the data.
    """
    email = EmailMessage()
    email.attach("file.txt", b"\xff")  # Invalid UTF-8.
    filename, content, mimetype = self.get_decoded_attachments(email)[0]
    # Content is passed through unmodified.
    self.assertEqual(
        (filename, content, mimetype),
        ("file.txt", b"\xff", "application/octet-stream"),
    )
def test_attach_8bit_rfc822_message_non_ascii(self):
    """
    Attaching a message that uses 8bit content transfer encoding for
    non-ASCII characters should not raise a UnicodeEncodeError (#36119).
    """
    raw_child = dedent(
        """\
        Subject: A message using 8bit CTE
        Content-Type: text/plain; charset=utf-8
        Content-Transfer-Encoding: 8bit
        ¡8-bit content!
        """
    ).encode()
    email = EmailMessage()
    email.attach("attachment.eml", raw_child, "message/rfc822")
    attachments = self.get_raw_attachments(email)
    self.assertEqual(len(attachments), 1)
    self.assertEqual(attachments[0].get_content_type(), "message/rfc822")
    child = attachments[0].get_content()
    self.assertEqual(child.get_content_type(), "text/plain")
    self.assertEqual(child["Content-Transfer-Encoding"], "8bit")
    self.assertEqual(child.get_content().rstrip(), "¡8-bit content!")
# RemovedInDjango70Warning.
def test_attach_mime_image(self):
    """
    EmailMessage.attach() docs: "You can pass it
    a single argument that is a MIMEBase instance."
    """
    msg = (
        "MIMEBase attachments are deprecated."
        " Use an email.message.MIMEPart instead."
    )
    # This also verifies complex attachments with extra header fields.
    email = EmailMessage()
    image = MIMEImage(b"GIF89a...", "gif")
    image["Content-Disposition"] = "inline"
    image["Content-ID"] = "<content-id@example.org>"
    with self.assertWarnsMessage(RemovedInDjango70Warning, msg):
        email.attach(image)
    raw = self.get_raw_attachments(email)
    self.assertEqual(len(raw), 1)
    attached = raw[0]
    self.assertEqual(attached.get_content_type(), "image/gif")
    self.assertEqual(attached.get_content_disposition(), "inline")
    self.assertEqual(attached["Content-ID"], "<content-id@example.org>")
    self.assertEqual(attached.get_content(), b"GIF89a...")
    self.assertIsNone(attached.get_filename())
def test_attach_mime_part(self):
    """
    EmailMessage.attach() docs: "You can pass it
    a single argument that is a MIMEPart object."
    """
    # This also verifies complex attachments with extra header fields.
    email = EmailMessage()
    part = MIMEPart()
    part.set_content(
        b"GIF89a...",
        maintype="image",
        subtype="gif",
        disposition="inline",
        cid="<content-id@example.org>",
    )
    email.attach(part)
    raw = self.get_raw_attachments(email)
    self.assertEqual(len(raw), 1)
    attached = raw[0]
    self.assertEqual(attached.get_content_type(), "image/gif")
    self.assertEqual(attached.get_content_disposition(), "inline")
    self.assertEqual(attached["Content-ID"], "<content-id@example.org>")
    self.assertEqual(attached.get_content(), b"GIF89a...")
    self.assertIsNone(attached.get_filename())
# RemovedInDjango70Warning.
def test_attach_mime_image_in_constructor(self):
    """MIMEBase attachments passed to the constructor are deprecated."""
    msg = (
        "MIMEBase attachments are deprecated."
        " Use an email.message.MIMEPart instead."
    )
    image = MIMEImage(b"\x89PNG...", "png")
    image["Content-Disposition"] = "attachment; filename=test.png"
    with self.assertWarnsMessage(RemovedInDjango70Warning, msg):
        email = EmailMessage(attachments=[image])
    raw = self.get_raw_attachments(email)
    self.assertEqual(len(raw), 1)
    attached = raw[0]
    self.assertEqual(attached.get_content_type(), "image/png")
    self.assertEqual(attached.get_content(), b"\x89PNG...")
    self.assertEqual(attached.get_filename(), "test.png")
def test_attach_mime_part_in_constructor(self):
    """MIMEPart attachments may be passed directly to the constructor."""
    part = MIMEPart()
    part.set_content(
        b"\x89PNG...", maintype="image", subtype="png", filename="test.png"
    )
    email = EmailMessage(attachments=[part])
    raw = self.get_raw_attachments(email)
    self.assertEqual(len(raw), 1)
    attached = raw[0]
    self.assertEqual(attached.get_content_type(), "image/png")
    self.assertEqual(attached.get_content_disposition(), "attachment")
    self.assertEqual(attached.get_filename(), "test.png")
    self.assertEqual(attached.get_content(), b"\x89PNG...")
def test_attach_rfc822_message(self):
    """
    EmailMessage.attach() docs: "If you specify a mimetype of
    message/rfc822, it will also accept django.core.mail.EmailMessage and
    email.message.Message."
    """
    # django.core.mail.EmailMessage
    django_email = EmailMessage("child subject", "child body")
    # email.message.Message
    py_message = PyMessage()
    py_message["Subject"] = "child subject"
    py_message.set_payload("child body")
    # email.message.EmailMessage
    py_email_message = PyEmailMessage()
    py_email_message["Subject"] = "child subject"
    py_email_message.set_content("child body")
    # Each case is a different accepted representation of the same child
    # message; all must produce an equivalent message/rfc822 attachment.
    cases = [
        django_email,
        py_message,
        py_email_message,
        # Should also allow message serialized as str or bytes.
        py_message.as_string(),
        py_message.as_bytes(),
    ]
    for child_message in cases:
        with self.subTest(child_type=child_message.__class__):
            email = EmailMessage("parent message", "parent body")
            email.attach(content=child_message, mimetype="message/rfc822")
            self.assertEqual(len(email.attachments), 1)
            self.assertIsInstance(email.attachments[0], EmailAttachment)
            self.assertEqual(email.attachments[0].mimetype, "message/rfc822")
            # Make sure it is serialized correctly: a message/rfc822
            # attachment whose "body" content (payload) is the
            # "encapsulated" (child) message.
            attachments = self.get_raw_attachments(email)
            self.assertEqual(len(attachments), 1)
            rfc822_attachment = attachments[0]
            self.assertEqual(rfc822_attachment.get_content_type(), "message/rfc822")
            attached_message = rfc822_attachment.get_content()
            self.assertEqual(attached_message["Subject"], "child subject")
            self.assertEqual(attached_message.get_content().rstrip(), "child body")
            # Regression for #18967: Per RFC 2046 5.2.1, "No encoding other
            # than '7bit', '8bit', or 'binary' is permitted for the body of
            # a 'message/rfc822' entity." (Default CTE is "7bit".)
            cte = rfc822_attachment.get("Content-Transfer-Encoding", "7bit")
            self.assertIn(cte, ("7bit", "8bit", "binary"))
            # Any properly declared CTE is allowed for the attached message
            # itself (including quoted-printable or base64). For the plain
            # ASCII content in this test, we'd expect 7bit.
            child_cte = attached_message.get("Content-Transfer-Encoding", "7bit")
            self.assertEqual(child_cte, "7bit")
            self.assertEqual(attached_message.get_content_type(), "text/plain")
def test_attach_mimepart_prohibits_other_params(self):
    """attach() rejects content/mimetype alongside a MIMEPart instance."""
    email_msg = EmailMessage()
    part = MIMEPart()
    part.set_content("content")
    expected = (
        "content and mimetype must not be given when a MIMEPart instance "
        "is provided."
    )
    for kwargs in ({"content": "content"}, {"mimetype": "text/plain"}):
        with self.subTest(kwargs=kwargs):
            with self.assertRaisesMessage(ValueError, expected):
                email_msg.attach(part, **kwargs)
def test_attach_content_is_required(self):
    """attach() with a filename but no content raises ValueError."""
    email_msg = EmailMessage()
    with self.assertRaisesMessage(ValueError, "content must be provided."):
        email_msg.attach("file.txt", mimetype="application/pdf")
def test_dummy_backend(self):
    """
    Make sure that dummy backends returns correct number of sent messages
    """
    connection = dummy.EmailBackend()
    email = EmailMessage(to=["to@example.com"])
    self.assertEqual(connection.send_messages([email] * 3), 3)
def test_arbitrary_keyword(self):
    """
    Make sure that get_connection() accepts arbitrary keyword that might be
    used with custom backends.
    """
    connection = mail.get_connection(fail_silently=True, foo="bar")
    self.assertTrue(connection.fail_silently)
def test_custom_backend(self):
    """Test custom backend defined in this suite."""
    connection = mail.get_connection("mail.custombackend.EmailBackend")
    self.assertTrue(hasattr(connection, "test_outbox"))
    connection.send_messages([EmailMessage(to=["to@example.com"])])
    self.assertEqual(len(connection.test_outbox), 1)
def test_backend_arg(self):
    """Test backend argument of mail.get_connection()"""
    cases = [
        ("django.core.mail.backends.smtp.EmailBackend", smtp.EmailBackend),
        ("django.core.mail.backends.locmem.EmailBackend", locmem.EmailBackend),
        ("django.core.mail.backends.dummy.EmailBackend", dummy.EmailBackend),
        ("django.core.mail.backends.console.EmailBackend", console.EmailBackend),
    ]
    for path, backend_class in cases:
        with self.subTest(path=path):
            self.assertIsInstance(mail.get_connection(path), backend_class)
    # The file-based backend needs a writable directory.
    with tempfile.TemporaryDirectory() as tmp_dir:
        self.assertIsInstance(
            mail.get_connection(
                "django.core.mail.backends.filebased.EmailBackend",
                file_path=tmp_dir,
            ),
            filebased.EmailBackend,
        )
    msg = " not object"
    with self.assertRaisesMessage(TypeError, msg):
        mail.get_connection(
            "django.core.mail.backends.filebased.EmailBackend", file_path=object()
        )
    # Without an argument, the settings default applies.
    self.assertIsInstance(mail.get_connection(), locmem.EmailBackend)
@override_settings(EMAIL_BACKEND="django.core.mail.backends.locmem.EmailBackend")
def test_connection_arg_send_mail(self):
mail.outbox = []
# Send using non-default connection.
connection = mail.get_connection("mail.custombackend.EmailBackend")
send_mail(
"Subject",
"Content",
"from@example.com",
["to@example.com"],
connection=connection,
)
self.assertEqual(mail.outbox, [])
self.assertEqual(len(connection.test_outbox), 1)
self.assertEqual(connection.test_outbox[0].subject, "Subject")
@override_settings(EMAIL_BACKEND="django.core.mail.backends.locmem.EmailBackend")
def test_connection_arg_send_mass_mail(self):
mail.outbox = []
# Send using non-default connection.
connection = mail.get_connection("mail.custombackend.EmailBackend")
send_mass_mail(
[
("Subject1", "Content1", "from1@example.com", ["to1@example.com"]),
("Subject2", "Content2", "from2@example.com", ["to2@example.com"]),
],
connection=connection,
)
self.assertEqual(mail.outbox, [])
self.assertEqual(len(connection.test_outbox), 2)
self.assertEqual(connection.test_outbox[0].subject, "Subject1")
self.assertEqual(connection.test_outbox[1].subject, "Subject2")
@override_settings(
EMAIL_BACKEND="django.core.mail.backends.locmem.EmailBackend",
ADMINS=["nobody@example.com"],
)
def test_connection_arg_mail_admins(self):
mail.outbox = []
# Send using non-default connection.
connection = mail.get_connection("mail.custombackend.EmailBackend")
mail_admins("Admin message", "Content", connection=connection)
self.assertEqual(mail.outbox, [])
self.assertEqual(len(connection.test_outbox), 1)
self.assertEqual(connection.test_outbox[0].subject, "[Django] Admin message")
@override_settings(
EMAIL_BACKEND="django.core.mail.backends.locmem.EmailBackend",
MANAGERS=["nobody@example.com"],
)
def test_connection_arg_mail_managers(self):
mail.outbox = []
# Send using non-default connection.
connection = mail.get_connection("mail.custombackend.EmailBackend")
mail_managers("Manager message", "Content", connection=connection)
self.assertEqual(mail.outbox, [])
self.assertEqual(len(connection.test_outbox), 1)
self.assertEqual(connection.test_outbox[0].subject, "[Django] Manager message")
def test_dont_mangle_from_in_body(self):
# Regression for #13433 - Make sure that EmailMessage doesn't mangle
# 'From ' in message body.
email = EmailMessage(body="From the future")
self.assertNotIn(b">From the future", email.message().as_bytes())
def test_body_content_transfer_encoding(self):
# Shouldn't use base64 or quoted-printable, instead should detect it
# can represent content with 7-bit data (#3472, #11212).
msg = EmailMessage(body="Body with only ASCII characters.")
s = msg.message().as_bytes()
self.assertIn(b"Content-Transfer-Encoding: 7bit", s)
# Shouldn't use base64 or quoted-printable, instead should detect
# it can represent content with 8-bit data.
msg = EmailMessage(body="Body with latin characters: àáä.")
s = msg.message().as_bytes()
self.assertIn(b"Content-Transfer-Encoding: 8bit", s)
# Long body lines that require folding should use quoted-printable or
# base64, whichever is shorter.
msg = EmailMessage(
body=(
"Body with non latin characters: А Б В Г Д Е Ж Ѕ З И І К Л М Н О П.\n"
"Because it has a line > 78 utf-8 octets, it should be folded, and "
"must then be encoded using the shorter of quoted-printable or base64."
),
)
s = msg.message().as_bytes()
self.assertIn(b"Content-Transfer-Encoding: quoted-printable", s)
# RemovedInDjango70Warning.
@ignore_warnings(category=RemovedInDjango70Warning)
def test_sanitize_address(self):
"""Email addresses are properly sanitized."""
# Tests the internal sanitize_address() function. Many of these cases
# are duplicated in test_address_header_handling(), which verifies
# headers in the generated message.
from django.core.mail.message import sanitize_address
for email_address, encoding, expected_result in (
# ASCII addresses.
("to@example.com", "ascii", "to@example.com"),
("to@example.com", "utf-8", "to@example.com"),
(("A name", "to@example.com"), "ascii", "A name <to@example.com>"),
(
("A name", "to@example.com"),
"utf-8",
"A name <to@example.com>",
),
("localpartonly", "ascii", "localpartonly"),
# ASCII addresses with display names.
("A name <to@example.com>", "ascii", "A name <to@example.com>"),
("A name <to@example.com>", "utf-8", "A name <to@example.com>"),
('"A name" <to@example.com>', "ascii", "A name <to@example.com>"),
('"A name" <to@example.com>', "utf-8", "A name <to@example.com>"),
# Unicode addresses: IDNA encoded domain supported per RFC-5890.
("to@éxample.com", "utf-8", "to@xn--xample-9ua.com"),
# The next three cases should be removed when fixing #35713.
# (An 'encoded-word' localpart is prohibited by RFC-2047, and not
# supported by any known mail service.)
("tó@example.com", "utf-8", "=?utf-8?b?dMOz?=@example.com"),
(
("Tó Example", "tó@example.com"),
"utf-8",
"=?utf-8?q?T=C3=B3_Example?= <=?utf-8?b?dMOz?=@example.com>",
),
(
"Tó Example <tó@example.com>",
"utf-8",
# (Not RFC-2047 compliant.)
"=?utf-8?q?T=C3=B3_Example?= <=?utf-8?b?dMOz?=@example.com>",
),
# IDNA addresses with display names.
(
"To Example <to@éxample.com>",
"ascii",
"To Example <to@xn--xample-9ua.com>",
),
(
"To Example <to@éxample.com>",
"utf-8",
"To Example <to@xn--xample-9ua.com>",
),
# Addresses with two @ signs.
('"to@other.com"@example.com', "utf-8", r'"to@other.com"@example.com'),
(
'"to@other.com" <to@example.com>',
"utf-8",
'"to@other.com" <to@example.com>',
),
(
("To Example", "to@other.com@example.com"),
"utf-8",
'To Example <"to@other.com"@example.com>',
),
# Addresses with long unicode display names.
(
"Tó Example very long" * 4 + " <to@example.com>",
"utf-8",
"=?utf-8?q?T=C3=B3_Example_very_longT=C3=B3_Example_very_longT"
"=C3=B3_Example_?=\n"
" =?utf-8?q?very_longT=C3=B3_Example_very_long?= "
"<to@example.com>",
),
(
("Tó Example very long" * 4, "to@example.com"),
"utf-8",
"=?utf-8?q?T=C3=B3_Example_very_longT=C3=B3_Example_very_longT"
"=C3=B3_Example_?=\n"
" =?utf-8?q?very_longT=C3=B3_Example_very_long?= "
"<to@example.com>",
),
# Address with long display name and unicode domain.
(
("To Example very long" * 4, "to@exampl€.com"),
"utf-8",
"To Example very longTo Example very longTo Example very longT"
"o Example very\n"
" long <to@xn--exampl-nc1c.com>",
),
):
with self.subTest(email_address=email_address, encoding=encoding):
self.assertEqual(
sanitize_address(email_address, encoding), expected_result
)
# RemovedInDjango70Warning.
@ignore_warnings(category=RemovedInDjango70Warning)
def test_sanitize_address_invalid(self):
# Tests the internal sanitize_address() function. Note that Django's
# EmailMessage.message() will not catch these cases, as it only calls
# sanitize_address() if an address also includes non-ASCII chars.
# Django detects these cases in the SMTP EmailBackend during sending.
# See SMTPBackendTests.test_avoids_sending_to_invalid_addresses()
# below.
from django.core.mail.message import sanitize_address
for email_address in (
# Invalid address with two @ signs.
"to@other.com@example.com",
# Invalid address without the quotes.
"to@other.com <to@example.com>",
# Other invalid addresses.
"@",
"to@",
"@example.com",
("", ""),
):
with self.subTest(email_address=email_address):
with self.assertRaisesMessage(ValueError, "Invalid address"):
sanitize_address(email_address, encoding="utf-8")
# RemovedInDjango70Warning.
@ignore_warnings(category=RemovedInDjango70Warning)
def test_sanitize_address_header_injection(self):
# Tests the internal sanitize_address() function. These cases are
# duplicated in test_address_header_handling(), which verifies headers
# in the generated message.
from django.core.mail.message import sanitize_address
msg = "Invalid address; address parts cannot contain newlines."
tests = [
"Name\nInjection <to@example.com>",
("Name\nInjection", "to@xample.com"),
"Name <to\ninjection@example.com>",
("Name", "to\ninjection@example.com"),
]
for email_address in tests:
with self.subTest(email_address=email_address):
with self.assertRaisesMessage(ValueError, msg):
sanitize_address(email_address, encoding="utf-8")
def test_address_header_handling(self):
# This verifies the modern email API's address header handling.
cases = [
# (address, expected_display_name, expected_addr_spec)
("to@example.com", "", "to@example.com"),
# Addresses with display-names.
("A name <to@example.com>", "A name", "to@example.com"),
('"A name" <to@example.com>', "A name", "to@example.com"),
(
'"Comma, requires quotes" <to@example.com>',
"Comma, requires quotes",
"to@example.com",
),
('"to@other.com" <to@example.com>', "to@other.com", "to@example.com"),
# Non-ASCII addr-spec: IDNA encoding for domain.
# (Note: no RFC permits encoding a non-ASCII localpart.)
("to@éxample.com", "", "to@xn--xample-9ua.com"),
(
"To Example <to@éxample.com>",
"To Example",
"to@xn--xample-9ua.com",
),
# Pre-encoded IDNA domain is left as is.
# (Make sure IDNA 2008 is not downgraded to IDNA 2003.)
("to@xn--fa-hia.example.com", "", "to@xn--fa-hia.example.com"),
(
"<to@xn--10cl1a0b660p.example.com>",
"",
"to@xn--10cl1a0b660p.example.com",
),
(
'"Display, Name" <to@xn--nxasmm1c.example.com>',
"Display, Name",
"to@xn--nxasmm1c.example.com",
),
# Non-ASCII display-name.
("Tó Example <to@example.com>", "Tó Example", "to@example.com"),
# Addresses with two @ signs (quoted-string localpart).
('"to@other.com"@example.com', "", '"to@other.com"@example.com'),
(
'To Example <"to@other.com"@example.com>',
"To Example",
'"to@other.com"@example.com',
),
# Addresses with long non-ASCII display names.
(
"Tó Example very long" * 4 + " <to@example.com>",
"Tó Example very long" * 4,
"to@example.com",
),
# Address with long display name and non-ASCII domain.
(
"To Example very long" * 4 + " <to@exampl€.com>",
"To Example very long" * 4,
"to@xn--exampl-nc1c.com",
),
]
for address, name, addr in cases:
with self.subTest(address=address):
email = EmailMessage(to=[address])
parsed = message_from_bytes(email.message().as_bytes())
actual = parsed["To"].addresses
expected = (Address(display_name=name, addr_spec=addr),)
self.assertEqual(actual, expected)
def test_address_header_injection(self):
msg = "Header values may not contain linefeed or carriage return characters"
cases = [
"Name\nInjection <to@example.com>",
'"Name\nInjection" <to@example.com>',
'"Name\rInjection" <to@example.com>',
'"Name\r\nInjection" <to@example.com>',
"Name <to\ninjection@example.com>",
"to\ninjection@example.com",
]
# Structured address header fields (from RFC 5322 3.6.x).
headers = [
"From",
"Sender",
"Reply-To",
"To",
"Cc",
# "Bcc" is not checked by EmailMessage.message().
# See SMTPBackendTests.test_avoids_sending_to_invalid_addresses().
"Resent-From",
"Resent-Sender",
"Resent-To",
"Resent-Cc",
"Resent-Bcc",
]
for header in headers:
for email_address in cases:
with self.subTest(header=header, email_address=email_address):
# Construct an EmailMessage with header set to
# email_address. Specific constructor params vary by
# header.
if header == "From":
email = EmailMessage(from_email=email_address)
elif header in ("To", "Cc", "Bcc", "Reply-To"):
param = header.lower().replace("-", "_")
email = EmailMessage(**{param: [email_address]})
else:
email = EmailMessage(headers={header: email_address})
with self.assertRaisesMessage(ValueError, msg):
email.message()
def test_localpart_only_address(self):
"""
Django allows sending to a localpart-only email address
(without @domain). This is not a valid RFC 822/2822/5322 addr-spec, but
is accepted by some SMTP servers for local delivery.
Regression for #15042.
"""
email = EmailMessage(to=["localpartonly"])
parsed = message_from_bytes(email.message().as_bytes())
self.assertEqual(
parsed["To"].addresses, (Address(username="localpartonly", domain=""),)
)
def test_email_multi_alternatives_content_mimetype_none(self):
email_msg = EmailMultiAlternatives()
msg = "Both content and mimetype must be provided."
with self.assertRaisesMessage(ValueError, msg):
email_msg.attach_alternative(None, "text/html")
with self.assertRaisesMessage(ValueError, msg):
email_msg.attach_alternative("<p>content</p>", None)
def test_mime_structure(self):
"""
Check generated messages have the expected MIME parts and nesting.
"""
html_body = EmailAlternative("<p>HTML</p>", "text/html")
image = EmailAttachment("image.gif", b"\x89PNG...", "image/png")
rfc822_attachment = EmailAttachment(
None, EmailMessage(body="text"), "message/rfc822"
)
cases = [
# name, email (EmailMessage or subclass), expected structure
(
"single body",
EmailMessage(body="text"),
"""
text/plain
""",
),
(
"single body with attachment",
EmailMessage(body="text", attachments=[image]),
"""
multipart/mixed
text/plain
image/png
""",
),
(
"alternative bodies",
EmailMultiAlternatives(body="text", alternatives=[html_body]),
"""
multipart/alternative
text/plain
text/html
""",
),
(
"alternative bodies with attachments",
EmailMultiAlternatives(
body="text", alternatives=[html_body], attachments=[image]
),
"""
multipart/mixed
multipart/alternative
text/plain
text/html
image/png
""",
),
(
"alternative bodies with rfc822 attachment",
EmailMultiAlternatives(
body="text",
alternatives=[html_body],
attachments=[rfc822_attachment],
),
"""
multipart/mixed
multipart/alternative
text/plain
text/html
message/rfc822
text/plain
""",
),
(
"attachment only",
EmailMessage(attachments=[image]),
# Avoid empty text/plain body.
"""
multipart/mixed
image/png
""",
),
(
"alternative only",
EmailMultiAlternatives(alternatives=[html_body]),
# Avoid empty text/plain body.
"""
multipart/alternative
text/html
""",
),
(
"alternative and attachment only",
EmailMultiAlternatives(alternatives=[html_body], attachments=[image]),
"""
multipart/mixed
multipart/alternative
text/html
image/png
""",
),
(
"empty EmailMessage",
EmailMessage(),
"""
text/plain
""",
),
(
"empty EmailMultiAlternatives",
EmailMultiAlternatives(),
"""
text/plain
""",
),
]
for name, email, expected in cases:
expected = dedent(expected).lstrip()
with self.subTest(name=name):
message = email.message()
structure = self.get_message_structure(message)
self.assertEqual(structure, expected)
def test_body_contains(self):
email_msg = EmailMultiAlternatives()
email_msg.body = "I am content."
self.assertIs(email_msg.body_contains("I am"), True)
self.assertIs(email_msg.body_contains("I am content."), True)
email_msg.attach_alternative("<p>I am different content.</p>", "text/html")
self.assertIs(email_msg.body_contains("I am"), True)
self.assertIs(email_msg.body_contains("I am content."), False)
self.assertIs(email_msg.body_contains("<p>I am different content.</p>"), False)
def test_body_contains_alternative_non_text(self):
email_msg = EmailMultiAlternatives()
email_msg.body = "I am content."
email_msg.attach_alternative("I am content.", "text/html")
email_msg.attach_alternative(b"I am a song.", "audio/mpeg")
self.assertIs(email_msg.body_contains("I am content"), True)
def test_all_params_optional(self):
"""
EmailMessage class docs: "All parameters are optional"
"""
email = EmailMessage()
self.assertIsInstance(email.message(), PyMessage) # force serialization.
email = EmailMultiAlternatives()
self.assertIsInstance(email.message(), PyMessage) # force serialization.
def test_positional_arguments_order(self):
"""
EmailMessage class docs: "… is initialized with the following
parameters (in the given order, if positional arguments are used)."
"""
connection = mail.get_connection()
email = EmailMessage(
# (If you need to insert/remove/reorder any params here,
# that indicates a breaking change to documented behavior.)
"subject",
"body\n",
"from@example.com",
["to@example.com"],
# (New options can be added below here as keyword-only args.)
bcc=["bcc@example.com"],
connection=connection,
attachments=[EmailAttachment("file.txt", "attachment\n", "text/plain")],
headers={"X-Header": "custom header"},
cc=["cc@example.com"],
reply_to=["reply-to@example.com"],
)
message = email.message()
self.assertEqual(message.get_all("Subject"), ["subject"])
self.assertEqual(message.get_all("From"), ["from@example.com"])
self.assertEqual(message.get_all("To"), ["to@example.com"])
self.assertEqual(message.get_all("X-Header"), ["custom header"])
self.assertEqual(message.get_all("Cc"), ["cc@example.com"])
self.assertEqual(message.get_all("Reply-To"), ["reply-to@example.com"])
self.assertEqual(message.get_payload(0).get_payload(), "body\n")
self.assertEqual(
self.get_decoded_attachments(email),
[("file.txt", "attachment\n", "text/plain")],
)
self.assertEqual(
email.recipients(), ["to@example.com", "cc@example.com", "bcc@example.com"]
)
self.assertIs(email.get_connection(), connection)
def test_all_params_can_be_set_before_send(self):
"""
EmailMessage class docs: "All parameters … can be set at any time
prior to calling the send() method."
"""
# This is meant to verify EmailMessage.__init__() doesn't apply any
# special processing that would be missing for properties set later.
original_connection = mail.get_connection(username="original")
new_connection = mail.get_connection(username="new")
email = EmailMessage(
"original subject",
"original body\n",
"original-from@example.com",
["original-to@example.com"],
bcc=["original-bcc@example.com"],
connection=original_connection,
attachments=[
EmailAttachment("original.txt", "original attachment\n", "text/plain")
],
headers={"X-Header": "original header"},
cc=["original-cc@example.com"],
reply_to=["original-reply-to@example.com"],
)
email.subject = "new subject"
email.body = "new body\n"
email.from_email = "new-from@example.com"
email.to = ["new-to@example.com"]
email.bcc = ["new-bcc@example.com"]
email.connection = new_connection
image = MIMEPart()
image.set_content(b"GIF89a...", "image", "gif")
email.attachments = [
("new1.txt", "new attachment 1\n", "text/plain"), # plain tuple
EmailAttachment("new2.txt", "new attachment 2\n", "text/csv"),
image,
]
email.extra_headers = {"X-Header": "new header"}
email.cc = ["new-cc@example.com"]
email.reply_to = ["new-reply-to@example.com"]
message = email.message()
self.assertEqual(message.get_all("Subject"), ["new subject"])
self.assertEqual(message.get_all("From"), ["new-from@example.com"])
self.assertEqual(message.get_all("To"), ["new-to@example.com"])
self.assertEqual(message.get_all("X-Header"), ["new header"])
self.assertEqual(message.get_all("Cc"), ["new-cc@example.com"])
self.assertEqual(message.get_all("Reply-To"), ["new-reply-to@example.com"])
self.assertEqual(message.get_payload(0).get_payload(), "new body\n")
self.assertEqual(
self.get_decoded_attachments(email),
[
("new1.txt", "new attachment 1\n", "text/plain"),
("new2.txt", "new attachment 2\n", "text/csv"),
(None, b"GIF89a...", "image/gif"),
],
)
self.assertEqual(
email.recipients(),
["new-to@example.com", "new-cc@example.com", "new-bcc@example.com"],
)
self.assertIs(email.get_connection(), new_connection)
self.assertNotIn("original", message.as_string())
def test_message_is_python_email_message(self):
"""
EmailMessage.message() docs: "returns a Python
email.message.EmailMessage object."
"""
email = EmailMessage()
message = email.message()
self.assertIsInstance(message, PyMessage)
self.assertEqual(message.policy, policy.default)
def test_message_policy_smtputf8(self):
# With SMTPUTF8, the message uses utf-8 directly in headers (not
# RFC 2047 encoded-words). Note this is the only spec-compliant way
# to send to a non-ASCII localpart.
email = EmailMessage(
subject="Detta ämne innehåller icke-ASCII-tecken",
to=["nøn-åscîi@example.com"],
)
message = email.message(policy=policy.SMTPUTF8)
self.assertEqual(message.policy, policy.SMTPUTF8)
msg_bytes = message.as_bytes()
self.assertIn(
"Subject: Detta ämne innehåller icke-ASCII-tecken".encode(), msg_bytes
)
self.assertIn("To: nøn-åscîi@example.com".encode(), msg_bytes)
self.assertNotIn(b"=?utf-8?", msg_bytes) # encoded-word prefix
def test_message_policy_cte_7bit(self):
"""
Allows a policy that requires 7bit encodings.
"""
email = EmailMessage(body="Detta innehåller icke-ASCII-tecken")
email.attach("file.txt", "يحتوي هذا المرفق على أحرف غير ASCII")
# Uses 8bit by default. (Test pre-condition.)
self.assertIn(b"Content-Transfer-Encoding: 8bit", email.message().as_bytes())
# Uses something 7bit compatible when policy requires it. Should pick
# the shorter of quoted-printable (for this body) or base64 (for this
# attachment), but must not use 8bit. (Decoding to "ascii" verifies
# that.)
policy_7bit = policy.default.clone(cte_type="7bit")
msg_bytes = email.message(policy=policy_7bit).as_bytes()
msg_ascii = msg_bytes.decode("ascii")
self.assertIn("Content-Transfer-Encoding: quoted-printable", msg_ascii)
self.assertIn("Content-Transfer-Encoding: base64", msg_ascii)
self.assertNotIn("Content-Transfer-Encoding: 8bit", msg_ascii)
def test_message_policy_compat32(self):
"""
Although EmailMessage.message() doesn't support policy=compat32
(because compat32 doesn't support modern APIs), compat32 _can_ be
used with as_bytes() or as_string() on the resulting message.
"""
# This subject results in different (but equivalent) RFC 2047 encoding
# with compat32 vs. email.policy.default.
email = EmailMessage(subject="Detta ämne innehåller icke-ASCII-tecken")
message = email.message()
self.assertIn(
b"Subject: =?utf-8?q?Detta_=C3=A4mne_inneh=C3=A5ller_icke-ASCII-tecken?=\n",
message.as_bytes(policy=policy.compat32),
)
self.assertIn(
"Subject: =?utf-8?q?Detta_=C3=A4mne_inneh=C3=A5ller_icke-ASCII-tecken?=\n",
message.as_string(policy=policy.compat32),
)
# RemovedInDjango70Warning.
| MailTests |
python | sympy__sympy | sympy/core/logic.py | {
"start": 7836,
"end": 8939
} | class ____(Logic):
def __new__(cls, *args):
bargs = []
for a in args:
if a == cls.op_x_notx:
return a
elif a == (not cls.op_x_notx):
continue # skip this argument
bargs.append(a)
args = sorted(set(cls.flatten(bargs)), key=hash)
for a in args:
if Not(a) in args:
return cls.op_x_notx
if len(args) == 1:
return args.pop()
elif len(args) == 0:
return not cls.op_x_notx
return Logic.__new__(cls, *args)
@classmethod
def flatten(cls, args):
# quick-n-dirty flattening for And and Or
args_queue = list(args)
res = []
while True:
try:
arg = args_queue.pop(0)
except IndexError:
break
if isinstance(arg, Logic):
if isinstance(arg, cls):
args_queue.extend(arg.args)
continue
res.append(arg)
args = tuple(res)
return args
| AndOr_Base |
python | PyCQA__pylint | pylint/checkers/format.py | {
"start": 4191,
"end": 28293
} | class ____(BaseTokenChecker, BaseRawFileChecker):
"""Formatting checker.
Checks for :
* unauthorized constructions
* strict indentation
* line length
"""
# configuration section name
name = "format"
# messages
msgs = MSGS
# configuration options
# for available dict keys/values see the optik parser 'add_option' method
options = (
(
"max-line-length",
{
"default": 100,
"type": "int",
"metavar": "<int>",
"help": (
"Maximum number of characters on a single line. "
"Pylint's default of 100 is based on PEP 8's guidance that teams "
"may choose line lengths up to 99 characters."
),
},
),
(
"ignore-long-lines",
{
"type": "regexp",
"metavar": "<regexp>",
"default": r"^\s*(# )?<?https?://\S+>?$",
"help": (
"Regexp for a line that is allowed to be longer than the limit."
),
},
),
(
"single-line-if-stmt",
{
"default": False,
"type": "yn",
"metavar": "<y or n>",
"help": (
"Allow the body of an if to be on the same "
"line as the test if there is no else."
),
},
),
(
"single-line-class-stmt",
{
"default": False,
"type": "yn",
"metavar": "<y or n>",
"help": (
"Allow the body of a class to be on the same "
"line as the declaration if body contains "
"single statement."
),
},
),
(
"max-module-lines",
{
"default": 1000,
"type": "int",
"metavar": "<int>",
"help": "Maximum number of lines in a module.",
},
),
(
"indent-string",
{
"default": " ",
"type": "non_empty_string",
"metavar": "<string>",
"help": "String used as indentation unit. This is usually "
'" " (4 spaces) or "\\t" (1 tab).',
},
),
(
"indent-after-paren",
{
"type": "int",
"metavar": "<int>",
"default": 4,
"help": "Number of spaces of indent required inside a hanging "
"or continued line.",
},
),
(
"expected-line-ending-format",
{
"type": "choice",
"metavar": "<empty or LF or CRLF>",
"default": "",
"choices": ["", "LF", "CRLF"],
"help": (
"Expected format of line ending, "
"e.g. empty (any line ending), LF or CRLF."
),
},
),
)
def __init__(self, linter: PyLinter) -> None:
super().__init__(linter)
self._lines: dict[int, str] = {}
self._visited_lines: dict[int, Literal[1, 2]] = {}
def new_line(self, tokens: TokenWrapper, line_end: int, line_start: int) -> None:
"""A new line has been encountered, process it if necessary."""
if _last_token_on_line_is(tokens, line_end, ";"):
self.add_message("unnecessary-semicolon", line=tokens.start_line(line_end))
line_num = tokens.start_line(line_start)
line = tokens.line(line_start)
if tokens.type(line_start) not in _JUNK_TOKENS:
self._lines[line_num] = line.split("\n")[0]
self.check_lines(tokens, line_start, line, line_num)
def process_module(self, node: nodes.Module) -> None:
pass
# pylint: disable-next = too-many-return-statements, too-many-branches
def _check_keyword_parentheses(
self, tokens: list[tokenize.TokenInfo], start: int
) -> None:
"""Check that there are not unnecessary parentheses after a keyword.
Parens are unnecessary if there is exactly one balanced outer pair on a
line and contains no commas (i.e. is not a tuple).
Args:
tokens: The entire list of Tokens.
start: The position of the keyword in the token list.
"""
# If the next token is not a paren, we're fine.
if tokens[start + 1].string != "(":
return
if (
tokens[start].string == "not"
and start > 0
and tokens[start - 1].string == "is"
):
# If this is part of an `is not` expression, we have a binary operator
# so the parentheses are not necessarily redundant.
return
found_and_or = False
contains_walrus_operator = False
walrus_operator_depth = 0
contains_double_parens = 0
depth = 0
keyword_token = str(tokens[start].string)
line_num = tokens[start].start[0]
for i in range(start, len(tokens) - 1):
token = tokens[i]
# If we hit a newline, then assume any parens were for continuation.
if token.type == tokenize.NL:
return
# Since the walrus operator doesn't exist below python3.8, the tokenizer
# generates independent tokens
if (
token.string == ":=" # <-- python3.8+ path
or token.string + tokens[i + 1].string == ":="
):
contains_walrus_operator = True
walrus_operator_depth = depth
if token.string == "(":
depth += 1
if tokens[i + 1].string == "(":
contains_double_parens = 1
elif token.string == ")":
depth -= 1
if depth:
if contains_double_parens and tokens[i + 1].string == ")":
# For walrus operators in `if (not)` conditions and comprehensions
if keyword_token in {"in", "if", "not"}:
continue
return
contains_double_parens -= 1
continue
# ')' can't happen after if (foo), since it would be a syntax error.
if tokens[i + 1].string in {":", ")", "]", "}", "in"} or tokens[
i + 1
].type in {tokenize.NEWLINE, tokenize.ENDMARKER, tokenize.COMMENT}:
if contains_walrus_operator and walrus_operator_depth - 1 == depth:
return
# The empty tuple () is always accepted.
if i == start + 2:
return
if found_and_or:
return
if keyword_token == "in":
# This special case was added in https://github.com/pylint-dev/pylint/pull/4948
# but it could be removed in the future. Avoid churn for now.
return
self.add_message(
"superfluous-parens", line=line_num, args=keyword_token
)
return
elif depth == 1:
match token[1]:
case ",":
# This is a tuple, which is always acceptable.
return
case "and" | "or":
# 'and' and 'or' are the only boolean operators with lower precedence
# than 'not', so parens are only required when they are found.
found_and_or = True
case "yield":
# A yield inside an expression must always be in parentheses,
# quit early without error.
return
case "for":
# A generator expression always has a 'for' token in it, and
# the 'for' token is only legal inside parens when it is in a
# generator expression. The parens are necessary here, so bail
# without an error.
return
case "else":
# A generator expression can have an 'else' token in it.
# We check the rest of the tokens to see if any problems occur after
# the 'else'.
if "(" in (i.string for i in tokens[i:]):
self._check_keyword_parentheses(tokens[i:], 0)
return
def process_tokens(self, tokens: list[tokenize.TokenInfo]) -> None:
"""Process tokens and search for:
- too long lines (i.e. longer than <max_chars>)
- optionally bad construct (if given, bad_construct must be a compiled
regular expression).
"""
indents = [0]
check_equal = False
line_num = 0
self._lines = {}
self._visited_lines = {}
self._last_line_ending: str | None = None
last_blank_line_num = 0
for idx, (tok_type, string, start, _, line) in enumerate(tokens):
if start[0] != line_num:
line_num = start[0]
# A tokenizer oddity: if an indented line contains a multi-line
# docstring, the line member of the INDENT token does not contain
# the full line; therefore we check the next token on the line.
if tok_type == tokenize.INDENT:
self.new_line(TokenWrapper(tokens), idx - 1, idx + 1)
else:
self.new_line(TokenWrapper(tokens), idx - 1, idx)
match tok_type:
case tokenize.NEWLINE:
# a program statement, or ENDMARKER, will eventually follow,
# after some (possibly empty) run of tokens of the form
# (NL | COMMENT)* (INDENT | DEDENT+)?
# If an INDENT appears, setting check_equal is wrong, and will
# be undone when we see the INDENT.
check_equal = True
self._check_line_ending(string, line_num)
case tokenize.INDENT:
check_equal = False
self.check_indent_level(string, indents[-1] + 1, line_num)
indents.append(indents[-1] + 1)
case tokenize.DEDENT:
# there's nothing we need to check here! what's important is
# that when the run of DEDENTs ends, the indentation of the
# program statement (or ENDMARKER) that triggered the run is
# equal to what's left at the top of the indents stack
check_equal = True
if len(indents) > 1:
del indents[-1]
case tokenize.NL:
if not line.strip("\r\n"):
last_blank_line_num = line_num
case tokenize.COMMENT | tokenize.ENCODING:
pass
case _:
# This is the first concrete token following a NEWLINE, so it
# must be the first token of the next program statement, or an
# ENDMARKER; the "line" argument exposes the leading white-space
# for this statement; in the case of ENDMARKER, line is an empty
# string, so will properly match the empty string with which the
# "indents" stack was seeded
if check_equal:
check_equal = False
self.check_indent_level(line, indents[-1], line_num)
if tok_type == tokenize.NUMBER and string.endswith("l"):
self.add_message("lowercase-l-suffix", line=line_num)
if string in _KEYWORD_TOKENS:
self._check_keyword_parentheses(tokens, idx)
line_num -= 1 # to be ok with "wc -l"
if line_num > self.linter.config.max_module_lines:
# Get the line where the too-many-lines (or its message id)
# was disabled or default to 1.
message_definition = self.linter.msgs_store.get_message_definitions(
"too-many-lines"
)[0]
names = (message_definition.msgid, "too-many-lines")
lineno = next(
filter(None, (self.linter._pragma_lineno.get(name) for name in names)),
1,
)
self.add_message(
"too-many-lines",
args=(line_num, self.linter.config.max_module_lines),
line=lineno,
)
# See if there are any trailing lines. Do not complain about empty
# files like __init__.py markers.
if line_num == last_blank_line_num and line_num > 0:
self.add_message("trailing-newlines", line=line_num)
def _check_line_ending(self, line_ending: str, line_num: int) -> None:
# check if line endings are mixed
if self._last_line_ending is not None:
# line_ending == "" indicates a synthetic newline added at
# the end of a file that does not, in fact, end with a
# newline.
if line_ending and line_ending != self._last_line_ending:
self.add_message("mixed-line-endings", line=line_num)
self._last_line_ending = line_ending
# check if line ending is as expected
expected = self.linter.config.expected_line_ending_format
if expected:
# reduce multiple \n\n\n\n to one \n
line_ending = reduce(lambda x, y: x + y if x != y else x, line_ending, "")
line_ending = "LF" if line_ending == "\n" else "CRLF"
if line_ending != expected:
self.add_message(
"unexpected-line-ending-format",
args=(line_ending, expected),
line=line_num,
)
@only_required_for_messages("multiple-statements")
def visit_default(self, node: nodes.NodeNG) -> None:
"""Check the node line number and check it if not yet done."""
if not node.is_statement:
return
if not node.root().pure_python:
return
prev_sibl = node.previous_sibling()
if prev_sibl is not None:
prev_line = prev_sibl.fromlineno
elif isinstance(
node.parent, nodes.Try
) and self._is_first_node_in_else_finally_body(node, node.parent):
prev_line = self._infer_else_finally_line_number(node, node.parent)
elif isinstance(node.parent, nodes.Module):
prev_line = 0
else:
prev_line = node.parent.statement().fromlineno
line = node.fromlineno
assert line, node
if prev_line == line and self._visited_lines.get(line) != 2:
self._check_multi_statement_line(node, line)
return
if line in self._visited_lines:
return
try:
tolineno = node.blockstart_tolineno
except AttributeError:
tolineno = node.tolineno
assert tolineno, node
lines: list[str] = []
for line in range(line, tolineno + 1): # noqa: B020
self._visited_lines[line] = 1
try:
lines.append(self._lines[line].rstrip())
except KeyError:
lines.append("")
def _is_first_node_in_else_finally_body(
self, node: nodes.NodeNG, parent: nodes.Try
) -> bool:
if parent.orelse and node == parent.orelse[0]:
return True
if parent.finalbody and node == parent.finalbody[0]:
return True
return False
def _infer_else_finally_line_number(
self, node: nodes.NodeNG, parent: nodes.Try
) -> int:
last_line_of_prev_block = 0
if node in parent.finalbody and parent.orelse:
last_line_of_prev_block = parent.orelse[-1].tolineno
elif parent.handlers and parent.handlers[-1].body:
last_line_of_prev_block = parent.handlers[-1].body[-1].tolineno
elif parent.body:
last_line_of_prev_block = parent.body[-1].tolineno
return last_line_of_prev_block + 1 if last_line_of_prev_block else 0
def _check_multi_statement_line(self, node: nodes.NodeNG, line: int) -> None:
"""Check for lines containing multiple statements."""
match node:
case nodes.With():
# Do not warn about multiple nested context managers in with statements.
return
case nodes.NodeNG(
parent=nodes.If(orelse=[])
) if self.linter.config.single_line_if_stmt:
return
case nodes.NodeNG(
parent=nodes.ClassDef(body=[_])
) if self.linter.config.single_line_class_stmt:
return
case nodes.Expr(
parent=nodes.FunctionDef() | nodes.ClassDef(),
value=nodes.Const(value=value),
) if (
value is Ellipsis
):
# Functions stubs and class with ``Ellipsis`` as body are exempted.
return
self.add_message("multiple-statements", node=node, confidence=HIGH)
self._visited_lines[line] = 2
def check_trailing_whitespace_ending(self, line: str, i: int) -> None:
"""Check that there is no trailing white-space."""
# exclude \f (formfeed) from the rstrip
stripped_line = line.rstrip("\t\n\r\v ")
if line[len(stripped_line) :] not in ("\n", "\r\n"):
self.add_message(
"trailing-whitespace",
line=i,
col_offset=len(stripped_line),
confidence=HIGH,
)
def check_line_length(self, line: str, i: int, checker_off: bool) -> None:
"""Check that the line length is less than the authorized value."""
max_chars = self.linter.config.max_line_length
ignore_long_line = self.linter.config.ignore_long_lines
line = line.rstrip()
if len(line) > max_chars and not ignore_long_line.search(line):
if checker_off:
self.linter.add_ignored_message("line-too-long", i)
else:
self.add_message("line-too-long", line=i, args=(len(line), max_chars))
@staticmethod
def remove_pylint_option_from_lines(options_pattern_obj: Match[str]) -> str:
"""Remove the `# pylint ...` pattern from lines."""
lines = options_pattern_obj.string
purged_lines = (
lines[: options_pattern_obj.start(1)].rstrip()
+ lines[options_pattern_obj.end(1) :]
)
return purged_lines
@staticmethod
def is_line_length_check_activated(pylint_pattern_match_object: Match[str]) -> bool:
"""Return True if the line length check is activated."""
try:
for pragma in parse_pragma(pylint_pattern_match_object.group(2)):
if pragma.action == "disable" and "line-too-long" in pragma.messages:
return False
except PragmaParserError:
# Printing useful information dealing with this error is done in the lint package
pass
return True
@staticmethod
def specific_splitlines(lines: str) -> list[str]:
"""Split lines according to universal newlines except those in a specific
sets.
"""
unsplit_ends = {
"\x0b", # synonym of \v
"\x0c", # synonym of \f
"\x1c",
"\x1d",
"\x1e",
"\x85",
"\u2028",
"\u2029",
}
res: list[str] = []
buffer = ""
for atomic_line in lines.splitlines(True):
if atomic_line[-1] not in unsplit_ends:
res.append(buffer + atomic_line)
buffer = ""
else:
buffer += atomic_line
return res
def check_lines(
self, tokens: TokenWrapper, line_start: int, lines: str, lineno: int
) -> None:
"""Check given lines for potential messages.
Check if lines have:
- a final newline
- no trailing white-space
- less than a maximum number of characters
"""
# we're first going to do a rough check whether any lines in this set
# go over the line limit. If none of them do, then we don't need to
# parse out the pylint options later on and can just assume that these
# lines are clean
# we'll also handle the line ending check here to avoid double-iteration
# unless the line lengths are suspect
max_chars = self.linter.config.max_line_length
split_lines = self.specific_splitlines(lines)
for offset, line in enumerate(split_lines):
if not line.endswith("\n"):
self.add_message("missing-final-newline", line=lineno + offset)
continue
# We don't test for trailing whitespaces in strings
# See https://github.com/pylint-dev/pylint/issues/6936
# and https://github.com/pylint-dev/pylint/issues/3822
if tokens.type(line_start) != tokenize.STRING:
self.check_trailing_whitespace_ending(line, lineno + offset)
# This check is purposefully simple and doesn't rstrip since this is running
# on every line you're checking it's advantageous to avoid doing a lot of work
potential_line_length_warning = any(
len(line) > max_chars for line in split_lines
)
# if there were no lines passing the max_chars config, we don't bother
# running the full line check (as we've met an even more strict condition)
if not potential_line_length_warning:
return
# Line length check may be deactivated through `pylint: disable` comment
mobj = OPTION_PO.search(lines)
checker_off = False
if mobj:
if not self.is_line_length_check_activated(mobj):
checker_off = True
# The 'pylint: disable whatever' should not be taken into account for line length count
lines = self.remove_pylint_option_from_lines(mobj)
# here we re-run specific_splitlines since we have filtered out pylint options above
for offset, line in enumerate(self.specific_splitlines(lines)):
self.check_line_length(line, lineno + offset, checker_off)
def check_indent_level(self, string: str, expected: int, line_num: int) -> None:
"""Return the indent level of the string."""
indent = self.linter.config.indent_string
if indent == "\\t": # \t is not interpreted in the configuration file
indent = "\t"
level = 0
unit_size = len(indent)
while string[:unit_size] == indent:
string = string[unit_size:]
level += 1
suppl = ""
while string and string[0] in " \t":
suppl += string[0]
string = string[1:]
if level != expected or suppl:
i_type = "spaces"
if indent[0] == "\t":
i_type = "tabs"
self.add_message(
"bad-indentation",
line=line_num,
args=(level * unit_size + len(suppl), i_type, expected * unit_size),
)
def register(linter: PyLinter) -> None:
linter.register_checker(FormatChecker(linter))
| FormatChecker |
python | getsentry__sentry | tests/sentry/api/helpers/test_group_index.py | {
"start": 14763,
"end": 20994
} | class ____(TestCase):
@patch("sentry.api.helpers.group_index.update.handle_merge")
def test_simple(self, mock_handle_merge: MagicMock) -> None:
group_ids = [self.create_group().id, self.create_group().id]
project = self.project
request = self.make_request(method="PUT")
request.user = self.user
request.data = {"merge": 1}
request.GET = {"id": group_ids, "project": [project.id]}
group_list = get_group_list(self.organization.id, [project], group_ids)
update_groups(request, group_list)
call_args = mock_handle_merge.call_args.args
assert len(call_args) == 3
# Have to convert to ids because first argument is a queryset
assert [group.id for group in call_args[0]] == group_ids
assert call_args[1] == {project.id: project}
assert call_args[2] == self.user
@patch("sentry.api.helpers.group_index.update.handle_merge")
def test_multiple_projects(self, mock_handle_merge: MagicMock) -> None:
project1 = self.create_project()
project2 = self.create_project()
projects = [project1, project2]
project_ids = [project.id for project in projects]
group_ids = [
self.create_group(project1).id,
self.create_group(project2).id,
]
request = self.make_request(method="PUT")
request.user = self.user
request.data = {"merge": 1}
request.GET = {"id": group_ids, "project": project_ids}
group_list = get_group_list(self.organization.id, projects, group_ids)
response = update_groups(request, group_list)
assert response.data == {"detail": "Merging across multiple projects is not supported"}
assert response.status_code == 400
assert mock_handle_merge.call_count == 0
@patch("sentry.api.helpers.group_index.update.handle_merge")
def test_multiple_groups_same_project(self, mock_handle_merge: MagicMock) -> None:
"""Even if the UI calls with multiple projects, if the groups belong to the same project, we should merge them."""
projects = [self.create_project(), self.create_project()]
proj1 = projects[0]
groups = [self.create_group(proj1), self.create_group(proj1)]
group_ids = [g.id for g in groups]
project_ids = [p.id for p in projects]
request = self.make_request(method="PUT")
request.user = self.user
request.data = {"merge": 1}
# The two groups belong to the same project, so we should be able to merge them, even though
# we're passing multiple project ids
request.GET = {"id": group_ids, "project": project_ids}
group_list = get_group_list(self.organization.id, projects, group_ids)
update_groups(request, group_list)
call_args = mock_handle_merge.call_args.args
assert len(call_args) == 3
# Have to convert to ids because first argument is a queryset
assert [group.id for group in call_args[0]] == group_ids
assert call_args[1] == {proj1.id: proj1}
assert call_args[2] == self.user
@patch("sentry.api.helpers.group_index.update.handle_merge")
def test_no_project_ids_passed(self, mock_handle_merge: MagicMock) -> None:
"""If 'All Projects' is selected in the issue stream, the UI doesn't send project ids, but
we should be able to derive them from the given group ids."""
group_ids = [self.create_group().id, self.create_group().id]
project = self.project
request = self.make_request(method="PUT")
request.user = self.user
request.data = {"merge": 1}
request.GET = {"id": group_ids}
group_list = get_group_list(self.organization.id, [project], group_ids)
update_groups(request, group_list)
call_args = mock_handle_merge.call_args.args
assert len(call_args) == 3
# Have to convert to ids because first argument is a queryset
assert [group.id for group in call_args[0]] == group_ids
assert call_args[1] == {project.id: project}
assert call_args[2] == self.user
def test_metrics(self) -> None:
for referer, expected_referer_tag in [
("https://sentry.io/organizations/dogsaregreat/issues/", "issue stream"),
("https://dogsaregreat.sentry.io/issues/", "issue stream"),
(
"https://sentry.io/organizations/dogsaregreat/issues/12311121/similar/",
"similar issues tab",
),
(
"https://dogsaregreat.sentry.io/issues/12311121/similar/",
"similar issues tab",
),
(
"https://sentry.io/organizations/dogsaregreat/some/other/path/",
"unknown",
),
(
"https://dogsaregreat.sentry.io/some/other/path/",
"unknown",
),
(
"",
"unknown",
),
]:
group_ids = [
self.create_group(
platform="javascript",
metadata={"sdk": {"name_normalized": "sentry.javascript.nextjs"}},
).id,
self.create_group(platform="javascript").id,
]
project = self.project
request = self.make_request(method="PUT")
request.user = self.user
request.data = {"merge": 1}
request.GET = {"id": group_ids, "project": [project.id]}
request.META = {"HTTP_REFERER": referer}
with patch("sentry.api.helpers.group_index.update.metrics.incr") as mock_metrics_incr:
group_list = get_group_list(self.organization.id, [project], group_ids)
update_groups(request, group_list)
mock_metrics_incr.assert_any_call(
"grouping.merge_issues",
sample_rate=1.0,
tags={
"platform": "javascript",
"referer": expected_referer_tag,
"sdk": "sentry.javascript.nextjs",
},
)
| MergeGroupsTest |
python | allegroai__clearml | clearml/backend_api/services/v2_23/frames.py | {
"start": 331861,
"end": 353200
} | class ____(Request):
"""
Return first frame per unique URI for the given dataview specification. Note: 'count_range' option for label rules is not supported and does not affect the returned snippets
:param dataview: Dataview specification
:type dataview: Dataview
:param page_size: The amount of snippets to return for the page. Cannot be
changed after the first call (after the paging session is created). default=50,
Optional. To change the page size use 'paging_id'=0 that will reset the paging
session.
:type page_size: int
:param page: The page to return. default=0, Optional
:type page: int
:param paging_id: Paging session id for getting the next page of frames
:type paging_id: str
:param projection: Used to select which parts of the frame will be returned.
Each string represents a field or sub-field (using dot-separated notation). In
order to specify a specific array element, use array index as a field name. To
specify all array elements, use '*'.
:type projection: Sequence[str]
:param aggregate_on_context_id: If set to Truethen one frame is returned per
unique context_id. If set to Falseall frames are retuned. If not set then
defaults to the server configured value
:type aggregate_on_context_id: bool
"""
_service = "frames"
_action = "get_snippets_for_dataview"
_version = "2.23"
_schema = {
"definitions": {
"dataview": {
"properties": {
"augmentation": {
"description": "Augmentation parameters. Only for training and testing tasks.",
"oneOf": [
{"$ref": "#/definitions/dv_augmentation"},
{"type": "null"},
],
},
"filters": {
"description": "List of FilterRule ('OR' relationship)",
"items": {"$ref": "#/definitions/filter_rule"},
"type": ["array", "null"],
},
"iteration": {
"description": "Iteration parameters. Not applicable for register (import) tasks.",
"oneOf": [
{"$ref": "#/definitions/iteration"},
{"type": "null"},
],
},
"labels_enumeration": {
"additionalProperties": {"type": "integer"},
"description": (
"Labels enumerations, specifies numbers to be assigned to ROI labels when getting frames"
),
"type": ["object", "null"],
},
"mapping": {
"description": "Mapping parameters",
"oneOf": [{"$ref": "#/definitions/mapping"}, {"type": "null"}],
},
"output_rois": {
"description": (
"'all_in_frame' - all rois for a frame are returned\n\n'only_filtered' - only rois which"
" led this frame to be selected\n\n'frame_per_roi' - single roi per frame. Frame can be"
" returned multiple times with a different roi each time.\n\nNote: this should be used for"
" Training tasks only\n\nNote: frame_per_roi implies that only filtered rois will be"
" returned\n "
),
"oneOf": [
{"$ref": "#/definitions/output_rois_enum"},
{"type": "null"},
],
},
"versions": {
"description": "View dataset versions",
"items": {"$ref": "#/definitions/view_entry"},
"type": ["array", "null"],
},
},
"type": "object",
},
"dv_augmentation": {
"properties": {
"crop_around_rois": {
"description": "Crop image data around all frame ROIs",
"type": ["boolean", "null"],
},
"sets": {
"description": "List of augmentation sets",
"items": {"$ref": "#/definitions/dv_augmentation_set"},
"type": ["array", "null"],
},
},
"type": "object",
},
"dv_augmentation_set": {
"properties": {
"arguments": {
"additionalProperties": {
"additionalProperties": True,
"type": "object",
},
"description": "Arguments dictionary per custom augmentation type.",
"type": ["object", "null"],
},
"cls": {
"description": "Augmentation class",
"type": ["string", "null"],
},
"strength": {
"description": "Augmentation strength. Range [0,).",
"minimum": 0,
"type": ["number", "null"],
},
"types": {
"description": "Augmentation type",
"items": {"type": "string"},
"type": ["array", "null"],
},
},
"type": "object",
},
"filter_by_roi_enum": {
"default": "label_rules",
"enum": ["disabled", "no_rois", "label_rules"],
"type": "string",
},
"filter_label_rule": {
"properties": {
"conf_range": {
"description": (
"Range of ROI confidence level in the frame (min, max). -1 for not applicable\n "
" Both min and max can be either -1 or positive.\n 2nd number (max) must be"
" either -1 or larger than or equal to the 1st number (min)"
),
"items": {"type": "number"},
"maxItems": 2,
"minItems": 1,
"type": "array",
},
"count_range": {
"description": (
"Range of times ROI appears in the frame (min, max). -1 for not applicable.\n "
" Both integers must be larger than or equal to -1.\n 2nd integer (max) must be"
" either -1 or larger than or equal to the 1st integer (min)"
),
"items": {"type": "integer"},
"maxItems": 2,
"minItems": 1,
"type": "array",
},
"label": {
"description": (
"Lucene format query (see lucene query syntax).\nDefault search field is label.keyword and"
" default operator is AND, so searching for:\n\n'Bus Stop' Blue\n\nis equivalent"
" to:\n\nLabel.keyword:'Bus Stop' AND label.keyword:'Blue'"
),
"type": "string",
},
"must_not": {
"default": False,
"description": (
"If set then the label must not exist or lucene query must not be true.\n The"
" default value is false"
),
"type": "boolean",
},
},
"required": ["label"],
"type": "object",
},
"filter_rule": {
"properties": {
"dataset": {
"description": (
"Dataset ID. Must be a dataset which is in the task's view. If set to '*' all datasets in"
" View are used."
),
"type": "string",
},
"filter_by_roi": {
"description": "Type of filter. Optional, the default value is 'label_rules'",
"oneOf": [
{"$ref": "#/definitions/filter_by_roi_enum"},
{"type": "null"},
],
},
"frame_query": {
"description": "Frame filter, in Lucene query syntax",
"type": ["string", "null"],
},
"label_rules": {
"description": (
"List of FilterLabelRule ('AND' connection)\n\ndisabled - No filtering by ROIs. Select all"
" frames, even if they don't have ROIs (all frames)\n\nno_rois - Select only frames without"
" ROIs (empty frames)\n\nlabel_rules - Select frames according to label rules"
),
"items": {"$ref": "#/definitions/filter_label_rule"},
"type": ["array", "null"],
},
"sources_query": {
"description": "Sources filter, in Lucene query syntax. Filters sources in each frame.",
"type": ["string", "null"],
},
"version": {
"description": (
"Dataset version to apply rule to. Must belong to the dataset and be in the task's view. If"
" set to '*' all version of the datasets in View are used."
),
"type": "string",
},
"weight": {
"description": "Rule weight. Default is 1",
"type": "number",
},
},
"required": ["dataset"],
"type": "object",
},
"iteration": {
"description": "Sequential Iteration API configuration",
"properties": {
"infinite": {
"description": "Infinite iteration",
"type": ["boolean", "null"],
},
"jump": {
"description": "Jump entry",
"oneOf": [{"$ref": "#/definitions/jump"}, {"type": "null"}],
},
"limit": {
"description": (
"Maximum frames per task. If not passed, frames will end when no more matching frames are"
" found, unless infinite is True."
),
"type": ["integer", "null"],
},
"min_sequence": {
"description": (
"Length (in ms) of video clips to return. This is used in random order, and in sequential"
" order only if jumping is provided and only for video frames"
),
"type": ["integer", "null"],
},
"order": {
"description": (
"\n Input frames order. Values: 'sequential', 'random'\n In"
" Sequential mode frames will be returned according to the order in which the frames were"
" added to the dataset."
),
"oneOf": [
{"$ref": "#/definitions/iteration_order_enum"},
{"type": "null"},
],
},
"random_seed": {
"description": "Random seed used when iterating over the dataview",
"type": ["integer", "null"],
},
},
"type": "object",
},
"iteration_order_enum": {
"enum": ["sequential", "random"],
"type": "string",
},
"jump": {
"properties": {
"time": {
"description": "Max time in milliseconds between frames",
"type": ["integer", "null"],
}
},
"type": "object",
},
"label_source": {
"properties": {
"dataset": {
"description": "Source dataset id. '*' for all datasets in view",
"type": ["string", "null"],
},
"labels": {
"description": (
"List of source labels (AND connection). '*' indicates any label. Labels must exist in at"
" least one of the dataset versions in the task's view"
),
"items": {"type": "string"},
"type": ["array", "null"],
},
"version": {
"description": (
"Source dataset version id. Default is '*' (for all versions in dataset in the view)"
" Version must belong to the selected dataset, and must be in the task's view[i]"
),
"type": ["string", "null"],
},
},
"type": "object",
},
"mapping": {
"properties": {
"rules": {
"description": "Rules list",
"items": {"$ref": "#/definitions/mapping_rule"},
"type": ["array", "null"],
}
},
"type": "object",
},
"mapping_rule": {
"properties": {
"source": {
"description": "Source label info",
"oneOf": [
{"$ref": "#/definitions/label_source"},
{"type": "null"},
],
},
"target": {
"description": "Target label name",
"type": ["string", "null"],
},
},
"type": "object",
},
"output_rois_enum": {
"enum": ["all_in_frame", "only_filtered", "frame_per_roi"],
"type": "string",
},
"view_entry": {
"properties": {
"dataset": {
"description": "Existing Dataset id",
"type": ["string", "null"],
},
"merge_with": {
"description": "Version ID to merge with",
"type": ["string", "null"],
},
"version": {
"description": "Version id of a version belonging to the dataset",
"type": ["string", "null"],
},
},
"type": "object",
},
},
"properties": {
"aggregate_on_context_id": {
"description": (
"If set to Truethen one frame is returned per unique context_id. If set to Falseall frames are"
" retuned. If not set then defaults to the server configured value"
),
"type": "boolean",
},
"dataview": {
"$ref": "#/definitions/dataview",
"description": "Dataview specification",
},
"page": {
"default": 0,
"description": "The page to return. default=0, Optional",
"type": "integer",
},
"page_size": {
"default": 50,
"description": (
"The amount of snippets to return for the page. Cannot be changed after the first call (after the"
" paging session is created). default=50, Optional. To change the page size use 'paging_id'=0 that"
" will reset the paging session."
),
"type": "integer",
},
"paging_id": {
"description": "Paging session id for getting the next page of frames",
"type": "string",
},
"projection": {
"description": (
"Used to select which parts of the frame will be returned. Each string represents a\n "
" field or sub-field (using dot-separated notation). In order to specify a specific array"
" element,\n use array index as a field name. To specify all array elements, use"
" '*'."
),
"items": {"type": "string"},
"type": "array",
},
},
"required": ["dataview"],
"type": "object",
}
def __init__(
self,
dataview,
page_size=50,
page=0,
paging_id=None,
projection=None,
aggregate_on_context_id=None,
**kwargs
):
super(GetSnippetsForDataviewRequest, self).__init__(**kwargs)
self.dataview = dataview
self.page_size = page_size
self.page = page
self.paging_id = paging_id
self.projection = projection
self.aggregate_on_context_id = aggregate_on_context_id
@schema_property("dataview")
def dataview(self):
return self._property_dataview
@dataview.setter
def dataview(self, value):
if value is None:
self._property_dataview = None
return
if isinstance(value, dict):
value = Dataview.from_dict(value)
else:
self.assert_isinstance(value, "dataview", Dataview)
self._property_dataview = value
@schema_property("page_size")
def page_size(self):
return self._property_page_size
@page_size.setter
def page_size(self, value):
if value is None:
self._property_page_size = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "page_size", six.integer_types)
self._property_page_size = value
@schema_property("page")
def page(self):
return self._property_page
@page.setter
def page(self, value):
if value is None:
self._property_page = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "page", six.integer_types)
self._property_page = value
@schema_property("paging_id")
def paging_id(self):
return self._property_paging_id
@paging_id.setter
def paging_id(self, value):
if value is None:
self._property_paging_id = None
return
self.assert_isinstance(value, "paging_id", six.string_types)
self._property_paging_id = value
@schema_property("projection")
def projection(self):
return self._property_projection
@projection.setter
def projection(self, value):
if value is None:
self._property_projection = None
return
self.assert_isinstance(value, "projection", (list, tuple))
self.assert_isinstance(value, "projection", six.string_types, is_array=True)
self._property_projection = value
@schema_property("aggregate_on_context_id")
def aggregate_on_context_id(self):
return self._property_aggregate_on_context_id
@aggregate_on_context_id.setter
def aggregate_on_context_id(self, value):
if value is None:
self._property_aggregate_on_context_id = None
return
self.assert_isinstance(value, "aggregate_on_context_id", (bool,))
self._property_aggregate_on_context_id = value
| GetSnippetsForDataviewRequest |
python | getsentry__sentry | src/sentry/incidents/models/incident.py | {
"start": 5147,
"end": 5240
} | class ____(Enum):
OPEN = 1
CLOSED = 2
WARNING = 10
CRITICAL = 20
| IncidentStatus |
python | getsentry__sentry | src/sentry/utils/redis.py | {
"start": 1613,
"end": 3017
} | class ____:
def __init__(self, options_manager: OptionsManager) -> None:
self._clusters: dict[str, rb.Cluster] = {}
self._options_manager = options_manager
def _factory(
self,
*,
hosts: list[dict[int, Any]] | dict[int, Any] | None = None,
**config: Any,
) -> rb.Cluster:
if not hosts:
hosts = []
# rb expects a dict of { host, port } dicts where the key is the host
# ID. Coerce the configuration into the correct format if necessary.
hosts = {k: v for k, v in enumerate(hosts)} if isinstance(hosts, list) else hosts
config["hosts"] = hosts
pool_options: dict[str, Any] = config.pop("client_args", {})
pool_options = {**_REDIS_DEFAULT_CLIENT_ARGS, **pool_options}
config["pool_options"] = pool_options
return rb.Cluster(**config, pool_cls=_shared_pool)
def get(self, key: str) -> rb.Cluster:
try:
return self._clusters[key]
except KeyError:
pass
cfg = self._options_manager.get("redis.clusters", {}).get(key)
if cfg is None:
raise KeyError(f"Invalid cluster name: {key}")
if cfg.get("is_redis_cluster", False):
raise KeyError("Invalid cluster type, expected rb cluster")
ret = self._clusters[key] = self._factory(**cfg)
return ret
| RBClusterManager |
python | django__django | tests/model_inheritance_regress/models.py | {
"start": 1608,
"end": 1674
} | class ____(Parent):
name = models.CharField(max_length=10)
| Child |
python | astral-sh__uv | scripts/benchmark/src/benchmark/tools.py | {
"start": 513,
"end": 2143
} | class ____(abc.ABC):
"""Abstract base class for packaging tools."""
def command(self, benchmark: Benchmark, *, cwd: str) -> Command | None:
"""Generate a command to benchmark a given tool."""
match benchmark:
case Benchmark.INSTALL_COLD:
return self.install_cold(cwd=cwd)
case Benchmark.INSTALL_WARM:
return self.install_warm(cwd=cwd)
case Benchmark.RUN:
return self.run(cwd=cwd)
case _:
raise ValueError(f"Invalid benchmark: {benchmark}")
@abc.abstractmethod
def install_cold(self, *, cwd: str) -> Command | None:
"""Resolve a set of dependencies using pip-tools, from a cold cache.
The resolution is performed from scratch, i.e., without an existing lockfile,
and the cache directory is cleared between runs.
"""
@abc.abstractmethod
def install_warm(self, *, cwd: str) -> Command | None:
"""Resolve a set of dependencies using pip-tools, from a warm cache.
The resolution is performed from scratch, i.e., without an existing lockfile;
however, the cache directory is _not_ cleared between runs.
"""
@abc.abstractmethod
def run(self, *, cwd: str) -> Command | None:
"""Resolve a modified lockfile using pip-tools, from a warm cache.
The resolution is performed with an existing lockfile, and the cache directory
is _not_ cleared between runs. However, a new dependency is added to the set
of input requirements, which does not appear in the lockfile.
"""
| Suite |
python | huggingface__transformers | tests/models/markuplm/test_processing_markuplm.py | {
"start": 1308,
"end": 6179
} | class ____(unittest.TestCase):
tokenizer_class = MarkupLMTokenizer
rust_tokenizer_class = MarkupLMTokenizerFast
def setUp(self):
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "\u0120", "\u0120l", "\u0120n", "\u0120lo", "\u0120low", "er", "\u0120lowest", "\u0120newer", "\u0120wider", "\u0120hello", "\u0120world", "<unk>",] # fmt: skip
self.tmpdirname = tempfile.mkdtemp()
vocab_tokens = dict(zip(vocab, range(len(vocab))))
merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
self.tags_dict = {"a": 0, "abbr": 1, "acronym": 2, "address": 3}
self.special_tokens_map = {"unk_token": "<unk>"}
self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
self.tokenizer_config_file = os.path.join(self.tmpdirname, "tokenizer_config.json")
with open(self.vocab_file, "w", encoding="utf-8") as fp:
fp.write(json.dumps(vocab_tokens) + "\n")
with open(self.merges_file, "w", encoding="utf-8") as fp:
fp.write("\n".join(merges))
with open(self.tokenizer_config_file, "w", encoding="utf-8") as fp:
fp.write(json.dumps({"tags_dict": self.tags_dict}))
feature_extractor = MarkupLMFeatureExtractor()
processor = MarkupLMProcessor(tokenizer=self.get_tokenizer(), feature_extractor=feature_extractor)
processor.save_pretrained(self.tmpdirname)
def get_tokenizer(self, **kwargs) -> PythonBackend:
return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
def get_rust_tokenizer(self, **kwargs) -> PreTrainedTokenizerFast:
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
def get_tokenizers(self, **kwargs) -> list[PreTrainedTokenizerBase]:
return [self.get_tokenizer(**kwargs), self.get_rust_tokenizer(**kwargs)]
def get_feature_extractor(self, **kwargs):
return MarkupLMFeatureExtractor.from_pretrained(self.tmpdirname, **kwargs)
def tearDown(self):
shutil.rmtree(self.tmpdirname)
def test_save_load_pretrained_default(self):
feature_extractor = self.get_feature_extractor()
tokenizers = self.get_tokenizers()
for tokenizer in tokenizers:
processor = MarkupLMProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)
processor.save_pretrained(self.tmpdirname)
processor = MarkupLMProcessor.from_pretrained(self.tmpdirname)
self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
self.assertIsInstance(processor.tokenizer, (MarkupLMTokenizer, MarkupLMTokenizerFast))
self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor.to_json_string())
self.assertIsInstance(processor.feature_extractor, MarkupLMFeatureExtractor)
def test_save_load_pretrained_additional_features(self):
processor = MarkupLMProcessor(feature_extractor=self.get_feature_extractor(), tokenizer=self.get_tokenizer())
processor.save_pretrained(self.tmpdirname)
# slow tokenizer
tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
feature_extractor_add_kwargs = self.get_feature_extractor(do_resize=False, size=30)
processor = MarkupLMProcessor.from_pretrained(
self.tmpdirname, use_fast=False, bos_token="(BOS)", eos_token="(EOS)", do_resize=False, size=30
)
self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
self.assertIsInstance(processor.tokenizer, MarkupLMTokenizer)
self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor_add_kwargs.to_json_string())
self.assertIsInstance(processor.feature_extractor, MarkupLMFeatureExtractor)
# fast tokenizer
tokenizer_add_kwargs = self.get_rust_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
feature_extractor_add_kwargs = self.get_feature_extractor(do_resize=False, size=30)
processor = MarkupLMProcessor.from_pretrained(
self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_resize=False, size=30
)
self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
self.assertIsInstance(processor.tokenizer, MarkupLMTokenizerFast)
self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor_add_kwargs.to_json_string())
self.assertIsInstance(processor.feature_extractor, MarkupLMFeatureExtractor)
# different use cases tests
@require_bs4
@require_torch
| MarkupLMProcessorTest |
python | keras-team__keras | keras/src/distribution/distribution_lib.py | {
"start": 28832,
"end": 34542
} | class ____(collections.abc.MutableMapping):
"""A dict-like object that maps string to `TensorLayout` instances.
`LayoutMap` uses a string as key and a `TensorLayout` as value. There is a
behavior difference between a normal Python dict and this class. The string
key will be treated as a regex when retrieving the value. See the docstring
of `get` for more details.
See below for a usage example. You can define the naming schema
of the `TensorLayout`, and then retrieve the corresponding
`TensorLayout` instance.
In the normal case, the key to query is usually the `variable.path`, which
is the identifier of the variable.
As shortcut, tuple or list of axis names are also allowed when inserting
as value, and will be converted to `TensorLayout`.
```python
layout_map = LayoutMap(device_mesh)
layout_map['dense.*kernel'] = (None, 'model')
layout_map['dense.*bias'] = ('model',)
layout_map['conv2d.*kernel'] = (None, None, None, 'model')
layout_map['conv2d.*bias'] = ('model',)
layout_1 = layout_map['dense_1.kernel'] # layout_1 == layout_2d
layout_2 = layout_map['dense_1.bias'] # layout_2 == layout_1d
layout_3 = layout_map['dense_2.kernel'] # layout_3 == layout_2d
layout_4 = layout_map['dense_2.bias'] # layout_4 == layout_1d
layout_5 = layout_map['my_model/conv2d_123/kernel'] # layout_5 == layout_4d
layout_6 = layout_map['my_model/conv2d_123/bias'] # layout_6 == layout_1d
layout_7 = layout_map['my_model/conv3d_1/kernel'] # layout_7 == None
layout_8 = layout_map['my_model/conv3d_1/bias'] # layout_8 == None
```
Args:
device_mesh: `keras.distribution.DeviceMesh` instance.
"""
def __init__(self, device_mesh):
self._layout_map = collections.OrderedDict()
self._device_mesh = device_mesh
def __getitem__(self, key):
"""Retrieves the corresponding layout by the string key.
When there isn't an exact match, all the existing keys in the layout map
will be treated as a regex and map against the input key again. When
there are multiple matches for the regex, an `ValueError` will be
raised. Returns `None` if there isn't any match found.
Args:
key: String key to query a layout.
Returns:
Corresponding layout based on the query.
"""
if key in self._layout_map:
return self._layout_map[key]
matching_keys = []
for k in self._layout_map:
if re.search(k, key):
matching_keys.append(k)
if len(matching_keys) > 1:
raise ValueError(
f"Path '{key}' matches multiple layout "
f"specification keys: {matching_keys}. Please make "
"sure each tensor/variable path only matches at most "
"one layout specification key in the LayoutMap."
)
elif len(matching_keys) == 1:
return self._layout_map[matching_keys[0]]
return None
def __setitem__(self, key, layout):
"""Insert TensorLayout to the LayoutMap.
Args:
key: String key for the `TensorLayout`.
layout: The `TensorLayout`. As a shortcut, tuple of string and None
are also acceptable, and will be converted to `TensorLayout`.
"""
if key in self._layout_map:
raise ValueError(
f"{key} already exist in the LayoutMap with "
f"value {self._layout_map[key]}. Please make sure to "
"not use duplicated keys."
)
if isinstance(layout, tuple):
layout = TensorLayout(axes=layout, device_mesh=None)
if not isinstance(layout, TensorLayout):
raise ValueError(
f"{layout} should be a TensorLayout type, got {type(layout)}"
)
self._maybe_populate_device_mesh(layout)
self._layout_map[key] = layout
def __delitem__(self, key):
# let the dict to handle the key missing error
return self._layout_map.pop(key)
def __len__(self):
return len(self._layout_map)
def __iter__(self):
return iter(self._layout_map)
@property
def device_mesh(self):
return self._device_mesh
def _maybe_populate_device_mesh(self, layout):
if layout.device_mesh is None and self.device_mesh is not None:
layout.device_mesh = self.device_mesh
LayoutMap.get.__doc__ = LayoutMap.__getitem__.__doc__
@keras_export("keras.distribution.distribute_tensor")
def distribute_tensor(tensor, layout):
"""Change the layout of a Tensor value in the jit function execution.
Args:
tensor: a Tensor to change the layout.
layout: `TensorLayout` to be applied on the value.
Returns:
a new value with the specified tensor layout.
"""
if isinstance(tensor, KerasTensor):
# keras tensor is only used for building functional model, and can't be
# used to alter layout/sharding.
return tensor
return distribution_lib.distribute_tensor(tensor, layout)
@keras_export("keras.distribution.distribution")
def distribution():
"""Retrieve the current distribution from global context."""
return global_state.get_global_attribute(GLOBAL_ATTRIBUTE_NAME)
@keras_export("keras.distribution.set_distribution")
def set_distribution(value):
"""Set the distribution as the global distribution setting.
Args:
value: a `Distribution` instance.
"""
global_state.set_global_attribute(GLOBAL_ATTRIBUTE_NAME, value)
| LayoutMap |
python | jazzband__django-simple-history | simple_history/tests/view.py | {
"start": 2592,
"end": 2679
} | class ____(UpdateView):
model = Poll
fields = ["question", "pub_date"]
| PollUpdate |
python | scrapy__scrapy | tests/test_spider.py | {
"start": 18662,
"end": 32186
} | class ____(TestSpider):
spider_class = SitemapSpider
BODY = b"SITEMAP"
f = BytesIO()
g = gzip.GzipFile(fileobj=f, mode="w+b")
g.write(BODY)
g.close()
GZBODY = f.getvalue()
def assertSitemapBody(self, response: Response, body: bytes | None) -> None:
crawler = get_crawler()
spider = self.spider_class.from_crawler(crawler, "example.com")
assert spider._get_sitemap_body(response) == body
def test_get_sitemap_body(self):
r = XmlResponse(url="http://www.example.com/", body=self.BODY)
self.assertSitemapBody(r, self.BODY)
r = HtmlResponse(url="http://www.example.com/", body=self.BODY)
self.assertSitemapBody(r, None)
r = Response(url="http://www.example.com/favicon.ico", body=self.BODY)
self.assertSitemapBody(r, None)
def test_get_sitemap_body_gzip_headers(self):
r = Response(
url="http://www.example.com/sitemap",
body=self.GZBODY,
headers={"content-type": "application/gzip"},
request=Request("http://www.example.com/sitemap"),
)
self.assertSitemapBody(r, self.BODY)
def test_get_sitemap_body_xml_url(self):
r = TextResponse(url="http://www.example.com/sitemap.xml", body=self.BODY)
self.assertSitemapBody(r, self.BODY)
def test_get_sitemap_body_xml_url_compressed(self):
r = Response(
url="http://www.example.com/sitemap.xml.gz",
body=self.GZBODY,
request=Request("http://www.example.com/sitemap"),
)
self.assertSitemapBody(r, self.BODY)
# .xml.gz but body decoded by HttpCompression middleware already
r = Response(url="http://www.example.com/sitemap.xml.gz", body=self.BODY)
self.assertSitemapBody(r, self.BODY)
def test_get_sitemap_urls_from_robotstxt(self):
robots = b"""# Sitemap files
Sitemap: http://example.com/sitemap.xml
Sitemap: http://example.com/sitemap-product-index.xml
Sitemap: HTTP://example.com/sitemap-uppercase.xml
Sitemap: /sitemap-relative-url.xml
"""
r = TextResponse(url="http://www.example.com/robots.txt", body=robots)
spider = self.spider_class("example.com")
assert [req.url for req in spider._parse_sitemap(r)] == [
"http://example.com/sitemap.xml",
"http://example.com/sitemap-product-index.xml",
"http://example.com/sitemap-uppercase.xml",
"http://www.example.com/sitemap-relative-url.xml",
]
def test_alternate_url_locs(self):
sitemap = b"""<?xml version="1.0" encoding="UTF-8"?>
<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9"
xmlns:xhtml="http://www.w3.org/1999/xhtml">
<url>
<loc>http://www.example.com/english/</loc>
<xhtml:link rel="alternate" hreflang="de"
href="http://www.example.com/deutsch/"/>
<xhtml:link rel="alternate" hreflang="de-ch"
href="http://www.example.com/schweiz-deutsch/"/>
<xhtml:link rel="alternate" hreflang="it"
href="http://www.example.com/italiano/"/>
<xhtml:link rel="alternate" hreflang="it"/><!-- wrong tag without href -->
</url>
</urlset>"""
r = TextResponse(url="http://www.example.com/sitemap.xml", body=sitemap)
spider = self.spider_class("example.com")
assert [req.url for req in spider._parse_sitemap(r)] == [
"http://www.example.com/english/"
]
spider.sitemap_alternate_links = True
assert [req.url for req in spider._parse_sitemap(r)] == [
"http://www.example.com/english/",
"http://www.example.com/deutsch/",
"http://www.example.com/schweiz-deutsch/",
"http://www.example.com/italiano/",
]
def test_sitemap_filter(self):
sitemap = b"""<?xml version="1.0" encoding="UTF-8"?>
<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9"
xmlns:xhtml="http://www.w3.org/1999/xhtml">
<url>
<loc>http://www.example.com/english/</loc>
<lastmod>2010-01-01</lastmod>
</url>
<url>
<loc>http://www.example.com/portuguese/</loc>
<lastmod>2005-01-01</lastmod>
</url>
</urlset>"""
class FilteredSitemapSpider(self.spider_class):
def sitemap_filter(self, entries):
for entry in entries:
date_time = datetime.strptime(entry["lastmod"], "%Y-%m-%d")
if date_time.year > 2008:
yield entry
r = TextResponse(url="http://www.example.com/sitemap.xml", body=sitemap)
spider = self.spider_class("example.com")
assert [req.url for req in spider._parse_sitemap(r)] == [
"http://www.example.com/english/",
"http://www.example.com/portuguese/",
]
spider = FilteredSitemapSpider("example.com")
assert [req.url for req in spider._parse_sitemap(r)] == [
"http://www.example.com/english/"
]
def test_sitemap_filter_with_alternate_links(self):
sitemap = b"""<?xml version="1.0" encoding="UTF-8"?>
<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9"
xmlns:xhtml="http://www.w3.org/1999/xhtml">
<url>
<loc>http://www.example.com/english/article_1/</loc>
<lastmod>2010-01-01</lastmod>
<xhtml:link rel="alternate" hreflang="de"
href="http://www.example.com/deutsch/article_1/"/>
</url>
<url>
<loc>http://www.example.com/english/article_2/</loc>
<lastmod>2015-01-01</lastmod>
</url>
</urlset>"""
class FilteredSitemapSpider(self.spider_class):
def sitemap_filter(self, entries):
for entry in entries:
alternate_links = entry.get("alternate", ())
for link in alternate_links:
if "/deutsch/" in link:
entry["loc"] = link
yield entry
r = TextResponse(url="http://www.example.com/sitemap.xml", body=sitemap)
spider = self.spider_class("example.com")
assert [req.url for req in spider._parse_sitemap(r)] == [
"http://www.example.com/english/article_1/",
"http://www.example.com/english/article_2/",
]
spider = FilteredSitemapSpider("example.com")
assert [req.url for req in spider._parse_sitemap(r)] == [
"http://www.example.com/deutsch/article_1/"
]
def test_sitemapindex_filter(self):
sitemap = b"""<?xml version="1.0" encoding="UTF-8"?>
<sitemapindex xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
<sitemap>
<loc>http://www.example.com/sitemap1.xml</loc>
<lastmod>2004-01-01T20:00:00+00:00</lastmod>
</sitemap>
<sitemap>
<loc>http://www.example.com/sitemap2.xml</loc>
<lastmod>2005-01-01</lastmod>
</sitemap>
</sitemapindex>"""
class FilteredSitemapSpider(self.spider_class):
def sitemap_filter(self, entries):
for entry in entries:
date_time = datetime.strptime(
entry["lastmod"].split("T")[0], "%Y-%m-%d"
)
if date_time.year > 2004:
yield entry
r = TextResponse(url="http://www.example.com/sitemap.xml", body=sitemap)
spider = self.spider_class("example.com")
assert [req.url for req in spider._parse_sitemap(r)] == [
"http://www.example.com/sitemap1.xml",
"http://www.example.com/sitemap2.xml",
]
spider = FilteredSitemapSpider("example.com")
assert [req.url for req in spider._parse_sitemap(r)] == [
"http://www.example.com/sitemap2.xml"
]
def test_compression_bomb_setting(self):
settings = {"DOWNLOAD_MAXSIZE": 10_000_000}
crawler = get_crawler(settings_dict=settings)
spider = self.spider_class.from_crawler(crawler, "example.com")
body_path = Path(tests_datadir, "compressed", "bomb-gzip.bin")
body = body_path.read_bytes()
request = Request(url="https://example.com")
response = Response(url="https://example.com", body=body, request=request)
assert spider._get_sitemap_body(response) is None
@pytest.mark.filterwarnings("ignore::scrapy.exceptions.ScrapyDeprecationWarning")
def test_compression_bomb_spider_attr(self):
class DownloadMaxSizeSpider(self.spider_class):
download_maxsize = 10_000_000
crawler = get_crawler()
spider = DownloadMaxSizeSpider.from_crawler(crawler, "example.com")
body_path = Path(tests_datadir, "compressed", "bomb-gzip.bin")
body = body_path.read_bytes()
request = Request(url="https://example.com")
response = Response(url="https://example.com", body=body, request=request)
assert spider._get_sitemap_body(response) is None
def test_compression_bomb_request_meta(self):
crawler = get_crawler()
spider = self.spider_class.from_crawler(crawler, "example.com")
body_path = Path(tests_datadir, "compressed", "bomb-gzip.bin")
body = body_path.read_bytes()
request = Request(
url="https://example.com", meta={"download_maxsize": 10_000_000}
)
response = Response(url="https://example.com", body=body, request=request)
assert spider._get_sitemap_body(response) is None
def test_download_warnsize_setting(self):
settings = {"DOWNLOAD_WARNSIZE": 10_000_000}
crawler = get_crawler(settings_dict=settings)
spider = self.spider_class.from_crawler(crawler, "example.com")
body_path = Path(tests_datadir, "compressed", "bomb-gzip.bin")
body = body_path.read_bytes()
request = Request(url="https://example.com")
response = Response(url="https://example.com", body=body, request=request)
with LogCapture(
"scrapy.spiders.sitemap", propagate=False, level=WARNING
) as log:
spider._get_sitemap_body(response)
log.check(
(
"scrapy.spiders.sitemap",
"WARNING",
(
"<200 https://example.com> body size after decompression "
"(11511612 B) is larger than the download warning size "
"(10000000 B)."
),
),
)
@pytest.mark.filterwarnings("ignore::scrapy.exceptions.ScrapyDeprecationWarning")
def test_download_warnsize_spider_attr(self):
class DownloadWarnSizeSpider(self.spider_class):
download_warnsize = 10_000_000
crawler = get_crawler()
spider = DownloadWarnSizeSpider.from_crawler(crawler, "example.com")
body_path = Path(tests_datadir, "compressed", "bomb-gzip.bin")
body = body_path.read_bytes()
request = Request(
url="https://example.com", meta={"download_warnsize": 10_000_000}
)
response = Response(url="https://example.com", body=body, request=request)
with LogCapture(
"scrapy.spiders.sitemap", propagate=False, level=WARNING
) as log:
spider._get_sitemap_body(response)
log.check(
(
"scrapy.spiders.sitemap",
"WARNING",
(
"<200 https://example.com> body size after decompression "
"(11511612 B) is larger than the download warning size "
"(10000000 B)."
),
),
)
def test_download_warnsize_request_meta(self):
crawler = get_crawler()
spider = self.spider_class.from_crawler(crawler, "example.com")
body_path = Path(tests_datadir, "compressed", "bomb-gzip.bin")
body = body_path.read_bytes()
request = Request(
url="https://example.com", meta={"download_warnsize": 10_000_000}
)
response = Response(url="https://example.com", body=body, request=request)
with LogCapture(
"scrapy.spiders.sitemap", propagate=False, level=WARNING
) as log:
spider._get_sitemap_body(response)
log.check(
(
"scrapy.spiders.sitemap",
"WARNING",
(
"<200 https://example.com> body size after decompression "
"(11511612 B) is larger than the download warning size "
"(10000000 B)."
),
),
)
@deferred_f_from_coro_f
async def test_sitemap_urls(self):
class TestSpider(self.spider_class):
name = "test"
sitemap_urls = ["https://toscrape.com/sitemap.xml"]
crawler = get_crawler(TestSpider)
spider = TestSpider.from_crawler(crawler)
with warnings.catch_warnings():
warnings.simplefilter("error")
requests = [request async for request in spider.start()]
assert len(requests) == 1
request = requests[0]
assert request.url == "https://toscrape.com/sitemap.xml"
assert request.dont_filter is False
assert request.callback == spider._parse_sitemap
| TestSitemapSpider |
python | python__mypy | mypy/plugin.py | {
"start": 16711,
"end": 17252
} | class ____(NamedTuple):
args: list[list[Expression]] # Actual expressions for each formal argument
default_signature: CallableType # Original signature of the method
context: Context # Relevant location context (e.g. for error messages)
api: CheckerPluginInterface
# A context for a function hook that infers the return type of a function with
# a special signature.
#
# A no-op callback would just return the inferred return type, but a useful
# callback at least sometimes can infer a more precise type.
| FunctionSigContext |
python | ray-project__ray | release/long_running_tests/workloads/serve_failure.py | {
"start": 2839,
"end": 6344
} | class ____:
def __init__(self, random_killer_handle, max_applications=1):
self.max_applications = max_applications
self.weighted_actions = [
(self.create_application, 1),
(self.verify_application, 4),
]
self.applications = []
self.random_killer = random_killer_handle
# Deploy in parallel to avoid long test startup time.
self.wait_for_applications_running(
[self.create_application(blocking=False) for _ in range(max_applications)]
)
self.random_killer.run.remote()
def wait_for_applications_running(self, application_names: List[str]):
client = _get_global_client()
for name in application_names:
client._wait_for_application_running(name, timeout_s=60)
def create_application(self, blocking: bool = True) -> str:
if len(self.applications) == self.max_applications:
app_to_delete = self.applications.pop()
serve.delete(app_to_delete)
new_name = "".join([random.choice(string.ascii_letters) for _ in range(10)])
@serve.deployment(name=new_name)
def handler(self, *args):
logging.getLogger("ray.serve").setLevel(logging.ERROR)
return new_name
if blocking:
ray.get(self.random_killer.spare.remote(new_name))
serve._run(
handler.bind(),
name=new_name,
route_prefix=f"/{new_name}",
_blocking=True,
)
self.applications.append(new_name)
ray.get(self.random_killer.stop_spare.remote(new_name))
else:
serve._run(
handler.bind(),
name=new_name,
route_prefix=f"/{new_name}",
_blocking=False,
)
self.applications.append(new_name)
return new_name
def verify_application(self):
app = random.choice(self.applications)
for _ in range(100):
try:
r = requests.get("http://127.0.0.1:8000/" + app)
assert r.text == app
except Exception:
print("Request to {} failed.".format(app))
time.sleep(0.1)
def run(self):
start_time = time.time()
previous_time = start_time
for iteration in range(NUM_ITERATIONS):
for _ in range(ACTIONS_PER_ITERATION):
actions, weights = zip(*self.weighted_actions)
action_chosen = random.choices(actions, weights=weights)[0]
print(f"Executing {action_chosen}")
action_chosen()
new_time = time.time()
print(
f"Iteration {iteration}:\n"
f" - Iteration time: {new_time - previous_time}.\n"
f" - Absolute time: {new_time}.\n"
f" - Total elapsed time: {new_time - start_time}."
)
update_progress(
{
"iteration": iteration,
"iteration_time": new_time - previous_time,
"absolute_time": new_time,
"elapsed_time": new_time - start_time,
}
)
previous_time = new_time
if RAY_UNIT_TEST:
break
random_killer = RandomKiller.remote()
tester = RandomTest(random_killer, max_applications=NUM_NODES * CPUS_PER_NODE)
tester.run()
| RandomTest |
python | kamyu104__LeetCode-Solutions | Python/count-and-say.py | {
"start": 37,
"end": 519
} | class ____(object):
# @return a string
def countAndSay(self, n):
seq = "1"
for i in xrange(n - 1):
seq = self.getNext(seq)
return seq
def getNext(self, seq):
i, next_seq = 0, ""
while i < len(seq):
cnt = 1
while i < len(seq) - 1 and seq[i] == seq[i + 1]:
cnt += 1
i += 1
next_seq += str(cnt) + seq[i]
i += 1
return next_seq
| Solution |
python | plotly__plotly.py | plotly/graph_objs/layout/ternary/caxis/_tickformatstop.py | {
"start": 235,
"end": 8537
} | class ____(_BaseLayoutHierarchyType):
_parent_path_str = "layout.ternary.caxis"
_path_str = "layout.ternary.caxis.tickformatstop"
_valid_props = {"dtickrange", "enabled", "name", "templateitemname", "value"}
@property
def dtickrange(self):
"""
range [*min*, *max*], where "min", "max" - dtick values which
describe some zoom level, it is possible to omit "min" or "max"
value by passing "null"
The 'dtickrange' property is an info array that may be specified as:
* a list or tuple of 2 elements where:
(0) The 'dtickrange[0]' property accepts values of any type
(1) The 'dtickrange[1]' property accepts values of any type
Returns
-------
list
"""
return self["dtickrange"]
@dtickrange.setter
def dtickrange(self, val):
self["dtickrange"] = val
@property
def enabled(self):
"""
Determines whether or not this stop is used. If `false`, this
stop is ignored even within its `dtickrange`.
The 'enabled' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["enabled"]
@enabled.setter
def enabled(self, val):
self["enabled"] = val
@property
def name(self):
"""
When used in a template, named items are created in the output
figure in addition to any items the figure already has in this
array. You can modify these items in the output figure by
making your own item with `templateitemname` matching this
`name` alongside your modifications (including `visible: false`
or `enabled: false` to hide it). Has no effect outside of a
template.
The 'name' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["name"]
@name.setter
def name(self, val):
self["name"] = val
@property
def templateitemname(self):
"""
Used to refer to a named item in this array in the template.
Named items from the template will be created even without a
matching item in the input figure, but you can modify one by
making an item with `templateitemname` matching its `name`,
alongside your modifications (including `visible: false` or
`enabled: false` to hide it). If there is no template or no
matching item, this item will be hidden unless you explicitly
show it with `visible: true`.
The 'templateitemname' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["templateitemname"]
@templateitemname.setter
def templateitemname(self, val):
self["templateitemname"] = val
@property
def value(self):
"""
string - dtickformat for described zoom level, the same as
"tickformat"
The 'value' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["value"]
@value.setter
def value(self, val):
self["value"] = val
@property
def _prop_descriptions(self):
return """\
dtickrange
range [*min*, *max*], where "min", "max" - dtick values
which describe some zoom level, it is possible to omit
"min" or "max" value by passing "null"
enabled
Determines whether or not this stop is used. If
`false`, this stop is ignored even within its
`dtickrange`.
name
When used in a template, named items are created in the
output figure in addition to any items the figure
already has in this array. You can modify these items
in the output figure by making your own item with
`templateitemname` matching this `name` alongside your
modifications (including `visible: false` or `enabled:
false` to hide it). Has no effect outside of a
template.
templateitemname
Used to refer to a named item in this array in the
template. Named items from the template will be created
even without a matching item in the input figure, but
you can modify one by making an item with
`templateitemname` matching its `name`, alongside your
modifications (including `visible: false` or `enabled:
false` to hide it). If there is no template or no
matching item, this item will be hidden unless you
explicitly show it with `visible: true`.
value
string - dtickformat for described zoom level, the same
as "tickformat"
"""
def __init__(
self,
arg=None,
dtickrange=None,
enabled=None,
name=None,
templateitemname=None,
value=None,
**kwargs,
):
"""
Construct a new Tickformatstop object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.layout.ternary
.caxis.Tickformatstop`
dtickrange
range [*min*, *max*], where "min", "max" - dtick values
which describe some zoom level, it is possible to omit
"min" or "max" value by passing "null"
enabled
Determines whether or not this stop is used. If
`false`, this stop is ignored even within its
`dtickrange`.
name
When used in a template, named items are created in the
output figure in addition to any items the figure
already has in this array. You can modify these items
in the output figure by making your own item with
`templateitemname` matching this `name` alongside your
modifications (including `visible: false` or `enabled:
false` to hide it). Has no effect outside of a
template.
templateitemname
Used to refer to a named item in this array in the
template. Named items from the template will be created
even without a matching item in the input figure, but
you can modify one by making an item with
`templateitemname` matching its `name`, alongside your
modifications (including `visible: false` or `enabled:
false` to hide it). If there is no template or no
matching item, this item will be hidden unless you
explicitly show it with `visible: true`.
value
string - dtickformat for described zoom level, the same
as "tickformat"
Returns
-------
Tickformatstop
"""
super().__init__("tickformatstops")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.layout.ternary.caxis.Tickformatstop
constructor must be a dict or
an instance of :class:`plotly.graph_objs.layout.ternary.caxis.Tickformatstop`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("dtickrange", arg, dtickrange)
self._set_property("enabled", arg, enabled)
self._set_property("name", arg, name)
self._set_property("templateitemname", arg, templateitemname)
self._set_property("value", arg, value)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Tickformatstop |
python | explosion__spaCy | spacy/training/alignment.py | {
"start": 150,
"end": 614
} | class ____:
x2y: AlignmentArray
y2x: AlignmentArray
@classmethod
def from_indices(cls, x2y: List[List[int]], y2x: List[List[int]]) -> "Alignment":
x2y = AlignmentArray(x2y)
y2x = AlignmentArray(y2x)
return Alignment(x2y=x2y, y2x=y2x)
@classmethod
def from_strings(cls, A: List[str], B: List[str]) -> "Alignment":
x2y, y2x = get_alignments(A, B)
return Alignment.from_indices(x2y=x2y, y2x=y2x)
| Alignment |
python | pypa__pip | tests/unit/test_search_scope.py | {
"start": 125,
"end": 1502
} | class ____:
def test_get_formatted_locations_basic_auth(self) -> None:
"""
Test that basic authentication credentials defined in URL
is not included in formatted output.
"""
index_urls = [
"https://pypi.org/simple",
"https://repo-user:repo-pass@repo.domain.com",
]
find_links = ["https://links-user:links-pass@page.domain.com"]
search_scope = SearchScope(
find_links=find_links,
index_urls=index_urls,
no_index=False,
)
result = search_scope.get_formatted_locations()
assert "repo-user:****@repo.domain.com" in result
assert "repo-pass" not in result
assert "links-user:****@page.domain.com" in result
assert "links-pass" not in result
def test_get_index_urls_locations(self) -> None:
"""Check that the canonical name is on all indexes"""
search_scope = SearchScope(
find_links=[],
index_urls=["file://index1/", "file://index2"],
no_index=False,
)
req = install_req_from_line("Complex_Name")
assert req.name is not None
actual = search_scope.get_index_urls_locations(req.name)
assert actual == [
"file://index1/complex-name/",
"file://index2/complex-name/",
]
| TestSearchScope |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 184702,
"end": 186428
} | class ____(sgqlc.types.Input):
"""Autogenerated input type of CreateIssue"""
__schema__ = github_schema
__field_names__ = (
"repository_id",
"title",
"body",
"assignee_ids",
"milestone_id",
"label_ids",
"project_ids",
"issue_template",
"client_mutation_id",
)
repository_id = sgqlc.types.Field(sgqlc.types.non_null(ID), graphql_name="repositoryId")
"""The Node ID of the repository."""
title = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="title")
"""The title for the issue."""
body = sgqlc.types.Field(String, graphql_name="body")
"""The body for the issue description."""
assignee_ids = sgqlc.types.Field(sgqlc.types.list_of(sgqlc.types.non_null(ID)), graphql_name="assigneeIds")
"""The Node ID for the user assignee for this issue."""
milestone_id = sgqlc.types.Field(ID, graphql_name="milestoneId")
"""The Node ID of the milestone for this issue."""
label_ids = sgqlc.types.Field(sgqlc.types.list_of(sgqlc.types.non_null(ID)), graphql_name="labelIds")
"""An array of Node IDs of labels for this issue."""
project_ids = sgqlc.types.Field(sgqlc.types.list_of(sgqlc.types.non_null(ID)), graphql_name="projectIds")
"""An array of Node IDs for projects associated with this issue."""
issue_template = sgqlc.types.Field(String, graphql_name="issueTemplate")
"""The name of an issue template in the repository, assigns labels
and assignees from the template to the issue
"""
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
"""A unique identifier for the client performing the mutation."""
| CreateIssueInput |
python | google__jax | tests/pretty_printer_test.py | {
"start": 698,
"end": 3883
} | class ____(jtu.JaxTestCase):
def testSourceMap(self):
doc = pp.concat([
pp.text("abc"),
pp.source_map(pp.text("def"), 101),
pp.source_map(
pp.concat([pp.text("gh"), pp.brk(""), pp.text("ijkl")]), 77
),
pp.text("mn"),
])
source_map = []
out = doc.format(width=8, source_map=source_map)
self.assertEqual(out, "abcdefgh\nijklmn")
self.assertEqual(source_map, [[(3, 6, 101), (6, 8, 77)], [(0, 4, 77)]])
def testBasics(self):
self.assertEqual(pp.nil().format(), "")
self.assertEqual(pp.text("").format(), "")
self.assertEqual(pp.text("testing").format(), "testing")
self.assertEqual(pp.text("\n").format(), "\n")
self.assertEqual(pp.brk().format(), "\n")
# Group that fits will use the space from brk()
self.assertEqual(pp.group(pp.brk()).format(), " ")
# Group that doesn't fit (due to width=0) will use newline
self.assertEqual(pp.group(pp.brk()).format(width=0), "\n")
# Custom break text
self.assertEqual(pp.group(pp.brk("-")).format(), "-")
self.assertEqual(pp.group(pp.brk("-")).format(width=0), "\n")
# Concatenation
self.assertEqual((pp.text("a") + pp.text("b")).format(), "ab")
self.assertEqual(pp.concat([pp.text("a"), pp.text("b c")]).format(), "ab c")
x = pp.text("x")
y = pp.text("y")
z = pp.text("z")
# Join
# Join with a break that becomes a space when fitting
join_doc_space = pp.join(
pp.text(",") + pp.brk(), [pp.text("a"), pp.text("b"), pp.text("c")]
)
self.assertEqual(pp.group(join_doc_space).format(), "a, b, c")
self.assertEqual(pp.group(join_doc_space).format(width=5), "a,\nb,\nc")
self.assertEqual(pp.join(pp.text(","), [x, y, z]).format(), "x,y,z")
j = pp.join(
pp.brk(), [pp.text("xx"), pp.text("yy"), pp.text("zz"), pp.text("ww")]
)
self.assertEqual(pp.group(j).format(width=3), "xx\nyy\nzz\nww")
self.assertEqual(pp.group(j).format(width=80), "xx yy zz ww")
bx = pp.brk() + x
bxbx = bx + bx
bx4 = bxbx + bxbx
# Horizontal-like (fits)
self.assertEqual(pp.group(bx).format(), " x")
self.assertEqual(pp.group(bxbx).format(), " x x")
self.assertEqual(pp.group(bx4).format(), " x x x x")
# Vertical-like (forced by width)
self.assertEqual(pp.group(bx).format(width=0), "\nx")
self.assertEqual(pp.group(bxbx).format(width=0), "\nx\nx")
self.assertEqual(pp.group(bx4).format(width=0), "\nx\nx\nx\nx")
self.assertEqual(pp.group(bxbx).format(width=3), "\nx\nx")
# Nesting
xbybz = x + pp.brk() + y + pp.brk() + z
self.assertEqual(pp.nest(2, pp.group(bx)).format(), " x") # Stays flat
self.assertEqual(pp.nest(2, pp.group(bxbx)).format(), " x x") # Stays flat
self.assertEqual(pp.nest(2, pp.group(bx)).format(width=0), "\n x")
self.assertEqual(
pp.nest(2, pp.nest(2, pp.group(bx))).format(width=0), "\n x"
)
self.assertEqual(pp.nest(2, pp.group(xbybz)).format(width=0), "x\n y\n z")
self.assertEqual(pp.nest(2, pp.group(bxbx)).format(width=0), "\n x\n x")
if __name__ == "__main__":
absltest.main(testLoader=jtu.JaxTestLoader())
| PrettyPrinterTest |
python | geekcomputers__Python | venv/Lib/site-packages/pip/_internal/metadata/importlib/_envs.py | {
"start": 6266,
"end": 7431
} | class ____(BaseEnvironment):
def __init__(self, paths: Sequence[str]) -> None:
self._paths = paths
@classmethod
def default(cls) -> BaseEnvironment:
return cls(sys.path)
@classmethod
def from_paths(cls, paths: Optional[List[str]]) -> BaseEnvironment:
if paths is None:
return cls(sys.path)
return cls(paths)
def _iter_distributions(self) -> Iterator[BaseDistribution]:
finder = _DistributionFinder()
for location in self._paths:
yield from finder.find(location)
for dist in finder.find_eggs(location):
_emit_egg_deprecation(dist.location)
yield dist
# This must go last because that's how pkg_resources tie-breaks.
yield from finder.find_linked(location)
def get_distribution(self, name: str) -> Optional[BaseDistribution]:
canonical_name = canonicalize_name(name)
matches = (
distribution
for distribution in self.iter_all_distributions()
if distribution.canonical_name == canonical_name
)
return next(matches, None)
| Environment |
python | huggingface__transformers | src/transformers/models/ibert/quant_modules.py | {
"start": 25588,
"end": 25851
} | class ____(Function):
"""
Straight-through Estimator(STE) for torch.floor()
"""
@staticmethod
def forward(ctx, x):
return torch.floor(x)
@staticmethod
def backward(ctx, grad_output):
return grad_output.clone()
| floor_ste |
python | pydantic__pydantic | pydantic/types.py | {
"start": 73891,
"end": 74935
} | class ____(EncoderProtocol):
"""Standard (non-URL-safe) Base64 encoder."""
@classmethod
def decode(cls, data: bytes) -> bytes:
"""Decode the data from base64 encoded bytes to original bytes data.
Args:
data: The data to decode.
Returns:
The decoded data.
"""
try:
return base64.b64decode(data)
except ValueError as e:
raise PydanticCustomError('base64_decode', "Base64 decoding error: '{error}'", {'error': str(e)})
@classmethod
def encode(cls, value: bytes) -> bytes:
"""Encode the data from bytes to a base64 encoded bytes.
Args:
value: The data to encode.
Returns:
The encoded data.
"""
return base64.b64encode(value)
@classmethod
def get_json_format(cls) -> Literal['base64']:
"""Get the JSON format for the encoded data.
Returns:
The JSON format for the encoded data.
"""
return 'base64'
| Base64Encoder |
python | doocs__leetcode | solution/0300-0399/0300.Longest Increasing Subsequence/Solution.py | {
"start": 0,
"end": 277
} | class ____:
def lengthOfLIS(self, nums: List[int]) -> int:
n = len(nums)
f = [1] * n
for i in range(1, n):
for j in range(i):
if nums[j] < nums[i]:
f[i] = max(f[i], f[j] + 1)
return max(f)
| Solution |
python | Netflix__metaflow | test/core/tests/card_default_editable.py | {
"start": 514,
"end": 5146
} | class ____:
at = 0
def get(self):
return self.at
"""
PRIORITY = 3
SKIP_GRAPHS = [
"simple_switch",
"nested_switch",
"branch_in_switch",
"foreach_in_switch",
"switch_in_branch",
"switch_in_foreach",
"recursive_switch",
"recursive_switch_inside_foreach",
]
@tag('card(type="test_editable_card")')
@steps(0, ["start"])
def step_start(self):
from metaflow import current
from metaflow.plugins.cards.card_modules.test_cards import TestStringComponent
import random
self.random_number = random.randint(0, 100)
current.card.append(current.pathspec)
current.card.append(TestStringComponent(str(self.random_number)))
empty_list = current.card.get(type="nonexistingtype")
current.card.append(MyNativeType())
@tag('card(type="test_editable_card", id="xyz")')
@steps(0, ["foreach-nested-inner"])
def step_foreach_inner(self):
# In this step `test_editable_card` should be considered default editable even with `id`
from metaflow import current
from metaflow.plugins.cards.card_modules.test_cards import TestStringComponent
import random
self.random_number = random.randint(0, 100)
current.card.append(current.pathspec)
current.card.append(TestStringComponent(str(self.random_number)))
@tag('card(type="taskspec_card")')
@tag('card(type="test_editable_card")')
@steps(0, ["join"])
def step_join(self):
# In this step `taskspec_card` should not be considered default editable
from metaflow import current
from metaflow.plugins.cards.card_modules.test_cards import TestStringComponent
import random
self.random_number = random.randint(0, 100)
current.card.append(current.pathspec)
current.card.append(TestStringComponent(str(self.random_number)))
@tag('card(type="test_editable_card")')
@steps(1, ["all"])
def step_all(self):
from metaflow import current
from metaflow.plugins.cards.card_modules.test_cards import TestStringComponent
import random
self.random_number = random.randint(0, 100)
current.card.append(current.pathspec)
current.card.append(TestStringComponent(str(self.random_number)))
def check_results(self, flow, checker):
run = checker.get_run()
card_type = "test_editable_card"
if run is None:
# This means CliCheck is in context.
for step in flow:
cli_check_dict = checker.artifact_dict(step.name, "random_number")
for task_pathspec in cli_check_dict:
# full_pathspec = "/".join([flow.name, task_pathspec])
task_id = task_pathspec.split("/")[-1]
cards_info = checker.list_cards(step.name, task_id, card_type)
number = cli_check_dict[task_pathspec]["random_number"]
assert_equals(
cards_info is not None
and "cards" in cards_info
and len(cards_info["cards"]) == 1,
True,
)
card = cards_info["cards"][0]
checker.assert_card(
step.name,
task_id,
card_type,
"%d" % number,
card_hash=card["hash"],
exact_match=True,
)
else:
# This means MetadataCheck is in context.
for step in flow:
meta_check_dict = checker.artifact_dict(step.name, "random_number")
for task_id in meta_check_dict:
random_number = meta_check_dict[task_id]["random_number"]
cards_info = checker.list_cards(step.name, task_id, card_type)
assert_equals(
cards_info is not None
and "cards" in cards_info
and len(cards_info["cards"]) == 1,
True,
)
for card in cards_info["cards"]:
checker.assert_card(
step.name,
task_id,
card_type,
"%d" % random_number,
card_hash=card["hash"],
exact_match=False,
)
| MyNativeType |
python | pypa__pipenv | pipenv/patched/pip/_vendor/pygments/sphinxext.py | {
"start": 752,
"end": 8071
} | class ____(Directive):
"""
A directive to collect all lexers/formatters/filters and generate
autoclass directives for them.
"""
has_content = False
required_arguments = 1
optional_arguments = 0
final_argument_whitespace = False
option_spec = {}
def run(self):
self.filenames = set()
if self.arguments[0] == 'lexers':
out = self.document_lexers()
elif self.arguments[0] == 'formatters':
out = self.document_formatters()
elif self.arguments[0] == 'filters':
out = self.document_filters()
elif self.arguments[0] == 'lexers_overview':
out = self.document_lexers_overview()
else:
raise Exception('invalid argument for "pygmentsdoc" directive')
node = nodes.compound()
vl = ViewList(out.split('\n'), source='')
nested_parse_with_titles(self.state, vl, node)
for fn in self.filenames:
self.state.document.settings.record_dependencies.add(fn)
return node.children
def document_lexers_overview(self):
"""Generate a tabular overview of all lexers.
The columns are the lexer name, the extensions handled by this lexer
(or "None"), the aliases and a link to the lexer class."""
from pipenv.patched.pip._vendor.pygments.lexers._mapping import LEXERS
from pipenv.patched.pip._vendor.pygments.lexers import find_lexer_class
out = []
table = []
def format_link(name, url):
if url:
return f'`{name} <{url}>`_'
return name
for classname, data in sorted(LEXERS.items(), key=lambda x: x[1][1].lower()):
lexer_cls = find_lexer_class(data[1])
extensions = lexer_cls.filenames + lexer_cls.alias_filenames
table.append({
'name': format_link(data[1], lexer_cls.url),
'extensions': ', '.join(extensions).replace('*', '\\*').replace('_', '\\') or 'None',
'aliases': ', '.join(data[2]),
'class': f'{data[0]}.{classname}'
})
column_names = ['name', 'extensions', 'aliases', 'class']
column_lengths = [max([len(row[column]) for row in table if row[column]])
for column in column_names]
def write_row(*columns):
"""Format a table row"""
out = []
for length, col in zip(column_lengths, columns):
if col:
out.append(col.ljust(length))
else:
out.append(' '*length)
return ' '.join(out)
def write_seperator():
"""Write a table separator row"""
sep = ['='*c for c in column_lengths]
return write_row(*sep)
out.append(write_seperator())
out.append(write_row('Name', 'Extension(s)', 'Short name(s)', 'Lexer class'))
out.append(write_seperator())
for row in table:
out.append(write_row(
row['name'],
row['extensions'],
row['aliases'],
f':class:`~{row["class"]}`'))
out.append(write_seperator())
return '\n'.join(out)
def document_lexers(self):
from pipenv.patched.pip._vendor.pygments.lexers._mapping import LEXERS
from pipenv.patched.pip._vendor import pygments
import inspect
import pathlib
out = []
modules = {}
moduledocstrings = {}
for classname, data in sorted(LEXERS.items(), key=lambda x: x[0]):
module = data[0]
mod = __import__(module, None, None, [classname])
self.filenames.add(mod.__file__)
cls = getattr(mod, classname)
if not cls.__doc__:
print(f"Warning: {classname} does not have a docstring.")
docstring = cls.__doc__
if isinstance(docstring, bytes):
docstring = docstring.decode('utf8')
example_file = getattr(cls, '_example', None)
if example_file:
p = pathlib.Path(inspect.getabsfile(pygments)).parent.parent /\
'tests' / 'examplefiles' / example_file
content = p.read_text(encoding='utf-8')
if not content:
raise Exception(
f"Empty example file '{example_file}' for lexer "
f"{classname}")
if data[2]:
lexer_name = data[2][0]
docstring += '\n\n .. admonition:: Example\n'
docstring += f'\n .. code-block:: {lexer_name}\n\n'
for line in content.splitlines():
docstring += f' {line}\n'
if cls.version_added:
version_line = f'.. versionadded:: {cls.version_added}'
else:
version_line = ''
modules.setdefault(module, []).append((
classname,
', '.join(data[2]) or 'None',
', '.join(data[3]).replace('*', '\\*').replace('_', '\\') or 'None',
', '.join(data[4]) or 'None',
docstring,
version_line))
if module not in moduledocstrings:
moddoc = mod.__doc__
if isinstance(moddoc, bytes):
moddoc = moddoc.decode('utf8')
moduledocstrings[module] = moddoc
for module, lexers in sorted(modules.items(), key=lambda x: x[0]):
if moduledocstrings[module] is None:
raise Exception(f"Missing docstring for {module}")
heading = moduledocstrings[module].splitlines()[4].strip().rstrip('.')
out.append(MODULEDOC % (module, heading, '-'*len(heading)))
for data in lexers:
out.append(LEXERDOC % data)
return ''.join(out)
def document_formatters(self):
from pipenv.patched.pip._vendor.pygments.formatters import FORMATTERS
out = []
for classname, data in sorted(FORMATTERS.items(), key=lambda x: x[0]):
module = data[0]
mod = __import__(module, None, None, [classname])
self.filenames.add(mod.__file__)
cls = getattr(mod, classname)
docstring = cls.__doc__
if isinstance(docstring, bytes):
docstring = docstring.decode('utf8')
heading = cls.__name__
out.append(FMTERDOC % (heading, ', '.join(data[2]) or 'None',
', '.join(data[3]).replace('*', '\\*') or 'None',
docstring))
return ''.join(out)
def document_filters(self):
from pipenv.patched.pip._vendor.pygments.filters import FILTERS
out = []
for name, cls in FILTERS.items():
self.filenames.add(sys.modules[cls.__module__].__file__)
docstring = cls.__doc__
if isinstance(docstring, bytes):
docstring = docstring.decode('utf8')
out.append(FILTERDOC % (cls.__name__, name, docstring))
return ''.join(out)
def setup(app):
app.add_directive('pygmentsdoc', PygmentsDoc)
| PygmentsDoc |
python | pytorch__pytorch | torch/export/_trace.py | {
"start": 3789,
"end": 4815
} | class ____:
"""
Manage Export-specific configurations of Dynamo.
"""
allow_rnn: bool = True
reorderable_logging_functions: set[Callable] = dataclasses.field(
default_factory=set
)
# Emit runtime asserts after AOTAutograd instead.
# This isn't really necessary, and isn't much more efficient since the runtime asserts pass does CSE,
# but if we want to reason more about what guards/runtime asserts to emit,
# this makes it a bit cleaner to do from the export side. Also no real point in running this twice.
do_not_emit_runtime_asserts: bool = True
specialize_int: bool = True
specialize_float: bool = True
assume_static_by_default: bool = False
automatic_dynamic_shapes: bool = False
capture_dynamic_output_shape_ops: bool = True
capture_scalar_outputs: bool = True
prefer_deferred_runtime_asserts_over_guards: bool = False
replay_side_effects: bool = False
side_effect_replay_policy: str = "warn"
@dataclasses.dataclass
| ExportDynamoConfig |
python | mlflow__mlflow | examples/pyfunc/model_as_code.py | {
"start": 724,
"end": 1998
} | class ____(pyfunc.PythonModel):
@mlflow.trace(name="chain", span_type="CHAIN")
def predict(self, context, model_input):
if isinstance(model_input, pd.DataFrame):
model_input = model_input["input"].tolist()
responses = []
for user_input in model_input:
response = self.get_open_ai_model_response(str(user_input))
responses.append(response.choices[0].message.content)
return pd.DataFrame({"response": responses})
@mlflow.trace(name="open_ai", span_type="LLM")
def get_open_ai_model_response(self, user_input):
from openai import OpenAI
return OpenAI().chat.completions.create(
model="gpt-4o-mini",
messages=[
{
"role": "system",
"content": "You are a helpful assistant. You are here to provide useful information to the user.",
},
{
"role": "user",
"content": user_input,
},
],
)
# IMPORTANT: The model code needs to call `mlflow.models.set_model()` to set the model,
# which will be loaded back using `mlflow.pyfunc.load_model` for inference.
mlflow.models.set_model(AIModel())
| AIModel |
python | pydantic__pydantic | tests/mypy/modules/fail_defaults.py | {
"start": 40,
"end": 462
} | class ____(BaseModel):
# Required
undefined_default_no_args: int = Field()
undefined_default: int = Field(description='my desc')
positional_ellipsis_default: int = Field(...)
named_ellipsis_default: int = Field(default=...)
# Not required
positional_default: int = Field(1)
named_default: int = Field(default=2)
named_default_factory: int = Field(default_factory=lambda: 3)
Model()
| Model |
python | jmcnamara__XlsxWriter | xlsxwriter/test/styles/test_styles04.py | {
"start": 380,
"end": 8870
} | class ____(unittest.TestCase):
"""
Test assembling a complete Styles file.
"""
def test_assemble_xml_file(self):
"""Test for border styles."""
self.maxDiff = None
fh = StringIO()
style = Styles()
style._set_filehandle(fh)
workbook = Workbook()
workbook.fileclosed = 1
workbook.add_format({"top": 7})
workbook.add_format({"top": 4})
workbook.add_format({"top": 11})
workbook.add_format({"top": 9})
workbook.add_format({"top": 3})
workbook.add_format({"top": 1})
workbook.add_format({"top": 12})
workbook.add_format({"top": 13})
workbook.add_format({"top": 10})
workbook.add_format({"top": 8})
workbook.add_format({"top": 2})
workbook.add_format({"top": 5})
workbook.add_format({"top": 6})
workbook._set_default_xf_indices()
workbook._prepare_format_properties()
style._set_style_properties(
[
workbook.xf_formats,
workbook.palette,
workbook.font_count,
workbook.num_formats,
workbook.border_count,
workbook.fill_count,
workbook.custom_colors,
workbook.dxf_formats,
workbook.has_comments,
]
)
style._assemble_xml_file()
exp = _xml_to_list(
"""
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<styleSheet xmlns="http://schemas.openxmlformats.org/spreadsheetml/2006/main">
<fonts count="1">
<font>
<sz val="11"/>
<color theme="1"/>
<name val="Calibri"/>
<family val="2"/>
<scheme val="minor"/>
</font>
</fonts>
<fills count="2">
<fill>
<patternFill patternType="none"/>
</fill>
<fill>
<patternFill patternType="gray125"/>
</fill>
</fills>
<borders count="14">
<border>
<left/>
<right/>
<top/>
<bottom/>
<diagonal/>
</border>
<border>
<left/>
<right/>
<top style="hair">
<color auto="1"/>
</top>
<bottom/>
<diagonal/>
</border>
<border>
<left/>
<right/>
<top style="dotted">
<color auto="1"/>
</top>
<bottom/>
<diagonal/>
</border>
<border>
<left/>
<right/>
<top style="dashDotDot">
<color auto="1"/>
</top>
<bottom/>
<diagonal/>
</border>
<border>
<left/>
<right/>
<top style="dashDot">
<color auto="1"/>
</top>
<bottom/>
<diagonal/>
</border>
<border>
<left/>
<right/>
<top style="dashed">
<color auto="1"/>
</top>
<bottom/>
<diagonal/>
</border>
<border>
<left/>
<right/>
<top style="thin">
<color auto="1"/>
</top>
<bottom/>
<diagonal/>
</border>
<border>
<left/>
<right/>
<top style="mediumDashDotDot">
<color auto="1"/>
</top>
<bottom/>
<diagonal/>
</border>
<border>
<left/>
<right/>
<top style="slantDashDot">
<color auto="1"/>
</top>
<bottom/>
<diagonal/>
</border>
<border>
<left/>
<right/>
<top style="mediumDashDot">
<color auto="1"/>
</top>
<bottom/>
<diagonal/>
</border>
<border>
<left/>
<right/>
<top style="mediumDashed">
<color auto="1"/>
</top>
<bottom/>
<diagonal/>
</border>
<border>
<left/>
<right/>
<top style="medium">
<color auto="1"/>
</top>
<bottom/>
<diagonal/>
</border>
<border>
<left/>
<right/>
<top style="thick">
<color auto="1"/>
</top>
<bottom/>
<diagonal/>
</border>
<border>
<left/>
<right/>
<top style="double">
<color auto="1"/>
</top>
<bottom/>
<diagonal/>
</border>
</borders>
<cellStyleXfs count="1">
<xf numFmtId="0" fontId="0" fillId="0" borderId="0"/>
</cellStyleXfs>
<cellXfs count="14">
<xf numFmtId="0" fontId="0" fillId="0" borderId="0" xfId="0"/>
<xf numFmtId="0" fontId="0" fillId="0" borderId="1" xfId="0" applyBorder="1"/>
<xf numFmtId="0" fontId="0" fillId="0" borderId="2" xfId="0" applyBorder="1"/>
<xf numFmtId="0" fontId="0" fillId="0" borderId="3" xfId="0" applyBorder="1"/>
<xf numFmtId="0" fontId="0" fillId="0" borderId="4" xfId="0" applyBorder="1"/>
<xf numFmtId="0" fontId="0" fillId="0" borderId="5" xfId="0" applyBorder="1"/>
<xf numFmtId="0" fontId="0" fillId="0" borderId="6" xfId="0" applyBorder="1"/>
<xf numFmtId="0" fontId="0" fillId="0" borderId="7" xfId="0" applyBorder="1"/>
<xf numFmtId="0" fontId="0" fillId="0" borderId="8" xfId="0" applyBorder="1"/>
<xf numFmtId="0" fontId="0" fillId="0" borderId="9" xfId="0" applyBorder="1"/>
<xf numFmtId="0" fontId="0" fillId="0" borderId="10" xfId="0" applyBorder="1"/>
<xf numFmtId="0" fontId="0" fillId="0" borderId="11" xfId="0" applyBorder="1"/>
<xf numFmtId="0" fontId="0" fillId="0" borderId="12" xfId="0" applyBorder="1"/>
<xf numFmtId="0" fontId="0" fillId="0" borderId="13" xfId="0" applyBorder="1"/>
</cellXfs>
<cellStyles count="1">
<cellStyle name="Normal" xfId="0" builtinId="0"/>
</cellStyles>
<dxfs count="0"/>
<tableStyles count="0" defaultTableStyle="TableStyleMedium9" defaultPivotStyle="PivotStyleLight16"/>
</styleSheet>
"""
)
got = _xml_to_list(fh.getvalue())
self.assertEqual(exp, got)
| TestAssembleStyles |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/typeVarDefaultFunction1.py | {
"start": 622,
"end": 1304
} | class ____(Generic[P]):
def __init__(self, x: Callable[P, None]) -> None: ...
def func2(x: int | ClassA[P]) -> ClassA[P]: ...
def callback1(x: str) -> None: ...
v2_1 = func2(ClassA(callback1))
reveal_type(v2_1, expected_text="ClassA[(x: str)]")
v2_2 = func2(3)
reveal_type(v2_2, expected_text="ClassA[(int, str, str)]")
Ts = TypeVarTuple("Ts", default=Unpack[tuple[int, str, float]])
def func3(x: int | Callable[[*Ts], None]) -> tuple[*Ts]: ...
v3_1 = func3(callback1)
reveal_type(v3_1, expected_text="tuple[str]")
v3_2 = func3(3)
reveal_type(v3_2, expected_text="tuple[int, str, float]")
P2 = ParamSpec("P2", default=...)
P3 = ParamSpec("P3", default="...")
| ClassA |
python | pandas-dev__pandas | pandas/tests/window/test_timeseries_window.py | {
"start": 735,
"end": 25130
} | class ____:
# rolling time-series friendly
# xref GH13327
def test_doc_string(self):
df = DataFrame(
{"B": [0, 1, 2, np.nan, 4]},
index=[
Timestamp("20130101 09:00:00"),
Timestamp("20130101 09:00:02"),
Timestamp("20130101 09:00:03"),
Timestamp("20130101 09:00:05"),
Timestamp("20130101 09:00:06"),
],
)
df
df.rolling("2s").sum()
def test_invalid_window_non_int(self, regular):
# not a valid freq
msg = "passed window foobar is not compatible with a datetimelike index"
with pytest.raises(ValueError, match=msg):
regular.rolling(window="foobar")
# not a datetimelike index
msg = "window must be an integer"
with pytest.raises(ValueError, match=msg):
regular.reset_index().rolling(window="foobar")
@pytest.mark.parametrize("freq", ["2MS", offsets.MonthBegin(2)])
def test_invalid_window_nonfixed(self, freq, regular):
# non-fixed freqs
msg = "\\<2 \\* MonthBegins\\> is a non-fixed frequency"
with pytest.raises(ValueError, match=msg):
regular.rolling(window=freq)
@pytest.mark.parametrize("freq", ["1D", offsets.Day(2), "2ms"])
def test_valid_window(self, freq, regular):
regular.rolling(window=freq)
@pytest.mark.parametrize("minp", [1.0, "foo", np.array([1, 2, 3])])
def test_invalid_minp(self, minp, regular):
# non-integer min_periods
msg = (
r"local variable 'minp' referenced before assignment|"
"min_periods must be an integer"
)
with pytest.raises(ValueError, match=msg):
regular.rolling(window="1D", min_periods=minp)
def test_on(self, regular):
df = regular
# not a valid column
msg = (
r"invalid on specified as foobar, must be a column "
"\\(of DataFrame\\), an Index or None"
)
with pytest.raises(ValueError, match=msg):
df.rolling(window="2s", on="foobar")
# column is valid
df = df.copy()
df["C"] = date_range("20130101", periods=len(df))
df.rolling(window="2D", on="C").sum()
# invalid columns
msg = "window must be an integer"
with pytest.raises(ValueError, match=msg):
df.rolling(window="2d", on="B")
# ok even though on non-selected
df.rolling(window="2D", on="C").B.sum()
def test_monotonic_on(self):
# on/index must be monotonic
df = DataFrame(
{"A": date_range("20130101", periods=5, freq="s"), "B": range(5)}
)
assert df.A.is_monotonic_increasing
df.rolling("2s", on="A").sum()
df = df.set_index("A")
assert df.index.is_monotonic_increasing
df.rolling("2s").sum()
def test_non_monotonic_on(self):
# GH 19248
df = DataFrame(
{"A": date_range("20130101", periods=5, freq="s"), "B": range(5)}
)
df = df.set_index("A")
non_monotonic_index = df.index.to_list()
non_monotonic_index[0] = non_monotonic_index[3]
df.index = non_monotonic_index
assert not df.index.is_monotonic_increasing
msg = "index values must be monotonic"
with pytest.raises(ValueError, match=msg):
df.rolling("2s").sum()
df = df.reset_index()
msg = (
r"invalid on specified as A, must be a column "
"\\(of DataFrame\\), an Index or None"
)
with pytest.raises(ValueError, match=msg):
df.rolling("2s", on="A").sum()
def test_frame_on(self):
df = DataFrame(
{"B": range(5), "C": date_range("20130101 09:00:00", periods=5, freq="3s")}
)
df["A"] = [
Timestamp("20130101 09:00:00"),
Timestamp("20130101 09:00:02"),
Timestamp("20130101 09:00:03"),
Timestamp("20130101 09:00:05"),
Timestamp("20130101 09:00:06"),
]
# we are doing simulating using 'on'
expected = df.set_index("A").rolling("2s").B.sum().reset_index(drop=True)
result = df.rolling("2s", on="A").B.sum()
tm.assert_series_equal(result, expected)
# test as a frame
# we should be ignoring the 'on' as an aggregation column
# note that the expected is setting, computing, and resetting
# so the columns need to be switched compared
# to the actual result where they are ordered as in the
# original
expected = (
df.set_index("A").rolling("2s")[["B"]].sum().reset_index()[["B", "A"]]
)
result = df.rolling("2s", on="A")[["B"]].sum()
tm.assert_frame_equal(result, expected)
def test_frame_on2(self, unit):
# using multiple aggregation columns
dti = DatetimeIndex(
[
Timestamp("20130101 09:00:00"),
Timestamp("20130101 09:00:02"),
Timestamp("20130101 09:00:03"),
Timestamp("20130101 09:00:05"),
Timestamp("20130101 09:00:06"),
]
).as_unit(unit)
df = DataFrame(
{
"A": [0, 1, 2, 3, 4],
"B": [0, 1, 2, np.nan, 4],
"C": dti,
},
columns=["A", "C", "B"],
)
expected1 = DataFrame(
{"A": [0.0, 1, 3, 3, 7], "B": [0, 1, 3, np.nan, 4], "C": df["C"]},
columns=["A", "C", "B"],
)
result = df.rolling("2s", on="C").sum()
expected = expected1
tm.assert_frame_equal(result, expected)
expected = Series([0, 1, 3, np.nan, 4], name="B")
result = df.rolling("2s", on="C").B.sum()
tm.assert_series_equal(result, expected)
expected = expected1[["A", "B", "C"]]
result = df.rolling("2s", on="C")[["A", "B", "C"]].sum()
tm.assert_frame_equal(result, expected)
def test_basic_regular(self, regular):
df = regular.copy()
df.index = date_range("20130101", periods=5, freq="D")
expected = df.rolling(window=1, min_periods=1).sum()
result = df.rolling(window="1D").sum()
tm.assert_frame_equal(result, expected)
df.index = date_range("20130101", periods=5, freq="2D")
expected = df.rolling(window=1, min_periods=1).sum()
result = df.rolling(window="2D", min_periods=1).sum()
tm.assert_frame_equal(result, expected)
expected = df.rolling(window=1, min_periods=1).sum()
result = df.rolling(window="2D", min_periods=1).sum()
tm.assert_frame_equal(result, expected)
expected = df.rolling(window=1).sum()
result = df.rolling(window="2D").sum()
tm.assert_frame_equal(result, expected)
def test_min_periods(self, regular):
# compare for min_periods
df = regular
# these slightly different
expected = df.rolling(2, min_periods=1).sum()
result = df.rolling("2s").sum()
tm.assert_frame_equal(result, expected)
expected = df.rolling(2, min_periods=1).sum()
result = df.rolling("2s", min_periods=1).sum()
tm.assert_frame_equal(result, expected)
def test_closed(self, regular, unit):
# xref GH13965
dti = DatetimeIndex(
[
Timestamp("20130101 09:00:01"),
Timestamp("20130101 09:00:02"),
Timestamp("20130101 09:00:03"),
Timestamp("20130101 09:00:04"),
Timestamp("20130101 09:00:06"),
]
).as_unit(unit)
df = DataFrame(
{"A": [1] * 5},
index=dti,
)
# closed must be 'right', 'left', 'both', 'neither'
msg = "closed must be 'right', 'left', 'both' or 'neither'"
with pytest.raises(ValueError, match=msg):
regular.rolling(window="2s", closed="blabla")
expected = df.copy()
expected["A"] = [1.0, 2, 2, 2, 1]
result = df.rolling("2s", closed="right").sum()
tm.assert_frame_equal(result, expected)
# default should be 'right'
result = df.rolling("2s").sum()
tm.assert_frame_equal(result, expected)
expected = df.copy()
expected["A"] = [1.0, 2, 3, 3, 2]
result = df.rolling("2s", closed="both").sum()
tm.assert_frame_equal(result, expected)
expected = df.copy()
expected["A"] = [np.nan, 1.0, 2, 2, 1]
result = df.rolling("2s", closed="left").sum()
tm.assert_frame_equal(result, expected)
expected = df.copy()
expected["A"] = [np.nan, 1.0, 1, 1, np.nan]
result = df.rolling("2s", closed="neither").sum()
tm.assert_frame_equal(result, expected)
def test_ragged_sum(self, ragged):
df = ragged
result = df.rolling(window="1s", min_periods=1).sum()
expected = df.copy()
expected["B"] = [0.0, 1, 2, 3, 4]
tm.assert_frame_equal(result, expected)
result = df.rolling(window="2s", min_periods=1).sum()
expected = df.copy()
expected["B"] = [0.0, 1, 3, 3, 7]
tm.assert_frame_equal(result, expected)
result = df.rolling(window="2s", min_periods=2).sum()
expected = df.copy()
expected["B"] = [np.nan, np.nan, 3, np.nan, 7]
tm.assert_frame_equal(result, expected)
result = df.rolling(window="3s", min_periods=1).sum()
expected = df.copy()
expected["B"] = [0.0, 1, 3, 5, 7]
tm.assert_frame_equal(result, expected)
result = df.rolling(window="3s").sum()
expected = df.copy()
expected["B"] = [0.0, 1, 3, 5, 7]
tm.assert_frame_equal(result, expected)
result = df.rolling(window="4s", min_periods=1).sum()
expected = df.copy()
expected["B"] = [0.0, 1, 3, 6, 9]
tm.assert_frame_equal(result, expected)
result = df.rolling(window="4s", min_periods=3).sum()
expected = df.copy()
expected["B"] = [np.nan, np.nan, 3, 6, 9]
tm.assert_frame_equal(result, expected)
result = df.rolling(window="5s", min_periods=1).sum()
expected = df.copy()
expected["B"] = [0.0, 1, 3, 6, 10]
tm.assert_frame_equal(result, expected)
def test_ragged_mean(self, ragged):
df = ragged
result = df.rolling(window="1s", min_periods=1).mean()
expected = df.copy()
expected["B"] = [0.0, 1, 2, 3, 4]
tm.assert_frame_equal(result, expected)
result = df.rolling(window="2s", min_periods=1).mean()
expected = df.copy()
expected["B"] = [0.0, 1, 1.5, 3.0, 3.5]
tm.assert_frame_equal(result, expected)
def test_ragged_median(self, ragged):
df = ragged
result = df.rolling(window="1s", min_periods=1).median()
expected = df.copy()
expected["B"] = [0.0, 1, 2, 3, 4]
tm.assert_frame_equal(result, expected)
result = df.rolling(window="2s", min_periods=1).median()
expected = df.copy()
expected["B"] = [0.0, 1, 1.5, 3.0, 3.5]
tm.assert_frame_equal(result, expected)
def test_ragged_quantile(self, ragged):
df = ragged
result = df.rolling(window="1s", min_periods=1).quantile(0.5)
expected = df.copy()
expected["B"] = [0.0, 1, 2, 3, 4]
tm.assert_frame_equal(result, expected)
result = df.rolling(window="2s", min_periods=1).quantile(0.5)
expected = df.copy()
expected["B"] = [0.0, 1, 1.5, 3.0, 3.5]
tm.assert_frame_equal(result, expected)
def test_ragged_std(self, ragged):
df = ragged
result = df.rolling(window="1s", min_periods=1).std(ddof=0)
expected = df.copy()
expected["B"] = [0.0] * 5
tm.assert_frame_equal(result, expected)
result = df.rolling(window="1s", min_periods=1).std(ddof=1)
expected = df.copy()
expected["B"] = [np.nan] * 5
tm.assert_frame_equal(result, expected)
result = df.rolling(window="3s", min_periods=1).std(ddof=0)
expected = df.copy()
expected["B"] = [0.0] + [0.5] * 4
tm.assert_frame_equal(result, expected)
result = df.rolling(window="5s", min_periods=1).std(ddof=1)
expected = df.copy()
expected["B"] = [np.nan, 0.707107, 1.0, 1.0, 1.290994]
tm.assert_frame_equal(result, expected)
def test_ragged_var(self, ragged):
df = ragged
result = df.rolling(window="1s", min_periods=1).var(ddof=0)
expected = df.copy()
expected["B"] = [0.0] * 5
tm.assert_frame_equal(result, expected)
result = df.rolling(window="1s", min_periods=1).var(ddof=1)
expected = df.copy()
expected["B"] = [np.nan] * 5
tm.assert_frame_equal(result, expected)
result = df.rolling(window="3s", min_periods=1).var(ddof=0)
expected = df.copy()
expected["B"] = [0.0] + [0.25] * 4
tm.assert_frame_equal(result, expected)
result = df.rolling(window="5s", min_periods=1).var(ddof=1)
expected = df.copy()
expected["B"] = [np.nan, 0.5, 1.0, 1.0, 1 + 2 / 3.0]
tm.assert_frame_equal(result, expected)
def test_ragged_skew(self, ragged):
df = ragged
result = df.rolling(window="3s", min_periods=1).skew()
expected = df.copy()
expected["B"] = [np.nan] * 5
tm.assert_frame_equal(result, expected)
result = df.rolling(window="5s", min_periods=1).skew()
expected = df.copy()
expected["B"] = [np.nan] * 2 + [0.0, 0.0, 0.0]
tm.assert_frame_equal(result, expected)
def test_ragged_kurt(self, ragged):
df = ragged
result = df.rolling(window="3s", min_periods=1).kurt()
expected = df.copy()
expected["B"] = [np.nan] * 5
tm.assert_frame_equal(result, expected)
result = df.rolling(window="5s", min_periods=1).kurt()
expected = df.copy()
expected["B"] = [np.nan] * 4 + [-1.2]
tm.assert_frame_equal(result, expected)
def test_ragged_count(self, ragged):
df = ragged
result = df.rolling(window="1s", min_periods=1).count()
expected = df.copy()
expected["B"] = [1.0, 1, 1, 1, 1]
tm.assert_frame_equal(result, expected)
df = ragged
result = df.rolling(window="1s").count()
tm.assert_frame_equal(result, expected)
result = df.rolling(window="2s", min_periods=1).count()
expected = df.copy()
expected["B"] = [1.0, 1, 2, 1, 2]
tm.assert_frame_equal(result, expected)
result = df.rolling(window="2s", min_periods=2).count()
expected = df.copy()
expected["B"] = [np.nan, np.nan, 2, np.nan, 2]
tm.assert_frame_equal(result, expected)
def test_regular_min(self):
df = DataFrame(
{"A": date_range("20130101", periods=5, freq="s"), "B": [0.0, 1, 2, 3, 4]}
).set_index("A")
result = df.rolling("1s").min()
expected = df.copy()
expected["B"] = [0.0, 1, 2, 3, 4]
tm.assert_frame_equal(result, expected)
df = DataFrame(
{"A": date_range("20130101", periods=5, freq="s"), "B": [5, 4, 3, 4, 5]}
).set_index("A")
tm.assert_frame_equal(result, expected)
result = df.rolling("2s").min()
expected = df.copy()
expected["B"] = [5.0, 4, 3, 3, 4]
tm.assert_frame_equal(result, expected)
result = df.rolling("5s").min()
expected = df.copy()
expected["B"] = [5.0, 4, 3, 3, 3]
tm.assert_frame_equal(result, expected)
def test_ragged_min(self, ragged):
df = ragged
result = df.rolling(window="1s", min_periods=1).min()
expected = df.copy()
expected["B"] = [0.0, 1, 2, 3, 4]
tm.assert_frame_equal(result, expected)
result = df.rolling(window="2s", min_periods=1).min()
expected = df.copy()
expected["B"] = [0.0, 1, 1, 3, 3]
tm.assert_frame_equal(result, expected)
result = df.rolling(window="5s", min_periods=1).min()
expected = df.copy()
expected["B"] = [0.0, 0, 0, 1, 1]
tm.assert_frame_equal(result, expected)
def test_perf_min(self):
N = 10000
dfp = DataFrame(
{"B": np.random.default_rng(2).standard_normal(N)},
index=date_range("20130101", periods=N, freq="s"),
)
expected = dfp.rolling(2, min_periods=1).min()
result = dfp.rolling("2s").min()
assert ((result - expected) < 0.01).all().all()
expected = dfp.rolling(200, min_periods=1).min()
result = dfp.rolling("200s").min()
assert ((result - expected) < 0.01).all().all()
def test_ragged_max(self, ragged):
df = ragged
result = df.rolling(window="1s", min_periods=1).max()
expected = df.copy()
expected["B"] = [0.0, 1, 2, 3, 4]
tm.assert_frame_equal(result, expected)
result = df.rolling(window="2s", min_periods=1).max()
expected = df.copy()
expected["B"] = [0.0, 1, 2, 3, 4]
tm.assert_frame_equal(result, expected)
result = df.rolling(window="5s", min_periods=1).max()
expected = df.copy()
expected["B"] = [0.0, 1, 2, 3, 4]
tm.assert_frame_equal(result, expected)
def test_ragged_first(self, ragged):
df = ragged
result = df.rolling(window="1s", min_periods=1).first()
expected = df.copy()
expected["B"] = [0.0, 1, 2, 3, 4]
tm.assert_frame_equal(result, expected)
result = df.rolling(window="2s", min_periods=1).first()
expected = df.copy()
expected["B"] = [0.0, 1, 1, 3, 3]
tm.assert_frame_equal(result, expected)
result = df.rolling(window="5s", min_periods=1).first()
expected = df.copy()
expected["B"] = [0.0, 0, 0, 1, 1]
tm.assert_frame_equal(result, expected)
def test_ragged_last(self, ragged):
df = ragged
result = df.rolling(window="1s", min_periods=1).last()
expected = df.copy()
expected["B"] = [0.0, 1, 2, 3, 4]
tm.assert_frame_equal(result, expected)
result = df.rolling(window="2s", min_periods=1).last()
expected = df.copy()
expected["B"] = [0.0, 1, 2, 3, 4]
tm.assert_frame_equal(result, expected)
result = df.rolling(window="5s", min_periods=1).last()
expected = df.copy()
expected["B"] = [0.0, 1, 2, 3, 4]
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"freq, op, result_data",
[
("ms", "min", [0.0] * 10),
("ms", "mean", [0.0] * 9 + [2.0 / 9]),
("ms", "max", [0.0] * 9 + [2.0]),
("s", "min", [0.0] * 10),
("s", "mean", [0.0] * 9 + [2.0 / 9]),
("s", "max", [0.0] * 9 + [2.0]),
("min", "min", [0.0] * 10),
("min", "mean", [0.0] * 9 + [2.0 / 9]),
("min", "max", [0.0] * 9 + [2.0]),
("h", "min", [0.0] * 10),
("h", "mean", [0.0] * 9 + [2.0 / 9]),
("h", "max", [0.0] * 9 + [2.0]),
("D", "min", [0.0] * 10),
("D", "mean", [0.0] * 9 + [2.0 / 9]),
("D", "max", [0.0] * 9 + [2.0]),
],
)
def test_freqs_ops(self, freq, op, result_data):
# GH 21096
index = date_range(start="2018-1-1 01:00:00", freq=f"1{freq}", periods=10)
# Explicit cast to float to avoid implicit cast when setting nan
s = Series(data=0, index=index, dtype="float")
s.iloc[1] = np.nan
s.iloc[-1] = 2
result = getattr(s.rolling(window=f"10{freq}"), op)()
expected = Series(data=result_data, index=index)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"f",
[
"sum",
"mean",
"count",
"median",
"std",
"var",
"kurt",
"skew",
"min",
"max",
"first",
"last",
],
)
def test_all(self, f, regular):
# simple comparison of integer vs time-based windowing
df = regular * 2
er = df.rolling(window=1)
r = df.rolling(window="1s")
result = getattr(r, f)()
expected = getattr(er, f)()
tm.assert_frame_equal(result, expected)
result = r.quantile(0.5)
expected = er.quantile(0.5)
tm.assert_frame_equal(result, expected)
def test_all2(self, arithmetic_win_operators):
f = arithmetic_win_operators
# more sophisticated comparison of integer vs.
# time-based windowing
df = DataFrame(
{"B": np.arange(50)}, index=date_range("20130101", periods=50, freq="h")
)
# in-range data
dft = df.between_time("09:00", "16:00")
r = dft.rolling(window="5h")
result = getattr(r, f)()
# we need to roll the days separately
# to compare with a time-based roll
# finally groupby-apply will return a multi-index
# so we need to drop the day
def agg_by_day(x):
x = x.between_time("09:00", "16:00")
return getattr(x.rolling(5, min_periods=1), f)()
expected = (
df.groupby(df.index.day).apply(agg_by_day).reset_index(level=0, drop=True)
)
tm.assert_frame_equal(result, expected)
def test_rolling_cov_offset(self):
# GH16058
idx = date_range("2017-01-01", periods=24, freq="1h")
ss = Series(np.arange(len(idx)), index=idx)
result = ss.rolling("2h").cov()
expected = Series([np.nan] + [0.5] * (len(idx) - 1), index=idx)
tm.assert_series_equal(result, expected)
expected2 = ss.rolling(2, min_periods=1).cov()
tm.assert_series_equal(result, expected2)
result = ss.rolling("3h").cov()
expected = Series([np.nan, 0.5] + [1.0] * (len(idx) - 2), index=idx)
tm.assert_series_equal(result, expected)
expected2 = ss.rolling(3, min_periods=1).cov()
tm.assert_series_equal(result, expected2)
def test_rolling_on_decreasing_index(self, unit):
# GH-19248, GH-32385
index = DatetimeIndex(
[
Timestamp("20190101 09:00:30"),
Timestamp("20190101 09:00:27"),
Timestamp("20190101 09:00:20"),
Timestamp("20190101 09:00:18"),
Timestamp("20190101 09:00:10"),
]
).as_unit(unit)
df = DataFrame({"column": [3, 4, 4, 5, 6]}, index=index)
result = df.rolling("5s").min()
expected = DataFrame({"column": [3.0, 3.0, 4.0, 4.0, 6.0]}, index=index)
tm.assert_frame_equal(result, expected)
def test_rolling_on_empty(self):
# GH-32385
df = DataFrame({"column": []}, index=[])
result = df.rolling("5s").min()
expected = DataFrame({"column": []}, index=[])
tm.assert_frame_equal(result, expected)
def test_rolling_on_multi_index_level(self):
# GH-15584
df = DataFrame(
{"column": range(6)},
index=MultiIndex.from_product(
[date_range("20190101", periods=3), range(2)], names=["date", "seq"]
),
)
result = df.rolling("10D", on=df.index.get_level_values("date")).sum()
expected = DataFrame(
{"column": [0.0, 1.0, 3.0, 6.0, 10.0, 15.0]}, index=df.index
)
tm.assert_frame_equal(result, expected)
def test_nat_axis_error():
idx = [Timestamp("2020"), NaT]
df = DataFrame(np.eye(2), index=idx)
with pytest.raises(ValueError, match="index values must not have NaT"):
df.rolling("D").mean()
@td.skip_if_no("pyarrow")
def test_arrow_datetime_axis():
# GH 55849
expected = Series(
np.arange(5, dtype=np.float64),
index=Index(
date_range("2020-01-01", periods=5), dtype="timestamp[ns][pyarrow]"
),
)
result = expected.rolling("1D").sum()
tm.assert_series_equal(result, expected)
| TestRollingTS |
python | Farama-Foundation__Gymnasium | tests/vector/testing_utils.py | {
"start": 1469,
"end": 2530
} | class ____(gym.Env):
"""A custom slow environment."""
def __init__(self, slow_reset=0.3):
"""Initialises the environment with a slow reset parameter used in the `step` and `reset` functions."""
super().__init__()
self.slow_reset = slow_reset
self.observation_space = Box(
low=0, high=255, shape=(HEIGHT, WIDTH, 3), dtype=np.uint8
)
self.action_space = Box(low=0.0, high=1.0, shape=(), dtype=np.float32)
def reset(self, *, seed: int | None = None, options: dict | None = None):
"""Resets the environment with a time sleep."""
super().reset(seed=seed)
if self.slow_reset > 0:
time.sleep(self.slow_reset)
return self.observation_space.sample(), {}
def step(self, action):
"""Steps through the environment with a time sleep."""
time.sleep(action)
observation = self.observation_space.sample()
reward, terminated, truncated = 0.0, False, False
return observation, reward, terminated, truncated, {}
| SlowEnv |
python | sphinx-doc__sphinx | tests/roots/test-ext-autodoc/target/pep604.py | {
"start": 140,
"end": 292
} | class ____:
"""docstring"""
attr: int | str #: docstring
def meth(self, x: int | str, y: int | str) -> int | str:
"""docstring"""
| Foo |
python | facelessuser__soupsieve | tests/test_api.py | {
"start": 125,
"end": 14744
} | class ____(util.TestCase):
"""Test Soup Sieve."""
def test_select(self):
"""Test select."""
markup = """
<!-- before header -->
<html>
<head>
</head>
<body>
<!-- comment -->
<p id="1"><code id="2"></code><img id="3" src="./image.png"/></p>
<pre id="4"></pre>
<p><span id="5" class="some-class"></span><span id="some-id"></span></p>
<pre id="6" class='ignore'>
<!-- don't ignore -->
</pre>
</body>
</html>
"""
soup = self.soup(markup, 'html.parser')
ids = [el.attrs['id'] for el in sv.select('span[id]', soup)]
self.assertEqual(sorted(['5', 'some-id']), sorted(ids))
def test_select_order(self):
"""Test select order."""
markup = """
<!-- before header -->
<html>
<head>
</head>
<body>
<!-- comment -->
<p id="1"><code id="2"></code><img id="3" src="./image.png"/></p>
<pre id="4"></pre>
<p><span id="5" class="some-class"></span><span id="some-id"></span></p>
<pre id="6" class='ignore'>
<!-- don't ignore -->
</pre>
</body>
</html>
"""
soup = self.soup(markup, 'html.parser')
ids = [el.attrs['id'] for el in sv.select('[id]', soup.body)]
self.assertEqual(['1', '2', '3', '4', '5', 'some-id', '6'], ids)
def test_select_limit(self):
"""Test select limit."""
markup = """
<!-- before header -->
<html>
<head>
</head>
<body>
<!-- comment -->
<p id="1"><code id="2"></code><img id="3" src="./image.png"/></p>
<pre id="4"></pre>
<p><span id="5" class="some-class"></span><span id="some-id"></span></p>
<pre id="6" class='ignore'>
<!-- don't ignore -->
</pre>
</body>
</html>
"""
soup = self.soup(markup, 'html.parser')
ids = [el.attrs['id'] for el in sv.select('span[id]', soup, limit=1)]
self.assertEqual(sorted(['5']), sorted(ids))
def test_select_one(self):
"""Test select one."""
markup = """
<!-- before header -->
<html>
<head>
</head>
<body>
<!-- comment -->
<p id="1"><code id="2"></code><img id="3" src="./image.png"/></p>
<pre id="4"></pre>
<p><span id="5" class="some-class"></span><span id="some-id"></span></p>
<pre id="6" class='ignore'>
<!-- don't ignore -->
</pre>
</body>
</html>
"""
soup = self.soup(markup, 'html.parser')
self.assertEqual(
sv.select('span[id]', soup, limit=1)[0].attrs['id'],
sv.select_one('span[id]', soup).attrs['id']
)
def test_select_one_none(self):
"""Test select one returns none for no match."""
markup = """
<!-- before header -->
<html>
<head>
</head>
<body>
<!-- comment -->
<p id="1"><code id="2"></code><img id="3" src="./image.png"/></p>
<pre id="4"></pre>
<p><span id="5" class="some-class"></span><span id="some-id"></span></p>
<pre id="6" class='ignore'>
<!-- don't ignore -->
</pre>
</body>
</html>
"""
soup = self.soup(markup, 'html.parser')
self.assertEqual(None, sv.select_one('h1', soup))
def test_iselect(self):
"""Test select iterator."""
markup = """
<!-- before header -->
<html>
<head>
</head>
<body>
<!-- comment -->
<p id="1"><code id="2"></code><img id="3" src="./image.png"/></p>
<pre id="4"></pre>
<p><span id="5" class="some-class"></span><span id="some-id"></span></p>
<pre id="6" class='ignore'>
<!-- don't ignore -->
</pre>
</body>
</html>
"""
soup = self.soup(markup, 'html.parser')
ids = [el.attrs['id'] for el in sv.iselect('span[id]', soup)]
self.assertEqual(sorted(['5', 'some-id']), sorted(ids))
def test_iselect_order(self):
"""Test select iterator order."""
markup = """
<!-- before header -->
<html>
<head>
</head>
<body>
<!-- comment -->
<p id="1"><code id="2"></code><img id="3" src="./image.png"/></p>
<pre id="4"></pre>
<p><span id="5" class="some-class"></span><span id="some-id"></span></p>
<pre id="6" class='ignore'>
<!-- don't ignore -->
</pre>
</body>
</html>
"""
soup = self.soup(markup, 'html.parser')
ids = [el.attrs['id'] for el in sv.iselect('[id]', soup)]
self.assertEqual(['1', '2', '3', '4', '5', 'some-id', '6'], ids)
def test_match(self):
"""Test matching."""
markup = """
<!-- before header -->
<html>
<head>
</head>
<body>
<!-- comment -->
<p id="1"><code id="2"></code><img id="3" src="./image.png"/></p>
<pre id="4"></pre>
<p><span id="5" class="some-class"></span><span id="some-id"></span></p>
<pre id="6" class='ignore'>
<!-- don't ignore -->
</pre>
</body>
</html>
"""
soup = self.soup(markup, 'html.parser')
nodes = sv.select('span[id]', soup)
self.assertTrue(sv.match('span#\\35', nodes[0]))
self.assertFalse(sv.match('span#\\35', nodes[1]))
def test_filter_tag(self):
"""Test filter tag."""
markup = """
<!-- before header -->
<html>
<head>
</head>
<body>
<!-- comment -->
<p id="1"><code id="2"></code><img id="3" src="./image.png"/></p>
<pre id="4"></pre>
<p><span id="5" class="some-class"></span><span id="some-id"></span></p>
<pre id="6" class='ignore'>
<!-- don't ignore -->
</pre>
</body>
</html>
"""
soup = self.soup(markup, 'html.parser')
nodes = sv.filter('pre#\\36', soup.html.body)
self.assertEqual(len(nodes), 1)
self.assertEqual(nodes[0].attrs['id'], '6')
def test_filter_tag_order(self):
"""Test filter tag order."""
markup = """
<!-- before header -->
<html>
<head>
</head>
<body>
<!-- comment -->
<p id="1"><code id="2"></code><img id="3" src="./image.png"/></p>
<pre id="4"></pre>
<p><span id="5" class="some-class"></span><span id="some-id"></span></p>
<pre id="6" class='ignore'>
<!-- don't ignore -->
</pre>
</body>
</html>
"""
soup = self.soup(markup, 'html.parser')
ids = [tag['id'] for tag in sv.filter('[id]', soup.html.body.p)]
self.assertEqual(['2', '3'], ids)
def test_filter_list(self):
"""
Test filter list.
Even if a list is created from the content of a tag, as long as the
content is document nodes, filter will still handle it. It doesn't have
to be just tags.
"""
markup = """
<!-- before header -->
<html>
<head>
</head>
<body>
<!-- comment -->
<p id="1"><code id="2"></code><img id="3" src="./image.png"/></p>
<pre id="4"></pre>
<p><span id="5" class="some-class"></span><span id="some-id"></span></p>
<pre id="6" class='ignore'>
<!-- don't ignore -->
</pre>
</body>
</html>
"""
soup = self.soup(markup, 'html.parser')
nodes = sv.filter('pre#\\36', list(soup.html.body.children))
self.assertEqual(len(nodes), 1)
self.assertEqual(nodes[0].attrs['id'], '6')
def test_closest_match_parent(self):
"""Test match parent closest."""
markup = """
<article id="article">
<div id="div-01">Here is div-01
<div id="div-02">Here is div-02
<div id="div-04">Here is div-04</div>
<div id="div-03">Here is div-03</div>
</div>
<div id="div-05">Here is div-05</div>
</div>
</article>
"""
soup = self.soup(markup, 'html.parser')
el = sv.select_one('#div-03', soup)
self.assertTrue(sv.closest('#div-02', el).attrs['id'] == 'div-02')
def test_closest_match_complex_parent(self):
"""Test closest match complex parent."""
markup = """
<article id="article">
<div id="div-01">Here is div-01
<div id="div-02">Here is div-02
<div id="div-04">Here is div-04</div>
<div id="div-03">Here is div-03</div>
</div>
<div id="div-05">Here is div-05</div>
</div>
</article>
"""
soup = self.soup(markup, 'html.parser')
el = sv.select_one('#div-03', soup)
self.assertTrue(sv.closest('article > div', el).attrs['id'] == 'div-01')
self.assertTrue(sv.closest(':not(div)', el).attrs['id'] == 'article')
def test_closest_match_self(self):
"""Test closest match self."""
markup = """
<article id="article">
<div id="div-01">Here is div-01
<div id="div-02">Here is div-02
<div id="div-04">Here is div-04</div>
<div id="div-03">Here is div-03</div>
</div>
<div id="div-05">Here is div-05</div>
</div>
</article>
"""
soup = self.soup(markup, 'html.parser')
el = sv.select_one('#div-03', soup)
self.assertTrue(sv.closest('div div', el).attrs['id'] == 'div-03')
def test_closest_must_be_parent(self):
"""Test that closest only matches parents or self."""
markup = """
<article id="article">
<div id="div-01">Here is div-01
<div id="div-02">Here is div-02
<div id="div-04">Here is div-04</div>
<div id="div-03">Here is div-03</div>
</div>
<div id="div-05">Here is div-05</div>
</div>
</article>
"""
soup = self.soup(markup, 'html.parser')
el = sv.select_one('#div-03', soup)
self.assertTrue(sv.closest('div #div-05', el) is None)
self.assertTrue(sv.closest('a', el) is None)
def test_escape_hyphen(self):
"""Test escape hyphen cases."""
self.assertEqual(r'\-', sv.escape('-'))
self.assertEqual(r'--', sv.escape('--'))
def test_escape_numbers(self):
"""Test escape hyphen cases."""
self.assertEqual(r'\33 ', sv.escape('3'))
self.assertEqual(r'-\33 ', sv.escape('-3'))
self.assertEqual(r'--3', sv.escape('--3'))
def test_escape_null(self):
"""Test escape null character."""
self.assertEqual('\ufffdtest', sv.escape('\x00test'))
def test_escape_ctrl(self):
"""Test escape control character."""
self.assertEqual(r'\1 test', sv.escape('\x01test'))
def test_escape_special(self):
"""Test escape special character."""
self.assertEqual(r'\{\}\[\]\ \(\)', sv.escape('{}[] ()'))
def test_escape_wide_unicode(self):
"""Test handling of wide Unicode."""
self.assertEqual('Emoji\\ \U0001F60D', sv.escape('Emoji \U0001F60D'))
def test_copy_pickle(self):
"""Test copy and pickle."""
# Test that we can pickle and unpickle
# We force a pattern that contains all custom types:
# `Selector`, `NullSelector`, `SelectorTag`, `SelectorAttribute`,
# `SelectorNth`, `SelectorLang`, `SelectorList`, `Namespaces`,
# `SelectorContains`, and `CustomSelectors`.
p1 = sv.compile(
'p.class#id[id]:nth-child(2):lang(en):focus:-soup-contains("text", "other text")',
{'html': 'http://www.w3.org/TR/html4/'},
custom={':--header': 'h1, h2, h3, h4, h5, h6'}
)
sp1 = pickle.dumps(p1)
pp1 = pickle.loads(sp1)
self.assertTrue(pp1 == p1)
# Test that we pull the same one from cache
p2 = sv.compile(
'p.class#id[id]:nth-child(2):lang(en):focus:-soup-contains("text", "other text")',
{'html': 'http://www.w3.org/TR/html4/'},
custom={':--header': 'h1, h2, h3, h4, h5, h6'}
)
self.assertTrue(p1 is p2)
# Test that we compile a new one when providing a different flags
p3 = sv.compile(
'p.class#id[id]:nth-child(2):lang(en):focus:-soup-contains("text", "other text")',
{'html': 'http://www.w3.org/TR/html4/'},
custom={':--header': 'h1, h2, h3, h4, h5, h6'},
flags=0x10
)
self.assertTrue(p1 is not p3)
self.assertTrue(p1 != p3)
# Test that the copy is equivalent, but not same.
p4 = copy.copy(p1)
self.assertTrue(p4 is not p1)
self.assertTrue(p4 == p1)
p5 = copy.copy(p3)
self.assertTrue(p5 is not p3)
self.assertTrue(p5 == p3)
self.assertTrue(p5 is not p4)
def test_cache(self):
"""Test cache."""
sv.purge()
self.assertEqual(sv.cp._cached_css_compile.cache_info().currsize, 0)
for _x in range(1000):
value = f'[value="{random.randint(1, 10000)!s}"]'
p = sv.compile(value)
self.assertTrue(p.pattern == value)
self.assertTrue(sv.cp._cached_css_compile.cache_info().currsize > 0)
self.assertTrue(sv.cp._cached_css_compile.cache_info().currsize == 500)
sv.purge()
self.assertEqual(sv.cp._cached_css_compile.cache_info().currsize, 0)
def test_recompile(self):
"""If you feed through the same object, it should pass through unless you change parameters."""
p1 = sv.compile('p[id]')
p2 = sv.compile(p1)
self.assertTrue(p1 is p2)
with pytest.raises(ValueError):
sv.compile(p1, flags=sv.DEBUG)
with pytest.raises(ValueError):
sv.compile(p1, namespaces={"": ""})
with pytest.raises(ValueError):
sv.compile(p1, custom={":--header": 'h1, h2, h3, h4, h5, h6'})
def test_immutable_dict_size(self):
"""Test immutable dictionary."""
idict = sv.ct.ImmutableDict({'a': 'b', 'c': 'd'})
self.assertEqual(2, len(idict))
| TestSoupSieve |
python | pytorch__pytorch | torch/_appdirs.py | {
"start": 20027,
"end": 26197
} | class ____:
"""Convenience wrapper for getting application dirs."""
def __init__(
self, appname=None, appauthor=None, version=None, roaming=False, multipath=False
):
self.appname = appname
self.appauthor = appauthor
self.version = version
self.roaming = roaming
self.multipath = multipath
@property
def user_data_dir(self):
return user_data_dir(
self.appname, self.appauthor, version=self.version, roaming=self.roaming
)
@property
def site_data_dir(self):
return site_data_dir(
self.appname, self.appauthor, version=self.version, multipath=self.multipath
)
@property
def user_config_dir(self):
return user_config_dir(
self.appname, self.appauthor, version=self.version, roaming=self.roaming
)
@property
def site_config_dir(self):
return site_config_dir(
self.appname, self.appauthor, version=self.version, multipath=self.multipath
)
@property
def user_cache_dir(self):
return user_cache_dir(self.appname, self.appauthor, version=self.version)
@property
def user_state_dir(self):
return user_state_dir(self.appname, self.appauthor, version=self.version)
@property
def user_log_dir(self):
return user_log_dir(self.appname, self.appauthor, version=self.version)
# ---- internal support stuff
def _get_win_folder_from_registry(csidl_name):
"""This is a fallback technique at best. I'm not sure if using the
registry for this guarantees us the correct answer for all CSIDL_*
names.
"""
import winreg as _winreg
shell_folder_name = {
"CSIDL_APPDATA": "AppData",
"CSIDL_COMMON_APPDATA": "Common AppData",
"CSIDL_LOCAL_APPDATA": "Local AppData",
}[csidl_name]
key = _winreg.OpenKey(
_winreg.HKEY_CURRENT_USER,
r"Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders",
)
dir, _type = _winreg.QueryValueEx(key, shell_folder_name)
return dir
def _get_win_folder_with_pywin32(csidl_name):
from win32com.shell import shell, shellcon
dir = shell.SHGetFolderPath(0, getattr(shellcon, csidl_name), 0, 0)
# Try to make this a unicode path because SHGetFolderPath does
# not return unicode strings when there is unicode data in the
# path.
try:
dir = unicode(dir)
# Downgrade to short path name if have highbit chars. See
# <http://bugs.activestate.com/show_bug.cgi?id=85099>.
has_high_char = False
for c in dir:
if ord(c) > 255:
has_high_char = True
break
if has_high_char:
try:
import win32api
dir = win32api.GetShortPathName(dir)
except ImportError:
pass
except UnicodeError:
pass
return dir
def _get_win_folder_with_ctypes(csidl_name):
import ctypes
csidl_const = {
"CSIDL_APPDATA": 26,
"CSIDL_COMMON_APPDATA": 35,
"CSIDL_LOCAL_APPDATA": 28,
}[csidl_name]
buf = ctypes.create_unicode_buffer(1024)
ctypes.windll.shell32.SHGetFolderPathW(None, csidl_const, None, 0, buf)
# Downgrade to short path name if have highbit chars. See
# <http://bugs.activestate.com/show_bug.cgi?id=85099>.
has_high_char = False
for c in buf:
if ord(c) > 255:
has_high_char = True
break
if has_high_char:
buf2 = ctypes.create_unicode_buffer(1024)
if ctypes.windll.kernel32.GetShortPathNameW(buf.value, buf2, 1024):
buf = buf2
return buf.value
def _get_win_folder_with_jna(csidl_name):
import array
from com.sun import jna
from com.sun.jna.platform import win32
buf_size = win32.WinDef.MAX_PATH * 2
buf = array.zeros("c", buf_size)
shell = win32.Shell32.INSTANCE
shell.SHGetFolderPath(
None,
getattr(win32.ShlObj, csidl_name),
None,
win32.ShlObj.SHGFP_TYPE_CURRENT,
buf,
)
dir = jna.Native.toString(buf.tostring()).rstrip("\0")
# Downgrade to short path name if have highbit chars. See
# <http://bugs.activestate.com/show_bug.cgi?id=85099>.
has_high_char = False
for c in dir:
if ord(c) > 255:
has_high_char = True
break
if has_high_char:
buf = array.zeros("c", buf_size)
kernel = win32.Kernel32.INSTANCE
if kernel.GetShortPathName(dir, buf, buf_size):
dir = jna.Native.toString(buf.tostring()).rstrip("\0")
return dir
if system == "win32":
try:
import win32com.shell
_get_win_folder = _get_win_folder_with_pywin32
except ImportError:
try:
from ctypes import windll
_get_win_folder = _get_win_folder_with_ctypes
except ImportError:
try:
import com.sun.jna
_get_win_folder = _get_win_folder_with_jna
except ImportError:
_get_win_folder = _get_win_folder_from_registry
# ---- self test code
if __name__ == "__main__":
appname = "MyApp"
appauthor = "MyCompany"
props = (
"user_data_dir",
"user_config_dir",
"user_cache_dir",
"user_state_dir",
"user_log_dir",
"site_data_dir",
"site_config_dir",
)
print(f"-- app dirs {__version__} --")
print("-- app dirs (with optional 'version')")
dirs = AppDirs(appname, appauthor, version="1.0")
for prop in props:
print(f"{prop}: {getattr(dirs, prop)}")
print("\n-- app dirs (without optional 'version')")
dirs = AppDirs(appname, appauthor)
for prop in props:
print(f"{prop}: {getattr(dirs, prop)}")
print("\n-- app dirs (without optional 'appauthor')")
dirs = AppDirs(appname)
for prop in props:
print(f"{prop}: {getattr(dirs, prop)}")
print("\n-- app dirs (with disabled 'appauthor')")
dirs = AppDirs(appname, appauthor=False)
for prop in props:
print(f"{prop}: {getattr(dirs, prop)}")
| AppDirs |
python | django__django | tests/template_tests/filter_tests/test_addslashes.py | {
"start": 897,
"end": 1332
} | class ____(SimpleTestCase):
def test_quotes(self):
self.assertEqual(
addslashes("\"double quotes\" and 'single quotes'"),
"\\\"double quotes\\\" and \\'single quotes\\'",
)
def test_backslashes(self):
self.assertEqual(addslashes(r"\ : backslashes, too"), "\\\\ : backslashes, too")
def test_non_string_input(self):
self.assertEqual(addslashes(123), "123")
| FunctionTests |
python | great-expectations__great_expectations | contrib/great_expectations_zipcode_expectations/great_expectations_zipcode_expectations/expectations/expect_column_values_to_be_valid_minnesota_zip.py | {
"start": 1759,
"end": 4110
} | class ____(ColumnMapExpectation):
"""Expect values in this column to be valid Minnesota zipcodes.
See https://pypi.org/project/zipcodes/ for more information.
"""
# These examples will be shown in the public gallery.
# They will also be executed as unit tests for your Expectation.
examples = [
{
"data": {
"valid_minnesota_zip": ["55040", "55330", "55781", "55968"],
"invalid_minnesota_zip": ["-10000", "1234", "99999", "25487"],
},
"tests": [
{
"title": "basic_positive_test",
"exact_match_out": False,
"include_in_gallery": True,
"in": {"column": "valid_minnesota_zip"},
"out": {"success": True},
},
{
"title": "basic_negative_test",
"exact_match_out": False,
"include_in_gallery": True,
"in": {"column": "invalid_minnesota_zip"},
"out": {"success": False},
},
],
}
]
# This is the id string of the Metric used by this Expectation.
# For most Expectations, it will be the same as the `condition_metric_name` defined in your Metric class above.
map_metric = "column_values.valid_minnesota_zip"
# This is a list of parameter names that can affect whether the Expectation evaluates to True or False
success_keys = ("mostly",)
# This dictionary contains default values for any parameters that should have default values
default_kwarg_values = {}
# This object contains metadata for display in the public Gallery
library_metadata = {
"maturity": "experimental", # "experimental", "beta", or "production"
"tags": [
"hackathon",
"typed-entities",
], # Tags for this Expectation in the Gallery
"contributors": [ # Github handles for all contributors to this Expectation.
"@luismdiaz01",
"@derekma73", # Don't forget to add your github handle here!
],
"requirements": ["zipcodes"],
}
if __name__ == "__main__":
ExpectColumnValuesToBeValidMinnesotaZip().print_diagnostic_checklist()
| ExpectColumnValuesToBeValidMinnesotaZip |
python | scikit-learn__scikit-learn | sklearn/tests/metadata_routing_common.py | {
"start": 17045,
"end": 18261
} | class ____(MetaEstimatorMixin, RegressorMixin, BaseEstimator):
"""A meta-regressor which is also a consumer."""
def __init__(self, estimator, registry=None):
self.estimator = estimator
self.registry = registry
def fit(self, X, y, sample_weight=None, **fit_params):
if self.registry is not None:
self.registry.append(self)
record_metadata(self, sample_weight=sample_weight)
params = process_routing(self, "fit", sample_weight=sample_weight, **fit_params)
self.estimator_ = clone(self.estimator).fit(X, y, **params.estimator.fit)
return self
def predict(self, X, **predict_params):
params = process_routing(self, "predict", **predict_params)
return self.estimator_.predict(X, **params.estimator.predict)
def get_metadata_routing(self):
router = (
MetadataRouter(owner=self)
.add_self_request(self)
.add(
estimator=self.estimator,
method_mapping=MethodMapping()
.add(caller="fit", callee="fit")
.add(caller="predict", callee="predict"),
)
)
return router
| WeightedMetaRegressor |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/array_ops/split_op_test.py | {
"start": 1479,
"end": 17189
} | class ____(test.TestCase):
def _makeData(self, shape, dtype):
data = np.random.rand(*shape).astype(dtype.as_numpy_dtype)
if dtype.is_complex:
data -= 1j * data
return data
@test_util.run_deprecated_v1
def testShapeInference(self):
model_input = array_ops.placeholder(dtypes.float32, shape=(1, 10))
# check that we fail during static shape inference if sizes are known
with self.assertRaises(ValueError):
# pylint: disable=expression-not-assigned
array_ops.split(model_input, [4], axis=1)[0]
# pylint: enable=expression-not-assigned
model_input = array_ops.placeholder(dtypes.float32)
inp = np.zeros((1, 10))
# check that we still fail at runtime if the shapes were unknown
with self.cached_session() as sess:
with self.assertRaises(errors_impl.InvalidArgumentError):
sess.run(array_ops.split(model_input, [4]), {model_input: inp})
# scalar Tensors are not permitted as num_splits
for axis in [0, -2]:
with self.cached_session() as sess:
with self.assertRaises(ValueError):
# pylint: disable=expression-not-assigned
sess.run(
array_ops.split(
array_ops.ones([4, 4]),
num_or_size_splits=constant_op.constant(2),
axis=axis))
# pylint: enable=expression-not-assigned
# test that none split dimensions remain, even if we don't know how
# the split_dim will be split, but we do know the axis
result = array_ops.split(
array_ops.ones([5, 2]), array_ops.constant([2, 1, 2]) * 1, axis=0)
self.assertEqual(result[0].shape[1], 2)
self.assertEqual(result[1].shape[1], 2)
self.assertEqual(result[2].shape[1], 2)
model_input2 = array_ops.placeholder(dtypes.float32, shape=[None, 2])
result = array_ops.split(model_input2, [2, 2], axis=0)[0]
with self.cached_session() as sess:
sess.run(result, feed_dict={model_input2: np.ones([4, 2])})
@test_util.run_deprecated_v1
def testFailWithoutExplicitNum(self):
size_splits = array_ops.placeholder(dtype=dtypes.int32, shape=[None])
value = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
with self.session() as sess:
with self.assertRaises(ValueError) as context:
sess.run(array_ops.split(value, size_splits), {size_splits: [2, 2, 6]})
self.assertIn("Cannot infer argument `num` from shape",
str(context.exception))
@test_util.run_in_graph_and_eager_modes
def testExplicitNum(self):
size_splits = array_ops.constant([2, 2, 6], dtype=dtypes.int32)
value = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
# Eager and Graph modes raise different exceptions
with self.assertRaises((errors_impl.InvalidArgumentError, ValueError)):
array_ops.split(value, size_splits, num=4)
r = self.evaluate(array_ops.split(value, size_splits, num=3))
self.assertAllEqual(r[0], value[0:2])
self.assertAllEqual(r[1], value[2:4])
self.assertAllEqual(r[2], value[4:])
@test_util.run_in_graph_and_eager_modes
def testListOfScalarTensors(self):
a = math_ops.cast(5, dtypes.int32)
b = math_ops.cast(6, dtypes.int32)
value = np.random.rand(11, 11)
with test_util.device(use_gpu=True):
result = self.evaluate(array_ops.split(value, [a, b]))
self.assertAllEqual(result[0], value[0:5, :])
self.assertAllEqual(result[1], value[5:, :])
def _RunAndVerifyVariable(self, dtype, large_num_splits=False):
# Random dims of rank 5
shape = np.random.randint(1, 5, size=5)
split_dim = np.random.randint(-5, 5)
if large_num_splits:
num_split = np.random.randint(16, 25)
else:
num_split = np.random.randint(2, 8)
size_splits = np.random.randint(2, 8, num_split, dtype=np.int32)
shape[split_dim] = np.sum(size_splits)
inp = self._makeData(shape, dtype)
with test_util.device(use_gpu=True):
result = self.evaluate(array_ops.split(inp, size_splits, split_dim))
slices = [slice(0, x) for x in shape]
offset = 0
for i in range(num_split):
slices[split_dim] = slice(offset, offset + size_splits[i])
offset += size_splits[i]
self.assertAllEqual(result[i], inp[tuple(slices)])
def _testSpecialCasesVariable(self):
inp = np.random.rand(4, 4).astype("f")
with test_util.device(use_gpu=True):
result = self.evaluate(array_ops.split(inp, [4], 0))
self.assertAllEqual(result[0], inp)
result = self.evaluate(array_ops.split(inp, [-1, 3], 0))
self.assertAllEqual(result[0], inp[0:1, :])
self.assertAllEqual(result[1], inp[1:4, :])
def _testHugeNumberOfTensorsVariable(self, dtype):
num_split = 1000
size_splits = np.random.randint(1, 3, num_split, dtype=np.int32)
shape = [3, np.sum(size_splits)]
split_dim = 1
inp = self._makeData(shape, dtype)
with test_util.device(use_gpu=True):
result = self.evaluate(array_ops.split(inp, size_splits, split_dim))
slices = [slice(0, x) for x in shape]
offset = 0
for i in range(num_split):
slices[split_dim] = slice(offset, offset + size_splits[i])
offset += size_splits[i]
self.assertAllEqual(result[i], inp[tuple(slices)])
@test_util.run_in_graph_and_eager_modes
def testSpecialCasesVariable(self):
self._testSpecialCasesVariable()
for dtype in _TEST_DTYPES:
self._testHugeNumberOfTensorsVariable(dtype)
@test_util.run_in_graph_and_eager_modes
def testDegenerateVariable(self):
inp = np.random.rand(4, 4).astype("f")
with test_util.device(use_gpu=True):
result = self.evaluate(array_ops.split(inp, [-1, 4], 0))
self.assertAllEqual(result[0], inp[0:0, :])
self.assertAllEqual(result[1], inp[0:4, :])
result = self.evaluate(array_ops.split(inp, [4, -1], 0))
self.assertAllEqual(result[0], inp[0:4, :])
self.assertAllEqual(result[1], inp[4:4, :])
result = self.evaluate(array_ops.split(inp, [-1, 4], 1))
self.assertAllEqual(result[0], inp[:, 0:0])
self.assertAllEqual(result[1], inp[:, 0:4])
result = self.evaluate(array_ops.split(inp, [4, -1], 1))
self.assertAllEqual(result[0], inp[:, 0:4])
self.assertAllEqual(result[1], inp[:, 4:4])
def _testGradientsSimpleVariable(self, dtype):
inp = self._makeData((4, 4), dtype)
with test_util.device(use_gpu=True):
inp_tensor = ops.convert_to_tensor(inp)
s = array_ops.split(inp_tensor, [1, 3], 1)
inp_grads = [
self._makeData((4, 1), dtype), self._makeData((4, 3), dtype)
]
grad_tensors = [constant_op.constant(x) for x in inp_grads]
grad = gradients_impl.gradients(s, [inp_tensor], grad_tensors)[-1]
result = self.evaluate(grad)
self.assertAllEqual(result[:, 0:1], inp_grads[0])
self.assertAllEqual(result[:, 1:4], inp_grads[1])
@test_util.run_deprecated_v1
def testOutputShape(self):
for axis in [1, -1]:
with self.cached_session():
tensor = array_ops.placeholder(dtypes.float32, shape=[None, 12])
size_splits = [3, 7, 2]
outputs = array_ops.split(tensor, size_splits, axis)
for i, output in enumerate(outputs):
self.assertEqual(output.get_shape().as_list(), [None, size_splits[i]])
def _compare(self, x, dim, num):
np_ans = np.split(x, num, dim)
with test_util.device(use_gpu=True):
tf_ans = array_ops.split(value=x, num_or_size_splits=num, axis=dim)
out = self.evaluate(tf_ans)
self.assertEqual(num, len(np_ans))
self.assertEqual(num, len(tf_ans))
self.assertEqual(num, len(out))
for i in range(num):
self.assertAllEqual(np_ans[i], out[i])
self.assertShapeEqual(np_ans[i], tf_ans[i])
@test_util.run_in_graph_and_eager_modes
def testSplitRows(self):
for dtype in _TEST_DTYPES:
inp = self._makeData((4, 4), dtype)
self._compare(inp, 0, 4)
@test_util.run_in_graph_and_eager_modes
def testSplitCols(self):
for dtype in _TEST_DTYPES:
inp = self._makeData((4, 4), dtype)
self._compare(inp, 1, 4)
def _testEmpty(self, x, dim, num, expected_shape):
with test_util.device(use_gpu=True):
tf_ans = array_ops.split(value=x, num_or_size_splits=num, axis=dim)
out = self.evaluate(tf_ans)
self.assertEqual(x.size, 0)
self.assertEqual(len(out), num)
for i in range(num):
self.assertEqual(out[i].shape, expected_shape)
self.assertEqual(expected_shape, tf_ans[i].get_shape())
@test_util.run_in_graph_and_eager_modes
def testEmpty(self):
# Note: np.split returns a rank-0 empty ndarray
# if the input ndarray is empty.
for dtype in _TEST_DTYPES:
inp = self._makeData((8, 0, 21), dtype)
self._testEmpty(inp, 0, 2, (4, 0, 21))
self._testEmpty(inp, 0, 4, (2, 0, 21))
self._testEmpty(inp, 1, 4, (8, 0, 21))
self._testEmpty(inp, 2, 3, (8, 0, 7))
self._testEmpty(inp, 2, 7, (8, 0, 3))
@test_util.run_in_graph_and_eager_modes
def testIdentity(self):
for dtype in _TEST_DTYPES:
inp = self._makeData((2, 2, 2), dtype)
self._compare(inp, 0, 1)
self._compare(inp, 1, 1)
self._compare(inp, 2, 1)
@test_util.run_in_graph_and_eager_modes
def testSplitDim0(self):
for dtype in _TEST_DTYPES:
self._compare(self._makeData((6, 10, 18), dtype), 0, 3)
self._compare(self._makeData((6, 7, 18), dtype), 0, 3)
self._compare(self._makeData((6, 7, 9), dtype), 0, 3)
def _RunAndVerify(self, dtype, large_num_splits=False):
# Random dims of rank 5
shape = np.random.randint(0, 5, size=5)
split_dim = np.random.randint(-5, 5)
if large_num_splits:
num_split = np.random.randint(9, 15)
else:
num_split = np.random.randint(2, 8)
shape[split_dim] = np.random.randint(2, 5) * num_split
inp = self._makeData(shape, dtype)
with test_util.device(use_gpu=True):
result = self.evaluate(
array_ops.split(
value=inp, num_or_size_splits=num_split, axis=split_dim))
slices = [slice(0, x) for x in shape]
offset = 0
length = shape[split_dim] // num_split
for i in range(num_split):
slices[split_dim] = slice(offset, offset + length)
offset += length
self.assertAllEqual(result[i], inp[tuple(slices)])
@test_util.run_in_graph_and_eager_modes
def testRandom(self):
for dtype in _TEST_DTYPES:
for _ in range(5):
self._RunAndVerify(dtype)
self._RunAndVerify(dtype, large_num_splits=True)
self._RunAndVerifyVariable(dtype)
self._RunAndVerifyVariable(dtype, large_num_splits=True)
def _testGradientsSimple(self, dtype):
inp = self._makeData((4, 4), dtype)
with self.cached_session():
inp_tensor = ops.convert_to_tensor(inp)
s = array_ops.split(value=inp_tensor, num_or_size_splits=4, axis=1)
inp_grads = [self._makeData((4, 1), dtype)for _ in range(4)]
grad_tensors = [constant_op.constant(x) for x in inp_grads]
grad = gradients_impl.gradients(s, [inp_tensor], grad_tensors)[0]
result = self.evaluate(grad)
for i in range(4):
self.assertAllEqual(result[:, i:i + 1], inp_grads[i])
@test_util.run_deprecated_v1
def testGradientsAll(self):
for dtype in _TEST_DTYPES:
if not dtype.is_integer and dtype not in [
dtypes.float8_e5m2,
dtypes.float8_e4m3fn,
]:
self._testGradientsSimple(dtype)
self._testGradientsSimpleVariable(dtype)
@test_util.run_deprecated_v1
def testShapeFunctionEdgeCases(self):
# split_dim greater than rank of input.
with self.assertRaises(ValueError):
array_ops.split(value=[[0, 1], [2, 3]], num_or_size_splits=4, axis=2)
# split dim less than -(rank of input)
with self.assertRaises(ValueError):
array_ops.split(value=[[0, 1], [2, 3]], num_or_size_splits=4, axis=-3)
# num_split does not evenly divide the size in split_dim.
with self.assertRaisesRegex(ValueError, "should evenly divide"):
array_ops.split(value=[0, 1, 2, 3], num_or_size_splits=3, axis=0)
# Unknown split_dim.
splits = array_ops.split(
value=[[0, 1, 2, 3]],
num_or_size_splits=4,
axis=array_ops.placeholder(dtypes.int32))
for s in splits:
self.assertEqual([None, None], s.get_shape().as_list())
# Unknown split_dim and input shape.
splits = array_ops.split(
value=array_ops.placeholder(dtypes.float32),
num_or_size_splits=4,
axis=array_ops.placeholder(dtypes.int32))
for s in splits:
self.assertEqual(None, s.get_shape().ndims)
@test_util.run_deprecated_v1
def testVariableShapeFunction(self):
# size_splits too big
with self.assertRaises(ValueError):
array_ops.split([0, 1], [3, -1], axis=0)
# Correct inference of variable dimension
s0, s1 = array_ops.split([0, 1, 2], [2, -1], axis=0)
assert s0.shape.as_list() == [2]
assert s1.shape.as_list() == [1]
@test_util.run_deprecated_v1
def testNonexistentDimTensor(self):
x = array_ops.placeholder(dtypes.int32)
values = np.zeros([5, 30])
splits = array_ops.placeholder(dtypes.int32)
with self.assertRaisesRegex(ValueError, "Cannot infer"):
y = array_ops.split(values, splits, axis=x)
splits = array_ops.placeholder(dtypes.int32, [3])
y = array_ops.split(values, splits, axis=x)
with self.session() as sess:
with self.assertRaisesRegex(errors_impl.InvalidArgumentError,
"must have exactly one element"):
sess.run(y, {x: np.array([], dtype=np.int32), splits: [4, 11, 15]})
@test_util.run_in_graph_and_eager_modes
def testNegativeSizes(self):
x = constant_op.constant([1, 2, 3], dtypes.float32)
# A size of -1 signifies to determine size based on sum of other splits.
with self.assertRaisesRegex((ValueError, errors_impl.InvalidArgumentError),
"Split size at index 1 must be >= .*. Got: -2"):
splits = [-1, -2]
self.evaluate(array_ops.split(x, splits, axis=0))
@test_util.run_in_graph_and_eager_modes
def testBadSplitSizes(self):
x = constant_op.constant([1, 2], dtypes.float32)
with self.assertRaisesRegex((ValueError, errors_impl.InvalidArgumentError),
"Determined shape must either match input"
"|can't split axis"):
splits = [1, 2]
self.evaluate(array_ops.split(x, splits, axis=0))
@test_util.run_in_graph_and_eager_modes
def testSplitVBigTensors(self):
input_shape = [1, 64, 32768]
x = np.linspace(
start=1,
stop=np.prod(input_shape),
num=np.prod(input_shape),
dtype=np.float32).reshape(input_shape)
split_axis = 1
size_splits = [1] * input_shape[split_axis]
y = array_ops.split(x, num_or_size_splits=size_splits, axis=split_axis)
for i in range(input_shape[split_axis]):
result = y[i]
expected = x[:, i:i + 1, :]
self.assertAllEqual(result, expected)
@test_util.run_in_graph_and_eager_modes
def testSplitVBigTensorsWithIrregularSplits(self):
input_shape = [1, 64, 32768]
x = np.linspace(start=1,
stop=np.prod(input_shape),
num=np.prod(input_shape),
dtype=np.float32).reshape(input_shape)
split_axis = 1
size_splits = [32, 16, 8, 4, 2, 1, 1]
y = array_ops.split(x, num_or_size_splits=size_splits, axis=split_axis)
start = 0
for i in range(len(size_splits)):
result = y[i]
split_size = size_splits[i]
expected = x[:, start:start+split_size, :]
start += split_size
self.assertAllEqual(result, expected)
if __name__ == "__main__":
test.main()
| SplitOpTest |
python | getsentry__sentry | src/sentry/api/serializers/models/project_template.py | {
"start": 554,
"end": 622
} | class ____(StrEnum):
OPTIONS = "options"
| ProjectTemplateAttributes |
python | getsentry__sentry | tests/sentry/organizations/services/test_organization.py | {
"start": 291,
"end": 2999
} | class ____(TestCase):
def test_check_active_organization_by_slug(self) -> None:
self.organization = self.create_organization(slug="test")
assert (
organization_service.check_organization_by_slug(slug="test", only_visible=True)
== self.organization.id
)
assert (
organization_service.check_organization_by_slug(slug="test", only_visible=False)
== self.organization.id
)
def test_check_missing_organization_by_slug(self) -> None:
assert (
organization_service.check_organization_by_slug(slug="test", only_visible=True) is None
)
assert (
organization_service.check_organization_by_slug(slug="test", only_visible=False) is None
)
def test_check_pending_deletion_organization_by_slug(self) -> None:
self.organization = self.create_organization(slug="test")
self.organization.status = OrganizationStatus.PENDING_DELETION
with assume_test_silo_mode_of(Organization):
self.organization.save()
assert (
organization_service.check_organization_by_slug(slug="test", only_visible=True) is None
)
assert (
organization_service.check_organization_by_slug(slug="test", only_visible=False)
== self.organization.id
)
def test_check_active_organization_by_id(self) -> None:
organization = self.create_organization(slug="test")
assert (
organization_service.check_organization_by_id(id=organization.id, only_visible=True)
is True
)
assert (
organization_service.check_organization_by_id(id=organization.id, only_visible=False)
is True
)
def test_check_missing_organization_by_id(self) -> None:
assert organization_service.check_organization_by_id(id=1234, only_visible=True) is False
assert organization_service.check_organization_by_id(id=1234, only_visible=False) is False
def test_check_pending_deletion_organization_by_id(self) -> None:
self.organization = self.create_organization(slug="test")
self.organization.status = OrganizationStatus.PENDING_DELETION
with assume_test_silo_mode_of(Organization):
self.organization.save()
assert (
organization_service.check_organization_by_id(
id=self.organization.id, only_visible=True
)
is False
)
assert (
organization_service.check_organization_by_id(
id=self.organization.id, only_visible=False
)
is True
)
| CheckOrganizationTest |
python | apache__airflow | providers/amazon/src/airflow/providers/amazon/aws/secrets/systems_manager.py | {
"start": 1120,
"end": 8496
} | class ____(BaseSecretsBackend, LoggingMixin):
"""
Retrieves Connection or Variables from AWS SSM Parameter Store.
Configurable via ``airflow.cfg`` like so:
.. code-block:: ini
[secrets]
backend = airflow.providers.amazon.aws.secrets.systems_manager.SystemsManagerParameterStoreBackend
backend_kwargs = {"connections_prefix": "/airflow/connections", "profile_name": null}
For example, if ssm path is ``/airflow/connections/smtp_default``, this would be accessible
if you provide ``{"connections_prefix": "/airflow/connections"}`` and request conn_id ``smtp_default``.
And if ssm path is ``/airflow/variables/hello``, this would be accessible
if you provide ``{"variables_prefix": "/airflow/variables"}`` and variable key ``hello``.
:param connections_prefix: Specifies the prefix of the secret to read to get Connections.
If set to None (null), requests for connections will not be sent to AWS SSM Parameter Store.
:param connections_lookup_pattern: Specifies a pattern the connection ID needs to match to be looked up in
AWS Parameter Store. Applies only if `connections_prefix` is not None.
If set to None (null value in the configuration), all connections will be looked up first in
AWS Parameter Store.
:param variables_prefix: Specifies the prefix of the secret to read to get Variables.
If set to None (null), requests for variables will not be sent to AWS SSM Parameter Store.
:param variables_lookup_pattern: Specifies a pattern the variable key needs to match to be looked up in
AWS Parameter Store. Applies only if `variables_prefix` is not None.
If set to None (null value in the configuration), all variables will be looked up first in
AWS Parameter Store.
:param config_prefix: Specifies the prefix of the secret to read to get Variables.
If set to None (null), requests for configurations will not be sent to AWS SSM Parameter Store.
:param config_lookup_pattern: Specifies a pattern the config key needs to match to be looked up in
AWS Parameter Store. Applies only if `config_prefix` is not None.
If set to None (null value in the configuration), all config keys will be looked up first in
AWS Parameter Store.
You can also pass additional keyword arguments listed in AWS Connection Extra config
to this class, and they would be used for establish connection and passed on to Boto3 client.
.. code-block:: ini
[secrets]
backend = airflow.providers.amazon.aws.secrets.systems_manager.SystemsManagerParameterStoreBackend
backend_kwargs = {"connections_prefix": "airflow/connections", "region_name": "eu-west-1"}
.. seealso::
:ref:`howto/connection:aws:configuring-the-connection`
"""
def __init__(
self,
connections_prefix: str = "/airflow/connections",
connections_lookup_pattern: str | None = None,
variables_prefix: str = "/airflow/variables",
variables_lookup_pattern: str | None = None,
config_prefix: str = "/airflow/config",
config_lookup_pattern: str | None = None,
**kwargs,
):
super().__init__()
if connections_prefix is not None:
self.connections_prefix = connections_prefix.rstrip("/")
else:
self.connections_prefix = connections_prefix
if variables_prefix is not None:
self.variables_prefix = variables_prefix.rstrip("/")
else:
self.variables_prefix = variables_prefix
if config_prefix is not None:
self.config_prefix = config_prefix.rstrip("/")
else:
self.config_prefix = config_prefix
self.connections_lookup_pattern = connections_lookup_pattern
self.variables_lookup_pattern = variables_lookup_pattern
self.config_lookup_pattern = config_lookup_pattern
self.profile_name = kwargs.get("profile_name", None)
# Remove client specific arguments from kwargs
self.api_version = kwargs.pop("api_version", None)
self.use_ssl = kwargs.pop("use_ssl", None)
self.kwargs = kwargs
@cached_property
def client(self):
"""Create a SSM client."""
from airflow.providers.amazon.aws.hooks.base_aws import SessionFactory
from airflow.providers.amazon.aws.utils.connection_wrapper import AwsConnectionWrapper
conn_id = f"{self.__class__.__name__}__connection"
conn_config = AwsConnectionWrapper.from_connection_metadata(conn_id=conn_id, extra=self.kwargs)
client_kwargs = trim_none_values(
{
"region_name": conn_config.region_name,
"verify": conn_config.verify,
"endpoint_url": conn_config.endpoint_url,
"api_version": self.api_version,
"use_ssl": self.use_ssl,
}
)
session = SessionFactory(conn=conn_config).create_session()
return session.client(service_name="ssm", **client_kwargs)
def get_conn_value(self, conn_id: str) -> str | None:
"""
Get param value.
:param conn_id: connection id
"""
if self.connections_prefix is None:
return None
return self._get_secret(self.connections_prefix, conn_id, self.connections_lookup_pattern)
def get_variable(self, key: str) -> str | None:
"""
Get Airflow Variable.
:param key: Variable Key
:return: Variable Value
"""
if self.variables_prefix is None:
return None
return self._get_secret(self.variables_prefix, key, self.variables_lookup_pattern)
def get_config(self, key: str) -> str | None:
"""
Get Airflow Configuration.
:param key: Configuration Option Key
:return: Configuration Option Value
"""
if self.config_prefix is None:
return None
return self._get_secret(self.config_prefix, key, self.config_lookup_pattern)
def _get_secret(self, path_prefix: str, secret_id: str, lookup_pattern: str | None) -> str | None:
"""
Get secret value from Parameter Store.
:param path_prefix: Prefix for the Path to get Secret
:param secret_id: Secret Key
:param lookup_pattern: If provided, `secret_id` must match this pattern to look up the secret in
Systems Manager
"""
if lookup_pattern and not re.match(lookup_pattern, secret_id, re.IGNORECASE):
return None
ssm_path = self.build_path(path_prefix, secret_id)
ssm_path = self._ensure_leading_slash(ssm_path)
try:
response = self.client.get_parameter(Name=ssm_path, WithDecryption=True)
return response["Parameter"]["Value"]
except self.client.exceptions.ParameterNotFound:
self.log.debug("Parameter %s not found.", ssm_path)
return None
def _ensure_leading_slash(self, ssm_path: str):
"""
AWS Systems Manager mandate to have a leading "/". Adding it dynamically if not there to the SSM path.
:param ssm_path: SSM parameter path
"""
if not ssm_path.startswith("/"):
ssm_path = f"/{ssm_path}"
return ssm_path
| SystemsManagerParameterStoreBackend |
python | cython__cython | tests/run/test_asyncgen.py | {
"start": 3554,
"end": 4250
} | class ____(Exception):
pass
@types_coroutine
def awaitable(*, throw=False):
if throw:
yield ('throw',)
else:
yield ('result',)
def run_until_complete(coro):
exc = False
while True:
try:
if exc:
exc = False
fut = coro.throw(AwaitException)
else:
fut = coro.send(None)
except StopIteration as ex:
return ex.args[0]
if fut == ('throw',):
exc = True
def to_list(gen):
async def iterate():
res = []
async for i in gen:
res.append(i)
return res
return run_until_complete(iterate())
| AwaitException |
python | jina-ai__jina | jina/excepts.py | {
"start": 2541,
"end": 2664
} | class ____(RuntimeError, BaseJinaException):
"""Raised when trying to use a port which is already used"""
| PortAlreadyUsed |
python | RaRe-Technologies__gensim | gensim/test/test_tmdiff.py | {
"start": 356,
"end": 2944
} | class ____(unittest.TestCase):
def setUp(self):
self.dictionary = common_dictionary
self.corpus = common_corpus
self.num_topics = 5
self.n_ann_terms = 10
self.model = LdaModel(corpus=self.corpus, id2word=self.dictionary, num_topics=self.num_topics, passes=10)
def test_basic(self):
# test for matrix case
mdiff, annotation = self.model.diff(self.model, n_ann_terms=self.n_ann_terms)
self.assertEqual(mdiff.shape, (self.num_topics, self.num_topics))
self.assertEqual(len(annotation), self.num_topics)
self.assertEqual(len(annotation[0]), self.num_topics)
# test for diagonal case
mdiff, annotation = self.model.diff(self.model, n_ann_terms=self.n_ann_terms, diagonal=True)
self.assertEqual(mdiff.shape, (self.num_topics,))
self.assertEqual(len(annotation), self.num_topics)
def test_identity(self):
for dist_name in ["hellinger", "kullback_leibler", "jaccard"]:
# test for matrix case
mdiff, annotation = self.model.diff(self.model, n_ann_terms=self.n_ann_terms, distance=dist_name)
for row in annotation:
for (int_tokens, diff_tokens) in row:
self.assertEqual(diff_tokens, [])
self.assertEqual(len(int_tokens), self.n_ann_terms)
self.assertTrue(np.allclose(np.diag(mdiff), np.zeros(mdiff.shape[0], dtype=mdiff.dtype)))
if dist_name == "jaccard":
self.assertTrue(np.allclose(mdiff, np.zeros(mdiff.shape, dtype=mdiff.dtype)))
# test for diagonal case
mdiff, annotation = \
self.model.diff(self.model, n_ann_terms=self.n_ann_terms, distance=dist_name, diagonal=True)
for (int_tokens, diff_tokens) in annotation:
self.assertEqual(diff_tokens, [])
self.assertEqual(len(int_tokens), self.n_ann_terms)
self.assertTrue(np.allclose(mdiff, np.zeros(mdiff.shape, dtype=mdiff.dtype)))
if dist_name == "jaccard":
self.assertTrue(np.allclose(mdiff, np.zeros(mdiff.shape, dtype=mdiff.dtype)))
def test_input(self):
self.assertRaises(ValueError, self.model.diff, self.model, n_ann_terms=self.n_ann_terms, distance='something')
self.assertRaises(ValueError, self.model.diff, [], n_ann_terms=self.n_ann_terms, distance='something')
if __name__ == '__main__':
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.DEBUG)
unittest.main()
| TestLdaDiff |
python | django__django | django/db/migrations/operations/models.py | {
"start": 42754,
"end": 44444
} | class ____(IndexOperation):
category = OperationCategory.REMOVAL
option_name = "constraints"
def __init__(self, model_name, name):
self.model_name = model_name
self.name = name
def state_forwards(self, app_label, state):
state.remove_constraint(app_label, self.model_name_lower, self.name)
def database_forwards(self, app_label, schema_editor, from_state, to_state):
model = to_state.apps.get_model(app_label, self.model_name)
if self.allow_migrate_model(schema_editor.connection.alias, model):
from_model_state = from_state.models[app_label, self.model_name_lower]
constraint = from_model_state.get_constraint_by_name(self.name)
schema_editor.remove_constraint(model, constraint)
def database_backwards(self, app_label, schema_editor, from_state, to_state):
model = to_state.apps.get_model(app_label, self.model_name)
if self.allow_migrate_model(schema_editor.connection.alias, model):
to_model_state = to_state.models[app_label, self.model_name_lower]
constraint = to_model_state.get_constraint_by_name(self.name)
schema_editor.add_constraint(model, constraint)
def deconstruct(self):
return (
self.__class__.__name__,
[],
{
"model_name": self.model_name,
"name": self.name,
},
)
def describe(self):
return "Remove constraint %s from model %s" % (self.name, self.model_name)
@property
def migration_name_fragment(self):
return "remove_%s_%s" % (self.model_name_lower, self.name.lower())
| RemoveConstraint |
python | getsentry__sentry | tests/sentry/notifications/platform/msteams/test_provider.py | {
"start": 8815,
"end": 11973
} | class ____(TestCase):
def setUp(self) -> None:
super().setUp()
self.integration, self.org_integration = self.create_provider_integration_for(
provider=IntegrationProviderSlug.MSTEAMS,
organization=self.organization,
user=self.user,
name="test-msteams",
metadata={"service_url": "https://test.msteams.com", "team_id": "test-team-id"},
)
def _create_target(
self, resource_id: str = "19:test-channel@thread.skype"
) -> IntegrationNotificationTarget:
target = IntegrationNotificationTarget(
provider_key=NotificationProviderKey.MSTEAMS,
resource_id=resource_id,
resource_type=NotificationTargetResourceType.CHANNEL,
integration_id=self.integration.id,
organization_id=self.organization.id,
)
return target
def _create_renderable(self) -> MSTeamsRenderable:
"""Create a sample MSTeamsRenderable for testing"""
from sentry.integrations.msteams.card_builder.block import (
ADAPTIVE_CARD_SCHEMA_URL,
CURRENT_CARD_VERSION,
create_text_block,
)
return {
"type": "AdaptiveCard",
"body": [
create_text_block(text="Test Notification"),
create_text_block(text="This is a test message"),
],
"version": CURRENT_CARD_VERSION,
"$schema": ADAPTIVE_CARD_SCHEMA_URL,
}
@patch("sentry.integrations.msteams.integration.MsTeamsClient")
def test_send_success(self, mock_msteams_client: Mock) -> None:
"""Test successful message sending"""
mock_client_instance = mock_msteams_client.return_value
mock_client_instance.send_card.return_value = {"id": "1234567890"}
target = self._create_target()
renderable = self._create_renderable()
MSTeamsNotificationProvider.send(target=target, renderable=renderable)
mock_client_instance.send_card.assert_called_once_with(
conversation_id="19:test-channel@thread.skype", card=renderable
)
@patch("sentry.integrations.msteams.integration.MsTeamsClient")
def test_send_to_direct_message(self, mock_msteams_client: Mock) -> None:
"""Test sending message to direct message (user)"""
mock_client_instance = mock_msteams_client.return_value
mock_client_instance.send_card.return_value = {"id": "1234567890"}
target = IntegrationNotificationTarget(
provider_key=NotificationProviderKey.MSTEAMS,
resource_id="29:test-user-id", # User conversation ID format
resource_type=NotificationTargetResourceType.DIRECT_MESSAGE,
integration_id=self.integration.id,
organization_id=self.organization.id,
)
renderable = self._create_renderable()
MSTeamsNotificationProvider.send(target=target, renderable=renderable)
mock_client_instance.send_card.assert_called_once_with(
conversation_id="29:test-user-id", card=renderable
)
| MSTeamsNotificationProviderSendTest |
python | anthropics__anthropic-sdk-python | src/anthropic/lib/bedrock/_beta.py | {
"start": 459,
"end": 1372
} | class ____(SyncAPIResource):
@cached_property
def messages(self) -> Messages:
return Messages(self._client)
@cached_property
def with_raw_response(self) -> BetaWithRawResponse:
"""
This property can be used as a prefix for any HTTP method call to return the
the raw response object instead of the parsed content.
For more information, see https://www.github.com/anthropics/anthropic-sdk-python#accessing-raw-response-data-eg-headers
"""
return BetaWithRawResponse(self)
@cached_property
def with_streaming_response(self) -> BetaWithStreamingResponse:
"""
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
For more information, see https://www.github.com/anthropics/anthropic-sdk-python#with_streaming_response
"""
return BetaWithStreamingResponse(self)
| Beta |
python | langchain-ai__langchain | libs/core/langchain_core/exceptions.py | {
"start": 183,
"end": 283
} | class ____(LangChainException):
"""Base class for exceptions in tracers module."""
| TracerException |
python | altair-viz__altair | altair/vegalite/v6/schema/channels.py | {
"start": 1004980,
"end": 1005668
} | class ____(ValueChannelMixin, core.ValueDefnumber):
"""
XErrorValue schema wrapper.
Definition object for a constant value (primitive value or gradient definition) of an
encoding channel.
Parameters
----------
value : float
A constant value in visual domain (e.g., ``"red"`` / ``"#0099ff"`` / `gradient
definition <https://vega.github.io/vega-lite/docs/types.html#gradient>`__ for color,
values between ``0`` to ``1`` for opacity).
"""
_class_is_valid_at_instantiation = False
_encoding_name = "xError"
def __init__(self, value, **kwds):
super().__init__(value=value, **kwds)
@with_property_setters
| XErrorValue |
python | getsentry__sentry | src/sentry/auth_v2/utils/session.py | {
"start": 341,
"end": 853
} | class ____(TypedDict, total=False):
# Flags to control the authentication flow on frontend.
# Keep the keys sorted in order of importance!!
# Maintaining the hierarchy is good context for future engineers.
todoEmailVerification: bool | None
todo2faVerification: bool | None
todoPasswordReset: bool | None
todo2faSetup: bool | None
userId: str | None
sessionCsrfToken: str | None
sessionExpiryDate: datetime | None
sessionOrgs: list[str] | None
| SessionSerializerResponse |
python | huggingface__transformers | src/transformers/models/d_fine/modeling_d_fine.py | {
"start": 95479,
"end": 96109
} | class ____(nn.Module):
def __init__(self, config: DFineConfig):
super().__init__()
self.layers = nn.ModuleList([DFineEncoderLayer(config) for _ in range(config.encoder_layers)])
def forward(self, src, src_mask=None, pos_embed=None, output_attentions: bool = False) -> torch.Tensor:
hidden_states = src
for layer in self.layers:
hidden_states = layer(
hidden_states,
attention_mask=src_mask,
position_embeddings=pos_embed,
output_attentions=output_attentions,
)
return hidden_states
| DFineEncoder |
python | lepture__authlib | tests/flask/test_oauth1/oauth1_server.py | {
"start": 1691,
"end": 2227
} | class ____(TokenCredentialMixin, db.Model):
id = db.Column(db.Integer, primary_key=True)
user_id = db.Column(db.Integer, db.ForeignKey("user.id", ondelete="CASCADE"))
user = db.relationship("User")
client_id = db.Column(db.String(48), index=True)
oauth_token = db.Column(db.String(84), unique=True, index=True)
oauth_token_secret = db.Column(db.String(84))
def get_oauth_token(self):
return self.oauth_token
def get_oauth_token_secret(self):
return self.oauth_token_secret
| TokenCredential |
python | numba__numba | numba/core/typing/arraydecl.py | {
"start": 21270,
"end": 22142
} | class ____(AbstractTemplate):
key = "static_getitem"
def generic(self, args, kws):
# Resolution of members for record and structured arrays
ary, idx = args
if (isinstance(ary, types.Array) and isinstance(idx, str) and
isinstance(ary.dtype, types.Record)):
if idx in ary.dtype.fields:
attr_dtype = ary.dtype.typeof(idx)
if isinstance(attr_dtype, types.NestedArray):
ret = ary.copy(
dtype=attr_dtype.dtype,
ndim=ary.ndim + attr_dtype.ndim,
layout='A'
)
return signature(ret, *args)
else:
ret = ary.copy(dtype=attr_dtype, layout='A')
return signature(ret, *args)
@infer_getattr
| StaticGetItemArray |
python | joke2k__faker | tests/providers/test_bank.py | {
"start": 4421,
"end": 4887
} | class ____:
"""Test fi_FI bank provider"""
def test_bban(self, faker, num_samples):
for _ in range(num_samples):
assert re.fullmatch(r"\d{14}", faker.bban())
def test_iban(self, faker, num_samples):
for _ in range(num_samples):
iban = faker.iban()
assert is_valid_iban(iban)
assert iban[:2] == FiFiBankProvider.country_code
assert re.fullmatch(r"\d{2}\d{14}", iban[2:])
| TestFiFi |
python | huggingface__transformers | src/transformers/models/bit/modeling_bit.py | {
"start": 6995,
"end": 8043
} | class ____(nn.MaxPool2d):
def __init__(
self,
kernel_size: int,
stride=None,
dilation=1,
ceil_mode=False,
padding=(0, 0),
padding_value=0,
use_dynamic_padding=True,
):
kernel_size = kernel_size if isinstance(kernel_size, collections.abc.Iterable) else (kernel_size, kernel_size)
stride = stride if isinstance(stride, collections.abc.Iterable) else (stride, stride)
dilation = dilation if isinstance(dilation, collections.abc.Iterable) else (dilation, dilation)
super().__init__(kernel_size, stride, padding, dilation, ceil_mode)
if use_dynamic_padding:
self.pad = DynamicPad2d(kernel_size, stride, dilation, padding_value)
else:
self.pad = nn.Identity()
def forward(self, hidden_states):
hidden_states = self.pad(hidden_states)
return nn.functional.max_pool2d(
hidden_states, self.kernel_size, self.stride, self.padding, self.dilation, self.ceil_mode
)
| BitMaxPool2d |
python | gevent__gevent | src/gevent/tests/test__greenness.py | {
"start": 1781,
"end": 1948
} | class ____(SimpleHTTPRequestHandler, object):
def log_message(self, *args): # pylint:disable=arguments-differ
self.server.messages += ((args,),)
| QuietHandler |
python | Netflix__metaflow | metaflow/plugins/argo/argo_workflows.py | {
"start": 208997,
"end": 209564
} | class ____(object):
# https://argoproj.github.io/argo-workflows/fields/#arguments
def __init__(self):
tree = lambda: defaultdict(tree)
self.payload = tree()
def parameters(self, parameters):
if "parameters" not in self.payload:
self.payload["parameters"] = []
for parameter in parameters:
self.payload["parameters"].append(parameter.to_json())
return self
def to_json(self):
return self.payload
def __str__(self):
return json.dumps(self.payload, indent=4)
| Arguments |
python | celery__celery | t/unit/worker/test_bootsteps.py | {
"start": 3857,
"end": 5005
} | class ____:
class Def(bootsteps.StartStopStep):
name = 'test_StartStopStep.Def'
def setup_method(self):
self.steps = []
def test_start__stop(self):
x = self.Def(self)
x.create = Mock()
# include creates the underlying object and sets
# its x.obj attribute to it, as well as appending
# it to the parent.steps list.
x.include(self)
assert self.steps
assert self.steps[0] is x
x.start(self)
x.obj.start.assert_called_with()
x.stop(self)
x.obj.stop.assert_called_with()
x.obj = None
assert x.start(self) is None
def test_terminate__no_obj(self):
x = self.Def(self)
x.obj = None
x.terminate(Mock())
def test_include_when_disabled(self):
x = self.Def(self)
x.enabled = False
x.include(self)
assert not self.steps
def test_terminate(self):
x = self.Def(self)
x.create = Mock()
x.include(self)
delattr(x.obj, 'terminate')
x.terminate(self)
x.obj.stop.assert_called_with()
| test_StartStopStep |
python | sqlalchemy__sqlalchemy | test/orm/inheritance/test_poly_persistence.py | {
"start": 2884,
"end": 5763
} | class ____(PolymorphTest):
def test_insert_order(self):
"""test that classes of multiple types mix up mapper inserts
so that insert order of individual tables is maintained"""
person_join = polymorphic_union(
{
"engineer": people.join(engineers),
"manager": people.join(managers),
"person": people.select()
.where(people.c.type == "person")
.subquery(),
},
None,
"pjoin",
)
person_mapper = self.mapper_registry.map_imperatively(
Person,
people,
with_polymorphic=("*", person_join),
polymorphic_on=person_join.c.type,
polymorphic_identity="person",
)
self.mapper_registry.map_imperatively(
Engineer,
engineers,
inherits=person_mapper,
polymorphic_identity="engineer",
)
self.mapper_registry.map_imperatively(
Manager,
managers,
inherits=person_mapper,
polymorphic_identity="manager",
)
self.mapper_registry.map_imperatively(
Company,
companies,
properties={
"employees": relationship(
Person, backref="company", order_by=person_join.c.person_id
)
},
)
session = fixture_session()
c = Company(name="company1")
c.employees.append(
Manager(
status="AAB",
manager_name="manager1",
name="pointy haired boss",
)
)
c.employees.append(
Engineer(
status="BBA",
engineer_name="engineer1",
primary_language="java",
name="dilbert",
)
)
c.employees.append(Person(status="HHH", name="joesmith"))
c.employees.append(
Engineer(
status="CGG",
engineer_name="engineer2",
primary_language="python",
name="wally",
)
)
c.employees.append(
Manager(status="ABA", manager_name="manager2", name="jsmith")
)
session.add(c)
session.flush()
session.expunge_all()
eq_(session.get(Company, c.company_id), c)
@testing.combinations(
("lazy", True), ("nonlazy", False), argnames="lazy_relationship", id_="ia"
)
@testing.combinations(
("redefine", True),
("noredefine", False),
argnames="redefine_colprop",
id_="ia",
)
@testing.combinations(
("unions", True),
("unions", False),
("joins", False),
("auto", False),
("none", False),
argnames="with_polymorphic,include_base",
id_="rr",
)
| InsertOrderTest |
python | google__pytype | pytype/errors/error_printer.py | {
"start": 371,
"end": 493
} | class ____:
expected: str
bad_actual: str
full_actual: str
error_details: list[str]
@dataclasses.dataclass
| BadReturn |
python | tensorflow__tensorflow | tensorflow/python/framework/memory_checker_test.py | {
"start": 987,
"end": 3885
} | class ____(test.TestCase):
def testNoLeakEmpty(self):
with MemoryChecker() as memory_checker:
memory_checker.record_snapshot()
memory_checker.record_snapshot()
memory_checker.record_snapshot()
memory_checker.record_snapshot()
memory_checker.report()
memory_checker.assert_no_leak_if_all_possibly_except_one()
def testNoLeak1(self):
with MemoryChecker() as memory_checker:
memory_checker.record_snapshot()
x = constant_op.constant(1) # pylint: disable=unused-variable
memory_checker.record_snapshot()
memory_checker.record_snapshot()
memory_checker.record_snapshot()
memory_checker.report()
memory_checker.assert_no_leak_if_all_possibly_except_one()
def testNoLeak3(self):
with MemoryChecker() as memory_checker:
tensors = []
for i in range(10):
if i not in (5, 7):
tensors.append(constant_op.constant(1))
memory_checker.record_snapshot()
memory_checker.report()
memory_checker.assert_no_leak_if_all_possibly_except_one()
def testLeak1(self):
with MemoryChecker() as memory_checker:
memory_checker.record_snapshot()
x = constant_op.constant(1) # pylint: disable=unused-variable
memory_checker.record_snapshot()
y = constant_op.constant(1) # pylint: disable=unused-variable
memory_checker.record_snapshot()
memory_checker.record_snapshot()
memory_checker.report()
with self.assertRaises(AssertionError):
memory_checker.assert_no_leak_if_all_possibly_except_one()
def testLeak3(self):
with MemoryChecker() as memory_checker:
tensors = []
for _ in range(10):
tensors.append(constant_op.constant(1))
memory_checker.record_snapshot()
memory_checker.report()
with self.assertRaises(AssertionError):
memory_checker.assert_no_leak_if_all_possibly_except_one()
def testNoNewPythonObjectsEmpty(self):
with MemoryChecker() as memory_checker:
memory_checker.record_snapshot()
memory_checker.record_snapshot()
memory_checker.assert_no_new_python_objects()
def testNewPythonObjects(self):
with MemoryChecker() as memory_checker:
memory_checker.record_snapshot()
x = constant_op.constant(1)
memory_checker.record_snapshot()
with self.assertRaisesRegex(AssertionError, 'New Python objects'):
memory_checker.assert_no_new_python_objects()
# use x to avoid any potential for optimizing it away.
self.assertIsNot(x, None)
def testNewPythonObjectBelowThreshold(self):
class Foo(object):
pass
with MemoryChecker() as memory_checker:
memory_checker.record_snapshot()
foo = Foo()
del foo
memory_checker.record_snapshot()
memory_checker.assert_no_new_python_objects()
if __name__ == '__main__':
ops.enable_eager_execution()
test.main()
| MemoryCheckerTest |
python | doocs__leetcode | lcof/面试题51. 数组中的逆序对/Solution2.py | {
"start": 343,
"end": 674
} | class ____:
def reversePairs(self, nums: List[int]) -> int:
alls = sorted(set(nums))
m = len(alls)
tree = BinaryIndexedTree(m)
ans = 0
for v in nums[::-1]:
x = bisect_left(alls, v) + 1
ans += tree.query(x - 1)
tree.update(x, 1)
return ans
| Solution |
python | bokeh__bokeh | src/bokeh/models/tools.py | {
"start": 63514,
"end": 63772
} | class ____(ActionTool):
''' A tool that allows to enlarge a UI element to fullscreen. '''
# explicit __init__ to support Init signatures
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
| FullscreenTool |
python | django__django | tests/admin_inlines/admin.py | {
"start": 5579,
"end": 5687
} | class ____(admin.TabularInline):
model = ShoppingWeakness
form = WeaknessForm
| WeaknessInlineCustomForm |
python | cython__cython | Cython/Debugger/libpython.py | {
"start": 89556,
"end": 91497
} | class ____(gdb.Command):
def __init__(self, command, actual_command):
super().__init__(command, gdb.COMMAND_DATA,
gdb.COMPLETE_NONE)
self.actual_command = actual_command
def fix_gdb(self):
"""
It seems that invoking either 'cy exec' and 'py-exec' work perfectly
fine, but after this gdb's python API is entirely broken.
Maybe some uncleared exception value is still set?
sys.exc_clear() didn't help. A demonstration:
(gdb) cy exec 'hello'
'hello'
(gdb) python gdb.execute('cont')
RuntimeError: Cannot convert value to int.
Error while executing Python code.
(gdb) python gdb.execute('cont')
[15148 refs]
Program exited normally.
"""
warnings.filterwarnings('ignore', r'.*', RuntimeWarning,
re.escape(__name__))
try:
int(gdb.parse_and_eval("(void *) 0")) == 0
except RuntimeError:
pass
# warnings.resetwarnings()
@dont_suppress_errors
def invoke(self, args, from_tty):
self.fix_gdb()
try:
gdb.execute('%s %s' % (self.actual_command, args))
except RuntimeError as e:
raise gdb.GdbError(str(e))
self.fix_gdb()
def _evalcode_python(executor, code, input_type):
"""
Execute Python code in the most recent stack frame.
"""
global_dict = gdb.parse_and_eval('PyEval_GetGlobals()')
local_dict = gdb.parse_and_eval('PyEval_GetLocals()')
if (pointervalue(global_dict) == 0 or pointervalue(local_dict) == 0):
raise gdb.GdbError("Unable to find the locals or globals of the "
"most recent Python function (relative to the "
"selected frame).")
return executor.evalcode(code, input_type, global_dict, local_dict)
| FixGdbCommand |
python | django__django | tests/delete_regress/tests.py | {
"start": 9347,
"end": 11926
} | class ____(TestCase):
"""
Test different queries which alter the SELECT clause of the query. We
also must be using a subquery for the deletion (that is, the original
query has a join in it). The deletion should be done as "fast-path"
deletion (that is, just one query for the .delete() call).
Note that .values() is not tested here on purpose. .values().delete()
doesn't work for non fast-path deletes at all.
"""
@classmethod
def setUpTestData(cls):
cls.o1 = OrgUnit.objects.create(name="o1")
cls.o2 = OrgUnit.objects.create(name="o2")
cls.l1 = Login.objects.create(description="l1", orgunit=cls.o1)
cls.l2 = Login.objects.create(description="l2", orgunit=cls.o2)
@skipUnlessDBFeature("update_can_self_select")
def test_ticket_19102_annotate(self):
with self.assertNumQueries(1):
Login.objects.order_by("description").filter(
orgunit__name__isnull=False
).annotate(n=models.Count("description")).filter(
n=1, pk=self.l1.pk
).delete()
self.assertFalse(Login.objects.filter(pk=self.l1.pk).exists())
self.assertTrue(Login.objects.filter(pk=self.l2.pk).exists())
@skipUnlessDBFeature("update_can_self_select")
def test_ticket_19102_extra(self):
with self.assertNumQueries(1):
Login.objects.order_by("description").filter(
orgunit__name__isnull=False
).extra(select={"extraf": "1"}).filter(pk=self.l1.pk).delete()
self.assertFalse(Login.objects.filter(pk=self.l1.pk).exists())
self.assertTrue(Login.objects.filter(pk=self.l2.pk).exists())
@skipUnlessDBFeature("update_can_self_select")
def test_ticket_19102_select_related(self):
with self.assertNumQueries(1):
Login.objects.filter(pk=self.l1.pk).filter(
orgunit__name__isnull=False
).order_by("description").select_related("orgunit").delete()
self.assertFalse(Login.objects.filter(pk=self.l1.pk).exists())
self.assertTrue(Login.objects.filter(pk=self.l2.pk).exists())
@skipUnlessDBFeature("update_can_self_select")
def test_ticket_19102_defer(self):
with self.assertNumQueries(1):
Login.objects.filter(pk=self.l1.pk).filter(
orgunit__name__isnull=False
).order_by("description").only("id").delete()
self.assertFalse(Login.objects.filter(pk=self.l1.pk).exists())
self.assertTrue(Login.objects.filter(pk=self.l2.pk).exists())
| Ticket19102Tests |
python | joke2k__faker | faker/providers/geo/en_IE/__init__.py | {
"start": 41,
"end": 3029
} | class ____(GeoProvider):
# Source: https://www.latlong.net/category/towns-106-55.html
land_coords = (
(
"53.944000",
"-8.095000",
"Carrish on Shannon, Leitrim,",
"IE",
"Europe/Dublin",
),
("52.354279", "-7.695040", "Clonmel, Co. Tipperary,", "IE", "Europe/Dublin"),
(
"52.668076",
"-8.536722",
"Annacotty, County Limerick,",
"IE",
"Europe/Dublin",
),
("52.808887", "-8.447515", "Killaloe, Co. Clare,", "IE", "Europe/Dublin"),
("52.059937", "-9.504427", "Killarney, Co. Kerry,", "IE", "Europe/Dublin"),
("51.850334", "-8.294286", "Cobh, Co. Cork,", "IE", "Europe/Dublin"),
("51.842426", "-8.830432", "Crookstown, Co. Cork,", "IE", "Europe/Dublin"),
("53.802132", "-9.514347", "Westport, Co. Mayo,", "IE", "Europe/Dublin"),
("52.856201", "-9.400776", "Miltown Malbay, Co. Clare,", "IE", "Europe/Dublin"),
("53.016029", "-9.377420", "Doolin, Co. Clare,", "IE", "Europe/Dublin"),
("53.011299", "-6.326156", "Glendalough, Co. Wicklow,", "IE", "Europe/Dublin"),
("53.342617", "-7.613920", "Clara, Co. Offaly,", "IE", "Europe/Dublin"),
("51.555923", "-9.262130", "Skibbereen, County Cork,", "IE", "Europe/Dublin"),
("53.525932", "-7.338138", "Mullingar, Co. Westmeath,", "IE", "Europe/Dublin"),
("53.357208", "-6.449849", "Lucan, Co. Dublin,", "IE", "Europe/Dublin"),
("52.336918", "-6.463338", "Wexford", "IE", "Europe/Dublin"),
("53.381290", "-6.591850", "Maynooth, Co. Kildare,", "IE", "Europe/Dublin"),
("53.220566", "-6.659308", "Naas, Co. Kildare,", "IE", "Europe/Dublin"),
("53.176861", "-6.802586", "Newbridge, Co. Kildare,", "IE", "Europe/Dublin"),
("52.847054", "-8.988436", "Ennis, Clare,", "IE", "Europe/Dublin"),
("53.293785", "-6.687040", "Clane, Co. Kildare,", "IE", "Europe/Dublin"),
("52.703434", "-6.956577", "Mhuine Bheag, Co. Carlow,", "IE", "Europe/Dublin"),
("51.555923", "-9.262130", "Skibbereen, Co. Cork,", "IE", "Europe/Dublin"),
("53.580429", "-6.106339", "Skerries, Co. Dublin,", "IE", "Europe/Dublin"),
("52.138977", "-8.653916", "Mallow, Co. Cork,", "IE", "Europe/Dublin"),
("53.653042", "-6.684230", "Navan, Co. Meath,", "IE", "Europe/Dublin"),
("51.880878", "-8.658640", "Ovens, Co. Cork,", "IE", "Europe/Dublin"),
("53.279690", "-6.181159", "Newpark, Dublin,", "IE", "Europe/Dublin"),
("51.706051", "-8.522501", "Kinsale, Co. Cork,", "IE", "Europe/Dublin"),
("53.580551", "-6.107878", "Skerries, Co. Dublin,", "IE", "Europe/Dublin"),
("55.194862", "-7.836965", "Downings, Co. Donegal,", "IE", "Europe/Dublin"),
("53.514114", "-8.856518", "Tuam, Co. Galway,", "IE", "Europe/Dublin"),
("53.270962", "-9.062691", "Galway", "IE", "Europe/Dublin"),
)
| Provider |
python | pytorch__pytorch | torch/_inductor/codegen/rocm/rocm_cpp_scheduling.py | {
"start": 481,
"end": 3878
} | class ____(BaseScheduling):
"""
Partial Scheduling implementation for ROCm C++ Kernels.
This class is intended to be used in combination with TritonScheduling,
and delegated to by CUDACombinedScheduling.
It handles fusion decisions and ROCm C++ specific template code generation.
"""
def group_fn(self, sizes):
return tuple(V.graph.sizevars.simplify(sympy_product(s)) for s in sizes)
@staticmethod
def is_rocm_cpp_template(node: BaseSchedulerNode) -> bool:
return isinstance(node, SchedulerNode) and isinstance(
node.node, ROCmTemplateBuffer
)
def can_fuse_vertical(
self, node1: BaseSchedulerNode, node2: BaseSchedulerNode
) -> bool:
return False
def define_kernel(self, src_code: str, node_schedule) -> str:
wrapper = V.graph.wrapper_code
if src_code in wrapper.src_to_kernel:
kernel_name = wrapper.src_to_kernel[src_code]
else:
fused_name = (
get_fused_kernel_name(node_schedule, config.triton.descriptive_names)
if config.triton.descriptive_names
else ""
)
kernel_name = "_".join(["rocm", fused_name, wrapper.next_kernel_suffix()])
# use the original src_code as the key
wrapper.src_to_kernel[src_code] = kernel_name
src_code = src_code.replace("KERNEL_NAME", kernel_name)
_, _, kernel_path = get_path(code_hash(src_code), "py")
compile_wrapper = IndentedBuffer()
compile_wrapper.writeline("async_compile.rocm(r'''")
compile_wrapper.splice(src_code, strip=True)
compile_wrapper.writeline(
f"''', 'so', aot_compile={str(V.graph.aot_mode)})"
)
metadata_comment = f"# kernel path: {kernel_path}"
origins, detailed_origins = get_kernel_metadata(node_schedule, wrapper)
metadata_comment += "\n" + origins + "\n" + detailed_origins
wrapper.define_kernel(
kernel_name, compile_wrapper.getvalue(), metadata_comment
)
return kernel_name
def codegen_template(
self,
template_node: BaseSchedulerNode,
epilogue_nodes: Sequence[BaseSchedulerNode],
prologue_nodes: Sequence[BaseSchedulerNode],
):
"""
Codegen a ROCm template, possibly with fused epilogues
"""
assert self.is_rocm_cpp_template(template_node), (
"Template node passed to ROCmScheduler.codegen_template must be a SchedulerNode that wraps a ROCmTemplateBuffer"
)
template_node = cast(SchedulerNode, template_node)
_, (_numel, rnumel) = template_node.group
assert rnumel == 1
ctb: ROCmTemplateBuffer = cast(ROCmTemplateBuffer, template_node.node)
kernel, render = ctb.make_kernel_render(ctb) # type: ignore[misc]
with kernel:
template_node.mark_run()
src_code = render()
with V.set_kernel_handler(kernel):
node_schedule = [template_node]
kernel_name = self.define_kernel(src_code, node_schedule)
self.codegen_comment(node_schedule, kernel_name)
kernel.call_kernel(kernel_name, ctb)
V.graph.removed_buffers |= kernel.removed_buffers
self.free_buffers_in_scheduler()
| ROCmCPPScheduling |
python | tiangolo__fastapi | fastapi/security/http.py | {
"start": 7069,
"end": 10264
} | class ____(HTTPBase):
"""
HTTP Bearer token authentication.
## Usage
Create an instance object and use that object as the dependency in `Depends()`.
The dependency result will be an `HTTPAuthorizationCredentials` object containing
the `scheme` and the `credentials`.
## Example
```python
from typing import Annotated
from fastapi import Depends, FastAPI
from fastapi.security import HTTPAuthorizationCredentials, HTTPBearer
app = FastAPI()
security = HTTPBearer()
@app.get("/users/me")
def read_current_user(
credentials: Annotated[HTTPAuthorizationCredentials, Depends(security)]
):
return {"scheme": credentials.scheme, "credentials": credentials.credentials}
```
"""
def __init__(
self,
*,
bearerFormat: Annotated[Optional[str], Doc("Bearer token format.")] = None,
scheme_name: Annotated[
Optional[str],
Doc(
"""
Security scheme name.
It will be included in the generated OpenAPI (e.g. visible at `/docs`).
"""
),
] = None,
description: Annotated[
Optional[str],
Doc(
"""
Security scheme description.
It will be included in the generated OpenAPI (e.g. visible at `/docs`).
"""
),
] = None,
auto_error: Annotated[
bool,
Doc(
"""
By default, if the HTTP Bearer token is not provided (in an
`Authorization` header), `HTTPBearer` will automatically cancel the
request and send the client an error.
If `auto_error` is set to `False`, when the HTTP Bearer token
is not available, instead of erroring out, the dependency result will
be `None`.
This is useful when you want to have optional authentication.
It is also useful when you want to have authentication that can be
provided in one of multiple optional ways (for example, in an HTTP
Bearer token or in a cookie).
"""
),
] = True,
):
self.model = HTTPBearerModel(bearerFormat=bearerFormat, description=description)
self.scheme_name = scheme_name or self.__class__.__name__
self.auto_error = auto_error
async def __call__(
self, request: Request
) -> Optional[HTTPAuthorizationCredentials]:
authorization = request.headers.get("Authorization")
scheme, credentials = get_authorization_scheme_param(authorization)
if not (authorization and scheme and credentials):
if self.auto_error:
raise self.make_not_authenticated_error()
else:
return None
if scheme.lower() != "bearer":
if self.auto_error:
raise self.make_not_authenticated_error()
else:
return None
return HTTPAuthorizationCredentials(scheme=scheme, credentials=credentials)
| HTTPBearer |
python | numba__llvmlite | llvmlite/ir/types.py | {
"start": 18490,
"end": 20079
} | class ____(BaseStructType):
"""
A type which is a named alias for another struct type, akin to a typedef.
While literal struct types can be structurally equal (see
LiteralStructType), identified struct types are compared by name.
Do not use this directly.
"""
null = 'zeroinitializer'
def __init__(self, context, name, packed=False):
"""
*context* is a llvmlite.ir.Context.
*name* is the identifier for the new struct type.
*packed* controls the use of packed layout.
"""
assert name
self.context = context
self.name = name
self.elements = None
self.packed = packed
def _to_string(self):
return "%{name}".format(name=_wrapname(self.name))
def get_declaration(self):
"""
Returns the string for the declaration of the type
"""
if self.is_opaque:
out = "{strrep} = type opaque".format(strrep=str(self))
else:
out = "{strrep} = type {struct}".format(
strrep=str(self), struct=self.structure_repr())
return out
def __eq__(self, other):
if isinstance(other, IdentifiedStructType):
return (self.name == other.name
and self.packed == other.packed)
def __hash__(self):
return hash(IdentifiedStructType)
def set_body(self, *elems):
if not self.is_opaque:
raise RuntimeError("{name} is already defined".format(
name=self.name))
self.elements = tuple(elems)
| IdentifiedStructType |
python | huggingface__transformers | src/transformers/models/minimax/modeling_minimax.py | {
"start": 40342,
"end": 40445
} | class ____(GenericForTokenClassification, MiniMaxPreTrainedModel):
pass
| MiniMaxForTokenClassification |
python | xlwings__xlwings | xlwings/base_classes.py | {
"start": 61,
"end": 500
} | class ____:
def keys(self):
raise NotImplementedError()
def add(self, spec=None, add_book=None, xl=None, visible=None):
raise NotImplementedError()
@staticmethod
def cleanup():
raise NotImplementedError()
def __iter__(self):
raise NotImplementedError()
def __len__(self):
raise NotImplementedError()
def __getitem__(self, pid):
raise NotImplementedError()
| Apps |
python | pytorch__pytorch | test/distributed/_composable/test_checkpoint.py | {
"start": 2848,
"end": 11220
} | class ____(TestCase):
def _get_graph_size(self, out: torch.Tensor) -> int:
q = deque([out.grad_fn])
num_functions = 0
while len(q):
fn = q.pop()
num_functions += 1
for next_fn, _ in fn.next_functions:
if next_fn:
q.append(next_fn)
return num_functions
def _test_tensor_only(
self,
net: nn.Module,
x: torch.Tensor,
) -> None:
x1 = x.clone()
x2 = x.clone()
x1.requires_grad = True
x2.requires_grad = True
net1 = net
net2 = deepcopy(net)
# no checkpoint
with MemoryDelta(x.device) as mem1:
loss1 = net1(x1).sum()
loss1.backward()
# with checkpoint
checkpoint(net2.seq)
with MemoryDelta(x.device) as mem2:
loss2 = net2(x2).sum()
loss2.backward()
if x.is_cuda or x.is_xpu:
self.assertTrue(mem2.delta() < mem1.delta())
for p1, p2 in zip(net1.parameters(), net2.parameters()):
self.assertEqual(p1.grad, p2.grad)
def test_tensor_only_cpu(self):
x = torch.randn(20, 100)
net = ToyModel()
self._test_tensor_only(net, x)
@unittest.skipIf(not TEST_CUDA and not TEST_XPU, "no cuda/xpu")
def test_tensor_only_gpu(self):
x = torch.randn(20, 100, device=f"{device_type}:0")
net = ToyModel().to(f"{device_type}:0")
self._test_tensor_only(net, x)
def test_random_cpu(self):
x1 = torch.randn(20, 100, requires_grad=True)
x2 = x1.clone()
net1 = RandomModel()
net2 = deepcopy(net1)
cpu_rng_state = torch.get_rng_state()
net1(x1).sum().backward()
torch.set_rng_state(cpu_rng_state)
checkpoint(net2)(x2).sum().backward()
for p1, p2 in zip(net1.parameters(), net2.parameters()):
self.assertEqual(p1.grad, p2.grad)
def test_multi_args(self):
"""
Tests checkpoint for modules with multiple output args and hence
multiple backward function input args.
"""
device = torch.device("cpu")
net1 = nn.Sequential(
MultiOutputModel(device),
MultiInputModel(device),
MultiOutputModel(device),
MultiInputModel(device),
)
net2 = deepcopy(net1)
checkpoint(net2[0])
checkpoint(net2[2])
x1 = torch.randn(20, 100, requires_grad=True)
x2 = x1.clone()
net1(x1).sum().backward()
net2(x2).sum().backward()
for p1, p2 in zip(net1.parameters(), net2.parameters()):
self.assertEqual(p1.grad, p2.grad)
def test_clears_state_on_error_in_forward(self):
class MyModel(torch.nn.Module):
def __init__(self, raise_in_recomp):
super().__init__()
self.fwd_count = 0
self.raise_in_recomp = raise_in_recomp
self.a = torch.nn.Linear(2, 2)
def forward(self, x):
if self.raise_in_recomp and self.fwd_count == 1:
raise RuntimeError("foo")
else:
if not self.raise_in_recomp:
# raise in the first forward
raise RuntimeError("foo")
self.fwd_count += 1
return self.a(x)
m = MyModel(raise_in_recomp=True)
m_seq = torch.nn.Sequential(OrderedDict({"m": m}))
checkpoint(m_seq.m)
inp = torch.randn(1, 2)
out = m_seq(inp).sum()
# Should raise in forward recomputation
with self.assertRaisesRegex(RuntimeError, "foo"):
out.backward()
# Check that _ac_generator is cleared out
self.assertEqual(None, checkpoint.state(m)._ac_generator)
m = MyModel(raise_in_recomp=False)
checkpoint(m)
inp = torch.randn(1, 2)
# Should raise in first forward
with self.assertRaises(RuntimeError):
m(inp)
self.assertEqual(None, checkpoint.state(m)._ac_generator)
def test_checkpoint_kwargs(self):
class MyModel(torch.nn.Module):
def __init__(self, raise_exp: bool, change_shape_in_recomp: bool):
super().__init__()
self.fwd_count = 0
self.raise_exp = raise_exp
self.change_shape_in_recomp = change_shape_in_recomp
self.a = torch.nn.Linear(2, 2)
def forward(self, x):
if self.raise_exp and self.fwd_count == 0:
raise RuntimeError("foo")
if self.raise_exp and self.fwd_count == 1:
raise RuntimeError("bar")
if self.change_shape_in_recomp and self.fwd_count == 1:
x.relu_()
random_tensor = torch.randn(1, 2)
x = self.a(x + random_tensor)
self.fwd_count += 1
return x
m = MyModel(True, False)
m0, m1, m2, m3 = (deepcopy(m) for _ in range(4))
# composable checkpoint does not support use_reentrant=True
with self.assertRaisesRegex(
NotImplementedError,
"use_reentrant=True is not supported in composable checkpoint. "
"Please use torch.utils.checkpoint.checkpoint instead.",
):
checkpoint(m, use_reentrant=True)
# check giving an unsupported kwarg
with self.assertRaisesRegex(ValueError, "Unexpected keyword arguments: foo"):
checkpoint(m0, foo="bar")
handled_fwd_exp = False
handled_recomp_exp = False
@contextmanager
def fwd_ctx(mod: MyModel):
try:
mod.raise_exp = False
yield
finally:
nonlocal handled_fwd_exp
handled_fwd_exp = True
mod.raise_exp = True
@contextmanager
def recomp_ctx(mod: MyModel):
try:
mod.raise_exp = False
yield
finally:
nonlocal handled_recomp_exp
handled_recomp_exp = True
mod.raise_exp = True
# Test different context functions
x = torch.randn(1, 2, requires_grad=True)
checkpoint(
m1, context_fn=lambda: (partial(fwd_ctx, m1)(), partial(recomp_ctx, m1)())
)
m1(x.clone()).sum().backward()
self.assertEqual((handled_fwd_exp, handled_recomp_exp), (True, True))
checkpoint(m2, context_fn=lambda: (nullcontext(), partial(recomp_ctx, m2)()))
with self.assertRaisesRegex(RuntimeError, "foo"):
m2(x.clone())
handled_fwd_exp = False # Reset flag
checkpoint(m3, context_fn=lambda: (partial(fwd_ctx, m3)(), nullcontext()))
with self.assertRaisesRegex(RuntimeError, "bar"):
m3(x.clone()).sum().backward()
self.assertEqual(handled_fwd_exp, True)
# Test determinism check failure
m4 = MyModel(False, True)
m5 = deepcopy(m4)
# Determinism check should not throw an error,
# but autograd should throw a RuntimeError
checkpoint(m4, determinism_check="none")
with self.assertRaises(RuntimeError):
m4(x.clone()).sum().backward()
# Determinism check should throw a CheckpointError
checkpoint(m5, determinism_check="default")
with self.assertRaises(CheckpointError):
m5(x.clone()).sum().backward()
# Test preserving random state
m6 = MyModel(False, False)
m7, m8 = (deepcopy(m6) for _ in range(2))
checkpoint(m7, preserve_rng_state=False)
checkpoint(m8, preserve_rng_state=True)
for mi in (m6, m7, m8):
torch.manual_seed(42)
loss = mi(x.clone()).sum()
torch.manual_seed(41)
loss.backward()
# check that m6 and m7 have at least one different grad
self.assertNotEqual(
(p1.grad for p1 in m6.parameters()), (p2.grad for p2 in m7.parameters())
)
# check that m6 and m8 have identical grads
for p1, p2 in zip(m6.parameters(), m8.parameters()):
self.assertEqual(p1.grad, p2.grad)
if __name__ == "__main__":
run_tests()
| TestCheckpoint |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.