language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | pytorch__pytorch | torch/distributions/relaxed_categorical.py | {
"start": 552,
"end": 3825
} | class ____(Distribution):
r"""
Creates a ExpRelaxedCategorical parameterized by
:attr:`temperature`, and either :attr:`probs` or :attr:`logits` (but not both).
Returns the log of a point in the simplex. Based on the interface to
:class:`OneHotCategorical`.
Implementation based on [1].
See also: :func:`torch.distributions.OneHotCategorical`
Args:
temperature (Tensor): relaxation temperature
probs (Tensor): event probabilities
logits (Tensor): unnormalized log probability for each event
[1] The Concrete Distribution: A Continuous Relaxation of Discrete Random Variables
(Maddison et al., 2017)
[2] Categorical Reparametrization with Gumbel-Softmax
(Jang et al., 2017)
"""
# pyrefly: ignore [bad-override]
arg_constraints = {"probs": constraints.simplex, "logits": constraints.real_vector}
support = (
constraints.real_vector
) # The true support is actually a submanifold of this.
has_rsample = True
def __init__(
self,
temperature: Tensor,
probs: Optional[Tensor] = None,
logits: Optional[Tensor] = None,
validate_args: Optional[bool] = None,
) -> None:
self._categorical = Categorical(probs, logits)
self.temperature = temperature
batch_shape = self._categorical.batch_shape
event_shape = self._categorical.param_shape[-1:]
super().__init__(batch_shape, event_shape, validate_args=validate_args)
def expand(self, batch_shape, _instance=None):
new = self._get_checked_instance(ExpRelaxedCategorical, _instance)
batch_shape = torch.Size(batch_shape)
new.temperature = self.temperature
new._categorical = self._categorical.expand(batch_shape)
super(ExpRelaxedCategorical, new).__init__(
batch_shape, self.event_shape, validate_args=False
)
new._validate_args = self._validate_args
return new
def _new(self, *args, **kwargs):
return self._categorical._new(*args, **kwargs)
@property
def param_shape(self) -> torch.Size:
return self._categorical.param_shape
@property
def logits(self) -> Tensor:
return self._categorical.logits
@property
def probs(self) -> Tensor:
return self._categorical.probs
def rsample(self, sample_shape: _size = torch.Size()) -> Tensor:
shape = self._extended_shape(sample_shape)
uniforms = clamp_probs(
torch.rand(shape, dtype=self.logits.dtype, device=self.logits.device)
)
gumbels = -((-(uniforms.log())).log())
scores = (self.logits + gumbels) / self.temperature
return scores - scores.logsumexp(dim=-1, keepdim=True)
def log_prob(self, value):
K = self._categorical._num_events
if self._validate_args:
self._validate_sample(value)
logits, value = broadcast_all(self.logits, value)
log_scale = torch.full_like(
self.temperature, float(K)
).lgamma() - self.temperature.log().mul(-(K - 1))
score = logits - value.mul(self.temperature)
score = (score - score.logsumexp(dim=-1, keepdim=True)).sum(-1)
return score + log_scale
| ExpRelaxedCategorical |
python | PyCQA__pylint | tests/functional/n/none_dunder_protocols.py | {
"start": 435,
"end": 507
} | class ____(metaclass=MetaContainer):
__iter__ = None
| NonContainerClass |
python | spack__spack | var/spack/test_repos/spack_repo/builtin_mock/packages/nosource_bundle/package.py | {
"start": 223,
"end": 411
} | class ____(BundlePackage):
"""Simple bundle package with one dependency"""
homepage = "http://www.example.com"
version("1.0")
depends_on("dependency-install")
| NosourceBundle |
python | getsentry__sentry | src/sentry/preprod/api/models/project_preprod_build_details_models.py | {
"start": 1910,
"end": 2282
} | class ____(BaseModel):
state: Literal[PreprodArtifactSizeMetrics.SizeAnalysisState.COMPLETED] = (
PreprodArtifactSizeMetrics.SizeAnalysisState.COMPLETED
)
# Deprecated, use size_metrics instead
install_size_bytes: int
# Deprecated, use size_metrics instead
download_size_bytes: int
size_metrics: list[SizeInfoSizeMetric]
| SizeInfoCompleted |
python | sphinx-doc__sphinx | sphinx/builders/latex/transforms.py | {
"start": 16475,
"end": 17376
} | class ____(SphinxPostTransform):
"""Replace pending_xref nodes for citation by citation_reference.
To handle citation reference easily on LaTeX writer, this converts
pending_xref nodes to citation_reference.
"""
default_priority = 5 # before ReferencesResolver
formats = ('latex',)
def run(self, **kwargs: Any) -> None:
domain = self.env.domains.citation_domain
matcher = NodeMatcher(
addnodes.pending_xref, refdomain='citation', reftype='ref'
)
for node in matcher.findall(self.document):
docname, labelid, _ = domain.citations.get(node['reftarget'], ('', '', 0))
if docname:
citation_ref = nodes.citation_reference(
'', '', *node.children, docname=docname, refname=labelid
)
node.replace_self(citation_ref)
| CitationReferenceTransform |
python | pytorch__pytorch | test/dynamo/cpython/3_13/test_heapq.py | {
"start": 10979,
"end": 11123
} | class ____(_TestHeap, __TestCase):
module = c_heapq
#==============================================================================
| TestHeapC |
python | RaRe-Technologies__gensim | gensim/models/callbacks.py | {
"start": 5406,
"end": 9093
} | class ____(Metric):
"""Metric class for coherence evaluation.
See Also
--------
:class:`~gensim.models.coherencemodel.CoherenceModel`
"""
def __init__(self, corpus=None, texts=None, dictionary=None, coherence=None,
window_size=None, topn=10, logger=None, viz_env=None, title=None):
"""
Parameters
----------
corpus : {iterable of list of (int, float), scipy.sparse.csc}, optional
Stream of document vectors or sparse matrix of shape (`num_documents`, `num_terms`).
texts : list of char (str of length 1), optional
Tokenized texts needed for coherence models that use sliding window based probability estimator.
dictionary : :class:`~gensim.corpora.dictionary.Dictionary`, optional
Gensim dictionary mapping from integer IDs to words, needed to create corpus. If `model.id2word` is present,
this is not needed. If both are provided, `dictionary` will be used.
coherence : {'u_mass', 'c_v', 'c_uci', 'c_npmi'}, optional
Coherence measure to be used. 'c_uci' is also known as 'c_pmi' in the literature.
For 'u_mass', the corpus **MUST** be provided. If `texts` is provided, it will be converted
to corpus using the dictionary. For 'c_v', 'c_uci' and 'c_npmi', `texts` **MUST** be provided.
Corpus is not needed.
window_size : int, optional
Size of the window to be used for coherence measures using boolean
sliding window as their probability estimator. For 'u_mass' this doesn't matter.
If 'None', the default window sizes are used which are:
* `c_v` - 110
* `c_uci` - 10
* `c_npmi` - 10
topn : int, optional
Number of top words to be extracted from each topic.
logger : {'shell', 'visdom'}, optional
Monitor training process using one of the available methods. 'shell' will print the coherence value in
the active shell, while 'visdom' will visualize the coherence value with increasing epochs using the Visdom
visualization framework.
viz_env : object, optional
Visdom environment to use for plotting the graph. Unused.
title : str, optional
Title of the graph plot in case `logger == 'visdom'`. Unused.
"""
self.corpus = corpus
self.dictionary = dictionary
self.coherence = coherence
self.texts = texts
self.window_size = window_size
self.topn = topn
self.logger = logger
self.viz_env = viz_env
self.title = title
def get_value(self, **kwargs):
"""Get the coherence score.
Parameters
----------
**kwargs
Key word arguments to override the object's internal attributes.
One of the following parameters are expected:
* `model` - pre-trained topic model of type :class:`~gensim.models.ldamodel.LdaModel`.
* `topics` - list of tokenized topics.
Returns
-------
float
The coherence score.
"""
# only one of the model or topic would be defined
self.model = None
self.topics = None
super(CoherenceMetric, self).set_parameters(**kwargs)
cm = gensim.models.CoherenceModel(
model=self.model, topics=self.topics, texts=self.texts, corpus=self.corpus,
dictionary=self.dictionary, window_size=self.window_size,
coherence=self.coherence, topn=self.topn
)
return cm.get_coherence()
| CoherenceMetric |
python | jazzband__django-polymorphic | src/polymorphic/admin/inlines.py | {
"start": 9983,
"end": 10291
} | class ____(PolymorphicInlineModelAdmin):
"""
Stacked inline for django-polymorphic models.
Since tabular doesn't make much sense with changed fields, just offer this one.
"""
#: The default template to use.
template = "admin/polymorphic/edit_inline/stacked.html"
| StackedPolymorphicInline |
python | Pylons__pyramid | tests/test_scripts/dummy.py | {
"start": 881,
"end": 962
} | class ____:
def __init__(self):
self.registry = dummy_registry
| DummyApp |
python | pytorch__pytorch | test/distributed/test_symmetric_memory.py | {
"start": 1885,
"end": 10713
} | class ____(MultiProcContinuousTest):
@property
def device(self) -> torch.device:
return torch.device(device_type, self.rank)
def _init_process(self):
torch.cuda.set_device(self.device)
torch.manual_seed(42 + self.rank)
def test_has_multicast_support(self) -> None:
# validate that has_multicast_support() returns "false" instead of throwing
self.assertFalse(_SymmetricMemory.has_multicast_support(DeviceType.CPU, 0))
# NOTE: DeviceType.CUDA is implicitly tested through @requires_multicast_support
@skipIf(
not PLATFORM_SUPPORTS_SYMM_MEM, "SymmMem is not supported on this ROCm arch"
)
@skip_if_lt_x_gpu(2)
def test_get_backend(self) -> None:
backend = symm_mem.get_backend(torch.device("cuda"))
self.assertIsNotNone(backend)
backend = symm_mem.get_backend("cuda")
self.assertIsNotNone(backend)
@skip_if_rocm_multiprocess
@skip_if_lt_x_gpu(2)
def test_cuda_nvlink_connectivity_detection(self) -> None:
from torch._C._distributed_c10d import _detect_dma_connectivity
connectivity = _detect_dma_connectivity(DeviceType.CUDA, "nvlink")
self.assertEqual(connectivity.device_type, DeviceType.CUDA)
self.assertEqual(connectivity.connection_type, "nvlink")
self.assertEqual(len(connectivity.matrix), torch.cuda.device_count())
for row in connectivity.matrix:
self.assertEqual(len(row), torch.cuda.device_count())
@skipIf(
not PLATFORM_SUPPORTS_SYMM_MEM, "SymmMem is not supported on this ROCm arch"
)
def test_large_alloc(self) -> None:
t = symm_mem.empty(2 * 1024**3, dtype=torch.uint8, device="cuda")
self.assertEqual(t.numel() * t.element_size(), 2 * 1024**3)
@skipIf(
not PLATFORM_SUPPORTS_SYMM_MEM, "SymmMem is not supported on this ROCm arch"
)
@skip_if_lt_x_gpu(2)
def test_get_signal_pad(self) -> None:
self._init_process()
t = symm_mem.empty(1, device="cuda")
symm_mem_hdl = symm_mem.rendezvous(t, group=dist.group.WORLD)
peer_rank = (self.rank + 1) % self.world_size
signal_pad = symm_mem_hdl.get_signal_pad(self.rank)
self.assertEqual(
signal_pad.data_ptr(), symm_mem_hdl.signal_pad_ptrs[symm_mem_hdl.rank]
)
signal_pad = symm_mem_hdl.get_signal_pad(peer_rank)
self.assertEqual(signal_pad.dtype, torch.uint32)
self.assertEqual(signal_pad.numel(), symm_mem_hdl.signal_pad_size // 4)
# Only specify sizes
signal_pad = symm_mem_hdl.get_signal_pad(peer_rank, (8, 8))
self.assertEqual(signal_pad.dtype, torch.uint32)
self.assertEqual(signal_pad.numel(), 64)
# Only specify dtype
signal_pad = symm_mem_hdl.get_signal_pad(peer_rank, dtype=torch.uint64)
self.assertEqual(signal_pad.dtype, torch.uint64)
self.assertEqual(signal_pad.numel(), symm_mem_hdl.signal_pad_size // 8)
# Specify both sizes and dtype
signal_pad = symm_mem_hdl.get_signal_pad(peer_rank, (8, 8), dtype=torch.uint64)
self.assertEqual(signal_pad.dtype, torch.uint64)
self.assertEqual(signal_pad.numel(), 64)
# Sanity check that writes to buffer doesn't corrupt signal_pad
t = symm_mem.empty(0, device="cuda")
symm_mem_hdl = symm_mem.rendezvous(t, group=dist.group.WORLD)
signal_pad = symm_mem_hdl.get_signal_pad(self.rank)
signal_pad.fill_(42)
t.fill_(0)
self.assertTrue(signal_pad.eq(42).all())
@skipIf(
not PLATFORM_SUPPORTS_SYMM_MEM, "SymmMem is not supported on this ROCm arch"
)
@requires_cuda
def test_allow_overlapping_devices(self) -> None:
os.environ["TORCH_SYMM_MEM_ALLOW_OVERLAPPING_DEVICES"] = "1"
t = symm_mem.empty(64, device="cuda:0")
symm_mem_hdl = symm_mem.rendezvous(t, group=dist.group.WORLD)
self.assertEqual(symm_mem_hdl.rank, self.rank)
self.assertEqual(symm_mem_hdl.world_size, self.world_size)
for rank in range(self.world_size):
buf = symm_mem_hdl.get_buffer(rank, (64,), torch.float32)
if rank == self.rank:
self.assertEqual(buf.data_ptr(), t.data_ptr())
else:
self.assertEqual(buf.device, t.device)
os.environ["TORCH_SYMM_MEM_ALLOW_OVERLAPPING_DEVICES"] = "0"
@skipIf(
not PLATFORM_SUPPORTS_SYMM_MEM, "SymmMem is not supported on this ROCm arch"
)
@skip_if_lt_x_gpu(2)
@parametrize("symm_mem_input", [True, False])
def test_low_contention_all_gather(self, symm_mem_input: bool) -> None:
self._init_process()
if symm_mem_input:
t = _SymmetricMemory.empty_strided_p2p(
size=(64, 64),
stride=(64, 1),
dtype=torch.float32,
device=self.device,
group_name="0",
).fill_(self.rank)
else:
t = torch.full((64, 64), self.rank, dtype=torch.float32, device=self.device)
res = torch.ops.symm_mem._low_contention_all_gather(t, "0")
res = torch.ops._c10d_functional.wait_tensor(res)
self.assertEqual(res.shape, (64 * self.world_size, 64))
chunks = res.chunk(self.world_size)
for r in range(self.world_size):
self.assertTrue(chunks[r].eq(r).all())
@skipIf(
not PLATFORM_SUPPORTS_SYMM_MEM, "SymmMem is not supported on this ROCm arch"
)
@skip_if_lt_x_gpu(2)
@parametrize("reduce_op", ["sum", "avg"])
@parametrize("symm_mem_input", [True, False])
def test_low_contention_reduce_scatter(
self, reduce_op: str, symm_mem_input: bool
) -> None:
self._init_process()
if symm_mem_input:
t = _SymmetricMemory.empty_strided_p2p(
size=(64, 64),
stride=(64, 1),
dtype=torch.float32,
device=self.device,
group_name="0",
)
else:
t = torch.empty((64, 64), dtype=torch.float32, device=self.device)
chunks = t.chunk(self.world_size)
for r in range(self.world_size):
chunks[r].fill_(r)
res = torch.ops.symm_mem._low_contention_reduce_scatter(t, reduce_op, "0")
res = torch.ops._c10d_functional.wait_tensor(res)
self.assertEqual(res.shape, (64 // self.world_size, 64))
if reduce_op == "sum":
expect = self.rank * self.world_size
elif reduce_op == "avg":
expect = self.rank
else:
raise AssertionError(f"Unexpected reduce_op: {reduce_op}")
self.assertTrue(res.eq(expect).all())
@skipIf(
not PLATFORM_SUPPORTS_SYMM_MEM, "SymmMem is not supported on this ROCm arch"
)
@skip_if_lt_x_gpu(4)
def test_subgroup(self) -> None:
self._init_process()
ranks = list(range(self.world_size))
subgroup_0 = dist.new_group(ranks[: len(ranks) // 2])
subgroup_1 = dist.new_group(ranks[len(ranks) // 2 :])
world = dist.group.WORLD
subgroup = subgroup_0 if world.rank() < world.size() // 2 else subgroup_1
t = symm_mem.empty(64, device="cuda")
symm_mem_world = symm_mem.rendezvous(t, group=world)
symm_mem_subgroup = symm_mem.rendezvous(t, group=subgroup)
self.assertEqual(symm_mem_world.world_size, world.size())
self.assertEqual(symm_mem_world.rank, world.rank())
self.assertEqual(symm_mem_subgroup.world_size, world.size() // 2)
self.assertEqual(symm_mem_subgroup.rank, world.rank() % subgroup.size())
t.fill_(world.rank())
symm_mem_world.barrier()
# Observe a peer buffer via the world group
peer_rank = (world.rank() + 1) % world.size()
buf = symm_mem_world.get_buffer(peer_rank, (64,), torch.float32)
self.assertTrue(buf.eq(peer_rank).all())
# Observe a peer buffer via the subgroup
peer_rank = (subgroup.rank() + 1) % subgroup.size()
buf = symm_mem_subgroup.get_buffer(peer_rank, (64,), torch.float32)
if world.rank() < world.size() // 2:
self.assertTrue(buf.eq(peer_rank).all())
else:
self.assertTrue(buf.eq(peer_rank + world.size() // 2).all())
# We move AsyncTP tests to a separate test suite because 1) Async TP ops are not
# the core symmetric memory APIs, they are more like applications, 2)
# MultiProcContinuousTest will skip all the following tests if a test fails (
# we should fix this too). We still want to get the test signals for the core
# symmetric memory APIs when Async TP ops fail.
@skip_if_rocm_multiprocess # AsyncTP is not yet supported on ROCm
@instantiate_parametrized_tests
@requires_cuda_p2p_access()
| SymmetricMemoryTest |
python | apache__airflow | providers/google/tests/unit/google/cloud/links/test_managed_kafka.py | {
"start": 2990,
"end": 3320
} | class ____:
def test_class_attributes(self):
assert ApacheKafkaTopicLink.key == EXPECTED_MANAGED_KAFKA_TOPIC_LINK_KEY
assert ApacheKafkaTopicLink.name == EXPECTED_MANAGED_KAFKA_TOPIC_LINK_NAME
assert ApacheKafkaTopicLink.format_str == EXPECTED_MANAGED_KAFKA_TOPIC_LINK_FORMAT_STR
| TestApacheKafkaTopicLink |
python | django__django | django/contrib/admin/templatetags/log.py | {
"start": 61,
"end": 2030
} | class ____(template.Node):
def __init__(self, limit, varname, user):
self.limit = limit
self.varname = varname
self.user = user
def __repr__(self):
return "<GetAdminLog Node>"
def render(self, context):
entries = context["log_entries"]
if self.user is not None:
user_id = self.user
if not user_id.isdigit():
user_id = context[self.user].pk
entries = entries.filter(user__pk=user_id)
context[self.varname] = entries[: int(self.limit)]
return ""
@register.tag
def get_admin_log(parser, token):
"""
Populate a template variable with the admin log for the given criteria.
Usage::
{% get_admin_log [limit] as [varname] for_user [user_id_or_varname] %}
Examples::
{% get_admin_log 10 as admin_log for_user 23 %}
{% get_admin_log 10 as admin_log for_user user %}
{% get_admin_log 10 as admin_log %}
Note that ``user_id_or_varname`` can be a hard-coded integer (user ID)
or the name of a template context variable containing the user object
whose ID you want.
"""
tokens = token.contents.split()
if len(tokens) < 4:
raise template.TemplateSyntaxError(
"'get_admin_log' statements require two arguments"
)
if not tokens[1].isdigit():
raise template.TemplateSyntaxError(
"First argument to 'get_admin_log' must be an integer"
)
if tokens[2] != "as":
raise template.TemplateSyntaxError(
"Second argument to 'get_admin_log' must be 'as'"
)
if len(tokens) > 4:
if tokens[4] != "for_user":
raise template.TemplateSyntaxError(
"Fourth argument to 'get_admin_log' must be 'for_user'"
)
return AdminLogNode(
limit=tokens[1],
varname=tokens[3],
user=(tokens[5] if len(tokens) > 5 else None),
)
| AdminLogNode |
python | django__django | django/db/models/functions/math.py | {
"start": 394,
"end": 491
} | class ____(NumericOutputFieldMixin, Transform):
function = "ACOS"
lookup_name = "acos"
| ACos |
python | skorch-dev__skorch | skorch/tests/callbacks/test_lr_scheduler.py | {
"start": 671,
"end": 9743
} | class ____:
@pytest.mark.parametrize('policy', [StepLR, 'StepLR'])
def test_simulate_lrs_epoch_step(self, policy):
lr_sch = LRScheduler(policy, step_size=2)
lrs = lr_sch.simulate(6, 1)
expected = np.array([1.0, 1.0, 0.1, 0.1, 0.01, 0.01])
assert np.allclose(expected, lrs)
@pytest.mark.parametrize('policy', [CyclicLR])
def test_simulate_lrs_batch_step(self, policy):
lr_sch = LRScheduler(
policy, base_lr=1, max_lr=5, step_size_up=4, step_every='batch')
lrs = lr_sch.simulate(11, 1)
expected = np.array([1, 2, 3, 4, 5, 4, 3, 2, 1, 2, 3])
assert np.allclose(expected, lrs)
def test_simulate_lrs_reduced_lr_on_plateau_scalar(self):
# Feed a constant, scalar "loss" to the scheduler.
lr_sch = LRScheduler(
ReduceLROnPlateau, factor=0.1, patience=1,
)
lrs = lr_sch.simulate(
steps=5, initial_lr=1, step_args=0.5
)
# O = OK epoch
# I = intertolerable epoch
#
# 1 2 3 4 5 epoch number
# O I I I I epoch classification
# 0 1 2 1 2 number of bad epochs
# * * epochs with LR reduction
#
# note that simulate returns the lrs before the step, not after,
# so we're seeing only 4 new simulated values.
assert all(lrs == [1, 1, 1, 0.1, 0.1])
def test_simulate_lrs_reduced_lr_on_plateau_array(self):
lr_sch = LRScheduler(
ReduceLROnPlateau, factor=0.1, patience=1,
)
metrics = np.array([0.5, 0.4, 0.4, 0.4, 0.3])
lrs = lr_sch.simulate(
steps=5, initial_lr=1, step_args=metrics
)
# O = OK epoch
# I = intertolerable epoch
#
# 1 2 3 4 5 epoch number
# O O I I O epoch classification
# 0 0 1 2 0 number of bad epochs
# * epochs with LR reduction
#
# note that simulate returns the LRs before the step, not after,
# so we're seeing only 4 new simulated values.
assert all(lrs == [1, 1, 1, 1, 0.1])
@pytest.mark.parametrize('policy, instance, kwargs', [
('LambdaLR', LambdaLR, {'lr_lambda': (lambda x: 1e-1)}),
('StepLR', StepLR, {'step_size': 30}),
('MultiStepLR', MultiStepLR, {'milestones': [30, 90]}),
('ExponentialLR', ExponentialLR, {'gamma': 0.1}),
('ReduceLROnPlateau', ReduceLROnPlateau, {}),
('WarmRestartLR', WarmRestartLR, {}),
('CosineAnnealingLR', CosineAnnealingLR, {'T_max': 5, 'eta_min': 1e-3}),
(WarmRestartLR, WarmRestartLR, {}),
])
def test_lr_callback_init_policies(
self,
classifier_module,
classifier_data,
policy,
instance,
kwargs,
):
X, y = classifier_data
lr_policy = LRScheduler(policy, **kwargs)
net = NeuralNetClassifier(
classifier_module, max_epochs=2, callbacks=[lr_policy]
)
net.fit(X, y)
assert any(list(map(
lambda x: isinstance(
getattr(x[1], 'lr_scheduler_', None), instance),
net.callbacks_
)))
@pytest.mark.parametrize('policy, kwargs', [
('LambdaLR', {'lr_lambda': (lambda x: 1e-1)}),
('StepLR', {'step_size': 30}),
('MultiStepLR', {'milestones': [30, 90]}),
('ExponentialLR', {'gamma': 0.1}),
('ReduceLROnPlateau', {}),
('WarmRestartLR', {}),
('CosineAnnealingLR', {'T_max': 3}),
])
def test_lr_callback_steps_correctly(
self,
classifier_module,
classifier_data,
policy,
kwargs,
):
max_epochs = 2
X, y = classifier_data
lr_policy = LRScheduler(policy, **kwargs)
net = NeuralNetClassifier(
classifier_module(),
max_epochs=max_epochs,
batch_size=16,
callbacks=[lr_policy],
)
net.fit(X, y)
# pylint: disable=protected-access
assert lr_policy.lr_scheduler_.last_epoch == max_epochs
@pytest.mark.parametrize('policy, kwargs', [
(CyclicLR, {'base_lr': 1e-3, 'max_lr': 6e-3, 'step_every': 'batch'}),
])
def test_lr_callback_batch_steps_correctly(
self,
classifier_module,
classifier_data,
policy,
kwargs,
):
batch_size = 100
max_epochs = 2
X, y = classifier_data
num_examples = len(X)
lr_policy = LRScheduler(policy, **kwargs)
net = NeuralNetClassifier(classifier_module(), max_epochs=max_epochs,
batch_size=batch_size, callbacks=[lr_policy])
net.fit(X, y)
total_iterations_per_epoch = num_examples / batch_size
# 80% of sample used for training by default
total_training_iterations_per_epoch = 0.8 * total_iterations_per_epoch
expected = int(total_training_iterations_per_epoch * max_epochs)
# pylint: disable=protected-access
assert lr_policy.batch_idx_ == expected
@pytest.mark.parametrize('policy, kwargs', [
(CyclicLR, {'base_lr': 1e-3, 'max_lr': 6e-3, 'step_every': 'batch'}),
])
def test_lr_callback_batch_steps_correctly_fallback(
self,
classifier_module,
classifier_data,
policy,
kwargs,
):
batch_size = 100
max_epochs = 2
X, y = classifier_data
num_examples = len(X)
lr_policy = LRScheduler(policy, **kwargs)
net = NeuralNetClassifier(classifier_module(), max_epochs=max_epochs,
batch_size=batch_size, callbacks=[lr_policy])
net.fit(X, y)
# Removes batch count information in the last two epochs
for i in range(max_epochs):
del net.history[i]["train_batch_count"]
del net.history[i]["valid_batch_count"]
net.partial_fit(X, y)
total_iterations_per_epoch = num_examples / batch_size
# batch_counts were removed thus the total iterations of the last
# epoch is used
total_iterations_fit_run = total_iterations_per_epoch * max_epochs
# 80% of sample used for training by default
total_iterations_partial_fit_run = (
0.8 * total_iterations_per_epoch * max_epochs)
# called fit AND partial_fit
total_iterations = (total_iterations_fit_run +
total_iterations_partial_fit_run)
# Failback to using both valid and training batches counts on
# second run
expected = int(total_iterations)
# pylint: disable=protected-access
assert lr_policy.batch_idx_ == expected
def test_lr_scheduler_cloneable(self):
# reproduces bug #271
scheduler = LRScheduler(WarmRestartLR, base_lr=123)
clone(scheduler) # does not raise
def test_lr_scheduler_set_params(self, classifier_module, classifier_data):
scheduler = LRScheduler(
CyclicLR, base_lr=123, max_lr=999, step_every='batch')
net = NeuralNetClassifier(
classifier_module,
max_epochs=0,
callbacks=[('scheduler', scheduler)],
)
net.set_params(callbacks__scheduler__base_lr=456)
net.fit(*classifier_data) # we need to trigger on_train_begin
assert net.callbacks[0][1].lr_scheduler_.base_lrs[0] == 456
@pytest.mark.parametrize('policy,kwargs', [
(StepLR, {'gamma': 0.9, 'step_size': 1})
])
def test_lr_scheduler_record_epoch_step(self,
classifier_module,
classifier_data,
policy,
kwargs):
epochs = 3
scheduler = LRScheduler(policy, **kwargs)
lrs = scheduler.simulate(epochs, initial_lr=123.)
net = NeuralNetClassifier(
classifier_module,
max_epochs=epochs,
lr=123.,
callbacks=[('scheduler', scheduler)]
)
net.fit(*classifier_data)
assert np.all(net.history[:, 'event_lr'] == lrs)
def test_lr_scheduler_record_batch_step(self, classifier_module, classifier_data):
X, y = classifier_data
batch_size = 128
scheduler = LRScheduler(
CyclicLR,
base_lr=1,
max_lr=5,
step_size_up=4,
step_every='batch'
)
net = NeuralNetClassifier(
classifier_module,
max_epochs=1,
lr=123.,
batch_size=batch_size,
callbacks=[('scheduler', scheduler)]
)
net.fit(X, y)
new_lrs = scheduler.simulate(
net.history[-1, 'train_batch_count'],
initial_lr=123.,
)
assert np.all(net.history[-1, 'batches', :, 'event_lr'] == new_lrs)
| TestLRCallbacks |
python | coleifer__peewee | playhouse/sqliteq.py | {
"start": 10392,
"end": 10819
} | class ____(ThreadHelper):
__slots__ = ()
def event(self): return GEvent()
def queue(self, max_size=None):
max_size = max_size if max_size is not None else self.queue_max_size
return GQueue(maxsize=max_size or 0)
def thread(self, fn, *args, **kwargs):
def wrap(*a, **k):
gevent.sleep()
return fn(*a, **k)
return GThread(wrap, *args, **kwargs)
| GreenletHelper |
python | apache__airflow | providers/oracle/tests/unit/oracle/operators/test_oracle.py | {
"start": 1161,
"end": 3531
} | class ____:
@mock.patch.object(OracleHook, "run", autospec=OracleHook.run)
def test_execute(self, mock_run):
procedure = "test"
oracle_conn_id = "oracle_default"
parameters = {"parameter": "value"}
context = "test_context"
task_id = "test_task_id"
operator = OracleStoredProcedureOperator(
procedure=procedure,
oracle_conn_id=oracle_conn_id,
parameters=parameters,
task_id=task_id,
)
result = operator.execute(context=context)
assert result is mock_run.return_value
mock_run.assert_called_once_with(
mock.ANY,
"BEGIN test(:parameter); END;",
autocommit=True,
parameters=parameters,
handler=mock.ANY,
)
@mock.patch.object(OracleHook, "callproc", autospec=OracleHook.callproc)
def test_push_oracle_exit_to_xcom(self, mock_callproc, request, dag_maker):
# Test pulls the value previously pushed to xcom and checks if it's the same
procedure = "test_push"
oracle_conn_id = "oracle_default"
parameters = {"parameter": "value"}
task_id = "test_push"
ora_exit_code = f"{random.randrange(10**5):05}"
error = f"ORA-{ora_exit_code}: This is a five-digit ORA error code"
mock_callproc.side_effect = oracledb.DatabaseError(error)
if AIRFLOW_V_3_0_PLUS:
run_task = request.getfixturevalue("run_task")
task = OracleStoredProcedureOperator(
procedure=procedure, oracle_conn_id=oracle_conn_id, parameters=parameters, task_id=task_id
)
run_task(task=task)
assert run_task.xcom.get(task_id=task.task_id, key="ORA") == ora_exit_code
else:
with dag_maker(dag_id=f"dag_{request.node.name}"):
task = OracleStoredProcedureOperator(
procedure=procedure, oracle_conn_id=oracle_conn_id, parameters=parameters, task_id=task_id
)
dr = dag_maker.create_dagrun(run_id=task_id)
ti = TaskInstance(task=task, run_id=dr.run_id)
with pytest.raises(oracledb.DatabaseError, match=re.escape(error)):
ti.run()
assert ti.xcom_pull(task_ids=task.task_id, key="ORA") == ora_exit_code
| TestOracleStoredProcedureOperator |
python | doocs__leetcode | solution/0800-0899/0845.Longest Mountain in Array/Solution.py | {
"start": 0,
"end": 460
} | class ____:
def longestMountain(self, arr: List[int]) -> int:
n = len(arr)
f = [1] * n
g = [1] * n
for i in range(1, n):
if arr[i] > arr[i - 1]:
f[i] = f[i - 1] + 1
ans = 0
for i in range(n - 2, -1, -1):
if arr[i] > arr[i + 1]:
g[i] = g[i + 1] + 1
if f[i] > 1:
ans = max(ans, f[i] + g[i] - 1)
return ans
| Solution |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/triggers/gcs.py | {
"start": 4562,
"end": 8992
} | class ____(BaseTrigger):
"""
A trigger that makes an async call to GCS to check whether the object is updated in a bucket.
:param bucket: google cloud storage bucket name cloud storage where the objects are residing.
:param object_name: the file or folder present in the bucket
:param target_date: context datetime to compare with blob object updated time
:param poke_interval: polling period in seconds to check for file/folder
:param google_cloud_conn_id: reference to the Google Connection
:param hook_params: dict object that has impersonation_chain
"""
def __init__(
self,
bucket: str,
object_name: str,
target_date: datetime,
poke_interval: float,
google_cloud_conn_id: str,
hook_params: dict[str, Any],
):
super().__init__()
self.bucket = bucket
self.object_name = object_name
self.target_date = target_date
self.poke_interval = poke_interval
self.google_cloud_conn_id: str = google_cloud_conn_id
self.hook_params = hook_params
def serialize(self) -> tuple[str, dict[str, Any]]:
"""Serialize GCSCheckBlobUpdateTimeTrigger arguments and classpath."""
return (
"airflow.providers.google.cloud.triggers.gcs.GCSCheckBlobUpdateTimeTrigger",
{
"bucket": self.bucket,
"object_name": self.object_name,
"target_date": self.target_date,
"poke_interval": self.poke_interval,
"google_cloud_conn_id": self.google_cloud_conn_id,
"hook_params": self.hook_params,
},
)
async def run(self) -> AsyncIterator[TriggerEvent]:
"""Loop until the object updated time is greater than target datetime."""
try:
hook = self._get_async_hook()
while True:
status, res = await self._is_blob_updated_after(
hook=hook,
bucket_name=self.bucket,
object_name=self.object_name,
target_date=self.target_date,
)
if status:
yield TriggerEvent(res)
return
await asyncio.sleep(self.poke_interval)
except Exception as e:
yield TriggerEvent({"status": "error", "message": str(e)})
def _get_async_hook(self) -> GCSAsyncHook:
return GCSAsyncHook(gcp_conn_id=self.google_cloud_conn_id, **self.hook_params)
async def _is_blob_updated_after(
self, hook: GCSAsyncHook, bucket_name: str, object_name: str, target_date: datetime
) -> tuple[bool, dict[str, Any]]:
"""
Check if the object in the bucket is updated.
:param hook: GCSAsyncHook Hook class
:param bucket_name: The Google Cloud Storage bucket where the object is.
:param object_name: The name of the blob_name to check in the Google cloud
storage bucket.
:param target_date: context datetime to compare with blob object updated time
"""
async with ClientSession() as session:
client = await hook.get_storage_client(session)
bucket = client.get_bucket(bucket_name)
blob = await bucket.get_blob(blob_name=object_name)
if blob is None:
res = {
"message": f"Object ({object_name}) not found in Bucket ({bucket_name})",
"status": "error",
}
return True, res
blob_updated_date = blob.updated # type: ignore[attr-defined]
blob_updated_time = datetime.strptime(blob_updated_date, "%Y-%m-%dT%H:%M:%S.%fZ").replace(
tzinfo=timezone.utc
) # Blob updated time is in string format so converting the string format
# to datetime object to compare the last updated time
if blob_updated_time is not None:
if not target_date.tzinfo:
target_date = target_date.replace(tzinfo=timezone.utc)
self.log.info("Verify object date: %s > %s", blob_updated_time, target_date)
if blob_updated_time > target_date:
return True, {"status": "success", "message": "success"}
return False, {"status": "pending", "message": "pending"}
| GCSCheckBlobUpdateTimeTrigger |
python | pytorch__pytorch | torch/nn/modules/loss.py | {
"start": 43572,
"end": 47823
} | class ____(_Loss):
r"""Creates a criterion that uses a squared term if the absolute
element-wise error falls below beta and an L1 term otherwise.
It is less sensitive to outliers than :class:`torch.nn.MSELoss` and in some cases
prevents exploding gradients (e.g. see the paper `Fast R-CNN`_ by Ross Girshick).
For a batch of size :math:`N`, the unreduced loss can be described as:
.. math::
\ell(x, y) = L = \{l_1, ..., l_N\}^T
with
.. math::
l_n = \begin{cases}
0.5 (x_n - y_n)^2 / beta, & \text{if } |x_n - y_n| < beta \\
|x_n - y_n| - 0.5 * beta, & \text{otherwise }
\end{cases}
If `reduction` is not `none`, then:
.. math::
\ell(x, y) =
\begin{cases}
\operatorname{mean}(L), & \text{if reduction} = \text{`mean';}\\
\operatorname{sum}(L), & \text{if reduction} = \text{`sum'.}
\end{cases}
.. note::
Smooth L1 loss can be seen as exactly :class:`L1Loss`, but with the :math:`|x - y| < beta`
portion replaced with a quadratic function such that its slope is 1 at :math:`|x - y| = beta`.
The quadratic segment smooths the L1 loss near :math:`|x - y| = 0`.
.. note::
Smooth L1 loss is closely related to :class:`HuberLoss`, being
equivalent to :math:`huber(x, y) / beta` (note that Smooth L1's beta hyper-parameter is
also known as delta for Huber). This leads to the following differences:
* As beta -> 0, Smooth L1 loss converges to :class:`L1Loss`, while :class:`HuberLoss`
converges to a constant 0 loss. When beta is 0, Smooth L1 loss is equivalent to L1 loss.
* As beta -> :math:`+\infty`, Smooth L1 loss converges to a constant 0 loss, while
:class:`HuberLoss` converges to :class:`MSELoss`.
* For Smooth L1 loss, as beta varies, the L1 segment of the loss has a constant slope of 1.
For :class:`HuberLoss`, the slope of the L1 segment is beta.
.. _`Fast R-CNN`: https://arxiv.org/abs/1504.08083
Args:
size_average (bool, optional): Deprecated (see :attr:`reduction`). By default,
the losses are averaged over each loss element in the batch. Note that for
some losses, there are multiple elements per sample. If the field :attr:`size_average`
is set to ``False``, the losses are instead summed for each minibatch. Ignored
when :attr:`reduce` is ``False``. Default: ``True``
reduce (bool, optional): Deprecated (see :attr:`reduction`). By default, the
losses are averaged or summed over observations for each minibatch depending
on :attr:`size_average`. When :attr:`reduce` is ``False``, returns a loss per
batch element instead and ignores :attr:`size_average`. Default: ``True``
reduction (str, optional): Specifies the reduction to apply to the output:
``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will be applied,
``'mean'``: the sum of the output will be divided by the number of
elements in the output, ``'sum'``: the output will be summed. Note: :attr:`size_average`
and :attr:`reduce` are in the process of being deprecated, and in the meantime,
specifying either of those two args will override :attr:`reduction`. Default: ``'mean'``
beta (float, optional): Specifies the threshold at which to change between L1 and L2 loss.
The value must be non-negative. Default: 1.0
Shape:
- Input: :math:`(*)`, where :math:`*` means any number of dimensions.
- Target: :math:`(*)`, same shape as the input.
- Output: scalar. If :attr:`reduction` is ``'none'``, then :math:`(*)`, same shape as the input.
"""
__constants__ = ["reduction"]
def __init__(
self, size_average=None, reduce=None, reduction: str = "mean", beta: float = 1.0
) -> None:
super().__init__(size_average, reduce, reduction)
self.beta = beta
def forward(self, input: Tensor, target: Tensor) -> Tensor:
"""Runs the forward pass."""
return F.smooth_l1_loss(input, target, reduction=self.reduction, beta=self.beta)
| SmoothL1Loss |
python | joerick__pyinstrument | pyinstrument/renderers/speedscope.py | {
"start": 3075,
"end": 8606
} | class ____(FrameRenderer):
"""
Outputs a tree of JSON conforming to the speedscope schema documented at
wiki: https://github.com/jlfwong/speedscope/wiki/Importing-from-custom-sources
schema: https://www.speedscope.app/file-format-schema.json
spec: https://github.com/jlfwong/speedscope/blob/main/src/lib/file-format-spec.ts
example: https://github.com/jlfwong/speedscope/blob/main/sample/profiles/speedscope/0.0.1/simple.speedscope.json
"""
output_file_extension = "speedscope.json"
def __init__(self, **kwargs: Any):
super().__init__(**kwargs)
# Member holding a running total of wall clock time needed to
# compute the times at which events occur
self._event_time: float = 0.0
# Map of speedscope frames to speedscope frame indices, needed
# to construct evented speedscope profiles; exploits LIFO
# property of popinfo method in Python 3.7+ dictionaries. This
# dictionary is used to build up the "shared" JSON array in
# speedscope's schema.
self._frame_to_index: dict[SpeedscopeFrame, int] = {}
def render_frame(self, frame: Frame | None) -> list[SpeedscopeEvent]:
"""
Builds up a list of speedscope events that are used to populate the
"events" array in speedscope-formatted JSON.
This method has two notable side effects:
* it populates the self._frame_to_index dictionary that matches
speedscope frames with their positions in the "shared" array of
speedscope output; this dictionary will be used to write this
"shared" array in the render method
* it accumulates a running total of time elapsed by
accumulating the self_time spent in each pyinstrument frame;
this running total is used by speedscope events to construct
a flame chart.
"""
# if frame is None, recursion bottoms out; no event frames
# need to be added
if frame is None:
return []
# Otherwise, form a speedscope frame and add it to the frame
# to index map if the frame is not already a key in that map.
sframe = SpeedscopeFrame(frame.function, frame.file_path, frame.line_no)
if sframe not in self._frame_to_index:
self._frame_to_index[sframe] = len(self._frame_to_index)
# Get the frame index and add a speedscope event corresponding
# to opening a stack frame.
sframe_index = self._frame_to_index[sframe]
open_event = SpeedscopeEvent(SpeedscopeEventType.OPEN, self._event_time, sframe_index)
events_array: list[SpeedscopeEvent] = [open_event]
# Add stack frame open and close events for all child frames
# of this frame.
for child in frame.children:
events_array.extend(self.render_frame(child))
# Update event time for closing this stack frame.
#
# If number of frames approaches 1e16 * desired accuracy
# level, consider using Neumaier-Kahan summation; improves
# worst-case relative accuracy of sum from O(num_summands *
# eps) to (2 * eps + O(num_summands * eps * eps)), where eps
# is IEEE-754 double precision unit roundoff, approximately
# 1e-16. Average case relative accuracy expressions replace
# num_summands with sqrt(num_summands). However, Kahan
# summation quadruples operation count of sum, and Neumaier
# variant also adds a branch & swap for each summand. Pairwise
# summation isn't an option here because a running total is
# needed.
self._event_time += frame.absorbed_time
if frame.is_synthetic_leaf:
# only time contained within leaf nodes is real time i.e. not the sum of children
self._event_time += frame.time
# Add event closing this stack frame.
close_event = SpeedscopeEvent(SpeedscopeEventType.CLOSE, self._event_time, sframe_index)
events_array.append(close_event)
return events_array
def render(self, session: Session):
frame = self.preprocess(session.root_frame())
id_: str = time.strftime("%Y-%m-%dT%H-%M-%S", time.localtime(session.start_time))
name: str = f"CPU profile for '{session.target_description}' at {id_}"
sprofile_list: list[SpeedscopeProfile] = [
SpeedscopeProfile(name, self.render_frame(frame), session.duration)
]
# Exploits Python 3.7+ dictionary property of iterating over
# keys in insertion order to build the list of speedscope
# frames.
sframe_list: list[SpeedscopeFrame] = [sframe for sframe in iter(self._frame_to_index)]
shared_dict = {"frames": sframe_list}
speedscope_file = SpeedscopeFile(name, sprofile_list, shared_dict)
return "%s\n" % json.dumps(speedscope_file, cls=SpeedscopeEncoder)
def default_processors(self) -> ProcessorList:
"""
Default Processors for speedscope renderer; note that
processors.aggregate_repeated_calls is removed because
speedscope is a timeline-based format.
"""
return [
processors.remove_importlib,
processors.remove_tracebackhide,
processors.merge_consecutive_self_time,
processors.remove_irrelevant_nodes,
processors.remove_unnecessary_self_time_nodes,
processors.remove_first_pyinstrument_frames_processor,
]
| SpeedscopeRenderer |
python | simonw__datasette | datasette/views/special.py | {
"start": 18302,
"end": 19354
} | class ____(BaseView):
name = "permission_check"
has_json_alternate = False
async def get(self, request):
await self.ds.ensure_permission(action="permissions-debug", actor=request.actor)
as_format = request.url_vars.get("format")
if not as_format:
return await self.render(
["debug_check.html"],
request,
{
"sorted_actions": sorted(self.ds.actions.keys()),
"has_debug_permission": True,
},
)
# JSON API - action parameter is required
action = request.args.get("action")
if not action:
return Response.json({"error": "action parameter is required"}, status=400)
parent = request.args.get("parent")
child = request.args.get("child")
response, status = await _check_permission_for_actor(
self.ds, action, parent, child, request.actor
)
return Response.json(response, status=status)
| PermissionCheckView |
python | pypa__setuptools | setuptools/tests/config/test_setupcfg.py | {
"start": 496,
"end": 2046
} | class ____(ConfigHandler[Target]):
"""Erroneous handler. Fails to implement required methods."""
section_prefix = "**err**"
def make_package_dir(name, base_dir, ns=False):
dir_package = base_dir
for dir_name in name.split('/'):
dir_package = dir_package.mkdir(dir_name)
init_file = None
if not ns:
init_file = dir_package.join('__init__.py')
init_file.write('')
return dir_package, init_file
def fake_env(
tmpdir, setup_cfg, setup_py=None, encoding='ascii', package_path='fake_package'
):
if setup_py is None:
setup_py = 'from setuptools import setup\nsetup()\n'
tmpdir.join('setup.py').write(setup_py)
config = tmpdir.join('setup.cfg')
config.write(setup_cfg.encode(encoding), mode='wb')
package_dir, init_file = make_package_dir(package_path, tmpdir)
init_file.write(
'VERSION = (1, 2, 3)\n'
'\n'
'VERSION_MAJOR = 1'
'\n'
'def get_version():\n'
' return [3, 4, 5, "dev"]\n'
'\n'
)
return package_dir, config
@contextlib.contextmanager
def get_dist(tmpdir, kwargs_initial=None, parse=True):
kwargs_initial = kwargs_initial or {}
with tmpdir.as_cwd():
dist = Distribution(kwargs_initial)
dist.script_name = 'setup.py'
parse and dist.parse_config_files()
yield dist
def test_parsers_implemented():
with pytest.raises(NotImplementedError):
handler = ErrConfigHandler(None, {}, False, Mock())
handler.parsers
| ErrConfigHandler |
python | huggingface__transformers | tests/models/siglip2/test_modeling_siglip2.py | {
"start": 6770,
"end": 10235
} | class ____:
def __init__(
self,
parent,
batch_size=12,
num_patches=16,
image_num_patches=24,
patch_size=2,
num_channels=3,
is_training=True,
hidden_size=64,
num_hidden_layers=2,
num_attention_heads=4,
intermediate_size=37,
dropout=0.1,
attention_dropout=0.1,
initializer_range=0.02,
scope=None,
):
self.parent = parent
self.batch_size = batch_size
self.num_patches = num_patches
self.patch_size = patch_size
self.num_channels = num_channels
self.is_training = is_training
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.dropout = dropout
self.attention_dropout = attention_dropout
self.initializer_range = initializer_range
self.scope = scope
self.seq_length = image_num_patches
def prepare_config_and_inputs(self):
pixel_values = floats_tensor(
[self.batch_size, self.seq_length, self.num_channels * self.patch_size * self.patch_size]
)
pixel_attention_mask = torch.zeros(self.batch_size, self.seq_length, device=torch_device, dtype=torch.long)
spatial_shapes = [
(height, width)
for height in range(1, self.seq_length)
for width in range(1, self.seq_length)
if height * width <= self.seq_length
] * self.batch_size
spatial_shapes = spatial_shapes[: self.batch_size]
spatial_shapes = torch.tensor(spatial_shapes, device=torch_device, dtype=torch.long)
for i, (height, width) in enumerate(spatial_shapes):
pixel_attention_mask[i, : height * width] = 1
config = self.get_config()
return config, pixel_values, pixel_attention_mask, spatial_shapes
def get_config(self):
return Siglip2VisionConfig(
num_patches=self.num_patches,
patch_size=self.patch_size,
num_channels=self.num_channels,
hidden_size=self.hidden_size,
num_hidden_layers=self.num_hidden_layers,
num_attention_heads=self.num_attention_heads,
intermediate_size=self.intermediate_size,
dropout=self.dropout,
attention_dropout=self.attention_dropout,
initializer_range=self.initializer_range,
)
def create_and_check_model(self, config, pixel_values, pixel_attention_mask, spatial_shapes):
model = Siglip2VisionModel(config=config)
model.to(torch_device)
model.eval()
with torch.no_grad():
result = model(pixel_values, pixel_attention_mask, spatial_shapes)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
def prepare_config_and_inputs_for_common(self):
config, pixel_values, pixel_attention_mask, spatial_shapes = self.prepare_config_and_inputs()
inputs_dict = {
"pixel_values": pixel_values,
"pixel_attention_mask": pixel_attention_mask,
"spatial_shapes": spatial_shapes,
}
return config, inputs_dict
@require_torch
| Siglip2VisionModelTester |
python | django__django | tests/admin_autodiscover/models.py | {
"start": 31,
"end": 102
} | class ____(models.Model):
title = models.CharField(max_length=10)
| Story |
python | dagster-io__dagster | python_modules/dagster-graphql/dagster_graphql/schema/roots/mutation.py | {
"start": 22097,
"end": 22513
} | class ____(graphene.Union):
"""The output from shutting down a code location server."""
class Meta:
types = (
GrapheneShutdownRepositoryLocationSuccess,
GrapheneRepositoryLocationNotFound,
GrapheneUnauthorizedError,
GraphenePythonError,
)
name = "ShutdownRepositoryLocationMutationResult"
| GrapheneShutdownRepositoryLocationMutationResult |
python | apache__airflow | task-sdk/src/airflow/sdk/api/datamodels/_generated.py | {
"start": 8980,
"end": 9153
} | class ____(BaseModel):
"""
Response for task breadcrumbs.
"""
breadcrumbs: Annotated[list[dict[str, Any]], Field(title="Breadcrumbs")]
| TaskBreadcrumbsResponse |
python | getsentry__sentry | src/sentry/integrations/messaging/spec.py | {
"start": 7277,
"end": 9495
} | class ____(DefaultActionHandler):
def __init__(self, spec: MessagingIntegrationSpec):
super().__init__()
self._spec = spec
@property
def provider(self) -> str:
return self._spec.provider_slug
def send_alert(
self,
action: AlertRuleTriggerAction,
incident: Incident,
project: Project,
metric_value: int | float | None,
new_status: IncidentStatus,
notification_uuid: str | None = None,
) -> None:
if metric_value is None:
metric_value = get_metric_count_from_incident(incident)
alert_rule_serialized_response: AlertRuleSerializerResponse = serialize(
incident.alert_rule, None, AlertRuleSerializer()
)
incident_serialized_response: DetailedIncidentSerializerResponse = serialize(
incident, None, DetailedIncidentSerializer()
)
open_period_context = OpenPeriodContext.from_incident(incident=incident)
notification_context = NotificationContext.from_alert_rule_trigger_action(action)
alert_context = AlertContext.from_alert_rule_incident(incident.alert_rule)
metric_issue_context = MetricIssueContext.from_legacy_models(
incident=incident,
new_status=new_status,
metric_value=metric_value,
)
success = self._spec.send_incident_alert_notification(
organization=incident.organization,
alert_context=alert_context,
notification_context=notification_context,
metric_issue_context=metric_issue_context,
open_period_context=open_period_context,
alert_rule_serialized_response=alert_rule_serialized_response,
incident_serialized_response=incident_serialized_response,
notification_uuid=notification_uuid,
)
if success:
self.record_alert_sent_analytics(
organization_id=incident.organization.id,
project_id=project.id,
alert_id=incident.alert_rule.id,
external_id=action.target_identifier,
notification_uuid=notification_uuid,
)
| MessagingActionHandler |
python | sqlalchemy__sqlalchemy | test/aaa_profiling/test_orm.py | {
"start": 19939,
"end": 24542
} | class ____(NoCache, fixtures.MappedTest):
__requires__ = ("python_profiling_backend",)
__backend__ = True
@classmethod
def define_tables(cls, metadata):
def make_some_columns():
return [Column("c%d" % i, Integer) for i in range(10)]
Table(
"a",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
*make_some_columns(),
)
Table(
"b",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("a_id", ForeignKey("a.id")),
*make_some_columns(),
)
Table(
"c",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("b_id", ForeignKey("b.id")),
*make_some_columns(),
)
Table(
"d",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("c_id", ForeignKey("c.id")),
*make_some_columns(),
)
Table(
"e",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("a_id", ForeignKey("a.id")),
*make_some_columns(),
)
Table(
"f",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("e_id", ForeignKey("e.id")),
*make_some_columns(),
)
Table(
"g",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("e_id", ForeignKey("e.id")),
*make_some_columns(),
)
@classmethod
def setup_classes(cls):
class A(cls.Basic):
pass
class B(cls.Basic):
pass
class C(cls.Basic):
pass
class D(cls.Basic):
pass
class E(cls.Basic):
pass
class F(cls.Basic):
pass
class G(cls.Basic):
pass
@classmethod
def setup_mappers(cls):
A, B, C, D, E, F, G = cls.classes("A", "B", "C", "D", "E", "F", "G")
a, b, c, d, e, f, g = cls.tables("a", "b", "c", "d", "e", "f", "g")
cls.mapper_registry.map_imperatively(
A, a, properties={"bs": relationship(B), "es": relationship(E)}
)
cls.mapper_registry.map_imperatively(
B, b, properties={"cs": relationship(C)}
)
cls.mapper_registry.map_imperatively(
C, c, properties={"ds": relationship(D)}
)
cls.mapper_registry.map_imperatively(D, d)
cls.mapper_registry.map_imperatively(
E, e, properties={"fs": relationship(F), "gs": relationship(G)}
)
cls.mapper_registry.map_imperatively(F, f)
cls.mapper_registry.map_imperatively(G, g)
@classmethod
def insert_data(cls, connection):
A, B, C, D, E, F, G = cls.classes("A", "B", "C", "D", "E", "F", "G")
s = Session(connection)
s.add(
A(
bs=[B(cs=[C(ds=[D()])]), B(cs=[C()])],
es=[E(fs=[F()], gs=[G()])],
)
)
s.commit()
def test_fetch_results_integrated(self, testing_engine):
A, B, C, D, E, F, G = self.classes("A", "B", "C", "D", "E", "F", "G")
# this test has been reworked to use the compiled cache again,
# as a real-world scenario.
eng = testing_engine(options={"sqlite_share_pool": True})
sess = Session(eng)
q = sess.query(A).options(
joinedload(A.bs).joinedload(B.cs).joinedload(C.ds),
joinedload(A.es).joinedload(E.fs),
defaultload(A.es).joinedload(E.gs),
)
@profiling.function_call_count()
def initial_run():
list(q.all())
initial_run()
sess.close()
@profiling.function_call_count()
def subsequent_run():
list(q.all())
subsequent_run()
sess.close()
@profiling.function_call_count()
def more_runs():
for i in range(100):
list(q.all())
more_runs()
sess.close()
| JoinedEagerLoadTest |
python | numba__numba | numba/tests/test_tuples.py | {
"start": 18191,
"end": 19119
} | class ____(TestCase):
"""
Test implicit conversions between tuple types.
"""
def check_conversion(self, fromty, toty, val):
pyfunc = identity
cfunc = njit(toty(fromty))(pyfunc)
res = cfunc(val)
self.assertEqual(res, val)
def test_conversions(self):
check = self.check_conversion
fromty = types.UniTuple(types.int32, 2)
check(fromty, types.UniTuple(types.float32, 2), (4, 5))
check(fromty, types.Tuple((types.float32, types.int16)), (4, 5))
aty = types.UniTuple(types.int32, 0)
bty = types.Tuple(())
check(aty, bty, ())
check(bty, aty, ())
with self.assertRaises(errors.TypingError) as raises:
check(fromty, types.Tuple((types.float32,)), (4, 5))
msg = "No conversion from UniTuple(int32 x 2) to UniTuple(float32 x 1)"
self.assertIn(msg, str(raises.exception))
| TestConversions |
python | doocs__leetcode | solution/0300-0399/0340.Longest Substring with At Most K Distinct Characters/Solution.py | {
"start": 0,
"end": 348
} | class ____:
def lengthOfLongestSubstringKDistinct(self, s: str, k: int) -> int:
l = 0
cnt = Counter()
for c in s:
cnt[c] += 1
if len(cnt) > k:
cnt[s[l]] -= 1
if cnt[s[l]] == 0:
del cnt[s[l]]
l += 1
return len(s) - l
| Solution |
python | huggingface__transformers | src/transformers/models/bros/modeling_bros.py | {
"start": 3808,
"end": 4377
} | class ____(nn.Module):
def __init__(self, config):
super().__init__()
self.bbox_sinusoid_emb = BrosPositionalEmbedding2D(config)
self.bbox_projection = nn.Linear(config.dim_bbox_sinusoid_emb_2d, config.dim_bbox_projection, bias=False)
def forward(self, bbox: torch.Tensor):
bbox_t = bbox.transpose(0, 1)
bbox_pos = bbox_t[None, :, :, :] - bbox_t[:, None, :, :]
bbox_pos_emb = self.bbox_sinusoid_emb(bbox_pos)
bbox_pos_emb = self.bbox_projection(bbox_pos_emb)
return bbox_pos_emb
| BrosBboxEmbeddings |
python | pytorch__pytorch | test/torch_np/numpy_tests/core/test_numeric.py | {
"start": 45611,
"end": 48839
} | class ____(TestCase):
def test_array_equal(self):
res = np.array_equal(np.array([1, 2]), np.array([1, 2]))
assert_(res)
assert_(type(res) is bool)
res = np.array_equal(np.array([1, 2]), np.array([1, 2, 3]))
assert_(not res)
assert_(type(res) is bool)
res = np.array_equal(np.array([1, 2]), np.array([3, 4]))
assert_(not res)
assert_(type(res) is bool)
res = np.array_equal(np.array([1, 2]), np.array([1, 3]))
assert_(not res)
assert_(type(res) is bool)
def test_array_equal_equal_nan(self):
# Test array_equal with equal_nan kwarg
a1 = np.array([1, 2, np.nan])
a2 = np.array([1, np.nan, 2])
a3 = np.array([1, 2, np.inf])
# equal_nan=False by default
assert_(not np.array_equal(a1, a1))
assert_(np.array_equal(a1, a1, equal_nan=True))
assert_(not np.array_equal(a1, a2, equal_nan=True))
# nan's not conflated with inf's
assert_(not np.array_equal(a1, a3, equal_nan=True))
# 0-D arrays
a = np.array(np.nan)
assert_(not np.array_equal(a, a))
assert_(np.array_equal(a, a, equal_nan=True))
# Non-float dtype - equal_nan should have no effect
a = np.array([1, 2, 3], dtype=int)
assert_(np.array_equal(a, a))
assert_(np.array_equal(a, a, equal_nan=True))
# Multi-dimensional array
a = np.array([[0, 1], [np.nan, 1]])
assert_(not np.array_equal(a, a))
assert_(np.array_equal(a, a, equal_nan=True))
# Complex values
a, b = [np.array([1 + 1j])] * 2
a.real, b.imag = np.nan, np.nan
assert_(not np.array_equal(a, b, equal_nan=False))
assert_(np.array_equal(a, b, equal_nan=True))
def test_none_compares_elementwise(self):
a = np.ones(3)
assert_equal(a.__eq__(None), [False, False, False])
assert_equal(a.__ne__(None), [True, True, True])
def test_array_equiv(self):
res = np.array_equiv(np.array([1, 2]), np.array([1, 2]))
assert_(res)
assert_(type(res) is bool)
res = np.array_equiv(np.array([1, 2]), np.array([1, 2, 3]))
assert_(not res)
assert_(type(res) is bool)
res = np.array_equiv(np.array([1, 2]), np.array([3, 4]))
assert_(not res)
assert_(type(res) is bool)
res = np.array_equiv(np.array([1, 2]), np.array([1, 3]))
assert_(not res)
assert_(type(res) is bool)
res = np.array_equiv(np.array([1, 1]), np.array([1]))
assert_(res)
assert_(type(res) is bool)
res = np.array_equiv(np.array([1, 1]), np.array([[1], [1]]))
assert_(res)
assert_(type(res) is bool)
res = np.array_equiv(np.array([1, 2]), np.array([2]))
assert_(not res)
assert_(type(res) is bool)
res = np.array_equiv(np.array([1, 2]), np.array([[1], [2]]))
assert_(not res)
assert_(type(res) is bool)
res = np.array_equiv(
np.array([1, 2]), np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
)
assert_(not res)
assert_(type(res) is bool)
@instantiate_parametrized_tests
| TestArrayComparisons |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_hyperlink27.py | {
"start": 315,
"end": 991
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("hyperlink27.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file with hyperlinks."""
workbook = Workbook(self.got_filename)
# Turn off default URL format for testing.
workbook.default_url_format = None
worksheet = workbook.add_worksheet()
worksheet.write_url(
"A1", r"external:\\Vboxsvr\share\foo bar.xlsx#'Some Sheet'!A1"
)
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | matplotlib__matplotlib | lib/matplotlib/_api/__init__.py | {
"start": 887,
"end": 969
} | class ____:
def __repr__(self):
return "<UNSET>"
UNSET = _Unset()
| _Unset |
python | jazzband__django-simple-history | simple_history/tests/tests/test_models.py | {
"start": 41229,
"end": 44605
} | class ____(unittest.TestCase):
@staticmethod
def create_history_model(model, inherited):
custom_model_name_prefix = f"Mock{HistoricalRecords.DEFAULT_MODEL_NAME_PREFIX}"
records = HistoricalRecords(
# Provide a custom history model name, to prevent name collisions
# with existing historical models
custom_model_name=lambda name: f"{custom_model_name_prefix}{name}",
)
records.module = model.__module__
return records.create_history_model(model, inherited)
def test_create_history_model_has_expected_tracked_files_attr(self):
def assert_tracked_fields_equal(model, expected_field_names):
from .. import models
history_model = getattr(
models, f"{HistoricalRecords.DEFAULT_MODEL_NAME_PREFIX}{model.__name__}"
)
self.assertListEqual(
[field.name for field in history_model.tracked_fields],
expected_field_names,
)
assert_tracked_fields_equal(
Poll,
["id", "question", "pub_date"],
)
assert_tracked_fields_equal(
PollWithNonEditableField,
["id", "question", "pub_date", "modified"],
)
assert_tracked_fields_equal(
PollWithExcludeFields,
["id", "question", "place"],
)
assert_tracked_fields_equal(
PollWithExcludedFieldsWithDefaults,
["id", "question"],
)
assert_tracked_fields_equal(
PollWithExcludedFKField,
["id", "question", "pub_date"],
)
assert_tracked_fields_equal(
PollWithAlternativeManager,
["id", "question", "pub_date"],
)
assert_tracked_fields_equal(
PollWithHistoricalIPAddress,
["id", "question", "pub_date"],
)
assert_tracked_fields_equal(
PollWithManyToMany,
["id", "question", "pub_date"],
)
assert_tracked_fields_equal(
Choice,
["id", "poll", "choice", "votes"],
)
assert_tracked_fields_equal(
ModelWithCustomAttrOneToOneField,
["id", "poll"],
)
def test_create_history_model_with_one_to_one_field_to_integer_field(self):
try:
self.create_history_model(AdminProfile, False)
except Exception:
self.fail(
"SimpleHistory should handle foreign keys to one to one"
"fields to integer fields without throwing an exception"
)
def test_create_history_model_with_one_to_one_field_to_char_field(self):
try:
self.create_history_model(Bookcase, False)
except Exception:
self.fail(
"SimpleHistory should handle foreign keys to one to one"
"fields to char fields without throwing an exception."
)
def test_create_history_model_with_multiple_one_to_ones(self):
try:
self.create_history_model(MultiOneToOne, False)
except Exception:
self.fail(
"SimpleHistory should handle foreign keys to one to one"
"fields to one to one fields without throwing an "
"exception."
)
| CreateHistoryModelTests |
python | python__mypy | mypyc/irbuild/prepare.py | {
"start": 30611,
"end": 35925
} | class ____(NamedTuple):
singledispatch_func: FuncDef
dispatch_type: TypeInfo
def get_singledispatch_register_call_info(
decorator: Expression, func: FuncDef
) -> RegisteredImpl | None:
# @fun.register(complex)
# def g(arg): ...
if (
isinstance(decorator, CallExpr)
and len(decorator.args) == 1
and isinstance(decorator.args[0], RefExpr)
):
callee = decorator.callee
dispatch_type = decorator.args[0].node
if not isinstance(dispatch_type, TypeInfo):
return None
if isinstance(callee, MemberExpr):
return registered_impl_from_possible_register_call(callee, dispatch_type)
# @fun.register
# def g(arg: int): ...
elif isinstance(decorator, MemberExpr):
# we don't know if this is a register call yet, so we can't be sure that the function
# actually has arguments
if not func.arguments:
return None
arg_type = get_proper_type(func.arguments[0].variable.type)
if not isinstance(arg_type, Instance):
return None
info = arg_type.type
return registered_impl_from_possible_register_call(decorator, info)
return None
def registered_impl_from_possible_register_call(
expr: MemberExpr, dispatch_type: TypeInfo
) -> RegisteredImpl | None:
if expr.name == "register" and isinstance(expr.expr, NameExpr):
node = expr.expr.node
if isinstance(node, Decorator):
return RegisteredImpl(node.func, dispatch_type)
return None
def adjust_generator_classes_of_methods(mapper: Mapper) -> None:
"""Make optimizations and adjustments to generated generator classes of methods.
This is a separate pass after type map has been built, since we need all classes
to be processed to analyze class hierarchies.
"""
generator_methods = []
for fdef, fn_ir in mapper.func_to_decl.items():
if isinstance(fdef, FuncDef) and (fdef.is_coroutine or fdef.is_generator):
gen_ir = create_generator_class_for_func(
fn_ir.module_name, fn_ir.class_name, fdef, mapper
)
# TODO: We could probably support decorators sometimes (static and class method?)
if not fdef.is_decorated:
name = fn_ir.name
precise_ret_type = True
if fn_ir.class_name is not None:
class_ir = mapper.type_to_ir[fdef.info]
subcls = class_ir.subclasses()
if subcls is None:
# Override could be of a different type, so we can't make assumptions.
precise_ret_type = False
elif class_ir.is_trait:
# Give up on traits. We could possibly have an abstract base class
# for generator return types to make this use precise types.
precise_ret_type = False
else:
for s in subcls:
if name in s.method_decls:
m = s.method_decls[name]
if (
m.is_generator != fn_ir.is_generator
or m.is_coroutine != fn_ir.is_coroutine
):
# Override is of a different kind, and the optimization
# to use a precise generator return type doesn't work.
precise_ret_type = False
else:
class_ir = None
if precise_ret_type:
# Give a more precise type for generators, so that we can optimize
# code that uses them. They return a generator object, which has a
# specific class. Without this, the type would have to be 'object'.
fn_ir.sig.ret_type = RInstance(gen_ir)
if fn_ir.bound_sig:
fn_ir.bound_sig.ret_type = RInstance(gen_ir)
if class_ir is not None:
if class_ir.is_method_final(name):
gen_ir.is_final_class = True
generator_methods.append((name, class_ir, gen_ir))
new_bases = {}
for name, class_ir, gen in generator_methods:
# For generator methods, we need to have subclass generator classes inherit from
# baseclass generator classes when there are overrides to maintain LSP.
base = class_ir.real_base()
if base is not None:
if base.has_method(name):
base_sig = base.method_sig(name)
if isinstance(base_sig.ret_type, RInstance):
base_gen = base_sig.ret_type.class_ir
new_bases[gen] = base_gen
# Add generator inheritance relationships by adjusting MROs.
for deriv, base in new_bases.items():
if base.children is not None:
base.children.append(deriv)
while True:
deriv.mro.append(base)
deriv.base_mro.append(base)
if base not in new_bases:
break
base = new_bases[base]
| RegisteredImpl |
python | Pylons__pyramid | src/pyramid/httpexceptions.py | {
"start": 13344,
"end": 13688
} | class ____(HTTPSuccessful):
"""
subclass of :class:`~HTTPSuccessful`
This indicates that the request has been accepted for processing, but the
processing has not been completed.
code: 202, title: Accepted
"""
code = 202
title = 'Accepted'
explanation = 'The request is accepted for processing.'
| HTTPAccepted |
python | mlflow__mlflow | mlflow/models/evaluation/artifacts.py | {
"start": 413,
"end": 846
} | class ____(EvaluationArtifact):
def _save(self, output_artifact_path):
self._content.save(output_artifact_path)
def _load_content_from_file(self, local_artifact_path):
from PIL.Image import open as open_image
self._content = open_image(local_artifact_path)
self._content.load() # Load image and close the file descriptor.
return self._content
@developer_stable
| ImageEvaluationArtifact |
python | walkccc__LeetCode | solutions/304. Range Sum Query 2D - Immutable/304.py | {
"start": 0,
"end": 671
} | class ____:
def __init__(self, matrix: list[list[int]]):
if not matrix:
return
m = len(matrix)
n = len(matrix[0])
# prefix[i][j] := the sum of matrix[0..i)[0..j)
self.prefix = [[0] * (n + 1) for _ in range(m + 1)]
for i in range(m):
for j in range(n):
self.prefix[i + 1][j + 1] = (matrix[i][j] + self.prefix[i][j + 1] +
self.prefix[i + 1][j] - self.prefix[i][j])
def sumRegion(self, row1: int, col1: int, row2: int, col2: int) -> int:
return (self.prefix[row2 + 1][col2 + 1] - self.prefix[row1][col2 + 1] -
self.prefix[row2 + 1][col1] + self.prefix[row1][col1])
| NumMatrix |
python | coleifer__peewee | tests/db_tests.py | {
"start": 13473,
"end": 14348
} | class ____(BaseTestCase):
def test_deferred_database(self):
deferred_db = SqliteDatabase(None)
self.assertTrue(deferred_db.deferred)
class DeferredModel(Model):
class Meta:
database = deferred_db
self.assertRaises(Exception, deferred_db.connect)
query = DeferredModel.select()
self.assertRaises(Exception, query.execute)
deferred_db.init(':memory:')
self.assertFalse(deferred_db.deferred)
conn = deferred_db.connect()
self.assertFalse(deferred_db.is_closed())
DeferredModel._schema.create_all()
self.assertEqual(list(DeferredModel.select()), [])
deferred_db.init(None)
self.assertTrue(deferred_db.deferred)
# The connection was automatically closed.
self.assertTrue(deferred_db.is_closed())
| TestDeferredDatabase |
python | doocs__leetcode | solution/3500-3599/3539.Find Sum of Array Product of Magical Sequences/Solution.py | {
"start": 232,
"end": 971
} | class ____:
def magicalSum(self, m: int, k: int, nums: List[int]) -> int:
@cache
def dfs(i: int, j: int, k: int, st: int) -> int:
if k < 0 or (i == len(nums) and j > 0):
return 0
if i == len(nums):
while st:
k -= st & 1
st >>= 1
return int(k == 0)
res = 0
for t in range(j + 1):
nt = t + st
p = pow(nums[i], t, mod)
nk = k - (nt & 1)
res += comb(j, t) * p * dfs(i + 1, j - t, nk, nt >> 1)
res %= mod
return res
ans = dfs(0, m, k, 0)
dfs.cache_clear()
return ans
| Solution |
python | numba__numba | numba/tests/test_hashing.py | {
"start": 5554,
"end": 8037
} | class ____(TestCase):
def setUp(self):
self.cfunc = jit(nopython=True)(hash_usecase)
def check_hash_values(self, values):
cfunc = self.cfunc
for val in list(values):
nb_hash = cfunc(val)
self.assertIsInstance(nb_hash, int)
try:
self.assertEqual(nb_hash, hash(val))
except AssertionError as e:
print("val, nb_hash, hash(val)")
print(val, nb_hash, hash(val))
print("abs(val), hashing._PyHASH_MODULUS - 1")
print(abs(val), hashing._PyHASH_MODULUS - 1)
raise e
def int_samples(self, typ=np.int64):
for start in (0, -50, 60000, 1 << 32):
info = np.iinfo(typ)
if not info.min <= start <= info.max:
continue
n = 100
yield range(start, start + n)
yield range(start, start + 100 * n, 100)
yield range(start, start + 128 * n, 128)
yield [-1]
def safe_construct(self, typ, value):
return getattr(np, 'int' + str(np.iinfo(typ).bits))(value).view(typ)
def float_samples(self, typ):
info = np.finfo(typ)
for start in (0, 10, info.max ** 0.5, info.max / 1000.0):
n = 100
min_step = max(info.tiny, start * info.resolution)
for step in (1.2, min_step ** 0.5, min_step):
if step < min_step:
continue
a = np.linspace(start, start + n * step, n)
a = a.astype(typ)
yield a
yield -a
yield a + a.mean()
# Infs, nans, zeros, magic -1
a = [0.0, 0.5, -0.0, -1.0, float('inf'), -float('inf'),]
# Python 3.10 has a hash for nan based on the pointer to the PyObject
# containing the nan, skip this input and use explicit test instead.
yield typ(a)
def complex_samples(self, typ, float_ty):
for real in self.float_samples(float_ty):
for imag in self.float_samples(float_ty):
# Ensure equal sizes
real = real[:len(imag)]
imag = imag[:len(real)]
a = real + typ(1j) * imag
# Python 3.10 has a hash for nan based on the pointer to the
# PyObject containing the nan, skip input that ends up as nan
if not np.any(np.isnan(a)):
yield a
| BaseTest |
python | ray-project__ray | python/ray/autoscaler/v2/metrics_reporter.py | {
"start": 403,
"end": 4423
} | class ____:
def __init__(self, prom_metrics: AutoscalerPrometheusMetrics) -> None:
self._prom_metrics = prom_metrics
def report_instances(
self,
instances: List[IMInstance],
node_type_configs: Dict[NodeType, NodeTypeConfig],
):
"""
Record autoscaler metrics for:
- pending_nodes: Nodes that are launching/pending ray start
- active_nodes: Active nodes (nodes running ray)
- recently_failed_nodes: Nodes that are being terminated.
- stopped_nodes: Nodes that are terminated.
"""
# map of instance type to a dict of status to count.
status_count_by_type: Dict[NodeType : Dict[str, int]] = {}
# initialize the status count by type.
for instance_type in node_type_configs.keys():
status_count_by_type[instance_type] = {
"pending": 0,
"running": 0,
"terminating": 0,
"terminated": 0,
}
for instance in instances:
if InstanceUtil.is_ray_pending(instance.status):
status_count_by_type[instance.instance_type]["pending"] += 1
elif InstanceUtil.is_ray_running(instance.status):
status_count_by_type[instance.instance_type]["running"] += 1
elif instance.status == IMInstance.TERMINATING:
status_count_by_type[instance.instance_type]["terminating"] += 1
elif instance.status == IMInstance.TERMINATED:
status_count_by_type[instance.instance_type]["terminated"] += 1
for instance_type, status_count in status_count_by_type.items():
self._prom_metrics.pending_nodes.labels(
SessionName=self._prom_metrics.session_name, NodeType=instance_type
).set(status_count["pending"])
self._prom_metrics.active_nodes.labels(
SessionName=self._prom_metrics.session_name, NodeType=instance_type
).set(status_count["running"])
self._prom_metrics.recently_failed_nodes.labels(
SessionName=self._prom_metrics.session_name, NodeType=instance_type
).set(status_count["terminating"])
self._prom_metrics.stopped_nodes.inc(status_count["terminated"])
def report_resources(
self,
instances: List[IMInstance],
node_type_configs: Dict[NodeType, NodeTypeConfig],
):
"""
Record autoscaler metrics for:
- pending_resources: Pending resources
- cluster_resources: Cluster resources (resources running on the cluster)
"""
# pending resources.
pending_resources = defaultdict(float)
cluster_resources = defaultdict(float)
def _add_resources(resource_map, node_type_configs, node_type, count):
node_resources = node_type_configs[node_type].resources
for resource_name, resource_value in node_resources.items():
resource_map[resource_name] += resource_value * count
for instance in instances:
if InstanceUtil.is_ray_pending(instance.status):
_add_resources(
pending_resources, node_type_configs, instance.instance_type, 1
)
elif InstanceUtil.is_ray_running(instance.status):
_add_resources(
cluster_resources, node_type_configs, instance.instance_type, 1
)
for resource_name, resource_value in pending_resources.items():
self._prom_metrics.pending_resources.labels(
SessionName=self._prom_metrics.session_name, resource=resource_name
).set(resource_value)
for resource_name, resource_value in cluster_resources.items():
self._prom_metrics.cluster_resources.labels(
SessionName=self._prom_metrics.session_name, resource=resource_name
).set(resource_value)
| AutoscalerMetricsReporter |
python | Farama-Foundation__Gymnasium | tests/utils/test_play.py | {
"start": 830,
"end": 7625
} | class ____:
def __init__(self, callback: Callable):
self.data_callback = callback
self.cumulative_reward = 0
self.last_observation = None
def callback(self, obs_t, obs_tp1, action, rew, terminated, truncated, info):
_, obs_tp1, _, rew, _, _, _ = self.data_callback(
obs_t, obs_tp1, action, rew, terminated, truncated, info
)
self.cumulative_reward += rew
self.last_observation = obs_tp1
def dummy_keys_to_action() -> dict[tuple[int], int]:
return {(RELEVANT_KEY_1,): 0, (RELEVANT_KEY_2,): 1}
def dummy_keys_to_action_int() -> dict[int, int]:
return {RELEVANT_KEY_1: 0, RELEVANT_KEY_2: 1}
def dummy_keys_to_action_str() -> dict[str, int]:
"""{'a': 0, 'd': 1}"""
return {chr(RELEVANT_KEY_1): 0, chr(RELEVANT_KEY_2): 1}
@pytest.fixture(autouse=True)
def close_pygame():
yield
pygame.quit()
def test_play_relevant_keys():
env = PlayableEnv(render_mode="rgb_array")
game = PlayableGame(env, dummy_keys_to_action())
assert game.relevant_keys == {RELEVANT_KEY_1, RELEVANT_KEY_2}
def test_play_relevant_keys_no_mapping():
env = PlayableEnv(render_mode="rgb_array")
with pytest.raises(MissingKeysToAction):
PlayableGame(env)
def test_play_relevant_keys_with_env_attribute():
"""Env has a keys_to_action attribute"""
env = PlayableEnv(render_mode="rgb_array")
env.get_keys_to_action = dummy_keys_to_action
game = PlayableGame(env)
assert game.relevant_keys == {RELEVANT_KEY_1, RELEVANT_KEY_2}
def test_video_size_no_zoom():
env = PlayableEnv(render_mode="rgb_array")
game = PlayableGame(env, dummy_keys_to_action())
assert game.video_size == env.render().shape[:2]
def test_video_size_zoom():
env = PlayableEnv(render_mode="rgb_array")
zoom = 2.2
game = PlayableGame(env, dummy_keys_to_action(), zoom)
assert game.video_size == tuple(int(dim * zoom) for dim in env.render().shape[:2])
def test_keyboard_quit_event():
env = PlayableEnv(render_mode="rgb_array")
game = PlayableGame(env, dummy_keys_to_action())
event = Event(pygame.KEYDOWN, {"key": pygame.K_ESCAPE})
assert game.running is True
game.process_event(event)
assert game.running is False
def test_pygame_quit_event():
env = PlayableEnv(render_mode="rgb_array")
game = PlayableGame(env, dummy_keys_to_action())
event = Event(pygame.QUIT)
assert game.running is True
game.process_event(event)
assert game.running is False
def test_keyboard_relevant_keydown_event():
env = PlayableEnv(render_mode="rgb_array")
game = PlayableGame(env, dummy_keys_to_action())
event = Event(pygame.KEYDOWN, {"key": RELEVANT_KEY_1})
game.process_event(event)
assert game.pressed_keys == [RELEVANT_KEY_1]
def test_keyboard_irrelevant_keydown_event():
env = PlayableEnv(render_mode="rgb_array")
game = PlayableGame(env, dummy_keys_to_action())
event = Event(pygame.KEYDOWN, {"key": IRRELEVANT_KEY})
game.process_event(event)
assert game.pressed_keys == []
def test_keyboard_keyup_event():
env = PlayableEnv(render_mode="rgb_array")
game = PlayableGame(env, dummy_keys_to_action())
event = Event(pygame.KEYDOWN, {"key": RELEVANT_KEY_1})
game.process_event(event)
event = Event(pygame.KEYUP, {"key": RELEVANT_KEY_1})
game.process_event(event)
assert game.pressed_keys == []
def test_play_loop_real_env():
SEED = 42
ENV = "CartPole-v1"
# If apply_wrapper is true, we provide keys_to_action through the environment. If str_keys is true, the
# keys_to_action dictionary will have strings as keys
for apply_wrapper, key_type in product([False, True], ["str", "int", "tuple"]):
# set of key events to inject into the play loop as callback
callback_events = [
Event(KEYDOWN, {"key": RELEVANT_KEY_1}),
Event(KEYUP, {"key": RELEVANT_KEY_1}),
Event(KEYDOWN, {"key": RELEVANT_KEY_2}),
Event(KEYUP, {"key": RELEVANT_KEY_2}),
Event(KEYDOWN, {"key": RELEVANT_KEY_1}),
Event(KEYUP, {"key": RELEVANT_KEY_1}),
Event(KEYDOWN, {"key": RELEVANT_KEY_1}),
Event(KEYUP, {"key": RELEVANT_KEY_1}),
Event(KEYDOWN, {"key": RELEVANT_KEY_2}),
Event(KEYUP, {"key": RELEVANT_KEY_2}),
Event(QUIT),
]
keydown_events = [k for k in callback_events if k.type == KEYDOWN]
def callback(obs_t, obs_tp1, action, rew, terminated, truncated, info):
pygame_event = callback_events.pop(0)
event.post(pygame_event)
# after releasing a key, post new events until
# we have one keydown
while pygame_event.type == KEYUP:
pygame_event = callback_events.pop(0)
event.post(pygame_event)
return obs_t, obs_tp1, action, rew, terminated, truncated, info
env = gym.make(ENV, render_mode="rgb_array", disable_env_checker=True)
env.reset(seed=SEED)
if key_type == "tuple":
keys_to_action = dummy_keys_to_action()
elif key_type == "str":
keys_to_action = dummy_keys_to_action_str()
elif key_type == "int":
keys_to_action = dummy_keys_to_action_int()
else:
assert False
# first action is 0 because at the first iteration
# we can not inject a callback event into play()
obs, _, _, _, _ = env.step(0)
for e in keydown_events:
if key_type == "tuple":
action = keys_to_action[(e.key,)]
elif key_type == "str":
action = keys_to_action[chr(e.key)]
elif key_type == "int":
action = keys_to_action[e.key]
else:
assert False
obs, _, _, _, _ = env.step(action)
env_play = gym.make(ENV, render_mode="rgb_array", disable_env_checker=True)
if apply_wrapper:
env_play = KeysToActionWrapper(env, keys_to_action=keys_to_action)
assert hasattr(env_play, "get_keys_to_action")
status = PlayStatus(callback)
play(
env_play,
callback=status.callback,
keys_to_action=None if apply_wrapper else keys_to_action,
seed=SEED,
)
assert (status.last_observation == obs).all()
def test_play_no_keys():
with pytest.raises(MissingKeysToAction):
play(gym.make("CartPole-v1", render_mode="rgb_array"))
def test_wrong_render_mode():
with pytest.raises(
ValueError,
match=r"PlayableGame wrapper works only with rgb_array and rgb_array_list render modes",
):
play(gym.make("CartPole-v1"), keys_to_action={})
| PlayStatus |
python | celery__celery | t/unit/worker/test_consumer.py | {
"start": 30075,
"end": 32072
} | class ____(ConsumerTestCase):
def test_perform_pending_operations_all_success(self):
"""
Test that all pending operations are processed successfully when `once=False`.
"""
c = self.get_consumer(no_hub=True)
# Create mock operations
mock_operation_1 = Mock()
mock_operation_2 = Mock()
# Add mock operations to _pending_operations
c._pending_operations = [mock_operation_1, mock_operation_2]
# Call perform_pending_operations
c.perform_pending_operations()
# Assert that all operations were called
mock_operation_1.assert_called_once()
mock_operation_2.assert_called_once()
# Ensure all pending operations are cleared
assert len(c._pending_operations) == 0
def test_perform_pending_operations_with_exception(self):
"""
Test that pending operations are processed even if one raises an exception, and
the exception is logged when `once=False`.
"""
c = self.get_consumer(no_hub=True)
# Mock operations: one failing, one successful
mock_operation_fail = Mock(side_effect=Exception("Test Exception"))
mock_operation_success = Mock()
# Add operations to _pending_operations
c._pending_operations = [mock_operation_fail, mock_operation_success]
# Patch logger to avoid logging during the test
with patch('celery.worker.consumer.consumer.logger.exception') as mock_logger:
# Call perform_pending_operations
c.perform_pending_operations()
# Assert that both operations were attempted
mock_operation_fail.assert_called_once()
mock_operation_success.assert_called_once()
# Ensure the exception was logged
mock_logger.assert_called_once()
# Ensure all pending operations are cleared
assert len(c._pending_operations) == 0
| test_Consumer_PerformPendingOperations |
python | dateutil__dateutil | tests/test_tz.py | {
"start": 48522,
"end": 59540
} | class ____(unittest.TestCase, TzFoldMixin):
# POSIX string indicating change to summer time on the 2nd Sunday in March
# at 2AM, and ending the 1st Sunday in November at 2AM. (valid >= 2007)
TZ_EST = 'EST+5EDT,M3.2.0/2,M11.1.0/2'
# POSIX string for AEST/AEDT (valid >= 2008)
TZ_AEST = 'AEST-10AEDT,M10.1.0/2,M4.1.0/3'
# POSIX string for GMT/BST
TZ_LON = 'GMT0BST,M3.5.0,M10.5.0'
def gettz(self, tzname):
# Actual time zone changes are handled by the _gettz_context function
tzname_map = {'Australia/Sydney': self.TZ_AEST,
'America/Toronto': self.TZ_EST,
'America/New_York': self.TZ_EST,
'Europe/London': self.TZ_LON}
return tz.tzstr(tzname_map[tzname])
def testStrStr(self):
# Test that tz.tzstr() won't throw an error if given a str instead
# of a unicode literal.
self.assertEqual(datetime(2003, 4, 6, 1, 59,
tzinfo=tz.tzstr(str("EST5EDT"))).tzname(), "EST")
self.assertEqual(datetime(2003, 4, 6, 2, 00,
tzinfo=tz.tzstr(str("EST5EDT"))).tzname(), "EDT")
def testStrInequality(self):
TZS1 = tz.tzstr('EST5EDT4')
# Standard abbreviation different
TZS2 = tz.tzstr('ET5EDT4')
self.assertNotEqual(TZS1, TZS2)
# DST abbreviation different
TZS3 = tz.tzstr('EST5EMT')
self.assertNotEqual(TZS1, TZS3)
# STD offset different
TZS4 = tz.tzstr('EST4EDT4')
self.assertNotEqual(TZS1, TZS4)
# DST offset different
TZS5 = tz.tzstr('EST5EDT3')
self.assertNotEqual(TZS1, TZS5)
def testStrInequalityStartEnd(self):
TZS1 = tz.tzstr('EST5EDT4')
# Start delta different
TZS2 = tz.tzstr('EST5EDT4,M4.2.0/02:00:00,M10-5-0/02:00')
self.assertNotEqual(TZS1, TZS2)
# End delta different
TZS3 = tz.tzstr('EST5EDT4,M4.2.0/02:00:00,M11-5-0/02:00')
self.assertNotEqual(TZS1, TZS3)
def testPosixOffset(self):
TZ1 = tz.tzstr('UTC-3')
self.assertEqual(datetime(2015, 1, 1, tzinfo=TZ1).utcoffset(),
timedelta(hours=-3))
TZ2 = tz.tzstr('UTC-3', posix_offset=True)
self.assertEqual(datetime(2015, 1, 1, tzinfo=TZ2).utcoffset(),
timedelta(hours=+3))
def testStrInequalityUnsupported(self):
TZS = tz.tzstr('EST5EDT')
self.assertFalse(TZS == 4)
self.assertTrue(TZS == ComparesEqual)
self.assertFalse(TZS != ComparesEqual)
def testTzStrRepr(self):
TZS1 = tz.tzstr('EST5EDT4')
TZS2 = tz.tzstr('EST')
self.assertEqual(repr(TZS1), "tzstr(" + repr('EST5EDT4') + ")")
self.assertEqual(repr(TZS2), "tzstr(" + repr('EST') + ")")
def testTzStrFailure(self):
with self.assertRaises(ValueError):
tz.tzstr('InvalidString;439999')
def testTzStrSingleton(self):
tz1 = tz.tzstr('EST5EDT')
tz2 = tz.tzstr('CST4CST')
tz3 = tz.tzstr('EST5EDT')
self.assertIsNot(tz1, tz2)
self.assertIs(tz1, tz3)
def testTzStrSingletonPosix(self):
tz_t1 = tz.tzstr('GMT+3', posix_offset=True)
tz_f1 = tz.tzstr('GMT+3', posix_offset=False)
tz_t2 = tz.tzstr('GMT+3', posix_offset=True)
tz_f2 = tz.tzstr('GMT+3', posix_offset=False)
self.assertIs(tz_t1, tz_t2)
self.assertIsNot(tz_t1, tz_f1)
self.assertIs(tz_f1, tz_f2)
def testTzStrInstance(self):
tz1 = tz.tzstr('EST5EDT')
tz2 = tz.tzstr.instance('EST5EDT')
tz3 = tz.tzstr.instance('EST5EDT')
assert tz1 is not tz2
assert tz2 is not tz3
# Ensure that these still are all the same zone
assert tz1 == tz2 == tz3
@pytest.mark.smoke
@pytest.mark.tzstr
def test_tzstr_weakref():
tz_t1 = tz.tzstr('EST5EDT')
tz_t2_ref = weakref.ref(tz.tzstr('EST5EDT'))
assert tz_t1 is tz_t2_ref()
del tz_t1
gc.collect()
assert tz_t2_ref() is not None
assert tz.tzstr('EST5EDT') is tz_t2_ref()
for offset in range(5,15):
tz.tzstr('GMT+{}'.format(offset))
gc.collect()
assert tz_t2_ref() is None
assert tz.tzstr('EST5EDT') is not tz_t2_ref()
@pytest.mark.tzstr
@pytest.mark.parametrize('tz_str,expected', [
# From https://www.gnu.org/software/libc/manual/html_node/TZ-Variable.html
('', tz.tzrange(None)), # TODO: Should change this so tz.tzrange('') works
('EST+5EDT,M3.2.0/2,M11.1.0/12',
tz.tzrange('EST', -18000, 'EDT', -14400,
start=relativedelta(month=3, day=1, weekday=SU(2), hours=2),
end=relativedelta(month=11, day=1, weekday=SU(1), hours=11))),
('WART4WARST,J1/0,J365/25', # This is DST all year, Western Argentina Summer Time
tz.tzrange('WART', timedelta(hours=-4), 'WARST',
start=relativedelta(month=1, day=1, hours=0),
end=relativedelta(month=12, day=31, days=1))),
('IST-2IDT,M3.4.4/26,M10.5.0', # Israel Standard / Daylight Time
tz.tzrange('IST', timedelta(hours=2), 'IDT',
start=relativedelta(month=3, day=1, weekday=TH(4), days=1, hours=2),
end=relativedelta(month=10, day=31, weekday=SU(-1), hours=1))),
('WGT3WGST,M3.5.0/2,M10.5.0/1',
tz.tzrange('WGT', timedelta(hours=-3), 'WGST',
start=relativedelta(month=3, day=31, weekday=SU(-1), hours=2),
end=relativedelta(month=10, day=31, weekday=SU(-1), hours=0))),
# Different offset specifications
('WGT0300WGST',
tz.tzrange('WGT', timedelta(hours=-3), 'WGST')),
('WGT03:00WGST',
tz.tzrange('WGT', timedelta(hours=-3), 'WGST')),
('AEST-1100AEDT',
tz.tzrange('AEST', timedelta(hours=11), 'AEDT')),
('AEST-11:00AEDT',
tz.tzrange('AEST', timedelta(hours=11), 'AEDT')),
# Different time formats
('EST5EDT,M3.2.0/4:00,M11.1.0/3:00',
tz.tzrange('EST', timedelta(hours=-5), 'EDT',
start=relativedelta(month=3, day=1, weekday=SU(2), hours=4),
end=relativedelta(month=11, day=1, weekday=SU(1), hours=2))),
('EST5EDT,M3.2.0/04:00,M11.1.0/03:00',
tz.tzrange('EST', timedelta(hours=-5), 'EDT',
start=relativedelta(month=3, day=1, weekday=SU(2), hours=4),
end=relativedelta(month=11, day=1, weekday=SU(1), hours=2))),
('EST5EDT,M3.2.0/0400,M11.1.0/0300',
tz.tzrange('EST', timedelta(hours=-5), 'EDT',
start=relativedelta(month=3, day=1, weekday=SU(2), hours=4),
end=relativedelta(month=11, day=1, weekday=SU(1), hours=2))),
])
def test_valid_GNU_tzstr(tz_str, expected):
tzi = tz.tzstr(tz_str)
assert tzi == expected
@pytest.mark.tzstr
@pytest.mark.parametrize('tz_str, expected', [
('EST5EDT,5,4,0,7200,11,3,0,7200',
tz.tzrange('EST', timedelta(hours=-5), 'EDT',
start=relativedelta(month=5, day=1, weekday=SU(+4), hours=+2),
end=relativedelta(month=11, day=1, weekday=SU(+3), hours=+1))),
('EST5EDT,5,-4,0,7200,11,3,0,7200',
tz.tzrange('EST', timedelta(hours=-5), 'EDT',
start=relativedelta(hours=+2, month=5, day=31, weekday=SU(-4)),
end=relativedelta(hours=+1, month=11, day=1, weekday=SU(+3)))),
('EST5EDT,5,4,0,7200,11,-3,0,7200',
tz.tzrange('EST', timedelta(hours=-5), 'EDT',
start=relativedelta(hours=+2, month=5, day=1, weekday=SU(+4)),
end=relativedelta(hours=+1, month=11, day=31, weekday=SU(-3)))),
('EST5EDT,5,4,0,7200,11,-3,0,7200,3600',
tz.tzrange('EST', timedelta(hours=-5), 'EDT',
start=relativedelta(hours=+2, month=5, day=1, weekday=SU(+4)),
end=relativedelta(hours=+1, month=11, day=31, weekday=SU(-3)))),
('EST5EDT,5,4,0,7200,11,-3,0,7200,3600',
tz.tzrange('EST', timedelta(hours=-5), 'EDT',
start=relativedelta(hours=+2, month=5, day=1, weekday=SU(+4)),
end=relativedelta(hours=+1, month=11, day=31, weekday=SU(-3)))),
('EST5EDT,5,4,0,7200,11,-3,0,7200,-3600',
tz.tzrange('EST', timedelta(hours=-5), 'EDT', timedelta(hours=-6),
start=relativedelta(hours=+2, month=5, day=1, weekday=SU(+4)),
end=relativedelta(hours=+3, month=11, day=31, weekday=SU(-3)))),
('EST5EDT,5,4,0,7200,11,-3,0,7200,+7200',
tz.tzrange('EST', timedelta(hours=-5), 'EDT', timedelta(hours=-3),
start=relativedelta(hours=+2, month=5, day=1, weekday=SU(+4)),
end=relativedelta(hours=0, month=11, day=31, weekday=SU(-3)))),
('EST5EDT,5,4,0,7200,11,-3,0,7200,+3600',
tz.tzrange('EST', timedelta(hours=-5), 'EDT',
start=relativedelta(hours=+2, month=5, day=1, weekday=SU(+4)),
end=relativedelta(hours=+1, month=11, day=31, weekday=SU(-3)))),
])
def test_valid_dateutil_format(tz_str, expected):
# This tests the dateutil-specific format that is used widely in the tests
# and examples. It is unclear where this format originated from.
with pytest.warns(tz.DeprecatedTzFormatWarning):
tzi = tz.tzstr.instance(tz_str)
assert tzi == expected
@pytest.mark.tzstr
@pytest.mark.parametrize('tz_str', [
'hdfiughdfuig,dfughdfuigpu87ñ::',
',dfughdfuigpu87ñ::',
'-1:WART4WARST,J1,J365/25',
'WART4WARST,J1,J365/-25',
'IST-2IDT,M3.4.-1/26,M10.5.0',
'IST-2IDT,M3,2000,1/26,M10,5,0'
])
def test_invalid_GNU_tzstr(tz_str):
with pytest.raises(ValueError):
tz.tzstr(tz_str)
# Different representations of the same default rule set
DEFAULT_TZSTR_RULES_EQUIV_2003 = [
'EST5EDT',
'EST5EDT4,M4.1.0/02:00:00,M10-5-0/02:00',
'EST5EDT4,95/02:00:00,298/02:00',
'EST5EDT4,J96/02:00:00,J299/02:00',
'EST5EDT4,J96/02:00:00,J299/02'
]
@pytest.mark.tzstr
@pytest.mark.parametrize('tz_str', DEFAULT_TZSTR_RULES_EQUIV_2003)
def test_tzstr_default_start(tz_str):
tzi = tz.tzstr(tz_str)
dt_std = datetime(2003, 4, 6, 1, 59, tzinfo=tzi)
dt_dst = datetime(2003, 4, 6, 2, 00, tzinfo=tzi)
assert get_timezone_tuple(dt_std) == EST_TUPLE
assert get_timezone_tuple(dt_dst) == EDT_TUPLE
@pytest.mark.tzstr
@pytest.mark.parametrize('tz_str', DEFAULT_TZSTR_RULES_EQUIV_2003)
def test_tzstr_default_end(tz_str):
tzi = tz.tzstr(tz_str)
dt_dst = datetime(2003, 10, 26, 0, 59, tzinfo=tzi)
dt_dst_ambig = datetime(2003, 10, 26, 1, 00, tzinfo=tzi)
dt_std_ambig = tz.enfold(dt_dst_ambig, fold=1)
dt_std = datetime(2003, 10, 26, 2, 00, tzinfo=tzi)
assert get_timezone_tuple(dt_dst) == EDT_TUPLE
assert get_timezone_tuple(dt_dst_ambig) == EDT_TUPLE
assert get_timezone_tuple(dt_std_ambig) == EST_TUPLE
assert get_timezone_tuple(dt_std) == EST_TUPLE
@pytest.mark.tzstr
@pytest.mark.parametrize('tzstr_1', ['EST5EDT',
'EST5EDT4,M4.1.0/02:00:00,M10-5-0/02:00'])
@pytest.mark.parametrize('tzstr_2', ['EST5EDT',
'EST5EDT4,M4.1.0/02:00:00,M10-5-0/02:00'])
def test_tzstr_default_cmp(tzstr_1, tzstr_2):
tz1 = tz.tzstr(tzstr_1)
tz2 = tz.tzstr(tzstr_2)
assert tz1 == tz2
| TZStrTest |
python | tensorflow__tensorflow | tensorflow/python/ops/variables.py | {
"start": 3496,
"end": 5160
} | class ____(enum.Enum):
"""Indicates how a distributed variable will be aggregated.
`tf.distribute.Strategy` distributes a model by making multiple copies
(called "replicas") acting on different elements of the input batch in a
data parallel model. When performing some variable-update operation,
for example `var.assign_add(x)`, in a model, we need to resolve how to combine
the different values for `x` computed in the different replicas.
* `NONE`: This is the default, giving an error if you use a
variable-update operation with multiple replicas.
* `SUM`: Add the updates across replicas.
* `MEAN`: Take the arithmetic mean ("average") of the updates across replicas.
* `ONLY_FIRST_REPLICA`: This is for when every replica is performing the same
update, but we only want to perform the update once. Used, e.g., for the
global step counter.
For example:
>>> strategy = tf.distribute.MirroredStrategy(["GPU:0", "GPU:1"])
>>> with strategy.scope():
... v = tf.Variable(5.0, aggregation=tf.VariableAggregation.MEAN)
>>> @tf.function
... def update_fn():
... return v.assign_add(1.0)
>>> strategy.run(update_fn)
PerReplica:{
0: <tf.Tensor: shape=(), dtype=float32, numpy=6.0>,
1: <tf.Tensor: shape=(), dtype=float32, numpy=6.0>
}
"""
NONE = 0
SUM = 1
MEAN = 2
ONLY_FIRST_REPLICA = 3
def __hash__(self):
return hash(self.value)
def __eq__(self, other):
if self is other:
return True
elif isinstance(other, VariableAggregation):
return int(self.value) == int(other.value)
else:
return False
@tf_export(v1=["VariableAggregation"])
| VariableAggregationV2 |
python | weaviate__weaviate-python-client | weaviate/rbac/models.py | {
"start": 5184,
"end": 5506
} | class ____(str, _Action, Enum):
CREATE = "create_replicate"
READ = "read_replicate"
UPDATE = "update_replicate"
DELETE = "delete_replicate"
@staticmethod
def values() -> List[str]:
return [action.value for action in ReplicateAction]
ActionT = TypeVar("ActionT", bound=Enum)
| ReplicateAction |
python | openai__openai-python | src/openai/types/chat/chat_completion_function_message_param.py | {
"start": 262,
"end": 591
} | class ____(TypedDict, total=False):
content: Required[Optional[str]]
"""The contents of the function message."""
name: Required[str]
"""The name of the function to call."""
role: Required[Literal["function"]]
"""The role of the messages author, in this case `function`."""
| ChatCompletionFunctionMessageParam |
python | great-expectations__great_expectations | tests/core/test__docs_decorators.py | {
"start": 18922,
"end": 19383
} | class ____:
"""Docstring summary.
Longer description.
Args:
some_arg: some_arg description.
other_arg: other_arg description.
"""
def __init__(self, some_arg, other_arg) -> None:
self.some_arg = some_arg
self.other_arg = other_arg
@deprecated_method_or_class(version="1.2.3", message="This is deprecated!!")
@new_method_or_class(version="1.2.3", message="Added in version 1.2.3")
| _ClassFullDocstringPublicAPI |
python | huggingface__transformers | src/transformers/models/got_ocr2/convert_got_ocr2_weights_to_hf.py | {
"start": 6149,
"end": 9642
} | class ____(TikTokenConverter):
def __init__(
self,
vocab_file,
special_tokens: list[str],
pattern: str,
model_max_length: int,
chat_template: Optional[str] = None,
**kwargs,
):
super().__init__(vocab_file, pattern=pattern)
self.additional_special_tokens = special_tokens
tokenizer = self.converted()
if chat_template is not None:
kwargs["chat_template"] = chat_template
self.tokenizer = PreTrainedTokenizerFast(
tokenizer_object=tokenizer,
model_input_names=["input_ids", "attention_mask"],
model_max_length=model_max_length,
**kwargs,
)
def write_tokenizer(tokenizer_path: str, save_dir: str, push_to_hub: bool = False):
model_max_length = CONTEXT_LENGTH
pattern = r"(?i:'s|'t|'re|'ve|'m|'ll|'d)|[^\r\n\p{L}\p{N}]?\p{L}+|\p{N}{1,3}| ?[^\s\p{L}\p{N}]+[\r\n]*|\s*[\r\n]+|\s+(?!\S)|\s+"
# Special tokens
special_tokens = (
["<|endoftext|>", "<|im_start|>", "<|im_end|>"]
+ [f"<|extra_{i}|>" for i in range(205)]
+ [
"<ref>",
"</ref>",
"<box>",
"</box>",
"<quad>",
"</quad>",
"<img>",
"</img>",
"<imgpad>",
]
)
pad_token = "<|endoftext|>"
pad_token = AddedToken(pad_token, lstrip=False, rstrip=False, normalized=False, single_word=False)
converter = GotOcr2Converter(
vocab_file=tokenizer_path,
pattern=pattern,
special_tokens=special_tokens,
model_max_length=model_max_length,
pad_token=pad_token,
bos_token="<|endoftext|>",
eos_token="<|endoftext|>",
clean_up_tokenization_spaces=True,
)
tokenizer = converter.tokenizer
tokenizer.save_pretrained(save_dir)
if push_to_hub:
tokenizer.push_to_hub("stepfun-ai/GOT-OCR-2.0-hf")
def write_image_processor(save_dir: str, push_to_hub: bool = False):
image_processor = GotOcr2ImageProcessor(
do_resize=True,
size={"height": 1024, "width": 1024},
do_rescale=True,
rescale_factor=1 / 255,
do_normalize=True,
image_mean=[0.48145466, 0.4578275, 0.40821073],
image_std=[0.26862954, 0.26130258, 0.27577711],
)
image_processor.save_pretrained(save_dir)
if push_to_hub:
image_processor.push_to_hub("stepfun-ai/GOT-OCR-2.0-hf")
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
"--input_dir",
default="stepfun-ai/GOT-OCR2_0",
help="Location of LLaMA weights, which contains tokenizer.model and model folders",
)
parser.add_argument(
"--output_dir",
default="GotOcr2",
help="Location to write HF model and tokenizer",
)
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether or not to push the converted model to the Hugging Face hub.",
)
args = parser.parse_args()
write_tokenizer(
tokenizer_path="qwen.tiktoken",
save_dir=args.output_dir,
push_to_hub=args.push_to_hub,
)
write_image_processor(
save_dir=args.output_dir,
push_to_hub=args.push_to_hub,
)
write_model(
model_path=args.output_dir,
input_base_path=args.input_dir,
push_to_hub=args.push_to_hub,
)
if __name__ == "__main__":
main()
| GotOcr2Converter |
python | apache__airflow | airflow-core/tests/unit/utils/log/test_log_reader.py | {
"start": 1822,
"end": 16201
} | class ____:
DAG_ID = "dag_log_reader"
TASK_ID = "task_log_reader"
DEFAULT_DATE = timezone.datetime(2017, 9, 1)
FILENAME_TEMPLATE = "{{ ti.dag_id }}/{{ ti.task_id }}/{{ ts | replace(':', '.') }}/{{ try_number }}.log"
@pytest.fixture(autouse=True)
def log_dir(self):
with tempfile.TemporaryDirectory() as log_dir:
self.log_dir = log_dir
yield log_dir
del self.log_dir
@pytest.fixture(autouse=True)
def settings_folder(self):
old_modules = dict(sys.modules)
with tempfile.TemporaryDirectory() as settings_folder:
self.settings_folder = settings_folder
sys.path.append(settings_folder)
yield settings_folder
sys.path.remove(settings_folder)
# Remove any new modules imported during the test run. This lets us
# import the same source files for more than one test.
for mod in [m for m in sys.modules if m not in old_modules]:
del sys.modules[mod]
del self.settings_folder
@pytest.fixture(autouse=True)
def configure_loggers(self, log_dir, settings_folder):
logging_config = {**DEFAULT_LOGGING_CONFIG}
logging_config["handlers"] = {**logging_config["handlers"]}
logging_config["handlers"]["task"] = {
**logging_config["handlers"]["task"],
"base_log_folder": log_dir,
}
mod = types.SimpleNamespace()
mod.LOGGING_CONFIG = logging_config
# "Inject" a fake module into sys so it loads it without needing to write valid python code
sys.modules["airflow_local_settings_test"] = mod
with conf_vars({("logging", "logging_config_class"): "airflow_local_settings_test.LOGGING_CONFIG"}):
settings.configure_logging()
try:
yield
finally:
del sys.modules["airflow_local_settings_test"]
settings.configure_logging()
@pytest.fixture(autouse=True)
def prepare_log_files(self, log_dir):
dir_path = f"{log_dir}/{self.DAG_ID}/{self.TASK_ID}/2017-09-01T00.00.00+00.00/"
os.makedirs(dir_path)
for try_number in range(1, 4):
with open(f"{dir_path}/{try_number}.log", "w+") as f:
f.write(f"try_number={try_number}.\n")
f.flush()
@pytest.fixture(autouse=True)
def prepare_db(self, create_task_instance):
session = settings.Session()
log_template = LogTemplate(filename=self.FILENAME_TEMPLATE, elasticsearch_id="")
session.add(log_template)
session.commit()
ti = create_task_instance(
dag_id=self.DAG_ID,
task_id=self.TASK_ID,
start_date=self.DEFAULT_DATE,
run_type=DagRunType.SCHEDULED,
logical_date=self.DEFAULT_DATE,
state=TaskInstanceState.RUNNING,
)
ti.try_number = 3
ti.hostname = "localhost"
self.ti = ti
yield
clear_db_runs()
clear_db_dags()
session.delete(log_template)
session.commit()
def test_test_read_log_chunks_should_read_one_try(self):
task_log_reader = TaskLogReader()
ti = copy.copy(self.ti)
ti.state = TaskInstanceState.SUCCESS
logs, metadata = task_log_reader.read_log_chunks(ti=ti, try_number=1, metadata={})
logs = list(logs)
assert logs[0].event == "::group::Log message source details"
assert logs[0].sources == [
f"{self.log_dir}/dag_log_reader/task_log_reader/2017-09-01T00.00.00+00.00/1.log"
]
assert logs[1].event == "::endgroup::"
assert logs[2].event == "try_number=1."
assert metadata == {"end_of_log": True, "log_pos": 1}
def test_test_read_log_chunks_should_read_latest_files(self):
task_log_reader = TaskLogReader()
ti = copy.copy(self.ti)
ti.state = TaskInstanceState.SUCCESS
logs, metadata = task_log_reader.read_log_chunks(ti=ti, try_number=None, metadata={})
logs = list(logs)
assert logs[0].event == "::group::Log message source details"
assert logs[0].sources == [
f"{self.log_dir}/dag_log_reader/task_log_reader/2017-09-01T00.00.00+00.00/3.log"
]
assert logs[1].event == "::endgroup::"
assert logs[2].event == f"try_number={ti.try_number}."
assert metadata == {"end_of_log": True, "log_pos": 1}
def test_test_test_read_log_stream_should_read_one_try(self):
task_log_reader = TaskLogReader()
ti = copy.copy(self.ti)
ti.state = TaskInstanceState.SUCCESS
stream = task_log_reader.read_log_stream(ti=ti, try_number=1, metadata={})
assert list(stream) == [
'{"timestamp":null,'
'"event":"::group::Log message source details",'
f'"sources":["{self.log_dir}/dag_log_reader/task_log_reader/2017-09-01T00.00.00+00.00/1.log"]'
"}\n",
'{"timestamp":null,"event":"::endgroup::"}\n',
'{"timestamp":null,"event":"try_number=1."}\n',
]
def test_test_test_read_log_stream_should_read_latest_logs(self):
task_log_reader = TaskLogReader()
self.ti.state = TaskInstanceState.SUCCESS # Ensure mocked instance is completed to return stream
stream = task_log_reader.read_log_stream(ti=self.ti, try_number=None, metadata={})
assert list(stream) == [
'{"timestamp":null,'
'"event":"::group::Log message source details",'
f'"sources":["{self.log_dir}/dag_log_reader/task_log_reader/2017-09-01T00.00.00+00.00/3.log"]'
"}\n",
'{"timestamp":null,"event":"::endgroup::"}\n',
'{"timestamp":null,"event":"try_number=3."}\n',
]
@mock.patch("airflow.utils.log.file_task_handler.FileTaskHandler.read")
def test_read_log_stream_should_support_multiple_chunks(self, mock_read):
from airflow.utils.log.file_task_handler import StructuredLogMessage
first_return = (convert_list_to_stream([StructuredLogMessage(event="1st line")]), {})
second_return = (
convert_list_to_stream([StructuredLogMessage(event="2nd line")]),
{"end_of_log": False},
)
third_return = (
convert_list_to_stream([StructuredLogMessage(event="3rd line")]),
{"end_of_log": True},
)
fourth_return = (
convert_list_to_stream([StructuredLogMessage(event="should never be read")]),
{"end_of_log": True},
)
mock_read.side_effect = [first_return, second_return, third_return, fourth_return]
task_log_reader = TaskLogReader()
self.ti.state = TaskInstanceState.SUCCESS
log_stream = task_log_reader.read_log_stream(ti=self.ti, try_number=1, metadata={})
assert list(log_stream) == [
'{"timestamp":null,"event":"1st line"}\n',
'{"timestamp":null,"event":"2nd line"}\n',
'{"timestamp":null,"event":"3rd line"}\n',
]
# as the metadata is now updated in place, when the latest run update metadata.
# the metadata stored in the mock_read will also be updated
# https://github.com/python/cpython/issues/77848
mock_read.assert_has_calls(
[
mock.call(self.ti, 1, metadata={"end_of_log": True}),
mock.call(self.ti, 1, metadata={"end_of_log": True}),
mock.call(self.ti, 1, metadata={"end_of_log": True}),
],
any_order=False,
)
@mock.patch("airflow.utils.log.file_task_handler.FileTaskHandler.read")
def test_read_log_stream_should_read_each_try_in_turn(self, mock_read):
from airflow.utils.log.file_task_handler import StructuredLogMessage
mock_read.side_effect = [
(
convert_list_to_stream([StructuredLogMessage(event="try_number=3.")]),
{"end_of_log": True},
)
]
task_log_reader = TaskLogReader()
log_stream = task_log_reader.read_log_stream(ti=self.ti, try_number=None, metadata={})
assert list(log_stream) == ['{"timestamp":null,"event":"try_number=3."}\n']
mock_read.assert_has_calls(
[
mock.call(self.ti, 3, metadata={"end_of_log": True}),
],
any_order=False,
)
@mock.patch("airflow.utils.log.file_task_handler.FileTaskHandler.read")
def test_read_log_stream_no_end_of_log_marker(self, mock_read):
from airflow.utils.log.file_task_handler import StructuredLogMessage
mock_read.side_effect = [
([StructuredLogMessage(event="hello")], {"end_of_log": False}),
*[([], {"end_of_log": False}) for _ in range(10)],
]
self.ti.state = TaskInstanceState.SUCCESS
task_log_reader = TaskLogReader()
task_log_reader.STREAM_LOOP_SLEEP_SECONDS = 0.001 # to speed up the test
log_stream = task_log_reader.read_log_stream(ti=self.ti, try_number=1, metadata={})
assert list(log_stream) == [
'{"timestamp":null,"event":"hello"}\n',
'{"event": "Log stream stopped - End of log marker not found; logs may be incomplete."}\n',
]
assert mock_read.call_count == 11
def test_supports_external_link(self):
task_log_reader = TaskLogReader()
# Short circuit if log_handler doesn't include ExternalLoggingMixin
task_log_reader.log_handler = mock.MagicMock()
mock_prop = mock.PropertyMock()
mock_prop.return_value = False
type(task_log_reader.log_handler).supports_external_link = mock_prop
assert not task_log_reader.supports_external_link
mock_prop.assert_not_called()
# Otherwise, defer to the log_handlers supports_external_link
task_log_reader.log_handler = mock.MagicMock(spec=ExternalLoggingMixin)
type(task_log_reader.log_handler).supports_external_link = mock_prop
assert not task_log_reader.supports_external_link
mock_prop.assert_called_once()
mock_prop.return_value = True
assert task_log_reader.supports_external_link
def test_task_log_filename_unique(self, dag_maker):
"""
Ensure the default log_filename_template produces a unique filename.
See discussion in apache/airflow#19058 [1]_ for how uniqueness may
change in a future Airflow release. For now, the logical date is used
to distinguish DAG runs. This test should be modified when the logical
date is no longer used to ensure uniqueness.
[1]: https://github.com/apache/airflow/issues/19058
"""
dag_id = "test_task_log_filename_ts_corresponds_to_logical_date"
task_id = "echo_run_type"
def echo_run_type(dag_run: DagRun, **kwargs):
print(dag_run.run_type)
with dag_maker(dag_id, start_date=self.DEFAULT_DATE, schedule="@daily") as dag:
PythonOperator(task_id=task_id, python_callable=echo_run_type)
start = pendulum.datetime(2021, 1, 1)
end = start + datetime.timedelta(days=1)
trigger_time = end + datetime.timedelta(hours=4, minutes=29) # Arbitrary.
# Create two DAG runs that have the same data interval, but not the same
# logical date, to check if they correctly use different log files.
scheduled_dagrun: DagRun = dag_maker.create_dagrun(
run_type=DagRunType.SCHEDULED,
logical_date=start,
data_interval=DataInterval(start, end),
)
manual_dagrun: DagRun = dag_maker.create_dagrun(
run_type=DagRunType.MANUAL,
logical_date=trigger_time,
data_interval=DataInterval(start, end),
)
scheduled_ti = scheduled_dagrun.get_task_instance(task_id)
manual_ti = manual_dagrun.get_task_instance(task_id)
assert scheduled_ti is not None
assert manual_ti is not None
scheduled_ti.refresh_from_task(dag.get_task(task_id))
manual_ti.refresh_from_task(dag.get_task(task_id))
reader = TaskLogReader()
assert reader.render_log_filename(scheduled_ti, 1) != reader.render_log_filename(manual_ti, 1)
@pytest.mark.parametrize(
("state", "try_number", "expected_event", "use_self_ti"),
[
(TaskInstanceState.SKIPPED, 0, "Task was skipped — no logs available.", False),
(
TaskInstanceState.UPSTREAM_FAILED,
0,
"Task did not run because upstream task(s) failed.",
False,
),
(TaskInstanceState.SUCCESS, 1, "try_number=1.", True),
],
)
def test_read_log_chunks_no_logs_and_normal(
self, create_task_instance, state, try_number, expected_event, use_self_ti
):
task_log_reader = TaskLogReader()
if use_self_ti:
ti = copy.copy(self.ti) # already prepared with log files
else:
ti = create_task_instance(dag_id="dag_no_logs", task_id="task_no_logs")
ti.state = state
logs, _ = task_log_reader.read_log_chunks(ti=ti, try_number=try_number, metadata={})
events = [log.event for log in logs]
assert any(expected_event in e for e in events)
@pytest.mark.parametrize(
("state", "try_number", "expected_event", "use_self_ti"),
[
(TaskInstanceState.SKIPPED, 0, "Task was skipped — no logs available.", False),
(
TaskInstanceState.UPSTREAM_FAILED,
0,
"Task did not run because upstream task(s) failed.",
False,
),
(TaskInstanceState.SUCCESS, 1, "try_number=1.", True),
],
)
def test_read_log_stream_no_logs_and_normal(
self, create_task_instance, state, try_number, expected_event, use_self_ti
):
task_log_reader = TaskLogReader()
if use_self_ti:
ti = copy.copy(self.ti) # session-bound TI with logs
else:
ti = create_task_instance(dag_id="dag_no_logs", task_id="task_no_logs")
ti.state = state
stream = task_log_reader.read_log_stream(ti=ti, try_number=try_number, metadata={})
assert any(expected_event in line for line in stream)
| TestLogView |
python | gabrielfalcao__HTTPretty | httpretty/core.py | {
"start": 37526,
"end": 41758
} | class ____(BaseClass):
"""Internal representation of `URIs <https://en.wikipedia.org/wiki/Uniform_Resource_Identifier>`_
.. tip:: all arguments are optional
:param username:
:param password:
:param hostname:
:param port:
:param path:
:param query:
:param fragment:
:param scheme:
:param last_request:
"""
default_str_attrs = (
'username',
'password',
'hostname',
'port',
'path',
)
def __init__(self,
username='',
password='',
hostname='',
port=80,
path='/',
query='',
fragment='',
scheme='',
last_request=None):
self.username = username or ''
self.password = password or ''
self.hostname = hostname or ''
if port:
port = int(port)
elif scheme == 'https':
port = 443
self.port = port or 80
self.path = path or ''
if query:
query_items = sorted(parse_qs(query).items())
self.query = urlencode(
encode_obj(query_items),
doseq=True,
)
else:
self.query = ''
if scheme:
self.scheme = scheme
elif self.port in POTENTIAL_HTTPS_PORTS:
self.scheme = 'https'
else:
self.scheme = 'http'
self.fragment = fragment or ''
self.last_request = last_request
def to_str(self, attrs):
fmt = ", ".join(['%s="%s"' % (k, getattr(self, k, '')) for k in attrs])
return r'<httpretty.URIInfo(%s)>' % fmt
def __str__(self):
return self.to_str(self.default_str_attrs)
def str_with_query(self):
attrs = self.default_str_attrs + ('query',)
return self.to_str(attrs)
def __hash__(self):
return int(hashlib.sha1(bytes(self, 'ascii')).hexdigest(), 16)
def __eq__(self, other):
self_tuple = (
self.port,
decode_utf8(self.hostname.lower()),
url_fix(decode_utf8(self.path)),
)
other_tuple = (
other.port,
decode_utf8(other.hostname.lower()),
url_fix(decode_utf8(other.path)),
)
return self_tuple == other_tuple
def full_url(self, use_querystring=True):
"""
:param use_querystring: bool
:returns: a string with the full url with the format ``{scheme}://{credentials}{domain}{path}{query}``
"""
credentials = ""
if self.password:
credentials = "{}:{}@".format(
self.username, self.password)
query = ""
if use_querystring and self.query:
query = "?{}".format(decode_utf8(self.query))
result = "{scheme}://{credentials}{domain}{path}{query}".format(
scheme=self.scheme,
credentials=credentials,
domain=self.get_full_domain(),
path=decode_utf8(self.path),
query=query
)
return result
def get_full_domain(self):
"""
:returns: a string in the form ``{domain}:{port}`` or just the domain if the port is 80 or 443
"""
hostname = decode_utf8(self.hostname)
# Port 80/443 should not be appended to the url
if self.port not in DEFAULT_HTTP_PORTS | DEFAULT_HTTPS_PORTS:
return ":".join([hostname, str(self.port)])
return hostname
@classmethod
def from_uri(cls, uri, entry):
"""
:param uri: string
:param entry: an instance of :py:class:`~httpretty.core.Entry`
"""
result = urlsplit(uri)
if result.scheme == 'https':
POTENTIAL_HTTPS_PORTS.add(int(result.port or 443))
else:
POTENTIAL_HTTP_PORTS.add(int(result.port or 80))
return cls(result.username,
result.password,
result.hostname,
result.port,
result.path,
result.query,
result.fragment,
result.scheme,
entry)
| URIInfo |
python | getsentry__sentry | src/sentry/users/api/bases/user.py | {
"start": 3955,
"end": 4889
} | class ____(Endpoint):
"""
The base endpoint for APIs that deal with Users. Inherit from this class to
get permission checks and to automatically convert user ID "me" to the
currently logged in user's ID.
"""
permission_classes: tuple[type[BasePermission], ...] = (UserPermission,)
def convert_args(
self, request: Request, user_id: int | str | None = None, *args: Any, **kwargs: Any
) -> Any:
if user_id == "me":
if not request.user.is_authenticated:
raise ResourceDoesNotExist
user_id = request.user.id
if user_id is None:
raise ResourceDoesNotExist
try:
user = User.objects.get(id=user_id)
except (User.DoesNotExist, ValueError):
raise ResourceDoesNotExist
self.check_object_permissions(request, user)
kwargs["user"] = user
return args, kwargs
| UserEndpoint |
python | dagster-io__dagster | python_modules/libraries/dagster-dg-core/dagster_dg_core/config.py | {
"start": 21406,
"end": 21622
} | class ____:
"""Record for errors encountered during Dg config validation."""
@property
@abstractmethod
def message(self) -> str:
"""The error message to display."""
@record
| _DgConfigErrorRecord |
python | getsentry__sentry | tests/sentry/models/test_groupassignee.py | {
"start": 704,
"end": 15648
} | class ____(TestCase):
def test_constraints(self) -> None:
# Can't both be assigned
with pytest.raises(AssertionError):
GroupAssignee.objects.create(
group=self.group, project=self.group.project, user_id=self.user.id, team=self.team
)
# Can't have nobody assigned
with pytest.raises(AssertionError):
GroupAssignee.objects.create(
group=self.group, project=self.group.project, user_id=None, team=None
)
def test_assign_user(self) -> None:
GroupAssignee.objects.assign(self.group, self.user)
assert GroupAssignee.objects.filter(
project=self.group.project, group=self.group, user_id=self.user.id, team__isnull=True
).exists()
activity = Activity.objects.get(
project=self.group.project, group=self.group, type=ActivityType.ASSIGNED.value
)
assert activity.data["assignee"] == str(self.user.id)
assert activity.data["assigneeEmail"] == self.user.email
assert activity.data["assigneeName"] == self.user.name
assert activity.data["assigneeType"] == "user"
def test_assign_team(self) -> None:
GroupAssignee.objects.assign(self.group, self.team)
assert GroupAssignee.objects.filter(
project=self.group.project, group=self.group, team=self.team, user_id__isnull=True
).exists()
activity = Activity.objects.get(
project=self.group.project, group=self.group, type=ActivityType.ASSIGNED.value
)
assert activity.data["assignee"] == str(self.team.id)
assert activity.data["assigneeEmail"] is None
assert activity.data["assigneeName"] == self.team.name
assert activity.data["assigneeType"] == "team"
def test_create_only(self) -> None:
result = GroupAssignee.objects.assign(self.group, self.user)
assert result == {"new_assignment": True, "updated_assignment": False}
assert GroupAssignee.objects.filter(
project=self.group.project, group=self.group, user_id=self.user.id, team__isnull=True
).exists()
activity = Activity.objects.get(
project=self.group.project, group=self.group, type=ActivityType.ASSIGNED.value
)
assert activity.data["assignee"] == str(self.user.id)
assert activity.data["assigneeEmail"] == self.user.email
assert activity.data["assigneeName"] == self.user.name
assert activity.data["assigneeType"] == "user"
other_user = self.create_user()
result = GroupAssignee.objects.assign(self.group, other_user, create_only=True)
assert result == {"new_assignment": False, "updated_assignment": False}
# Assignee should not have changed
assert GroupAssignee.objects.filter(
project=self.group.project, group=self.group, user_id=self.user.id, team__isnull=True
).exists()
# Should be no new activity rows
activity = Activity.objects.get(
project=self.group.project, group=self.group, type=ActivityType.ASSIGNED.value
)
assert activity.data["assignee"] == str(self.user.id)
assert activity.data["assigneeEmail"] == self.user.email
assert activity.data["assigneeName"] == self.user.name
assert activity.data["assigneeType"] == "user"
def test_reassign_user_to_team(self) -> None:
GroupAssignee.objects.assign(self.group, self.user)
assert GroupAssignee.objects.filter(
project=self.group.project, group=self.group, user_id=self.user.id, team__isnull=True
).exists()
GroupAssignee.objects.assign(self.group, self.team)
assert GroupAssignee.objects.filter(
project=self.group.project, group=self.group, team=self.team, user_id__isnull=True
).exists()
activity = list(
Activity.objects.filter(
project=self.group.project, group=self.group, type=ActivityType.ASSIGNED.value
).order_by("id")
)
assert activity[0].data["assignee"] == str(self.user.id)
assert activity[0].data["assigneeEmail"] == self.user.email
assert activity[0].data["assigneeName"] == self.user.name
assert activity[0].data["assigneeType"] == "user"
assert activity[1].data["assignee"] == str(self.team.id)
assert activity[1].data["assigneeEmail"] is None
assert activity[1].data["assigneeName"] == self.team.name
assert activity[1].data["assigneeType"] == "team"
@mock.patch.object(ExampleIntegration, "sync_assignee_outbound")
def test_assignee_sync_outbound_assign(
self, mock_sync_assignee_outbound: mock.MagicMock
) -> None:
group = self.group
integration = self.create_integration(
organization=group.organization,
external_id="123456",
provider="example",
oi_params={
"config": {
"sync_comments": True,
"sync_status_outbound": True,
"sync_status_inbound": True,
"sync_assignee_outbound": True,
"sync_assignee_inbound": True,
}
},
)
external_issue = ExternalIssue.objects.create(
organization_id=group.organization.id, integration_id=integration.id, key="APP-123"
)
GroupLink.objects.create(
group_id=group.id,
project_id=group.project_id,
linked_type=GroupLink.LinkedType.issue,
linked_id=external_issue.id,
relationship=GroupLink.Relationship.references,
)
with self.feature({"organizations:integrations-issue-sync": True}):
with self.tasks():
GroupAssignee.objects.assign(
self.group,
self.user,
)
mock_sync_assignee_outbound.assert_called_with(
external_issue,
user_service.get_user(self.user.id),
assign=True,
assignment_source=None,
)
assert GroupAssignee.objects.filter(
project=self.group.project,
group=self.group,
user_id=self.user.id,
team__isnull=True,
).exists()
activity = Activity.objects.get(
project=self.group.project, group=self.group, type=ActivityType.ASSIGNED.value
)
assert activity.data["assignee"] == str(self.user.id)
assert activity.data["assigneeEmail"] == self.user.email
assert activity.data["assigneeName"] == self.user.name
assert activity.data["assigneeType"] == "user"
@mock.patch.object(ExampleIntegration, "sync_assignee_outbound")
def test_assignee_sync_outbound_assign_with_matching_source_integration(
self, mock_sync_assignee_outbound
):
group = self.group
integration = self.create_integration(
organization=group.organization,
external_id="123456",
provider="example",
oi_params={
"config": {
"sync_comments": True,
"sync_status_outbound": True,
"sync_status_inbound": True,
"sync_assignee_outbound": True,
"sync_assignee_inbound": True,
}
},
)
external_issue = ExternalIssue.objects.create(
organization_id=group.organization.id, integration_id=integration.id, key="APP-123"
)
GroupLink.objects.create(
group_id=group.id,
project_id=group.project_id,
linked_type=GroupLink.LinkedType.issue,
linked_id=external_issue.id,
relationship=GroupLink.Relationship.references,
)
with self.feature({"organizations:integrations-issue-sync": True}):
with self.tasks():
# Assert that we don't perform an outbound assignment if
# the source of the assignment is the same target integration
GroupAssignee.objects.assign(
self.group,
self.user,
assignment_source=AssignmentSource.from_integration(integration),
)
mock_sync_assignee_outbound.assert_not_called()
assert GroupAssignee.objects.filter(
project=self.group.project,
group=self.group,
user_id=self.user.id,
team__isnull=True,
).exists()
activity = Activity.objects.get(
project=self.group.project, group=self.group, type=ActivityType.ASSIGNED.value
)
assert activity.data["assignee"] == str(self.user.id)
assert activity.data["assigneeEmail"] == self.user.email
assert activity.data["assigneeName"] == self.user.name
assert activity.data["assigneeType"] == "user"
@mock.patch.object(ExampleIntegration, "sync_assignee_outbound")
def test_assignee_sync_outbound_unassign(
self, mock_sync_assignee_outbound: mock.MagicMock
) -> None:
group = self.group
integration = self.create_integration(
organization=group.organization,
external_id="123456",
provider="example",
oi_params={
"config": {
"sync_comments": True,
"sync_status_outbound": True,
"sync_status_inbound": True,
"sync_assignee_outbound": True,
"sync_assignee_inbound": True,
}
},
)
external_issue = ExternalIssue.objects.create(
organization_id=group.organization.id, integration_id=integration.id, key="APP-123"
)
GroupLink.objects.create(
group_id=group.id,
project_id=group.project_id,
linked_type=GroupLink.LinkedType.issue,
linked_id=external_issue.id,
relationship=GroupLink.Relationship.references,
)
GroupAssignee.objects.assign(self.group, self.user)
with self.feature({"organizations:integrations-issue-sync": True}):
with self.tasks():
GroupAssignee.objects.deassign(self.group, self.user)
mock_sync_assignee_outbound.assert_called_with(
external_issue, None, assign=False, assignment_source=None
)
assert not GroupAssignee.objects.filter(
project=self.group.project,
group=self.group,
user_id=self.user.id,
team__isnull=True,
).exists()
assert Activity.objects.filter(
project=self.group.project, group=self.group, type=ActivityType.UNASSIGNED.value
).exists()
def test_assignee_sync_inbound_assign(self) -> None:
group = self.group
user_no_access = self.create_user()
user_w_access = self.user
integration = self.create_integration(
organization=group.organization,
external_id="123456",
provider="example",
oi_params={
"config": {
"sync_comments": True,
"sync_status_outbound": True,
"sync_status_inbound": True,
"sync_assignee_outbound": True,
"sync_assignee_inbound": True,
}
},
)
external_issue = ExternalIssue.objects.create(
organization_id=group.organization.id, integration_id=integration.id, key="APP-123"
)
GroupLink.objects.create(
group_id=group.id,
project_id=group.project_id,
linked_type=GroupLink.LinkedType.issue,
linked_id=external_issue.id,
relationship=GroupLink.Relationship.references,
)
with self.feature("organizations:integrations-issue-sync"):
# no permissions
groups_updated = sync_group_assignee_inbound(
integration, user_no_access.email, "APP-123"
)
assert not groups_updated
# w permissions
groups_updated = sync_group_assignee_inbound(
integration, user_w_access.email, "APP-123"
)
assert groups_updated[0] == group
assert GroupAssignee.objects.filter(
project=group.project, group=group, user_id=user_w_access.id, team__isnull=True
).exists()
# confirm capitalization doesn't affect syncing
groups_updated = sync_group_assignee_inbound(
integration, user_w_access.email.title(), "APP-123"
)
assert groups_updated[0] == group
assert GroupAssignee.objects.filter(
project=group.project, group=group, user_id=user_w_access.id, team__isnull=True
).exists()
def test_assignee_sync_inbound_deassign(self) -> None:
group = self.group
integration = self.create_integration(
organization=group.organization,
external_id="123456",
provider="example",
oi_params={
"config": {
"sync_comments": True,
"sync_status_outbound": True,
"sync_status_inbound": True,
"sync_assignee_outbound": True,
"sync_assignee_inbound": True,
}
},
)
external_issue = ExternalIssue.objects.create(
organization_id=group.organization.id, integration_id=integration.id, key="APP-123"
)
GroupLink.objects.create(
group_id=group.id,
project_id=group.project_id,
linked_type=GroupLink.LinkedType.issue,
linked_id=external_issue.id,
relationship=GroupLink.Relationship.references,
)
GroupAssignee.objects.assign(group, self.user)
with self.feature("organizations:integrations-issue-sync"):
groups_updated = sync_group_assignee_inbound(
integration, self.user.email, "APP-123", assign=False
)
assert groups_updated[0] == group
assert not GroupAssignee.objects.filter(
project=group.project, group=group, user_id=self.user.id, team__isnull=True
).exists()
| GroupAssigneeTestCase |
python | anthropics__anthropic-sdk-python | src/anthropic/types/beta/beta_code_execution_output_block_param.py | {
"start": 233,
"end": 379
} | class ____(TypedDict, total=False):
file_id: Required[str]
type: Required[Literal["code_execution_output"]]
| BetaCodeExecutionOutputBlockParam |
python | plotly__plotly.py | plotly/graph_objs/mesh3d/_lighting.py | {
"start": 233,
"end": 7753
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "mesh3d"
_path_str = "mesh3d.lighting"
_valid_props = {
"ambient",
"diffuse",
"facenormalsepsilon",
"fresnel",
"roughness",
"specular",
"vertexnormalsepsilon",
}
@property
def ambient(self):
"""
Ambient light increases overall color visibility but can wash
out the image.
The 'ambient' property is a number and may be specified as:
- An int or float in the interval [0, 1]
Returns
-------
int|float
"""
return self["ambient"]
@ambient.setter
def ambient(self, val):
self["ambient"] = val
@property
def diffuse(self):
"""
Represents the extent that incident rays are reflected in a
range of angles.
The 'diffuse' property is a number and may be specified as:
- An int or float in the interval [0, 1]
Returns
-------
int|float
"""
return self["diffuse"]
@diffuse.setter
def diffuse(self, val):
self["diffuse"] = val
@property
def facenormalsepsilon(self):
"""
Epsilon for face normals calculation avoids math issues arising
from degenerate geometry.
The 'facenormalsepsilon' property is a number and may be specified as:
- An int or float in the interval [0, 1]
Returns
-------
int|float
"""
return self["facenormalsepsilon"]
@facenormalsepsilon.setter
def facenormalsepsilon(self, val):
self["facenormalsepsilon"] = val
@property
def fresnel(self):
"""
Represents the reflectance as a dependency of the viewing
angle; e.g. paper is reflective when viewing it from the edge
of the paper (almost 90 degrees), causing shine.
The 'fresnel' property is a number and may be specified as:
- An int or float in the interval [0, 5]
Returns
-------
int|float
"""
return self["fresnel"]
@fresnel.setter
def fresnel(self, val):
self["fresnel"] = val
@property
def roughness(self):
"""
Alters specular reflection; the rougher the surface, the wider
and less contrasty the shine.
The 'roughness' property is a number and may be specified as:
- An int or float in the interval [0, 1]
Returns
-------
int|float
"""
return self["roughness"]
@roughness.setter
def roughness(self, val):
self["roughness"] = val
@property
def specular(self):
"""
Represents the level that incident rays are reflected in a
single direction, causing shine.
The 'specular' property is a number and may be specified as:
- An int or float in the interval [0, 2]
Returns
-------
int|float
"""
return self["specular"]
@specular.setter
def specular(self, val):
self["specular"] = val
@property
def vertexnormalsepsilon(self):
"""
Epsilon for vertex normals calculation avoids math issues
arising from degenerate geometry.
The 'vertexnormalsepsilon' property is a number and may be specified as:
- An int or float in the interval [0, 1]
Returns
-------
int|float
"""
return self["vertexnormalsepsilon"]
@vertexnormalsepsilon.setter
def vertexnormalsepsilon(self, val):
self["vertexnormalsepsilon"] = val
@property
def _prop_descriptions(self):
return """\
ambient
Ambient light increases overall color visibility but
can wash out the image.
diffuse
Represents the extent that incident rays are reflected
in a range of angles.
facenormalsepsilon
Epsilon for face normals calculation avoids math issues
arising from degenerate geometry.
fresnel
Represents the reflectance as a dependency of the
viewing angle; e.g. paper is reflective when viewing it
from the edge of the paper (almost 90 degrees), causing
shine.
roughness
Alters specular reflection; the rougher the surface,
the wider and less contrasty the shine.
specular
Represents the level that incident rays are reflected
in a single direction, causing shine.
vertexnormalsepsilon
Epsilon for vertex normals calculation avoids math
issues arising from degenerate geometry.
"""
def __init__(
self,
arg=None,
ambient=None,
diffuse=None,
facenormalsepsilon=None,
fresnel=None,
roughness=None,
specular=None,
vertexnormalsepsilon=None,
**kwargs,
):
"""
Construct a new Lighting object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.mesh3d.Lighting`
ambient
Ambient light increases overall color visibility but
can wash out the image.
diffuse
Represents the extent that incident rays are reflected
in a range of angles.
facenormalsepsilon
Epsilon for face normals calculation avoids math issues
arising from degenerate geometry.
fresnel
Represents the reflectance as a dependency of the
viewing angle; e.g. paper is reflective when viewing it
from the edge of the paper (almost 90 degrees), causing
shine.
roughness
Alters specular reflection; the rougher the surface,
the wider and less contrasty the shine.
specular
Represents the level that incident rays are reflected
in a single direction, causing shine.
vertexnormalsepsilon
Epsilon for vertex normals calculation avoids math
issues arising from degenerate geometry.
Returns
-------
Lighting
"""
super().__init__("lighting")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.mesh3d.Lighting
constructor must be a dict or
an instance of :class:`plotly.graph_objs.mesh3d.Lighting`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("ambient", arg, ambient)
self._set_property("diffuse", arg, diffuse)
self._set_property("facenormalsepsilon", arg, facenormalsepsilon)
self._set_property("fresnel", arg, fresnel)
self._set_property("roughness", arg, roughness)
self._set_property("specular", arg, specular)
self._set_property("vertexnormalsepsilon", arg, vertexnormalsepsilon)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Lighting |
python | wandb__wandb | wandb/vendor/pygments/lexer.py | {
"start": 29818,
"end": 31054
} | class ____(RegexLexer):
"""Drop-in replacement for RegexLexer that does profiling of its regexes."""
_prof_data = []
_prof_sort_index = 4 # defaults to time per call
def get_tokens_unprocessed(self, text, stack=('root',)):
# this needs to be a stack, since using(this) will produce nested calls
self.__class__._prof_data.append({})
for tok in RegexLexer.get_tokens_unprocessed(self, text, stack):
yield tok
rawdata = self.__class__._prof_data.pop()
data = sorted(((s, repr(r).strip('u\'').replace('\\\\', '\\')[:65],
n, 1000 * t, 1000 * t / n)
for ((s, r), (n, t)) in rawdata.items()),
key=lambda x: x[self._prof_sort_index],
reverse=True)
sum_total = sum(x[3] for x in data)
print()
print('Profiling result for %s lexing %d chars in %.3f ms' %
(self.__class__.__name__, len(text), sum_total))
print('=' * 110)
print('%-20s %-64s ncalls tottime percall' % ('state', 'regex'))
print('-' * 110)
for d in data:
print('%-20s %-65s %5d %8.4f %8.4f' % d)
print('=' * 110)
| ProfilingRegexLexer |
python | astropy__astropy | astropy/io/votable/exceptions.py | {
"start": 31518,
"end": 31786
} | class ____(VOTableSpecWarning):
"""The root element should specify a namespace.
The ``VOTABLE`` namespace is::
http://www.ivoa.net/xml/VOTable/vX.X
where "X.X" is the version number.
"""
message_template = "No XML namespace specified"
| W42 |
python | kamyu104__LeetCode-Solutions | Python/range-addition.py | {
"start": 33,
"end": 509
} | class ____(object):
def getModifiedArray(self, length, updates):
"""
:type length: int
:type updates: List[List[int]]
:rtype: List[int]
"""
result = [0] * length
for update in updates:
result[update[0]] += update[2]
if update[1]+1 < length:
result[update[1]+1] -= update[2]
for i in xrange(1, length):
result[i] += result[i-1]
return result
| Solution |
python | PrefectHQ__prefect | tests/server/models/test_task_runs.py | {
"start": 25720,
"end": 27452
} | class ____:
async def test_delete_task_run(self, task_run, session):
assert await models.task_runs.delete_task_run(
session=session, task_run_id=task_run.id
)
# make sure the task run is deleted
assert (
await models.task_runs.read_task_run(
session=session, task_run_id=task_run.id
)
) is None
async def test_delete_task_run_returns_false_if_does_not_exist(self, session):
assert not (
await models.task_runs.delete_task_run(session=session, task_run_id=uuid4())
)
async def test_delete_task_run_with_data(self, flow_run, session):
state_id = uuid4()
task_run = await models.task_runs.create_task_run(
session=session,
task_run=schemas.core.TaskRun(
flow_run_id=flow_run.id,
task_key="my-key",
dynamic_key="0",
state=schemas.states.State(
id=state_id,
type="COMPLETED",
name="My Running State",
data={"hello": "world"},
),
),
)
assert task_run.flow_run_id == flow_run.id
assert task_run.state.id == state_id
assert await models.task_runs.read_task_run(
session=session, task_run_id=task_run.id
)
assert await models.task_runs.delete_task_run(
session=session, task_run_id=task_run.id
)
# make sure the task run is deleted
assert (
await models.task_runs.read_task_run(
session=session, task_run_id=task_run.id
)
) is None
| TestDeleteTaskRun |
python | kamyu104__LeetCode-Solutions | Python/minimum-operations-to-make-numbers-non-positive.py | {
"start": 57,
"end": 681
} | class ____(object):
def minOperations(self, nums, x, y):
"""
:type nums: List[int]
:type x: int
:type y: int
:rtype: int
"""
def ceil_divide(a, b):
return (a+b-1)//b
def check(total):
return sum(ceil_divide(max(v-min(ceil_divide(v, y), total)*y, 0), x-y) for v in nums) <= total
left, right = 1, ceil_divide(max(nums), y)
while left <= right:
mid = left+(right-left)//2
if check(mid):
right = mid-1
else:
left = mid+1
return left
| Solution |
python | HypothesisWorks__hypothesis | hypothesis-python/tests/cover/test_stateful.py | {
"start": 4545,
"end": 5686
} | class ____(RuleBasedStateMachine):
b1 = Bundle("b1")
b2 = Bundle("b2")
def __init__(self):
self.created_counter = 0
self.consumed_counter = 0
super().__init__()
@invariant()
def bundle_length(self):
assert len(self.bundle("b1")) == self.created_counter - self.consumed_counter
@rule(target=b1)
def populate_b1(self):
self.created_counter += 1
return self.created_counter
@rule(target=b2, consumed=consumes(b1))
def depopulate_b1(self, consumed):
self.consumed_counter += 1
return consumed
@rule(consumed=lists(consumes(b1), max_size=3))
def depopulate_b1_multiple(self, consumed):
self.consumed_counter += len(consumed)
@rule(value1=b1, value2=b2)
def check(self, value1, value2):
assert value1 != value2
TestMachineWithConsumingRule = MachineWithConsumingRule.TestCase
def test_multiple():
none = multiple()
some = multiple(1, 2.01, "3", b"4", 5)
assert len(none.values) == 0
assert len(some.values) == 5
assert set(some.values) == {1, 2.01, "3", b"4", 5}
| MachineWithConsumingRule |
python | huggingface__transformers | tests/models/bit/test_image_processing_bit.py | {
"start": 1050,
"end": 3252
} | class ____:
def __init__(
self,
parent,
batch_size=7,
num_channels=3,
image_size=18,
min_resolution=30,
max_resolution=400,
do_resize=True,
size=None,
do_center_crop=True,
crop_size=None,
do_normalize=True,
image_mean=[0.48145466, 0.4578275, 0.40821073],
image_std=[0.26862954, 0.26130258, 0.27577711],
do_convert_rgb=True,
):
super().__init__()
size = size if size is not None else {"shortest_edge": 20}
crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
self.parent = parent
self.batch_size = batch_size
self.num_channels = num_channels
self.image_size = image_size
self.min_resolution = min_resolution
self.max_resolution = max_resolution
self.do_resize = do_resize
self.size = size
self.do_center_crop = do_center_crop
self.crop_size = crop_size
self.do_normalize = do_normalize
self.image_mean = image_mean
self.image_std = image_std
self.do_convert_rgb = do_convert_rgb
def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_convert_rgb": self.do_convert_rgb,
}
def expected_output_image_shape(self, images):
return self.num_channels, self.crop_size["height"], self.crop_size["width"]
def prepare_image_inputs(self, equal_resolution=False, numpify=False, torchify=False):
return prepare_image_inputs(
batch_size=self.batch_size,
num_channels=self.num_channels,
min_resolution=self.min_resolution,
max_resolution=self.max_resolution,
equal_resolution=equal_resolution,
numpify=numpify,
torchify=torchify,
)
@require_torch
@require_vision
| BitImageProcessingTester |
python | mlflow__mlflow | mlflow/genai/scheduled_scorers.py | {
"start": 348,
"end": 3649
} | class ____:
"""
A scheduled scorer configuration for automated monitoring of generative AI applications.
Scheduled scorers are used to automatically evaluate traces logged to MLflow experiments
by production applications. They are part of `Databricks Lakehouse Monitoring for GenAI
<https://docs.databricks.com/aws/en/generative-ai/agent-evaluation/monitoring>`_,
which helps track quality metrics like groundedness, safety, and guideline adherence
alongside operational metrics like volume, latency, and cost.
When configured, scheduled scorers run automatically in the background to evaluate
a sample of traces based on the specified sampling rate and filter criteria. The
Assessments are displayed in the Traces tab of the MLflow experiment and can be used to
identify quality issues in production.
Args:
scorer: The scorer function to run on sampled traces. Must be either a built-in
scorer (e.g., Safety, Correctness) or a function decorated with @scorer.
Subclasses of Scorer are not supported.
scheduled_scorer_name: The name for this scheduled scorer configuration
within the experiment. This name must be unique among all scheduled scorers
in the same experiment.
We recommend using the scorer's name (e.g., scorer.name) for consistency.
sample_rate: The fraction of traces to evaluate, between 0.0 and 1.0. For example,
0.1 means 10% of traces will be randomly selected for evaluation.
filter_string: An optional MLflow search_traces compatible filter string to apply
before sampling traces. Only traces matching this filter will be considered
for evaluation. Uses the same syntax as mlflow.search_traces().
Example:
.. code-block:: python
from mlflow.genai.scorers import Safety, scorer
from mlflow.genai.scheduled_scorers import ScorerScheduleConfig
# Using a built-in scorer
safety_config = ScorerScheduleConfig(
scorer=Safety(),
scheduled_scorer_name="production_safety",
sample_rate=0.2, # Evaluate 20% of traces
filter_string="trace.status = 'OK'",
)
# Using a custom scorer
@scorer
def response_length(outputs):
return len(str(outputs)) > 100
length_config = ScorerScheduleConfig(
scorer=response_length,
scheduled_scorer_name="adequate_length",
sample_rate=0.1, # Evaluate 10% of traces
filter_string="trace.status = 'OK'",
)
Note:
Scheduled scorers are executed automatically by Databricks and do not need to be
manually triggered. The Assessments appear in the Traces tab of the MLflow
experiment. Only traces logged directly to the experiment are monitored; traces
logged to individual runs within the experiment are not evaluated.
.. warning::
This API is in Beta and may change or be removed in a future release without warning.
"""
scorer: Scorer
scheduled_scorer_name: str
sample_rate: float
filter_string: str | None = None
| ScorerScheduleConfig |
python | numba__numba | numba/core/typing/builtins.py | {
"start": 10309,
"end": 10511
} | class ____(BinOp):
cases = [signature(types.boolean, types.boolean, types.boolean)]
cases += list(integer_binop_cases)
unsafe_casting = False
@infer_global(operator.and_)
| BitwiseLogicOperation |
python | ray-project__ray | python/ray/data/datasource/file_based_datasource.py | {
"start": 2547,
"end": 18370
} | class ____(Datasource):
"""File-based datasource for reading files.
Don't use this class directly. Instead, subclass it and implement `_read_stream()`.
"""
# If `_WRITE_FILE_PER_ROW` is `True`, this datasource calls `_write_row` and writes
# each row to a file. Otherwise, this datasource calls `_write_block` and writes
# each block to a file.
_WRITE_FILE_PER_ROW = False
_FILE_EXTENSIONS: Optional[Union[str, List[str]]] = None
# Number of threads for concurrent reading within each read task.
# If zero or negative, reading will be performed in the main thread.
_NUM_THREADS_PER_TASK = 0
def __init__(
self,
paths: Union[str, List[str]],
*,
filesystem: Optional["pyarrow.fs.FileSystem"] = None,
schema: Optional[Union[type, "pyarrow.lib.Schema"]] = None,
open_stream_args: Optional[Dict[str, Any]] = None,
meta_provider: BaseFileMetadataProvider = DefaultFileMetadataProvider(),
partition_filter: PathPartitionFilter = None,
partitioning: Partitioning = None,
ignore_missing_paths: bool = False,
shuffle: Optional[Union[Literal["files"], FileShuffleConfig]] = None,
include_paths: bool = False,
file_extensions: Optional[List[str]] = None,
):
super().__init__()
_check_pyarrow_version()
self._supports_distributed_reads = not _is_local_scheme(paths)
if not self._supports_distributed_reads and ray.util.client.ray.is_connected():
raise ValueError(
"Because you're using Ray Client, read tasks scheduled on the Ray "
"cluster can't access your local files. To fix this issue, store "
"files in cloud storage or a distributed filesystem like NFS."
)
self._schema = schema
self._data_context = DataContext.get_current()
self._open_stream_args = open_stream_args
self._meta_provider = meta_provider
self._partition_filter = partition_filter
self._partitioning = partitioning
self._ignore_missing_paths = ignore_missing_paths
self._include_paths = include_paths
# Need this property for lineage tracking
self._source_paths = paths
paths, self._filesystem = _resolve_paths_and_filesystem(paths, filesystem)
self._filesystem = RetryingPyFileSystem.wrap(
self._filesystem, retryable_errors=self._data_context.retried_io_errors
)
paths, file_sizes = map(
list,
zip(
*meta_provider.expand_paths(
paths,
self._filesystem,
partitioning,
ignore_missing_paths=ignore_missing_paths,
)
),
)
if ignore_missing_paths and len(paths) == 0:
raise ValueError(
"None of the provided paths exist. "
"The 'ignore_missing_paths' field is set to True."
)
if self._partition_filter is not None:
# Use partition filter to skip files which are not needed.
path_to_size = dict(zip(paths, file_sizes))
paths = self._partition_filter(paths)
file_sizes = [path_to_size[p] for p in paths]
if len(paths) == 0:
raise ValueError(
"No input files found to read. Please double check that "
"'partition_filter' field is set properly."
)
if file_extensions is not None:
path_to_size = dict(zip(paths, file_sizes))
paths = [p for p in paths if _has_file_extension(p, file_extensions)]
file_sizes = [path_to_size[p] for p in paths]
if len(paths) == 0:
raise ValueError(
"No input files found to read with the following file extensions: "
f"{file_extensions}. Please double check that "
"'file_extensions' field is set properly."
)
_validate_shuffle_arg(shuffle)
self._file_metadata_shuffler = None
if shuffle == "files":
self._file_metadata_shuffler = np.random.default_rng()
elif isinstance(shuffle, FileShuffleConfig):
# Create a NumPy random generator with a fixed seed if provided
self._file_metadata_shuffler = np.random.default_rng(shuffle.seed)
# Read tasks serialize `FileBasedDatasource` instances, and the list of paths
# can be large. To avoid slow serialization speeds, we store a reference to
# the paths rather than the paths themselves.
self._paths_ref = ray.put(paths)
self._file_sizes_ref = ray.put(file_sizes)
def _paths(self) -> List[str]:
return ray.get(self._paths_ref)
def _file_sizes(self) -> List[float]:
return ray.get(self._file_sizes_ref)
def estimate_inmemory_data_size(self) -> Optional[int]:
total_size = 0
for sz in self._file_sizes():
if sz is not None:
total_size += sz
return total_size
def get_read_tasks(
self, parallelism: int, per_task_row_limit: Optional[int] = None
) -> List[ReadTask]:
import numpy as np
open_stream_args = self._open_stream_args
partitioning = self._partitioning
paths = self._paths()
file_sizes = self._file_sizes()
if self._file_metadata_shuffler is not None:
files_metadata = list(zip(paths, file_sizes))
shuffled_files_metadata = [
files_metadata[i]
for i in self._file_metadata_shuffler.permutation(len(files_metadata))
]
paths, file_sizes = list(map(list, zip(*shuffled_files_metadata)))
filesystem = _wrap_s3_serialization_workaround(self._filesystem)
if open_stream_args is None:
open_stream_args = {}
def read_files(
read_paths: Iterable[str],
) -> Iterable[Block]:
nonlocal filesystem, open_stream_args, partitioning
fs = _unwrap_s3_serialization_workaround(filesystem)
for read_path in read_paths:
partitions: Dict[str, str] = {}
if partitioning is not None:
parse = PathPartitionParser(partitioning)
partitions = parse(read_path)
with RetryingContextManager(
self._open_input_source(fs, read_path, **open_stream_args),
context=self._data_context,
) as f:
for block in iterate_with_retry(
lambda: self._read_stream(f, read_path),
description="read stream iteratively",
match=self._data_context.retried_io_errors,
):
if partitions:
block = _add_partitions(block, partitions)
if self._include_paths:
block_accessor = BlockAccessor.for_block(block)
block = block_accessor.fill_column("path", read_path)
yield block
def create_read_task_fn(read_paths, num_threads):
def read_task_fn():
nonlocal num_threads, read_paths
# TODO: We should refactor the code so that we can get the results in
# order even when using multiple threads.
if self._data_context.execution_options.preserve_order:
num_threads = 0
if num_threads > 0:
num_threads = min(num_threads, len(read_paths))
logger.debug(
f"Reading {len(read_paths)} files with {num_threads} threads."
)
yield from make_async_gen(
iter(read_paths),
read_files,
num_workers=num_threads,
preserve_ordering=True,
)
else:
logger.debug(f"Reading {len(read_paths)} files.")
yield from read_files(read_paths)
return read_task_fn
# fix https://github.com/ray-project/ray/issues/24296
parallelism = min(parallelism, len(paths))
read_tasks = []
split_paths = np.array_split(paths, parallelism)
split_file_sizes = np.array_split(file_sizes, parallelism)
for read_paths, file_sizes in zip(split_paths, split_file_sizes):
if len(read_paths) <= 0:
continue
meta = self._meta_provider(
read_paths,
rows_per_file=self._rows_per_file(),
file_sizes=file_sizes,
)
read_task_fn = create_read_task_fn(read_paths, self._NUM_THREADS_PER_TASK)
read_task = ReadTask(
read_task_fn, meta, per_task_row_limit=per_task_row_limit
)
read_tasks.append(read_task)
return read_tasks
def resolve_compression(
self, path: str, open_args: Dict[str, Any]
) -> Optional[str]:
"""Resolves the compression format for a stream.
Args:
path: The file path to resolve compression for.
open_args: kwargs passed to
`pyarrow.fs.FileSystem.open_input_stream <https://arrow.apache.org/docs/python/generated/pyarrow.fs.FileSystem.html#pyarrow.fs.FileSystem.open_input_stream>`_
when opening input files to read.
Returns:
The compression format (e.g., "gzip", "snappy", "bz2") or None if
no compression is detected or specified.
"""
compression = open_args.get("compression", None)
if compression is None:
compression = infer_compression(path)
return compression
def _resolve_buffer_size(self, open_args: Dict[str, Any]) -> Optional[int]:
buffer_size = open_args.pop("buffer_size", None)
if buffer_size is None:
buffer_size = self._data_context.streaming_read_buffer_size
return buffer_size
def _file_to_snappy_stream(
self,
file: "pyarrow.NativeFile",
filesystem: "RetryingPyFileSystem",
) -> "pyarrow.PythonFile":
import pyarrow as pa
import snappy
from pyarrow.fs import HadoopFileSystem
stream = io.BytesIO()
if isinstance(filesystem.unwrap(), HadoopFileSystem):
snappy.hadoop_snappy.stream_decompress(src=file, dst=stream)
else:
snappy.stream_decompress(src=file, dst=stream)
stream.seek(0)
return pa.PythonFile(stream, mode="r")
def _open_input_source(
self,
filesystem: "RetryingPyFileSystem",
path: str,
**open_args,
) -> "pyarrow.NativeFile":
"""Opens a source path for reading and returns the associated Arrow NativeFile.
The default implementation opens the source path as a sequential input stream,
using self._data_context.streaming_read_buffer_size as the buffer size if none
is given by the caller.
Implementations that do not support streaming reads (e.g. that require random
access) should override this method.
"""
compression = self.resolve_compression(path, open_args)
buffer_size = self._resolve_buffer_size(open_args)
if compression == "snappy":
# Arrow doesn't support streaming Snappy decompression since the canonical
# C++ Snappy library doesn't natively support streaming decompression. We
# works around this by manually decompressing the file with python-snappy.
open_args["compression"] = None
file = filesystem.open_input_stream(
path, buffer_size=buffer_size, **open_args
)
return self._file_to_snappy_stream(file, filesystem)
open_args["compression"] = compression
return filesystem.open_input_stream(path, buffer_size=buffer_size, **open_args)
def _rows_per_file(self):
"""Returns the number of rows per file, or None if unknown."""
return None
def _read_stream(self, f: "pyarrow.NativeFile", path: str) -> Iterator[Block]:
"""Streaming read a single file.
This method should be implemented by subclasses.
"""
raise NotImplementedError(
"Subclasses of FileBasedDatasource must implement _read_stream()."
)
@property
def supports_distributed_reads(self) -> bool:
return self._supports_distributed_reads
def _add_partitions(
data: Union["pyarrow.Table", "pd.DataFrame"], partitions: Dict[str, Any]
) -> Union["pyarrow.Table", "pd.DataFrame"]:
import pandas as pd
import pyarrow as pa
assert isinstance(data, (pa.Table, pd.DataFrame))
if isinstance(data, pa.Table):
return _add_partitions_to_table(data, partitions)
if isinstance(data, pd.DataFrame):
return _add_partitions_to_dataframe(data, partitions)
def _add_partitions_to_table(
table: "pyarrow.Table", partitions: Dict[str, Any]
) -> "pyarrow.Table":
import pyarrow as pa
import pyarrow.compute as pc
column_names = set(table.column_names)
for field, value in partitions.items():
column = pa.array([value] * len(table))
if field in column_names:
# TODO: Handle cast error.
column_type = table.schema.field(field).type
column = column.cast(column_type)
values_are_equal = pc.all(pc.equal(column, table[field]))
values_are_equal = values_are_equal.as_py()
if not values_are_equal:
raise ValueError(
f"Partition column {field} exists in table data, but partition "
f"value '{value}' is different from in-data values: "
f"{table[field].unique().to_pylist()}."
)
i = table.schema.get_field_index(field)
table = table.set_column(i, field, column)
else:
table = table.append_column(field, column)
return table
def _add_partitions_to_dataframe(
df: "pd.DataFrame", partitions: Dict[str, Any]
) -> "pd.DataFrame":
import pandas as pd
for field, value in partitions.items():
column = pd.Series(data=[value] * len(df), name=field)
if field in df:
column = column.astype(df[field].dtype)
mask = df[field].notna()
if not df[field][mask].equals(column[mask]):
raise ValueError(
f"Partition column {field} exists in table data, but partition "
f"value '{value}' is different from in-data values: "
f"{list(df[field].unique())}."
)
df[field] = column
return df
def _wrap_s3_serialization_workaround(filesystem: "pyarrow.fs.FileSystem"):
# This is needed because pa.fs.S3FileSystem assumes pa.fs is already
# imported before deserialization. See #17085.
import pyarrow as pa
import pyarrow.fs
base_fs = filesystem
if isinstance(filesystem, RetryingPyFileSystem):
base_fs = filesystem.unwrap()
if isinstance(base_fs, pa.fs.S3FileSystem):
return _S3FileSystemWrapper(filesystem)
return filesystem
def _unwrap_s3_serialization_workaround(
filesystem: Union["pyarrow.fs.FileSystem", "_S3FileSystemWrapper"],
):
if isinstance(filesystem, _S3FileSystemWrapper):
filesystem = filesystem.unwrap()
return filesystem
| FileBasedDatasource |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 83455,
"end": 84020
} | class ____(sgqlc.types.Enum):
"""The reasons a piece of content can be reported or minimized.
Enumeration Choices:
* `ABUSE`: An abusive or harassing piece of content
* `DUPLICATE`: A duplicated piece of content
* `OFF_TOPIC`: An irrelevant piece of content
* `OUTDATED`: An outdated piece of content
* `RESOLVED`: The content has been resolved
* `SPAM`: A spammy piece of content
"""
__schema__ = github_schema
__choices__ = ("ABUSE", "DUPLICATE", "OFF_TOPIC", "OUTDATED", "RESOLVED", "SPAM")
| ReportedContentClassifiers |
python | getsentry__sentry | src/sentry/grouping/fingerprinting/rules.py | {
"start": 530,
"end": 1067
} | class ____(TypedDict):
# Each matcher is a list of [<name of event attribute to match>, <value to match>]
matchers: list[list[str]]
fingerprint: list[str]
attributes: NotRequired[FingerprintRuleAttributes]
is_builtin: NotRequired[bool]
# This is just `FingerprintRuleConfig` with an extra `text` entry and with `attributes` required
# rather than optional. (Unfortunately, you can't overwrite lack of required-ness when subclassing a
# TypedDict, so we have to create the full type independently.)
| FingerprintRuleConfig |
python | scipy__scipy | scipy/special/tests/test_erfinv.py | {
"start": 119,
"end": 3059
} | class ____:
def test_compliment(self):
# Test erfcinv(1 - x) == erfinv(x)
x = np.linspace(-1, 1, 101)
assert_allclose(sc.erfcinv(1 - x), sc.erfinv(x), rtol=0, atol=1e-15)
def test_literal_values(self):
# The expected values were calculated with mpmath:
#
# import mpmath
# mpmath.mp.dps = 200
# for y in [0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]:
# x = mpmath.erfinv(y)
# print(x)
#
y = np.array([0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9])
actual = sc.erfinv(y)
expected = [
0.0,
0.08885599049425769,
0.1791434546212917,
0.2724627147267543,
0.37080715859355795,
0.4769362762044699,
0.5951160814499948,
0.7328690779592167,
0.9061938024368233,
1.1630871536766743,
]
assert_allclose(actual, expected, rtol=0, atol=1e-15)
@pytest.mark.parametrize(
'f, x, y',
[
(sc.erfinv, -1, -np.inf),
(sc.erfinv, 0, 0),
(sc.erfinv, 1, np.inf),
(sc.erfinv, -100, np.nan),
(sc.erfinv, 100, np.nan),
(sc.erfcinv, 0, np.inf),
(sc.erfcinv, 1, -0.0),
(sc.erfcinv, 2, -np.inf),
(sc.erfcinv, -100, np.nan),
(sc.erfcinv, 100, np.nan),
],
ids=[
'erfinv at lower bound',
'erfinv at midpoint',
'erfinv at upper bound',
'erfinv below lower bound',
'erfinv above upper bound',
'erfcinv at lower bound',
'erfcinv at midpoint',
'erfcinv at upper bound',
'erfcinv below lower bound',
'erfcinv above upper bound',
]
)
def test_domain_bounds(self, f, x, y):
assert_equal(f(x), y)
def test_erfinv_asympt(self):
# regression test for gh-12758: erfinv(x) loses precision at small x
# expected values precomputed with mpmath:
# >>> mpmath.mp.dps = 100
# >>> expected = [float(mpmath.erfinv(t)) for t in x]
x = np.array([1e-20, 1e-15, 1e-14, 1e-10, 1e-8, 0.9e-7, 1.1e-7, 1e-6])
expected = np.array([8.86226925452758e-21,
8.862269254527581e-16,
8.86226925452758e-15,
8.862269254527581e-11,
8.86226925452758e-09,
7.97604232907484e-08,
9.74849617998037e-08,
8.8622692545299e-07])
assert_allclose(sc.erfinv(x), expected,
rtol=1e-15)
# also test the roundtrip consistency
assert_allclose(sc.erf(sc.erfinv(x)),
x,
rtol=5e-15)
| TestInverseErrorFunction |
python | langchain-ai__langchain | libs/langchain/langchain_classic/chains/sql_database/query.py | {
"start": 552,
"end": 633
} | class ____(TypedDict):
"""Input for a SQL Chain."""
question: str
| SQLInput |
python | Netflix__metaflow | metaflow/plugins/env_escape/communication/bytestream.py | {
"start": 0,
"end": 1866
} | class ____(object):
"""Basic interface that reads and writes bytes"""
def read(self, count, timeout=None):
"""
Reads exactly count bytes from the stream. This call is blocking until count bytes
are read or an error happens
This call returns a byte array or EOFError if there was a problem
reading.
Parameters
----------
count : int
Exact number of characters to read
Returns
-------
bytes
Content read from the stream
Raises
------
EOFError
Any issue with reading will be raised as a EOFError
"""
raise NotImplementedError
def write(self, data):
"""
Writes all the data to the stream
This call is blocking until all data is written. EOFError will be
raised if there is a problem writing to the stream
Parameters
----------
data : bytes
Data to write out
Raises
------
EOFError
Any issue with writing will be raised as a EOFError
"""
raise NotImplementedError
def close(self):
"""
Closes the stream releasing all system resources
Once closed, the stream cannot be re-opened or re-used. If a
stream is already closed, this operation will have no effect
"""
raise NotImplementedError()
@property
def is_closed(self):
"""
Returns True if the stream is closed or False otherwise
Returns
-------
bool
True if closed or False otherwise
"""
raise NotImplementedError()
def fileno(self):
raise NotImplementedError()
def __enter__(self):
return self
def __exit__(self, *exc_info):
self.close()
| ByteStream |
python | tensorflow__tensorflow | tensorflow/python/ops/parallel_for/xla_control_flow_ops_test.py | {
"start": 1651,
"end": 4907
} | class ____(PForTestCase):
def __init__(self, method_name="runTest"):
super(PForTest, self).__init__(method_name)
context.context().enable_xla_devices()
def test_xla_einsum(self):
num_loop = 10
x_series = random_ops.random_uniform([num_loop, 9, 9])
y_series = random_ops.random_uniform([num_loop, 9, 1])
def loop_fn(i):
x = array_ops.gather(x_series, 0) # invariant.
y = array_ops.gather(y_series, 0) # invariant.
x_i = array_ops.gather(x_series, i)
y_i = array_ops.gather(y_series, i)
z1 = xla_ops.einsum(x_i, y, "ab,bc->ac")
z2 = xla_ops.einsum(x, y_i, "ab,bc->ac")
z3 = xla_ops.einsum(x, y, "ab,bc->ac")
z4 = xla_ops.einsum(x_i, y_i, "ab,bc->ac")
z5 = xla_ops.einsum(y_i, x_i, "cd,ce->de") # Includes transpose.
outputs = [z1, z2, z3, z4, z5]
return outputs
self._test_loop_fn(loop_fn, num_loop)
def test_xla(self):
def compute(x):
return math_ops.reduce_mean(x, axis=0, keepdims=True)
def vectorized_compute(x):
return pfor_control_flow_ops.vectorized_map(compute, x)
result = xla.compile(
vectorized_compute, inputs=[array_ops.ones((10, 5, 3))])
self.run_and_assert_equal(result, array_ops.ones((10, 1, 3)))
def test_function_jit_compile(self):
def compute(x):
return math_ops.reduce_mean(x, axis=0, keepdims=True)
@def_function.function(jit_compile=True)
def vectorized_compute(x):
return pfor_control_flow_ops.vectorized_map(compute, x)
result = vectorized_compute(array_ops.ones((10, 5, 3)))
self.run_and_assert_equal(result, array_ops.ones((10, 1, 3)))
def test_xla_while_loop(self):
def compute(x):
return math_ops.reduce_mean(x, axis=0, keepdims=True)
def vectorized_compute(x, i):
inp = array_ops.gather(x, i)
output = pfor_control_flow_ops.vectorized_map(compute, inp)
output.set_shape([5, 1])
return output
def while_compute(x):
return while_loop.while_loop_v2(
lambda i, _: i < 10,
lambda i, y: (i + 1, y + vectorized_compute(x, i)),
(0, array_ops.zeros([5, 1])))[1]
result = xla.compile(while_compute, inputs=[array_ops.ones((10, 5, 3))])
expected = array_ops.ones([5, 1]) * 10
self.run_and_assert_equal(expected, result)
def test_reduce_mean(self):
x = random_ops.random_uniform([8, 3])
@def_function.function(jit_compile=True)
def f():
def loop_fn(i, pfor_config):
x_i = array_ops.gather(x, i)
return x_i - pfor_config.reduce_mean(x_i)
return pfor_control_flow_ops.pfor(loop_fn, 8)
output = f()
ans = x - math_ops.reduce_mean(x, axis=0)
output_val, ans_val = self.evaluate([output, ans])
self.assertAllClose(ans_val, output_val)
def _make_unstacked(cond, body, pfor_config):
def _cond(*args):
return math_ops.reduce_any(pfor_config.reduce_concat(args[0]))
def _body(*args):
not_done = args[0]
args = args[1:]
not_done = math_ops.logical_and(not_done, cond(*args))
outputs = body(*args)
return (not_done,) + tuple(
array_ops.where_v2(not_done, x, y) for x, y in zip(outputs, args))
return _cond, _body
@test_util.run_all_in_graph_and_eager_modes
| PForTest |
python | facelessuser__soupsieve | tests/test_level3/test_first_of_type.py | {
"start": 58,
"end": 1997
} | class ____(util.TestCase):
"""Test first of type selectors."""
def test_first_of_type_at_start(self):
"""Test first of type which is also the first sibling."""
markup = """
<body>
<p id="0"></p>
<p id="1"></p>
<span id="2"></span>
<span id="3"></span>
<span id="4"></span>
<span id="5"></span>
<span id="6"></span>
<p id="7"></p>
<p id="8"></p>
<p id="9"></p>
<p id="10"></p>
<span id="11"></span>
</body>
"""
self.assert_selector(
markup,
"p:first-of-type",
['0'],
flags=util.HTML
)
def test_first_of_type_at_middle(self):
"""Test first of type that is not the first sibling."""
markup = """
<body>
<p id="0"></p>
<p id="1"></p>
<span id="2"></span>
<span id="3"></span>
<span id="4"></span>
<span id="5"></span>
<span id="6"></span>
<p id="7"></p>
<p id="8"></p>
<p id="9"></p>
<p id="10"></p>
<span id="11"></span>
</body>
"""
self.assert_selector(
markup,
"span:first-of-type",
['2'],
flags=util.HTML
)
def test_any_first_of_type(self):
"""Test any first of type."""
markup = """
<body>
<p id="0"></p>
<p id="1"></p>
<span id="2"></span>
<span id="3"></span>
<span id="4"></span>
<span id="5"></span>
<span id="6"></span>
<p id="7"></p>
<p id="8"></p>
<p id="9"></p>
<p id="10"></p>
<span id="11"></span>
</body>
"""
self.assert_selector(
markup,
"body :first-of-type",
['0', '2'],
flags=util.HTML
)
| TestFirstOfType |
python | numba__numba | numba/typed/listobject.py | {
"start": 1800,
"end": 2179
} | class ____(models.StructModel):
def __init__(self, dmm, fe_type):
members = [
('size', types.intp), # the size of the iteration space
('parent', fe_type.parent), # the parent list
('index', types.EphemeralPointer(types.intp)), # current index
]
super(ListIterModel, self).__init__(dmm, fe_type, members)
| ListIterModel |
python | cookiecutter__cookiecutter | tests/test-extensions/local_extension/local_extensions/main.py | {
"start": 176,
"end": 613
} | class ____(Extension):
"""Simple jinja2 extension for cookiecutter test purposes."""
def __init__(self, environment: Environment) -> None:
"""Foobar Extension Constructor."""
super().__init__(environment)
environment.filters['foobar'] = lambda v: v * 2
@simple_filter
def simplefilterextension(v: str) -> str:
"""Provide a simple function-based filter extension."""
return v.upper()
| FoobarExtension |
python | walkccc__LeetCode | solutions/1504. Count Submatrices With All Ones/1504.py | {
"start": 0,
"end": 477
} | class ____:
def numSubmat(self, mat: list[list[int]]) -> int:
m = len(mat)
n = len(mat[0])
ans = 0
for baseRow in range(m):
row = [1] * n
for i in range(baseRow, m):
for j in range(n):
row[j] &= mat[i][j]
ans += self._count(row)
return ans
def _count(self, row: list[int]) -> int:
res = 0
length = 0
for num in row:
length = 0 if num == 0 else length + 1
res += length
return res
| Solution |
python | huggingface__transformers | src/transformers/models/bert/modeling_bert.py | {
"start": 55825,
"end": 58789
} | class ____(BertPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.bert = BertModel(config, add_pooling_layer=False)
self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
# Initialize weights and apply final processing
self.post_init()
@can_return_tuple
@auto_docstring
def forward(
self,
input_ids: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
token_type_ids: Optional[torch.Tensor] = None,
position_ids: Optional[torch.Tensor] = None,
inputs_embeds: Optional[torch.Tensor] = None,
start_positions: Optional[torch.Tensor] = None,
end_positions: Optional[torch.Tensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> Union[tuple[torch.Tensor], QuestionAnsweringModelOutput]:
outputs = self.bert(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
inputs_embeds=inputs_embeds,
return_dict=True,
**kwargs,
)
sequence_output = outputs[0]
logits = self.qa_outputs(sequence_output)
start_logits, end_logits = logits.split(1, dim=-1)
start_logits = start_logits.squeeze(-1).contiguous()
end_logits = end_logits.squeeze(-1).contiguous()
total_loss = None
if start_positions is not None and end_positions is not None:
# If we are on multi-GPU, split add a dimension
if len(start_positions.size()) > 1:
start_positions = start_positions.squeeze(-1)
if len(end_positions.size()) > 1:
end_positions = end_positions.squeeze(-1)
# sometimes the start/end positions are outside our model inputs, we ignore these terms
ignored_index = start_logits.size(1)
start_positions = start_positions.clamp(0, ignored_index)
end_positions = end_positions.clamp(0, ignored_index)
loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
start_loss = loss_fct(start_logits, start_positions)
end_loss = loss_fct(end_logits, end_positions)
total_loss = (start_loss + end_loss) / 2
return QuestionAnsweringModelOutput(
loss=total_loss,
start_logits=start_logits,
end_logits=end_logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
__all__ = [
"BertForMaskedLM",
"BertForMultipleChoice",
"BertForNextSentencePrediction",
"BertForPreTraining",
"BertForQuestionAnswering",
"BertForSequenceClassification",
"BertForTokenClassification",
"BertLayer",
"BertLMHeadModel",
"BertModel",
"BertPreTrainedModel",
]
| BertForQuestionAnswering |
python | kamyu104__LeetCode-Solutions | Python/set-mismatch.py | {
"start": 1103,
"end": 1454
} | class ____(object):
def findErrorNums(self, nums):
"""
:type nums: List[int]
:rtype: List[int]
"""
N = len(nums)
x_minus_y = sum(nums) - N*(N+1)//2
x_plus_y = (sum(x*x for x in nums) - N*(N+1)*(2*N+1)/6) // x_minus_y
return (x_plus_y+x_minus_y) // 2, (x_plus_y-x_minus_y) // 2
| Solution3 |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 931344,
"end": 931623
} | class ____(
sgqlc.types.Type,
Node,
AuditEntry,
RepositoryAuditEntryData,
OrganizationAuditEntryData,
TopicAuditEntryData,
):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ()
| RepoAddTopicAuditEntry |
python | gevent__gevent | src/greentest/3.10/test_socket.py | {
"start": 15552,
"end": 16634
} | class ____(ThreadedTCPSocketTest):
"""Socket tests for client-server connection.
self.cli_conn is a client socket connected to the server. The
setUp() method guarantees that it is connected to the server.
"""
def __init__(self, methodName='runTest'):
ThreadedTCPSocketTest.__init__(self, methodName=methodName)
def setUp(self):
ThreadedTCPSocketTest.setUp(self)
# Indicate explicitly we're ready for the client thread to
# proceed and then perform the blocking call to accept
self.serverExplicitReady()
conn, addr = self.serv.accept()
self.cli_conn = conn
def tearDown(self):
self.cli_conn.close()
self.cli_conn = None
ThreadedTCPSocketTest.tearDown(self)
def clientSetUp(self):
ThreadedTCPSocketTest.clientSetUp(self)
self.cli.connect((HOST, self.port))
self.serv_conn = self.cli
def clientTearDown(self):
self.serv_conn.close()
self.serv_conn = None
ThreadedTCPSocketTest.clientTearDown(self)
| SocketConnectedTest |
python | huggingface__transformers | src/transformers/models/sam2_video/modeling_sam2_video.py | {
"start": 27807,
"end": 28554
} | class ____(ModelOutput):
r"""
object_ids (`list[int]`, *optional*):
List of object IDs being tracked in the current frame.
pred_masks (`torch.FloatTensor` of shape `(batch_size, num_masks, height, width)`):
The predicted masks stored at the model's resolution.
object_score_logits (`torch.FloatTensor` of shape `(batch_size,)`, *optional*):
Logits for the object scores, indicating if objects are present.
frame_idx (`int`):
The frame index of the video.
"""
object_ids: Optional[list[int]] = None
pred_masks: Optional[torch.FloatTensor] = None
object_score_logits: Optional[torch.FloatTensor] = None
frame_idx: Optional[int] = None
@auto_docstring
| Sam2VideoSegmentationOutput |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/operators/datastore.py | {
"start": 10197,
"end": 12624
} | class ____(GoogleCloudBaseOperator):
"""
Allocate IDs for incomplete keys. Return list of keys.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudDatastoreAllocateIdsOperator`
.. seealso::
https://cloud.google.com/datastore/docs/reference/rest/v1/projects/allocateIds
:param partial_keys: a list of partial keys.
:param project_id: Google Cloud project ID against which to make the request.
:param gcp_conn_id: The connection ID to use connecting to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
"partial_keys",
"impersonation_chain",
)
operator_extra_links = (CloudDatastoreEntitiesLink(),)
def __init__(
self,
*,
partial_keys: list,
project_id: str = PROVIDE_PROJECT_ID,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.partial_keys = partial_keys
self.gcp_conn_id = gcp_conn_id
self.project_id = project_id
self.impersonation_chain = impersonation_chain
@property
def extra_links_params(self) -> dict[str, Any]:
return {
"project_id": self.project_id,
}
def execute(self, context: Context) -> list:
hook = DatastoreHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
keys = hook.allocate_ids(
partial_keys=self.partial_keys,
project_id=self.project_id,
)
CloudDatastoreEntitiesLink.persist(context=context)
return keys
| CloudDatastoreAllocateIdsOperator |
python | tensorflow__tensorflow | tensorflow/python/distribute/input_lib_type_spec_test.py | {
"start": 23819,
"end": 31293
} | class ____(test.TestCase,
parameterized.TestCase):
@combinations.generate(
combinations.combine(
mode=["eager"],
tf_api_version=2,
distribution=[
strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
strategy_combinations.central_storage_strategy_with_gpu_and_cpu,
strategy_combinations.multi_worker_mirrored_2x2_gpu,
],
enable_get_next_as_optional=[True, False]))
def testTypeSpec(self, distribution, enable_get_next_as_optional):
ctx = distribute_lib.InputContext()
batch_size = ctx.get_per_replica_batch_size(8)
# Use 20 which isn't divisible by 8 to test partial batch behavior.
row_lengths = np.mod(np.arange(20), 4).astype(np.int64)
ragged_tensor = ragged_tensor_lib.RaggedTensor.from_row_lengths(
np.repeat(np.arange(20, dtype=np.float32), row_lengths), row_lengths)
dataset = dataset_ops.DatasetV2.from_tensor_slices({
"dense": ragged_tensor.to_tensor(),
"ragged": ragged_tensor,
"sparse": ragged_tensor.to_sparse(),
})
dataset = dataset.shard(ctx.num_input_pipelines, ctx.input_pipeline_id)
dataset = dataset.batch(batch_size)
distribution.extended.experimental_enable_get_next_as_optional = (
enable_get_next_as_optional)
dist_dataset = distribution.experimental_distribute_dataset(dataset)
with distribution.scope():
iterator = iter(dist_dataset)
_check_type_spec_structure(iterator)
spec = iterator._type_spec
self.assertEqual(spec._input_workers, iterator._input_workers)
self.assertEqual(
spec._element_spec, {
"sparse":
values.PerReplicaSpec(
sparse_tensor.SparseTensorSpec(
tensor_shape.TensorShape([None, 3]), dtypes.float32),
sparse_tensor.SparseTensorSpec(
tensor_shape.TensorShape([None, 3]), dtypes.float32)),
"dense":
values.PerReplicaSpec(
tensor_spec.TensorSpec(
shape=(None, 3), dtype=dtypes.float32, name=None),
tensor_spec.TensorSpec(
shape=(None, 3), dtype=dtypes.float32, name=None)),
"ragged":
values.PerReplicaSpec(
ragged_tensor_lib.RaggedTensorSpec(
tensor_shape.TensorShape([None, None]), dtypes.float32,
1, dtypes.int64),
ragged_tensor_lib.RaggedTensorSpec(
tensor_shape.TensorShape([None, None]), dtypes.float32,
1, dtypes.int64))
})
@combinations.generate(
combinations.combine(
mode=["eager"],
tf_api_version=2,
distribution=[
strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
strategy_combinations.tpu_strategy,
strategy_combinations.central_storage_strategy_with_gpu_and_cpu,
strategy_combinations.multi_worker_mirrored_2x2_gpu,
],
enable_get_next_as_optional=[True, False]))
def testTypeSpecRoundTrip(self, distribution, enable_get_next_as_optional):
ctx = distribute_lib.InputContext()
batch_size = ctx.get_per_replica_batch_size(8)
# Use 20 which isn't divisible by 8 to test partial batch behavior.
row_lengths = np.mod(np.arange(20), 4).astype(np.int64)
ragged_tensor = ragged_tensor_lib.RaggedTensor.from_row_lengths(
np.repeat(np.arange(20, dtype=np.float32), row_lengths), row_lengths)
dataset = dataset_ops.DatasetV2.from_tensor_slices({
"dense": ragged_tensor.to_tensor(),
"ragged": ragged_tensor,
"sparse": ragged_tensor.to_sparse(),
})
dataset = dataset.shard(ctx.num_input_pipelines, ctx.input_pipeline_id)
dataset = dataset.batch(batch_size)
distribution.extended.experimental_enable_get_next_as_optional = (
enable_get_next_as_optional)
if isinstance(distribution,
(tpu_strategy.TPUStrategyV2, tpu_strategy.TPUStrategy)):
# TPUStrategy does not support distributed datasets with device prefetch
# when using sparse or ragged tensors.
options = distribute_lib.InputOptions(experimental_fetch_to_device=False)
else:
options = None
dist_dataset = distribution.experimental_distribute_dataset(
dataset, options)
with distribution.scope():
iterator = iter(dist_dataset)
_check_type_spec_structure(iterator)
spec = iterator._type_spec
tensor_list = spec._to_components(iterator)
re_iterator = spec._from_components(tensor_list)
self.assertEqual(iterator._input_workers, re_iterator._input_workers)
self.assertAllEqual(iterator._iterators, re_iterator._iterators)
@combinations.generate(
combinations.combine(
mode=["eager"],
tf_api_version=2,
distribution=[
strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
strategy_combinations.tpu_strategy,
],
enable_get_next_as_optional=[True, False]))
def testDoesNotTriggerFunctionTracing(self, distribution,
enable_get_next_as_optional):
trace_count = [0]
@def_function.function
def f(iterator):
trace_count[0] += 1
counter = np.int64(0)
for _ in range(5):
next(iterator)
counter += 1
return counter
ctx = distribute_lib.InputContext()
batch_size = ctx.get_per_replica_batch_size(8)
# Use 20 which isn't divisible by 8 to test partial batch behavior.
row_lengths = np.mod(np.arange(50), 4).astype(np.int64)
ragged_tensor = ragged_tensor_lib.RaggedTensor.from_row_lengths(
np.repeat(np.arange(50, dtype=np.float32), row_lengths), row_lengths)
dataset = dataset_ops.DatasetV2.from_tensor_slices({
"dense": ragged_tensor.to_tensor(),
"ragged": ragged_tensor,
"sparse": ragged_tensor.to_sparse(),
})
dataset = dataset.shard(ctx.num_input_pipelines, ctx.input_pipeline_id)
dataset = dataset.batch(batch_size)
distribution.extended.experimental_enable_get_next_as_optional = (
enable_get_next_as_optional)
if isinstance(distribution,
(tpu_strategy.TPUStrategyV2, tpu_strategy.TPUStrategy)):
# TPUStrategy does not support distributed datasets with device prefetch
# when using sparse or ragged tensors.
options = distribute_lib.InputOptions(experimental_fetch_to_device=False)
else:
options = None
dist_dataset = distribution.experimental_distribute_dataset(
dataset, options)
with distribution.scope():
for _ in range(3):
iterator = iter(dist_dataset)
_check_type_spec_structure(iterator)
counter = f(iterator)
self.assertEqual(trace_count[0], 1)
self.assertEqual(counter, 5)
def _check_type_spec_structure(x):
"""Verifies that `x` has the same structure as its `TypeSpec`."""
if isinstance(x, composite_tensor.CompositeTensor):
nest.assert_same_structure(x, x._type_spec, expand_composites=True)
def _create_text_file(fname, num_lines):
with open(fname, "w") as f:
for i in range(num_lines):
f.write("%d\n" % i)
if __name__ == "__main__":
test_util.main()
| RaggedTensorDistributedIteratorTest |
python | coleifer__peewee | tests/dataset.py | {
"start": 518,
"end": 670
} | class ____(TestModel):
user = ForeignKeyField(User)
content = TextField()
timestamp = DateTimeField()
status = IntegerField(default=1)
| Note |
python | ansible__ansible | lib/ansible/module_utils/facts/network/hurd.py | {
"start": 2962,
"end": 3066
} | class ____(NetworkCollector):
_platform = 'GNU'
_fact_class = HurdPfinetNetwork
| HurdNetworkCollector |
python | django-extensions__django-extensions | tests/test_admin_widgets.py | {
"start": 149,
"end": 1559
} | class ____(TestCase):
def test_widget_works(self):
name = models.Name.objects.create(name="Name")
person = models.Person.objects.create(
name=name,
age=30,
)
club = models.Club.objects.create(
name="Club",
)
membership = models.Membership.objects.create(club=club, person=person)
widget = widgets.ForeignKeySearchInput(
models.Membership._meta.get_field("person").remote_field,
["person__name"],
)
label = widget.label_for_value(membership.pk)
self.assertEqual(
Truncator(person).words(14, truncate="..."),
label,
)
# Just making sure rendering the widget doesn't cause any issue
widget.render("person", person.pk)
widget.render("person", None)
# Check media to make sure rendering media doesn't cause any issue
self.assertListEqual(
[
"/static/django_extensions/js/jquery.bgiframe.js",
"/static/django_extensions/js/jquery.ajaxQueue.js",
"/static/django_extensions/js/jquery.autocomplete.js",
],
widget.media._js,
)
self.assertListEqual(
["/static/django_extensions/css/jquery.autocomplete.css"],
list(widget.media._css["all"]),
)
| ForeignKeySearchInputTestCase |
python | facebook__pyre-check | pyre_extensions/type_variable_operators.py | {
"start": 525,
"end": 612
} | class ____(metaclass=ParameterSpecificationComponentMeta):
pass
| PositionalArgumentsOf |
python | ray-project__ray | python/ray/dashboard/modules/reporter/tests/test_gpu_providers.py | {
"start": 3958,
"end": 15311
} | class ____(unittest.TestCase):
"""Test NvidiaGpuProvider class."""
def setUp(self):
"""Set up test fixtures."""
self.provider = NvidiaGpuProvider()
def test_get_provider_name(self):
"""Test provider name."""
self.assertEqual(self.provider.get_provider_name(), GpuProviderType.NVIDIA)
@patch("ray._private.thirdparty.pynvml", create=True)
def test_is_available_success(self, mock_pynvml):
"""Test is_available when NVIDIA GPU is available."""
mock_pynvml.nvmlInit.return_value = None
mock_pynvml.nvmlShutdown.return_value = None
# Mock sys.modules to make the import work
import sys
original_modules = sys.modules.copy()
sys.modules["ray._private.thirdparty.pynvml"] = mock_pynvml
try:
self.assertTrue(self.provider.is_available())
mock_pynvml.nvmlInit.assert_called_once()
mock_pynvml.nvmlShutdown.assert_called_once()
finally:
# Restore original modules
sys.modules.clear()
sys.modules.update(original_modules)
@patch("ray._private.thirdparty.pynvml", create=True)
def test_is_available_failure(self, mock_pynvml):
"""Test is_available when NVIDIA GPU is not available."""
mock_pynvml.nvmlInit.side_effect = Exception("NVIDIA driver not found")
# Mock sys.modules to make the import work but nvmlInit fail
import sys
original_modules = sys.modules.copy()
sys.modules["ray._private.thirdparty.pynvml"] = mock_pynvml
try:
self.assertFalse(self.provider.is_available())
finally:
# Restore original modules
sys.modules.clear()
sys.modules.update(original_modules)
@patch("ray._private.thirdparty.pynvml", create=True)
def test_initialize_success(self, mock_pynvml):
"""Test successful initialization."""
# Ensure provider starts fresh
self.provider._initialized = False
mock_pynvml.nvmlInit.return_value = None
# Mock sys.modules to make the import work
import sys
original_modules = sys.modules.copy()
sys.modules["ray._private.thirdparty.pynvml"] = mock_pynvml
try:
self.assertTrue(self.provider._initialize())
self.assertTrue(self.provider._initialized)
mock_pynvml.nvmlInit.assert_called_once()
finally:
# Restore original modules
sys.modules.clear()
sys.modules.update(original_modules)
@patch("ray._private.thirdparty.pynvml", create=True)
def test_initialize_failure(self, mock_pynvml):
"""Test failed initialization."""
# Ensure provider starts fresh
self.provider._initialized = False
# Make nvmlInit fail
mock_pynvml.nvmlInit.side_effect = Exception("Initialization failed")
# Mock sys.modules to make the import work but nvmlInit fail
import sys
original_modules = sys.modules.copy()
sys.modules["ray._private.thirdparty.pynvml"] = mock_pynvml
try:
self.assertFalse(self.provider._initialize())
self.assertFalse(self.provider._initialized)
finally:
# Restore original modules
sys.modules.clear()
sys.modules.update(original_modules)
@patch("ray._private.thirdparty.pynvml", create=True)
def test_initialize_already_initialized(self, mock_pynvml):
"""Test initialization when already initialized."""
self.provider._initialized = True
self.assertTrue(self.provider._initialize())
mock_pynvml.nvmlInit.assert_not_called()
@patch("ray._private.thirdparty.pynvml", create=True)
def test_shutdown(self, mock_pynvml):
"""Test shutdown."""
self.provider._initialized = True
self.provider._pynvml = mock_pynvml
self.provider._shutdown()
self.assertFalse(self.provider._initialized)
mock_pynvml.nvmlShutdown.assert_called_once()
@patch("ray._private.thirdparty.pynvml", create=True)
def test_shutdown_not_initialized(self, mock_pynvml):
"""Test shutdown when not initialized."""
self.provider._shutdown()
mock_pynvml.nvmlShutdown.assert_not_called()
@patch("ray._private.thirdparty.pynvml", create=True)
def test_get_gpu_utilization_success(self, mock_pynvml):
"""Test successful GPU utilization retrieval."""
# Mock GPU device
mock_handle = Mock()
mock_memory_info = Mock()
mock_memory_info.used = 8 * MB * 1024 # 8GB used
mock_memory_info.total = 12 * MB * 1024 # 12GB total
mock_utilization_info = Mock()
mock_utilization_info.gpu = 75
mock_process = Mock()
mock_process.pid = 1234
mock_process.usedGpuMemory = 256 * MB
# Configure mocks
mock_pynvml.nvmlInit.return_value = None
mock_pynvml.nvmlDeviceGetCount.return_value = 1
mock_pynvml.nvmlDeviceGetHandleByIndex.return_value = mock_handle
mock_pynvml.nvmlDeviceGetMemoryInfo.return_value = mock_memory_info
mock_pynvml.nvmlDeviceGetUtilizationRates.return_value = mock_utilization_info
mock_pynvml.nvmlDeviceGetComputeRunningProcesses.return_value = [mock_process]
mock_pynvml.nvmlDeviceGetGraphicsRunningProcesses.return_value = []
mock_pynvml.nvmlDeviceGetName.return_value = b"NVIDIA GeForce RTX 3080"
mock_pynvml.nvmlDeviceGetUUID.return_value = (
b"GPU-12345678-1234-1234-1234-123456789abc"
)
mock_pynvml.nvmlShutdown.return_value = None
# Set up provider state
self.provider._pynvml = mock_pynvml
self.provider._initialized = True
result = self.provider.get_gpu_utilization()
self.assertEqual(len(result), 1)
gpu_info = result[0]
self.assertEqual(gpu_info["index"], 0)
self.assertEqual(gpu_info["name"], "NVIDIA GeForce RTX 3080")
self.assertEqual(gpu_info["uuid"], "GPU-12345678-1234-1234-1234-123456789abc")
self.assertEqual(gpu_info["utilization_gpu"], 75)
self.assertEqual(gpu_info["memory_used"], 8 * 1024) # 8GB in MB
self.assertEqual(gpu_info["memory_total"], 12 * 1024) # 12GB in MB
self.assertEqual(len(gpu_info["processes_pids"]), 1)
self.assertEqual(gpu_info["processes_pids"][1234]["pid"], 1234)
self.assertEqual(gpu_info["processes_pids"][1234]["gpu_memory_usage"], 256)
@patch("ray._private.thirdparty.pynvml", create=True)
def test_get_gpu_utilization_with_errors(self, mock_pynvml):
"""Test GPU utilization retrieval with partial errors."""
mock_handle = Mock()
mock_memory_info = Mock()
mock_memory_info.used = 4 * MB * 1024
mock_memory_info.total = 8 * MB * 1024
# Create mock NVML error class
class MockNVMLError(Exception):
pass
mock_pynvml.NVMLError = MockNVMLError
# Configure mocks with some failures
mock_pynvml.nvmlInit.return_value = None
mock_pynvml.nvmlDeviceGetCount.return_value = 1
mock_pynvml.nvmlDeviceGetHandleByIndex.return_value = mock_handle
mock_pynvml.nvmlDeviceGetMemoryInfo.return_value = mock_memory_info
mock_pynvml.nvmlDeviceGetUtilizationRates.side_effect = MockNVMLError(
"Utilization not available"
)
mock_pynvml.nvmlDeviceGetComputeRunningProcesses.side_effect = MockNVMLError(
"Process info not available"
)
mock_pynvml.nvmlDeviceGetGraphicsRunningProcesses.side_effect = MockNVMLError(
"Process info not available"
)
mock_pynvml.nvmlDeviceGetName.return_value = b"NVIDIA Tesla V100"
mock_pynvml.nvmlDeviceGetUUID.return_value = (
b"GPU-87654321-4321-4321-4321-ba9876543210"
)
mock_pynvml.nvmlShutdown.return_value = None
# Set up provider state
self.provider._pynvml = mock_pynvml
self.provider._initialized = True
result = self.provider.get_gpu_utilization()
self.assertEqual(len(result), 1)
gpu_info = result[0]
self.assertEqual(gpu_info["index"], 0)
self.assertEqual(gpu_info["name"], "NVIDIA Tesla V100")
self.assertEqual(gpu_info["utilization_gpu"], -1) # Should be -1 due to error
self.assertEqual(
gpu_info["processes_pids"], {}
) # Should be empty dict due to error
@patch("ray._private.thirdparty.pynvml", create=True)
def test_get_gpu_utilization_with_mig(self, mock_pynvml):
"""Test GPU utilization retrieval with MIG devices."""
# Mock regular GPU handle
mock_gpu_handle = Mock()
mock_memory_info = Mock()
mock_memory_info.used = 4 * MB * 1024
mock_memory_info.total = 8 * MB * 1024
# Mock MIG device handle and info
mock_mig_handle = Mock()
mock_mig_memory_info = Mock()
mock_mig_memory_info.used = 2 * MB * 1024
mock_mig_memory_info.total = 4 * MB * 1024
mock_mig_utilization_info = Mock()
mock_mig_utilization_info.gpu = 80
# Configure mocks for MIG-enabled GPU
mock_pynvml.nvmlInit.return_value = None
mock_pynvml.nvmlDeviceGetCount.return_value = 1
mock_pynvml.nvmlDeviceGetHandleByIndex.return_value = mock_gpu_handle
# MIG mode enabled
mock_pynvml.nvmlDeviceGetMigMode.return_value = (
True,
True,
) # (current, pending)
mock_pynvml.nvmlDeviceGetMaxMigDeviceCount.return_value = 1 # Only 1 MIG device
mock_pynvml.nvmlDeviceGetMigDeviceHandleByIndex.return_value = mock_mig_handle
# MIG device info
mock_pynvml.nvmlDeviceGetMemoryInfo.return_value = mock_mig_memory_info
mock_pynvml.nvmlDeviceGetUtilizationRates.return_value = (
mock_mig_utilization_info
)
mock_pynvml.nvmlDeviceGetComputeRunningProcesses.return_value = []
mock_pynvml.nvmlDeviceGetGraphicsRunningProcesses.return_value = []
mock_pynvml.nvmlDeviceGetName.return_value = b"NVIDIA A100-SXM4-40GB MIG 1g.5gb"
mock_pynvml.nvmlDeviceGetUUID.return_value = (
b"MIG-12345678-1234-1234-1234-123456789abc"
)
mock_pynvml.nvmlShutdown.return_value = None
# Set up provider state
self.provider._pynvml = mock_pynvml
self.provider._initialized = True
result = self.provider.get_gpu_utilization()
# Should return MIG device info instead of regular GPU
self.assertEqual(
len(result), 1
) # Only one MIG device due to exception handling
gpu_info = result[0]
self.assertEqual(gpu_info["index"], 0) # First MIG device (0 * 1000 + 0)
self.assertEqual(gpu_info["name"], "NVIDIA A100-SXM4-40GB MIG 1g.5gb")
self.assertEqual(gpu_info["uuid"], "MIG-12345678-1234-1234-1234-123456789abc")
self.assertEqual(gpu_info["utilization_gpu"], 80)
self.assertEqual(gpu_info["memory_used"], 2 * 1024) # 2GB in MB
self.assertEqual(gpu_info["memory_total"], 4 * 1024) # 4GB in MB
self.assertEqual(gpu_info["processes_pids"], {})
| TestNvidiaGpuProvider |
python | keras-team__keras | keras/src/layers/preprocessing/index_lookup.py | {
"start": 347,
"end": 42991
} | class ____(Layer):
"""Maps values from a vocabulary to integer indices.
This layer translates a set of arbitrary hashables into an integer output
via a table-based lookup, with optional out-of-vocabulary handling. This is
the basis layer for both IntegerLookup and StringLookup; it holds the common
logic but is not intended to be exported as part of the Keras API.
Args:
max_tokens: The maximum size of the vocabulary for this layer.
If `None`, there is no cap on the size of the vocabulary.
Note that this size includes the OOV and mask tokens.
num_oov_indices: The number of out-of-vocabulary tokens to use.
If this value is more than 1, OOV inputs are hashed to determine
their OOV value. If this value is 0,
OOV inputs will cause an error when calling the layer.
mask_token: A token that represents masked inputs.
When `output_mode` is `"int"`,
the token is included in vocabulary and mapped to index 0.
In other output modes, the token will not appear in the vocabulary
and instances of the mask token in the input will be dropped.
If set to `None`, no mask term will be added.
oov_token: Only used when `invert` is `True`.
The token to return for OOV indices.
vocabulary: Optional. Either an array or a string path to a text file.
If passing an array, can pass a tuple, list, 1D numpy array,
or 1D tensor containing the vocbulary terms.
If passing a file path, the file should contain one line per term
in the vocabulary. If this argument is set,
there is no need to `adapt` the layer.
vocabulary_dtype: The dtype of the vocabulary terms.
For example, `"int64"` or `"string"`.
idf_weights: Only valid when `output_mode` is `"tf_idf"`.
A tuple, list, 1D numpy array, or 1D tensor or the same length
as the vocabulary, containing the floating point
inverse document frequency weights, which will be multiplied
by per sample term counts for the final TF-IDF
weight. If the `vocabulary` argument is set, and `output_mode`
is `"tf_idf"`, this argument must be supplied.
invert: Only valid when `output_mode` is `"int"`.
If `True`, this layer will map indices to vocabulary items
instead of mapping vocabulary items to indices.
Defaults to `False`.
output_mode: Specification for the output of the layer. Values can be
`"int"`, `"one_hot"`, `"multi_hot"`, `"count"`, or `"tf_idf"`
configuring the layer as follows:
- `"int"`: Return the raw integer indices of the input tokens.
- `"one_hot"`: Encodes each individual element in the input into an
array the same size as the vocabulary, containing a 1
at the element index. If the last dimension is size 1,
will encode on that dimension.
If the last dimension is not size 1,
will append a new dimension for the encoded output.
- `"multi_hot"`: Encodes each sample in the input into
a single array the same size as the vocabulary,
containing a 1 for each vocabulary term present in the sample.
Treats the last dimension as the sample dimension,
if input shape is `(..., sample_length)`, output shape will
be `(..., num_tokens)`.
- `"count"`: As `"multi_hot"`, but the int array contains a count
of the number of times the token at that index appeared
in the sample.
- `"tf_idf"`: As `"multi_hot"`, but the TF-IDF algorithm
is applied to find the value in each token slot.
Defaults to `"int"`.
pad_to_max_tokens: Only valid when `output_mode` is `"multi_hot"`,
`"count"`, or `"tf_idf"`. If `True`, the output will have its
feature axis padded to `max_tokens` even if the number
of unique tokens in the vocabulary is less than max_tokens,
resulting in a tensor of shape `(batch_size, max_tokens)`
regardless of vocabulary size. Defaults to `False`.
sparse: Boolean. Only applicable to `"one_hot"`, `"multi_hot"`,
`"count"` and `"tf-idf"` output modes.
If `True`, returns a `SparseTensor` instead of a dense `Tensor`.
Defaults to `False`.
"""
    def __init__(
        self,
        max_tokens,
        num_oov_indices,
        mask_token,
        oov_token,
        vocabulary_dtype,
        vocabulary=None,
        idf_weights=None,
        invert=False,
        output_mode="int",
        sparse=False,
        pad_to_max_tokens=False,
        name=None,
        **kwargs,
    ):
        """Validates arguments and initializes lookup tables and adapt state.

        Raises:
            ValueError: If an invalid argument combination is passed, e.g.
                `max_tokens <= 1`, `pad_to_max_tokens` without `max_tokens`,
                a negative `num_oov_indices`, `invert` with a non-`"int"`
                output mode, `sparse` with `"int"` mode, `idf_weights`
                outside `"tf_idf"` mode, or unrecognized keyword arguments.
        """
        # If max_tokens is set, the value must be greater than 1 - otherwise we
        # are creating a 0-element vocab, which doesn't make sense.
        if max_tokens is not None and max_tokens <= 1:
            raise ValueError(
                "If set, `max_tokens` must be greater than 1. "
                f"Received: max_tokens={max_tokens}"
            )
        if pad_to_max_tokens and max_tokens is None:
            raise ValueError(
                "If pad_to_max_tokens is True, must set `max_tokens`. "
                f"Received: max_tokens={max_tokens}"
            )
        if num_oov_indices < 0:
            raise ValueError(
                "`num_oov_indices` must be greater than or equal to 0. "
                f"Received: num_oov_indices={num_oov_indices}"
            )
        # Support deprecated names for output_modes.
        if output_mode == "binary":
            output_mode = "multi_hot"
        if output_mode == "tf-idf":
            output_mode = "tf_idf"
        argument_validation.validate_string_arg(
            output_mode,
            allowable_strings=(
                "int",
                "one_hot",
                "multi_hot",
                "count",
                "tf_idf",
            ),
            caller_name=self.__class__.__name__,
            arg_name="output_mode",
        )
        if invert and output_mode != "int":
            raise ValueError(
                "`output_mode` must be `'int'` when `invert` is true. "
                f"Received: output_mode={output_mode}"
            )
        if sparse and output_mode == "int":
            raise ValueError(
                "`sparse` may only be true if `output_mode` is "
                "`'one_hot'`, `'multi_hot'`, `'count'` or `'tf_idf'`. "
                f"Received: sparse={sparse} and "
                f"output_mode={output_mode}"
            )
        if idf_weights is not None and output_mode != "tf_idf":
            raise ValueError(
                "`idf_weights` should only be set if `output_mode` is "
                f"`'tf_idf'`. Received: idf_weights={idf_weights} and "
                f"output_mode={output_mode}"
            )
        super().__init__(name=name)
        self._convert_input_args = False
        self._allow_non_tensor_positional_args = True
        self.supports_jit = False
        self.invert = invert
        self.max_tokens = max_tokens
        self.num_oov_indices = num_oov_indices
        self.mask_token = mask_token
        self.oov_token = oov_token
        self.output_mode = output_mode
        self.sparse = sparse
        self.pad_to_max_tokens = pad_to_max_tokens
        # Normalize the dtype to its canonical string name (e.g. "string").
        self.vocabulary_dtype = tf.as_dtype(vocabulary_dtype).name
        self._frozen_vocab_size = kwargs.pop("vocabulary_size", None)
        # Remember original `vocabulary` as `input_vocabulary` for serialization
        # via `get_config`. However, if `vocabulary` is a file path or a URL, we
        # serialize the vocabulary as an asset and clear the original path/URL.
        self.input_vocabulary = (
            vocabulary if not isinstance(vocabulary, str) else None
        )
        self.input_idf_weights = idf_weights
        # We set this hidden attr to
        # persist the fact that we have a non-adaptable layer with a
        # manually set vocab.
        self._has_input_vocabulary = kwargs.pop(
            "has_input_vocabulary", (vocabulary is not None)
        )
        kwargs.pop("trainable", None)
        kwargs.pop("dtype", None)
        if kwargs:
            raise ValueError(f"Unrecognized keyword argument(s): {kwargs}")
        if invert:
            # Inverted lookup: int indices in, vocabulary terms out.
            self._key_dtype = "int64"
            self._value_dtype = self.vocabulary_dtype
            mask_key = 0
            mask_value = mask_token
            self._default_value = self.oov_token
        else:
            # Forward lookup: vocabulary terms in, int indices out.
            self._key_dtype = self.vocabulary_dtype
            self._value_dtype = "int64"
            mask_key = mask_token
            # Masks should map to 0 for int output and be dropped otherwise. Max
            # ints will be dropped from the bincount op.
            mask_value = (
                0
                if self.output_mode == "int"
                else tf.as_dtype(self._value_dtype).max
            )
            if self.num_oov_indices == 0:
                # If there are no OOV indices, we map OOV tokens to -1 and error
                # out during call if we find a negative index.
                self._default_value = -1
            elif self.num_oov_indices == 1:
                # If there is only one OOV index, we can set that index as the
                # default value of the index_lookup table.
                self._default_value = self._oov_start_index()
            else:
                # If we have multiple OOV values, we need to do a further
                # hashing step; to make this easier, we set the OOV value to -1.
                # (This lets us do a vectorized add and cast to boolean to
                # determine locations where we need to do extra hashing.)
                self._default_value = -1
        if self.mask_token is not None:
            self._mask_key = tf.convert_to_tensor(mask_key, self._key_dtype)
            self._mask_value = tf.convert_to_tensor(
                mask_value, self._value_dtype
            )
        if self.output_mode == "tf_idf":
            if self._has_input_vocabulary and idf_weights is None:
                raise ValueError(
                    "When specifying the `vocabulary` argument, "
                    "in TF-IDF output mode, the `idf_weights` argument "
                    "must also be provided."
                )
            if idf_weights is not None:
                self.idf_weights = tf.Variable(
                    idf_weights,
                    dtype=backend.floatx(),
                    trainable=False,
                )
                # Snapshot the variable value for use inside `call`.
                self.idf_weights_const = self.idf_weights.value()
        if vocabulary is not None:
            self.set_vocabulary(vocabulary, idf_weights)
        else:
            # When restoring from a keras SavedModel, the loading code will
            # expect to find and restore a lookup_table attribute on the layer.
            # This table needs to be uninitialized as a StaticHashTable cannot
            # be initialized twice.
            self.lookup_table = self._uninitialized_lookup_table()
        # Only set up adapt state if we did not receive a vocab on construction.
        if not self._has_input_vocabulary:
            # Set adapt state.
            self.token_counts = tf.lookup.experimental.MutableHashTable(
                key_dtype=vocabulary_dtype,
                value_dtype="int64",
                default_value=0,
            )
            if self.output_mode == "tf_idf":
                self.token_document_counts = (
                    tf.lookup.experimental.MutableHashTable(
                        key_dtype=vocabulary_dtype,
                        value_dtype="int64",
                        default_value=0,
                    )
                )
                self.num_documents = tf.Variable(
                    0, dtype="int64", trainable=False
                )
    def get_vocabulary(self, include_special_tokens=True):
        """Returns the current vocabulary of the layer.
        Args:
            include_special_tokens: If `True`, the returned vocabulary
                will include mask and OOV tokens,
                and a term's index in the vocabulary
                will equal the term's index when calling the layer.
                If `False`, the returned vocabulary will not include
                any mask or OOV tokens.
        Returns:
            A list of vocabulary terms; byte strings are decoded to `str`
            when the vocabulary dtype is string.
        """
        # The lookup table data will not be sorted, so we will create an
        # inverted lookup here, and use that to lookup a range of indices
        # [0, vocab_size).
        if self.lookup_table.size() == 0:
            vocab, indices = [], []
        else:
            keys, values = self.lookup_table.export()
            vocab, indices = (values, keys) if self.invert else (keys, values)
            vocab, indices = (
                self._tensor_vocab_to_numpy(vocab),
                indices.numpy(),
            )
        # Indices missing from the table (e.g. reserved OOV slots) fall back
        # to the OOV token via the defaultdict factory.
        lookup = collections.defaultdict(
            lambda: self.oov_token, zip(indices, vocab)
        )
        vocab = [lookup[x] for x in range(self.vocabulary_size())]
        if self.mask_token is not None and self.output_mode == "int":
            vocab[0] = self.mask_token
        if not include_special_tokens:
            vocab = vocab[self._token_start_index() :]
        if self.vocabulary_dtype == "string":
            return [
                i.decode("utf-8") if isinstance(i, bytes) else i for i in vocab
            ]
        else:
            return vocab
    def vocabulary_size(self):
        """Gets the current size of the layer's vocabulary.
        Returns:
            The integer size of the vocabulary, including optional mask and oov
            indices. In graph mode this is a scalar int tensor rather than a
            Python int.
        """
        if tf.executing_eagerly():
            # Eager: materialize the table size to a Python int.
            return (
                int(self.lookup_table.size().numpy())
                + self._token_start_index()
            )
        else:
            # Graph: keep the size symbolic.
            return self.lookup_table.size() + self._token_start_index()
    def get_config(self):
        """Returns the layer configuration dict for serialization.

        In-memory vocabulary/idf weights (if any) and the frozen vocabulary
        size are included so the layer can be rebuilt from config alone.
        """
        config = {
            "invert": self.invert,
            "max_tokens": self.max_tokens,
            "num_oov_indices": self.num_oov_indices,
            "oov_token": self.oov_token,
            "mask_token": self.mask_token,
            "output_mode": self.output_mode,
            "sparse": self.sparse,
            "pad_to_max_tokens": self.pad_to_max_tokens,
            "vocabulary_dtype": self.vocabulary_dtype,
            # Tensors/arrays are converted to plain lists for JSON safety.
            "idf_weights": listify_tensors(self.input_idf_weights),
            "vocabulary": listify_tensors(self.input_vocabulary),
            "vocabulary_size": self._frozen_vocab_size,
        }
        base_config = super().get_config()
        return dict(list(base_config.items()) + list(config.items()))
    def _record_vocabulary_size(self):
        # Freeze the current vocabulary size (used for output shapes), after
        # verifying it did not change since the layer was last called.
        self._ensure_vocab_size_unchanged()
        with tf.init_scope():
            self._frozen_vocab_size = self.vocabulary_size()
def set_vocabulary(self, vocabulary, idf_weights=None):
"""Sets vocabulary (and optionally document frequency) for this layer.
This method sets the vocabulary and idf weights for this layer directly,
instead of analyzing a dataset through `adapt`. It should be used
whenever the vocab (and optionally document frequency) information is
already known. If vocabulary data is already present in the layer, this
method will replace it.
Args:
vocabulary: Either an array or a string path to a text file.
If passing an array, can pass a tuple, list,
1D numpy array, or 1D tensor containing the vocbulary terms.
If passing a file path, the file should contain one line
per term in the vocabulary.
idf_weights: A tuple, list, 1D numpy array, or 1D tensor
of inverse document frequency weights with equal
length to vocabulary. Must be set if `output_mode`
is `"tf_idf"`. Should not be set otherwise.
"""
if self.output_mode == "tf_idf":
if idf_weights is None:
raise ValueError(
"`idf_weights` must be set if output_mode is 'tf_idf'."
)
elif idf_weights is not None:
raise ValueError(
"`idf_weights` should only be set if output_mode is "
f"`'tf_idf'`. Received: output_mode={self.output_mode} "
f"and idf_weights={idf_weights}"
)
if isinstance(vocabulary, str):
if serialization_lib.in_safe_mode():
raise ValueError(
"Requested the loading of a vocabulary file outside of the "
"model archive. This carries a potential risk of loading "
"arbitrary and sensitive files and thus it is disallowed "
"by default. If you trust the source of the artifact, you "
"can override this error by passing `safe_mode=False` to "
"the loading function, or calling "
"`keras.config.enable_unsafe_deserialization(). "
f"Vocabulary file: '{vocabulary}'"
)
if not tf.io.gfile.exists(vocabulary):
raise ValueError(
f"Vocabulary file {vocabulary} does not exist."
)
if self.output_mode == "tf_idf":
raise ValueError(
"output_mode `'tf_idf'` does not support loading a "
"vocabulary from file."
)
self.lookup_table = self._lookup_table_from_file(vocabulary)
self._record_vocabulary_size()
return
if not tf.executing_eagerly() and (
tf.is_tensor(vocabulary) or tf.is_tensor(idf_weights)
):
raise RuntimeError(
f"Cannot set a tensor vocabulary on layer {self.name} "
"when not executing eagerly. "
"Create this layer or call `set_vocabulary()` "
"outside of any traced function."
)
# TODO(mattdangerw): for better performance we should rewrite this
# entire function to operate on tensors and convert vocabulary to a
# tensor here.
if tf.is_tensor(vocabulary):
vocabulary = self._tensor_vocab_to_numpy(vocabulary)
elif isinstance(vocabulary, (list, tuple)):
vocabulary = np.array(vocabulary)
if tf.is_tensor(idf_weights):
idf_weights = idf_weights.numpy()
elif isinstance(idf_weights, (list, tuple)):
idf_weights = np.array(idf_weights)
if vocabulary.size == 0:
raise ValueError(
"Cannot set an empty vocabulary. "
f"Received: vocabulary={vocabulary}"
)
oov_start = self._oov_start_index()
token_start = self._token_start_index()
special_tokens = [self.mask_token] * oov_start + [
self.oov_token
] * self.num_oov_indices
found_special_tokens = np.array_equal(
special_tokens, vocabulary[:token_start]
)
if found_special_tokens:
tokens = vocabulary[token_start:]
else:
tokens = vocabulary
repeated_tokens = self._find_repeated_tokens(tokens)
if repeated_tokens:
raise ValueError(
"The passed vocabulary has at least one repeated "
"term. Please uniquify your dataset. The repeated terms "
f"are: {repeated_tokens}"
)
if self.mask_token is not None and self.mask_token in tokens:
mask_index = np.argwhere(vocabulary == self.mask_token)[-1]
raise ValueError(
"Found reserved mask token at unexpected location in "
"`vocabulary`. Note that passed `vocabulary` does not need to "
"include the OOV and mask tokens. Either remove all mask and "
"OOV tokens, or include them only at the start of the "
f"vocabulary in precisely this order: {special_tokens}. "
f"Received: mask_token={self.mask_token} at "
f"vocabulary index {mask_index}"
)
# Only error out for oov_token when invert=True. When invert=False,
# oov_token is unused during lookup.
if (
self.oov_token is not None
and self.invert
and self.oov_token in tokens
):
oov_index = np.argwhere(vocabulary == self.oov_token)[-1]
raise ValueError(
"Found reserved OOV token at unexpected location in "
"`vocabulary`. Note that passed `vocabulary` does not need to "
"include the OOV and mask tokens. Either remove all mask and "
"OOV tokens, or include them only at the start of the "
f"vocabulary in precisely this order: {special_tokens}. "
f"Received: oov_token={self.oov_token} at "
f"vocabulary index {oov_index}"
)
new_vocab_size = token_start + len(tokens)
if self.max_tokens is not None and (new_vocab_size > self.max_tokens):
raise ValueError(
"Attempted to set a vocabulary larger than the maximum vocab "
f"size. Received vocabulary size is {new_vocab_size}; "
f"`max_tokens` is {self.max_tokens}."
)
self.lookup_table = self._lookup_table_from_tokens(tokens)
self._record_vocabulary_size()
if self.output_mode == "tf_idf" and idf_weights is not None:
if len(vocabulary) != len(idf_weights):
raise ValueError(
"`idf_weights` must be the same length as vocabulary. "
f"len(idf_weights) is {len(idf_weights)}; "
f"len(vocabulary) is {len(vocabulary)}"
)
idf_weights = self._convert_to_ndarray(idf_weights)
if idf_weights.ndim != 1:
raise ValueError(
"TF-IDF data must be a 1-index array. "
f"Received: type(idf_weights)={type(idf_weights)}"
)
# If the passed vocabulary has no special tokens, we need to pad the
# front of idf_weights. We don't have real document frequencies for
# these tokens so we will use an average of all idf_weights passed
# in as a reasonable default.
if found_special_tokens:
front_padding = 0
front_padding_value = 0
else:
front_padding = token_start
front_padding_value = np.average(idf_weights)
# If pad_to_max_tokens is true, and max_tokens is greater than our
# total vocab size, we need to pad the back of idf_weights with
# zeros as well.
back_padding_value = 0
if self.pad_to_max_tokens and self.max_tokens is not None:
back_padding = (
self.max_tokens - front_padding - len(idf_weights)
)
else:
back_padding = 0
weights = np.pad(
idf_weights,
(front_padding, back_padding),
"constant",
constant_values=(front_padding_value, back_padding_value),
)
weights = tf.convert_to_tensor(weights, dtype=backend.floatx())
self.idf_weights = tf.Variable(
weights,
trainable=False,
)
self.idf_weights_const = self.idf_weights.value()
def get_build_config(self):
return {}
    def build_from_config(self, config):
        # There is no build state to restore; calling `build(None)` keeps the
        # standard build bookkeeping consistent.
        self.build(None)
    @property
    def compute_dtype(self):
        # Inputs are vocabulary entries, so computation happens in the
        # vocabulary dtype rather than the usual floating-point dtype.
        return self.vocabulary_dtype
    @property
    def variable_dtype(self):
        # Mirror `compute_dtype`: layer state is keyed by vocabulary entries.
        return self.vocabulary_dtype
    def compute_output_shape(self, input_shape):
        # "int" mode is an elementwise lookup and preserves the input shape.
        if self.output_mode == "int":
            return input_shape
        # Binned modes produce one feature axis of size `depth`; when not
        # padding, this requires a frozen vocabulary size.
        depth = (
            self.max_tokens
            if self.pad_to_max_tokens
            else self._frozen_vocab_size
        )
        return (input_shape[0], depth)
    def compute_output_spec(self, inputs):
        # "int" mode returns indices; all binned modes return float scores.
        if self.output_mode == "int":
            output_dtype = "int64"
        else:
            output_dtype = backend.floatx()
        output_shape = self.compute_output_shape(inputs.shape)
        return backend.KerasTensor(output_shape, dtype=output_dtype)
    def adapt(self, data, steps=None):
        """Computes a vocabulary from `data`, replacing any adapt state.

        Args:
            data: A `tf.data.Dataset` or a dense array/tensor of tokens.
            steps: Optional number of dataset batches to consume; only used
                when `data` is a `tf.data.Dataset`.
        """
        self.reset_state()
        if isinstance(data, tf.data.Dataset):
            if steps is not None:
                data = data.take(steps)
            for batch in data:
                self.update_state(batch)
        else:
            data = tf_utils.ensure_tensor(data, dtype=self.vocabulary_dtype)
            if data.shape.rank == 1:
                # A plain 1-D batch of strings is treated as one document per
                # string (relevant for tf-idf document counting).
                data = tf.expand_dims(data, -1)
            self.update_state(data)
        self.finalize_state()
    def update_state(self, data):
        """Accumulates token counts (and document counts for tf-idf).

        Raises:
            ValueError: If the layer was created with a static vocabulary.
        """
        if self._has_input_vocabulary:
            raise ValueError(
                f"Cannot adapt layer '{self.name}' after setting a static "
                "vocabulary via `vocabulary` argument or "
                "`set_vocabulary()` method."
            )
        data = tf_utils.ensure_tensor(data, dtype=self.vocabulary_dtype)
        if data.shape.rank == 0:
            data = tf.expand_dims(data, 0)
        if data.shape.rank == 1:
            # Expand dims on axis 0 for tf-idf. A 1-d tensor
            # is a single document.
            data = tf.expand_dims(data, 0)
        tokens, counts = self._num_tokens(data)
        # MutableHashTable has no atomic increment; read-modify-write.
        self.token_counts.insert(
            tokens, counts + self.token_counts.lookup(tokens)
        )
        if self.output_mode == "tf_idf":
            # Dedupe each row of our dataset.
            if isinstance(data, tf.RaggedTensor):
                deduped_doc_data = tf.map_fn(lambda x: tf.unique(x)[0], data)
            else:
                deduped_doc_data = [tf.unique(x)[0] for x in data]
                deduped_doc_data = tf.concat(deduped_doc_data, axis=0)
            # Flatten and count tokens.
            tokens, counts = self._num_tokens(deduped_doc_data)
            self.token_document_counts.insert(
                tokens, counts + self.token_document_counts.lookup(tokens)
            )
            # Each row counts as one document.
            if isinstance(data, tf.RaggedTensor):
                self.num_documents.assign_add(data.nrows())
            else:
                self.num_documents.assign_add(
                    tf.shape(data, out_type="int64")[0]
                )
    def finalize_state(self):
        """Converts the accumulated adapt counts into the lookup table.

        Sorts learned tokens by frequency (ties broken by token value),
        truncates to `max_tokens`, builds the lookup table, computes idf
        weights for tf-idf mode, then clears the accumulation state.
        """
        if self._has_input_vocabulary or tf.equal(self.token_counts.size(), 0):
            # Finalize idf_weights to a const for call even if we don't need to
            # compute a new vocabulary.
            if self.output_mode == "tf_idf":
                self.idf_weights_const = self.idf_weights.value()
            self._record_vocabulary_size()
            return
        # Remove special tokens from our counts.
        if self.mask_token is not None:
            self.token_counts.remove(
                tf.convert_to_tensor([self.mask_token], self.vocabulary_dtype)
            )
        if self.oov_token is not None:
            self.token_counts.remove(
                tf.convert_to_tensor([self.oov_token], self.vocabulary_dtype)
            )
        tokens, counts = self.token_counts.export()
        # To keep vocabs deterministic, we sort our tokens by count and break
        # ties by sorting the tokens themselves. Tensorflow has no ops for
        # sorting strings, so we need to use numpy for the sort.
        sorted_indices = np.lexsort((tokens.numpy(), counts.numpy()))[::-1]
        token_start = self._token_start_index()
        if self.max_tokens:
            # Reserve room for the special tokens at the front.
            max_learned_tokens = self.max_tokens - token_start
            sorted_indices = sorted_indices[:max_learned_tokens]
        tokens = tf.gather(tokens, sorted_indices)
        self.lookup_table = self._lookup_table_from_tokens(tokens)
        if self.output_mode == "tf_idf":
            token_document_counts = self.token_document_counts.lookup(tokens)
            idf_weights = self._inverse_document_frequency(
                token_document_counts, self.num_documents
            )
            idf_weights = tf.cast(idf_weights, backend.floatx())
            # Pad the front of idf_weights with the average idf weight for OOV
            # tokens. We cannot compute the real idf weight of OOV in a single
            # pass.
            idf_weights = tf.pad(
                idf_weights,
                [[self._token_start_index(), 0]],
                constant_values=tf.reduce_mean(idf_weights),
            )
            if self.pad_to_max_tokens and self.max_tokens is not None:
                # Pad the back of idf_weights with zeros.
                idf_weights = tf.pad(
                    idf_weights,
                    [[0, self.max_tokens - tf.size(idf_weights)]],
                    constant_values=0,
                )
            self.idf_weights = tf.Variable(
                idf_weights,
                dtype=backend.floatx(),
                trainable=False,
            )
            self.idf_weights_const = self.idf_weights.value()
        # We call this here to save memory, now that we've built our vocabulary,
        # we don't want to keep every token we've seen in separate lookup
        # tables.
        self.reset_state()
        self._record_vocabulary_size()
    def reset_state(self):
        # Clear all adapt accumulation state. No-op for static-vocabulary
        # layers, which have no accumulation tables.
        if self._has_input_vocabulary:
            return
        # `export()[0]` yields all current keys; removing them empties the
        # mutable hash tables.
        self.token_counts.remove(self.token_counts.export()[0])
        if self.output_mode == "tf_idf":
            self.token_document_counts.remove(
                self.token_document_counts.export()[0]
            )
            self.num_documents.assign(0)
    def call(self, inputs):
        """Looks up `inputs` and encodes them per `output_mode`.

        Handles dense, sparse and ragged inputs. For "int" mode the result is
        the raw lookups; for binned modes the lookups are one/multi-hot or
        count encoded, and for "tf_idf" the counts are scaled by idf weights.
        """
        from keras.src.backend import tensorflow as tf_backend
        self._ensure_known_vocab_size()
        inputs = tf_utils.ensure_tensor(inputs, dtype=self._key_dtype)
        original_shape = inputs.shape
        # Some ops will not handle scalar input, so uprank to rank 1.
        if inputs.shape.rank == 0:
            inputs = self._expand_dims(inputs, -1)
        if isinstance(inputs, tf.SparseTensor):
            # Look up only the stored values; indices/shape are unchanged.
            lookups = tf.SparseTensor(
                inputs.indices,
                self._lookup_dense(inputs.values),
                inputs.dense_shape,
            )
        elif isinstance(inputs, tf.RaggedTensor):
            lookups = tf.ragged.map_flat_values(self._lookup_dense, inputs)
        else:
            lookups = self._lookup_dense(inputs)
        if self.output_mode == "int":
            # If we received a scalar input, downrank back to a scalar.
            if original_shape.rank == 0:
                lookups = tf.squeeze(lookups, -1)
            return lookups
        depth = (
            self.max_tokens
            if self.pad_to_max_tokens
            else self._frozen_vocab_size
        )
        idf_weights = (
            self.idf_weights_const if self.output_mode == "tf_idf" else None
        )
        # tf_idf is computed as count encoding scaled by idf weights below.
        output = numerical_utils.encode_categorical_inputs(
            lookups,
            output_mode=(
                "count" if self.output_mode == "tf_idf" else self.output_mode
            ),
            depth=depth,
            dtype=self._value_dtype,
            sparse=self.sparse,
            backend_module=tf_backend,
        )
        if self.output_mode == "tf_idf":
            if idf_weights is None:
                raise ValueError(
                    "When `output_mode` is `'tf_idf'`, `idf_weights` must be "
                    "provided."
                )
            output = tf_backend.numpy.multiply(
                tf_backend.core.cast(output, idf_weights.dtype), idf_weights
            )
        return output
    def _lookup_dense(self, inputs):
        """Lookup table values for a dense Tensor, handling masking and OOV."""
        # When executing eagerly and tracing keras.Input objects,
        # do not call lookup.
        # This is critical for restoring SavedModel, which will first trace
        # layer.call and then attempt to restore the table. We need the table to
        # be uninitialized for the restore to work, but calling the table
        # uninitialized would error.
        if tf.executing_eagerly() and backend.is_keras_tensor(inputs):
            lookups = tf.zeros_like(inputs, dtype=self._value_dtype)
        else:
            lookups = self.lookup_table.lookup(inputs)
        if self.mask_token is not None:
            # Overwrite mask positions with the configured mask value.
            mask_locations = tf.equal(inputs, self._mask_key)
            lookups = tf.where(mask_locations, self._mask_value, lookups)
        if self.invert:
            return lookups
        lookup_checks = []
        if self.num_oov_indices == 0:
            # If we have zero oov indices, we need to check for oov inputs.
            oov_indices = tf.where(tf.equal(lookups, -1))
            oov_inputs = tf.gather_nd(inputs, oov_indices)
            msg = tf.strings.format(
                "When `num_oov_indices=0` all inputs should be in vocabulary, "
                "found OOV values {}, consider setting `num_oov_indices=1`.",
                (oov_inputs,),
            )
            assertion = tf.Assert(tf.equal(tf.size(oov_indices), 0), [msg])
            lookup_checks.append(assertion)
        elif self.num_oov_indices > 1:
            # If we have multiple oov indices, we need a further hashing step.
            if tf.as_dtype(self._key_dtype).is_integer:
                # Integer keys: spread OOV values by modulo.
                oov_indices = tf.math.floormod(inputs, self.num_oov_indices)
            else:
                # String keys: spread OOV values by a fast hash.
                oov_indices = tf.strings.to_hash_bucket_fast(
                    inputs, num_buckets=self.num_oov_indices
                )
            oov_indices = oov_indices + self._oov_start_index()
            oov_locations = tf.equal(lookups, self._default_value)
            lookups = tf.where(oov_locations, oov_indices, lookups)
        # Ensure any OOV assertion runs before returning the lookups.
        with tf.control_dependencies(lookup_checks):
            return tf.identity(lookups)
    def save_own_variables(self, store):
        # Only tf-idf mode carries weight state worth persisting.
        if self.output_mode == "tf_idf":
            store["idf_weights"] = self.idf_weights_const.numpy()
    def load_own_variables(self, store):
        # Restore the idf weights saved by `save_own_variables` and refresh
        # the constant snapshot used during `call`.
        if self.output_mode == "tf_idf":
            self.idf_weights.assign(store["idf_weights"])
            self.idf_weights_const = self.idf_weights.value()
    def save_assets(self, dir_path):
        """Writes the vocabulary (one term per line) into the asset dir."""
        if self.input_vocabulary is not None:
            # Vocab saved in config.
            # TODO: consider unifying both paths.
            return
        vocabulary = self.get_vocabulary(include_special_tokens=True)
        vocabulary_filepath = tf.io.gfile.join(dir_path, "vocabulary.txt")
        with open(vocabulary_filepath, "w") as f:
            f.write("\n".join([str(w) for w in vocabulary]))
    def load_assets(self, dir_path):
        """Restores the vocabulary written by `save_assets`."""
        if self.input_vocabulary is not None:
            # Vocab saved in config.
            # TODO: consider unifying both paths.
            return
        vocabulary_filepath = tf.io.gfile.join(dir_path, "vocabulary.txt")
        # TODO: fix bug with include_special_tokens and set reload from file.
        with open(vocabulary_filepath, "r") as f:
            lines = f.read().split("\n")
            if tf.as_dtype(self.vocabulary_dtype) == tf.string:
                values = [str(line) for line in lines]
            else:
                values = [int(line) for line in lines]
            if self.output_mode == "tf_idf":
                # NOTE(review): `idf_weights=False` looks intentional here to
                # bypass the tf_idf weight requirement on reload — confirm
                # against `set_vocabulary`'s validation before changing.
                self.set_vocabulary(values, idf_weights=False)
            else:
                self.set_vocabulary(values)
    def _uninitialized_lookup_table(self):
        # Build a table whose initializer is a no-op, so it can be filled in
        # later (e.g. during SavedModel restore) without double-initializing.
        with tf.init_scope():
            initializer = get_null_initializer(
                self._key_dtype, self._value_dtype
            )
            return tf.lookup.StaticHashTable(initializer, self._default_value)
    def _lookup_table_from_tokens(self, tokens):
        """Builds a StaticHashTable mapping tokens to/from indices."""
        with tf.init_scope():
            # Indices for learned tokens start after the special tokens.
            token_start = self._token_start_index()
            token_end = token_start + tf.size(tokens)
            indices_dtype = (
                self._key_dtype if self.invert else self._value_dtype
            )
            indices = tf.range(token_start, token_end, dtype=indices_dtype)
            # Inverted layers map index -> token; forward layers the reverse.
            keys, values = (
                (indices, tokens) if self.invert else (tokens, indices)
            )
            initializer = tf.lookup.KeyValueTensorInitializer(
                keys, values, self._key_dtype, self._value_dtype
            )
            return tf.lookup.StaticHashTable(initializer, self._default_value)
    def _lookup_table_from_file(self, filename):
        """Builds a StaticHashTable from a one-term-per-line vocabulary file."""
        if self.invert:
            # index -> term: keys are line numbers, values are the lines.
            key_index = tf.lookup.TextFileIndex.LINE_NUMBER
            value_index = tf.lookup.TextFileIndex.WHOLE_LINE
        else:
            # term -> index: keys are the lines, values are line numbers.
            key_index = tf.lookup.TextFileIndex.WHOLE_LINE
            value_index = tf.lookup.TextFileIndex.LINE_NUMBER
        with tf.init_scope():
            initializer = tf.lookup.TextFileInitializer(
                filename=filename,
                key_dtype=self._key_dtype,
                key_index=key_index,
                value_dtype=self._value_dtype,
                value_index=value_index,
                # Shift indices past mask/OOV slots.
                value_index_offset=self._token_start_index(),
            )
            return tf.lookup.StaticHashTable(initializer, self._default_value)
def _convert_to_ndarray(self, x):
return np.array(x) if isinstance(x, (list, tuple)) else x
    def _expand_dims(self, inputs, axis):
        # Dispatch to the sparse-aware expand_dims when needed.
        if isinstance(inputs, tf.SparseTensor):
            return tf.sparse.expand_dims(inputs, axis)
        else:
            return tf.expand_dims(inputs, axis)
    def _oov_start_index(self):
        # OOV indices start after the mask slot, which only exists when a
        # mask token is configured and output is "int".
        return (
            1
            if self.mask_token is not None and self.output_mode == "int"
            else 0
        )
    def _token_start_index(self):
        # First index available to learned tokens, after mask and OOV slots.
        return self._oov_start_index() + self.num_oov_indices
    def _ensure_known_vocab_size(self):
        # Binned output without padding needs a frozen vocab size to fix the
        # output depth; "int" mode and padded modes do not.
        if self.output_mode == "int" or self.pad_to_max_tokens:
            return
        if self._frozen_vocab_size is None:
            raise RuntimeError(
                f"When using `output_mode={self.output_mode}` "
                "and `pad_to_max_tokens=False`, "
                "you must set the layer's vocabulary before calling it. Either "
                "pass a `vocabulary` argument to the layer, or call `adapt` "
                "with some sample data."
            )
    def _ensure_vocab_size_unchanged(self):
        # A changed vocab size would change the output depth of binned modes
        # after the layer has already been called, so forbid it.
        if self.output_mode == "int" or self.pad_to_max_tokens:
            return
        with tf.init_scope():
            new_vocab_size = self.vocabulary_size()
        if (
            self._frozen_vocab_size is not None
            and new_vocab_size != self._frozen_vocab_size
        ):
            raise RuntimeError(
                f"When using `output_mode={self.output_mode}` "
                "and `pad_to_max_tokens=False`, "
                "the vocabulary size cannot be changed after the layer is "
                f"called. Old vocab size is {self._frozen_vocab_size}, "
                f"new vocab size is {new_vocab_size}"
            )
def _find_repeated_tokens(self, vocabulary):
"""Return all repeated tokens in a vocabulary."""
vocabulary_set = set(vocabulary)
if len(vocabulary) != len(vocabulary_set):
return [
item
for item, count in collections.Counter(vocabulary).items()
if count > 1
]
else:
return []
    def _num_tokens(self, data):
        """Count the number of tokens in a ragged, sparse or dense tensor.

        Returns:
            A `(tokens, counts)` pair of 1-D tensors, one count per unique
            token.
        """
        if isinstance(data, tf.SparseTensor):
            flat_values = data.values
        elif isinstance(data, tf.RaggedTensor):
            flat_values = data.flat_values
        else:
            flat_values = tf.reshape(data, [-1])
        tokens, _, counts = tf.unique_with_counts(flat_values, out_idx="int64")
        return tokens, counts
    def _inverse_document_frequency(self, token_document_counts, num_documents):
        """Computes the inverse-document-frequency (IDF) component of "tf_idf".
        Args:
            token_document_counts: An array of the # of documents each token
                appears in.
            num_documents: An int representing the total number of documents
        Returns:
            An array of "inverse document frequency" weights.
        """
        # The +1 terms smooth the ratio so zero-count tokens stay finite.
        return tf.math.log(1 + num_documents / (1 + token_document_counts))
    # Override points for IntegerLookup and StringLookup.
    def _tensor_vocab_to_numpy(self, vocabulary):
        """Converts a tensor vocabulary to a numpy vocabulary."""
        # Subclasses may override to post-process (e.g. decode byte strings).
        return vocabulary.numpy()
def get_null_initializer(key_dtype, value_dtype):
    """Returns a no-op table initializer with the given key/value dtypes.

    Used to build a StaticHashTable that will be populated later (e.g. during
    SavedModel restore), since such a table cannot be initialized twice.
    """
    class NullInitializer(tf.lookup.KeyValueTensorInitializer):
        """A placeholder initializer for restoring from a SavedModel."""
        def __init__(self, key_dtype, value_dtype):
            """Construct a table initializer object.
            Args:
                key_dtype: Type of the table keys.
                value_dtype: Type of the table values.
            """
            # Deliberately skip the parent __init__: no key/value tensors.
            self._key_dtype = key_dtype
            self._value_dtype = value_dtype
        @property
        def key_dtype(self):
            """The expected table key dtype."""
            return self._key_dtype
        @property
        def value_dtype(self):
            """The expected table value dtype."""
            return self._value_dtype
        def initialize(self, table):
            """Returns the table initialization op."""
            # Intentionally a no-op.
            pass
    return NullInitializer(key_dtype, value_dtype)
def listify_tensors(x):
    """Convert any tensors or numpy arrays to lists for config serialization."""
    if tf.is_tensor(x):
        # Materialize the tensor as a numpy array first.
        x = x.numpy()
    return x.tolist() if isinstance(x, np.ndarray) else x
| IndexLookup |
python | ansible__ansible | lib/ansible/modules/group.py | {
"start": 16460,
"end": 18035
class ____(Group):
    """
    This is a OpenBSD Group manipulation class.
    This overrides the following methods from the generic class:-
      - group_del()
      - group_add()
      - group_mod()
    """
    platform = 'OpenBSD'
    distribution = None
    GROUPFILE = '/etc/group'
    def group_del(self):
        """Remove the group by running `groupdel <name>`."""
        cmd = [self.module.get_bin_path('groupdel', True), self.name]
        return self.execute_command(cmd)
    def group_add(self, **kwargs):
        """Create the group via `groupadd`, passing gid and uniqueness flags."""
        cmd = [self.module.get_bin_path('groupadd', True)]
        if self.gid is not None:
            cmd.append('-g')
            cmd.append(str(self.gid))
        if self.non_unique:
            # -o allows a non-unique GID.
            cmd.append('-o')
        if self.gid_min is not None:
            cmd.append('-K')
            cmd.append('GID_MIN=' + str(self.gid_min))
        if self.gid_max is not None:
            cmd.append('-K')
            cmd.append('GID_MAX=' + str(self.gid_max))
        cmd.append(self.name)
        return self.execute_command(cmd)
    def group_mod(self, **kwargs):
        """Modify the group's GID via `groupmod` if it differs from current."""
        cmd = [self.module.get_bin_path('groupmod', True)]
        info = self.group_info()
        if self.gid is not None and int(self.gid) != info[2]:
            cmd.append('-g')
            cmd.append(str(self.gid))
        if self.non_unique:
            cmd.append('-o')
        # Only the binary path in cmd means nothing to change: report no-op.
        if len(cmd) == 1:
            return (None, '', '')
        if self.module.check_mode:
            # Check mode: report success without executing.
            return (0, '', '')
        cmd.append(self.name)
        return self.execute_command(cmd)
# ===========================================
| OpenBsdGroup |
python | walkccc__LeetCode | solutions/2352. Equal Row and Column Pairs/2352.py | {
"start": 0,
"end": 328
} | class ____:
def equalPairs(self, grid: list[list[int]]) -> int:
n = len(grid)
ans = 0
for i in range(n):
for j in range(n):
k = 0
while k < n:
if grid[i][k] != grid[k][j]:
break
k += 1
if k == n: # R[i] == C[j]
ans += 1
return ans
| Solution |
python | gevent__gevent | src/greentest/3.12/test_threading.py | {
"start": 51452,
"end": 57599
} | class ____(BaseTestCase):
def pipe(self):
r, w = os.pipe()
self.addCleanup(os.close, r)
self.addCleanup(os.close, w)
if hasattr(os, 'set_blocking'):
os.set_blocking(r, False)
return (r, w)
def test_threads_join(self):
# Non-daemon threads should be joined at subinterpreter shutdown
# (issue #18808)
r, w = self.pipe()
code = textwrap.dedent(r"""
import os
import random
import threading
import time
def random_sleep():
seconds = random.random() * 0.010
time.sleep(seconds)
def f():
# Sleep a bit so that the thread is still running when
# Py_EndInterpreter is called.
random_sleep()
os.write(%d, b"x")
threading.Thread(target=f).start()
random_sleep()
""" % (w,))
ret = test.support.run_in_subinterp(code)
self.assertEqual(ret, 0)
# The thread was joined properly.
self.assertEqual(os.read(r, 1), b"x")
def test_threads_join_2(self):
# Same as above, but a delay gets introduced after the thread's
# Python code returned but before the thread state is deleted.
# To achieve this, we register a thread-local object which sleeps
# a bit when deallocated.
r, w = self.pipe()
code = textwrap.dedent(r"""
import os
import random
import threading
import time
def random_sleep():
seconds = random.random() * 0.010
time.sleep(seconds)
class Sleeper:
def __del__(self):
random_sleep()
tls = threading.local()
def f():
# Sleep a bit so that the thread is still running when
# Py_EndInterpreter is called.
random_sleep()
tls.x = Sleeper()
os.write(%d, b"x")
threading.Thread(target=f).start()
random_sleep()
""" % (w,))
ret = test.support.run_in_subinterp(code)
self.assertEqual(ret, 0)
# The thread was joined properly.
self.assertEqual(os.read(r, 1), b"x")
@requires_subinterpreters
def test_threads_join_with_no_main(self):
r_interp, w_interp = self.pipe()
INTERP = b'I'
FINI = b'F'
DONE = b'D'
interp = interpreters.create()
interp.run(f"""if True:
import os
import threading
import time
done = False
def notify_fini():
global done
done = True
os.write({w_interp}, {FINI!r})
t.join()
threading._register_atexit(notify_fini)
def task():
while not done:
time.sleep(0.1)
os.write({w_interp}, {DONE!r})
t = threading.Thread(target=task)
t.start()
os.write({w_interp}, {INTERP!r})
""")
interp.close()
self.assertEqual(os.read(r_interp, 1), INTERP)
self.assertEqual(os.read(r_interp, 1), FINI)
self.assertEqual(os.read(r_interp, 1), DONE)
@cpython_only
def test_daemon_threads_fatal_error(self):
subinterp_code = f"""if 1:
import os
import threading
import time
def f():
# Make sure the daemon thread is still running when
# Py_EndInterpreter is called.
time.sleep({test.support.SHORT_TIMEOUT})
threading.Thread(target=f, daemon=True).start()
"""
script = r"""if 1:
import _testcapi
_testcapi.run_in_subinterp(%r)
""" % (subinterp_code,)
with test.support.SuppressCrashReport():
rc, out, err = assert_python_failure("-c", script)
self.assertIn("Fatal Python error: Py_EndInterpreter: "
"not the last thread", err.decode())
def _check_allowed(self, before_start='', *,
allowed=True,
daemon_allowed=True,
daemon=False,
):
subinterp_code = textwrap.dedent(f"""
import test.support
import threading
def func():
print('this should not have run!')
t = threading.Thread(target=func, daemon={daemon})
{before_start}
t.start()
""")
script = textwrap.dedent(f"""
import test.support
test.support.run_in_subinterp_with_config(
{subinterp_code!r},
use_main_obmalloc=True,
allow_fork=True,
allow_exec=True,
allow_threads={allowed},
allow_daemon_threads={daemon_allowed},
check_multi_interp_extensions=False,
own_gil=False,
)
""")
with test.support.SuppressCrashReport():
_, _, err = assert_python_ok("-c", script)
return err.decode()
@cpython_only
def test_threads_not_allowed(self):
err = self._check_allowed(
allowed=False,
daemon_allowed=False,
daemon=False,
)
self.assertIn('RuntimeError', err)
@cpython_only
def test_daemon_threads_not_allowed(self):
with self.subTest('via Thread()'):
err = self._check_allowed(
allowed=True,
daemon_allowed=False,
daemon=True,
)
self.assertIn('RuntimeError', err)
with self.subTest('via Thread.daemon setter'):
err = self._check_allowed(
't.daemon = True',
allowed=True,
daemon_allowed=False,
daemon=False,
)
self.assertIn('RuntimeError', err)
| SubinterpThreadingTests |
python | django-mptt__django-mptt | tests/myapp/models.py | {
"start": 4734,
"end": 5101
} | class ____(models.Model):
fk = TreeForeignKey(Category, related_name="+", on_delete=models.CASCADE)
one = TreeOneToOneField(Category, related_name="+", on_delete=models.CASCADE)
m2m = TreeManyToManyField(Category, related_name="+")
# for testing various types of inheritance:
# 1. multi-table inheritance, with mptt fields on base class.
| ReferencingModel |
python | huggingface__transformers | src/transformers/models/mpt/configuration_mpt.py | {
"start": 807,
"end": 4577
} | class ____(PreTrainedConfig):
"""
This is the configuration class to store the configuration of a [`MptAttention`] class. It is used to instantiate
attention layers according to the specified arguments, defining the layers architecture. Instantiating a
configuration with the defaults will yield a similar configuration to that of the MPT
[mosaicml/mpt-7b](https://huggingface.co/mosaicml/mpt-7b) architecture. Most of the arguments are kept for backward
compatibility with previous MPT models that are hosted on the Hub (previously with `trust_remote_code=True`).
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
attn_type (`str`, *optional*, defaults to `"multihead_attention"`):
type of attention to use. Options: `"multihead_attention"`, `"multiquery_attention"`.
attn_pdrop (`float`, *optional*, defaults to `0.0`):
The dropout probability for the attention layers.
attn_impl (`str`, *optional*, defaults to `"torch"`):
The attention implementation to use. One of `"torch"`, `"flash"`, or `"triton"`.
clip_qkv (`float`, *optional*):
If not `None`, clip the queries, keys, and values in the attention layer to this value.
softmax_scale (`float`, *optional*):
If not `None`, scale the softmax in the attention layer by this value. If `None`, will default to
`1/sqrt(hidden_size)`.
prefix_lm (`bool`, *optional*, defaults to `False`):
Whether the model should operate as a Prefix LM. This requires passing an extra `prefix_mask` argument
which indicates which tokens belong to the prefix. Tokens in the prefix can attend to one another
bi-directionally. Tokens outside the prefix use causal attention.
qk_ln (`bool`, *optional*, defaults to `False`):
Whether to apply layer normalization to the queries and keys in the attention layer.
attn_uses_sequence_id (`bool`, *optional*, defaults to `False`):
Whether to restrict attention to tokens that have the same token_type_ids. When the model is in `train`
mode, this requires passing an extra *token_type_ids* argument which indicates which sub-sequence each
token belongs to. Defaults to `False` meaning any provided *token_type_ids* will be ignored.
alibi (`bool`, *optional*, defaults to `True`):
Whether or not to use the alibi bias instead of positional embedding.
alibi_bias_max (`int`, *optional*, defaults to 8):
The maximum value of the alibi bias.
"""
base_config_key = "attn_config"
def __init__(
self,
attn_type="multihead_attention",
attn_pdrop=0,
attn_impl="torch",
clip_qkv=None,
softmax_scale=None,
prefix_lm=False,
qk_ln=False,
attn_uses_sequence_id=False,
alibi=True,
alibi_bias_max=8,
**kwargs,
):
super().__init__()
self.attn_type = attn_type
self.attn_pdrop = attn_pdrop
self.attn_impl = attn_impl
self.clip_qkv = clip_qkv
self.softmax_scale = softmax_scale
self.prefix_lm = prefix_lm
self.attn_uses_sequence_id = attn_uses_sequence_id
self.alibi = alibi
self.qk_ln = qk_ln
self.alibi_bias_max = alibi_bias_max
if attn_type not in ["multihead_attention", "multiquery_attention"]:
raise ValueError(
f"`attn_type` has to be either `multihead_attention` or `multiquery_attention`. Received: {attn_type}"
)
| MptAttentionConfig |
python | kubernetes-client__python | kubernetes/client/api/storagemigration_v1alpha1_api.py | {
"start": 543,
"end": 125245
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def create_storage_version_migration(self, body, **kwargs): # noqa: E501
"""create_storage_version_migration # noqa: E501
create a StorageVersionMigration # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_storage_version_migration(body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param V1alpha1StorageVersionMigration body: (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1alpha1StorageVersionMigration
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.create_storage_version_migration_with_http_info(body, **kwargs) # noqa: E501
def create_storage_version_migration_with_http_info(self, body, **kwargs): # noqa: E501
"""create_storage_version_migration # noqa: E501
create a StorageVersionMigration # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_storage_version_migration_with_http_info(body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param V1alpha1StorageVersionMigration body: (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1alpha1StorageVersionMigration, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'body',
'pretty',
'dry_run',
'field_manager',
'field_validation'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method create_storage_version_migration" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'body' is set
if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
local_var_params['body'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `body` when calling `create_storage_version_migration`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/storagemigration.k8s.io/v1alpha1/storageversionmigrations', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1alpha1StorageVersionMigration', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def delete_collection_storage_version_migration(self, **kwargs): # noqa: E501
"""delete_collection_storage_version_migration # noqa: E501
delete collection of StorageVersionMigration # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_collection_storage_version_migration(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
:param bool ignore_store_read_error_with_cluster_breaking_potential: if set to true, it will trigger an unsafe deletion of the resource in case the normal deletion flow fails with a corrupt object error. A resource is considered corrupt if it can not be retrieved from the underlying storage successfully because of a) its data can not be transformed e.g. decryption failure, or b) it fails to decode into an object. NOTE: unsafe deletion ignores finalizer constraints, skips precondition checks, and removes the object from the storage. WARNING: This may potentially break the cluster if the workload associated with the resource being unsafe-deleted relies on normal deletion flow. Use only if you REALLY know what you are doing. The default value is false, and the user must opt in to enable it
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
:param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
:param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
:param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param V1DeleteOptions body:
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1Status
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.delete_collection_storage_version_migration_with_http_info(**kwargs) # noqa: E501
def delete_collection_storage_version_migration_with_http_info(self, **kwargs): # noqa: E501
"""delete_collection_storage_version_migration # noqa: E501
delete collection of StorageVersionMigration # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_collection_storage_version_migration_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
:param bool ignore_store_read_error_with_cluster_breaking_potential: if set to true, it will trigger an unsafe deletion of the resource in case the normal deletion flow fails with a corrupt object error. A resource is considered corrupt if it can not be retrieved from the underlying storage successfully because of a) its data can not be transformed e.g. decryption failure, or b) it fails to decode into an object. NOTE: unsafe deletion ignores finalizer constraints, skips precondition checks, and removes the object from the storage. WARNING: This may potentially break the cluster if the workload associated with the resource being unsafe-deleted relies on normal deletion flow. Use only if you REALLY know what you are doing. The default value is false, and the user must opt in to enable it
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
:param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
:param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
:param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param V1DeleteOptions body:
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1Status, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'pretty',
'_continue',
'dry_run',
'field_selector',
'grace_period_seconds',
'ignore_store_read_error_with_cluster_breaking_potential',
'label_selector',
'limit',
'orphan_dependents',
'propagation_policy',
'resource_version',
'resource_version_match',
'send_initial_events',
'timeout_seconds',
'body'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_collection_storage_version_migration" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if '_continue' in local_var_params and local_var_params['_continue'] is not None: # noqa: E501
query_params.append(('continue', local_var_params['_continue'])) # noqa: E501
if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None: # noqa: E501
query_params.append(('fieldSelector', local_var_params['field_selector'])) # noqa: E501
if 'grace_period_seconds' in local_var_params and local_var_params['grace_period_seconds'] is not None: # noqa: E501
query_params.append(('gracePeriodSeconds', local_var_params['grace_period_seconds'])) # noqa: E501
if 'ignore_store_read_error_with_cluster_breaking_potential' in local_var_params and local_var_params['ignore_store_read_error_with_cluster_breaking_potential'] is not None: # noqa: E501
query_params.append(('ignoreStoreReadErrorWithClusterBreakingPotential', local_var_params['ignore_store_read_error_with_cluster_breaking_potential'])) # noqa: E501
if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None: # noqa: E501
query_params.append(('labelSelector', local_var_params['label_selector'])) # noqa: E501
if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501
query_params.append(('limit', local_var_params['limit'])) # noqa: E501
if 'orphan_dependents' in local_var_params and local_var_params['orphan_dependents'] is not None: # noqa: E501
query_params.append(('orphanDependents', local_var_params['orphan_dependents'])) # noqa: E501
if 'propagation_policy' in local_var_params and local_var_params['propagation_policy'] is not None: # noqa: E501
query_params.append(('propagationPolicy', local_var_params['propagation_policy'])) # noqa: E501
if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None: # noqa: E501
query_params.append(('resourceVersion', local_var_params['resource_version'])) # noqa: E501
if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None: # noqa: E501
query_params.append(('resourceVersionMatch', local_var_params['resource_version_match'])) # noqa: E501
if 'send_initial_events' in local_var_params and local_var_params['send_initial_events'] is not None: # noqa: E501
query_params.append(('sendInitialEvents', local_var_params['send_initial_events'])) # noqa: E501
if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None: # noqa: E501
query_params.append(('timeoutSeconds', local_var_params['timeout_seconds'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/storagemigration.k8s.io/v1alpha1/storageversionmigrations', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Status', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def delete_storage_version_migration(self, name, **kwargs):  # noqa: E501
    """delete_storage_version_migration  # noqa: E501

    Delete the named StorageVersionMigration. Synchronous by default;
    pass async_req=True to receive a thread whose ``get()`` yields the
    result instead.

    :param async_req bool: execute request asynchronously
    :param str name: name of the StorageVersionMigration (required)
    :param str pretty: if 'true', pretty-print the output
    :param str dry_run: dry-run directive; 'All' processes every stage
        without persisting modifications
    :param int grace_period_seconds: non-negative seconds before the
        object is deleted; 0 means delete immediately
    :param bool ignore_store_read_error_with_cluster_breaking_potential:
        opt-in unsafe deletion of a corrupt object (skips finalizers and
        precondition checks — use only if you really know what you are doing)
    :param bool orphan_dependents: deprecated; prefer propagation_policy
    :param str propagation_policy: garbage-collection policy — 'Orphan',
        'Background', or 'Foreground'
    :param V1DeleteOptions body:
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
        without reading/decoding. Default is True.
    :param _request_timeout: total timeout, or a (connect, read) tuple
    :return: V1Status (or the request thread when async_req=True)
    """
    # Force the bare-data form; the *_with_http_info variant otherwise
    # returns the full (data, status, headers) tuple.
    kwargs.update(_return_http_data_only=True)
    return self.delete_storage_version_migration_with_http_info(name, **kwargs)  # noqa: E501
def delete_storage_version_migration_with_http_info(self, name, **kwargs):  # noqa: E501
    """delete_storage_version_migration  # noqa: E501

    Delete the named StorageVersionMigration, returning the full HTTP
    response triple. Synchronous by default; pass async_req=True to
    receive a thread whose ``get()`` yields the result instead.

    :param async_req bool: execute request asynchronously
    :param str name: name of the StorageVersionMigration (required)
    :param str pretty: if 'true', pretty-print the output
    :param str dry_run: dry-run directive; 'All' processes every stage
        without persisting modifications
    :param int grace_period_seconds: non-negative seconds before the
        object is deleted; 0 means delete immediately
    :param bool ignore_store_read_error_with_cluster_breaking_potential:
        opt-in unsafe deletion of a corrupt object (skips finalizers and
        precondition checks — use only if you really know what you are doing)
    :param bool orphan_dependents: deprecated; prefer propagation_policy
    :param str propagation_policy: garbage-collection policy — 'Orphan',
        'Background', or 'Foreground'
    :param V1DeleteOptions body:
    :param _return_http_data_only: response data without status code and
        headers
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
        without reading/decoding. Default is True.
    :param _request_timeout: total timeout, or a (connect, read) tuple
    :return: tuple(V1Status, status_code(int), headers(HTTPHeaderDict))
    """
    # Endpoint parameters plus the generic client-control keywords.
    accepted = [
        'name',
        'pretty',
        'dry_run',
        'grace_period_seconds',
        'ignore_store_read_error_with_cluster_breaking_potential',
        'orphan_dependents',
        'propagation_policy',
        'body',
        'async_req',
        '_return_http_data_only',
        '_preload_content',
        '_request_timeout',
    ]
    params = {'name': name}
    for key, val in six.iteritems(kwargs):
        if key not in accepted:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method delete_storage_version_migration" % key
            )
        params[key] = val
    # verify the required parameter 'name' is set
    if self.api_client.client_side_validation and params.get('name') is None:  # noqa: E501
        raise ApiValueError("Missing the required parameter `name` when calling `delete_storage_version_migration`")  # noqa: E501
    path_params = {'name': params['name']}  # noqa: E501
    # Map python_style kwargs to their camelCase wire names, keeping the
    # original emission order and skipping unset values.
    query_params = []
    for py_name, wire_name in (
            ('pretty', 'pretty'),
            ('dry_run', 'dryRun'),
            ('grace_period_seconds', 'gracePeriodSeconds'),
            ('ignore_store_read_error_with_cluster_breaking_potential', 'ignoreStoreReadErrorWithClusterBreakingPotential'),  # noqa: E501
            ('orphan_dependents', 'orphanDependents'),
            ('propagation_policy', 'propagationPolicy')):
        if params.get(py_name) is not None:
            query_params.append((wire_name, params[py_name]))
    # HTTP header `Accept`
    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor'])  # noqa: E501
    }
    return self.api_client.call_api(
        '/apis/storagemigration.k8s.io/v1alpha1/storageversionmigrations/{name}', 'DELETE',
        path_params,
        query_params,
        header_params,
        body=params.get('body'),
        post_params=[],
        files={},
        response_type='V1Status',  # noqa: E501
        auth_settings=['BearerToken'],  # noqa: E501
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={})
def get_api_resources(self, **kwargs):  # noqa: E501
    """get_api_resources  # noqa: E501

    List the resources available in the storagemigration.k8s.io/v1alpha1
    API group. Synchronous by default; pass async_req=True to receive a
    thread whose ``get()`` yields the result instead.

    :param async_req bool: execute request asynchronously
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
        without reading/decoding. Default is True.
    :param _request_timeout: total timeout, or a (connect, read) tuple
    :return: V1APIResourceList (or the request thread when async_req=True)
    """
    # Force the bare-data form; the *_with_http_info variant otherwise
    # returns the full (data, status, headers) tuple.
    kwargs.update(_return_http_data_only=True)
    return self.get_api_resources_with_http_info(**kwargs)  # noqa: E501
def get_api_resources_with_http_info(self, **kwargs):  # noqa: E501
    """get_api_resources  # noqa: E501

    List the resources available in the storagemigration.k8s.io/v1alpha1
    API group, returning the full HTTP response triple. Synchronous by
    default; pass async_req=True to receive a thread whose ``get()``
    yields the result instead.

    :param async_req bool: execute request asynchronously
    :param _return_http_data_only: response data without status code and
        headers
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
        without reading/decoding. Default is True.
    :param _request_timeout: total timeout, or a (connect, read) tuple
    :return: tuple(V1APIResourceList, status_code(int), headers(HTTPHeaderDict))
    """
    # This endpoint takes no API parameters — only the generic
    # client-control keywords are accepted.
    accepted = [
        'async_req',
        '_return_http_data_only',
        '_preload_content',
        '_request_timeout',
    ]
    params = {}
    for key, val in six.iteritems(kwargs):
        if key not in accepted:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_api_resources" % key
            )
        params[key] = val
    # HTTP header `Accept`
    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor'])  # noqa: E501
    }
    return self.api_client.call_api(
        '/apis/storagemigration.k8s.io/v1alpha1/', 'GET',
        {},
        [],
        header_params,
        body=None,
        post_params=[],
        files={},
        response_type='V1APIResourceList',  # noqa: E501
        auth_settings=['BearerToken'],  # noqa: E501
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={})
def list_storage_version_migration(self, **kwargs):  # noqa: E501
    """list_storage_version_migration  # noqa: E501

    List or watch objects of kind StorageVersionMigration. Synchronous
    by default; pass async_req=True to receive a thread whose ``get()``
    yields the result instead.

    :param async_req bool: execute request asynchronously
    :param str pretty: if 'true', pretty-print the output
    :param bool allow_watch_bookmarks: request BOOKMARK watch events
        (servers may ignore this; only meaningful with watch)
    :param str _continue: continue token from a previous chunked list
    :param str field_selector: restrict results by object fields
    :param str label_selector: restrict results by object labels
    :param int limit: maximum number of responses for a list call;
        pagination continues via the `continue` token
    :param str resource_version: resource-version constraint for the read
    :param str resource_version_match: how resource_version is applied to
        list calls
    :param bool send_initial_events: with watch=True, begin the stream
        with synthetic events for the current collection state
    :param int timeout_seconds: limit the duration of the list/watch call
    :param bool watch: stream add/update/remove notifications for the
        described resources
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
        without reading/decoding. Default is True.
    :param _request_timeout: total timeout, or a (connect, read) tuple
    :return: V1alpha1StorageVersionMigrationList (or the request thread
        when async_req=True)
    """
    # Force the bare-data form; the *_with_http_info variant otherwise
    # returns the full (data, status, headers) tuple.
    kwargs.update(_return_http_data_only=True)
    return self.list_storage_version_migration_with_http_info(**kwargs)  # noqa: E501
def list_storage_version_migration_with_http_info(self, **kwargs):  # noqa: E501
    """list_storage_version_migration  # noqa: E501

    List or watch objects of kind StorageVersionMigration, returning the
    full HTTP response triple. Synchronous by default; pass
    async_req=True to receive a thread whose ``get()`` yields the result
    instead.

    :param async_req bool: execute request asynchronously
    :param str pretty: if 'true', pretty-print the output
    :param bool allow_watch_bookmarks: request BOOKMARK watch events
        (servers may ignore this; only meaningful with watch)
    :param str _continue: continue token from a previous chunked list
    :param str field_selector: restrict results by object fields
    :param str label_selector: restrict results by object labels
    :param int limit: maximum number of responses for a list call;
        pagination continues via the `continue` token
    :param str resource_version: resource-version constraint for the read
    :param str resource_version_match: how resource_version is applied to
        list calls
    :param bool send_initial_events: with watch=True, begin the stream
        with synthetic events for the current collection state
    :param int timeout_seconds: limit the duration of the list/watch call
    :param bool watch: stream add/update/remove notifications for the
        described resources
    :param _return_http_data_only: response data without status code and
        headers
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
        without reading/decoding. Default is True.
    :param _request_timeout: total timeout, or a (connect, read) tuple
    :return: tuple(V1alpha1StorageVersionMigrationList, status_code(int), headers(HTTPHeaderDict))
    """
    # Endpoint parameters plus the generic client-control keywords.
    accepted = [
        'pretty',
        'allow_watch_bookmarks',
        '_continue',
        'field_selector',
        'label_selector',
        'limit',
        'resource_version',
        'resource_version_match',
        'send_initial_events',
        'timeout_seconds',
        'watch',
        'async_req',
        '_return_http_data_only',
        '_preload_content',
        '_request_timeout',
    ]
    params = {}
    for key, val in six.iteritems(kwargs):
        if key not in accepted:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method list_storage_version_migration" % key
            )
        params[key] = val
    # Map python_style kwargs to their camelCase wire names, keeping the
    # original emission order and skipping unset values. `_continue` is
    # underscore-prefixed only to avoid shadowing the Python keyword.
    query_params = []
    for py_name, wire_name in (
            ('pretty', 'pretty'),
            ('allow_watch_bookmarks', 'allowWatchBookmarks'),
            ('_continue', 'continue'),
            ('field_selector', 'fieldSelector'),
            ('label_selector', 'labelSelector'),
            ('limit', 'limit'),
            ('resource_version', 'resourceVersion'),
            ('resource_version_match', 'resourceVersionMatch'),
            ('send_initial_events', 'sendInitialEvents'),
            ('timeout_seconds', 'timeoutSeconds'),
            ('watch', 'watch')):
        if params.get(py_name) is not None:
            query_params.append((wire_name, params[py_name]))
    # HTTP header `Accept` — includes the streaming media types used by
    # watch requests.
    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch', 'application/cbor-seq'])  # noqa: E501
    }
    return self.api_client.call_api(
        '/apis/storagemigration.k8s.io/v1alpha1/storageversionmigrations', 'GET',
        {},
        query_params,
        header_params,
        body=None,
        post_params=[],
        files={},
        response_type='V1alpha1StorageVersionMigrationList',  # noqa: E501
        auth_settings=['BearerToken'],  # noqa: E501
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={})
def patch_storage_version_migration(self, name, body, **kwargs): # noqa: E501
"""patch_storage_version_migration # noqa: E501
partially update the specified StorageVersionMigration # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_storage_version_migration(name, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the StorageVersionMigration (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
:param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
:param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1alpha1StorageVersionMigration
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.patch_storage_version_migration_with_http_info(name, body, **kwargs) # noqa: E501
    def patch_storage_version_migration_with_http_info(self, name, body, **kwargs):  # noqa: E501
        """patch_storage_version_migration  # noqa: E501
        partially update the specified StorageVersionMigration  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.patch_storage_version_migration_with_http_info(name, body, async_req=True)
        >>> result = thread.get()
        :param async_req bool: execute request asynchronously
        :param str name: name of the StorageVersionMigration (required)
        :param object body: (required)
        :param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
        :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
        :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(V1alpha1StorageVersionMigration, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # NOTE: locals() must be the first statement so the snapshot holds
        # exactly the caller-supplied arguments (self, name, body, kwargs).
        local_var_params = locals()
        all_params = [
            'name',
            'body',
            'pretty',
            'dry_run',
            'field_manager',
            'field_validation',
            'force'
        ]
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )
        # Reject any keyword argument the API does not define, then flatten
        # the accepted kwargs into the parameter snapshot.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method patch_storage_version_migration" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'name' is set
        if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
                                                        local_var_params['name'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `name` when calling `patch_storage_version_migration`")  # noqa: E501
        # verify the required parameter 'body' is set
        if self.api_client.client_side_validation and ('body' not in local_var_params or  # noqa: E501
                                                        local_var_params['body'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `body` when calling `patch_storage_version_migration`")  # noqa: E501
        collection_formats = {}
        path_params = {}
        if 'name' in local_var_params:
            path_params['name'] = local_var_params['name']  # noqa: E501
        # Only optional parameters that were explicitly supplied (non-None)
        # are sent on the query string.
        query_params = []
        if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
            query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
        if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None:  # noqa: E501
            query_params.append(('dryRun', local_var_params['dry_run']))  # noqa: E501
        if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None:  # noqa: E501
            query_params.append(('fieldManager', local_var_params['field_manager']))  # noqa: E501
        if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None:  # noqa: E501
            query_params.append(('fieldValidation', local_var_params['field_validation']))  # noqa: E501
        if 'force' in local_var_params and local_var_params['force'] is not None:  # noqa: E501
            query_params.append(('force', local_var_params['force']))  # noqa: E501
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        if 'body' in local_var_params:
            body_params = local_var_params['body']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor'])  # noqa: E501
        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
            ['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json', 'application/apply-patch+yaml', 'application/apply-patch+cbor'])  # noqa: E501
        # Authentication setting
        auth_settings = ['BearerToken']  # noqa: E501
        return self.api_client.call_api(
            '/apis/storagemigration.k8s.io/v1alpha1/storageversionmigrations/{name}', 'PATCH',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='V1alpha1StorageVersionMigration',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
def patch_storage_version_migration_status(self, name, body, **kwargs): # noqa: E501
"""patch_storage_version_migration_status # noqa: E501
partially update status of the specified StorageVersionMigration # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_storage_version_migration_status(name, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the StorageVersionMigration (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
:param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
:param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1alpha1StorageVersionMigration
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.patch_storage_version_migration_status_with_http_info(name, body, **kwargs) # noqa: E501
    def patch_storage_version_migration_status_with_http_info(self, name, body, **kwargs):  # noqa: E501
        """patch_storage_version_migration_status  # noqa: E501
        partially update status of the specified StorageVersionMigration  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.patch_storage_version_migration_status_with_http_info(name, body, async_req=True)
        >>> result = thread.get()
        :param async_req bool: execute request asynchronously
        :param str name: name of the StorageVersionMigration (required)
        :param object body: (required)
        :param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
        :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
        :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(V1alpha1StorageVersionMigration, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # NOTE: locals() must be the first statement so the snapshot holds
        # exactly the caller-supplied arguments (self, name, body, kwargs).
        local_var_params = locals()
        all_params = [
            'name',
            'body',
            'pretty',
            'dry_run',
            'field_manager',
            'field_validation',
            'force'
        ]
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )
        # Reject any keyword argument the API does not define, then flatten
        # the accepted kwargs into the parameter snapshot.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method patch_storage_version_migration_status" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'name' is set
        if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
                                                        local_var_params['name'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `name` when calling `patch_storage_version_migration_status`")  # noqa: E501
        # verify the required parameter 'body' is set
        if self.api_client.client_side_validation and ('body' not in local_var_params or  # noqa: E501
                                                        local_var_params['body'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `body` when calling `patch_storage_version_migration_status`")  # noqa: E501
        collection_formats = {}
        path_params = {}
        if 'name' in local_var_params:
            path_params['name'] = local_var_params['name']  # noqa: E501
        # Only optional parameters that were explicitly supplied (non-None)
        # are sent on the query string.
        query_params = []
        if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
            query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
        if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None:  # noqa: E501
            query_params.append(('dryRun', local_var_params['dry_run']))  # noqa: E501
        if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None:  # noqa: E501
            query_params.append(('fieldManager', local_var_params['field_manager']))  # noqa: E501
        if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None:  # noqa: E501
            query_params.append(('fieldValidation', local_var_params['field_validation']))  # noqa: E501
        if 'force' in local_var_params and local_var_params['force'] is not None:  # noqa: E501
            query_params.append(('force', local_var_params['force']))  # noqa: E501
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        if 'body' in local_var_params:
            body_params = local_var_params['body']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor'])  # noqa: E501
        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
            ['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json', 'application/apply-patch+yaml', 'application/apply-patch+cbor'])  # noqa: E501
        # Authentication setting
        auth_settings = ['BearerToken']  # noqa: E501
        return self.api_client.call_api(
            '/apis/storagemigration.k8s.io/v1alpha1/storageversionmigrations/{name}/status', 'PATCH',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='V1alpha1StorageVersionMigration',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
def read_storage_version_migration(self, name, **kwargs): # noqa: E501
"""read_storage_version_migration # noqa: E501
read the specified StorageVersionMigration # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_storage_version_migration(name, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the StorageVersionMigration (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1alpha1StorageVersionMigration
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.read_storage_version_migration_with_http_info(name, **kwargs) # noqa: E501
    def read_storage_version_migration_with_http_info(self, name, **kwargs):  # noqa: E501
        """read_storage_version_migration  # noqa: E501
        read the specified StorageVersionMigration  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.read_storage_version_migration_with_http_info(name, async_req=True)
        >>> result = thread.get()
        :param async_req bool: execute request asynchronously
        :param str name: name of the StorageVersionMigration (required)
        :param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(V1alpha1StorageVersionMigration, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # NOTE: locals() must be the first statement so the snapshot holds
        # exactly the caller-supplied arguments (self, name, kwargs).
        local_var_params = locals()
        all_params = [
            'name',
            'pretty'
        ]
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )
        # Reject any keyword argument the API does not define, then flatten
        # the accepted kwargs into the parameter snapshot.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method read_storage_version_migration" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'name' is set
        if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
                                                        local_var_params['name'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `name` when calling `read_storage_version_migration`")  # noqa: E501
        collection_formats = {}
        path_params = {}
        if 'name' in local_var_params:
            path_params['name'] = local_var_params['name']  # noqa: E501
        # Only optional parameters that were explicitly supplied (non-None)
        # are sent on the query string.
        query_params = []
        if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
            query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor'])  # noqa: E501
        # Authentication setting
        auth_settings = ['BearerToken']  # noqa: E501
        return self.api_client.call_api(
            '/apis/storagemigration.k8s.io/v1alpha1/storageversionmigrations/{name}', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='V1alpha1StorageVersionMigration',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
def read_storage_version_migration_status(self, name, **kwargs): # noqa: E501
"""read_storage_version_migration_status # noqa: E501
read status of the specified StorageVersionMigration # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_storage_version_migration_status(name, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the StorageVersionMigration (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1alpha1StorageVersionMigration
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.read_storage_version_migration_status_with_http_info(name, **kwargs) # noqa: E501
    def read_storage_version_migration_status_with_http_info(self, name, **kwargs):  # noqa: E501
        """read_storage_version_migration_status  # noqa: E501
        read status of the specified StorageVersionMigration  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.read_storage_version_migration_status_with_http_info(name, async_req=True)
        >>> result = thread.get()
        :param async_req bool: execute request asynchronously
        :param str name: name of the StorageVersionMigration (required)
        :param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(V1alpha1StorageVersionMigration, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # NOTE: locals() must be the first statement so the snapshot holds
        # exactly the caller-supplied arguments (self, name, kwargs).
        local_var_params = locals()
        all_params = [
            'name',
            'pretty'
        ]
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )
        # Reject any keyword argument the API does not define, then flatten
        # the accepted kwargs into the parameter snapshot.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method read_storage_version_migration_status" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'name' is set
        if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
                                                        local_var_params['name'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `name` when calling `read_storage_version_migration_status`")  # noqa: E501
        collection_formats = {}
        path_params = {}
        if 'name' in local_var_params:
            path_params['name'] = local_var_params['name']  # noqa: E501
        # Only optional parameters that were explicitly supplied (non-None)
        # are sent on the query string.
        query_params = []
        if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
            query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor'])  # noqa: E501
        # Authentication setting
        auth_settings = ['BearerToken']  # noqa: E501
        return self.api_client.call_api(
            '/apis/storagemigration.k8s.io/v1alpha1/storageversionmigrations/{name}/status', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='V1alpha1StorageVersionMigration',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
def replace_storage_version_migration(self, name, body, **kwargs): # noqa: E501
"""replace_storage_version_migration # noqa: E501
replace the specified StorageVersionMigration # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_storage_version_migration(name, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the StorageVersionMigration (required)
:param V1alpha1StorageVersionMigration body: (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1alpha1StorageVersionMigration
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.replace_storage_version_migration_with_http_info(name, body, **kwargs) # noqa: E501
def replace_storage_version_migration_with_http_info(self, name, body, **kwargs): # noqa: E501
"""replace_storage_version_migration # noqa: E501
replace the specified StorageVersionMigration # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_storage_version_migration_with_http_info(name, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the StorageVersionMigration (required)
:param V1alpha1StorageVersionMigration body: (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1alpha1StorageVersionMigration, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'name',
'body',
'pretty',
'dry_run',
'field_manager',
'field_validation'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method replace_storage_version_migration" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'name' is set
if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
local_var_params['name'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `name` when calling `replace_storage_version_migration`") # noqa: E501
# verify the required parameter 'body' is set
if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
local_var_params['body'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `body` when calling `replace_storage_version_migration`") # noqa: E501
collection_formats = {}
path_params = {}
if 'name' in local_var_params:
path_params['name'] = local_var_params['name'] # noqa: E501
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/storagemigration.k8s.io/v1alpha1/storageversionmigrations/{name}', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1alpha1StorageVersionMigration', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def replace_storage_version_migration_status(self, name, body, **kwargs): # noqa: E501
"""replace_storage_version_migration_status # noqa: E501
replace status of the specified StorageVersionMigration # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_storage_version_migration_status(name, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the StorageVersionMigration (required)
:param V1alpha1StorageVersionMigration body: (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1alpha1StorageVersionMigration
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.replace_storage_version_migration_status_with_http_info(name, body, **kwargs) # noqa: E501
def replace_storage_version_migration_status_with_http_info(self, name, body, **kwargs): # noqa: E501
"""replace_storage_version_migration_status # noqa: E501
replace status of the specified StorageVersionMigration # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_storage_version_migration_status_with_http_info(name, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the StorageVersionMigration (required)
:param V1alpha1StorageVersionMigration body: (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1alpha1StorageVersionMigration, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'name',
'body',
'pretty',
'dry_run',
'field_manager',
'field_validation'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method replace_storage_version_migration_status" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'name' is set
if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
local_var_params['name'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `name` when calling `replace_storage_version_migration_status`") # noqa: E501
# verify the required parameter 'body' is set
if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
local_var_params['body'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `body` when calling `replace_storage_version_migration_status`") # noqa: E501
collection_formats = {}
path_params = {}
if 'name' in local_var_params:
path_params['name'] = local_var_params['name'] # noqa: E501
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/storagemigration.k8s.io/v1alpha1/storageversionmigrations/{name}/status', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1alpha1StorageVersionMigration', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
| StoragemigrationV1alpha1Api |
python | numpy__numpy | benchmarks/benchmarks/bench_core.py | {
"start": 2635,
"end": 3094
} | class ____(Benchmark):
def setup(self):
self.amid = np.ones(50000)
self.bmid = np.ones(50000)
self.alarge = np.ones(1000000)
self.blarge = np.ones(1000000)
def time_mid(self):
(self.amid * 2) + self.bmid
def time_mid2(self):
(self.amid + self.bmid) - 2
def time_large(self):
(self.alarge * 2) + self.blarge
def time_large2(self):
(self.alarge + self.blarge) - 2
| Temporaries |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 529400,
"end": 530037
} | class ____(sgqlc.types.relay.Connection):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("edges", "nodes", "page_info", "total_count")
edges = sgqlc.types.Field(
sgqlc.types.list_of("ProjectV2ViewEdge"), graphql_name="edges"
)
nodes = sgqlc.types.Field(
sgqlc.types.list_of("ProjectV2View"), graphql_name="nodes"
)
page_info = sgqlc.types.Field(
sgqlc.types.non_null(PageInfo), graphql_name="pageInfo"
)
total_count = sgqlc.types.Field(
sgqlc.types.non_null(Int), graphql_name="totalCount"
)
| ProjectV2ViewConnection |
python | openai__openai-python | src/openai/types/conversations/conversation_item.py | {
"start": 3752,
"end": 4085
} | class ____(BaseModel):
input_schema: object
"""The JSON schema describing the tool's input."""
name: str
"""The name of the tool."""
annotations: Optional[object] = None
"""Additional annotations about the tool."""
description: Optional[str] = None
"""The description of the tool."""
| McpListToolsTool |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.