language
stringclasses 1
value | repo
stringclasses 346
values | path
stringlengths 6
201
| class_span
dict | source
stringlengths 21
2.38M
| target
stringlengths 1
96
|
|---|---|---|---|---|---|
python
|
pypa__installer
|
tests/test_utils.py
|
{
"start": 3813,
"end": 4061
}
|
class ____:
def test_basic_functionality(self):
data = b"input data is this"
size = len(data)
with BytesIO(data) as source:
result = get_stream_length(source)
assert result == size
|
TestGetStreamLength
|
python
|
cython__cython
|
Cython/Compiler/Nodes.py
|
{
"start": 20655,
"end": 21070
}
|
class ____(_CReferenceDeclaratorBaseNode):
def analyse(self, base_type, env, nonempty=0, visibility=None, in_pxd=False):
if base_type.is_pyobject:
error(self.pos, "Reference base type cannot be a Python object")
ref_type = PyrexTypes.c_ref_type(base_type)
return self.base.analyse(ref_type, env, nonempty=nonempty, visibility=visibility, in_pxd=in_pxd)
|
CReferenceDeclaratorNode
|
python
|
jazzband__django-waffle
|
waffle/tests/test_management.py
|
{
"start": 9177,
"end": 10948
}
|
class ____(TestCase):
def test_create(self):
""" The command should create a new switch. """
name = 'test'
call_command('waffle_switch', name, 'on', create=True)
switch = get_waffle_switch_model().objects.get(name=name, active=True)
switch.delete()
call_command('waffle_switch', name, 'off', create=True)
get_waffle_switch_model().objects.get(name=name, active=False)
def test_not_create(self):
""" The command shouldn't create a new switch if the create flag is
not set.
"""
name = 'test'
with self.assertRaisesRegex(CommandError, 'This switch does not exist.'):
call_command('waffle_switch', name, 'on')
self.assertFalse(get_waffle_switch_model().objects.filter(name=name).exists())
def test_update(self):
""" The command should update an existing switch. """
name = 'test'
switch = get_waffle_switch_model().objects.create(name=name, active=True)
call_command('waffle_switch', name, 'off')
switch.refresh_from_db()
self.assertFalse(switch.active)
call_command('waffle_switch', name, 'on')
switch.refresh_from_db()
self.assertTrue(switch.active)
def test_list(self):
""" The command should list all switches."""
stdout = io.StringIO()
get_waffle_switch_model().objects.create(name='switch1', active=True)
get_waffle_switch_model().objects.create(name='switch2', active=False)
call_command('waffle_switch', list_switches=True, stdout=stdout)
expected = 'Switches:\nswitch1: on\nswitch2: off'
actual = stdout.getvalue().strip()
self.assertEqual(actual, expected)
|
WaffleSwitchManagementCommandTests
|
python
|
sympy__sympy
|
sympy/combinatorics/rewritingsystem_fsm.py
|
{
"start": 0,
"end": 1276
}
|
class ____:
'''
A representation of a state managed by a ``StateMachine``.
Attributes:
name (instance of FreeGroupElement or string) -- State name which is also assigned to the Machine.
transisitons (OrderedDict) -- Represents all the transitions of the state object.
state_type (string) -- Denotes the type (accept/start/dead) of the state.
rh_rule (instance of FreeGroupElement) -- right hand rule for dead state.
state_machine (instance of StateMachine object) -- The finite state machine that the state belongs to.
'''
def __init__(self, name, state_machine, state_type=None, rh_rule=None):
self.name = name
self.transitions = {}
self.state_machine = state_machine
self.state_type = state_type[0]
self.rh_rule = rh_rule
def add_transition(self, letter, state):
'''
Add a transition from the current state to a new state.
Keyword Arguments:
letter -- The alphabet element the current state reads to make the state transition.
state -- This will be an instance of the State object which represents a new state after in the transition after the alphabet is read.
'''
self.transitions[letter] = state
|
State
|
python
|
apache__airflow
|
providers/google/tests/unit/google/cloud/operators/test_cloud_storage_transfer_service.py
|
{
"start": 48320,
"end": 55207
}
|
class ____:
def test_constructor(self):
operator = CloudDataTransferServiceGCSToGCSOperator(
task_id=TASK_ID,
source_bucket=GCS_BUCKET_NAME,
destination_bucket=GCS_BUCKET_NAME,
project_id=GCP_PROJECT_ID,
description=DESCRIPTION,
schedule=SCHEDULE_DICT,
)
assert operator.task_id == TASK_ID
assert operator.source_bucket == GCS_BUCKET_NAME
assert operator.destination_bucket == GCS_BUCKET_NAME
assert operator.project_id == GCP_PROJECT_ID
assert operator.description == DESCRIPTION
assert operator.schedule == SCHEDULE_DICT
# Setting all the operator's input parameters as templated dag_ids
# (could be anything else) just to test if the templating works for all
# fields
@pytest.mark.db_test
@mock.patch(
"airflow.providers.google.cloud.operators.cloud_storage_transfer_service.CloudDataTransferServiceHook"
)
def test_templates(self, _, create_task_instance_of_operator, session):
dag_id = "TestGoogleCloudStorageToGoogleCloudStorageTransferOperator_test_templates"
ti = create_task_instance_of_operator(
CloudDataTransferServiceGCSToGCSOperator,
dag_id=dag_id,
source_bucket="{{ dag.dag_id }}",
destination_bucket="{{ dag.dag_id }}",
description="{{ dag.dag_id }}",
object_conditions={"exclude_prefixes": ["{{ dag.dag_id }}"]},
gcp_conn_id="{{ dag.dag_id }}",
task_id=TASK_ID,
)
session.add(ti)
session.commit()
ti.render_templates()
assert dag_id == ti.task.source_bucket
assert dag_id == ti.task.destination_bucket
assert dag_id == ti.task.description
assert dag_id == ti.task.object_conditions["exclude_prefixes"][0]
assert dag_id == ti.task.gcp_conn_id
@mock.patch(
"airflow.providers.google.cloud.operators.cloud_storage_transfer_service.CloudDataTransferServiceHook"
)
def test_execute(self, mock_transfer_hook):
operator = CloudDataTransferServiceGCSToGCSOperator(
task_id=TASK_ID,
source_bucket=GCS_BUCKET_NAME,
destination_bucket=GCS_BUCKET_NAME,
description=DESCRIPTION,
schedule=SCHEDULE_DICT,
)
operator.execute(None)
mock_transfer_hook.return_value.create_transfer_job.assert_called_once_with(
body=VALID_TRANSFER_JOB_GCS_RAW
)
assert mock_transfer_hook.return_value.wait_for_transfer_job.called
assert not mock_transfer_hook.return_value.delete_transfer_job.called
@mock.patch(
"airflow.providers.google.cloud.operators.cloud_storage_transfer_service.CloudDataTransferServiceHook"
)
def test_execute_skip_wait(self, mock_transfer_hook):
operator = CloudDataTransferServiceGCSToGCSOperator(
task_id=TASK_ID,
source_bucket=GCS_BUCKET_NAME,
destination_bucket=GCS_BUCKET_NAME,
description=DESCRIPTION,
wait=False,
schedule=SCHEDULE_DICT,
)
operator.execute(None)
mock_transfer_hook.return_value.create_transfer_job.assert_called_once_with(
body=VALID_TRANSFER_JOB_GCS_RAW
)
assert not mock_transfer_hook.return_value.wait_for_transfer_job.called
assert not mock_transfer_hook.return_value.delete_transfer_job.called
@mock.patch(
"airflow.providers.google.cloud.operators.cloud_storage_transfer_service.CloudDataTransferServiceHook"
)
def test_execute_delete_job_after_completion(self, mock_transfer_hook):
operator = CloudDataTransferServiceGCSToGCSOperator(
task_id=TASK_ID,
source_bucket=GCS_BUCKET_NAME,
destination_bucket=GCS_BUCKET_NAME,
description=DESCRIPTION,
schedule=SCHEDULE_DICT,
wait=True,
delete_job_after_completion=True,
)
operator.execute(None)
mock_transfer_hook.return_value.create_transfer_job.assert_called_once_with(
body=VALID_TRANSFER_JOB_GCS_RAW
)
assert mock_transfer_hook.return_value.wait_for_transfer_job.called
assert mock_transfer_hook.return_value.delete_transfer_job.called
@mock.patch(
"airflow.providers.google.cloud.operators.cloud_storage_transfer_service.CloudDataTransferServiceHook"
)
def test_execute_should_throw_ex_when_delete_job_without_wait(self, mock_transfer_hook):
with pytest.raises(
AirflowException, match="If 'delete_job_after_completion' is True, then 'wait' must also be True."
):
CloudDataTransferServiceGCSToGCSOperator(
task_id=TASK_ID,
source_bucket=GCS_BUCKET_NAME,
destination_bucket=GCS_BUCKET_NAME,
description=DESCRIPTION,
schedule=SCHEDULE_DICT,
wait=False,
delete_job_after_completion=True,
)
@mock.patch(
"airflow.providers.google.cloud.operators.cloud_storage_transfer_service.CloudDataTransferServiceHook"
)
def test_async_defer_successfully(self, mock_transfer_hook):
operator = CloudDataTransferServiceGCSToGCSOperator(
task_id=TASK_ID,
source_bucket=GCS_BUCKET_NAME,
destination_bucket=GCS_BUCKET_NAME,
project_id=GCP_PROJECT_ID,
description=DESCRIPTION,
schedule=SCHEDULE_DICT,
deferrable=True,
)
with pytest.raises(TaskDeferred) as exc:
operator.execute({})
assert isinstance(exc.value.trigger, CloudStorageTransferServiceCheckJobStatusTrigger)
def test_async_execute_successfully(self):
operator = CloudDataTransferServiceGCSToGCSOperator(
task_id=TASK_ID,
source_bucket=GCS_BUCKET_NAME,
destination_bucket=GCS_BUCKET_NAME,
project_id=GCP_PROJECT_ID,
description=DESCRIPTION,
schedule=SCHEDULE_DICT,
deferrable=True,
)
operator.execute_complete(context={}, event={"status": "success"})
def test_async_execute_error(self):
operator = CloudDataTransferServiceGCSToGCSOperator(
task_id=TASK_ID,
source_bucket=GCS_BUCKET_NAME,
destination_bucket=GCS_BUCKET_NAME,
project_id=GCP_PROJECT_ID,
description=DESCRIPTION,
schedule=SCHEDULE_DICT,
deferrable=True,
)
with pytest.raises(AirflowException):
operator.execute_complete(
context={}, event={"status": "error", "message": "test failure message"}
)
|
TestGoogleCloudStorageToGoogleCloudStorageTransferOperator
|
python
|
redis__redis-py
|
redis/commands/search/field.py
|
{
"start": 2123,
"end": 2982
}
|
class ____(Field):
"""
TextField is used to define a text field in a schema definition
"""
NOSTEM = "NOSTEM"
PHONETIC = "PHONETIC"
def __init__(
self,
name: str,
weight: float = 1.0,
no_stem: bool = False,
phonetic_matcher: str = None,
withsuffixtrie: bool = False,
**kwargs,
):
Field.__init__(self, name, args=[Field.TEXT, Field.WEIGHT, weight], **kwargs)
if no_stem:
Field.append_arg(self, self.NOSTEM)
if phonetic_matcher and phonetic_matcher in [
"dm:en",
"dm:fr",
"dm:pt",
"dm:es",
]:
Field.append_arg(self, self.PHONETIC)
Field.append_arg(self, phonetic_matcher)
if withsuffixtrie:
Field.append_arg(self, "WITHSUFFIXTRIE")
|
TextField
|
python
|
getsentry__sentry
|
tests/sentry/integrations/vercel/test_integration.py
|
{
"start": 20892,
"end": 22349
}
|
class ____(TestCase):
def test_asdict(self) -> None:
assert metadata.asdict() == {
"description": "Vercel is an all-in-one platform with Global CDN supporting static & JAMstack deployment and Serverless Functions.",
"features": [
{
"description": "Connect your Sentry and Vercel projects to automatically upload source maps and notify Sentry of new releases being deployed.",
"featureGate": "integrations-deployment",
}
],
"author": "The Sentry Team",
"noun": "Installation",
"issue_url": "https://github.com/getsentry/sentry/issues/new?assignees=&labels=Component:%20Integrations&template=bug.yml&title=Vercel%20Integration%20Problem",
"source_url": "https://github.com/getsentry/sentry/tree/master/src/sentry/integrations/vercel",
"aspects": {
"configure_integration": {"title": "Connect Your Projects"},
"externalInstall": {
"url": "https://vercel.com/integrations/sentry/add",
"buttonText": "Vercel Marketplace",
"noticeText": "Visit the Vercel Marketplace to install this integration. After installing the Sentry integration, you'll be redirected back to Sentry to finish syncing Vercel and Sentry projects.",
},
},
}
|
VercelIntegrationMetadataTest
|
python
|
allegroai__clearml
|
clearml/backend_api/services/v2_13/queues.py
|
{
"start": 26132,
"end": 27791
}
|
class ____(Request):
"""
Deletes a queue. If the queue is not empty and force is not set to true, queue will not be deleted.
:param queue: Queue id
:type queue: str
:param force: Force delete of non-empty queue. Defaults to false
:type force: bool
"""
_service = "queues"
_action = "delete"
_version = "2.13"
_schema = {
"definitions": {},
"properties": {
"force": {
"default": False,
"description": "Force delete of non-empty queue. Defaults to false",
"type": "boolean",
},
"queue": {"description": "Queue id", "type": "string"},
},
"required": ["queue"],
"type": "object",
}
def __init__(self, queue: str, force: Optional[bool] = False, **kwargs: Any) -> None:
super(DeleteRequest, self).__init__(**kwargs)
self.queue = queue
self.force = force
@schema_property("queue")
def queue(self) -> str:
return self._property_queue
@queue.setter
def queue(self, value: str) -> None:
if value is None:
self._property_queue = None
return
self.assert_isinstance(value, "queue", six.string_types)
self._property_queue = value
@schema_property("force")
def force(self) -> Optional[bool]:
return self._property_force
@force.setter
def force(self, value: Optional[bool]) -> None:
if value is None:
self._property_force = None
return
self.assert_isinstance(value, "force", (bool,))
self._property_force = value
|
DeleteRequest
|
python
|
getsentry__sentry
|
src/sentry/api/endpoints/organization_metrics_meta.py
|
{
"start": 2639,
"end": 4535
}
|
class ____(OrganizationEventsEndpointBase):
publish_status = {
"GET": ApiPublishStatus.PRIVATE,
}
"""Return the total sum of metrics data, the null transactions and unparameterized transactions
This is so the frontend can have an idea given its current selection of projects how good/bad the display would
be
"""
def get(self, request: Request, organization: Organization) -> Response:
data = {
"sum": {
"metrics": None,
"metrics_null": None,
"metrics_unparam": None,
},
}
try:
snuba_params = self.get_snuba_params(request, organization)
except NoProjects:
return Response(data)
with handle_query_errors():
sum_metrics = metrics_performance.query(
selected_columns=[COUNT_UNPARAM, COUNT_NULL, "count()"],
snuba_params=snuba_params,
query="",
referrer="api.organization-events-metrics-compatibility.sum_metrics",
functions_acl=["count_unparameterized_transactions", "count_null_transactions"],
use_aggregate_conditions=True,
)
if len(sum_metrics["data"]) > 0:
metrics_count = sum_metrics["data"][0].get("count")
if metrics_count == 0:
set_tag("empty_metrics", True)
data["sum"].update(
{
"metrics": metrics_count,
"metrics_null": sum_metrics["data"][0].get(get_function_alias(COUNT_NULL)),
"metrics_unparam": sum_metrics["data"][0].get(
get_function_alias(COUNT_UNPARAM)
),
}
)
return Response(data)
|
OrganizationMetricsCompatibilitySums
|
python
|
getsentry__sentry
|
src/sentry/integrations/services/assignment_source.py
|
{
"start": 353,
"end": 1230
}
|
class ____:
source_name: str
integration_id: int
queued: datetime = timezone.now()
@classmethod
def from_integration(cls, integration: Integration | RpcIntegration) -> AssignmentSource:
return AssignmentSource(
source_name=integration.name,
integration_id=integration.id,
)
def to_dict(self) -> dict[str, Any]:
payload = asdict(self)
payload["queued"] = payload["queued"].isoformat()
return payload
@classmethod
def from_dict(cls, input_dict: dict[str, Any]) -> AssignmentSource | None:
try:
if "queued" in input_dict and isinstance(input_dict["queued"], str):
input_dict["queued"] = datetime.fromisoformat(input_dict["queued"])
return cls(**input_dict)
except (ValueError, TypeError):
return None
|
AssignmentSource
|
python
|
run-llama__llama_index
|
llama-index-integrations/protocols/llama-index-protocols-ag-ui/llama_index/protocols/ag_ui/events.py
|
{
"start": 1154,
"end": 1262
}
|
class ____(ToolCallArgsEvent, Event):
type: EventType = EventType.TOOL_CALL_ARGS
|
ToolCallArgsWorkflowEvent
|
python
|
kamyu104__LeetCode-Solutions
|
Python/longest-word-with-all-prefixes.py
|
{
"start": 98,
"end": 1126
}
|
class ____(object):
def longestWord(self, words):
"""
:type words: List[str]
:rtype: str
"""
def iter_dfs(words, node):
result = -1
stk = [node]
while stk:
node = stk.pop()
if result == -1 or len(words[node["_end"]]) > len(words[result]):
result = node["_end"]
for c in reversed(string.ascii_lowercase):
if c not in node or "_end" not in node[c]:
continue
stk.append(node[c])
return result
_trie = lambda: collections.defaultdict(_trie)
trie = _trie()
trie["_end"] = -1
for i, word in enumerate(words):
reduce(dict.__getitem__, word, trie)["_end"] = i
result = iter_dfs(words, trie)
return words[result] if result != -1 else ""
# Time: O(n)
# Space: O(t), t is the number of nodes in trie
import collections
import string
|
Solution
|
python
|
falconry__falcon
|
tests/test_buffered_reader.py
|
{
"start": 150,
"end": 420
}
|
class ____(io.BytesIO):
def read(self, size=None):
if size is None or size == -1:
raise WouldHang('unbounded read()')
result = super().read(size)
if not result:
raise WouldHang('EOF')
return result
|
GlitchyStream
|
python
|
kamyu104__LeetCode-Solutions
|
Python/set-intersection-size-at-least-two.py
|
{
"start": 33,
"end": 625
}
|
class ____(object):
def intersectionSizeTwo(self, intervals):
"""
:type intervals: List[List[int]]
:rtype: int
"""
intervals.sort(key = lambda s_e: (s_e[0], -s_e[1]))
cnts = [2] * len(intervals)
result = 0
while intervals:
(start, _), cnt = intervals.pop(), cnts.pop()
for s in xrange(start, start+cnt):
for i in xrange(len(intervals)):
if cnts[i] and s <= intervals[i][1]:
cnts[i] -= 1
result += cnt
return result
|
Solution
|
python
|
google__pytype
|
pytype/pytd/booleq.py
|
{
"start": 5705,
"end": 7054
}
|
class ____(BooleanTerm):
"""A conjunction of equalities and disjunctions.
External code should use And rather than creating an _And instance directly.
"""
__slots__ = ("exprs",)
def __init__(self, exprs):
"""Initialize a conjunction.
Args:
exprs: A set. The subterms.
"""
self.exprs = exprs
def __eq__(self, other):
return self.__class__ == other.__class__ and self.exprs == other.exprs
def __repr__(self):
return f"And({list(self.exprs)!r})"
def __str__(self):
return "(" + " & ".join(str(t) for t in self.exprs) + ")"
def __hash__(self):
return _expr_set_hash(self.exprs)
def simplify(self, assignments):
return simplify_exprs(
(e.simplify(assignments) for e in self.exprs), _And, FALSE, TRUE
)
def extract_pivots(self, assignments):
"""Extract the pivots. See BooleanTerm.extract_pivots()."""
pivots = {} # dict of frozenset
for expr in self.exprs:
expr_pivots = expr.extract_pivots(assignments)
for name, values in expr_pivots.items():
if name in pivots:
pivots[name] = pivots[name] & values
else:
pivots[name] = values
return {var: values for var, values in pivots.items() if values}
def extract_equalities(self):
return tuple(chain(expr.extract_equalities() for expr in self.exprs))
|
_And
|
python
|
django-compressor__django-compressor
|
compressor/tests/test_offline.py
|
{
"start": 19130,
"end": 20482
}
|
class ____(
OfflineCompressTestCaseWithContextGenerator
):
"""
Test that the offline manifest is independent of STATIC_URL.
I.e. users can use the manifest with any other STATIC_URL in the future.
"""
templates_dir = "test_static_url_independence"
expected_hash = "b0bfc3754fd4"
additional_test_settings = {
"STATIC_URL": "/custom/static/url/",
# We use ``COMPRESS_OFFLINE_CONTEXT`` generator to make sure that
# ``STATIC_URL`` is not cached when rendering the template.
"COMPRESS_OFFLINE_CONTEXT": (
"compressor.tests.test_offline.static_url_context_generator"
),
}
def _test_offline(self, engine, verbosity=0):
count, result = CompressCommand().handle_inner(
engines=[engine], verbosity=verbosity
)
self.assertEqual(1, count)
self.assertEqual([self._render_script(self.expected_hash)], result)
self.assertEqual(self._render_template(engine), self._render_result(result))
# Changing STATIC_URL setting doesn't break things despite that
# offline compression was made with different STATIC_URL.
with self.settings(STATIC_URL="/another/static/url/"):
self.assertEqual(self._render_template(engine), self._render_result(result))
|
OfflineCompressStaticUrlIndependenceTestCase
|
python
|
kubernetes-client__python
|
kubernetes/client/models/v1_mutating_webhook.py
|
{
"start": 383,
"end": 23045
}
|
class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'admission_review_versions': 'list[str]',
'client_config': 'AdmissionregistrationV1WebhookClientConfig',
'failure_policy': 'str',
'match_conditions': 'list[V1MatchCondition]',
'match_policy': 'str',
'name': 'str',
'namespace_selector': 'V1LabelSelector',
'object_selector': 'V1LabelSelector',
'reinvocation_policy': 'str',
'rules': 'list[V1RuleWithOperations]',
'side_effects': 'str',
'timeout_seconds': 'int'
}
attribute_map = {
'admission_review_versions': 'admissionReviewVersions',
'client_config': 'clientConfig',
'failure_policy': 'failurePolicy',
'match_conditions': 'matchConditions',
'match_policy': 'matchPolicy',
'name': 'name',
'namespace_selector': 'namespaceSelector',
'object_selector': 'objectSelector',
'reinvocation_policy': 'reinvocationPolicy',
'rules': 'rules',
'side_effects': 'sideEffects',
'timeout_seconds': 'timeoutSeconds'
}
def __init__(self, admission_review_versions=None, client_config=None, failure_policy=None, match_conditions=None, match_policy=None, name=None, namespace_selector=None, object_selector=None, reinvocation_policy=None, rules=None, side_effects=None, timeout_seconds=None, local_vars_configuration=None): # noqa: E501
"""V1MutatingWebhook - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._admission_review_versions = None
self._client_config = None
self._failure_policy = None
self._match_conditions = None
self._match_policy = None
self._name = None
self._namespace_selector = None
self._object_selector = None
self._reinvocation_policy = None
self._rules = None
self._side_effects = None
self._timeout_seconds = None
self.discriminator = None
self.admission_review_versions = admission_review_versions
self.client_config = client_config
if failure_policy is not None:
self.failure_policy = failure_policy
if match_conditions is not None:
self.match_conditions = match_conditions
if match_policy is not None:
self.match_policy = match_policy
self.name = name
if namespace_selector is not None:
self.namespace_selector = namespace_selector
if object_selector is not None:
self.object_selector = object_selector
if reinvocation_policy is not None:
self.reinvocation_policy = reinvocation_policy
if rules is not None:
self.rules = rules
self.side_effects = side_effects
if timeout_seconds is not None:
self.timeout_seconds = timeout_seconds
@property
def admission_review_versions(self):
"""Gets the admission_review_versions of this V1MutatingWebhook. # noqa: E501
AdmissionReviewVersions is an ordered list of preferred `AdmissionReview` versions the Webhook expects. API server will try to use first version in the list which it supports. If none of the versions specified in this list supported by API server, validation will fail for this object. If a persisted webhook configuration specifies allowed versions and does not include any versions known to the API Server, calls to the webhook will fail and be subject to the failure policy. # noqa: E501
:return: The admission_review_versions of this V1MutatingWebhook. # noqa: E501
:rtype: list[str]
"""
return self._admission_review_versions
@admission_review_versions.setter
def admission_review_versions(self, admission_review_versions):
"""Sets the admission_review_versions of this V1MutatingWebhook.
AdmissionReviewVersions is an ordered list of preferred `AdmissionReview` versions the Webhook expects. API server will try to use first version in the list which it supports. If none of the versions specified in this list supported by API server, validation will fail for this object. If a persisted webhook configuration specifies allowed versions and does not include any versions known to the API Server, calls to the webhook will fail and be subject to the failure policy. # noqa: E501
:param admission_review_versions: The admission_review_versions of this V1MutatingWebhook. # noqa: E501
:type: list[str]
"""
if self.local_vars_configuration.client_side_validation and admission_review_versions is None: # noqa: E501
raise ValueError("Invalid value for `admission_review_versions`, must not be `None`") # noqa: E501
self._admission_review_versions = admission_review_versions
@property
def client_config(self):
"""Gets the client_config of this V1MutatingWebhook. # noqa: E501
:return: The client_config of this V1MutatingWebhook. # noqa: E501
:rtype: AdmissionregistrationV1WebhookClientConfig
"""
return self._client_config
@client_config.setter
def client_config(self, client_config):
"""Sets the client_config of this V1MutatingWebhook.
:param client_config: The client_config of this V1MutatingWebhook. # noqa: E501
:type: AdmissionregistrationV1WebhookClientConfig
"""
if self.local_vars_configuration.client_side_validation and client_config is None: # noqa: E501
raise ValueError("Invalid value for `client_config`, must not be `None`") # noqa: E501
self._client_config = client_config
@property
def failure_policy(self):
"""Gets the failure_policy of this V1MutatingWebhook. # noqa: E501
FailurePolicy defines how unrecognized errors from the admission endpoint are handled - allowed values are Ignore or Fail. Defaults to Fail. # noqa: E501
:return: The failure_policy of this V1MutatingWebhook. # noqa: E501
:rtype: str
"""
return self._failure_policy
@failure_policy.setter
def failure_policy(self, failure_policy):
"""Sets the failure_policy of this V1MutatingWebhook.
FailurePolicy defines how unrecognized errors from the admission endpoint are handled - allowed values are Ignore or Fail. Defaults to Fail. # noqa: E501
:param failure_policy: The failure_policy of this V1MutatingWebhook. # noqa: E501
:type: str
"""
self._failure_policy = failure_policy
@property
def match_conditions(self):
"""Gets the match_conditions of this V1MutatingWebhook. # noqa: E501
MatchConditions is a list of conditions that must be met for a request to be sent to this webhook. Match conditions filter requests that have already been matched by the rules, namespaceSelector, and objectSelector. An empty list of matchConditions matches all requests. There are a maximum of 64 match conditions allowed. The exact matching logic is (in order): 1. If ANY matchCondition evaluates to FALSE, the webhook is skipped. 2. If ALL matchConditions evaluate to TRUE, the webhook is called. 3. If any matchCondition evaluates to an error (but none are FALSE): - If failurePolicy=Fail, reject the request - If failurePolicy=Ignore, the error is ignored and the webhook is skipped # noqa: E501
:return: The match_conditions of this V1MutatingWebhook. # noqa: E501
:rtype: list[V1MatchCondition]
"""
return self._match_conditions
@match_conditions.setter
def match_conditions(self, match_conditions):
"""Sets the match_conditions of this V1MutatingWebhook.
MatchConditions is a list of conditions that must be met for a request to be sent to this webhook. Match conditions filter requests that have already been matched by the rules, namespaceSelector, and objectSelector. An empty list of matchConditions matches all requests. There are a maximum of 64 match conditions allowed. The exact matching logic is (in order): 1. If ANY matchCondition evaluates to FALSE, the webhook is skipped. 2. If ALL matchConditions evaluate to TRUE, the webhook is called. 3. If any matchCondition evaluates to an error (but none are FALSE): - If failurePolicy=Fail, reject the request - If failurePolicy=Ignore, the error is ignored and the webhook is skipped # noqa: E501
:param match_conditions: The match_conditions of this V1MutatingWebhook. # noqa: E501
:type: list[V1MatchCondition]
"""
self._match_conditions = match_conditions
@property
def match_policy(self):
"""Gets the match_policy of this V1MutatingWebhook. # noqa: E501
matchPolicy defines how the \"rules\" list is used to match incoming requests. Allowed values are \"Exact\" or \"Equivalent\". - Exact: match a request only if it exactly matches a specified rule. For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, but \"rules\" only included `apiGroups:[\"apps\"], apiVersions:[\"v1\"], resources: [\"deployments\"]`, a request to apps/v1beta1 or extensions/v1beta1 would not be sent to the webhook. - Equivalent: match a request if modifies a resource listed in rules, even via another API group or version. For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, and \"rules\" only included `apiGroups:[\"apps\"], apiVersions:[\"v1\"], resources: [\"deployments\"]`, a request to apps/v1beta1 or extensions/v1beta1 would be converted to apps/v1 and sent to the webhook. Defaults to \"Equivalent\" # noqa: E501
:return: The match_policy of this V1MutatingWebhook. # noqa: E501
:rtype: str
"""
return self._match_policy
@match_policy.setter
def match_policy(self, match_policy):
"""Sets the match_policy of this V1MutatingWebhook.
matchPolicy defines how the \"rules\" list is used to match incoming requests. Allowed values are \"Exact\" or \"Equivalent\". - Exact: match a request only if it exactly matches a specified rule. For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, but \"rules\" only included `apiGroups:[\"apps\"], apiVersions:[\"v1\"], resources: [\"deployments\"]`, a request to apps/v1beta1 or extensions/v1beta1 would not be sent to the webhook. - Equivalent: match a request if modifies a resource listed in rules, even via another API group or version. For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, and \"rules\" only included `apiGroups:[\"apps\"], apiVersions:[\"v1\"], resources: [\"deployments\"]`, a request to apps/v1beta1 or extensions/v1beta1 would be converted to apps/v1 and sent to the webhook. Defaults to \"Equivalent\" # noqa: E501
:param match_policy: The match_policy of this V1MutatingWebhook. # noqa: E501
:type: str
"""
self._match_policy = match_policy
@property
def name(self):
"""Gets the name of this V1MutatingWebhook. # noqa: E501
The name of the admission webhook. Name should be fully qualified, e.g., imagepolicy.kubernetes.io, where \"imagepolicy\" is the name of the webhook, and kubernetes.io is the name of the organization. Required. # noqa: E501
:return: The name of this V1MutatingWebhook. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this V1MutatingWebhook.
The name of the admission webhook. Name should be fully qualified, e.g., imagepolicy.kubernetes.io, where \"imagepolicy\" is the name of the webhook, and kubernetes.io is the name of the organization. Required. # noqa: E501
:param name: The name of this V1MutatingWebhook. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and name is None: # noqa: E501
raise ValueError("Invalid value for `name`, must not be `None`") # noqa: E501
self._name = name
@property
def namespace_selector(self):
"""Gets the namespace_selector of this V1MutatingWebhook. # noqa: E501
:return: The namespace_selector of this V1MutatingWebhook. # noqa: E501
:rtype: V1LabelSelector
"""
return self._namespace_selector
@namespace_selector.setter
def namespace_selector(self, namespace_selector):
"""Sets the namespace_selector of this V1MutatingWebhook.
:param namespace_selector: The namespace_selector of this V1MutatingWebhook. # noqa: E501
:type: V1LabelSelector
"""
self._namespace_selector = namespace_selector
@property
def object_selector(self):
"""Gets the object_selector of this V1MutatingWebhook. # noqa: E501
:return: The object_selector of this V1MutatingWebhook. # noqa: E501
:rtype: V1LabelSelector
"""
return self._object_selector
@object_selector.setter
def object_selector(self, object_selector):
"""Sets the object_selector of this V1MutatingWebhook.
:param object_selector: The object_selector of this V1MutatingWebhook. # noqa: E501
:type: V1LabelSelector
"""
self._object_selector = object_selector
@property
def reinvocation_policy(self):
"""Gets the reinvocation_policy of this V1MutatingWebhook. # noqa: E501
reinvocationPolicy indicates whether this webhook should be called multiple times as part of a single admission evaluation. Allowed values are \"Never\" and \"IfNeeded\". Never: the webhook will not be called more than once in a single admission evaluation. IfNeeded: the webhook will be called at least one additional time as part of the admission evaluation if the object being admitted is modified by other admission plugins after the initial webhook call. Webhooks that specify this option *must* be idempotent, able to process objects they previously admitted. Note: * the number of additional invocations is not guaranteed to be exactly one. * if additional invocations result in further modifications to the object, webhooks are not guaranteed to be invoked again. * webhooks that use this option may be reordered to minimize the number of additional invocations. * to validate an object after all mutations are guaranteed complete, use a validating admission webhook instead. Defaults to \"Never\". # noqa: E501
:return: The reinvocation_policy of this V1MutatingWebhook. # noqa: E501
:rtype: str
"""
return self._reinvocation_policy
@reinvocation_policy.setter
def reinvocation_policy(self, reinvocation_policy):
"""Sets the reinvocation_policy of this V1MutatingWebhook.
reinvocationPolicy indicates whether this webhook should be called multiple times as part of a single admission evaluation. Allowed values are \"Never\" and \"IfNeeded\". Never: the webhook will not be called more than once in a single admission evaluation. IfNeeded: the webhook will be called at least one additional time as part of the admission evaluation if the object being admitted is modified by other admission plugins after the initial webhook call. Webhooks that specify this option *must* be idempotent, able to process objects they previously admitted. Note: * the number of additional invocations is not guaranteed to be exactly one. * if additional invocations result in further modifications to the object, webhooks are not guaranteed to be invoked again. * webhooks that use this option may be reordered to minimize the number of additional invocations. * to validate an object after all mutations are guaranteed complete, use a validating admission webhook instead. Defaults to \"Never\". # noqa: E501
:param reinvocation_policy: The reinvocation_policy of this V1MutatingWebhook. # noqa: E501
:type: str
"""
self._reinvocation_policy = reinvocation_policy
@property
def rules(self):
"""Gets the rules of this V1MutatingWebhook. # noqa: E501
Rules describes what operations on what resources/subresources the webhook cares about. The webhook cares about an operation if it matches _any_ Rule. However, in order to prevent ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks from putting the cluster in a state which cannot be recovered from without completely disabling the plugin, ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks are never called on admission requests for ValidatingWebhookConfiguration and MutatingWebhookConfiguration objects. # noqa: E501
:return: The rules of this V1MutatingWebhook. # noqa: E501
:rtype: list[V1RuleWithOperations]
"""
return self._rules
@rules.setter
def rules(self, rules):
"""Sets the rules of this V1MutatingWebhook.
Rules describes what operations on what resources/subresources the webhook cares about. The webhook cares about an operation if it matches _any_ Rule. However, in order to prevent ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks from putting the cluster in a state which cannot be recovered from without completely disabling the plugin, ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks are never called on admission requests for ValidatingWebhookConfiguration and MutatingWebhookConfiguration objects. # noqa: E501
:param rules: The rules of this V1MutatingWebhook. # noqa: E501
:type: list[V1RuleWithOperations]
"""
self._rules = rules
@property
def side_effects(self):
"""Gets the side_effects of this V1MutatingWebhook. # noqa: E501
SideEffects states whether this webhook has side effects. Acceptable values are: None, NoneOnDryRun (webhooks created via v1beta1 may also specify Some or Unknown). Webhooks with side effects MUST implement a reconciliation system, since a request may be rejected by a future step in the admission chain and the side effects therefore need to be undone. Requests with the dryRun attribute will be auto-rejected if they match a webhook with sideEffects == Unknown or Some. # noqa: E501
:return: The side_effects of this V1MutatingWebhook. # noqa: E501
:rtype: str
"""
return self._side_effects
@side_effects.setter
def side_effects(self, side_effects):
"""Sets the side_effects of this V1MutatingWebhook.
SideEffects states whether this webhook has side effects. Acceptable values are: None, NoneOnDryRun (webhooks created via v1beta1 may also specify Some or Unknown). Webhooks with side effects MUST implement a reconciliation system, since a request may be rejected by a future step in the admission chain and the side effects therefore need to be undone. Requests with the dryRun attribute will be auto-rejected if they match a webhook with sideEffects == Unknown or Some. # noqa: E501
:param side_effects: The side_effects of this V1MutatingWebhook. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and side_effects is None: # noqa: E501
raise ValueError("Invalid value for `side_effects`, must not be `None`") # noqa: E501
self._side_effects = side_effects
@property
def timeout_seconds(self):
"""Gets the timeout_seconds of this V1MutatingWebhook. # noqa: E501
TimeoutSeconds specifies the timeout for this webhook. After the timeout passes, the webhook call will be ignored or the API call will fail based on the failure policy. The timeout value must be between 1 and 30 seconds. Default to 10 seconds. # noqa: E501
:return: The timeout_seconds of this V1MutatingWebhook. # noqa: E501
:rtype: int
"""
return self._timeout_seconds
@timeout_seconds.setter
def timeout_seconds(self, timeout_seconds):
"""Sets the timeout_seconds of this V1MutatingWebhook.
TimeoutSeconds specifies the timeout for this webhook. After the timeout passes, the webhook call will be ignored or the API call will fail based on the failure policy. The timeout value must be between 1 and 30 seconds. Default to 10 seconds. # noqa: E501
:param timeout_seconds: The timeout_seconds of this V1MutatingWebhook. # noqa: E501
:type: int
"""
self._timeout_seconds = timeout_seconds
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1MutatingWebhook):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1MutatingWebhook):
return True
return self.to_dict() != other.to_dict()
|
V1MutatingWebhook
|
python
|
altair-viz__altair
|
altair/vegalite/v6/schema/channels.py
|
{
"start": 803460,
"end": 826821
}
|
class ____(FieldChannelMixin, core.PositionFieldDefBase):
r"""
Theta schema wrapper.
Parameters
----------
shorthand : str, dict, Sequence[str], :class:`RepeatRef`
shorthand for field, aggregate, and type
aggregate : dict, :class:`Aggregate`, :class:`ArgmaxDef`, :class:`ArgminDef`, :class:`NonArgAggregateOp`, Literal['average', 'count', 'distinct', 'max', 'mean', 'median', 'min', 'missing', 'product', 'q1', 'q3', 'ci0', 'ci1', 'stderr', 'stdev', 'stdevp', 'sum', 'valid', 'values', 'variance', 'variancep', 'exponential', 'exponentialb']
Aggregation function for the field (e.g., ``"mean"``, ``"sum"``, ``"median"``,
``"min"``, ``"max"``, ``"count"``).
**Default value:** ``undefined`` (None)
**See also:** `aggregate <https://vega.github.io/vega-lite/docs/aggregate.html>`__
documentation.
bandPosition : float
Relative position on a band of a stacked, binned, time unit, or band scale. For
example, the marks will be positioned at the beginning of the band if set to ``0``,
and at the middle of the band if set to ``0.5``.
bin : bool, dict, Literal['binned'], :class:`BinParams`, None
A flag for binning a ``quantitative`` field, `an object defining binning parameters
<https://vega.github.io/vega-lite/docs/bin.html#bin-parameters>`__, or indicating
that the data for ``x`` or ``y`` channel are binned before they are imported into
Vega-Lite (``"binned"``).
* If ``true``, default `binning parameters
<https://vega.github.io/vega-lite/docs/bin.html#bin-parameters>`__ will be
applied.
* If ``"binned"``, this indicates that the data for the ``x`` (or ``y``) channel are
already binned. You can map the bin-start field to ``x`` (or ``y``) and the
bin-end field to ``x2`` (or ``y2``). The scale and axis will be formatted similar
to binning in Vega-Lite. To adjust the axis ticks based on the bin step, you can
also set the axis's `tickMinStep
<https://vega.github.io/vega-lite/docs/axis.html#ticks>`__ property.
**Default value:** ``false``
**See also:** `bin <https://vega.github.io/vega-lite/docs/bin.html>`__
documentation.
field : str, dict, :class:`Field`, :class:`FieldName`, :class:`RepeatRef`
**Required.** A string defining the name of the field from which to pull a data
value or an object defining iterated values from the `repeat
<https://vega.github.io/vega-lite/docs/repeat.html>`__ operator.
**See also:** `field <https://vega.github.io/vega-lite/docs/field.html>`__
documentation.
**Notes:** 1) Dots (``.``) and brackets (``[`` and ``]``) can be used to access
nested objects (e.g., ``"field": "foo.bar"`` and ``"field": "foo['bar']"``). If
field names contain dots or brackets but are not nested, you can use ``\\`` to
escape dots and brackets (e.g., ``"a\\.b"`` and ``"a\\[0\\]"``). See more details
about escaping in the `field documentation
<https://vega.github.io/vega-lite/docs/field.html>`__. 2) ``field`` is not required
if ``aggregate`` is ``count``.
scale : dict, :class:`Scale`, None
An object defining properties of the channel's scale, which is the function that
transforms values in the data domain (numbers, dates, strings, etc) to visual values
(pixels, colors, sizes) of the encoding channels.
If ``null``, the scale will be `disabled and the data value will be directly encoded
<https://vega.github.io/vega-lite/docs/scale.html#disable>`__.
**Default value:** If undefined, default `scale properties
<https://vega.github.io/vega-lite/docs/scale.html>`__ are applied.
**See also:** `scale <https://vega.github.io/vega-lite/docs/scale.html>`__
documentation.
sort : dict, :class:`Sort`, Sequence[str], Sequence[bool], Sequence[float], :class:`SortArray`, :class:`SortOrder`, :class:`AllSortString`, :class:`SortByChannel`, :class:`SortByEncoding`, :class:`EncodingSortField`, :class:`SortByChannelDesc`, Sequence[dict, :class:`DateTime`], Literal['-x', '-y', '-color', '-fill', '-stroke', '-strokeWidth', '-size', '-shape', '-fillOpacity', '-strokeOpacity', '-opacity', '-text', 'ascending', 'descending', 'x', 'y', 'color', 'fill', 'stroke', 'strokeWidth', 'size', 'shape', 'fillOpacity', 'strokeOpacity', 'opacity', 'text'], None
Sort order for the encoded field.
For continuous fields (quantitative or temporal), ``sort`` can be either
``"ascending"`` or ``"descending"``.
For discrete fields, ``sort`` can be one of the following:
* ``"ascending"`` or ``"descending"`` -- for sorting by the values' natural order in
JavaScript.
* `A string indicating an encoding channel name to sort by
<https://vega.github.io/vega-lite/docs/sort.html#sort-by-encoding>`__ (e.g.,
``"x"`` or ``"y"``) with an optional minus prefix for descending sort (e.g.,
``"-x"`` to sort by x-field, descending). This channel string is short-form of `a
sort-by-encoding definition
<https://vega.github.io/vega-lite/docs/sort.html#sort-by-encoding>`__. For
example, ``"sort": "-x"`` is equivalent to ``"sort": {"encoding": "x", "order":
"descending"}``.
* `A sort field definition
<https://vega.github.io/vega-lite/docs/sort.html#sort-field>`__ for sorting by
another field.
* `An array specifying the field values in preferred order
<https://vega.github.io/vega-lite/docs/sort.html#sort-array>`__. In this case, the
sort order will obey the values in the array, followed by any unspecified values
in their original order. For discrete time field, values in the sort array can be
`date-time definition objects
<https://vega.github.io/vega-lite/docs/datetime.html>`__. In addition, for time
units ``"month"`` and ``"day"``, the values can be the month or day names (case
insensitive) or their 3-letter initials (e.g., ``"Mon"``, ``"Tue"``).
* ``null`` indicating no sort.
**Default value:** ``"ascending"``
**Note:** ``null`` and sorting by another channel is not supported for ``row`` and
``column``.
**See also:** `sort <https://vega.github.io/vega-lite/docs/sort.html>`__
documentation.
stack : bool, :class:`StackOffset`, Literal['zero', 'center', 'normalize'], None
Type of stacking offset if the field should be stacked. ``stack`` is only applicable
for ``x``, ``y``, ``theta``, and ``radius`` channels with continuous domains. For
example, ``stack`` of ``y`` can be used to customize stacking for a vertical bar
chart.
``stack`` can be one of the following values:
* ``"zero"`` or ``true``: stacking with baseline offset at zero value of the scale
(for creating typical stacked `bar
<https://vega.github.io/vega-lite/docs/stack.html#bar>`__ and `area
<https://vega.github.io/vega-lite/docs/stack.html#area>`__ chart).
* ``"normalize"`` - stacking with normalized domain (for creating `normalized
stacked bar and area charts
<https://vega.github.io/vega-lite/docs/stack.html#normalized>`__ and pie charts
`with percentage tooltip
<https://vega.github.io/vega-lite/docs/arc.html#tooltip>`__).
* ``"center"`` - stacking with center baseline (for `streamgraph
<https://vega.github.io/vega-lite/docs/stack.html#streamgraph>`__).
* ``null`` or ``false`` - No-stacking. This will produce layered `bar
<https://vega.github.io/vega-lite/docs/stack.html#layered-bar-chart>`__ and area
chart.
**Default value:** ``zero`` for plots with all of the following conditions are true:
(1) the mark is ``bar``, ``area``, or ``arc``; (2) the stacked measure channel (x or
y) has a linear scale; (3) At least one of non-position channels mapped to an
unaggregated field that is different from x and y. Otherwise, ``null`` by default.
**See also:** `stack <https://vega.github.io/vega-lite/docs/stack.html>`__
documentation.
timeUnit : dict, :class:`TimeUnit`, :class:`MultiTimeUnit`, :class:`BinnedTimeUnit`, :class:`SingleTimeUnit`, :class:`TimeUnitParams`, :class:`UtcMultiTimeUnit`, :class:`UtcSingleTimeUnit`, :class:`LocalMultiTimeUnit`, :class:`LocalSingleTimeUnit`, Literal['binnedyear', 'binnedyearquarter', 'binnedyearquartermonth', 'binnedyearmonth', 'binnedyearmonthdate', 'binnedyearmonthdatehours', 'binnedyearmonthdatehoursminutes', 'binnedyearmonthdatehoursminutesseconds', 'binnedyearweek', 'binnedyearweekday', 'binnedyearweekdayhours', 'binnedyearweekdayhoursminutes', 'binnedyearweekdayhoursminutesseconds', 'binnedyeardayofyear', 'binnedutcyear', 'binnedutcyearquarter', 'binnedutcyearquartermonth', 'binnedutcyearmonth', 'binnedutcyearmonthdate', 'binnedutcyearmonthdatehours', 'binnedutcyearmonthdatehoursminutes', 'binnedutcyearmonthdatehoursminutesseconds', 'binnedutcyearweek', 'binnedutcyearweekday', 'binnedutcyearweekdayhours', 'binnedutcyearweekdayhoursminutes', 'binnedutcyearweekdayhoursminutesseconds', 'binnedutcyeardayofyear', 'utcyear', 'utcquarter', 'utcmonth', 'utcweek', 'utcday', 'utcdayofyear', 'utcdate', 'utchours', 'utcminutes', 'utcseconds', 'utcmilliseconds', 'year', 'quarter', 'month', 'week', 'day', 'dayofyear', 'date', 'hours', 'minutes', 'seconds', 'milliseconds', 'utcyearquarter', 'utcyearquartermonth', 'utcyearmonth', 'utcyearmonthdate', 'utcyearmonthdatehours', 'utcyearmonthdatehoursminutes', 'utcyearmonthdatehoursminutesseconds', 'utcyearweek', 'utcyearweekday', 'utcyearweekdayhours', 'utcyearweekdayhoursminutes', 'utcyearweekdayhoursminutesseconds', 'utcyeardayofyear', 'utcquartermonth', 'utcmonthdate', 'utcmonthdatehours', 'utcmonthdatehoursminutes', 'utcmonthdatehoursminutesseconds', 'utcweekday', 'utcweekdayhours', 'utcweekdayhoursminutes', 'utcweekdayhoursminutesseconds', 'utcdayhours', 'utcdayhoursminutes', 'utcdayhoursminutesseconds', 'utchoursminutes', 'utchoursminutesseconds', 'utcminutesseconds', 'utcsecondsmilliseconds', 'yearquarter', 
'yearquartermonth', 'yearmonth', 'yearmonthdate', 'yearmonthdatehours', 'yearmonthdatehoursminutes', 'yearmonthdatehoursminutesseconds', 'yearweek', 'yearweekday', 'yearweekdayhours', 'yearweekdayhoursminutes', 'yearweekdayhoursminutesseconds', 'yeardayofyear', 'quartermonth', 'monthdate', 'monthdatehours', 'monthdatehoursminutes', 'monthdatehoursminutesseconds', 'weekday', 'weekdayhours', 'weekdayhoursminutes', 'weekdayhoursminutesseconds', 'dayhours', 'dayhoursminutes', 'dayhoursminutesseconds', 'hoursminutes', 'hoursminutesseconds', 'minutesseconds', 'secondsmilliseconds']
Time unit (e.g., ``year``, ``yearmonth``, ``month``, ``hours``) for a temporal
field. or `a temporal field that gets casted as ordinal
<https://vega.github.io/vega-lite/docs/type.html#cast>`__.
**Default value:** ``undefined`` (None)
**See also:** `timeUnit <https://vega.github.io/vega-lite/docs/timeunit.html>`__
documentation.
title : str, :class:`Text`, Sequence[str], None
A title for the field. If ``null``, the title will be removed.
**Default value:** derived from the field's name and transformation function
(``aggregate``, ``bin`` and ``timeUnit``). If the field has an aggregate function,
the function is displayed as part of the title (e.g., ``"Sum of Profit"``). If the
field is binned or has a time unit applied, the applied function is shown in
parentheses (e.g., ``"Profit (binned)"``, ``"Transaction Date (year-month)"``).
Otherwise, the title is simply the field name.
**Notes**:
1) You can customize the default field title format by providing the `fieldTitle
<https://vega.github.io/vega-lite/docs/config.html#top-level-config>`__ property in
the `config <https://vega.github.io/vega-lite/docs/config.html>`__ or `fieldTitle
function via the compile function's options
<https://vega.github.io/vega-lite/usage/compile.html#field-title>`__.
2) If both field definition's ``title`` and axis, header, or legend ``title`` are
defined, axis/header/legend title will be used.
type : :class:`StandardType`, Literal['quantitative', 'ordinal', 'temporal', 'nominal']
The type of measurement (``"quantitative"``, ``"temporal"``, ``"ordinal"``, or
``"nominal"``) for the encoded field or constant value (``datum``). It can also be a
``"geojson"`` type for encoding `'geoshape'
<https://vega.github.io/vega-lite/docs/geoshape.html>`__.
Vega-Lite automatically infers data types in many cases as discussed below. However,
type is required for a field if: (1) the field is not nominal and the field encoding
has no specified ``aggregate`` (except ``argmin`` and ``argmax``), ``bin``, scale
type, custom ``sort`` order, nor ``timeUnit`` or (2) if you wish to use an ordinal
scale for a field with ``bin`` or ``timeUnit``.
**Default value:**
1) For a data ``field``, ``"nominal"`` is the default data type unless the field
encoding has ``aggregate``, ``channel``, ``bin``, scale type, ``sort``, or
``timeUnit`` that satisfies the following criteria:
* ``"quantitative"`` is the default type if (1) the encoded field contains ``bin``
or ``aggregate`` except ``"argmin"`` and ``"argmax"``, (2) the encoding channel is
``latitude`` or ``longitude`` channel or (3) if the specified scale type is `a
quantitative scale <https://vega.github.io/vega-lite/docs/scale.html#type>`__.
* ``"temporal"`` is the default type if (1) the encoded field contains ``timeUnit``
or (2) the specified scale type is a time or utc scale
* ``"ordinal"`` is the default type if (1) the encoded field contains a `custom sort
order
<https://vega.github.io/vega-lite/docs/sort.html#specifying-custom-sort-order>`__,
(2) the specified scale type is an ordinal/point/band scale, or (3) the encoding
channel is ``order``.
2) For a constant value in data domain (``datum``):
* ``"quantitative"`` if the datum is a number
* ``"nominal"`` if the datum is a string
* ``"temporal"`` if the datum is `a date time object
<https://vega.github.io/vega-lite/docs/datetime.html>`__
**Note:**
* Data ``type`` describes the semantics of the data rather than the primitive data
types (number, string, etc.). The same primitive data type can have different
types of measurement. For example, numeric data can represent quantitative,
ordinal, or nominal data.
* Data values for a temporal field can be either a date-time string (e.g.,
``"2015-03-07 12:32:17"``, ``"17:01"``, ``"2015-03-16"``. ``"2015"``) or a
timestamp number (e.g., ``1552199579097``).
* When using with `bin <https://vega.github.io/vega-lite/docs/bin.html>`__, the
``type`` property can be either ``"quantitative"`` (for using a linear bin scale)
or `"ordinal" (for using an ordinal bin scale)
<https://vega.github.io/vega-lite/docs/type.html#cast-bin>`__.
* When using with `timeUnit
<https://vega.github.io/vega-lite/docs/timeunit.html>`__, the ``type`` property
can be either ``"temporal"`` (default, for using a temporal scale) or `"ordinal"
(for using an ordinal scale)
<https://vega.github.io/vega-lite/docs/type.html#cast-bin>`__.
* When using with `aggregate
<https://vega.github.io/vega-lite/docs/aggregate.html>`__, the ``type`` property
refers to the post-aggregation data type. For example, we can calculate count
``distinct`` of a categorical field ``"cat"`` using ``{"aggregate": "distinct",
"field": "cat"}``. The ``"type"`` of the aggregate output is ``"quantitative"``.
* Secondary channels (e.g., ``x2``, ``y2``, ``xError``, ``yError``) do not have
``type`` as they must have exactly the same type as their primary channels (e.g.,
``x``, ``y``).
**See also:** `type <https://vega.github.io/vega-lite/docs/type.html>`__
documentation.
"""
_class_is_valid_at_instantiation = False
_encoding_name = "theta"
@overload
def aggregate(self, _: NonArgAggregateOp_T, /) -> Theta: ...
@overload
def aggregate(self, *, argmax: Optional[str | SchemaBase] = Undefined) -> Theta: ...
@overload
def aggregate(self, *, argmin: Optional[str | SchemaBase] = Undefined) -> Theta: ...
@overload
def bandPosition(self, _: float, /) -> Theta: ...
@overload
def bin(self, _: bool | Bin | Literal["binned"] | None, /) -> Theta: ...
@overload
def bin(
self,
*,
anchor: Optional[float] = Undefined,
base: Optional[float] = Undefined,
binned: Optional[bool] = Undefined,
divide: Optional[Sequence[float]] = Undefined,
extent: Optional[Parameter | SchemaBase | Sequence[float] | Map] = Undefined,
maxbins: Optional[float] = Undefined,
minstep: Optional[float] = Undefined,
nice: Optional[bool] = Undefined,
step: Optional[float] = Undefined,
steps: Optional[Sequence[float]] = Undefined,
) -> Theta: ...
@overload
def field(self, _: str | RepeatRef, /) -> Theta: ...
@overload
def field(
self,
*,
repeat: Optional[Literal["row", "column", "repeat", "layer"]] = Undefined,
) -> Theta: ...
@overload
def scale(self, _: Scale | None, /) -> Theta: ...
@overload
def scale(
self,
*,
align: Optional[float | Parameter | SchemaBase | Map] = Undefined,
base: Optional[float | Parameter | SchemaBase | Map] = Undefined,
bins: Optional[SchemaBase | Sequence[float] | Map] = Undefined,
clamp: Optional[bool | Parameter | SchemaBase | Map] = Undefined,
constant: Optional[float | Parameter | SchemaBase | Map] = Undefined,
domain: Optional[
Parameter
| SchemaBase
| Literal["unaggregated"]
| Sequence[
str | bool | float | Temporal | Parameter | SchemaBase | Map | None
]
| Map
] = Undefined,
domainMax: Optional[
float | Temporal | Parameter | SchemaBase | Map
] = Undefined,
domainMid: Optional[float | Parameter | SchemaBase | Map] = Undefined,
domainMin: Optional[
float | Temporal | Parameter | SchemaBase | Map
] = Undefined,
domainRaw: Optional[Parameter | SchemaBase | Map] = Undefined,
exponent: Optional[float | Parameter | SchemaBase | Map] = Undefined,
interpolate: Optional[
Parameter | SchemaBase | Map | ScaleInterpolateEnum_T
] = Undefined,
nice: Optional[
bool | float | Parameter | SchemaBase | Map | TimeInterval_T
] = Undefined,
padding: Optional[float | Parameter | SchemaBase | Map] = Undefined,
paddingInner: Optional[float | Parameter | SchemaBase | Map] = Undefined,
paddingOuter: Optional[float | Parameter | SchemaBase | Map] = Undefined,
range: Optional[
SchemaBase
| Sequence[str | float | Parameter | SchemaBase | Sequence[float] | Map]
| Map
| RangeEnum_T
] = Undefined,
rangeMax: Optional[str | float | Parameter | SchemaBase | Map] = Undefined,
rangeMin: Optional[str | float | Parameter | SchemaBase | Map] = Undefined,
reverse: Optional[bool | Parameter | SchemaBase | Map] = Undefined,
round: Optional[bool | Parameter | SchemaBase | Map] = Undefined,
scheme: Optional[Parameter | SchemaBase | Map | ColorScheme_T] = Undefined,
type: Optional[SchemaBase | ScaleType_T] = Undefined,
zero: Optional[bool | Parameter | SchemaBase | Map] = Undefined,
) -> Theta: ...
@overload
def sort(
self,
_: Sequence[str]
| Sequence[bool]
| Sequence[float]
| Sequence[DateTime | Temporal]
| AllSortString_T
| None,
/,
) -> Theta: ...
@overload
def sort(
self,
*,
field: Optional[str | SchemaBase | Map] = Undefined,
op: Optional[SchemaBase | NonArgAggregateOp_T] = Undefined,
order: Optional[SchemaBase | SortOrder_T | None] = Undefined,
) -> Theta: ...
@overload
def sort(
self,
*,
encoding: Optional[SchemaBase | SortByChannel_T] = Undefined,
order: Optional[SchemaBase | SortOrder_T | None] = Undefined,
) -> Theta: ...
@overload
def stack(self, _: bool | StackOffset_T | None, /) -> Theta: ...
@overload
def timeUnit(
self,
_: TimeUnitParams | MultiTimeUnit_T | BinnedTimeUnit_T | SingleTimeUnit_T,
/,
) -> Theta: ...
@overload
def timeUnit(
self,
*,
binned: Optional[bool] = Undefined,
maxbins: Optional[float] = Undefined,
step: Optional[float] = Undefined,
unit: Optional[SchemaBase | MultiTimeUnit_T | SingleTimeUnit_T] = Undefined,
utc: Optional[bool] = Undefined,
) -> Theta: ...
@overload
def title(self, _: str | Sequence[str] | None, /) -> Theta: ...
@overload
def type(self, _: StandardType_T, /) -> Theta: ...
def __init__(
self,
shorthand: Optional[str | SchemaBase | Sequence[str] | Map] = Undefined,
aggregate: Optional[SchemaBase | Map | NonArgAggregateOp_T] = Undefined,
bandPosition: Optional[float] = Undefined,
bin: Optional[bool | SchemaBase | Literal["binned"] | Map | None] = Undefined,
field: Optional[str | SchemaBase | Map] = Undefined,
scale: Optional[SchemaBase | Map | None] = Undefined,
sort: Optional[
SchemaBase
| Sequence[str]
| Sequence[bool]
| Sequence[float]
| Sequence[Temporal | SchemaBase | Map]
| Map
| AllSortString_T
| None
] = Undefined,
stack: Optional[bool | SchemaBase | StackOffset_T | None] = Undefined,
timeUnit: Optional[
SchemaBase | Map | MultiTimeUnit_T | BinnedTimeUnit_T | SingleTimeUnit_T
] = Undefined,
title: Optional[str | SchemaBase | Sequence[str] | None] = Undefined,
type: Optional[SchemaBase | StandardType_T] = Undefined,
**kwds,
):
super().__init__(
shorthand=shorthand,
aggregate=aggregate,
bandPosition=bandPosition,
bin=bin,
field=field,
scale=scale,
sort=sort,
stack=stack,
timeUnit=timeUnit,
title=title,
type=type,
**kwds,
)
@with_property_setters
|
Theta
|
python
|
pyca__cryptography
|
src/cryptography/hazmat/primitives/asymmetric/ec.py
|
{
"start": 9485,
"end": 9704
}
|
class ____(EllipticCurve):
name = "brainpoolP384r1"
key_size = 384
group_order = 0x8CB91E82A3386D280F5D6F7E50E641DF152F7109ED5456B31F166E6CAC0425A7CF3AB6AF6B7FC3103B883202E9046565 # noqa: E501
|
BrainpoolP384R1
|
python
|
pydantic__pydantic
|
pydantic-core/python/pydantic_core/core_schema.py
|
{
"start": 52083,
"end": 52506
}
|
class ____(TypedDict, total=False):
type: Required[Literal['include-exclude-sequence']]
include: set[int]
exclude: set[int]
def filter_seq_schema(*, include: set[int] | None = None, exclude: set[int] | None = None) -> IncExSeqSerSchema:
return _dict_not_none(type='include-exclude-sequence', include=include, exclude=exclude)
IncExSeqOrElseSerSchema = Union[IncExSeqSerSchema, SerSchema]
|
IncExSeqSerSchema
|
python
|
mlflow__mlflow
|
mlflow/genai/judges/optimizers/dspy_utils.py
|
{
"start": 3156,
"end": 12276
}
|
class ____(dspy.BaseLM):
"""Special DSPy LM for Databricks environment using managed RAG client."""
def __init__(self):
super().__init__("databricks")
def dump_state(self):
return {}
def load_state(self, state):
pass
def forward(
self, prompt: str | None = None, messages: list[dict[str, Any]] | None = None, **kwargs
) -> AttrDict[str, Any]:
"""Forward pass for the language model."""
user_prompt = None
system_prompt = None
if messages:
for message in messages:
if message.get("role") == "user":
user_prompt = message.get("content", "")
elif message.get("role") == "system":
system_prompt = message.get("content", "")
if not user_prompt and prompt:
user_prompt = prompt
return _process_chat_completions(user_prompt, system_prompt)
def _sanitize_assessment_name(name: str) -> str:
"""
Sanitize a name by converting it to lowercase and stripping whitespace.
"""
return name.lower().strip()
def convert_mlflow_uri_to_litellm(model_uri: str) -> str:
"""
Convert MLflow model URI format to LiteLLM format.
MLflow uses URIs like 'openai:/gpt-4' while LiteLLM expects 'openai/gpt-4'.
Args:
model_uri: MLflow model URI (e.g., 'openai:/gpt-4')
Returns:
LiteLLM-compatible model string (e.g., 'openai/gpt-4')
"""
try:
scheme, path = _parse_model_uri(model_uri)
return f"{scheme}/{path}"
except Exception as e:
raise MlflowException(f"Failed to convert MLflow URI to LiteLLM format: {e}")
def convert_litellm_to_mlflow_uri(litellm_model: str) -> str:
"""
Convert LiteLLM model format to MLflow URI format.
LiteLLM uses formats like 'openai/gpt-4' while MLflow expects 'openai:/gpt-4'.
Args:
litellm_model: LiteLLM model string (e.g., 'openai/gpt-4')
Returns:
MLflow-compatible model URI (e.g., 'openai:/gpt-4')
Raises:
MlflowException: If the model string is not in the expected format
Examples:
>>> convert_litellm_to_mlflow_uri("openai/gpt-4")
'openai:/gpt-4'
>>> convert_litellm_to_mlflow_uri("anthropic/claude-3")
'anthropic:/claude-3'
"""
if not litellm_model:
raise MlflowException(
"Model string cannot be empty or None",
error_code=INVALID_PARAMETER_VALUE,
)
if "/" not in litellm_model:
raise MlflowException(
f"Invalid LiteLLM model format: '{litellm_model}'. "
"Expected format: 'provider/model' (e.g., 'openai/gpt-4')",
error_code=INVALID_PARAMETER_VALUE,
)
try:
provider, model = litellm_model.split("/", 1)
if not provider or not model:
raise MlflowException(
f"Invalid LiteLLM model format: '{litellm_model}'. "
"Both provider and model name must be non-empty",
error_code=INVALID_PARAMETER_VALUE,
)
return f"{provider}:/{model}"
except ValueError as e:
raise MlflowException(f"Failed to convert LiteLLM format to MLflow URI: {e}")
def trace_to_dspy_example(trace: Trace, judge: Judge) -> Optional["dspy.Example"]:
"""
Convert MLflow trace to DSPy example format.
Extracts:
- inputs/outputs from trace spans
- expected result from human assessments
- rationale from assessment feedback
Args:
trace: MLflow trace object
judge: Judge instance to find assessments for
Returns:
DSPy example object or None if conversion fails
"""
try:
judge_input_fields = judge.get_input_fields()
judge_requires_trace = any(field.name == "trace" for field in judge_input_fields)
judge_requires_inputs = any(field.name == "inputs" for field in judge_input_fields)
judge_requires_outputs = any(field.name == "outputs" for field in judge_input_fields)
judge_requires_expectations = any(
field.name == "expectations" for field in judge_input_fields
)
request = extract_request_from_trace(trace)
response = extract_response_from_trace(trace)
expectations = extract_expectations_from_trace(trace)
# Check for missing required fields
if not request and judge_requires_inputs:
_logger.warning(f"Missing required request in trace {trace.info.trace_id}")
return None
elif not response and judge_requires_outputs:
_logger.warning(f"Missing required response in trace {trace.info.trace_id}")
return None
elif not expectations and judge_requires_expectations:
_logger.warning(f"Missing required expectations in trace {trace.info.trace_id}")
return None
# Find human assessment for this judge
expected_result = None
if trace.info.assessments:
# Sort assessments by creation time (most recent first) then process
sorted_assessments = sorted(
trace.info.assessments,
key=lambda a: (
a.create_time_ms if hasattr(a, "create_time_ms") and a.create_time_ms else 0
),
reverse=True,
)
for assessment in sorted_assessments:
sanitized_assessment_name = _sanitize_assessment_name(assessment.name)
sanitized_judge_name = _sanitize_assessment_name(judge.name)
if (
sanitized_assessment_name == sanitized_judge_name
and assessment.source.source_type == AssessmentSourceType.HUMAN
):
expected_result = assessment
break
if not expected_result:
_logger.warning(
f"No human assessment found for judge '{judge.name}' in trace {trace.info.trace_id}"
)
return None
if not expected_result.feedback:
_logger.warning(f"No feedback found in assessment for trace {trace.info.trace_id}")
return None
# Create DSPy example
example_kwargs = {}
example_inputs = []
if judge_requires_trace:
example_kwargs["trace"] = trace
example_inputs.append("trace")
if judge_requires_inputs:
example_kwargs["inputs"] = request
example_inputs.append("inputs")
if judge_requires_outputs:
example_kwargs["outputs"] = response
example_inputs.append("outputs")
if judge_requires_expectations:
example_kwargs["expectations"] = expectations
example_inputs.append("expectations")
example = dspy.Example(
result=str(expected_result.feedback.value).lower(),
rationale=expected_result.rationale or "",
**example_kwargs,
)
# Set inputs (what the model should use as input)
return example.with_inputs(*example_inputs)
except Exception as e:
_logger.error(f"Failed to create DSPy example from trace: {e}")
return None
def create_dspy_signature(judge: "Judge") -> "dspy.Signature":
"""
Create DSPy signature for judge evaluation.
Args:
judge: The judge to create signature for
Returns:
DSPy signature object
"""
try:
# Build signature fields dictionary using the judge's field definitions
signature_fields = {}
# Get input fields from the judge
input_fields = judge.get_input_fields()
for field in input_fields:
signature_fields[field.name] = (
field.value_type,
dspy.InputField(desc=field.description),
)
# Get output fields from the judge
output_fields = judge.get_output_fields()
for field in output_fields:
signature_fields[field.name] = (
field.value_type,
dspy.OutputField(desc=field.description),
)
return dspy.make_signature(signature_fields, judge.instructions)
except Exception as e:
raise MlflowException(f"Failed to create DSPy signature: {e}")
def agreement_metric(example: "dspy.Example", pred: Any, trace: Any | None = None):
"""Simple agreement metric for judge optimization."""
try:
# Extract result from example and prediction
expected = getattr(example, "result", None)
predicted = getattr(pred, "result", None)
if expected is None or predicted is None:
return False
# Normalize both to consistent format
expected_norm = str(expected).lower().strip()
predicted_norm = str(predicted).lower().strip()
_logger.debug(f"expected_norm: {expected_norm}, predicted_norm: {predicted_norm}")
return expected_norm == predicted_norm
except Exception as e:
_logger.warning(f"Error in agreement_metric: {e}")
return False
|
AgentEvalLM
|
python
|
donnemartin__interactive-coding-challenges
|
staging/graphs_trees/binary_tree/test_binary_search_tree.py
|
{
"start": 17,
"end": 2238
}
|
class ____(unittest.TestCase):
def test_insert_traversals (self):
myTree = BinaryTree()
myTree2 = BinaryTree()
for num in [50, 30, 70, 10, 40, 60, 80, 7, 25, 38]:
myTree.insert(num)
[myTree2.insert(num) for num in range (1, 100, 10)]
print("Test: insert checking with in order traversal")
expectVal = [7, 10, 25, 30, 38, 40, 50, 60, 70, 80]
self.assertEqual(myTree.printInOrder(), expectVal)
expectVal = [1, 11, 21, 31, 41, 51, 61, 71, 81, 91]
self.assertEqual(myTree2.printInOrder(), expectVal)
print("Test: insert checking with post order traversal")
expectVal = [7, 25, 10, 38, 40, 30, 60, 80, 70, 50]
self.assertEqual(myTree.printPostOrder(), expectVal)
expectVal = [91, 81, 71, 61, 51, 41, 31, 21, 11, 1]
self.assertEqual(myTree2.printPostOrder(), expectVal)
print("Test: insert checking with pre order traversal")
expectVal = [50, 30, 10, 7, 25, 40, 38, 70, 60, 80]
self.assertEqual(myTree.printPreOrder(), expectVal)
expectVal = [1, 11, 21, 31, 41, 51, 61, 71, 81, 91]
self.assertEqual(myTree2.printPreOrder(), expectVal)
print("Success: test_insert_traversals")
def test_max_min_nodes (self):
myTree = BinaryTree()
myTree.insert(5)
myTree.insert(1)
myTree.insert(21)
print("Test: max node")
self.assertEqual(myTree.maxNode(), 21)
myTree.insert(32)
self.assertEqual(myTree.maxNode(), 32)
print("Test: min node")
self.assertEqual(myTree.minNode(), 1)
print("Test: min node inserting negative number")
myTree.insert(-10)
self.assertEqual(myTree.minNode(), -10)
print("Success: test_max_min_nodes")
def test_delete (self):
myTree = BinaryTree()
myTree.insert(5)
print("Test: delete")
myTree.delete(5)
self.assertEqual(myTree.treeIsEmpty(), True)
print("Test: more complex deletions")
[myTree.insert(x) for x in range(1, 5)]
myTree.delete(2)
self.assertEqual(myTree.root.rightChild.data, 3)
print("Test: delete invalid value")
self.assertEqual(myTree.delete(100), False)
print("Success: test_delete")
def main():
testing = TestBinaryTree()
testing.test_insert_traversals()
testing.test_max_min_nodes()
testing.test_delete()
if __name__=='__main__':
main()
|
TestBinaryTree
|
python
|
ansible__ansible
|
lib/ansible/modules/package_facts.py
|
{
"start": 13350,
"end": 17328
}
|
class ____(CLIMgr):
CLI = 'pkg_info'
def list_installed(self):
rc, out, err = module.run_command([self._cli, '-a'])
if rc != 0 or err:
raise Exception("Unable to list packages rc=%s : %s" % (rc, err))
return out.splitlines()
def get_package_details(self, package):
raw_pkg_details = {'name': package, 'version': ''}
details = package.split(maxsplit=1)[0].rsplit('-', maxsplit=1)
try:
return {
'name': details[0],
'version': details[1],
}
except IndexError:
return raw_pkg_details
def main():
# get supported pkg managers
PKG_MANAGERS = get_all_pkg_managers()
PKG_MANAGER_NAMES = sorted([x.lower() for x in PKG_MANAGERS.keys()])
# add aliases
PKG_MANAGER_NAMES.extend([alias for alist in ALIASES.values() for alias in alist])
# start work
global module
# choices are not set for 'manager' as they are computed dynamically and validated below instead of in argspec
module = AnsibleModule(argument_spec=dict(manager={'type': 'list', 'elements': 'str', 'default': ['auto']},
strategy={'choices': ['first', 'all'], 'default': 'first'}),
supports_check_mode=True)
packages = {}
results = {'ansible_facts': {}}
managers = [x.lower() for x in module.params['manager']]
strategy = module.params['strategy']
if 'auto' in managers:
# keep order from user, we do dedupe below
managers.extend(PKG_MANAGER_NAMES)
managers.remove('auto')
unsupported = set(managers).difference(PKG_MANAGER_NAMES)
if unsupported:
if 'auto' in module.params['manager']:
msg = 'Could not auto detect a usable package manager, check warnings for details.'
else:
msg = 'Unsupported package managers requested: %s' % (', '.join(unsupported))
module.fail_json(msg=msg)
found = 0
seen = set()
for pkgmgr in managers:
if strategy == 'first' and found:
break
# substitute aliases for aliased
for aliased in ALIASES.keys():
if pkgmgr in ALIASES[aliased]:
pkgmgr = aliased
break
# dedupe as per above
if pkgmgr in seen:
continue
seen.add(pkgmgr)
manager = PKG_MANAGERS[pkgmgr]()
try:
packages_found = {}
if manager.is_available(handle_exceptions=False):
try:
packages_found = manager.get_packages()
except Exception as e:
module.warn('Failed to retrieve packages with %s: %s' % (pkgmgr, to_text(e)))
# only consider 'found' if it results in something
if packages_found:
found += 1
for k in packages_found.keys():
if k in packages:
packages[k].extend(packages_found[k])
else:
packages[k] = packages_found[k]
else:
module.warn('Found "%s" but no associated packages' % (pkgmgr))
except Exception as e:
if pkgmgr in module.params['manager']:
module.warn('Requested package manager %s was not usable by this module: %s' % (pkgmgr, to_text(e)))
if found == 0:
msg = ('Could not detect a supported package manager from the following list: %s, '
'or the required Python library is not installed. Check warnings for details.' % managers)
module.fail_json(msg=msg)
# Set the facts, this will override the facts in ansible_facts that might exist from previous runs
# when using operating system level or distribution package managers
results['ansible_facts']['packages'] = packages
module.exit_json(**results)
if __name__ == '__main__':
main()
|
PKG_INFO
|
python
|
tiangolo__fastapi
|
docs_src/path_operation_configuration/tutorial002_py39.py
|
{
"start": 104,
"end": 575
}
|
class ____(BaseModel):
name: str
description: Union[str, None] = None
price: float
tax: Union[float, None] = None
tags: set[str] = set()
@app.post("/items/", response_model=Item, tags=["items"])
async def create_item(item: Item):
return item
@app.get("/items/", tags=["items"])
async def read_items():
return [{"name": "Foo", "price": 42}]
@app.get("/users/", tags=["users"])
async def read_users():
return [{"username": "johndoe"}]
|
Item
|
python
|
dagster-io__dagster
|
examples/project_analytics/dagster_pypi/resources.py
|
{
"start": 803,
"end": 942
}
|
class ____(ConfigurableResource):
def get_pypi_download_counts(self, _) -> pd.DataFrame:
raise NotImplementedError()
|
PyPiResource
|
python
|
pytorch__pytorch
|
torch/cpu/amp/grad_scaler.py
|
{
"start": 84,
"end": 958
}
|
class ____(torch.amp.GradScaler):
r"""
See :class:`torch.amp.GradScaler`.
``torch.cpu.amp.GradScaler(args...)`` is deprecated. Please use ``torch.amp.GradScaler("cpu", args...)`` instead.
"""
@deprecated(
"`torch.cpu.amp.GradScaler(args...)` is deprecated. "
"Please use `torch.amp.GradScaler('cpu', args...)` instead.",
category=FutureWarning,
)
def __init__(
self,
init_scale: float = 2.0**16,
growth_factor: float = 2.0,
backoff_factor: float = 0.5,
growth_interval: int = 2000,
enabled: bool = True,
) -> None:
super().__init__(
"cpu",
init_scale=init_scale,
growth_factor=growth_factor,
backoff_factor=backoff_factor,
growth_interval=growth_interval,
enabled=enabled,
)
|
GradScaler
|
python
|
scipy__scipy
|
scipy/fftpack/tests/test_real_transforms.py
|
{
"start": 11611,
"end": 11752
}
|
class ____(_TestIDCTBase):
def setup_method(self):
self.rdt = np.float32
self.dec = 5
self.type = 2
|
TestIDCTIIFloat
|
python
|
altair-viz__altair
|
altair/vegalite/v6/schema/_config.py
|
{
"start": 66460,
"end": 67820
}
|
class ____(TypedDict, total=False):
"""
:class:`altair.BindInput` ``TypedDict`` wrapper.
Parameters
----------
autocomplete
A hint for form autofill. See the `HTML autocomplete attribute
<https://developer.mozilla.org/en-US/docs/Web/HTML/Attributes/autocomplete>`__ for
additional information.
debounce
If defined, delays event handling until the specified milliseconds have elapsed
since the last event was fired.
element
An optional CSS selector string indicating the parent element to which the input
element should be added. By default, all input elements are added within the parent
container of the Vega view.
input
The type of input element to use. The valid values are ``"checkbox"``, ``"radio"``,
``"range"``, ``"select"``, and any other legal `HTML form input type
<https://developer.mozilla.org/en-US/docs/Web/HTML/Element/input>`__.
name
By default, the signal name is used to label input elements. This ``name`` property
can be used instead to specify a custom label for the bound signal.
placeholder
Text that appears in the form control when it has no value set.
"""
autocomplete: str
debounce: float
element: str
input: str
name: str
placeholder: str
|
BindInputKwds
|
python
|
mahmoud__boltons
|
boltons/dictutils.py
|
{
"start": 3798,
"end": 23160
}
|
class ____(dict):
"""A MultiDict is a dictionary that can have multiple values per key
and the OrderedMultiDict (OMD) is a MultiDict that retains
original insertion order. Common use cases include:
* handling query strings parsed from URLs
* inverting a dictionary to create a reverse index (values to keys)
* stacking data from multiple dictionaries in a non-destructive way
The OrderedMultiDict constructor is identical to the built-in
:class:`dict`, and overall the API constitutes an intuitive
superset of the built-in type:
>>> omd = OrderedMultiDict()
>>> omd['a'] = 1
>>> omd['b'] = 2
>>> omd.add('a', 3)
>>> omd.get('a')
3
>>> omd.getlist('a')
[1, 3]
Some non-:class:`dict`-like behaviors also make an appearance,
such as support for :func:`reversed`:
>>> list(reversed(omd))
['b', 'a']
Note that unlike some other MultiDicts, this OMD gives precedence
to the most recent value added. ``omd['a']`` refers to ``3``, not
``1``.
>>> omd
OrderedMultiDict([('a', 1), ('b', 2), ('a', 3)])
>>> omd.poplast('a')
3
>>> omd
OrderedMultiDict([('a', 1), ('b', 2)])
>>> omd.pop('a')
1
>>> omd
OrderedMultiDict([('b', 2)])
If you want a safe-to-modify or flat dictionary, use
:meth:`OrderedMultiDict.todict()`.
>>> from pprint import pprint as pp # preserve printed ordering
>>> omd = OrderedMultiDict([('a', 1), ('b', 2), ('a', 3)])
>>> pp(omd.todict())
{'a': 3, 'b': 2}
>>> pp(omd.todict(multi=True))
{'a': [1, 3], 'b': [2]}
With ``multi=False``, items appear with the keys in to original
insertion order, alongside the most-recently inserted value for
that key.
>>> OrderedMultiDict([('a', 1), ('b', 2), ('a', 3)]).items(multi=False)
[('a', 3), ('b', 2)]
.. warning::
``dict(omd)`` changed behavior `in Python 3.7
<https://bugs.python.org/issue34320>`_ due to changes made to
support the transition from :class:`collections.OrderedDict` to
the built-in dictionary being ordered. Before 3.7, the result
would be a new dictionary, with values that were lists, similar
to ``omd.todict(multi=True)`` (but only shallow-copy; the lists
were direct references to OMD internal structures). From 3.7
onward, the values became singular, like
``omd.todict(multi=False)``. For reliable cross-version
behavior, just use :meth:`~OrderedMultiDict.todict()`.
"""
def __new__(cls, *a, **kw):
ret = super().__new__(cls)
ret._clear_ll()
return ret
def __init__(self, *args, **kwargs):
if len(args) > 1:
raise TypeError('%s expected at most 1 argument, got %s'
% (self.__class__.__name__, len(args)))
super().__init__()
if args:
self.update_extend(args[0])
if kwargs:
self.update(kwargs)
def __getstate__(self):
return list(self.iteritems(multi=True))
def __setstate__(self, state):
self.clear()
self.update_extend(state)
def _clear_ll(self):
try:
_map = self._map
except AttributeError:
_map = self._map = {}
self.root = []
_map.clear()
self.root[:] = [self.root, self.root, None]
def _insert(self, k, v):
root = self.root
cells = self._map.setdefault(k, [])
last = root[PREV]
cell = [last, root, k, v]
last[NEXT] = root[PREV] = cell
cells.append(cell)
def add(self, k, v):
"""Add a single value *v* under a key *k*. Existing values under *k*
are preserved.
"""
values = super().setdefault(k, [])
self._insert(k, v)
values.append(v)
def addlist(self, k, v):
"""Add an iterable of values underneath a specific key, preserving
any values already under that key.
>>> omd = OrderedMultiDict([('a', -1)])
>>> omd.addlist('a', range(3))
>>> omd
OrderedMultiDict([('a', -1), ('a', 0), ('a', 1), ('a', 2)])
Called ``addlist`` for consistency with :meth:`getlist`, but
tuples and other sequences and iterables work.
"""
if not v:
return
self_insert = self._insert
values = super().setdefault(k, [])
for subv in v:
self_insert(k, subv)
values.extend(v)
def get(self, k, default=None):
"""Return the value for key *k* if present in the dictionary, else
*default*. If *default* is not given, ``None`` is returned.
This method never raises a :exc:`KeyError`.
To get all values under a key, use :meth:`OrderedMultiDict.getlist`.
"""
return super().get(k, [default])[-1]
def getlist(self, k, default=_MISSING):
"""Get all values for key *k* as a list, if *k* is in the
dictionary, else *default*. The list returned is a copy and
can be safely mutated. If *default* is not given, an empty
:class:`list` is returned.
"""
try:
return super().__getitem__(k)[:]
except KeyError:
if default is _MISSING:
return []
return default
def clear(self):
"Empty the dictionary."
super().clear()
self._clear_ll()
def setdefault(self, k, default=_MISSING):
"""If key *k* is in the dictionary, return its value. If not, insert
*k* with a value of *default* and return *default*. *default*
defaults to ``None``. See :meth:`dict.setdefault` for more
information.
"""
if not super().__contains__(k):
self[k] = None if default is _MISSING else default
return self[k]
def copy(self):
"Return a shallow copy of the dictionary."
return self.__class__(self.iteritems(multi=True))
@classmethod
def fromkeys(cls, keys, default=None):
"""Create a dictionary from a list of keys, with all the values
set to *default*, or ``None`` if *default* is not set.
"""
return cls([(k, default) for k in keys])
def update(self, E, **F):
"""Add items from a dictionary or iterable (and/or keyword arguments),
overwriting values under an existing key. See
:meth:`dict.update` for more details.
"""
# E and F are throwback names to the dict() __doc__
if E is self:
return
self_add = self.add
if isinstance(E, OrderedMultiDict):
for k in E:
if k in self:
del self[k]
for k, v in E.iteritems(multi=True):
self_add(k, v)
elif callable(getattr(E, 'keys', None)):
for k in E.keys():
self[k] = E[k]
else:
seen = set()
seen_add = seen.add
for k, v in E:
if k not in seen and k in self:
del self[k]
seen_add(k)
self_add(k, v)
for k in F:
self[k] = F[k]
return
def update_extend(self, E, **F):
"""Add items from a dictionary, iterable, and/or keyword
arguments without overwriting existing items present in the
dictionary. Like :meth:`update`, but adds to existing keys
instead of overwriting them.
"""
if E is self:
iterator = iter(E.items())
elif isinstance(E, OrderedMultiDict):
iterator = E.iteritems(multi=True)
elif hasattr(E, 'keys'):
iterator = ((k, E[k]) for k in E.keys())
else:
iterator = E
self_add = self.add
for k, v in iterator:
self_add(k, v)
def __setitem__(self, k, v):
if super().__contains__(k):
self._remove_all(k)
self._insert(k, v)
super().__setitem__(k, [v])
def __getitem__(self, k):
return super().__getitem__(k)[-1]
def __delitem__(self, k):
super().__delitem__(k)
self._remove_all(k)
def __eq__(self, other):
if self is other:
return True
try:
if len(other) != len(self):
return False
except TypeError:
return False
if isinstance(other, OrderedMultiDict):
selfi = self.iteritems(multi=True)
otheri = other.iteritems(multi=True)
zipped_items = zip_longest(selfi, otheri, fillvalue=(None, None))
for (selfk, selfv), (otherk, otherv) in zipped_items:
if selfk != otherk or selfv != otherv:
return False
if not(next(selfi, _MISSING) is _MISSING
and next(otheri, _MISSING) is _MISSING):
# leftovers (TODO: watch for StopIteration?)
return False
return True
elif hasattr(other, 'keys'):
for selfk in self:
try:
other[selfk] == self[selfk]
except KeyError:
return False
return True
return False
def __ne__(self, other):
return not (self == other)
def __ior__(self, other):
self.update(other)
return self
def pop(self, k, default=_MISSING):
"""Remove all values under key *k*, returning the most-recently
inserted value. Raises :exc:`KeyError` if the key is not
present and no *default* is provided.
"""
try:
return self.popall(k)[-1]
except KeyError:
if default is _MISSING:
raise KeyError(k)
return default
def popall(self, k, default=_MISSING):
"""Remove all values under key *k*, returning them in the form of
a list. Raises :exc:`KeyError` if the key is not present and no
*default* is provided.
"""
super_self = super()
if super_self.__contains__(k):
self._remove_all(k)
if default is _MISSING:
return super_self.pop(k)
return super_self.pop(k, default)
def poplast(self, k=_MISSING, default=_MISSING):
"""Remove and return the most-recently inserted value under the key
*k*, or the most-recently inserted key if *k* is not
provided. If no values remain under *k*, it will be removed
from the OMD. Raises :exc:`KeyError` if *k* is not present in
the dictionary, or the dictionary is empty.
"""
if k is _MISSING:
if self:
k = self.root[PREV][KEY]
else:
if default is _MISSING:
raise KeyError('empty %r' % type(self))
return default
try:
self._remove(k)
except KeyError:
if default is _MISSING:
raise KeyError(k)
return default
values = super().__getitem__(k)
v = values.pop()
if not values:
super().__delitem__(k)
return v
def _remove(self, k):
values = self._map[k]
cell = values.pop()
cell[PREV][NEXT], cell[NEXT][PREV] = cell[NEXT], cell[PREV]
if not values:
del self._map[k]
def _remove_all(self, k):
values = self._map[k]
while values:
cell = values.pop()
cell[PREV][NEXT], cell[NEXT][PREV] = cell[NEXT], cell[PREV]
del self._map[k]
def iteritems(self, multi=False):
"""Iterate over the OMD's items in insertion order. By default,
yields only the most-recently inserted value for each key. Set
*multi* to ``True`` to get all inserted items.
"""
root = self.root
curr = root[NEXT]
if multi:
while curr is not root:
yield curr[KEY], curr[VALUE]
curr = curr[NEXT]
else:
for key in self.iterkeys():
yield key, self[key]
def iterkeys(self, multi=False):
"""Iterate over the OMD's keys in insertion order. By default, yields
each key once, according to the most recent insertion. Set
*multi* to ``True`` to get all keys, including duplicates, in
insertion order.
"""
root = self.root
curr = root[NEXT]
if multi:
while curr is not root:
yield curr[KEY]
curr = curr[NEXT]
else:
yielded = set()
yielded_add = yielded.add
while curr is not root:
k = curr[KEY]
if k not in yielded:
yielded_add(k)
yield k
curr = curr[NEXT]
def itervalues(self, multi=False):
"""Iterate over the OMD's values in insertion order. By default,
yields the most-recently inserted value per unique key. Set
*multi* to ``True`` to get all values according to insertion
order.
"""
for k, v in self.iteritems(multi=multi):
yield v
def todict(self, multi=False):
"""Gets a basic :class:`dict` of the items in this dictionary. Keys
are the same as the OMD, values are the most recently inserted
values for each key.
Setting the *multi* arg to ``True`` is yields the same
result as calling :class:`dict` on the OMD, except that all the
value lists are copies that can be safely mutated.
"""
if multi:
return {k: self.getlist(k) for k in self}
return {k: self[k] for k in self}
def sorted(self, key=None, reverse=False):
"""Similar to the built-in :func:`sorted`, except this method returns
a new :class:`OrderedMultiDict` sorted by the provided key
function, optionally reversed.
Args:
key (callable): A callable to determine the sort key of
each element. The callable should expect an **item**
(key-value pair tuple).
reverse (bool): Set to ``True`` to reverse the ordering.
>>> omd = OrderedMultiDict(zip(range(3), range(3)))
>>> omd.sorted(reverse=True)
OrderedMultiDict([(2, 2), (1, 1), (0, 0)])
Note that the key function receives an **item** (key-value
tuple), so the recommended signature looks like:
>>> omd = OrderedMultiDict(zip('hello', 'world'))
>>> omd.sorted(key=lambda i: i[1]) # i[0] is the key, i[1] is the val
OrderedMultiDict([('o', 'd'), ('l', 'l'), ('e', 'o'), ('l', 'r'), ('h', 'w')])
"""
cls = self.__class__
return cls(sorted(self.iteritems(multi=True), key=key, reverse=reverse))
def sortedvalues(self, key=None, reverse=False):
"""Returns a copy of the :class:`OrderedMultiDict` with the same keys
in the same order as the original OMD, but the values within
each keyspace have been sorted according to *key* and
*reverse*.
Args:
key (callable): A single-argument callable to determine
the sort key of each element. The callable should expect
an **item** (key-value pair tuple).
reverse (bool): Set to ``True`` to reverse the ordering.
>>> omd = OrderedMultiDict()
>>> omd.addlist('even', [6, 2])
>>> omd.addlist('odd', [1, 5])
>>> omd.add('even', 4)
>>> omd.add('odd', 3)
>>> somd = omd.sortedvalues()
>>> somd.getlist('even')
[2, 4, 6]
>>> somd.keys(multi=True) == omd.keys(multi=True)
True
>>> omd == somd
False
>>> somd
OrderedMultiDict([('even', 2), ('even', 4), ('odd', 1), ('odd', 3), ('even', 6), ('odd', 5)])
As demonstrated above, contents and key order are
retained. Only value order changes.
"""
try:
superself_iteritems = super().iteritems()
except AttributeError:
superself_iteritems = super().items()
# (not reverse) because they pop off in reverse order for reinsertion
sorted_val_map = {k: sorted(v, key=key, reverse=(not reverse))
for k, v in superself_iteritems}
ret = self.__class__()
for k in self.iterkeys(multi=True):
ret.add(k, sorted_val_map[k].pop())
return ret
def inverted(self):
"""Returns a new :class:`OrderedMultiDict` with values and keys
swapped, like creating dictionary transposition or reverse
index. Insertion order is retained and all keys and values
are represented in the output.
>>> omd = OMD([(0, 2), (1, 2)])
>>> omd.inverted().getlist(2)
[0, 1]
Inverting twice yields a copy of the original:
>>> omd.inverted().inverted()
OrderedMultiDict([(0, 2), (1, 2)])
"""
return self.__class__((v, k) for k, v in self.iteritems(multi=True))
def counts(self):
"""Returns a mapping from key to number of values inserted under that
key. Like :py:class:`collections.Counter`, but returns a new
:class:`OrderedMultiDict`.
"""
# Returns an OMD because Counter/OrderedDict may not be
# available, and neither Counter nor dict maintain order.
super_getitem = super().__getitem__
return self.__class__((k, len(super_getitem(k))) for k in self)
def keys(self, multi=False):
"""Returns a list containing the output of :meth:`iterkeys`. See
that method's docs for more details.
"""
return list(self.iterkeys(multi=multi))
def values(self, multi=False):
"""Returns a list containing the output of :meth:`itervalues`. See
that method's docs for more details.
"""
return list(self.itervalues(multi=multi))
def items(self, multi=False):
"""Returns a list containing the output of :meth:`iteritems`. See
that method's docs for more details.
"""
return list(self.iteritems(multi=multi))
def __iter__(self):
return self.iterkeys()
def __reversed__(self):
root = self.root
curr = root[PREV]
lengths = {}
lengths_sd = lengths.setdefault
get_values = super().__getitem__
while curr is not root:
k = curr[KEY]
vals = get_values(k)
if lengths_sd(k, 1) == len(vals):
yield k
lengths[k] += 1
curr = curr[PREV]
def __repr__(self):
cn = self.__class__.__name__
kvs = ', '.join([repr((k, v)) for k, v in self.iteritems(multi=True)])
return f'{cn}([{kvs}])'
def viewkeys(self):
"OMD.viewkeys() -> a set-like object providing a view on OMD's keys"
return KeysView(self)
def viewvalues(self):
"OMD.viewvalues() -> an object providing a view on OMD's values"
return ValuesView(self)
def viewitems(self):
"OMD.viewitems() -> a set-like object providing a view on OMD's items"
return ItemsView(self)
# A couple of convenient aliases
OMD = OrderedMultiDict
MultiDict = OrderedMultiDict
|
OrderedMultiDict
|
python
|
pytorch__pytorch
|
test/distributed/fsdp/test_fsdp_multiple_wrapping.py
|
{
"start": 1102,
"end": 2450
}
|
class ____(FSDPTest):
@skip_if_lt_x_gpu(2)
def test_multiple_wrapping(self, device):
"""
This test simulates wrapping the module after training to run inference.
This is required in cases where later in a session, the model is wrapped again in FSDP but
contains nested FSDP wrappers within the module.
"""
inner_model = InnerModel(device)
model = FSDP(inner_model).to(device_type.type)
optim = SGD(model.parameters(), lr=0.1)
for _ in range(3):
input = torch.rand((1, 5), dtype=torch.float).to(device_type.type)
input.requires_grad = True
output = model(input)
output.sum().backward()
optim.step()
optim.zero_grad()
input = torch.rand((1, 5), dtype=torch.float).to(device_type.type)
output = model(input)
# second time to rewrap the inner model
# rewrapped_model = FSDP(inner_model, device_id=device)
rewrapped_model = FSDP(inner_model).to(device_type.type)
rewrapped_output = rewrapped_model(input)
self.assertEqual(output, rewrapped_output)
devices = ("cuda", "hpu", "xpu")
instantiate_device_type_tests(
TestMultipleWrapping, globals(), only_for=devices, allow_xpu=True
)
if __name__ == "__main__":
run_tests()
|
TestMultipleWrapping
|
python
|
doocs__leetcode
|
solution/2000-2099/2042.Check if Numbers Are Ascending in a Sentence/Solution.py
|
{
"start": 0,
"end": 264
}
|
class ____:
def areNumbersAscending(self, s: str) -> bool:
pre = 0
for t in s.split():
if t[0].isdigit():
if (cur := int(t)) <= pre:
return False
pre = cur
return True
|
Solution
|
python
|
tensorflow__tensorflow
|
tensorflow/python/autograph/pyct/templates_test.py
|
{
"start": 1376,
"end": 1844
}
|
class ____(gast.NodeTransformer):
def __init__(self, test_instance, expected_ctx):
self.at_top_level = True
self.test_instance = test_instance
self.expected_ctx = expected_ctx
def visit(self, node):
if hasattr(node, 'ctx'):
self.test_instance.assertIsInstance(node.ctx, self.expected_ctx)
if self.at_top_level:
self.at_top_level = False
self.expected_ctx = gast.Load
return super(_CtxChecker, self).visit(node)
|
_CtxChecker
|
python
|
dagster-io__dagster
|
python_modules/dagster-graphql/dagster_graphql/schema/roots/assets.py
|
{
"start": 651,
"end": 808
}
|
class ____(graphene.Union):
class Meta:
types = (GrapheneAssetConnection, GraphenePythonError)
name = "AssetsOrError"
|
GrapheneAssetsOrError
|
python
|
pypa__warehouse
|
warehouse/oidc/interfaces.py
|
{
"start": 457,
"end": 1746
}
|
class ____(Interface):
def verify_jwt_signature(
unverified_token: str, issuer_url: str
) -> SignedClaims | None:
"""
Verify the given JWT's signature, returning its signed claims if
valid. If the signature is invalid, `None` is returned.
This method does **not** verify the claim set itself -- the API
consumer is responsible for evaluating the claim set.
"""
pass
def find_publisher(
signed_claims: SignedClaims, *, pending: bool = False
) -> OIDCPublisher | PendingOIDCPublisher | None:
"""
Given a mapping of signed claims produced by `verify_jwt_signature`,
attempt to find and return either a `OIDCPublisher` or `PendingOIDCPublisher`
that matches them, depending on the value of `pending`.
If no publisher matches the claims, `None` is returned.
"""
pass
def reify_pending_publisher(
pending_publisher: PendingOIDCPublisher, project: Project
) -> OIDCPublisher:
"""
Reify the given pending `PendingOIDCPublisher` into an `OIDCPublisher`,
adding it to the given project (presumed newly created) in the process.
Returns the reified publisher.
"""
pass
|
IOIDCPublisherService
|
python
|
spack__spack
|
lib/spack/spack/spec.py
|
{
"start": 223048,
"end": 223154
}
|
class ____(spack.error.SpecError):
"""Called for errors in Spec format strings."""
|
SpecFormatStringError
|
python
|
great-expectations__great_expectations
|
tests/datasource/fluent/test_batch.py
|
{
"start": 5101,
"end": 6389
}
|
class ____:
@pytest.fixture
def expectation(self) -> Expectation:
return gxe.ExpectColumnValuesToNotBeNull(column="vendor_id", mostly=0.95)
@pytest.mark.filesystem
def test_boolean_validation_result(
self,
pandas_setup: Tuple[AbstractDataContext, Batch],
expectation: Expectation,
):
_, batch = pandas_setup
result = batch.validate(expectation, result_format="BOOLEAN_ONLY")
assert result.success
assert len(result.result) == 0
@pytest.mark.filesystem
def test_summary_validation_result(
self,
pandas_setup: Tuple[AbstractDataContext, Batch],
expectation: Expectation,
):
_, batch = pandas_setup
summary_result = batch.validate(expectation, result_format="SUMMARY")
assert summary_result.success
assert len(summary_result.result) > 0
@pytest.mark.filesystem
def test_complete_validation_result(
self,
pandas_setup: Tuple[AbstractDataContext, Batch],
expectation: Expectation,
):
_, batch = pandas_setup
result = batch.validate(expectation, result_format="COMPLETE")
assert result.success
assert "unexpected_index_list" in result.result
|
TestBatchValidateExpectation
|
python
|
walkccc__LeetCode
|
solutions/3112. Minimum Time to Visit Disappearing Nodes/3112.py
|
{
"start": 0,
"end": 855
}
|
class ____:
def minimumTime(
self,
n: int,
edges: list[list[int]],
disappear: list[int],
) -> list[int]:
graph = [[] for _ in range(n)]
for u, v, w in edges:
graph[u].append((v, w))
graph[v].append((u, w))
return self._dijkstra(graph, 0, disappear)
def _dijkstra(
self,
graph: list[list[tuple[int, int]]],
src: int,
disappear: list[int],
) -> list[int]:
dist = [math.inf] * len(graph)
dist[src] = 0
minHeap = [(dist[src], src)] # (d, u)
while minHeap:
d, u = heapq.heappop(minHeap)
if d > dist[u]:
continue
for v, w in graph[u]:
if d + w < disappear[v] and d + w < dist[v]:
dist[v] = d + w
heapq.heappush(minHeap, (dist[v], v))
return [d if d != math.inf else -1
for d in dist]
|
Solution
|
python
|
openai__openai-python
|
src/openai/types/realtime/realtime_session_create_request.py
|
{
"start": 588,
"end": 5219
}
|
class ____(BaseModel):
type: Literal["realtime"]
"""The type of session to create. Always `realtime` for the Realtime API."""
audio: Optional[RealtimeAudioConfig] = None
"""Configuration for input and output audio."""
include: Optional[List[Literal["item.input_audio_transcription.logprobs"]]] = None
"""Additional fields to include in server outputs.
`item.input_audio_transcription.logprobs`: Include logprobs for input audio
transcription.
"""
instructions: Optional[str] = None
"""The default system instructions (i.e.
system message) prepended to model calls. This field allows the client to guide
the model on desired responses. The model can be instructed on response content
and format, (e.g. "be extremely succinct", "act friendly", "here are examples of
good responses") and on audio behavior (e.g. "talk quickly", "inject emotion
into your voice", "laugh frequently"). The instructions are not guaranteed to be
followed by the model, but they provide guidance to the model on the desired
behavior.
Note that the server sets default instructions which will be used if this field
is not set and are visible in the `session.created` event at the start of the
session.
"""
max_output_tokens: Union[int, Literal["inf"], None] = None
"""
Maximum number of output tokens for a single assistant response, inclusive of
tool calls. Provide an integer between 1 and 4096 to limit output tokens, or
`inf` for the maximum available tokens for a given model. Defaults to `inf`.
"""
model: Union[
str,
Literal[
"gpt-realtime",
"gpt-realtime-2025-08-28",
"gpt-4o-realtime-preview",
"gpt-4o-realtime-preview-2024-10-01",
"gpt-4o-realtime-preview-2024-12-17",
"gpt-4o-realtime-preview-2025-06-03",
"gpt-4o-mini-realtime-preview",
"gpt-4o-mini-realtime-preview-2024-12-17",
"gpt-realtime-mini",
"gpt-realtime-mini-2025-10-06",
"gpt-audio-mini",
"gpt-audio-mini-2025-10-06",
],
None,
] = None
"""The Realtime model used for this session."""
output_modalities: Optional[List[Literal["text", "audio"]]] = None
"""The set of modalities the model can respond with.
It defaults to `["audio"]`, indicating that the model will respond with audio
plus a transcript. `["text"]` can be used to make the model respond with text
only. It is not possible to request both `text` and `audio` at the same time.
"""
prompt: Optional[ResponsePrompt] = None
"""
Reference to a prompt template and its variables.
[Learn more](https://platform.openai.com/docs/guides/text?api-mode=responses#reusable-prompts).
"""
tool_choice: Optional[RealtimeToolChoiceConfig] = None
"""How the model chooses tools.
Provide one of the string modes or force a specific function/MCP tool.
"""
tools: Optional[RealtimeToolsConfig] = None
"""Tools available to the model."""
tracing: Optional[RealtimeTracingConfig] = None
"""
Realtime API can write session traces to the
[Traces Dashboard](/logs?api=traces). Set to null to disable tracing. Once
tracing is enabled for a session, the configuration cannot be modified.
`auto` will create a trace for the session with default values for the workflow
name, group id, and metadata.
"""
truncation: Optional[RealtimeTruncation] = None
"""
When the number of tokens in a conversation exceeds the model's input token
limit, the conversation be truncated, meaning messages (starting from the
oldest) will not be included in the model's context. A 32k context model with
4,096 max output tokens can only include 28,224 tokens in the context before
truncation occurs. Clients can configure truncation behavior to truncate with a
lower max token limit, which is an effective way to control token usage and
cost. Truncation will reduce the number of cached tokens on the next turn
(busting the cache), since messages are dropped from the beginning of the
context. However, clients can also configure truncation to retain messages up to
a fraction of the maximum context size, which will reduce the need for future
truncations and thus improve the cache rate. Truncation can be disabled
entirely, which means the server will never truncate but would instead return an
error if the conversation exceeds the model's input token limit.
"""
|
RealtimeSessionCreateRequest
|
python
|
doocs__leetcode
|
solution/2900-2999/2950.Number of Divisible Substrings/Solution.py
|
{
"start": 0,
"end": 465
}
|
class ____:
def countDivisibleSubstrings(self, word: str) -> int:
d = ["ab", "cde", "fgh", "ijk", "lmn", "opq", "rst", "uvw", "xyz"]
mp = {}
for i, s in enumerate(d, 1):
for c in s:
mp[c] = i
ans = 0
n = len(word)
for i in range(n):
s = 0
for j in range(i, n):
s += mp[word[j]]
ans += s % (j - i + 1) == 0
return ans
|
Solution
|
python
|
tensorflow__tensorflow
|
tensorflow/python/eager/gradient_input_output_exclusions_test.py
|
{
"start": 980,
"end": 1977
}
|
class ____(test.TestCase):
def testGeneratedFileMatchesHead(self):
expected_contents = gradient_input_output_exclusions.get_contents()
filename = os.path.join(
resource_loader.get_root_dir_with_all_resources(),
resource_loader.get_path_to_datafile("pywrap_gradient_exclusions.cc"))
actual_contents = file_io.read_file_to_string(filename)
# On windows, one or both of these strings may have CRLF line endings.
# To make sure, sanitize both:
sanitized_actual_contents = actual_contents.replace("\r", "")
sanitized_expected_contents = expected_contents.replace("\r", "")
self.assertEqual(
sanitized_actual_contents, sanitized_expected_contents, """
pywrap_gradient_exclusions.cc needs to be updated.
Please regenerate using:
bazel run tensorflow/python/eager:gen_gradient_input_output_exclusions -- $PWD/tensorflow/python/eager/pywrap_gradient_exclusions.cc"""
)
if __name__ == "__main__":
test.main()
|
GradientInputOutputExclusionsTest
|
python
|
marshmallow-code__marshmallow
|
src/marshmallow/fields.py
|
{
"start": 60446,
"end": 61069
}
|
class ____(String):
"""An email field.
:param args: The same positional arguments that :class:`String` receives.
:param kwargs: The same keyword arguments that :class:`String` receives.
"""
#: Default error messages.
default_error_messages = {"invalid": "Not a valid email address."}
def __init__(self, **kwargs: Unpack[_BaseFieldKwargs]) -> None:
super().__init__(**kwargs)
# Insert validation into self.validators so that multiple errors can be stored.
validator = validate.Email(error=self.error_messages["invalid"])
self.validators.insert(0, validator)
|
Email
|
python
|
pyodide__pyodide
|
tools/backport.py
|
{
"start": 4507,
"end": 6137
}
|
class ____:
"""A paragraph grouping of changelog entries separated by blank lines.
Introduced by a line starting with a -. Ended by a blank line, or ### or ##.
header:
Probably empty?
entries:
The list of entries.
cur_entry:
Parser state.
"""
header: list[str] = field(default_factory=list)
entries: list[ChangelogEntry] = field(default_factory=list)
cur_entry: ChangelogEntry = field(default_factory=ChangelogEntry)
def get_text(self) -> str:
"""Unparse the paragraph"""
header = ""
if self.header:
header = "\n".join(self.header) + "\n"
res = header + "".join(x.get_text() for x in self.entries)
# Special case: if the last entry already ends in a blank line, we don't
# add another one. This keeps the spacing more consistent with the
# backported entries.
if not res.endswith("\n\n"):
res += "\n"
return res
def __bool__(self) -> bool:
return bool(self.header or self.entries or self.cur_entry)
def append(self, line: str) -> None:
"""Main parsing logic."""
if line.startswith("-"):
self.finish_entry()
if self.cur_entry or line.startswith("-"):
self.cur_entry.append(line)
else:
self.header.append(line)
def finish_entry(self) -> None:
"""If cur_entry is nonempty, add it to entries. Then empty out cur_entry"""
if self.cur_entry:
self.entries.append(self.cur_entry)
self.cur_entry = ChangelogEntry()
@dataclass
|
ChangelogParagraph
|
python
|
Textualize__textual
|
tests/command_palette/test_click_away.py
|
{
"start": 94,
"end": 303
}
|
class ____(Provider):
async def search(self, query: str) -> Hits:
def goes_nowhere_does_nothing() -> None:
pass
yield Hit(1, query, goes_nowhere_does_nothing, query)
|
SimpleSource
|
python
|
apache__airflow
|
task-sdk/tests/task_sdk/definitions/test_operator_resources.py
|
{
"start": 890,
"end": 1616
}
|
class ____:
def test_resource_eq(self):
r = Resources(cpus=0.1, ram=2048)
assert r not in [{}, [], None]
assert r == r
r2 = Resources(cpus=0.1, ram=2048)
assert r == r2
assert r2 == r
r3 = Resources(cpus=0.2, ram=2048)
assert r != r3
def test_to_dict(self):
r = Resources(cpus=0.1, ram=2048, disk=1024, gpus=1)
assert r.to_dict() == {
"cpus": {"name": "CPU", "qty": 0.1, "units_str": "core(s)"},
"ram": {"name": "RAM", "qty": 2048, "units_str": "MB"},
"disk": {"name": "Disk", "qty": 1024, "units_str": "MB"},
"gpus": {"name": "GPU", "qty": 1, "units_str": "gpu(s)"},
}
|
TestResources
|
python
|
sympy__sympy
|
sympy/integrals/transforms.py
|
{
"start": 36021,
"end": 38727
}
|
class ____(FourierTypeTransform):
"""
Class representing unevaluated inverse Fourier transforms.
For usage of this class, see the :class:`IntegralTransform` docstring.
For how to compute inverse Fourier transforms, see the
:func:`inverse_fourier_transform` docstring.
"""
_name = 'Inverse Fourier'
def a(self):
return 1
def b(self):
return 2*S.Pi
def inverse_fourier_transform(F, k, x, **hints):
r"""
Compute the unitary, ordinary-frequency inverse Fourier transform of `F`,
defined as
.. math:: f(x) = \int_{-\infty}^\infty F(k) e^{2\pi i x k} \mathrm{d} k.
Explanation
===========
If the transform cannot be computed in closed form, this
function returns an unevaluated :class:`InverseFourierTransform` object.
For other Fourier transform conventions, see the function
:func:`sympy.integrals.transforms._fourier_transform`.
For a description of possible hints, refer to the docstring of
:func:`sympy.integrals.transforms.IntegralTransform.doit`.
Note that for this transform, by default ``noconds=True``.
Examples
========
>>> from sympy import inverse_fourier_transform, exp, sqrt, pi
>>> from sympy.abc import x, k
>>> inverse_fourier_transform(sqrt(pi)*exp(-(pi*k)**2), k, x)
exp(-x**2)
>>> inverse_fourier_transform(sqrt(pi)*exp(-(pi*k)**2), k, x, noconds=False)
(exp(-x**2), True)
See Also
========
fourier_transform
sine_transform, inverse_sine_transform
cosine_transform, inverse_cosine_transform
hankel_transform, inverse_hankel_transform
mellin_transform, laplace_transform
"""
return InverseFourierTransform(F, k, x).doit(**hints)
##########################################################################
# Fourier Sine and Cosine Transform
##########################################################################
@_noconds_(True)
def _sine_cosine_transform(f, x, k, a, b, K, name, simplify=True):
"""
Compute a general sine or cosine-type transform
F(k) = a int_0^oo b*sin(x*k) f(x) dx.
F(k) = a int_0^oo b*cos(x*k) f(x) dx.
For suitable choice of a and b, this reduces to the standard sine/cosine
and inverse sine/cosine transforms.
"""
F = integrate(a*f*K(b*x*k), (x, S.Zero, S.Infinity))
if not F.has(Integral):
return _simplify(F, simplify), S.true
if not F.is_Piecewise:
raise IntegralTransformError(name, f, 'could not compute integral')
F, cond = F.args[0]
if F.has(Integral):
raise IntegralTransformError(name, f, 'integral in unexpected form')
return _simplify(F, simplify), cond
|
InverseFourierTransform
|
python
|
pandas-dev__pandas
|
pandas/tests/io/parser/test_textreader.py
|
{
"start": 834,
"end": 12091
}
|
class ____:
@pytest.fixture
def csv_path(self, datapath):
return datapath("io", "data", "csv", "test1.csv")
def test_file_handle(self, csv_path):
with open(csv_path, "rb") as f:
reader = TextReader(f, **_na_value_kwargs)
reader.read()
def test_file_handle_mmap(self, csv_path):
# this was never using memory_map=True
with open(csv_path, "rb") as f:
reader = TextReader(f, header=None, **_na_value_kwargs)
reader.read()
def test_StringIO(self, csv_path):
with open(csv_path, "rb") as f:
text = f.read()
src = BytesIO(text)
reader = TextReader(src, header=None, **_na_value_kwargs)
reader.read()
def test_encoding_mismatch_warning(self, csv_path):
# GH-57954
with open(csv_path, encoding="UTF-8") as f:
msg = "latin1 is different from the encoding"
with pytest.raises(ValueError, match=msg):
read_csv(f, encoding="latin1")
def test_string_factorize(self):
# should this be optional?
data = "a\nb\na\nb\na"
reader = TextReader(StringIO(data), header=None, **_na_value_kwargs)
result = reader.read()
assert len(set(map(id, result[0]))) == 2
def test_skipinitialspace(self):
data = "a, b\na, b\na, b\na, b"
reader = TextReader(
StringIO(data), skipinitialspace=True, header=None, **_na_value_kwargs
)
result = reader.read()
tm.assert_numpy_array_equal(
result[0], np.array(["a", "a", "a", "a"], dtype=np.object_)
)
tm.assert_numpy_array_equal(
result[1], np.array(["b", "b", "b", "b"], dtype=np.object_)
)
def test_parse_booleans(self):
data = "True\nFalse\nTrue\nTrue"
reader = TextReader(StringIO(data), header=None, **_na_value_kwargs)
result = reader.read()
assert result[0].dtype == np.bool_
def test_delimit_whitespace(self):
data = 'a b\na\t\t "b"\n"a"\t \t b'
reader = TextReader(
StringIO(data), delim_whitespace=True, header=None, **_na_value_kwargs
)
result = reader.read()
tm.assert_numpy_array_equal(
result[0], np.array(["a", "a", "a"], dtype=np.object_)
)
tm.assert_numpy_array_equal(
result[1], np.array(["b", "b", "b"], dtype=np.object_)
)
def test_embedded_newline(self):
data = 'a\n"hello\nthere"\nthis'
reader = TextReader(StringIO(data), header=None, **_na_value_kwargs)
result = reader.read()
expected = np.array(["a", "hello\nthere", "this"], dtype=np.object_)
tm.assert_numpy_array_equal(result[0], expected)
def test_euro_decimal(self):
data = "12345,67\n345,678"
reader = TextReader(
StringIO(data), delimiter=":", decimal=",", header=None, **_na_value_kwargs
)
result = reader.read()
expected = np.array([12345.67, 345.678])
tm.assert_almost_equal(result[0], expected)
def test_integer_thousands(self):
data = "123,456\n12,500"
reader = TextReader(
StringIO(data),
delimiter=":",
thousands=",",
header=None,
**_na_value_kwargs,
)
result = reader.read()
expected = np.array([123456, 12500], dtype=np.int64)
tm.assert_almost_equal(result[0], expected)
def test_integer_thousands_alt(self):
data = "123.456\n12.500"
reader = TextFileReader(
StringIO(data), delimiter=":", thousands=".", header=None
)
result = reader.read()
expected = DataFrame([123456, 12500])
tm.assert_frame_equal(result, expected)
def test_skip_bad_lines(self):
# too many lines, see #2430 for why
data = "a:b:c\nd:e:f\ng:h:i\nj:k:l:m\nl:m:n\no:p:q:r"
reader = TextReader(
StringIO(data), delimiter=":", header=None, **_na_value_kwargs
)
msg = r"Error tokenizing data\. C error: Expected 3 fields in line 4, saw 4"
with pytest.raises(parser.ParserError, match=msg):
reader.read()
reader = TextReader(
StringIO(data),
delimiter=":",
header=None,
on_bad_lines=2, # Skip
**_na_value_kwargs,
)
result = reader.read()
expected = {
0: np.array(["a", "d", "g", "l"], dtype=object),
1: np.array(["b", "e", "h", "m"], dtype=object),
2: np.array(["c", "f", "i", "n"], dtype=object),
}
assert_array_dicts_equal(result, expected)
with tm.assert_produces_warning(ParserWarning, match="Skipping line"):
reader = TextReader(
StringIO(data),
delimiter=":",
header=None,
on_bad_lines=1, # Warn
**_na_value_kwargs,
)
reader.read()
def test_header_not_enough_lines(self):
data = "skip this\nskip this\na,b,c\n1,2,3\n4,5,6"
reader = TextReader(StringIO(data), delimiter=",", header=2, **_na_value_kwargs)
header = reader.header
expected = [["a", "b", "c"]]
assert header == expected
recs = reader.read()
expected = {
0: np.array([1, 4], dtype=np.int64),
1: np.array([2, 5], dtype=np.int64),
2: np.array([3, 6], dtype=np.int64),
}
assert_array_dicts_equal(recs, expected)
def test_escapechar(self):
data = '\\"hello world"\n\\"hello world"\n\\"hello world"'
reader = TextReader(
StringIO(data),
delimiter=",",
header=None,
escapechar="\\",
**_na_value_kwargs,
)
result = reader.read()
expected = {0: np.array(['"hello world"'] * 3, dtype=object)}
assert_array_dicts_equal(result, expected)
def test_eof_has_eol(self):
# handling of new line at EOF
pass
def test_na_substitution(self):
pass
def test_numpy_string_dtype(self):
data = """\
a,1
aa,2
aaa,3
aaaa,4
aaaaa,5"""
def _make_reader(**kwds):
if "dtype" in kwds:
kwds["dtype"] = ensure_dtype_objs(kwds["dtype"])
return TextReader(
StringIO(data), delimiter=",", header=None, **kwds, **_na_value_kwargs
)
reader = _make_reader(dtype="S5,i4")
result = reader.read()
assert result[0].dtype == "S5"
ex_values = np.array(["a", "aa", "aaa", "aaaa", "aaaaa"], dtype="S5")
assert (result[0] == ex_values).all()
assert result[1].dtype == "i4"
reader = _make_reader(dtype="S4")
result = reader.read()
assert result[0].dtype == "S4"
ex_values = np.array(["a", "aa", "aaa", "aaaa", "aaaa"], dtype="S4")
assert (result[0] == ex_values).all()
assert result[1].dtype == "S4"
def test_pass_dtype(self):
data = """\
one,two
1,a
2,b
3,c
4,d"""
def _make_reader(**kwds):
if "dtype" in kwds:
kwds["dtype"] = ensure_dtype_objs(kwds["dtype"])
return TextReader(StringIO(data), delimiter=",", **kwds, **_na_value_kwargs)
reader = _make_reader(dtype={"one": "u1", 1: "S1"})
result = reader.read()
assert result[0].dtype == "u1"
assert result[1].dtype == "S1"
reader = _make_reader(dtype={"one": np.uint8, 1: object})
result = reader.read()
assert result[0].dtype == "u1"
assert result[1].dtype == "O"
reader = _make_reader(dtype={"one": np.dtype("u1"), 1: np.dtype("O")})
result = reader.read()
assert result[0].dtype == "u1"
assert result[1].dtype == "O"
def test_usecols(self):
data = """\
a,b,c
1,2,3
4,5,6
7,8,9
10,11,12"""
def _make_reader(**kwds):
return TextReader(StringIO(data), delimiter=",", **kwds, **_na_value_kwargs)
reader = _make_reader(usecols=(1, 2))
result = reader.read()
exp = _make_reader().read()
assert len(result) == 2
assert (result[1] == exp[1]).all()
assert (result[2] == exp[2]).all()
@pytest.mark.parametrize(
"text, kwargs",
[
("a,b,c\r1,2,3\r4,5,6\r7,8,9\r10,11,12", {"delimiter": ","}),
(
"a b c\r1 2 3\r4 5 6\r7 8 9\r10 11 12",
{"delim_whitespace": True},
),
("a,b,c\r1,2,3\r4,5,6\r,88,9\r10,11,12", {"delimiter": ","}),
(
(
"A,B,C,D,E,F,G,H,I,J,K,L,M,N,O\r"
"AAAAA,BBBBB,0,0,0,0,0,0,0,0,0,0,0,0,0\r"
",BBBBB,0,0,0,0,0,0,0,0,0,0,0,0,0"
),
{"delimiter": ","},
),
("A B C\r 2 3\r4 5 6", {"delim_whitespace": True}),
("A B C\r2 3\r4 5 6", {"delim_whitespace": True}),
],
)
def test_cr_delimited(self, text, kwargs):
nice_text = text.replace("\r", "\r\n")
result = TextReader(StringIO(text), **kwargs, **_na_value_kwargs).read()
expected = TextReader(StringIO(nice_text), **kwargs, **_na_value_kwargs).read()
assert_array_dicts_equal(result, expected)
def test_empty_field_eof(self):
data = "a,b,c\n1,2,3\n4,,"
result = TextReader(StringIO(data), delimiter=",", **_na_value_kwargs).read()
expected = {
0: np.array([1, 4], dtype=np.int64),
1: np.array(["2", ""], dtype=object),
2: np.array(["3", ""], dtype=object),
}
assert_array_dicts_equal(result, expected)
@pytest.mark.parametrize("repeat", range(10))
def test_empty_field_eof_mem_access_bug(self, repeat):
# GH5664
a = DataFrame([["b"], [np.nan]], columns=["a"], index=["a", "c"])
b = DataFrame([[1, 1, 1, 0], [1, 1, 1, 0]], columns=list("abcd"), index=[1, 1])
c = DataFrame(
[
[1, 2, 3, 4],
[6, np.nan, np.nan, np.nan],
[8, 9, 10, 11],
[13, 14, np.nan, np.nan],
],
columns=list("abcd"),
index=[0, 5, 7, 12],
)
df = read_csv(StringIO("a,b\nc\n"), skiprows=0, names=["a"], engine="c")
tm.assert_frame_equal(df, a)
df = read_csv(
StringIO("1,1,1,1,0\n" * 2 + "\n" * 2), names=list("abcd"), engine="c"
)
tm.assert_frame_equal(df, b)
df = read_csv(
StringIO("0,1,2,3,4\n5,6\n7,8,9,10,11\n12,13,14"),
names=list("abcd"),
engine="c",
)
tm.assert_frame_equal(df, c)
def test_empty_csv_input(self):
# GH14867
with read_csv(
StringIO(), chunksize=20, header=None, names=["a", "b", "c"]
) as df:
assert isinstance(df, TextFileReader)
def assert_array_dicts_equal(left, right):
for k, v in left.items():
tm.assert_numpy_array_equal(np.asarray(v), np.asarray(right[k]))
|
TestTextReader
|
python
|
great-expectations__great_expectations
|
contrib/great_expectations_geospatial_expectations/great_expectations_geospatial_expectations/expectations/expect_column_values_geometry_distance_to_address_to_be_between.py
|
{
"start": 6883,
"end": 13675
}
|
class ____(ColumnMapExpectation):
"""Expect that column values as geometry points to be between a certain distance from a geocoded object.
expect_column_values_geometry_distance_to_address_to_be_between is a \
[Column Map Expectation](https://docs.greatexpectations.io/docs/guides/expectations/creating_custom_expectations/how_to_create_custom_column_map_expectations).
Args:
column (str): \
The column name. \
Column values must be provided in lon-lat or lat-long tuples/lists, WKT or WKB format, which are commom formats for GIS Database formats. \
WKT can be accessed thhrough the ST_AsText() or ST_AsBinary() functions in queries for PostGIS and MSSQL. \
Values must be in longitude - latitude coordinates for this method to work with geocoding.
Keyword Args:
place (str): \
The country, place, address, etc. to query. Expect to return a point from geocoder (Default: OpenStreetMaps (Nominatim)). \
Note that this method uses the Latitude - Longitude Point returned by the geocoder, and not the shape(geometry)
column_shape_format (str): \
Geometry format for 'column' (wkt, wkb, lonlat, latlon). Column values can be provided in WKT or WKB format, which are commom formats for GIS Database formats. \
latlon or lonlat also supports tuple pairs or list pairs in either Longtitude or Latitude first formats. \
WKT can be accessed thhrough the ST_AsText() or ST_AsBinary() functions in queries for PostGIS and MSSQL.
geocoder (str): \
Geocoder from GeoPy to use to return the shape. While this is generic, the api is required to be available from GeoPy and must return a geometry.
geocoder_config (dict str): \
arguments to initialize the GeoPy geocoder. e.g. for paid services, an API_key is usually required. See GeoPy for reference.
Returns:
An [ExpectationSuiteValidationResult](https://docs.greatexpectations.io/docs/terms/validation_result)
Notes:
* The user is responsible to transform the column to a WKT or WKB format that is in the WGS84 coordianate system for earth.
* Any geometry can be provided in the column value, but if it is not a POINT, the centroid will be evaluated.
* Invalid geometries import as None and fail the test.
* Other Coordinate Reference Systems are not yet supported.
"""
# These examples will be shown in the public gallery.
# They will also be executed as unit tests for your Expectation.
examples = [
{
"data": {
"point_array": [
"POINT(-75.682729 45.4437213)",
"POINT(-75.6987003 45.4237274)",
"POINT(-75.69660 45.428057)",
"POINT(-75.668226 45.321924)",
"POINT(-75.702662 45.420936)",
],
},
"tests": [
{
"title": "positive_test_with_points",
"exact_match_out": False,
"include_in_gallery": True,
"in": {
"column": "point_array",
"place": "24 Sussex Drive, Ottawa, ON",
"min_value": None,
"max_value": 25,
"units": "km",
},
"out": {
"success": True,
},
},
{
"title": "negative_test_with_points",
"exact_match_out": False,
"include_in_gallery": True,
"in": {
"column": "point_array",
"place": "24 Sussex Drive, Ottawa, ON",
"min_value": 1,
"max_value": 5,
"units": "miles",
},
"out": {"success": False, "unexpected_index_list": [0, 3]},
},
],
}
]
# This is the id string of the Metric used by this Expectation.
# For most Expectations, it will be the same as the `condition_metric_name` defined in your Metric class above.
map_metric = "column_values.geometry.distance_to_address"
# This is a list of parameter names that can affect whether the Expectation evaluates to True or False
success_keys = (
"mostly",
"column_shape_format",
"place",
"geocoder",
"geocoder_config",
"min_value",
"max_value",
"units",
)
# This dictionary contains default values for any parameters that should have default values
default_kwarg_values = {
"mostly": 1,
"column_shape_format": "wkt",
"geocoder": "nominatim",
"geocoder_config": dict(user_agent="great_expectations.hacakthon-2022"),
"units": "km",
}
def validate_configuration(self, configuration: Optional[ExpectationConfiguration]):
"""
Validates that a configuration has been set, and sets a configuration if it has yet to be set. Ensures that
necessary configuration arguments have been provided for the validation of the expectation.
Args:
configuration (OPTIONAL[ExpectationConfiguration]): \
An optional Expectation Configuration entry that will be used to configure the expectation
Returns:
True if the configuration has been validated successfully. Otherwise, raises an exception
"""
super().validate_configuration(configuration)
configuration = configuration or self.configuration
min_val = None
max_val = None
if "min_value" in configuration.kwargs:
min_val = configuration.kwargs["min_value"]
if "max_value" in configuration.kwargs:
max_val = configuration.kwargs["max_value"]
assert min_val is not None or max_val is not None, (
"min_value and max_value cannot both be None"
)
return True
# This object contains metadata for display in the public Gallery
library_metadata = {
"tags": [
"geospatial",
"hackathon-2022",
], # Tags for this Expectation in the Gallery
"contributors": [ # Github handles for all contributors to this Expectation.
"@pjdobson", # Don't forget to add your github handle here!
],
"requirements": ["pygeos", "geopy"],
}
if __name__ == "__main__":
ExpectColumnValuesGeometryDistanceToAddressToBeBetween().print_diagnostic_checklist()
|
ExpectColumnValuesGeometryDistanceToAddressToBeBetween
|
python
|
Textualize__textual
|
src/textual/demo/widgets.py
|
{
"start": 19599,
"end": 20474
}
|
class ____(containers.VerticalGroup):
DEFAULT_CLASSES = "column"
TREES_MD = """\
## Tree
The Tree widget displays hierarchical data.
There is also the Tree widget's cousin, DirectoryTree, to navigate folders and files on the filesystem.
"""
DEFAULT_CSS = """
Trees {
Tree {
height: 16;
padding: 1;
&.-maximized { height: 1fr; }
border: wide transparent;
&:focus { border: wide $border; }
}
VerticalGroup {
}
}
"""
def compose(self) -> ComposeResult:
yield Markdown(self.TREES_MD)
with containers.VerticalGroup():
tree = Tree("80s movies")
tree.show_root = False
tree.add_json(MOVIES_TREE)
tree.root.expand()
yield tree
|
Trees
|
python
|
skorch-dev__skorch
|
skorch/tests/callbacks/test_scoring.py
|
{
"start": 31727,
"end": 34879
}
|
class ____:
"""This test is about the possibility to control cache usage globally
See this issue for more context:
https://github.com/skorch-dev/skorch/issues/957
"""
@pytest.fixture
def net_cls(self):
from skorch import NeuralNetClassifier
return NeuralNetClassifier
@pytest.mark.parametrize('net_use_caching', ['auto', True, False])
def test_net_overrides_caching(
self, net_cls, classifier_module, classifier_data, net_use_caching
):
from skorch.callbacks import BatchScoring, EpochScoring
call_count = 0
class MyNet(net_cls):
def infer(self, x, **kwargs):
nonlocal call_count
call_count += 1
return super().infer(x, **kwargs)
X, y = classifier_data
X, y = X[:40], y[:40] # small amount of data is sufficient
batch_size = 4
max_epochs = 3
# calculation of expected call count of infer
# net:
# 40 samples with a batch size of 4 => 10 calls to net.infer per epoch
# 3 epochs => 30 calls as a baseline
# callbacks:
# 32 samples for train => 8 calls if on_train=True => 24 for 3 epochs
# 8 samples for valid => 2 calls if on_train=False => 6 for 3 epochs
callbacks = [
# this callback adds 24 calls
BatchScoring(
scoring='f1',
use_caching=False,
on_train=True,
),
# this callback adds 6 calls
BatchScoring(
scoring='accuracy',
use_caching=True,
on_train=False,
),
# this callback adds 24 calls
EpochScoring(
scoring='recall',
use_caching=True,
on_train=True,
),
# this callback adds 6 calls
EpochScoring(
scoring='precision',
use_caching=False,
on_train=False,
),
]
net = MyNet(
classifier_module,
batch_size=batch_size,
max_epochs=max_epochs,
callbacks=callbacks,
use_caching=net_use_caching,
# turn off default scorer to not mess with the numbers
callbacks__valid_acc=None,
)
net.fit(X, y)
if net_use_caching == 'auto':
assert call_count == 30 + 24 + 0 + 0 + 6
elif net_use_caching is True:
assert call_count == 30 + 0 + 0 + 0 + 0
elif net_use_caching is False:
assert call_count == 30 + 24 + 6 + 24 + 6
else:
assert False, "incorrect parameter passed"
def test_net_use_caching_wrong_value_raises(self, net_cls, classifier_module):
net = net_cls(classifier_module, use_caching='wrong-value')
msg = re.escape(
"Incorrect value for use_caching used ('wrong-value'), "
"use one of: auto, False, True"
)
with pytest.raises(ValueError, match=msg):
net.initialize()
|
TestScoringCacheGlobalControl
|
python
|
pypa__setuptools
|
setuptools/command/alias.py
|
{
"start": 365,
"end": 2380
}
|
class ____(option_base):
"""Define a shortcut that invokes one or more commands"""
description = "define a shortcut to invoke one or more commands"
command_consumes_arguments = True
user_options = [
('remove', 'r', 'remove (unset) the alias'),
] + option_base.user_options
boolean_options = option_base.boolean_options + ['remove']
def initialize_options(self):
option_base.initialize_options(self)
self.args = None
self.remove = None
def finalize_options(self) -> None:
option_base.finalize_options(self)
if self.remove and len(self.args) != 1:
raise DistutilsOptionError(
"Must specify exactly one argument (the alias name) when using --remove"
)
def run(self) -> None:
aliases = self.distribution.get_option_dict('aliases')
if not self.args:
print("Command Aliases")
print("---------------")
for alias in aliases:
print("setup.py alias", format_alias(alias, aliases))
return
elif len(self.args) == 1:
(alias,) = self.args
if self.remove:
command = None
elif alias in aliases:
print("setup.py alias", format_alias(alias, aliases))
return
else:
print(f"No alias definition found for {alias!r}")
return
else:
alias = self.args[0]
command = ' '.join(map(shquote, self.args[1:]))
edit_config(self.filename, {'aliases': {alias: command}}, self.dry_run)
def format_alias(name, aliases):
source, command = aliases[name]
if source == config_file('global'):
source = '--global-config '
elif source == config_file('user'):
source = '--user-config '
elif source == config_file('local'):
source = ''
else:
source = f'--filename={source!r}'
return source + name + ' ' + command
|
alias
|
python
|
allegroai__clearml
|
clearml/backend_api/services/v2_23/queues.py
|
{
"start": 65829,
"end": 70427
}
|
class ____(Request):
"""
Returns metrics of the company queues. The metrics are avaraged in the specified interval.
:param from_date: Starting time (in seconds from epoch) for collecting metrics
:type from_date: float
:param to_date: Ending time (in seconds from epoch) for collecting metrics
:type to_date: float
:param interval: Time interval in seconds for a single metrics point. The
minimal value is 1
:type interval: int
:param queue_ids: List of queue ids to collect metrics for. If not provided or
empty then all then average metrics across all the company queues will be
returned.
:type queue_ids: Sequence[str]
:param refresh: If set then the new queue metrics is taken
:type refresh: bool
"""
_service = "queues"
_action = "get_queue_metrics"
_version = "2.23"
_schema = {
"definitions": {},
"properties": {
"from_date": {
"description": "Starting time (in seconds from epoch) for collecting metrics",
"type": "number",
},
"interval": {
"description": "Time interval in seconds for a single metrics point. The minimal value is 1",
"type": "integer",
},
"queue_ids": {
"description": "List of queue ids to collect metrics for. If not provided or empty then all then average metrics across all the company queues will be returned.",
"items": {"type": "string"},
"type": "array",
},
"refresh": {
"default": False,
"description": "If set then the new queue metrics is taken",
"type": "boolean",
},
"to_date": {
"description": "Ending time (in seconds from epoch) for collecting metrics",
"type": "number",
},
},
"required": ["from_date", "to_date", "interval"],
"type": "object",
}
def __init__(
self,
from_date: float,
to_date: float,
interval: int,
queue_ids: Optional[List[str]] = None,
refresh: Optional[bool] = False,
**kwargs: Any
) -> None:
super(GetQueueMetricsRequest, self).__init__(**kwargs)
self.from_date = from_date
self.to_date = to_date
self.interval = interval
self.queue_ids = queue_ids
self.refresh = refresh
@schema_property("from_date")
def from_date(self) -> float:
return self._property_from_date
@from_date.setter
def from_date(self, value: float) -> None:
if value is None:
self._property_from_date = None
return
self.assert_isinstance(value, "from_date", six.integer_types + (float,))
self._property_from_date = value
@schema_property("to_date")
def to_date(self) -> float:
return self._property_to_date
@to_date.setter
def to_date(self, value: float) -> None:
if value is None:
self._property_to_date = None
return
self.assert_isinstance(value, "to_date", six.integer_types + (float,))
self._property_to_date = value
@schema_property("interval")
def interval(self) -> int:
return self._property_interval
@interval.setter
def interval(self, value: int) -> None:
if value is None:
self._property_interval = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "interval", six.integer_types)
self._property_interval = value
@schema_property("queue_ids")
def queue_ids(self) -> Optional[List[str]]:
return self._property_queue_ids
@queue_ids.setter
def queue_ids(self, value: Optional[List[str]]) -> None:
if value is None:
self._property_queue_ids = None
return
self.assert_isinstance(value, "queue_ids", (list, tuple))
self.assert_isinstance(value, "queue_ids", six.string_types, is_array=True)
self._property_queue_ids = value
@schema_property("refresh")
def refresh(self) -> Optional[bool]:
return self._property_refresh
@refresh.setter
def refresh(self, value: Optional[bool]) -> None:
if value is None:
self._property_refresh = None
return
self.assert_isinstance(value, "refresh", (bool,))
self._property_refresh = value
|
GetQueueMetricsRequest
|
python
|
huggingface__transformers
|
src/transformers/loss/loss_grounding_dino.py
|
{
"start": 2444,
"end": 5842
}
|
class ____(HungarianMatcher):
@torch.no_grad()
def forward(self, outputs, targets):
"""
Args:
outputs (`dict`):
A dictionary that contains at least these entries:
* "logits": Tensor of dim [batch_size, num_queries, num_classes] with the classification logits
* "pred_boxes": Tensor of dim [batch_size, num_queries, 4] with the predicted box coordinates.
* "label_maps": Tuple of tensors of dim [num_classes, hidden_dim].
targets (`list[dict]`):
A list of targets (len(targets) = batch_size), where each target is a dict containing:
* "class_labels": Tensor of dim [num_target_boxes] (where num_target_boxes is the number of
ground-truth
objects in the target) containing the class labels
* "boxes": Tensor of dim [num_target_boxes, 4] containing the target box coordinates.
Returns:
`list[Tuple]`: A list of size `batch_size`, containing tuples of (index_i, index_j) where:
- index_i is the indices of the selected predictions (in order)
- index_j is the indices of the corresponding selected targets (in order)
For each batch element, it holds: len(index_i) = len(index_j) = min(num_queries, num_target_boxes)
"""
batch_size, num_queries = outputs["logits"].shape[:2]
# We flatten to compute the cost matrices in a batch
out_prob = outputs["logits"].flatten(0, 1).sigmoid() # [batch_size * num_queries, hidden_dim]
out_bbox = outputs["pred_boxes"].flatten(0, 1) # [batch_size * num_queries, 4]
label_maps = outputs["label_maps"]
# First take the label map for each class in each batch and then concatenate them
label_maps = torch.cat([label_map[target["class_labels"]] for label_map, target in zip(label_maps, targets)])
# Normalize label maps based on number of tokens per class
label_maps = label_maps / label_maps.sum(dim=-1, keepdim=True)
# Also concat the target labels and boxes
target_bbox = torch.cat([v["boxes"] for v in targets])
# Compute the classification cost.
alpha = 0.25
gamma = 2.0
neg_cost_class = (1 - alpha) * (out_prob**gamma) * (-(1 - out_prob + 1e-8).log())
pos_cost_class = alpha * ((1 - out_prob) ** gamma) * (-(out_prob + 1e-8).log())
# Compute the classification cost by taking pos and neg cost in the appropriate index
class_cost = (pos_cost_class - neg_cost_class) @ label_maps.t()
# Compute the L1 cost between boxes
bbox_cost = torch.cdist(out_bbox, target_bbox, p=1)
# Compute the giou cost between boxes
giou_cost = -generalized_box_iou(center_to_corners_format(out_bbox), center_to_corners_format(target_bbox))
# Final cost matrix
cost_matrix = self.bbox_cost * bbox_cost + self.class_cost * class_cost + self.giou_cost * giou_cost
cost_matrix = cost_matrix.view(batch_size, num_queries, -1).cpu()
sizes = [len(v["boxes"]) for v in targets]
indices = [linear_sum_assignment(c[i]) for i, c in enumerate(cost_matrix.split(sizes, -1))]
return [(torch.as_tensor(i, dtype=torch.int64), torch.as_tensor(j, dtype=torch.int64)) for i, j in indices]
|
GroundingDinoHungarianMatcher
|
python
|
openai__openai-python
|
src/openai/types/chat/completion_create_params.py
|
{
"start": 15341,
"end": 15887
}
|
class ____(TypedDict, total=False):
city: str
"""Free text input for the city of the user, e.g. `San Francisco`."""
country: str
"""
The two-letter [ISO country code](https://en.wikipedia.org/wiki/ISO_3166-1) of
the user, e.g. `US`.
"""
region: str
"""Free text input for the region of the user, e.g. `California`."""
timezone: str
"""
The [IANA timezone](https://timeapi.io/documentation/iana-timezones) of the
user, e.g. `America/Los_Angeles`.
"""
|
WebSearchOptionsUserLocationApproximate
|
python
|
django__django
|
tests/staticfiles_tests/test_management.py
|
{
"start": 2153,
"end": 5073
}
|
class ____(TestDefaults, CollectionTestCase):
"""
Test ``findstatic`` management command.
"""
def _get_file(self, filepath):
path = call_command(
"findstatic", filepath, all=False, verbosity=0, stdout=StringIO()
)
with open(path, encoding="utf-8") as f:
return f.read()
def test_all_files(self):
"""
findstatic returns all candidate files if run without --first and -v1.
"""
result = call_command(
"findstatic", "test/file.txt", verbosity=1, stdout=StringIO()
)
lines = [line.strip() for line in result.split("\n")]
self.assertEqual(
len(lines), 3
) # three because there is also the "Found <file> here" line
self.assertIn("project", lines[1])
self.assertIn("apps", lines[2])
def test_all_files_less_verbose(self):
"""
findstatic returns all candidate files if run without --first and -v0.
"""
result = call_command(
"findstatic", "test/file.txt", verbosity=0, stdout=StringIO()
)
lines = [line.strip() for line in result.split("\n")]
self.assertEqual(len(lines), 2)
self.assertIn("project", lines[0])
self.assertIn("apps", lines[1])
def test_all_files_more_verbose(self):
"""
findstatic returns all candidate files if run without --first and -v2.
Also, test that findstatic returns the searched locations with -v2.
"""
result = call_command(
"findstatic", "test/file.txt", verbosity=2, stdout=StringIO()
)
lines = [line.strip() for line in result.split("\n")]
self.assertIn("project", lines[1])
self.assertIn("apps", lines[2])
self.assertIn("Looking in the following locations:", lines[3])
searched_locations = ", ".join(lines[4:])
# AppDirectoriesFinder searched locations
self.assertIn(
os.path.join("staticfiles_tests", "apps", "test", "static"),
searched_locations,
)
self.assertIn(
os.path.join("staticfiles_tests", "apps", "no_label", "static"),
searched_locations,
)
# FileSystemFinder searched locations
self.assertIn(TEST_SETTINGS["STATICFILES_DIRS"][1][1], searched_locations)
self.assertIn(TEST_SETTINGS["STATICFILES_DIRS"][0], searched_locations)
self.assertIn(str(TEST_SETTINGS["STATICFILES_DIRS"][2]), searched_locations)
# DefaultStorageFinder searched locations
self.assertIn(
os.path.join("staticfiles_tests", "project", "site_media", "media"),
searched_locations,
)
def test_missing_args_message(self):
msg = "Enter at least one staticfile."
with self.assertRaisesMessage(CommandError, msg):
call_command("findstatic")
|
TestFindStatic
|
python
|
simonw__datasette
|
datasette/permissions.py
|
{
"start": 3739,
"end": 3923
}
|
class ____(NamedTuple):
"""A resource with the reason it was allowed (for debugging)."""
resource: Resource
reason: str
@dataclass(frozen=True, kw_only=True)
|
AllowedResource
|
python
|
django__django
|
tests/project_template/test_settings.py
|
{
"start": 152,
"end": 1704
}
|
class ____(SimpleTestCase):
def setUp(self):
self.temp_dir = tempfile.TemporaryDirectory()
self.addCleanup(self.temp_dir.cleanup)
template_settings_py = os.path.join(
os.path.dirname(conf.__file__),
"project_template",
"project_name",
"settings.py-tpl",
)
test_settings_py = os.path.join(self.temp_dir.name, "test_settings.py")
shutil.copyfile(template_settings_py, test_settings_py)
def test_middleware_headers(self):
"""
Ensure headers sent by the default MIDDLEWARE don't inadvertently
change. For example, we never want "Vary: Cookie" to appear in the list
since it prevents the caching of responses.
"""
with extend_sys_path(self.temp_dir.name):
from test_settings import MIDDLEWARE
with self.settings(
MIDDLEWARE=MIDDLEWARE,
ROOT_URLCONF="project_template.urls",
):
response = self.client.get("/empty/")
headers = sorted(response.serialize_headers().split(b"\r\n"))
self.assertEqual(
headers,
[
b"Content-Length: 0",
b"Content-Type: text/html; charset=utf-8",
b"Cross-Origin-Opener-Policy: same-origin",
b"Referrer-Policy: same-origin",
b"X-Content-Type-Options: nosniff",
b"X-Frame-Options: DENY",
],
)
|
TestStartProjectSettings
|
python
|
getsentry__sentry
|
src/sentry/integrations/vercel/webhook.py
|
{
"start": 4784,
"end": 18106
}
|
class ____(Endpoint):
owner = ApiOwner.INTEGRATIONS
publish_status = {
"DELETE": ApiPublishStatus.PRIVATE,
"POST": ApiPublishStatus.PRIVATE,
}
authentication_classes = ()
permission_classes = ()
provider = "vercel"
@csrf_exempt
def dispatch(self, request: HttpRequest, *args, **kwargs) -> HttpResponseBase:
return super().dispatch(request, *args, **kwargs)
def parse_external_id(self, request: Request) -> str:
payload = request.data["payload"]
# New Vercel request flow
external_id = (
payload.get("team")["id"]
if (payload.get("team") and payload.get("team") != {})
else payload["user"]["id"]
)
return external_id
def post(self, request: Request) -> Response | None:
if not request.META.get("HTTP_X_VERCEL_SIGNATURE"):
logger.error("vercel.webhook.missing-signature")
return self.respond(status=401)
is_valid = verify_signature(request)
if not is_valid:
logger.error("vercel.webhook.invalid-signature")
return self.respond(status=401)
# Vercel's webhook allows you to subscribe to different events,
# denoted by the `type` attribute. We currently subscribe to:
# * integration-configuration.removed (Configuration Removed)
# * deployment.created (Deployment Created)
# https://vercel.com/docs/integrations/webhooks-overview
try:
event_type = request.data["type"]
except KeyError:
return self.respond({"detail": "Missing event type."}, status=400)
external_id = self.parse_external_id(request)
if event_type == "integration-configuration.removed":
configuration_id = request.data["payload"]["configuration"]["id"]
return self._delete(external_id, configuration_id, request)
if event_type == "deployment.created":
return self._deployment_created(external_id, request)
return None
def delete(self, request: Request):
external_id = self.parse_external_id(request)
configuration_id = request.data["payload"]["configuration"]["id"]
return self._delete(external_id, configuration_id, request)
def _delete(self, external_id, configuration_id, request):
try:
integration = Integration.objects.get(provider="vercel", external_id=external_id)
except Integration.DoesNotExist:
logger.info(
"vercel.uninstall.missing-integration",
extra={"configuration_id": configuration_id, "external_id": external_id},
)
return self.respond(status=404)
org_ids = integration.organizationintegration_set.values_list("organization_id", flat=True)
if len(org_ids) == 0:
# we already deleted the organization integration and
# there was only one to begin with
integration.delete()
return self.respond(status=204)
# If we never set "configurations" in the integration, then we only have one
# and therefore can delete it.
if not integration.metadata.get("configurations"):
integration.delete()
return self.respond(status=204)
configuration = integration.metadata["configurations"].pop(configuration_id)
# one of two cases:
# 1.) someone is deleting from vercel's end and we still need to delete the
# organization integration AND the integration (since there is only one)
# 2.) we already deleted the organization integration tied to this configuration
# and the remaining one is for a different org (and configuration)
if len(org_ids) == 1:
try:
# Case no. 1: do the deleting and return
OrganizationIntegration.objects.get(
organization_id=configuration["organization_id"], integration_id=integration.id
)
create_audit_entry(
request=request,
organization_id=org_ids[0],
target_object=integration.id,
event=audit_log.get_event_id("INTEGRATION_REMOVE"),
actor_label="Vercel User",
data={"provider": integration.provider, "name": integration.name},
)
integration.delete()
return self.respond(status=204)
except OrganizationIntegration.DoesNotExist:
# Case no. 2: continue onto updating integration.metadata
logger.info(
"vercel.uninstall.org-integration-already-deleted",
extra={
"configuration_id": configuration_id,
"external_id": external_id,
"integration_id": integration.id,
"organization_id": configuration["organization_id"],
},
)
if configuration_id == integration.metadata["installation_id"]:
# if we are uninstalling a primary configuration, and there are
# multiple orgs connected to this integration we must update
# the credentials (access_token, webhook_id etc).
next_config_id, next_config = list(integration.metadata["configurations"].items())[0]
integration.metadata["access_token"] = next_config["access_token"]
integration.metadata["webhook_id"] = next_config["webhook_id"]
integration.metadata["installation_id"] = next_config_id
integration.save()
# At this point we can consider if len(orgs) > 1. We have already updated the
# integration.metadata, but we may not have deleted the OrganizationIntegration
try:
OrganizationIntegration.objects.get(
organization_id=configuration["organization_id"], integration_id=integration.id
).delete()
create_audit_entry(
request=request,
organization_id=configuration["organization_id"],
target_object=integration.id,
event=audit_log.get_event_id("INTEGRATION_REMOVE"),
actor_label="Vercel User",
data={"provider": integration.provider, "name": integration.name},
)
except OrganizationIntegration.DoesNotExist:
logger.info(
"vercel.uninstall.org-integration-already-deleted",
extra={
"configuration_id": configuration_id,
"external_id": external_id,
"integration_id": integration.id,
"organization_id": configuration["organization_id"],
},
)
return self.respond(status=204)
def _deployment_created(self, external_id, request):
payload = request.data["payload"]
vercel_project_id = (
payload["projectId"] if payload.get("projectId") else payload["project"]["id"]
)
# Only create releases for production deploys for now
if payload["target"] != "production":
logger.info(
"Ignoring deployment for environment: %s",
payload["target"],
extra={"external_id": external_id, "vercel_project_id": vercel_project_id},
)
return self.respond(status=204)
"""
Steps:
1. Find all org integrations that match the external id
2. Search the configs to find one that matches the vercel project of the webhook
3. Look up the Sentry project that matches
4. Look up the connected internal integration
5. Find the token associated with that installation
6. Determine the commit sha and repo based on what provider is used
7. Create the release using the token WITHOUT refs
8. Update the release with refs
"""
logging_params = {"external_id": external_id, "vercel_project_id": vercel_project_id}
org_integrations = list(
OrganizationIntegration.objects.filter(
integration__external_id=external_id, integration__provider=self.provider
)
)
if not org_integrations:
logger.info("Integration not found", extra=logging_params)
return self.respond({"detail": "Integration not found"}, status=404)
orgs = {
o.id: o
for o in organization_mapping_service.get_many(
organization_ids=[oi.organization_id for oi in org_integrations]
)
}
# for each org integration, search the configs to find one that matches the vercel project of the webhook
for org_integration in org_integrations:
project_mappings = org_integration.config.get("project_mappings") or []
matched_mappings = list(filter(lambda x: x[1] == vercel_project_id, project_mappings))
if matched_mappings:
organization = orgs.get(org_integration.organization_id)
if organization is None:
continue
sentry_project_id = matched_mappings[0][0]
logging_params["organization_id"] = organization.id
logging_params["project_id"] = sentry_project_id
try:
release_payload, token = get_payload_and_token(
payload, organization.id, sentry_project_id
)
except Project.DoesNotExist:
logger.info("Project not found", extra=logging_params)
return self.respond({"detail": "Project not found"}, status=404)
except SentryAppInstallationForProvider.DoesNotExist:
logger.info("Installation not found", extra=logging_params)
return self.respond({"detail": "Installation not found"}, status=404)
except SentryAppInstallationToken.DoesNotExist:
logger.info("Token not found", extra=logging_params)
return self.respond({"detail": "Token not found"}, status=404)
except NoCommitFoundError:
logger.info("No commit found", extra=logging_params)
return self.respond({"detail": "No commit found"}, status=404)
except MissingRepositoryError:
logger.info("Could not determine repository", extra=logging_params)
return self.respond({"detail": "Could not determine repository"}, status=400)
url = absolute_uri(f"/api/0/organizations/{organization.slug}/releases/")
headers = {
"Accept": "application/json",
"Authorization": f"Bearer {token}",
"User-Agent": f"sentry_vercel/{VERSION}",
}
json_error = None
no_ref_payload = {
"version": release_payload["version"],
"projects": release_payload["projects"],
}
with http.build_session() as session:
try:
resp = session.post(url, json=no_ref_payload, headers=headers)
json_error = safe_json_parse(resp)
resp.raise_for_status()
except RequestException as e:
# errors here should be uncommon but we should be aware of them
logger.exception(
"Error creating release: %s - %s",
e,
json_error,
extra=logging_params,
)
# 400 probably isn't the right status code but oh well
return self.respond({"detail": f"Error creating release: {e}"}, status=400)
# set the refs
try:
resp = session.post(
url,
json=release_payload,
headers=headers,
)
json_error = safe_json_parse(resp)
resp.raise_for_status()
except RequestException as e:
# errors will probably be common if the user doesn't have repos set up
logger.info(
"Error setting refs: %s - %s",
e,
json_error,
extra=logging_params,
exc_info=True,
)
# 400 probably isn't the right status code but oh well
return self.respond({"detail": f"Error setting refs: {e}"}, status=400)
# we are going to quit after the first project match as there shouldn't be multiple matches
return self.respond(status=201)
return self.respond(status=204)
|
VercelWebhookEndpoint
|
python
|
tensorflow__tensorflow
|
tensorflow/python/distribute/distribute_coordinator_test.py
|
{
"start": 34373,
"end": 36185
}
|
class ____(test.TestCase):
def test_std_server_arguments(self):
cs = {"worker": ["fake_worker"], "ps": ["fake_ps"]}
tf_config = {"cluster": cs, "task": {"type": "ps", "id": 0}}
def _mock_run_std_server(cluster_spec=None,
task_type=None,
task_id=None,
session_config=None,
rpc_layer=None):
self.assertEqual(cluster_spec.as_dict(), cs)
self.assertEqual(task_type, "ps")
self.assertEqual(task_id, 0)
self.assertEqual(session_config.experimental.collective_group_leader,
"/job:worker/replica:0/task:0")
self.assertEqual(session_config.intra_op_parallelism_threads, 1)
self.assertEqual(rpc_layer, "grpc")
return MockServer()
with test.mock.patch.dict(
"os.environ",
{"TF_CONFIG": json.dumps(tf_config)}), test.mock.patch.object(
distribute_coordinator, "_run_std_server", _mock_run_std_server):
session_config = config_pb2.ConfigProto()
session_config.intra_op_parallelism_threads = 1
mock_server = distribute_coordinator.run_standard_tensorflow_server(
session_config)
self.assertTrue(mock_server.started)
if __name__ == "__main__":
# TODO(yuefengz): find a smart way to terminate std server threads.
with test.mock.patch.object(sys, "exit", os._exit):
# Reduce `recovery_wait_secs` from 30 seconds so the test completes quickly.
orig_init = session_manager.SessionManager.__init__
def new_init(*args, **kwargs):
kwargs.pop("recovery_wait_secs", None)
kwargs["recovery_wait_secs"] = 0.5
orig_init(*args, **kwargs)
session_manager.SessionManager.__init__ = new_init
test.main()
|
RunStandardTensorflowServerTest
|
python
|
pytorch__pytorch
|
test/test_overrides.py
|
{
"start": 46546,
"end": 47333
}
|
class ____(TestCase):
""" test for gh-37141 """
def test_broadcast_all(self):
from torch.distributions.utils import broadcast_all
a = torch.tensor([1.2, 3.4, 5.6])
a_w = Wrapper(a)
b = torch.tensor(5.0)
b_w = Wrapper(b)
c = torch.tensor([5.0, 5.0, 5.0])
o_1 = broadcast_all(a_w, b_w)
self.assertTrue(isinstance(o_1[0], Wrapper))
self.assertTrue(isinstance(o_1[1], Wrapper))
self.assertEqual(o_1[0]._data, a)
self.assertEqual(o_1[1]._data, c)
o_2 = broadcast_all(a_w, b)
self.assertTrue(isinstance(o_2[0], Wrapper))
self.assertTrue(isinstance(o_2[1], Wrapper))
self.assertEqual(o_2[0]._data, a)
self.assertEqual(o_2[1]._data, c)
|
TestBroadcastAllOverride
|
python
|
django__django
|
tests/model_inheritance_regress/models.py
|
{
"start": 3657,
"end": 3747
}
|
class ____(models.Model):
keywords = models.CharField(max_length=255)
|
SearchableLocation
|
python
|
walkccc__LeetCode
|
solutions/915. Partition Array into Disjoint Intervals/915.py
|
{
"start": 0,
"end": 326
}
|
class ____:
def partitionDisjoint(self, nums: list[int]) -> int:
n = len(nums)
mn = [0] * (n - 1) + [nums[-1]]
mx = -math.inf
for i in range(n - 2, - 1, -1):
mn[i] = min(mn[i + 1], nums[i])
for i, num in enumerate(nums):
mx = max(mx, num)
if mx <= mn[i + 1]:
return i + 1
|
Solution
|
python
|
astropy__astropy
|
astropy/io/votable/__init__.py
|
{
"start": 846,
"end": 1254
}
|
class ____(_config.ConfigNamespace):
"""
Configuration parameters for `astropy.io.votable`.
"""
verify = _config.ConfigItem(
VERIFY_OPTIONS,
"Can be 'exception' (treat fixable violations of the VOTable spec as "
"exceptions), 'warn' (show warnings for VOTable spec violations), or "
"'ignore' (silently ignore VOTable spec violations)",
)
conf = Conf()
|
Conf
|
python
|
Lightning-AI__lightning
|
src/lightning/pytorch/callbacks/stochastic_weight_avg.py
|
{
"start": 1413,
"end": 17872
}
|
class ____(Callback):
def __init__(
self,
swa_lrs: Union[float, list[float]],
swa_epoch_start: Union[int, float] = 0.8,
annealing_epochs: int = 10,
annealing_strategy: Literal["cos", "linear"] = "cos",
avg_fn: Optional[_AVG_FN] = None,
device: Optional[Union[torch.device, str]] = torch.device("cpu"),
):
r"""Implements the Stochastic Weight Averaging (SWA) Callback to average a model.
Stochastic Weight Averaging was proposed in ``Averaging Weights Leads to
Wider Optima and Better Generalization`` by Pavel Izmailov, Dmitrii
Podoprikhin, Timur Garipov, Dmitry Vetrov and Andrew Gordon Wilson
(UAI 2018).
This documentation is highly inspired by PyTorch's work on SWA.
The callback arguments follow the scheme defined in PyTorch's ``swa_utils`` package.
For a SWA explanation, please take a look
`here <https://pytorch.org/blog/pytorch-1.6-now-includes-stochastic-weight-averaging>`_.
.. warning:: This is an :ref:`experimental <versioning:Experimental API>` feature.
.. warning:: ``StochasticWeightAveraging`` is currently not supported for multiple optimizers/schedulers.
.. warning:: ``StochasticWeightAveraging`` is currently only supported on every epoch.
See also how to :ref:`enable it directly on the Trainer <advanced/training_tricks:Weight Averaging>`.
Arguments:
swa_lrs: The SWA learning rate to use:
- ``float``. Use this value for all parameter groups of the optimizer.
- ``List[float]``. A list values for each parameter group of the optimizer.
swa_epoch_start: If provided as int, the procedure will start from
the ``swa_epoch_start``-th epoch. If provided as float between 0 and 1,
the procedure will start from ``int(swa_epoch_start * max_epochs)`` epoch
annealing_epochs: number of epochs in the annealing phase (default: 10)
annealing_strategy: Specifies the annealing strategy (default: "cos"):
- ``"cos"``. For cosine annealing.
- ``"linear"`` For linear annealing
avg_fn: the averaging function used to update the parameters;
the function must take in the current value of the
:class:`AveragedModel` parameter, the current value of :attr:`model`
parameter and the number of models already averaged; if None,
equally weighted average is used (default: ``None``)
device: if provided, the averaged model will be stored on the ``device``.
When None is provided, it will infer the `device` from ``pl_module``.
(default: ``"cpu"``)
"""
err_msg = "swa_epoch_start should be a >0 integer or a float between 0 and 1."
if isinstance(swa_epoch_start, int) and swa_epoch_start < 1:
raise MisconfigurationException(err_msg)
if isinstance(swa_epoch_start, float) and not (0 <= swa_epoch_start <= 1):
raise MisconfigurationException(err_msg)
wrong_type = not isinstance(swa_lrs, (float, list))
wrong_float = isinstance(swa_lrs, float) and swa_lrs <= 0
wrong_list = isinstance(swa_lrs, list) and not all(lr > 0 and isinstance(lr, float) for lr in swa_lrs)
if wrong_type or wrong_float or wrong_list:
raise MisconfigurationException("The `swa_lrs` should a positive float, or a list of positive floats")
if avg_fn is not None and not callable(avg_fn):
raise MisconfigurationException("The `avg_fn` should be callable.")
if device is not None and not isinstance(device, (torch.device, str)):
raise MisconfigurationException(f"device is expected to be a torch.device or a str. Found {device}")
self.n_averaged: Optional[Tensor] = None
self._swa_epoch_start = swa_epoch_start
self._swa_lrs = swa_lrs
self._annealing_epochs = annealing_epochs
self._annealing_strategy = annealing_strategy
self._avg_fn = avg_fn or self.avg_fn
self._device = device
self._model_contains_batch_norm: Optional[bool] = None
self._average_model: Optional[pl.LightningModule] = None
self._initialized = False
self._swa_scheduler: Optional[LRScheduler] = None
self._scheduler_state: Optional[dict] = None
self._init_n_averaged = 0
self._latest_update_epoch = -1
self.momenta: dict[nn.modules.batchnorm._BatchNorm, Optional[float]] = {}
self._max_epochs: int
@property
def swa_start(self) -> int:
assert isinstance(self._swa_epoch_start, int)
return max(self._swa_epoch_start - 1, 0) # 0-based
@property
def swa_end(self) -> int:
return self._max_epochs - 1 # 0-based
@staticmethod
def pl_module_contains_batch_norm(pl_module: "pl.LightningModule") -> bool:
return any(isinstance(module, nn.modules.batchnorm._BatchNorm) for module in pl_module.modules())
@override
def setup(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule", stage: str) -> None:
if isinstance(trainer.strategy, (FSDPStrategy, DeepSpeedStrategy)):
raise MisconfigurationException("SWA does not currently support sharded models.")
# copy the model before moving it to accelerator device.
self._average_model = deepcopy(pl_module)
@override
def on_fit_start(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule") -> None:
if len(trainer.optimizers) != 1:
raise MisconfigurationException("SWA currently works with 1 `optimizer`.")
if len(trainer.lr_scheduler_configs) > 1:
raise MisconfigurationException("SWA currently not supported for more than 1 `lr_scheduler`.")
assert trainer.max_epochs is not None
if isinstance(self._swa_epoch_start, float):
self._swa_epoch_start = int(trainer.max_epochs * self._swa_epoch_start)
self._model_contains_batch_norm = self.pl_module_contains_batch_norm(pl_module)
self._max_epochs = trainer.max_epochs
if self._model_contains_batch_norm:
# virtually increase max_epochs to perform batch norm update on latest epoch.
assert trainer.fit_loop.max_epochs is not None
trainer.fit_loop.max_epochs += 1
if self._scheduler_state is not None:
self._clear_schedulers(trainer)
    @override
    def on_train_epoch_start(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule") -> None:
        """Drive the SWA life cycle at the start of each training epoch.

        Three phases, in order:
        1. One-time setup on the first SWA epoch: move the average model to the
           device, install the SWALR scheduler (restoring checkpointed state if any).
        2. On every SWA epoch not yet processed, fold the current weights into
           the running average.
        3. On the virtual epoch after the last SWA epoch, copy the averaged
           weights back and prepare a forward-only pass to recompute batch-norm
           statistics.

        NOTE(review): this hook mutates trainer state (scheduler configs,
        ``max_batches``, ``accumulate_grad_batches``); the statement order matters.
        """
        if (not self._initialized) and (self.swa_start <= trainer.current_epoch <= self.swa_end):
            self._initialized = True
            # move average model to request device.
            assert self._average_model is not None
            self._average_model = self._average_model.to(self._device or pl_module.device)
            optimizer = trainer.optimizers[0]
            # A scalar SWA learning rate applies to every parameter group.
            if isinstance(self._swa_lrs, float):
                self._swa_lrs = [self._swa_lrs] * len(optimizer.param_groups)
            for lr, group in zip(self._swa_lrs, optimizer.param_groups):
                group["initial_lr"] = lr
            assert trainer.max_epochs is not None
            self._swa_scheduler = cast(
                LRScheduler,
                SWALR(
                    optimizer,
                    swa_lr=self._swa_lrs,  # type: ignore[arg-type]
                    anneal_epochs=self._annealing_epochs,
                    anneal_strategy=self._annealing_strategy,
                    last_epoch=trainer.max_epochs if self._annealing_strategy == "cos" else -1,
                ),
            )
            if self._scheduler_state is not None:
                # Restore scheduler state from checkpoint
                self._swa_scheduler.load_state_dict(self._scheduler_state)
            elif trainer.current_epoch != self.swa_start:
                # Log a warning if we're initializing after start without any checkpoint data,
                # as behaviour will be different compared to having checkpoint data.
                rank_zero_warn(
                    "SWA is initializing after swa_start without any checkpoint data. "
                    "This may be caused by loading a checkpoint from an older version of PyTorch Lightning."
                )
            # We assert that there is only one optimizer on fit start
            default_scheduler_cfg = LRSchedulerConfig(self._swa_scheduler)
            assert default_scheduler_cfg.interval == "epoch"
            assert default_scheduler_cfg.frequency == 1
            if trainer.lr_scheduler_configs:
                # Replace the user's scheduler with the SWALR scheduler, warning
                # when the replaced config had non-per-epoch stepping.
                scheduler_cfg = trainer.lr_scheduler_configs[0]
                if scheduler_cfg.interval != "epoch" or scheduler_cfg.frequency != 1:
                    rank_zero_warn(f"SWA is currently only supported every epoch. Found {scheduler_cfg}")
                rank_zero_info(
                    f"Swapping scheduler `{scheduler_cfg.scheduler.__class__.__name__}`"
                    f" for `{self._swa_scheduler.__class__.__name__}`"
                )
                trainer.lr_scheduler_configs[0] = default_scheduler_cfg
            else:
                trainer.lr_scheduler_configs.append(default_scheduler_cfg)
            if self.n_averaged is None:
                # `_init_n_averaged` carries the count restored from a checkpoint.
                self.n_averaged = torch.tensor(self._init_n_averaged, dtype=torch.long, device=pl_module.device)
        if (self.swa_start <= trainer.current_epoch <= self.swa_end) and (
            trainer.current_epoch > self._latest_update_epoch
        ):
            assert self.n_averaged is not None
            assert self._average_model is not None
            self.update_parameters(self._average_model, pl_module, self.n_averaged, self._avg_fn)
            self._latest_update_epoch = trainer.current_epoch
        # Note: No > here in case the callback is saved with the model and training continues
        if trainer.current_epoch == self.swa_end + 1:
            # Transfer weights from average model to pl_module
            assert self._average_model is not None
            self.transfer_weights(self._average_model, pl_module)
            # Reset BatchNorm for update
            self.reset_batch_norm_and_save_state(pl_module)
            # There is no need to perform either backward or optimizer.step as we are
            # performing only one pass over the train data-loader to compute activation statistics
            # Therefore, we will virtually increase the number of training batches by 1 and skip backward.
            trainer.fit_loop.max_batches += 1
            trainer.fit_loop._skip_backward = True
            self._accumulate_grad_batches = trainer.accumulate_grad_batches
            assert isinstance(trainer.fit_loop.max_batches, int), "Iterable-style datasets are not supported"
            trainer.accumulate_grad_batches = trainer.fit_loop.max_batches
    @override
    def on_train_epoch_end(self, trainer: "pl.Trainer", *args: Any) -> None:
        # Re-enable backward in case it was skipped for the batch-norm
        # statistics pass scheduled in `on_train_epoch_start`.
        trainer.fit_loop._skip_backward = False
    @override
    def on_train_end(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule") -> None:
        """Finalize SWA: undo the virtual batch-norm epoch, or copy the averaged
        weights into ``pl_module`` when no batch-norm pass was needed."""
        # the trainer increases the current epoch before this hook is called
        if self._model_contains_batch_norm and trainer.current_epoch - 1 == self.swa_end + 1:
            # BatchNorm epoch update. Reset state
            trainer.accumulate_grad_batches = self._accumulate_grad_batches
            trainer.fit_loop.max_batches -= 1
            assert trainer.fit_loop.max_epochs is not None
            trainer.fit_loop.max_epochs -= 1
            self.reset_momenta()
        elif trainer.current_epoch - 1 == self.swa_end:
            # Last SWA epoch. Transfer weights from average model to pl_module
            assert self._average_model is not None
            self.transfer_weights(self._average_model, pl_module)
@staticmethod
def transfer_weights(src_pl_module: "pl.LightningModule", dst_pl_module: "pl.LightningModule") -> None:
for src_param, dst_param in zip(src_pl_module.parameters(), dst_pl_module.parameters()):
dst_param.detach().copy_(src_param.to(dst_param.device))
def reset_batch_norm_and_save_state(self, pl_module: "pl.LightningModule") -> None:
"""Adapted from https://github.com/pytorch/pytorch/blob/v1.7.1/torch/optim/swa_utils.py#L140-L154."""
self.momenta = {}
for module in pl_module.modules():
if not isinstance(module, nn.modules.batchnorm._BatchNorm):
continue
assert module.running_mean is not None
module.running_mean = torch.zeros_like(
module.running_mean,
device=pl_module.device,
dtype=module.running_mean.dtype,
)
assert module.running_var is not None
module.running_var = torch.ones_like(
module.running_var,
device=pl_module.device,
dtype=module.running_var.dtype,
)
self.momenta[module] = module.momentum
module.momentum = None
assert module.num_batches_tracked is not None
module.num_batches_tracked *= 0
def reset_momenta(self) -> None:
"""Adapted from https://github.com/pytorch/pytorch/blob/v1.7.1/torch/optim/swa_utils.py#L164-L165."""
for bn_module in self.momenta:
bn_module.momentum = self.momenta[bn_module]
@staticmethod
def update_parameters(
average_model: "pl.LightningModule", model: "pl.LightningModule", n_averaged: Tensor, avg_fn: _AVG_FN
) -> None:
"""Adapted from https://github.com/pytorch/pytorch/blob/v1.7.1/torch/optim/swa_utils.py#L104-L112."""
for p_swa, p_model in zip(average_model.parameters(), model.parameters()):
device = p_swa.device
p_swa_ = p_swa.detach()
p_model_ = p_model.detach().to(device)
src = p_model_ if n_averaged == 0 else avg_fn(p_swa_, p_model_, n_averaged.to(device))
p_swa_.copy_(src)
n_averaged += 1
@staticmethod
def avg_fn(averaged_model_parameter: Tensor, model_parameter: Tensor, num_averaged: Tensor) -> Tensor:
"""Adapted from https://github.com/pytorch/pytorch/blob/v1.7.1/torch/optim/swa_utils.py#L95-L97."""
return averaged_model_parameter + (model_parameter - averaged_model_parameter) / (num_averaged + 1)
@override
def state_dict(self) -> dict[str, Any]:
return {
"n_averaged": 0 if self.n_averaged is None else self.n_averaged.item(),
"latest_update_epoch": self._latest_update_epoch,
"scheduler_state": None if self._swa_scheduler is None else self._swa_scheduler.state_dict(),
"average_model_state": None if self._average_model is None else self._average_model.state_dict(),
}
@override
def load_state_dict(self, state_dict: dict[str, Any]) -> None:
self._init_n_averaged = state_dict["n_averaged"]
self._latest_update_epoch = state_dict["latest_update_epoch"]
self._scheduler_state = state_dict["scheduler_state"]
self._load_average_model_state(state_dict["average_model_state"])
@staticmethod
def _clear_schedulers(trainer: "pl.Trainer") -> None:
# If we have scheduler state saved, clear the scheduler configs so that we don't try to
# load state into the wrong type of schedulers when restoring scheduler checkpoint state.
# We'll configure the scheduler and re-load its state in on_train_epoch_start.
# Note that this relies on the callback state being restored before the scheduler state is
# restored, and doesn't work if restore_checkpoint_after_setup is True, but at the time of
# writing that is only True for deepspeed which is already not supported by SWA.
# See https://github.com/Lightning-AI/pytorch-lightning/issues/11665 for background.
if trainer.lr_scheduler_configs:
assert len(trainer.lr_scheduler_configs) == 1
trainer.lr_scheduler_configs.clear()
def _load_average_model_state(self, model_state: Any) -> None:
if self._average_model is None:
return
self._average_model.load_state_dict(model_state)
|
StochasticWeightAveraging
|
python
|
kamyu104__LeetCode-Solutions
|
Python/describe-the-painting.py
|
{
"start": 54,
"end": 632
}
|
class ____(object):
def splitPainting(self, segments):
"""
:type segments: List[List[int]]
:rtype: List[List[int]]
"""
counts = collections.defaultdict(int)
for s, e, c in segments:
counts[s] += c
counts[e] -= c
points = sorted(x for x in counts.iteritems())
result = []
overlap = prev = 0
for curr, cnt in points:
if overlap:
result.append([prev, curr, overlap])
overlap += cnt
prev = curr
return result
|
Solution
|
python
|
coleifer__peewee
|
tests/fields.py
|
{
"start": 27636,
"end": 27728
}
|
class ____(TestModel):
    # Unit price stored as a plain integer (currency unit not specified here --
    # presumably smallest denomination; confirm against callers).
    price = IntegerField()
    # Scaling factor applied to `price`; defaults to 1.0 (no scaling).
    multiplier = FloatField(default=1.)
|
Item
|
python
|
run-llama__llama_index
|
llama-index-integrations/vector_stores/llama-index-vector-stores-lancedb/tests/test_vector_stores_lancedb.py
|
{
"start": 804,
"end": 888
}
|
class ____(LanceModel):
    # Raw document text stored alongside the embedding.
    text: str
    # Identifier of the stored node.
    id_: str
    # Embedding column; dim=3 must match the vectors written by the tests.
    vector: Vector(dim=3)
|
TestModel
|
python
|
django-guardian__django-guardian
|
guardian/testapp/tests/test_backend_integration.py
|
{
"start": 541,
"end": 3448
}
|
class ____(TestCase):
    """Test that the backend integrates correctly with Django's auth system"""

    def setUp(self):
        # One regular user (member of `testgroup`), one superuser, and a
        # Project instance to attach object-level permissions to.
        self.user = User.objects.create_user(username="testuser", password="testpass")
        self.superuser = User.objects.create_superuser(username="superuser", password="superpass")
        self.group = Group.objects.create(name="testgroup")
        self.user.groups.add(self.group)
        self.project = Project.objects.create(name="Test Project")

    def test_user_get_group_permissions_integration(self):
        """Test that user.get_group_permissions() calls our backend correctly"""
        # Assign permission to group
        assign_perm("change_project", self.group, self.project)
        # Django's User.get_group_permissions() should now include our backend
        group_perms = self.user.get_group_permissions(self.project)
        self.assertIn("change_project", group_perms)

    def test_user_get_group_permissions_without_object(self):
        """Test that user.get_group_permissions() without object works"""
        # When no object is provided, our backend should return empty set
        group_perms = self.user.get_group_permissions()
        # The result should not include our object-specific permissions
        # but may include model-level permissions from ModelBackend
        self.assertIsInstance(group_perms, set)

    def test_user_get_all_permissions_integration(self):
        """Test that user.get_all_permissions() includes our object permissions"""
        # Assign both user and group permissions
        assign_perm("change_project", self.user, self.project)
        assign_perm("delete_project", self.group, self.project)
        # Get all permissions for the object
        all_perms = self.user.get_all_permissions(self.project)
        # Should include both user and group permissions
        self.assertIn("change_project", all_perms)
        self.assertIn("delete_project", all_perms)

    def test_backend_priority_with_model_backend(self):
        """Test that both backends work together correctly"""
        # Our backend should handle object-level permissions
        assign_perm("change_project", self.user, self.project)
        # Check that the permission is detected
        # (has_perm takes the "app_label.codename" form, unlike assign_perm)
        self.assertTrue(self.user.has_perm("testapp.change_project", self.project))
        # Verify it comes from our backend (not model-level permission)
        all_perms = self.user.get_all_permissions(self.project)
        self.assertIn("change_project", all_perms)

    def test_anonymous_user_support(self):
        """Test that anonymous user support works correctly"""
        from django.contrib.auth.models import AnonymousUser

        anonymous = AnonymousUser()
        # Should not have group permissions
        group_perms = anonymous.get_group_permissions(self.project)
        self.assertEqual(set(), group_perms)
|
BackendIntegrationTest
|
python
|
scipy__scipy
|
scipy/stats/tests/test_morestats.py
|
{
"start": 114220,
"end": 122436
}
|
class ____:
    """Tests for `scipy.stats.yeojohnson` with both fixed and estimated lambda."""

    def test_fixed_lmbda(self):
        # Check the closed-form transform branches for known lambda values on
        # all-positive, all-negative, and mixed-sign inputs.
        rng = np.random.RandomState(12345)

        # Test positive input
        x = _old_loggamma_rvs(5, size=50, random_state=rng) + 5
        assert np.all(x > 0)
        xt = stats.yeojohnson(x, lmbda=1)
        assert_allclose(xt, x)
        xt = stats.yeojohnson(x, lmbda=-1)
        assert_allclose(xt, 1 - 1 / (x + 1))
        xt = stats.yeojohnson(x, lmbda=0)
        assert_allclose(xt, np.log(x + 1))
        xt = stats.yeojohnson(x, lmbda=1)
        assert_allclose(xt, x)

        # Test negative input
        x = _old_loggamma_rvs(5, size=50, random_state=rng) - 5
        assert np.all(x < 0)
        xt = stats.yeojohnson(x, lmbda=2)
        assert_allclose(xt, -np.log(-x + 1))
        xt = stats.yeojohnson(x, lmbda=1)
        assert_allclose(xt, x)
        xt = stats.yeojohnson(x, lmbda=3)
        assert_allclose(xt, 1 / (-x + 1) - 1)

        # test both positive and negative input
        x = _old_loggamma_rvs(5, size=50, random_state=rng) - 2
        assert not np.all(x < 0)
        assert not np.all(x >= 0)
        pos = x >= 0
        xt = stats.yeojohnson(x, lmbda=1)
        assert_allclose(xt[pos], x[pos])
        xt = stats.yeojohnson(x, lmbda=-1)
        assert_allclose(xt[pos], 1 - 1 / (x[pos] + 1))
        xt = stats.yeojohnson(x, lmbda=0)
        assert_allclose(xt[pos], np.log(x[pos] + 1))
        xt = stats.yeojohnson(x, lmbda=1)
        assert_allclose(xt[pos], x[pos])
        neg = ~pos
        xt = stats.yeojohnson(x, lmbda=2)
        assert_allclose(xt[neg], -np.log(-x[neg] + 1))
        xt = stats.yeojohnson(x, lmbda=1)
        assert_allclose(xt[neg], x[neg])
        xt = stats.yeojohnson(x, lmbda=3)
        assert_allclose(xt[neg], 1 / (-x[neg] + 1) - 1)

    @pytest.mark.parametrize('lmbda', [0, .1, .5, 2])
    def test_lmbda_None(self, lmbda):
        # Start from normal rv's, do inverse transform to check that
        # optimization function gets close to the right answer.
        def _inverse_transform(x, lmbda):
            # Analytical inverse of the Yeo-Johnson transform, branch by sign
            # of x and by lambda (0 and 2 are the log/exp limit cases).
            x_inv = np.zeros(x.shape, dtype=x.dtype)
            pos = x >= 0

            # when x >= 0
            if abs(lmbda) < np.spacing(1.):
                x_inv[pos] = np.exp(x[pos]) - 1
            else:  # lmbda != 0
                x_inv[pos] = np.power(x[pos] * lmbda + 1, 1 / lmbda) - 1

            # when x < 0
            if abs(lmbda - 2) > np.spacing(1.):
                x_inv[~pos] = 1 - np.power(-(2 - lmbda) * x[~pos] + 1,
                                           1 / (2 - lmbda))
            else:  # lmbda == 2
                x_inv[~pos] = 1 - np.exp(-x[~pos])

            return x_inv

        n_samples = 20000
        rng = np.random.RandomState(1234567)
        x = rng.normal(loc=0, scale=1, size=(n_samples))

        x_inv = _inverse_transform(x, lmbda)
        xt, maxlog = stats.yeojohnson(x_inv)

        # The estimated lambda should recover the one used in the inverse,
        # and the round-tripped data should be ~standard normal.
        assert_allclose(maxlog, lmbda, atol=1e-2)

        assert_almost_equal(0, np.linalg.norm(x - xt) / n_samples, decimal=2)
        assert_almost_equal(0, xt.mean(), decimal=1)
        assert_almost_equal(1, xt.std(), decimal=1)

    def test_empty(self):
        assert_(stats.yeojohnson([]).shape == (0,))

    def test_array_like(self):
        # list input must give the same result as ndarray input
        x = stats.norm.rvs(size=100, loc=0, random_state=54321)
        xt1, _ = stats.yeojohnson(x)
        xt2, _ = stats.yeojohnson(list(x))
        assert_allclose(xt1, xt2, rtol=1e-12)

    @pytest.mark.parametrize('dtype', [np.complex64, np.complex128])
    def test_input_dtype_complex(self, dtype):
        x = np.arange(6, dtype=dtype)
        err_msg = ('Yeo-Johnson transformation is not defined for complex '
                   'numbers.')
        with pytest.raises(ValueError, match=err_msg):
            stats.yeojohnson(x)

    @pytest.mark.parametrize('dtype', [np.int8, np.uint8, np.int16, np.int32])
    def test_input_dtype_integer(self, dtype):
        # integer input should be handled identically to float input
        x_int = np.arange(8, dtype=dtype)
        x_float = np.arange(8, dtype=np.float64)
        xt_int, lmbda_int = stats.yeojohnson(x_int)
        xt_float, lmbda_float = stats.yeojohnson(x_float)
        assert_allclose(xt_int, xt_float, rtol=1e-7)
        assert_allclose(lmbda_int, lmbda_float, rtol=1e-7)

    def test_input_high_variance(self):
        # non-regression test for gh-10821
        x = np.array([3251637.22, 620695.44, 11642969.00, 2223468.22,
                      85307500.00, 16494389.89, 917215.88, 11642969.00,
                      2145773.87, 4962000.00, 620695.44, 651234.50,
                      1907876.71, 4053297.88, 3251637.22, 3259103.08,
                      9547969.00, 20631286.23, 12807072.08, 2383819.84,
                      90114500.00, 17209575.46, 12852969.00, 2414609.99,
                      2170368.23])
        # For strictly positive data, Yeo-Johnson on x should agree with
        # Box-Cox on x + 1.
        xt_yeo, lam_yeo = stats.yeojohnson(x)
        xt_box, lam_box = stats.boxcox(x + 1)
        assert_allclose(xt_yeo, xt_box, rtol=1e-6)
        assert_allclose(lam_yeo, lam_box, rtol=1e-6)

    @pytest.mark.parametrize('x', [
        np.array([1.0, float("nan"), 2.0]),
        np.array([1.0, float("inf"), 2.0]),
        np.array([1.0, -float("inf"), 2.0]),
        np.array([-1.0, float("nan"), float("inf"), -float("inf"), 1.0])
    ])
    def test_nonfinite_input(self, x):
        with pytest.raises(ValueError, match='Yeo-Johnson input must be finite'):
            xt_yeo, lam_yeo = stats.yeojohnson(x)

    @pytest.mark.parametrize('x', [
        # Attempt to trigger overflow in power expressions.
        np.array([2003.0, 1950.0, 1997.0, 2000.0, 2009.0,
                  2009.0, 1980.0, 1999.0, 2007.0, 1991.0]),
        # Attempt to trigger overflow with a large optimal lambda.
        np.array([2003.0, 1950.0, 1997.0, 2000.0, 2009.0]),
        # Attempt to trigger overflow with large data.
        np.array([2003.0e200, 1950.0e200, 1997.0e200, 2000.0e200, 2009.0e200])
    ])
    def test_overflow(self, x):
        # non-regression test for gh-18389
        def optimizer(fun, lam_yeo):
            # Bracket Box-Cox's search around the lambda Yeo-Johnson found so
            # both transforms optimize over a comparable range.
            out = optimize.fminbound(fun, -lam_yeo, lam_yeo, xtol=1.48e-08)
            result = optimize.OptimizeResult()
            result.x = out
            return result

        # errstate(all="raise") turns any numerical overflow into a test failure
        with np.errstate(all="raise"):
            xt_yeo, lam_yeo = stats.yeojohnson(x)
            xt_box, lam_box = stats.boxcox(
                x + 1, optimizer=partial(optimizer, lam_yeo=lam_yeo))
            assert np.isfinite(np.var(xt_yeo))
            assert np.isfinite(np.var(xt_box))
            assert_allclose(lam_yeo, lam_box, rtol=1e-6)
            assert_allclose(xt_yeo, xt_box, rtol=1e-4)

    @pytest.mark.parametrize('x', [
        np.array([2003.0, 1950.0, 1997.0, 2000.0, 2009.0,
                  2009.0, 1980.0, 1999.0, 2007.0, 1991.0]),
        np.array([2003.0, 1950.0, 1997.0, 2000.0, 2009.0])
    ])
    @pytest.mark.parametrize('scale', [1, 1e-12, 1e-32, 1e-150, 1e32, 1e200])
    @pytest.mark.parametrize('sign', [1, -1])
    def test_overflow_underflow_signed_data(self, x, scale, sign):
        # non-regression test for gh-18389
        with np.errstate(all="raise"):
            xt_yeo, lam_yeo = stats.yeojohnson(sign * x * scale)
            # The transform must preserve the sign of every data point.
            assert np.all(np.sign(sign * x) == np.sign(xt_yeo))
            assert np.isfinite(lam_yeo)
            assert np.isfinite(np.var(xt_yeo))

    @pytest.mark.parametrize('x', [
        np.array([0, 1, 2, 3]),
        np.array([0, -1, 2, -3]),
        np.array([0, 0, 0])
    ])
    @pytest.mark.parametrize('sign', [1, -1])
    @pytest.mark.parametrize('brack', [None, (-2, 2)])
    def test_integer_signed_data(self, x, sign, brack):
        # integer and float inputs must give identical lambda and transform
        with np.errstate(all="raise"):
            x_int = sign * x
            x_float = x_int.astype(np.float64)
            lam_yeo_int = stats.yeojohnson_normmax(x_int, brack=brack)
            xt_yeo_int = stats.yeojohnson(x_int, lmbda=lam_yeo_int)
            lam_yeo_float = stats.yeojohnson_normmax(x_float, brack=brack)
            xt_yeo_float = stats.yeojohnson(x_float, lmbda=lam_yeo_float)
            assert np.all(np.sign(x_int) == np.sign(xt_yeo_int))
            assert np.isfinite(lam_yeo_int)
            assert np.isfinite(np.var(xt_yeo_int))
            assert lam_yeo_int == lam_yeo_float
            assert np.all(xt_yeo_int == xt_yeo_float)
|
TestYeojohnson
|
python
|
wandb__wandb
|
wandb/integration/sb3/sb3.py
|
{
"start": 1469,
"end": 4797
}
|
class ____(BaseCallback):
    """Callback for logging experiments to Weights and Biases.

    Log SB3 experiments to Weights and Biases
        - Added model tracking and uploading
        - Added complete hyperparameters recording
        - Added gradient logging
        - Note that `wandb.init(...)` must be called before the WandbCallback can be used.

    Args:
        verbose: The verbosity of sb3 output
        model_save_path: Path to the folder where the model will be saved, The default value is `None` so the model is not logged
        model_save_freq: Frequency to save the model
        gradient_save_freq: Frequency to log gradient. The default value is 0 so the gradients are not logged
        log: What to log. One of "gradients", "parameters", or "all".
    """

    def __init__(
        self,
        verbose: int = 0,
        model_save_path: Optional[str] = None,
        model_save_freq: int = 0,
        gradient_save_freq: int = 0,
        log: Optional[Literal["gradients", "parameters", "all"]] = "all",
    ) -> None:
        super().__init__(verbose)
        if wandb.run is None:
            raise wandb.Error("You must call wandb.init() before WandbCallback()")
        with wb_telemetry.context() as tel:
            tel.feature.sb3 = True
        self.model_save_freq = model_save_freq
        self.model_save_path = model_save_path
        self.gradient_save_freq = gradient_save_freq
        # Invalid `log` values degrade gracefully to "all" with a warning.
        if log not in ["gradients", "parameters", "all", None]:
            wandb.termwarn(
                "`log` must be one of `None`, 'gradients', 'parameters', or 'all', "
                "falling back to 'all'"
            )
            log = "all"
        self.log = log
        # Create folder if needed
        if self.model_save_path is not None:
            os.makedirs(self.model_save_path, exist_ok=True)
            self.path = os.path.join(self.model_save_path, "model.zip")
        else:
            # Periodic saving makes no sense without a destination path.
            assert self.model_save_freq == 0, (
                "to use the `model_save_freq` you have to set the `model_save_path` parameter"
            )

    def _init_callback(self) -> None:
        """Record the algorithm's hyperparameters in `wandb.config` and start
        watching gradients/parameters if requested."""
        # Seed with the algorithm class name (the original code guarded this
        # with a check on a freshly-created empty dict, which was always true).
        d = {"algo": type(self.model).__name__}
        for key, value in self.model.__dict__.items():
            # Keys the user already put in wandb.config take precedence.
            if key in wandb.config:
                continue
            # Primitives are stored as-is; everything else is stringified.
            # (Exact-type check kept on purpose: subclasses such as bool or
            # numpy scalars fall through to `str`.)
            if type(value) in [float, int, str]:
                d[key] = value
            else:
                d[key] = str(value)
        if self.gradient_save_freq > 0:
            wandb.watch(
                self.model.policy,
                log_freq=self.gradient_save_freq,
                log=self.log,
            )
        wandb.config.setdefaults(d)

    def _on_step(self) -> bool:
        """Save a checkpoint every `model_save_freq` calls; always continue training."""
        if (
            self.model_save_freq > 0
            and self.model_save_path is not None
            and self.n_calls % self.model_save_freq == 0
        ):
            self.save_model()
        return True

    def _on_training_end(self) -> None:
        """Save a final checkpoint when training finishes."""
        if self.model_save_path is not None:
            self.save_model()

    def save_model(self) -> None:
        """Write the model zip to `self.path` and upload it to the active W&B run."""
        self.model.save(self.path)
        wandb.save(self.path, base_path=self.model_save_path)
        if self.verbose > 1:
            logger.info(f"Saving model checkpoint to {self.path}")
|
WandbCallback
|
python
|
wandb__wandb
|
wandb/apis/public/integrations.py
|
{
"start": 2389,
"end": 2695
}
|
class ____(Integrations):
    """A lazy iterator of `WebhookIntegration` objects.

    <!-- lazydoc-ignore-class: internal -->
    """

    def _convert(self, node: IntegrationFields) -> WebhookIntegration:
        # Keep only generic-webhook nodes; any other integration type is
        # filtered out by returning None.
        if node.typename__ == "GenericWebhookIntegration":
            return node
        return None
|
WebhookIntegrations
|
python
|
Textualize__textual
|
src/textual/lazy.py
|
{
"start": 1966,
"end": 4329
}
|
class ____(Widget):
    """Similar to [Lazy][textual.lazy.Lazy], but mounts children sequentially.

    This is useful when you have so many child widgets that there is a noticeable delay before
    you see anything. By mounting the children over several frames, the user will feel that
    something is happening.

    Example:
        ```python
        def compose(self) -> ComposeResult:
            with lazy.Reveal(containers.VerticalScroll(can_focus=False)):
                yield Markdown(WIDGETS_MD, classes="column")
                yield Buttons()
                yield Checkboxes()
                yield Datatables()
                yield Inputs()
                yield ListViews()
                yield Logs()
                yield Sparklines()
            yield Footer()
        ```
    """

    # The placeholder itself is never rendered; it is swapped for
    # `_replace_widget` and removed in `mount_composed_widgets`.
    DEFAULT_CSS = """
    Reveal {
        display: none;
    }
    """

    def __init__(self, widget: Widget) -> None:
        """
        Args:
            widget: A widget to mount.
        """
        # The widget that replaces this placeholder; children collected via
        # `compose_add_child` are mounted into it one frame at a time.
        self._replace_widget = widget
        self._widgets: list[Widget] = []
        super().__init__()

    @classmethod
    def _reveal(cls, parent: Widget, widgets: list[Widget]) -> None:
        """Reveal children lazily.

        Args:
            parent: The parent widget.
            widgets: Child widgets.
        """

        async def check_children() -> None:
            """Check for pending children"""
            if not widgets:
                return
            # Mount exactly one pending child per invocation, then reschedule.
            widget = widgets.pop(0)
            try:
                await parent.mount(widget)
            except Exception:
                # I think this can occur if the parent is removed before all children are added
                # Only noticed this on shutdown
                return
            if widgets:
                # ~50 fps drip-feed of the remaining children.
                parent.set_timer(0.02, check_children)

        parent.call_next(check_children)

    def compose_add_child(self, widget: Widget) -> None:
        # Buffer composed children instead of mounting them immediately.
        self._widgets.append(widget)

    async def mount_composed_widgets(self, widgets: list[Widget]) -> None:
        # Swap this (invisible) placeholder for the real container, then start
        # drip-mounting the buffered children into it.
        parent = self.parent
        if parent is None:
            return
        assert isinstance(parent, Widget)
        await parent.mount(self._replace_widget, after=self)
        await self.remove()
        self._reveal(self._replace_widget, self._widgets.copy())
        self._widgets.clear()
|
Reveal
|
python
|
pytorch__pytorch
|
torch/distributed/fsdp/_exec_order_utils.py
|
{
"start": 668,
"end": 16168
}
|
class ____:
    """
    This contains the data structures to track the execution order. We track
    the pre-forward order on the *first* iteration for forward prefetching
    (which thus assumes static graph) and the post-forward order on *every*
    iteration for backward prefetching (which thus does not assume static
    graph but may be provide an incorrect order).
    """

    def __init__(
        self,
        debug_level: dist.DebugLevel,
        backward_prefetch_limit: int,
        forward_prefetch_limit: int,
    ) -> None:
        # Tracks the (static) pre-forward order for execution order validation
        # and forward prefetching
        self.handles_pre_forward_order: list[FlatParamHandle] = []
        # Tracks the post-forward order for pre-backward prefetching
        self.handles_post_forward_order: list[Optional[FlatParamHandle]] = []
        self._iter = 0

        # Gives the max number of backward/forward prefetched all-gathers by a
        # single module
        self._backward_prefetch_limit = backward_prefetch_limit
        self._forward_prefetch_limit = forward_prefetch_limit

        # Data structures for execution order validation
        self._checking_order: bool = debug_level == dist.DebugLevel.DETAIL
        self.process_group: Optional[dist.ProcessGroup] = None
        self.world_size: Optional[int] = None
        self.all_handles: list[FlatParamHandle] = []
        # Names are prefixed from the root module
        self.param_to_fqn: dict[nn.Parameter, list[str]] = {}
        # Current index in the pre-forward execution order
        self.current_order_index = 0
        self.warn_status = _ExecOrderWarnStatus.NONE

    def init(
        self,
        state: _FSDPState,
        root_module: nn.Module,
        process_group: dist.ProcessGroup,
    ) -> None:
        """
        Initializes the data structures needed for checking the forward order.
        This should be called after a root FSDP instance has been set during
        lazy initialization.
        """
        self.process_group = process_group
        self.rank = process_group.rank()
        self.world_size = process_group.size()
        # Fix an order over the handles, which should be the same across ranks
        for handle in traversal_utils._get_fsdp_handles(root_module):
            index = len(self.all_handles)
            self.all_handles.append(handle)
            handle._handle_index = index
        self.param_to_fqn = _get_param_to_fqns(root_module)
        # TODO (awgu): We can broadcast the metadata of rank 0's `all_handles`
        # to check that all ranks have the same handles in the same order.
        # https://github.com/pytorch/pytorch/issues/79620

    @property
    def is_first_iter(self) -> bool:
        return self._iter == 0

    def get_handle_to_backward_prefetch(
        self,
        current_handle: FlatParamHandle,
    ) -> Optional[FlatParamHandle]:
        """
        Returns a :class:`list` of the handles keys of the handles to backward
        prefetch given the current handles key. If there are no valid handles
        keys to prefetch, then this returns an empty :class:`list`.
        """
        current_index = current_handle._post_forward_index
        if current_index is None:
            return None
        target_index = current_index - 1
        target_handle: Optional[FlatParamHandle] = None
        # Walk backward through the post-forward order, up to the prefetch limit.
        for _ in range(self._backward_prefetch_limit):
            if target_index < 0:
                break
            target_handle = self.handles_post_forward_order[target_index]
            target_index -= 1
        return target_handle

    def get_handle_to_forward_prefetch(
        self,
        current_handle: FlatParamHandle,
    ) -> Optional[FlatParamHandle]:
        """
        Returns a :class:`list` of the handles keys of the handles to forward
        prefetch given the current handles key. If there are no valid handles
        keys to prefetch, then this returns an empty :class:`list`.
        """
        current_index = current_handle._pre_forward_order_index
        if current_index is None:
            return None
        target_index = current_index + 1
        target_handle: Optional[FlatParamHandle] = None
        # Walk forward through the recorded pre-forward order, up to the limit.
        for _ in range(self._forward_prefetch_limit):
            if target_index >= len(self.handles_pre_forward_order):
                break
            target_handle = self.handles_pre_forward_order[target_index]
            target_index += 1
        return target_handle

    def record_post_forward(self, handle: Optional[FlatParamHandle]) -> None:
        """
        Records ``handles`` in the post-forward order, where ``handles`` should
        be a group of handles used in the same module's forward. If ``handles``
        is empty, then it is omitted.

        Unlike :meth:`record_pre_forward`, this records the order *every*
        iteration with the expectation that the recorded order is reset in
        :meth:`next_iter`.
        """
        if not handle:
            return
        # Only record the first usage of a handles key.
        # BUGFIX: compare against None (index 0 is falsy), matching
        # `record_pre_forward`; otherwise the handle at index 0 would have its
        # index re-assigned on every subsequent usage.
        if handle._post_forward_index is not None:
            self.handles_post_forward_order.append(handle)
            return
        index = len(self.handles_post_forward_order)
        handle._post_forward_index = index
        self.handles_post_forward_order.append(handle)

    def record_pre_forward(
        self, handle: Optional[FlatParamHandle], is_training: bool
    ) -> None:
        """
        Records ``handles`` in the pre-forward order, where ``handles`` should
        be a group of handles used in the same module's forward. If ``handles``
        is empty, then it is omitted.

        On the first iteration, this checks the execution order across ranks.
        See :meth:`_check_order` for details.
        """
        if not handle:
            return
        self._check_order(handle, is_training)
        # Fix the order after the first iteration and only record the first
        # usage of a handles key
        if not self.is_first_iter or handle._pre_forward_order_index is not None:
            return
        index = len(self.handles_pre_forward_order)
        handle._pre_forward_order_index = index
        self.handles_pre_forward_order.append(handle)

    def _check_order(self, handle: FlatParamHandle, is_training: bool) -> None:
        """
        Checks the forward execution order as long as ``is_training`` is
        ``True`` since checking in eval mode is not supported. This only checks
        if the distributed debug level is DETAIL.

        - On the first iteration, this uses all-gathers to check that all ranks
        are all-gathering the same handles and hence ``FlatParameter`` s,
        raising an error if not.
        - On subsequent iterations, this checks that each rank is locally
        consistent with its own forward order from the first iteration, issuing
        a warning if not. This issues a warning on the first deviating
        iteration and stops warning thereafter.
        """
        # Do not check order in eval mode since the post-backward callback does
        # not run so it cannot be used to mark the end of an iteration
        if not is_training or not self._checking_order:
            return
        if self.is_first_iter:
            msg_prefix = "Forward order differs across ranks:"
            optional_local_indices: tuple[Optional[int], ...] = (
                self._get_handle_indices(handle)
            )
            device = handle.device  # guaranteed to be non-CPU
            num_valid_indices = sum(
                (index is not None) for index in optional_local_indices
            )
            tensor_kwargs: dict[str, Union[torch.dtype, torch.device]] = {
                "dtype": torch.int32,
                "device": device,
            }
            world_num_valid_indices = torch.zeros(self.world_size, **tensor_kwargs)  # type: ignore[arg-type, call-overload]
            local_num_valid_indices = torch.tensor([num_valid_indices], **tensor_kwargs)  # type: ignore[arg-type, call-overload]
            dist.all_gather_into_tensor(
                world_num_valid_indices,
                local_num_valid_indices,
                group=self.process_group,
            )
            # Copy entire tensor from D2H once to avoid per element D2H copies
            world_num_valid_indices = world_num_valid_indices.cpu()
            # Check that all ranks plan to all-gather the same number of
            # parameters
            # TODO (awgu): Since every module has at most one handle in the
            # current implementation, this should never raise the error.
            if self.world_size is None:
                raise AssertionError("Expected world_size to not be None")
            if not torch.distributed._functional_collectives.is_torchdynamo_compiling():
                # TODO(voz): Don't graph break on this - dynamo hates the n1 != n2
                # tensor comparison control flow.
                # https://github.com/pytorch/pytorch/issues/107055
                for (r1, n1), (r2, n2) in itertools.combinations(
                    (
                        (rank, world_num_valid_indices[rank])
                        for rank in range(self.world_size)
                    ),
                    2,
                ):
                    if n1 != n2:
                        raise RuntimeError(
                            f"{msg_prefix} rank {r1} is all-gathering {n1} parameters "
                            f"while rank {r2} is all-gathering {n2} parameters"
                        )
            world_indices = torch.zeros(  # type: ignore[call-overload]
                self.world_size * num_valid_indices, **tensor_kwargs
            )
            local_indices = torch.tensor(optional_local_indices, **tensor_kwargs)  # type: ignore[arg-type]
            dist.all_gather_into_tensor(
                world_indices, local_indices, group=self.process_group
            )
            # Copy entire tensor from D2H once to avoid per element D2H copies
            world_indices = world_indices.cpu()
            # Check that all ranks plan to all-gather the same index parameters
            if not torch.distributed._functional_collectives.is_torchdynamo_compiling():
                # TODO(voz): Don't graph break on this - dynamo hates the i1 != i2
                # tensor comparison control flow.
                # https://github.com/pytorch/pytorch/issues/107055
                for (r1, i1), (r2, i2) in itertools.combinations(
                    (
                        (
                            rank,
                            world_indices[
                                rank * num_valid_indices : (rank + 1)
                                * num_valid_indices
                            ],
                        )
                        for rank in range(self.world_size)
                    ),
                    2,
                ):
                    if i1 != i2:
                        r1_param_names = self._get_names_from_handle_indices(i1)
                        r2_param_names = self._get_names_from_handle_indices(i2)
                        raise RuntimeError(
                            f"{msg_prefix} rank {r1} is all-gathering parameters "
                            f"for {r1_param_names} while rank {r2} is all-gathering "
                            f"parameters for {r2_param_names}"
                        )
        else:
            # Only issue warnings on the first deviating iteration and stop
            # checking thereafter to avoid flooding the console
            if self.warn_status == _ExecOrderWarnStatus.WARNED:
                return
            msg_prefix = None  # non-`None` means we should warn
            if self.current_order_index >= len(self.handles_pre_forward_order):
                # This iteration sees extra all-gather(s) compared to the first
                msg_prefix = (
                    "Expected to not all-gather any more parameters in the "
                    "forward but trying to all-gather parameters for "
                )
            else:
                expected_handle = self.handles_pre_forward_order[
                    self.current_order_index
                ]
                if expected_handle != handle:
                    expected_param_names = self._get_names_from_handles(expected_handle)
                    msg_prefix = (
                        f"Expected to all-gather for {expected_param_names} "
                        "but trying to all-gather parameters for "
                    )
            if msg_prefix is not None:
                param_names = self._get_names_from_handles(handle)
                msg_suffix = (
                    f"{param_names}"
                    if param_names
                    else "a newly-added parameter since construction time"
                )
                warnings.warn(
                    "Forward order differs from that of the first iteration "
                    f"on rank {self.rank}. Collectives are unchecked and may "
                    f"give incorrect results or hang.\n{msg_prefix}{msg_suffix}",
                    stacklevel=2,
                )
                self.warn_status = _ExecOrderWarnStatus.WARNING
            self.current_order_index += 1

    def _get_handle_indices(
        self,
        handle: FlatParamHandle,
    ) -> tuple[Optional[int], ...]:
        """
        Returns the handle indices (i.e. indices into ``self.all_handles``)
        corresponding to the handles in ``handle``. An entry in the
        returned tuple is ``None`` if the handle is invalid.
        """
        indices: list[Optional[int]] = []
        if handle:
            indices.append(handle._handle_index)
        return tuple(indices)

    def _get_names_from_handle_indices(
        self,
        handle_indices: tuple[int, ...],
    ) -> list[list[str]]:
        """
        Returns a list of FQNs for each handle in ``handle_indices``. If a
        handle index is invalid, then its FQNs are omitted from the returned
        list.
        """
        fqns: list[list[str]] = []
        for index in handle_indices:
            if index is None or index < 0 or index >= len(self.all_handles):
                continue
            handle = self.all_handles[index]
            flat_param = handle.flat_param
            fqns.append(self.param_to_fqn[flat_param])
        return fqns

    def _get_names_from_handles(
        self,
        handle: FlatParamHandle,
    ) -> list[list[str]]:
        """
        Returns a list of FQNs for each handle in ``handles_key``. If a handle
        is invalid, then its FQNs are omitted from the returned list.
        """
        fqns: list[list[str]] = []
        if handle:
            flat_param = handle.flat_param
            if flat_param in self.param_to_fqn:
                fqns.append(self.param_to_fqn[flat_param])
        return fqns

    def next_iter(self):
        """
        Advances the internal data structures per iteration. This should be
        called in the post-backward callback since that marks the true end of
        an iteration.
        """
        self._iter += 1
        # The post-forward order is re-recorded every iteration (handles keep
        # their `_post_forward_index` from the first recording).
        self.handles_post_forward_order.clear()
        if self._checking_order:
            self.current_order_index = 0
            if self.warn_status == _ExecOrderWarnStatus.WARNING:
                self.warn_status = _ExecOrderWarnStatus.WARNED
|
_ExecOrderData
|
python
|
great-expectations__great_expectations
|
contrib/capitalone_dataprofiler_expectations/capitalone_dataprofiler_expectations/expectations/expect_column_values_to_be_equal_to_or_greater_than_profile_min.py
|
{
"start": 864,
"end": 2735
}
|
class ____(ColumnMapMetricProvider):
# This is the id string that will be used to reference your metric.
condition_metric_name = "column_values.greater_than_or_equal_to_profile_min"
condition_value_keys = ("profile",)
# This method implements the core logic for the PandasExecutionEngine
@column_condition_partial(engine=PandasExecutionEngine)
def _pandas(cls: Any, column: str, profile: Any, **kwargs) -> np.ndarray:
columnPresent = (
column.name
in profile["global_stats"]["profile_schema"] # checks to ensure column exists
)
transpose = np.array(column).T
if not (columnPresent): # Err column in user DF not present in input profile
return transpose != transpose # Returns 100% unexpected
index = profile["global_stats"]["profile_schema"][column.name][
0
] # Gets index of column from profile
dataType = profile["data_stats"][index]["data_type"] # Checks datatype
if dataType != "int" and dataType != "float": # Err non-numerical column
return transpose != transpose # Returns 100% unexpected
minimum = float(profile["data_stats"][index]["statistics"]["min"])
return transpose >= minimum
# This method defines the business logic for evaluating your metric when using a SqlAlchemyExecutionEngine
# @column_condition_partial(engine=SqlAlchemyExecutionEngine)
# def _sqlalchemy(cls, column, _dialect, **kwargs):
# raise NotImplementedError
# This method defines the business logic for evaluating your metric when using a SparkDFExecutionEngine
# @column_condition_partial(engine=SparkDFExecutionEngine)
# def _spark(cls, column, **kwargs):
# raise NotImplementedError
# This class defines the Expectation itself
|
ColumnValuesGreaterThanOrEqualToProfileMin
|
python
|
microsoft__pyright
|
packages/pyright-internal/src/tests/samples/paramSpec11.py
|
{
"start": 198,
"end": 731
}
|
class ____(Generic[_P, _R]):
def __init__(self, function: Callable[_P, _R]):
self.function = function
def __call__(self, *args: _P.args, **kwargs: _P.kwargs) -> _R:
print("Inside Function Call")
return self.function(*args, **kwargs)
def do_stuff(self, name: str, *args: _P.args, **kwargs: _P.kwargs) -> int:
return 0
@MyDecorator
def func1(x: int, y: int, *, z: int):
return x + y
func1(6, 6, z=6)
@MyDecorator
def func2(*, a: int):
pass
func2.do_stuff("hi", a=4)
|
MyDecorator
|
python
|
altair-viz__altair
|
altair/vegalite/v6/schema/core.py
|
{
"start": 274858,
"end": 276027
}
|
class ____(
ConditionalValueDefstringnullExprRef
):
"""
ConditionalParameterValueDefstringnullExprRef schema wrapper.
Parameters
----------
param : str, :class:`ParameterName`
Filter using a parameter name.
value : str, dict, :class:`ExprRef`, None
A constant value in visual domain (e.g., ``"red"`` / ``"#0099ff"`` / `gradient
definition <https://vega.github.io/vega-lite/docs/types.html#gradient>`__ for color,
values between ``0`` to ``1`` for opacity).
empty : bool
For selection parameters, the predicate of empty selections returns true by default.
Override this behavior, by setting this property ``empty: false``.
"""
_schema = {
"$ref": "#/definitions/ConditionalParameter<ValueDef<(string|null|ExprRef)>>"
}
def __init__(
self,
param: Optional[str | SchemaBase] = Undefined,
value: Optional[str | Parameter | SchemaBase | Map | None] = Undefined,
empty: Optional[bool] = Undefined,
**kwds,
):
super().__init__(param=param, value=value, empty=empty, **kwds)
|
ConditionalParameterValueDefstringnullExprRef
|
python
|
getsentry__sentry
|
tests/sentry/grouping/test_grouphash_metadata.py
|
{
"start": 7017,
"end": 8635
}
|
class ____(TestCase):
def test_check_grouphashes_for_positive_fingerprint_match(self) -> None:
grouphash1 = GroupHash.objects.create(hash="dogs", project_id=self.project.id)
grouphash2 = GroupHash.objects.create(hash="are great", project_id=self.project.id)
for fingerprint1, fingerprint2, expected_result in [
# All combos of default, hybrid (matching or not), custom (matching or not), and missing
# fingerprints
(["{{ default }}"], ["{{ default }}"], True),
(["{{ default }}"], ["{{ default }}", "maisey"], False),
(["{{ default }}"], ["charlie"], False),
(["{{ default }}"], None, False),
(["{{ default }}", "maisey"], ["{{ default }}", "maisey"], True),
(["{{ default }}", "maisey"], ["{{ default }}", "charlie"], False),
(["{{ default }}", "maisey"], ["charlie"], False),
(["{{ default }}", "maisey"], None, False),
(["charlie"], ["charlie"], True),
(["charlie"], ["maisey"], False),
(["charlie"], None, False),
(None, None, False),
]:
with (
patch.object(grouphash1, "get_associated_fingerprint", return_value=fingerprint1),
patch.object(grouphash2, "get_associated_fingerprint", return_value=fingerprint2),
):
assert (
check_grouphashes_for_positive_fingerprint_match(grouphash1, grouphash2)
== expected_result
), f"Case {fingerprint1}, {fingerprint2} failed"
|
GroupHashMetadataTest
|
python
|
tensorflow__tensorflow
|
tensorflow/python/autograph/core/config_lib.py
|
{
"start": 1085,
"end": 1158
}
|
class ____(enum.Enum):
NONE = 0
CONVERT = 1
DO_NOT_CONVERT = 2
|
Action
|
python
|
facelessuser__pymdown-extensions
|
pymdownx/saneheaders.py
|
{
"start": 541,
"end": 965
}
|
class ____(Extension):
"""Adds the sane headers extension."""
def extendMarkdown(self, md):
"""Extend the inline and block processor objects."""
md.parser.blockprocessors.register(SaneHeadersProcessor(md.parser), 'hashheader', 70)
md.registerExtension(self)
def makeExtension(*args, **kwargs):
"""Return extension."""
return SaneHeadersExtension(*args, **kwargs)
|
SaneHeadersExtension
|
python
|
google__pytype
|
pytype/rewrite/abstract/functions_test.py
|
{
"start": 2875,
"end": 4883
}
|
class ____(test_utils.ContextfulTestBase):
def test_init(self):
func_code = _get_const("""
def f(x, /, *args, y, **kwargs):
pass
""")
f = functions.InterpreterFunction(
ctx=self.ctx, name='f', code=func_code, enclosing_scope=(),
parent_frame=FakeFrame(self.ctx))
self.assertEqual(len(f.signatures), 1)
self.assertEqual(repr(f.signatures[0]),
'def f(x, /, *args, y, **kwargs) -> Any')
def test_map_args(self):
func_code = _get_const('def f(x): ...')
f = functions.InterpreterFunction(
ctx=self.ctx, name='f', code=func_code, enclosing_scope=(),
parent_frame=FakeFrame(self.ctx))
x = self.ctx.consts[0].to_variable()
mapped_args = f.map_args(functions.Args(posargs=(x,)))
self.assertEqual(mapped_args.signature, f.signatures[0])
self.assertEqual(mapped_args.argdict, {'x': x})
def test_call_with_mapped_args(self):
f = functions.InterpreterFunction(
ctx=self.ctx, name='f', code=_get_const('def f(x): ...'),
enclosing_scope=(), parent_frame=FakeFrame(self.ctx))
x = self.ctx.consts[0].to_variable()
mapped_args = functions.MappedArgs(f.signatures[0], {'x': x})
frame = f.call_with_mapped_args(mapped_args)
assert_type(frame, FakeFrame)
self.assertIsInstance(frame, FakeFrame)
def test_call(self):
f = functions.InterpreterFunction(
ctx=self.ctx, name='f', code=_get_const('def f(): ...'),
enclosing_scope=(), parent_frame=FakeFrame(self.ctx))
frame = f.call(functions.Args())
assert_type(frame, FakeFrame)
self.assertIsInstance(frame, FakeFrame)
def test_analyze(self):
f = functions.InterpreterFunction(
ctx=self.ctx, name='f', code=_get_const('def f(): ...'),
enclosing_scope=(), parent_frame=FakeFrame(self.ctx))
frames = f.analyze()
assert_type(frames, Sequence[FakeFrame])
self.assertEqual(len(frames), 1)
self.assertIsInstance(frames[0], FakeFrame)
|
InterpreterFunctionTest
|
python
|
spack__spack
|
var/spack/test_repos/spack_repo/builtin_mock/packages/git_sparsepaths_version/package.py
|
{
"start": 217,
"end": 503
}
|
class ____(Package):
"""Mock package with git_sparse_paths attribute"""
homepage = "http://www.git-fetch-example.com"
git = "https://a/really.com/big/repo.git"
version("1.0", tag="v1.0", git_sparse_paths=["foo", "bar"])
version("0.9", tag="v0.9")
|
GitSparsepathsVersion
|
python
|
PrefectHQ__prefect
|
src/prefect/concurrency/v1/context.py
|
{
"start": 229,
"end": 1107
}
|
class ____(ContextModel):
__var__: ClassVar[ContextVar[Self]] = ContextVar("concurrency_v1")
# Track the limits that have been acquired but were not able to be released
# due to cancellation or some other error. These limits are released when
# the context manager exits.
cleanup_slots: list[tuple[list[str], float, UUID]] = Field(default_factory=list)
def __exit__(self, *exc_info: Any) -> None:
if self.cleanup_slots:
with get_client(sync_client=True) as client:
for names, occupancy_seconds, task_run_id in self.cleanup_slots:
client.decrement_v1_concurrency_slots(
names=names,
occupancy_seconds=occupancy_seconds,
task_run_id=task_run_id,
)
return super().__exit__(*exc_info)
|
ConcurrencyContext
|
python
|
django__django
|
tests/admin_inlines/tests.py
|
{
"start": 70421,
"end": 74820
}
|
class ____(TestDataMixin, TestCase):
def setUp(self):
self.client.force_login(self.superuser)
@override_settings(DEBUG=True)
def test_fieldset_context_fully_set(self):
url = reverse("admin:admin_inlines_photographer_add")
with self.assertRaisesMessage(AssertionError, "no logs"):
with self.assertLogs("django.template", "DEBUG"):
self.client.get(url)
def test_inline_headings(self):
response = self.client.get(reverse("admin:admin_inlines_photographer_add"))
# Page main title.
self.assertContains(response, "<h1>Add photographer</h1>", html=True)
# Headings for the toplevel fieldsets. The first one has no name.
self.assertContains(response, '<fieldset class="module aligned ">')
# The second and third have the same "Advanced options" name, but the
# second one has the "collapse" class.
for x, classes in ((1, ""), (2, "collapse")):
heading_id = f"fieldset-0-{x}-heading"
with self.subTest(heading_id=heading_id):
self.assertContains(
response,
f'<fieldset class="module aligned {classes}" '
f'aria-labelledby="{heading_id}">',
)
self.assertContains(
response,
f'<h2 id="{heading_id}" class="fieldset-heading">'
"Advanced options</h2>",
)
self.assertContains(response, f'id="{heading_id}"', count=1)
# Headings and subheadings for all the inlines.
for inline_admin_formset in response.context["inline_admin_formsets"]:
prefix = inline_admin_formset.formset.prefix
heading_id = f"{prefix}-heading"
formset_heading = (
f'<h2 id="{heading_id}" class="inline-heading">Photos</h2>'
)
self.assertContains(response, formset_heading, html=True)
self.assertContains(response, f'id="{heading_id}"', count=1)
# If this is a TabularInline, do not make further asserts since
# fieldsets are not shown as such in this table layout.
if "tabular" in inline_admin_formset.opts.template:
continue
if "collapse" in inline_admin_formset.classes:
formset_heading = f"<summary>{formset_heading}</summary>"
self.assertContains(response, formset_heading, html=True, count=1)
# Headings for every formset (the amount depends on `extra`).
for y, inline_admin_form in enumerate(inline_admin_formset):
y_plus_one = y + 1
form_heading = (
f'<h3><b>Photo:</b> <span class="inline_label">#{y_plus_one}</span>'
"</h3>"
)
self.assertContains(response, form_heading, html=True)
# Every fieldset defined for an inline's form.
for z, fieldset in enumerate(inline_admin_form):
if fieldset.name:
heading_id = f"{prefix}-{y}-{z}-heading"
self.assertContains(
response,
f'<fieldset class="module aligned {fieldset.classes}" '
f'aria-labelledby="{heading_id}">',
)
fieldset_heading = (
f'<h4 id="{heading_id}" class="fieldset-heading">'
f"Details</h4>"
)
self.assertContains(response, fieldset_heading)
if "collapse" in fieldset.classes:
self.assertContains(
response,
f"<summary>{fieldset_heading}</summary>",
html=True,
)
self.assertContains(response, f'id="{heading_id}"', count=1)
else:
fieldset_html = (
f'<fieldset class="module aligned {fieldset.classes}">'
)
self.assertContains(response, fieldset_html)
@override_settings(ROOT_URLCONF="admin_inlines.urls")
|
TestInlineWithFieldsets
|
python
|
great-expectations__great_expectations
|
contrib/experimental/great_expectations_experimental/expectations/expect_column_values_to_be_valid_github_username.py
|
{
"start": 560,
"end": 1898
}
|
class ____(ColumnMapMetricProvider):
# This is the id string that will be used to reference your metric.
condition_metric_name = "column_values.valid_github_username"
# This method implements the core logic for the PandasExecutionEngine
@column_condition_partial(engine=PandasExecutionEngine)
def _pandas(cls, column, **kwargs):
def valid_github_user(x):
if not isinstance(x, str):
return False
try:
urlopen(Request(BASE_API_URL + x, headers={"User-Agent": "great_expectations"}))
return True
except (URLError, HTTPError):
return False
return column.apply(lambda x: valid_github_user(x) if x else False)
# This method defines the business logic for evaluating your metric when using a SqlAlchemyExecutionEngine
# @column_condition_partial(engine=SqlAlchemyExecutionEngine)
# def _sqlalchemy(cls, column, _dialect, **kwargs):
# raise NotImplementedError
# This method defines the business logic for evaluating your metric when using a SparkDFExecutionEngine
# @column_condition_partial(engine=SparkDFExecutionEngine)
# def _spark(cls, column, **kwargs):
# raise NotImplementedError
# This class defines the Expectation itself
|
ColumnValuesToBeValidGithubUsername
|
python
|
scikit-learn__scikit-learn
|
examples/miscellaneous/plot_metadata_routing.py
|
{
"start": 23907,
"end": 24849
}
|
class ____(MetaEstimatorMixin, RegressorMixin, BaseEstimator):
def __init__(self, estimator):
self.estimator = estimator
def fit(self, X, y, **fit_params):
routed_params = process_routing(self, "fit", **fit_params)
self.estimator_ = clone(self.estimator).fit(X, y, **routed_params.estimator.fit)
def get_metadata_routing(self):
router = MetadataRouter(owner=self).add(
estimator=self.estimator,
method_mapping=MethodMapping().add(caller="fit", callee="fit"),
)
return router
# %%
# As explained above, this is a valid usage if `my_weights` aren't supposed
# to be passed as `sample_weight` to `MetaRegressor`:
reg = MetaRegressor(estimator=LinearRegression().set_fit_request(sample_weight=True))
reg.fit(X, y, sample_weight=my_weights)
# %%
# Now imagine we further develop ``MetaRegressor`` and it now also *consumes*
# ``sample_weight``:
|
MetaRegressor
|
python
|
kamyu104__LeetCode-Solutions
|
Python/n-repeated-element-in-size-2n-array.py
|
{
"start": 29,
"end": 290
}
|
class ____(object):
def repeatedNTimes(self, A):
"""
:type A: List[int]
:rtype: int
"""
for i in xrange(2, len(A)):
if A[i-1] == A[i] or A[i-2] == A[i]:
return A[i]
return A[0]
|
Solution
|
python
|
realpython__materials
|
python-property/circle_v1.py
|
{
"start": 0,
"end": 480
}
|
class ____:
def __init__(self, radius):
self._radius = radius
def _get_radius(self):
print("Get radius")
return self._radius
def _set_radius(self, value):
print("Set radius")
self._radius = value
def _del_radius(self):
print("Delete radius")
del self._radius
radius = property(
fget=_get_radius,
fset=_set_radius,
fdel=_del_radius,
doc="The radius property.",
)
|
Circle
|
python
|
fluentpython__example-code
|
attic/sequences/slice_viewer.py
|
{
"start": 531,
"end": 603
}
|
class ____:
def __getitem__(self, position):
return position
|
SliceViewer
|
python
|
bokeh__bokeh
|
src/bokeh/models/glyphs.py
|
{
"start": 46554,
"end": 47842
}
|
class ____(XYGlyph, LineGlyph):
''' Render step lines.
Step levels can be draw before, after, or centered on each point, according
to the value of the ``mode`` property.
The x-coordinates are assumed to be (and must be) sorted in ascending order
for steps to be properly rendered.
'''
# explicit __init__ to support Init signatures
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
__example__ = "examples/reference/models/Step.py"
_args = ('x', 'y')
x = NumberSpec(default=field("x"), help="""
The x-coordinates for the steps.
""")
y = NumberSpec(default=field("y"), help="""
The y-coordinates for the steps.
""")
line_props = Include(ScalarLineProps, help="""
The {prop} values for the steps.
""")
mode = Enum(StepMode, default="before", help="""
Where the step "level" should be drawn in relation to the x and y
coordinates. The parameter can assume one of three values:
* ``before``: (default) Draw step levels before each x-coordinate (no step before the first point)
* ``after``: Draw step levels after each x-coordinate (no step after the last point)
* ``center``: Draw step levels centered on each x-coordinate
""")
|
Step
|
python
|
numpy__numpy
|
numpy/distutils/fcompiler/intel.py
|
{
"start": 996,
"end": 2654
}
|
class ____(BaseIntelFCompiler):
compiler_type = 'intel'
compiler_aliases = ('ifort',)
description = 'Intel Fortran Compiler for 32-bit apps'
version_match = intel_version_match('32-bit|IA-32')
possible_executables = ['ifort', 'ifc']
executables = {
'version_cmd' : None, # set by update_executables
'compiler_f77' : [None, "-72", "-w90", "-w95"],
'compiler_f90' : [None],
'compiler_fix' : [None, "-FI"],
'linker_so' : ["<F90>", "-shared"],
'archiver' : ["ar", "-cr"],
'ranlib' : ["ranlib"]
}
pic_flags = ['-fPIC']
module_dir_switch = '-module ' # Don't remove ending space!
module_include_switch = '-I'
def get_flags_free(self):
return ['-FR']
def get_flags(self):
return ['-fPIC']
def get_flags_opt(self): # Scipy test failures with -O2
v = self.get_version()
mpopt = 'openmp' if v and v < '15' else 'qopenmp'
return ['-fp-model', 'strict', '-O1',
'-assume', 'minus0', '-{}'.format(mpopt)]
def get_flags_arch(self):
return []
def get_flags_linker_so(self):
opt = FCompiler.get_flags_linker_so(self)
v = self.get_version()
if v and v >= '8.0':
opt.append('-nofor_main')
if sys.platform == 'darwin':
# Here, it's -dynamiclib
try:
idx = opt.index('-shared')
opt.remove('-shared')
except ValueError:
idx = 0
opt[idx:idx] = ['-dynamiclib', '-Wl,-undefined,dynamic_lookup']
return opt
|
IntelFCompiler
|
python
|
kubernetes-client__python
|
kubernetes/client/api/admissionregistration_v1beta1_api.py
|
{
"start": 543,
"end": 190008
}
|
class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def create_mutating_admission_policy(self, body, **kwargs): # noqa: E501
"""create_mutating_admission_policy # noqa: E501
create a MutatingAdmissionPolicy # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_mutating_admission_policy(body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param V1beta1MutatingAdmissionPolicy body: (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1beta1MutatingAdmissionPolicy
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.create_mutating_admission_policy_with_http_info(body, **kwargs) # noqa: E501
def create_mutating_admission_policy_with_http_info(self, body, **kwargs): # noqa: E501
"""create_mutating_admission_policy # noqa: E501
create a MutatingAdmissionPolicy # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_mutating_admission_policy_with_http_info(body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param V1beta1MutatingAdmissionPolicy body: (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1beta1MutatingAdmissionPolicy, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'body',
'pretty',
'dry_run',
'field_manager',
'field_validation'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method create_mutating_admission_policy" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'body' is set
if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
local_var_params['body'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `body` when calling `create_mutating_admission_policy`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/admissionregistration.k8s.io/v1beta1/mutatingadmissionpolicies', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1beta1MutatingAdmissionPolicy', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def create_mutating_admission_policy_binding(self, body, **kwargs): # noqa: E501
"""create_mutating_admission_policy_binding # noqa: E501
create a MutatingAdmissionPolicyBinding # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_mutating_admission_policy_binding(body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param V1beta1MutatingAdmissionPolicyBinding body: (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1beta1MutatingAdmissionPolicyBinding
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.create_mutating_admission_policy_binding_with_http_info(body, **kwargs) # noqa: E501
def create_mutating_admission_policy_binding_with_http_info(self, body, **kwargs): # noqa: E501
"""create_mutating_admission_policy_binding # noqa: E501
create a MutatingAdmissionPolicyBinding # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_mutating_admission_policy_binding_with_http_info(body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param V1beta1MutatingAdmissionPolicyBinding body: (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1beta1MutatingAdmissionPolicyBinding, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'body',
'pretty',
'dry_run',
'field_manager',
'field_validation'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method create_mutating_admission_policy_binding" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'body' is set
if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
local_var_params['body'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `body` when calling `create_mutating_admission_policy_binding`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/admissionregistration.k8s.io/v1beta1/mutatingadmissionpolicybindings', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1beta1MutatingAdmissionPolicyBinding', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def delete_collection_mutating_admission_policy(self, **kwargs): # noqa: E501
"""delete_collection_mutating_admission_policy # noqa: E501
delete collection of MutatingAdmissionPolicy # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_collection_mutating_admission_policy(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
:param bool ignore_store_read_error_with_cluster_breaking_potential: if set to true, it will trigger an unsafe deletion of the resource in case the normal deletion flow fails with a corrupt object error. A resource is considered corrupt if it can not be retrieved from the underlying storage successfully because of a) its data can not be transformed e.g. decryption failure, or b) it fails to decode into an object. NOTE: unsafe deletion ignores finalizer constraints, skips precondition checks, and removes the object from the storage. WARNING: This may potentially break the cluster if the workload associated with the resource being unsafe-deleted relies on normal deletion flow. Use only if you REALLY know what you are doing. The default value is false, and the user must opt in to enable it
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
:param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
:param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
:param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param V1DeleteOptions body:
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1Status
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.delete_collection_mutating_admission_policy_with_http_info(**kwargs) # noqa: E501
def delete_collection_mutating_admission_policy_with_http_info(self, **kwargs): # noqa: E501
"""delete_collection_mutating_admission_policy # noqa: E501
delete collection of MutatingAdmissionPolicy # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_collection_mutating_admission_policy_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
:param bool ignore_store_read_error_with_cluster_breaking_potential: if set to true, it will trigger an unsafe deletion of the resource in case the normal deletion flow fails with a corrupt object error. A resource is considered corrupt if it can not be retrieved from the underlying storage successfully because of a) its data can not be transformed e.g. decryption failure, or b) it fails to decode into an object. NOTE: unsafe deletion ignores finalizer constraints, skips precondition checks, and removes the object from the storage. WARNING: This may potentially break the cluster if the workload associated with the resource being unsafe-deleted relies on normal deletion flow. Use only if you REALLY know what you are doing. The default value is false, and the user must opt in to enable it
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
:param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
:param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
:param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param V1DeleteOptions body:
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1Status, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'pretty',
'_continue',
'dry_run',
'field_selector',
'grace_period_seconds',
'ignore_store_read_error_with_cluster_breaking_potential',
'label_selector',
'limit',
'orphan_dependents',
'propagation_policy',
'resource_version',
'resource_version_match',
'send_initial_events',
'timeout_seconds',
'body'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_collection_mutating_admission_policy" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if '_continue' in local_var_params and local_var_params['_continue'] is not None: # noqa: E501
query_params.append(('continue', local_var_params['_continue'])) # noqa: E501
if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None: # noqa: E501
query_params.append(('fieldSelector', local_var_params['field_selector'])) # noqa: E501
if 'grace_period_seconds' in local_var_params and local_var_params['grace_period_seconds'] is not None: # noqa: E501
query_params.append(('gracePeriodSeconds', local_var_params['grace_period_seconds'])) # noqa: E501
if 'ignore_store_read_error_with_cluster_breaking_potential' in local_var_params and local_var_params['ignore_store_read_error_with_cluster_breaking_potential'] is not None: # noqa: E501
query_params.append(('ignoreStoreReadErrorWithClusterBreakingPotential', local_var_params['ignore_store_read_error_with_cluster_breaking_potential'])) # noqa: E501
if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None: # noqa: E501
query_params.append(('labelSelector', local_var_params['label_selector'])) # noqa: E501
if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501
query_params.append(('limit', local_var_params['limit'])) # noqa: E501
if 'orphan_dependents' in local_var_params and local_var_params['orphan_dependents'] is not None: # noqa: E501
query_params.append(('orphanDependents', local_var_params['orphan_dependents'])) # noqa: E501
if 'propagation_policy' in local_var_params and local_var_params['propagation_policy'] is not None: # noqa: E501
query_params.append(('propagationPolicy', local_var_params['propagation_policy'])) # noqa: E501
if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None: # noqa: E501
query_params.append(('resourceVersion', local_var_params['resource_version'])) # noqa: E501
if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None: # noqa: E501
query_params.append(('resourceVersionMatch', local_var_params['resource_version_match'])) # noqa: E501
if 'send_initial_events' in local_var_params and local_var_params['send_initial_events'] is not None: # noqa: E501
query_params.append(('sendInitialEvents', local_var_params['send_initial_events'])) # noqa: E501
if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None: # noqa: E501
query_params.append(('timeoutSeconds', local_var_params['timeout_seconds'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/admissionregistration.k8s.io/v1beta1/mutatingadmissionpolicies', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Status', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def delete_collection_mutating_admission_policy_binding(self, **kwargs): # noqa: E501
"""delete_collection_mutating_admission_policy_binding # noqa: E501
delete collection of MutatingAdmissionPolicyBinding # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_collection_mutating_admission_policy_binding(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
:param bool ignore_store_read_error_with_cluster_breaking_potential: if set to true, it will trigger an unsafe deletion of the resource in case the normal deletion flow fails with a corrupt object error. A resource is considered corrupt if it can not be retrieved from the underlying storage successfully because of a) its data can not be transformed e.g. decryption failure, or b) it fails to decode into an object. NOTE: unsafe deletion ignores finalizer constraints, skips precondition checks, and removes the object from the storage. WARNING: This may potentially break the cluster if the workload associated with the resource being unsafe-deleted relies on normal deletion flow. Use only if you REALLY know what you are doing. The default value is false, and the user must opt in to enable it
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
:param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
:param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
:param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param V1DeleteOptions body:
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1Status
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.delete_collection_mutating_admission_policy_binding_with_http_info(**kwargs) # noqa: E501
def delete_collection_mutating_admission_policy_binding_with_http_info(self, **kwargs): # noqa: E501
"""delete_collection_mutating_admission_policy_binding # noqa: E501
delete collection of MutatingAdmissionPolicyBinding # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_collection_mutating_admission_policy_binding_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
:param bool ignore_store_read_error_with_cluster_breaking_potential: if set to true, it will trigger an unsafe deletion of the resource in case the normal deletion flow fails with a corrupt object error. A resource is considered corrupt if it can not be retrieved from the underlying storage successfully because of a) its data can not be transformed e.g. decryption failure, or b) it fails to decode into an object. NOTE: unsafe deletion ignores finalizer constraints, skips precondition checks, and removes the object from the storage. WARNING: This may potentially break the cluster if the workload associated with the resource being unsafe-deleted relies on normal deletion flow. Use only if you REALLY know what you are doing. The default value is false, and the user must opt in to enable it
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
:param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
:param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
:param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param V1DeleteOptions body:
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1Status, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'pretty',
'_continue',
'dry_run',
'field_selector',
'grace_period_seconds',
'ignore_store_read_error_with_cluster_breaking_potential',
'label_selector',
'limit',
'orphan_dependents',
'propagation_policy',
'resource_version',
'resource_version_match',
'send_initial_events',
'timeout_seconds',
'body'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_collection_mutating_admission_policy_binding" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if '_continue' in local_var_params and local_var_params['_continue'] is not None: # noqa: E501
query_params.append(('continue', local_var_params['_continue'])) # noqa: E501
if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None: # noqa: E501
query_params.append(('fieldSelector', local_var_params['field_selector'])) # noqa: E501
if 'grace_period_seconds' in local_var_params and local_var_params['grace_period_seconds'] is not None: # noqa: E501
query_params.append(('gracePeriodSeconds', local_var_params['grace_period_seconds'])) # noqa: E501
if 'ignore_store_read_error_with_cluster_breaking_potential' in local_var_params and local_var_params['ignore_store_read_error_with_cluster_breaking_potential'] is not None: # noqa: E501
query_params.append(('ignoreStoreReadErrorWithClusterBreakingPotential', local_var_params['ignore_store_read_error_with_cluster_breaking_potential'])) # noqa: E501
if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None: # noqa: E501
query_params.append(('labelSelector', local_var_params['label_selector'])) # noqa: E501
if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501
query_params.append(('limit', local_var_params['limit'])) # noqa: E501
if 'orphan_dependents' in local_var_params and local_var_params['orphan_dependents'] is not None: # noqa: E501
query_params.append(('orphanDependents', local_var_params['orphan_dependents'])) # noqa: E501
if 'propagation_policy' in local_var_params and local_var_params['propagation_policy'] is not None: # noqa: E501
query_params.append(('propagationPolicy', local_var_params['propagation_policy'])) # noqa: E501
if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None: # noqa: E501
query_params.append(('resourceVersion', local_var_params['resource_version'])) # noqa: E501
if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None: # noqa: E501
query_params.append(('resourceVersionMatch', local_var_params['resource_version_match'])) # noqa: E501
if 'send_initial_events' in local_var_params and local_var_params['send_initial_events'] is not None: # noqa: E501
query_params.append(('sendInitialEvents', local_var_params['send_initial_events'])) # noqa: E501
if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None: # noqa: E501
query_params.append(('timeoutSeconds', local_var_params['timeout_seconds'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/admissionregistration.k8s.io/v1beta1/mutatingadmissionpolicybindings', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Status', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def delete_mutating_admission_policy(self, name, **kwargs): # noqa: E501
"""delete_mutating_admission_policy # noqa: E501
delete a MutatingAdmissionPolicy # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_mutating_admission_policy(name, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the MutatingAdmissionPolicy (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
:param bool ignore_store_read_error_with_cluster_breaking_potential: if set to true, it will trigger an unsafe deletion of the resource in case the normal deletion flow fails with a corrupt object error. A resource is considered corrupt if it can not be retrieved from the underlying storage successfully because of a) its data can not be transformed e.g. decryption failure, or b) it fails to decode into an object. NOTE: unsafe deletion ignores finalizer constraints, skips precondition checks, and removes the object from the storage. WARNING: This may potentially break the cluster if the workload associated with the resource being unsafe-deleted relies on normal deletion flow. Use only if you REALLY know what you are doing. The default value is false, and the user must opt in to enable it
:param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
:param V1DeleteOptions body:
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1Status
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.delete_mutating_admission_policy_with_http_info(name, **kwargs) # noqa: E501
def delete_mutating_admission_policy_with_http_info(self, name, **kwargs): # noqa: E501
"""delete_mutating_admission_policy # noqa: E501
delete a MutatingAdmissionPolicy # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_mutating_admission_policy_with_http_info(name, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the MutatingAdmissionPolicy (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
:param bool ignore_store_read_error_with_cluster_breaking_potential: if set to true, it will trigger an unsafe deletion of the resource in case the normal deletion flow fails with a corrupt object error. A resource is considered corrupt if it can not be retrieved from the underlying storage successfully because of a) its data can not be transformed e.g. decryption failure, or b) it fails to decode into an object. NOTE: unsafe deletion ignores finalizer constraints, skips precondition checks, and removes the object from the storage. WARNING: This may potentially break the cluster if the workload associated with the resource being unsafe-deleted relies on normal deletion flow. Use only if you REALLY know what you are doing. The default value is false, and the user must opt in to enable it
:param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
:param V1DeleteOptions body:
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1Status, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'name',
'pretty',
'dry_run',
'grace_period_seconds',
'ignore_store_read_error_with_cluster_breaking_potential',
'orphan_dependents',
'propagation_policy',
'body'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_mutating_admission_policy" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'name' is set
if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
local_var_params['name'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `name` when calling `delete_mutating_admission_policy`") # noqa: E501
collection_formats = {}
path_params = {}
if 'name' in local_var_params:
path_params['name'] = local_var_params['name'] # noqa: E501
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
if 'grace_period_seconds' in local_var_params and local_var_params['grace_period_seconds'] is not None: # noqa: E501
query_params.append(('gracePeriodSeconds', local_var_params['grace_period_seconds'])) # noqa: E501
if 'ignore_store_read_error_with_cluster_breaking_potential' in local_var_params and local_var_params['ignore_store_read_error_with_cluster_breaking_potential'] is not None: # noqa: E501
query_params.append(('ignoreStoreReadErrorWithClusterBreakingPotential', local_var_params['ignore_store_read_error_with_cluster_breaking_potential'])) # noqa: E501
if 'orphan_dependents' in local_var_params and local_var_params['orphan_dependents'] is not None: # noqa: E501
query_params.append(('orphanDependents', local_var_params['orphan_dependents'])) # noqa: E501
if 'propagation_policy' in local_var_params and local_var_params['propagation_policy'] is not None: # noqa: E501
query_params.append(('propagationPolicy', local_var_params['propagation_policy'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/admissionregistration.k8s.io/v1beta1/mutatingadmissionpolicies/{name}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Status', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def delete_mutating_admission_policy_binding(self, name, **kwargs):  # noqa: E501
    """delete_mutating_admission_policy_binding  # noqa: E501

    Delete a MutatingAdmissionPolicyBinding by name. The call is synchronous
    by default; pass ``async_req=True`` to receive a thread whose ``get()``
    yields the result instead.

    >>> thread = api.delete_mutating_admission_policy_binding(name, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the MutatingAdmissionPolicyBinding (required)
    :param str pretty: if 'true', the output is pretty printed
    :param str dry_run: when present, modifications are not persisted; the
        only valid value is 'All'
    :param int grace_period_seconds: non-negative seconds before deletion;
        zero means delete immediately; defaults to a per-object value
    :param bool ignore_store_read_error_with_cluster_breaking_potential:
        opt in to unsafe deletion of a corrupt object (skips finalizer
        constraints and precondition checks); default False
    :param bool orphan_dependents: deprecated, use propagation_policy;
        mutually exclusive with propagation_policy
    :param str propagation_policy: garbage-collection policy, one of
        'Orphan', 'Background' or 'Foreground'; mutually exclusive with
        orphan_dependents
    :param V1DeleteOptions body:
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
        without reading/decoding the body (default True)
    :param _request_timeout: total timeout, or a (connection, read) tuple
    :return: V1Status, or the request thread when asynchronous
    """
    # This convenience wrapper only hands back the deserialized body, not
    # the (data, status, headers) triple — force the flag before delegating.
    kwargs.update(_return_http_data_only=True)
    return self.delete_mutating_admission_policy_binding_with_http_info(name, **kwargs)  # noqa: E501
def delete_mutating_admission_policy_binding_with_http_info(self, name, **kwargs):  # noqa: E501
    """delete_mutating_admission_policy_binding  # noqa: E501

    Delete a MutatingAdmissionPolicyBinding by name, returning the full
    HTTP triple. Synchronous by default; pass ``async_req=True`` to receive
    a thread whose ``get()`` yields the result.

    >>> thread = api.delete_mutating_admission_policy_binding_with_http_info(name, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the MutatingAdmissionPolicyBinding (required)
    :param str pretty: if 'true', the output is pretty printed
    :param str dry_run: when present, modifications are not persisted; the
        only valid value is 'All'
    :param int grace_period_seconds: non-negative seconds before deletion;
        zero means delete immediately; defaults to a per-object value
    :param bool ignore_store_read_error_with_cluster_breaking_potential:
        opt in to unsafe deletion of a corrupt object (skips finalizer
        constraints and precondition checks); default False
    :param bool orphan_dependents: deprecated, use propagation_policy;
        mutually exclusive with propagation_policy
    :param str propagation_policy: garbage-collection policy, one of
        'Orphan', 'Background' or 'Foreground'; mutually exclusive with
        orphan_dependents
    :param V1DeleteOptions body:
    :param _return_http_data_only: response data without status code and
        headers
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
        without reading/decoding the body (default True)
    :param _request_timeout: total timeout, or a (connection, read) tuple
    :return: tuple(V1Status, status_code(int), headers(HTTPHeaderDict)),
        or the request thread when asynchronous
    """
    # Snapshot of this call's arguments (self, name, kwargs), taken before
    # any other local is defined so it contains exactly the inputs.
    local_var_params = locals()

    # Keyword arguments this endpoint accepts ...
    all_params = [
        'name',
        'pretty',
        'dry_run',
        'grace_period_seconds',
        'ignore_store_read_error_with_cluster_breaking_potential',
        'orphan_dependents',
        'propagation_policy',
        'body'
    ]
    # ... plus the client-level options common to every API method.
    all_params.extend(
        [
            'async_req',
            '_return_http_data_only',
            '_preload_content',
            '_request_timeout'
        ]
    )

    # Reject unknown keyword arguments, then flatten kwargs into the
    # parameter dict so every option is addressed by name below.
    for key, val in six.iteritems(local_var_params['kwargs']):
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method delete_mutating_admission_policy_binding" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']
    # verify the required parameter 'name' is set
    if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
                                                   local_var_params['name'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `name` when calling `delete_mutating_admission_policy_binding`")  # noqa: E501

    collection_formats = {}

    # Substituted into the {name} placeholder of the URL path template.
    path_params = {}
    if 'name' in local_var_params:
        path_params['name'] = local_var_params['name']  # noqa: E501

    # Only forward query parameters the caller explicitly supplied.
    query_params = []
    if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
        query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
    if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None:  # noqa: E501
        query_params.append(('dryRun', local_var_params['dry_run']))  # noqa: E501
    if 'grace_period_seconds' in local_var_params and local_var_params['grace_period_seconds'] is not None:  # noqa: E501
        query_params.append(('gracePeriodSeconds', local_var_params['grace_period_seconds']))  # noqa: E501
    if 'ignore_store_read_error_with_cluster_breaking_potential' in local_var_params and local_var_params['ignore_store_read_error_with_cluster_breaking_potential'] is not None:  # noqa: E501
        query_params.append(('ignoreStoreReadErrorWithClusterBreakingPotential', local_var_params['ignore_store_read_error_with_cluster_breaking_potential']))  # noqa: E501
    if 'orphan_dependents' in local_var_params and local_var_params['orphan_dependents'] is not None:  # noqa: E501
        query_params.append(('orphanDependents', local_var_params['orphan_dependents']))  # noqa: E501
    if 'propagation_policy' in local_var_params and local_var_params['propagation_policy'] is not None:  # noqa: E501
        query_params.append(('propagationPolicy', local_var_params['propagation_policy']))  # noqa: E501

    header_params = {}

    form_params = []
    local_var_files = {}

    # Optional V1DeleteOptions request body.
    body_params = None
    if 'body' in local_var_params:
        body_params = local_var_params['body']
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor'])  # noqa: E501

    # Authentication setting
    auth_settings = ['BearerToken']  # noqa: E501

    return self.api_client.call_api(
        '/apis/admissionregistration.k8s.io/v1beta1/mutatingadmissionpolicybindings/{name}', 'DELETE',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='V1Status',  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats)
def get_api_resources(self, **kwargs):  # noqa: E501
    """get_api_resources  # noqa: E501

    List the API resources available under
    admissionregistration.k8s.io/v1beta1. Synchronous by default; pass
    ``async_req=True`` to receive a thread whose ``get()`` yields the
    result instead.

    >>> thread = api.get_api_resources(async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
        without reading/decoding the body (default True)
    :param _request_timeout: total timeout, or a (connection, read) tuple
    :return: V1APIResourceList, or the request thread when asynchronous
    """
    # This convenience wrapper only hands back the deserialized body, not
    # the (data, status, headers) triple — force the flag before delegating.
    kwargs.update(_return_http_data_only=True)
    return self.get_api_resources_with_http_info(**kwargs)  # noqa: E501
def get_api_resources_with_http_info(self, **kwargs):  # noqa: E501
    """get_api_resources  # noqa: E501

    List the API resources available under
    admissionregistration.k8s.io/v1beta1, returning the full HTTP triple.
    Synchronous by default; pass ``async_req=True`` to receive a thread
    whose ``get()`` yields the result.

    >>> thread = api.get_api_resources_with_http_info(async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param _return_http_data_only: response data without status code and
        headers
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
        without reading/decoding the body (default True)
    :param _request_timeout: total timeout, or a (connection, read) tuple
    :return: tuple(V1APIResourceList, status_code(int), headers(HTTPHeaderDict)),
        or the request thread when asynchronous
    """
    # Snapshot of this call's arguments (self, kwargs), taken before any
    # other local is defined so it contains exactly the inputs.
    local_var_params = locals()

    # This endpoint takes no endpoint-specific parameters ...
    all_params = [
    ]
    # ... only the client-level options common to every API method.
    all_params.extend(
        [
            'async_req',
            '_return_http_data_only',
            '_preload_content',
            '_request_timeout'
        ]
    )

    # Reject unknown keyword arguments, then flatten kwargs into the
    # parameter dict.
    for key, val in six.iteritems(local_var_params['kwargs']):
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_api_resources" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']

    collection_formats = {}

    # No path or query parameters for this discovery endpoint.
    path_params = {}

    query_params = []

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor'])  # noqa: E501

    # Authentication setting
    auth_settings = ['BearerToken']  # noqa: E501

    return self.api_client.call_api(
        '/apis/admissionregistration.k8s.io/v1beta1/', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='V1APIResourceList',  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats)
def list_mutating_admission_policy(self, **kwargs):  # noqa: E501
    """list_mutating_admission_policy  # noqa: E501

    List or watch objects of kind MutatingAdmissionPolicy. Synchronous by
    default; pass ``async_req=True`` to receive a thread whose ``get()``
    yields the result instead.

    >>> thread = api.list_mutating_admission_policy(async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str pretty: if 'true', the output is pretty printed
    :param bool allow_watch_bookmarks: request BOOKMARK watch events;
        ignored when this is not a watch
    :param str _continue: continuation token from a previous paginated
        list; not supported when watch is true
    :param str field_selector: restrict returned objects by their fields
    :param str label_selector: restrict returned objects by their labels
    :param int limit: maximum number of responses for a list call; more
        results are retrieved via the `continue` token
    :param str resource_version: constraint on which resource versions may
        serve the request
    :param str resource_version_match: how resource_version is applied to
        list calls
    :param bool send_initial_events: with watch=true, begin the stream with
        synthetic events for the current collection state followed by a
        synthetic Bookmark; requires resource_version_match to be set
    :param int timeout_seconds: timeout for the list/watch call
    :param bool watch: stream add/update/remove notifications instead of
        returning a list
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
        without reading/decoding the body (default True)
    :param _request_timeout: total timeout, or a (connection, read) tuple
    :return: V1beta1MutatingAdmissionPolicyList, or the request thread when
        asynchronous
    """
    # This convenience wrapper only hands back the deserialized body, not
    # the (data, status, headers) triple — force the flag before delegating.
    kwargs.update(_return_http_data_only=True)
    return self.list_mutating_admission_policy_with_http_info(**kwargs)  # noqa: E501
def list_mutating_admission_policy_with_http_info(self, **kwargs):  # noqa: E501
    """list_mutating_admission_policy  # noqa: E501

    List or watch objects of kind MutatingAdmissionPolicy, returning the
    full HTTP triple. Synchronous by default; pass ``async_req=True`` to
    receive a thread whose ``get()`` yields the result.

    >>> thread = api.list_mutating_admission_policy_with_http_info(async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str pretty: if 'true', the output is pretty printed
    :param bool allow_watch_bookmarks: request BOOKMARK watch events;
        ignored when this is not a watch
    :param str _continue: continuation token from a previous paginated
        list; not supported when watch is true
    :param str field_selector: restrict returned objects by their fields
    :param str label_selector: restrict returned objects by their labels
    :param int limit: maximum number of responses for a list call; more
        results are retrieved via the `continue` token
    :param str resource_version: constraint on which resource versions may
        serve the request
    :param str resource_version_match: how resource_version is applied to
        list calls
    :param bool send_initial_events: with watch=true, begin the stream with
        synthetic events for the current collection state followed by a
        synthetic Bookmark; requires resource_version_match to be set
    :param int timeout_seconds: timeout for the list/watch call
    :param bool watch: stream add/update/remove notifications instead of
        returning a list
    :param _return_http_data_only: response data without status code and
        headers
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
        without reading/decoding the body (default True)
    :param _request_timeout: total timeout, or a (connection, read) tuple
    :return: tuple(V1beta1MutatingAdmissionPolicyList, status_code(int), headers(HTTPHeaderDict)),
        or the request thread when asynchronous
    """
    # Snapshot of this call's arguments (self, kwargs), taken before any
    # other local is defined so it contains exactly the inputs.
    local_var_params = locals()

    # Keyword arguments this endpoint accepts ...
    all_params = [
        'pretty',
        'allow_watch_bookmarks',
        '_continue',
        'field_selector',
        'label_selector',
        'limit',
        'resource_version',
        'resource_version_match',
        'send_initial_events',
        'timeout_seconds',
        'watch'
    ]
    # ... plus the client-level options common to every API method.
    all_params.extend(
        [
            'async_req',
            '_return_http_data_only',
            '_preload_content',
            '_request_timeout'
        ]
    )

    # Reject unknown keyword arguments, then flatten kwargs into the
    # parameter dict so every option is addressed by name below.
    for key, val in six.iteritems(local_var_params['kwargs']):
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method list_mutating_admission_policy" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']

    collection_formats = {}

    # Collection endpoint: no path placeholders.
    path_params = {}

    # Only forward query parameters the caller explicitly supplied.
    # Note `_continue` maps to the wire name 'continue' (a Python keyword).
    query_params = []
    if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
        query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
    if 'allow_watch_bookmarks' in local_var_params and local_var_params['allow_watch_bookmarks'] is not None:  # noqa: E501
        query_params.append(('allowWatchBookmarks', local_var_params['allow_watch_bookmarks']))  # noqa: E501
    if '_continue' in local_var_params and local_var_params['_continue'] is not None:  # noqa: E501
        query_params.append(('continue', local_var_params['_continue']))  # noqa: E501
    if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None:  # noqa: E501
        query_params.append(('fieldSelector', local_var_params['field_selector']))  # noqa: E501
    if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None:  # noqa: E501
        query_params.append(('labelSelector', local_var_params['label_selector']))  # noqa: E501
    if 'limit' in local_var_params and local_var_params['limit'] is not None:  # noqa: E501
        query_params.append(('limit', local_var_params['limit']))  # noqa: E501
    if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None:  # noqa: E501
        query_params.append(('resourceVersion', local_var_params['resource_version']))  # noqa: E501
    if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None:  # noqa: E501
        query_params.append(('resourceVersionMatch', local_var_params['resource_version_match']))  # noqa: E501
    if 'send_initial_events' in local_var_params and local_var_params['send_initial_events'] is not None:  # noqa: E501
        query_params.append(('sendInitialEvents', local_var_params['send_initial_events']))  # noqa: E501
    if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None:  # noqa: E501
        query_params.append(('timeoutSeconds', local_var_params['timeout_seconds']))  # noqa: E501
    if 'watch' in local_var_params and local_var_params['watch'] is not None:  # noqa: E501
        query_params.append(('watch', local_var_params['watch']))  # noqa: E501

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = None
    # HTTP header `Accept` — includes the streaming media types used by
    # watch responses.
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch', 'application/cbor-seq'])  # noqa: E501

    # Authentication setting
    auth_settings = ['BearerToken']  # noqa: E501

    return self.api_client.call_api(
        '/apis/admissionregistration.k8s.io/v1beta1/mutatingadmissionpolicies', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='V1beta1MutatingAdmissionPolicyList',  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats)
def list_mutating_admission_policy_binding(self, **kwargs): # noqa: E501
"""list_mutating_admission_policy_binding # noqa: E501
list or watch objects of kind MutatingAdmissionPolicyBinding # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_mutating_admission_policy_binding(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
:param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
:param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1beta1MutatingAdmissionPolicyBindingList
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.list_mutating_admission_policy_binding_with_http_info(**kwargs) # noqa: E501
def list_mutating_admission_policy_binding_with_http_info(self, **kwargs):  # noqa: E501
    """list_mutating_admission_policy_binding  # noqa: E501

    List or watch objects of kind MutatingAdmissionPolicyBinding.  # noqa: E501
    The request is synchronous by default; pass ``async_req=True`` to get
    the request thread back instead of the response tuple.

    >>> thread = api.list_mutating_admission_policy_binding_with_http_info(async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str pretty: if 'true', the output is pretty printed
    :param bool allow_watch_bookmarks: request BOOKMARK watch events
    :param str _continue: continuation token from a previous list call
    :param str field_selector: restrict returned objects by their fields
    :param str label_selector: restrict returned objects by their labels
    :param int limit: maximum number of responses for a list call
    :param str resource_version: resource-version constraint for the read
    :param str resource_version_match: how resource_version is applied to
        the list call (see the Kubernetes API concepts documentation)
    :param bool send_initial_events: with ``watch=true``, begin the stream
        with synthetic events describing the current collection state
    :param int timeout_seconds: timeout for the list/watch call
    :param bool watch: stream add/update/remove notifications instead of
        returning a single list
    :param _return_http_data_only: response data without head status code
        and headers
    :param _preload_content: if False, the urllib3.HTTPResponse object will
        be returned without reading/decoding response data. Default is True.
    :param _request_timeout: total request timeout in seconds, or a
        (connection, read) tuple
    :return: tuple(V1beta1MutatingAdmissionPolicyBindingList, status_code(int), headers(HTTPHeaderDict))
             If the method is called asynchronously, returns the request thread.
    :raises ApiTypeError: if an unexpected keyword argument is supplied
    """
    # python_name -> wire query-parameter name, in emission order.
    api_param_names = [
        ('pretty', 'pretty'),
        ('allow_watch_bookmarks', 'allowWatchBookmarks'),
        ('_continue', 'continue'),
        ('field_selector', 'fieldSelector'),
        ('label_selector', 'labelSelector'),
        ('limit', 'limit'),
        ('resource_version', 'resourceVersion'),
        ('resource_version_match', 'resourceVersionMatch'),
        ('send_initial_events', 'sendInitialEvents'),
        ('timeout_seconds', 'timeoutSeconds'),
        ('watch', 'watch'),
    ]
    all_params = [python_name for python_name, _ in api_param_names]
    all_params.extend([
        'async_req',
        '_return_http_data_only',
        '_preload_content',
        '_request_timeout',
    ])

    # Validate kwargs explicitly instead of snapshotting locals(); plain
    # dict.items() replaces the legacy six.iteritems call.
    local_var_params = {}
    for key, val in kwargs.items():
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method list_mutating_admission_policy_binding" % key
            )
        local_var_params[key] = val

    collection_formats = {}
    path_params = {}

    # Only parameters that were supplied (and are not None) become query
    # parameters, in the order defined by api_param_names.
    query_params = [
        (api_name, local_var_params[python_name])
        for python_name, api_name in api_param_names
        if local_var_params.get(python_name) is not None
    ]

    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None

    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch', 'application/cbor-seq'])  # noqa: E501

    # Authentication setting
    auth_settings = ['BearerToken']  # noqa: E501

    return self.api_client.call_api(
        '/apis/admissionregistration.k8s.io/v1beta1/mutatingadmissionpolicybindings', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='V1beta1MutatingAdmissionPolicyBindingList',  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats)
def patch_mutating_admission_policy(self, name, body, **kwargs):  # noqa: E501
    """patch_mutating_admission_policy  # noqa: E501

    Partially update the specified MutatingAdmissionPolicy.  # noqa: E501
    The request is synchronous by default; pass ``async_req=True`` to get
    the request thread back instead of the deserialized response.

    >>> thread = api.patch_mutating_admission_policy(name, body, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the MutatingAdmissionPolicy (required)
    :param object body: the patch document (required)
    :param str pretty: if 'true', the output is pretty printed
    :param str dry_run: when present, modifications are not persisted;
        the only valid value is 'All'
    :param str field_manager: name associated with the actor making these
        changes; required for apply-patch requests, optional otherwise
    :param str field_validation: how the server handles unknown or
        duplicate fields — 'Ignore', 'Warn', or 'Strict'
    :param bool force: \"force\" apply requests, re-acquiring fields owned
        by others; must be unset for non-apply patch requests
    :param _preload_content: if False, the urllib3.HTTPResponse object will
        be returned without reading/decoding response data. Default is True.
    :param _request_timeout: total request timeout in seconds, or a
        (connection, read) tuple
    :return: V1beta1MutatingAdmissionPolicy
             If the method is called asynchronously, returns the request thread.
    """
    # Force full deserialization; callers that want the (data, status,
    # headers) tuple should call the _with_http_info variant directly.
    opts = dict(kwargs, _return_http_data_only=True)
    return self.patch_mutating_admission_policy_with_http_info(name, body, **opts)  # noqa: E501
def patch_mutating_admission_policy_with_http_info(self, name, body, **kwargs):  # noqa: E501
    """patch_mutating_admission_policy  # noqa: E501

    Partially update the specified MutatingAdmissionPolicy.  # noqa: E501
    The request is synchronous by default; pass ``async_req=True`` to get
    the request thread back instead of the response tuple.

    >>> thread = api.patch_mutating_admission_policy_with_http_info(name, body, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the MutatingAdmissionPolicy (required)
    :param object body: the patch document (required)
    :param str pretty: if 'true', the output is pretty printed
    :param str dry_run: when present, modifications are not persisted;
        the only valid value is 'All'
    :param str field_manager: name associated with the actor making these
        changes; required for apply-patch requests, optional otherwise
    :param str field_validation: how the server handles unknown or
        duplicate fields — 'Ignore', 'Warn', or 'Strict'
    :param bool force: \"force\" apply requests, re-acquiring fields owned
        by others; must be unset for non-apply patch requests
    :param _return_http_data_only: response data without head status code
        and headers
    :param _preload_content: if False, the urllib3.HTTPResponse object will
        be returned without reading/decoding response data. Default is True.
    :param _request_timeout: total request timeout in seconds, or a
        (connection, read) tuple
    :return: tuple(V1beta1MutatingAdmissionPolicy, status_code(int), headers(HTTPHeaderDict))
             If the method is called asynchronously, returns the request thread.
    :raises ApiTypeError: if an unexpected keyword argument is supplied
    :raises ApiValueError: if a required parameter is missing
    """
    # python_name -> wire query-parameter name, in emission order.
    api_param_names = [
        ('pretty', 'pretty'),
        ('dry_run', 'dryRun'),
        ('field_manager', 'fieldManager'),
        ('field_validation', 'fieldValidation'),
        ('force', 'force'),
    ]
    all_params = ['name', 'body']
    all_params.extend(python_name for python_name, _ in api_param_names)
    all_params.extend([
        'async_req',
        '_return_http_data_only',
        '_preload_content',
        '_request_timeout',
    ])

    # Validate kwargs explicitly instead of snapshotting locals(); plain
    # dict.items() replaces the legacy six.iteritems call.
    local_var_params = {'name': name, 'body': body}
    for key, val in kwargs.items():
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method patch_mutating_admission_policy" % key
            )
        local_var_params[key] = val

    # 'name' and 'body' are positional, so they are always bound; only a
    # None value can violate the contract.
    if self.api_client.client_side_validation and name is None:  # noqa: E501
        raise ApiValueError("Missing the required parameter `name` when calling `patch_mutating_admission_policy`")  # noqa: E501
    if self.api_client.client_side_validation and body is None:  # noqa: E501
        raise ApiValueError("Missing the required parameter `body` when calling `patch_mutating_admission_policy`")  # noqa: E501

    collection_formats = {}

    path_params = {'name': name}

    # Only parameters that were supplied (and are not None) become query
    # parameters, in the order defined by api_param_names.
    query_params = [
        (api_name, local_var_params[python_name])
        for python_name, api_name in api_param_names
        if local_var_params.get(python_name) is not None
    ]

    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = body

    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor'])  # noqa: E501

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json', 'application/apply-patch+yaml', 'application/apply-patch+cbor'])  # noqa: E501

    # Authentication setting
    auth_settings = ['BearerToken']  # noqa: E501

    return self.api_client.call_api(
        '/apis/admissionregistration.k8s.io/v1beta1/mutatingadmissionpolicies/{name}', 'PATCH',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='V1beta1MutatingAdmissionPolicy',  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats)
def patch_mutating_admission_policy_binding(self, name, body, **kwargs):  # noqa: E501
    """patch_mutating_admission_policy_binding  # noqa: E501

    Partially update the specified MutatingAdmissionPolicyBinding.  # noqa: E501
    The request is synchronous by default; pass ``async_req=True`` to get
    the request thread back instead of the deserialized response.

    >>> thread = api.patch_mutating_admission_policy_binding(name, body, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the MutatingAdmissionPolicyBinding (required)
    :param object body: the patch document (required)
    :param str pretty: if 'true', the output is pretty printed
    :param str dry_run: when present, modifications are not persisted;
        the only valid value is 'All'
    :param str field_manager: name associated with the actor making these
        changes; required for apply-patch requests, optional otherwise
    :param str field_validation: how the server handles unknown or
        duplicate fields — 'Ignore', 'Warn', or 'Strict'
    :param bool force: \"force\" apply requests, re-acquiring fields owned
        by others; must be unset for non-apply patch requests
    :param _preload_content: if False, the urllib3.HTTPResponse object will
        be returned without reading/decoding response data. Default is True.
    :param _request_timeout: total request timeout in seconds, or a
        (connection, read) tuple
    :return: V1beta1MutatingAdmissionPolicyBinding
             If the method is called asynchronously, returns the request thread.
    """
    # Force full deserialization; callers that want the (data, status,
    # headers) tuple should call the _with_http_info variant directly.
    opts = dict(kwargs, _return_http_data_only=True)
    return self.patch_mutating_admission_policy_binding_with_http_info(name, body, **opts)  # noqa: E501
def patch_mutating_admission_policy_binding_with_http_info(self, name, body, **kwargs): # noqa: E501
"""patch_mutating_admission_policy_binding # noqa: E501
partially update the specified MutatingAdmissionPolicyBinding # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_mutating_admission_policy_binding_with_http_info(name, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the MutatingAdmissionPolicyBinding (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
:param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
:param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1beta1MutatingAdmissionPolicyBinding, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'name',
'body',
'pretty',
'dry_run',
'field_manager',
'field_validation',
'force'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method patch_mutating_admission_policy_binding" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'name' is set
if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
local_var_params['name'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `name` when calling `patch_mutating_admission_policy_binding`") # noqa: E501
# verify the required parameter 'body' is set
if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
local_var_params['body'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `body` when calling `patch_mutating_admission_policy_binding`") # noqa: E501
collection_formats = {}
path_params = {}
if 'name' in local_var_params:
path_params['name'] = local_var_params['name'] # noqa: E501
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
if 'force' in local_var_params and local_var_params['force'] is not None: # noqa: E501
query_params.append(('force', local_var_params['force'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json', 'application/apply-patch+yaml', 'application/apply-patch+cbor']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/admissionregistration.k8s.io/v1beta1/mutatingadmissionpolicybindings/{name}', 'PATCH',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1beta1MutatingAdmissionPolicyBinding', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def read_mutating_admission_policy(self, name, **kwargs): # noqa: E501
"""read_mutating_admission_policy # noqa: E501
read the specified MutatingAdmissionPolicy # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_mutating_admission_policy(name, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the MutatingAdmissionPolicy (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1beta1MutatingAdmissionPolicy
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.read_mutating_admission_policy_with_http_info(name, **kwargs) # noqa: E501
def read_mutating_admission_policy_with_http_info(self, name, **kwargs): # noqa: E501
"""read_mutating_admission_policy # noqa: E501
read the specified MutatingAdmissionPolicy # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_mutating_admission_policy_with_http_info(name, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the MutatingAdmissionPolicy (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1beta1MutatingAdmissionPolicy, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'name',
'pretty'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method read_mutating_admission_policy" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'name' is set
if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
local_var_params['name'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `name` when calling `read_mutating_admission_policy`") # noqa: E501
collection_formats = {}
path_params = {}
if 'name' in local_var_params:
path_params['name'] = local_var_params['name'] # noqa: E501
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/admissionregistration.k8s.io/v1beta1/mutatingadmissionpolicies/{name}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1beta1MutatingAdmissionPolicy', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def read_mutating_admission_policy_binding(self, name, **kwargs): # noqa: E501
"""read_mutating_admission_policy_binding # noqa: E501
read the specified MutatingAdmissionPolicyBinding # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_mutating_admission_policy_binding(name, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the MutatingAdmissionPolicyBinding (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1beta1MutatingAdmissionPolicyBinding
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.read_mutating_admission_policy_binding_with_http_info(name, **kwargs) # noqa: E501
def read_mutating_admission_policy_binding_with_http_info(self, name, **kwargs): # noqa: E501
"""read_mutating_admission_policy_binding # noqa: E501
read the specified MutatingAdmissionPolicyBinding # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_mutating_admission_policy_binding_with_http_info(name, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the MutatingAdmissionPolicyBinding (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1beta1MutatingAdmissionPolicyBinding, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'name',
'pretty'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method read_mutating_admission_policy_binding" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'name' is set
if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
local_var_params['name'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `name` when calling `read_mutating_admission_policy_binding`") # noqa: E501
collection_formats = {}
path_params = {}
if 'name' in local_var_params:
path_params['name'] = local_var_params['name'] # noqa: E501
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/admissionregistration.k8s.io/v1beta1/mutatingadmissionpolicybindings/{name}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1beta1MutatingAdmissionPolicyBinding', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def replace_mutating_admission_policy(self, name, body, **kwargs): # noqa: E501
"""replace_mutating_admission_policy # noqa: E501
replace the specified MutatingAdmissionPolicy # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_mutating_admission_policy(name, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the MutatingAdmissionPolicy (required)
:param V1beta1MutatingAdmissionPolicy body: (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1beta1MutatingAdmissionPolicy
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.replace_mutating_admission_policy_with_http_info(name, body, **kwargs) # noqa: E501
def replace_mutating_admission_policy_with_http_info(self, name, body, **kwargs): # noqa: E501
"""replace_mutating_admission_policy # noqa: E501
replace the specified MutatingAdmissionPolicy # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_mutating_admission_policy_with_http_info(name, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the MutatingAdmissionPolicy (required)
:param V1beta1MutatingAdmissionPolicy body: (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1beta1MutatingAdmissionPolicy, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'name',
'body',
'pretty',
'dry_run',
'field_manager',
'field_validation'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method replace_mutating_admission_policy" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'name' is set
if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
local_var_params['name'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `name` when calling `replace_mutating_admission_policy`") # noqa: E501
# verify the required parameter 'body' is set
if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
local_var_params['body'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `body` when calling `replace_mutating_admission_policy`") # noqa: E501
collection_formats = {}
path_params = {}
if 'name' in local_var_params:
path_params['name'] = local_var_params['name'] # noqa: E501
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/admissionregistration.k8s.io/v1beta1/mutatingadmissionpolicies/{name}', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1beta1MutatingAdmissionPolicy', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def replace_mutating_admission_policy_binding(self, name, body, **kwargs): # noqa: E501
"""replace_mutating_admission_policy_binding # noqa: E501
replace the specified MutatingAdmissionPolicyBinding # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_mutating_admission_policy_binding(name, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the MutatingAdmissionPolicyBinding (required)
:param V1beta1MutatingAdmissionPolicyBinding body: (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1beta1MutatingAdmissionPolicyBinding
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.replace_mutating_admission_policy_binding_with_http_info(name, body, **kwargs) # noqa: E501
def replace_mutating_admission_policy_binding_with_http_info(self, name, body, **kwargs): # noqa: E501
"""replace_mutating_admission_policy_binding # noqa: E501
replace the specified MutatingAdmissionPolicyBinding # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_mutating_admission_policy_binding_with_http_info(name, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the MutatingAdmissionPolicyBinding (required)
:param V1beta1MutatingAdmissionPolicyBinding body: (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1beta1MutatingAdmissionPolicyBinding, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'name',
'body',
'pretty',
'dry_run',
'field_manager',
'field_validation'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method replace_mutating_admission_policy_binding" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'name' is set
if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
local_var_params['name'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `name` when calling `replace_mutating_admission_policy_binding`") # noqa: E501
# verify the required parameter 'body' is set
if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
local_var_params['body'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `body` when calling `replace_mutating_admission_policy_binding`") # noqa: E501
collection_formats = {}
path_params = {}
if 'name' in local_var_params:
path_params['name'] = local_var_params['name'] # noqa: E501
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/admissionregistration.k8s.io/v1beta1/mutatingadmissionpolicybindings/{name}', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1beta1MutatingAdmissionPolicyBinding', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
|
AdmissionregistrationV1beta1Api
|
python
|
neetcode-gh__leetcode
|
python/0015-3sum.py
|
{
"start": 0,
"end": 803
}
|
class ____:
    def threeSum(self, nums: List[int]) -> List[List[int]]:
        """Return all unique triplets in *nums* summing to zero.

        Sorts *nums* in place, anchors each candidate first element, and
        scans the remaining suffix with two converging pointers.
        """
        triplets = []
        nums.sort()
        n = len(nums)
        for anchor in range(n):
            first = nums[anchor]
            if first > 0:
                # Everything from here on is positive; no zero sum possible.
                break
            if anchor > 0 and first == nums[anchor - 1]:
                # Same anchor value as the previous pass -> duplicate triplets.
                continue
            lo, hi = anchor + 1, n - 1
            while lo < hi:
                total = first + nums[lo] + nums[hi]
                if total > 0:
                    hi -= 1
                elif total < 0:
                    lo += 1
                else:
                    triplets.append([first, nums[lo], nums[hi]])
                    lo += 1
                    hi -= 1
                    # Skip duplicates of the middle element.
                    while nums[lo] == nums[lo - 1] and lo < hi:
                        lo += 1
        return triplets
|
Solution
|
python
|
getsentry__sentry
|
src/sentry/sentry_apps/api/serializers/platform_external_issue.py
|
{
"start": 360,
"end": 540
}
|
class ____(TypedDict):
    """Serialized shape of a platform external issue (API response payload).

    Keys are camelCase to match the JSON emitted to clients.
    """
    # NOTE(review): field meanings inferred from names only -- confirm
    # against the PlatformExternalIssue model and its serializer.
    id: str
    issueId: str
    serviceType: str
    displayName: str
    webUrl: str
@register(PlatformExternalIssue)
|
PlatformExternalIssueSerializerResponse
|
python
|
kamyu104__LeetCode-Solutions
|
Python/properties-graph.py
|
{
"start": 1166,
"end": 2474
}
|
class ____(object):
    def numberOfComponents(self, properties, k):
        """
        :type properties: List[List[int]]
        :type k: int
        :rtype: int
        """
        # Count connected components where rows i and j of `properties`
        # are joined iff they share at least k distinct values.
        # NOTE(review): Python 2 code -- relies on `xrange` and on
        # `range(n)` returning a mutable list (assigned via self.set[...]).
        class UnionFind(object):  # Time: O(n * alpha(n)), Space: O(n)
            def __init__(self, n):
                self.set = range(n)
                self.rank = [0]*n

            def find_set(self, x):
                stk = []
                while self.set[x] != x:  # path compression
                    stk.append(x)
                    x = self.set[x]
                while stk:
                    self.set[stk.pop()] = x
                return x

            def union_set(self, x, y):
                x, y = self.find_set(x), self.find_set(y)
                if x == y:
                    return False
                if self.rank[x] > self.rank[y]:  # union by rank
                    x, y = y, x
                self.set[x] = self.set[y]
                if self.rank[x] == self.rank[y]:
                    self.rank[y] += 1
                return True

        # Pre-build one set per row so overlap tests are O(1) per element.
        p_set = [set(p) for p in properties]
        uf = UnionFind(len(properties))
        # Each successful union merges two components, so the answer is
        # n minus the number of unions that returned True.  The `and`
        # short-circuits: union_set(i, j) runs only when the number of
        # shared distinct values reaches k -- the side effect is intended.
        return len(properties)-sum(sum(x in p_set[j] for x in p_set[i]) >= k and uf.union_set(i, j) for i in xrange(len(p_set)) for j in xrange(i+1, len(p_set)))
|
Solution2
|
python
|
spyder-ide__spyder
|
spyder/plugins/shortcuts/plugin.py
|
{
"start": 1292,
"end": 1468
}
|
class ____:
    """Identifiers for actions contributed by this plugin."""
    # Id of the action that shows the shortcut summary dialog --
    # presumably registered as a Qt action name; verify against callers.
    ShortcutSummaryAction = "show_shortcut_summary_action"
# --- Plugin
# ----------------------------------------------------------------------------
|
ShortcutActions
|
python
|
h5py__h5py
|
h5py/tests/test_vds/test_highlevel_vds.py
|
{
"start": 468,
"end": 2341
}
|
class ____(ut.TestCase):
    """High-level virtual-dataset (VDS) test stacking Eiger-style files."""

    def setUp(self):
        # Three raw files of 20 frames each, then a fourth of 18 frames,
        # every file filled with its ordinal value (0, 1, 2, 3).
        self.working_dir = tempfile.mkdtemp()
        self.fname = ['raw_file_1.h5', 'raw_file_2.h5', 'raw_file_3.h5']
        for value, name in enumerate(self.fname):
            with h5.File(osp.join(self.working_dir, name), 'w') as f:
                f['data'] = np.ones((20, 200, 200)) * value
        with h5.File(osp.join(self.working_dir, 'raw_file_4.h5'), 'w') as f:
            f['data'] = np.ones((18, 200, 200)) * 3
        self.fname.append('raw_file_4.h5')
        self.fname = [osp.join(self.working_dir, name) for name in self.fname]

    def test_eiger_high_level(self):
        outfile = osp.join(self.working_dir, make_name('eiger{}.h5'))
        layout = h5.VirtualLayout(shape=(78, 200, 200), dtype=float)
        start = 0
        # Create the virtual dataset file, stacking each source along axis 0.
        with h5.File(outfile, 'w', libver='latest') as f:
            for source_file in self.fname:
                in_data = h5.File(source_file, 'r')['data']
                src_shape = in_data.shape
                in_data.file.close()
                stop = start + src_shape[0]
                vsource = h5.VirtualSource(source_file, 'data', shape=src_shape)
                layout[start:stop, :, :] = vsource
                start = stop
            f.create_virtual_dataset('data', layout, fillvalue=45)
        # Probe one voxel inside each stacked source block.
        data = h5.File(outfile, 'r')['data']
        self.assertEqual(data[10, 100, 10], 0.0)
        self.assertEqual(data[30, 100, 100], 1.0)
        self.assertEqual(data[50, 100, 100], 2.0)
        self.assertEqual(data[70, 100, 100], 3.0)
        data.file.close()

    def tearDown(self):
        shutil.rmtree(self.working_dir)
'''
Unit test for the high level vds interface for excalibur
https://support.hdfgroup.org/HDF5/docNewFeatures/VDS/HDF5-VDS-requirements-use-cases-2014-12-10.pdf
'''
|
TestEigerHighLevel
|
python
|
kamyu104__LeetCode-Solutions
|
Python/print-foobar-alternately.py
|
{
"start": 48,
"end": 1065
}
|
class ____(object):
    """Print "foo" and "bar" alternately from two threads, n times each.

    A single condition variable guards a boolean turn flag:
    False -> foo may run next, True -> bar may run next.
    NOTE(review): Python 2 code (`xrange`).
    """

    def __init__(self, n):
        self.__n = n
        # Turn flag: False means it is foo's turn, True means bar's turn.
        self.__curr = False
        self.__cv = threading.Condition()

    def foo(self, printFoo):
        """
        :type printFoo: method
        :rtype: void
        """
        for i in xrange(self.__n):
            with self.__cv:
                # Block until the flag says it is foo's turn (False).
                while self.__curr != False:
                    self.__cv.wait()
                # Flip the turn to bar before releasing the lock.
                self.__curr = not self.__curr
                # printFoo() outputs "foo". Do not change or remove this line.
                printFoo()
                self.__cv.notify()

    def bar(self, printBar):
        """
        :type printBar: method
        :rtype: void
        """
        for i in xrange(self.__n):
            with self.__cv:
                # Block until the flag says it is bar's turn (True).
                while self.__curr != True:
                    self.__cv.wait()
                # Flip the turn back to foo before releasing the lock.
                self.__curr = not self.__curr
                # printBar() outputs "bar". Do not change or remove this line.
                printBar()
                self.__cv.notify()
|
FooBar
|
python
|
jmcnamara__XlsxWriter
|
xlsxwriter/test/comparison/test_chart_axis36.py
|
{
"start": 315,
"end": 1397
}
|
class ____(ExcelComparisonTest):
    """
    Test file created by XlsxWriter against a file created by Excel.
    """

    def setUp(self):
        self.set_filename("chart_axis36.xlsx")

    def test_create_file(self):
        """Test the creation of a simple XlsxWriter file."""
        workbook = Workbook(self.got_filename)
        worksheet = workbook.add_worksheet()

        chart = workbook.add_chart({"type": "column"})
        chart.axis_ids = [45501056, 47505792]

        data = [
            [1, 2, 3, 4, 5],
            [2, 4, 6, 8, 10],
            [3, 6, 9, 12, 15],
        ]

        # One worksheet column (A, B, C) per data series.
        for anchor, values in zip(("A1", "B1", "C1"), data):
            worksheet.write_column(anchor, values)

        for col in ("A", "B", "C"):
            chart.add_series({"values": "=Sheet1!$" + col + "$1:$" + col + "$5"})

        # Hide the x-axis line entirely.
        chart.set_x_axis({"line": {"none": True}})

        worksheet.insert_chart("E9", chart)

        workbook.close()

        self.assertExcelEqual()
|
TestCompareXLSXFiles
|
python
|
py-pdf__pypdf
|
pypdf/constants.py
|
{
"start": 7814,
"end": 8011
}
|
class ____:
    """§8.9.7 of the 1.7 and 2.0 references.

    Abbreviated filter names permitted in inline image dictionaries,
    each mapped to its full stream-filter equivalent.
    """
    AHx = "/AHx"  # ASCIIHexDecode
    A85 = "/A85"  # ASCII85Decode
    LZW = "/LZW"  # LZWDecode
    FL = "/Fl"    # FlateDecode
    RL = "/RL"    # RunLengthDecode
    CCF = "/CCF"  # CCITTFaxDecode
    DCT = "/DCT"  # DCTDecode
|
FilterTypeAbbreviations
|
python
|
walkccc__LeetCode
|
solutions/1119. Remove Vowels from a String/1119.py
|
{
"start": 0,
"end": 95
}
|
class ____:
    def removeVowels(self, s: str) -> str:
        """Return *s* with every lowercase vowel ('aeiou') removed."""
        return ''.join(ch for ch in s if ch not in 'aeiou')
|
Solution
|
python
|
GoogleCloudPlatform__python-docs-samples
|
appengine/standard/ndb/property_subclasses/my_models.py
|
{
"start": 2126,
"end": 2349
}
|
class ____(object):
    """Closed date range; omitting *last* yields a single-day range."""

    def __init__(self, first, last=None):
        # Inputs must be datetime.date instances (asserts preserved as-is).
        assert isinstance(first, date)
        assert last is None or isinstance(last, date)
        self.first = first
        # A missing *last* collapses the range to the single day *first*.
        self.last = first if last is None else last
|
FuzzyDate
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.