language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | dask__distributed | distributed/worker_state_machine.py | {
"start": 27622,
"end": 27718
} | class ____(StateMachineEvent):
__slots__ = ("key",)
key: Key
@dataclass
| CancelComputeEvent |
python | django__django | django/db/models/lookups.py | {
"start": 16999,
"end": 17132
} | class ____(IntegerFieldOverflow, GreaterThan):
underflow_exception = FullResultSet
@IntegerField.register_lookup
| IntegerGreaterThan |
python | run-llama__llama_index | llama-index-integrations/protocols/llama-index-protocols-ag-ui/llama_index/protocols/ag_ui/events.py | {
"start": 1043,
"end": 1154
} | class ____(ToolCallStartEvent, Event):
type: EventType = EventType.TOOL_CALL_START
| ToolCallStartWorkflowEvent |
python | getsentry__sentry | tests/sentry/users/web/test_accounts_form.py | {
"start": 175,
"end": 1821
} | class ____(TestCase):
def test_placeholder_username(self) -> None:
username = "test_user"
user = self.create_user(username=username)
relocation_form = RelocationForm(user=user)
assert relocation_form.fields["username"].widget.attrs["placeholder"] == username
def test_clean_username_use_default_username_if_none_entered(self) -> None:
username = "test_user"
user = self.create_user(username=username)
relocation_form = RelocationForm(user=user)
relocation_form.cleaned_data = {"username": ""}
assert relocation_form.clean_username() == username
def test_clean_username_strips_special_chars(self) -> None:
username = "test_user"
user = self.create_user(username=username)
relocation_form = RelocationForm(user=user)
relocation_form.cleaned_data = {"username": "\n\rnew_u\n\n \0se\r\trname\n\n\t\r\0\n"}
assert relocation_form.clean_username() == "new_username"
def test_clean_username_forces_lowercase(self) -> None:
username = "test_user"
user = self.create_user(username=username)
relocation_form = RelocationForm(user=user)
relocation_form.cleaned_data = {"username": "nEw_UsErname"}
assert relocation_form.clean_username() == "new_username"
def test_clean_password(self) -> None:
username = "test_user"
user = self.create_user(username=username)
relocation_form = RelocationForm(user=user)
relocation_form.cleaned_data = {"password": "new_password"}
assert relocation_form.clean_password() == "new_password"
| TestRelocationForm |
python | django__django | django/contrib/auth/hashers.py | {
"start": 6899,
"end": 10669
} | class ____:
"""
Abstract base class for password hashers
When creating your own hasher, you need to override algorithm,
verify(), encode() and safe_summary().
PasswordHasher objects are immutable.
"""
algorithm = None
library = None
salt_entropy = 128
def _load_library(self):
if self.library is not None:
if isinstance(self.library, (tuple, list)):
name, mod_path = self.library
else:
mod_path = self.library
try:
module = importlib.import_module(mod_path)
except ImportError as e:
raise ValueError(
"Couldn't load %r algorithm library: %s"
% (self.__class__.__name__, e)
)
return module
raise ValueError(
"Hasher %r doesn't specify a library attribute" % self.__class__.__name__
)
def salt(self):
"""
Generate a cryptographically secure nonce salt in ASCII with an entropy
of at least `salt_entropy` bits.
"""
# Each character in the salt provides
# log_2(len(alphabet)) bits of entropy.
char_count = math.ceil(self.salt_entropy / math.log2(len(RANDOM_STRING_CHARS)))
return get_random_string(char_count, allowed_chars=RANDOM_STRING_CHARS)
def verify(self, password, encoded):
"""Check if the given password is correct."""
raise NotImplementedError(
"subclasses of BasePasswordHasher must provide a verify() method"
)
def _check_encode_args(self, password, salt):
if password is None:
raise TypeError("password must be provided.")
if not salt or "$" in force_str(salt): # salt can be str or bytes.
raise ValueError("salt must be provided and cannot contain $.")
def encode(self, password, salt):
"""
Create an encoded database value.
The result is normally formatted as "algorithm$salt$hash" and
must be fewer than 128 characters.
"""
raise NotImplementedError(
"subclasses of BasePasswordHasher must provide an encode() method"
)
def decode(self, encoded):
"""
Return a decoded database value.
The result is a dictionary and should contain `algorithm`, `hash`, and
`salt`. Extra keys can be algorithm specific like `iterations` or
`work_factor`.
"""
raise NotImplementedError(
"subclasses of BasePasswordHasher must provide a decode() method."
)
def safe_summary(self, encoded):
"""
Return a summary of safe values.
The result is a dictionary and will be used where the password field
must be displayed to construct a safe representation of the password.
"""
raise NotImplementedError(
"subclasses of BasePasswordHasher must provide a safe_summary() method"
)
def must_update(self, encoded):
return False
def harden_runtime(self, password, encoded):
"""
Bridge the runtime gap between the work factor supplied in `encoded`
and the work factor suggested by this hasher.
Taking PBKDF2 as an example, if `encoded` contains 20000 iterations and
`self.iterations` is 30000, this method should run password through
another 10000 iterations of PBKDF2. Similar approaches should exist
for any hasher that has a work factor. If not, this method should be
defined as a no-op to silence the warning.
"""
warnings.warn(
"subclasses of BasePasswordHasher should provide a harden_runtime() method"
)
| BasePasswordHasher |
python | PyCQA__pylint | tests/functional/ext/docparams/parameter/missing_param_doc_required_Sphinx.py | {
"start": 3021,
"end": 3414
} | class ____:
def __init__(self, x, y): # [missing-param-doc, missing-type-doc]
"""test_constr_params_in_init_sphinx
Example of a class with missing constructor parameter documentation
(Sphinx style)
Everything is completely analogous to functions.
:param y: bla
missing constructor parameter documentation
"""
pass
| ClassFoo |
python | getsentry__sentry | src/sentry/notifications/notificationcontroller.py | {
"start": 1674,
"end": 23110
} | class ____:
_setting_options: Iterable[NotificationSettingOption] = []
_setting_providers: Iterable[NotificationSettingProvider] = []
def __init__(
self,
recipients: Iterable[Recipient],
project_ids: Iterable[int] | None = None,
organization_id: int | None = None,
type: NotificationSettingEnum | None = None,
provider: ExternalProviderEnum | None = None,
) -> None:
self.project_ids = project_ids
self.organization_id = organization_id
self.type = type
self.provider = provider
self.recipients = list(recipients)
if self.recipients:
query = self._get_query()
type_filter = Q(type=self.type.value) if self.type else Q()
provider_filter = Q(provider=self.provider.value) if self.provider else Q()
self._setting_options = list(
NotificationSettingOption.objects.filter(query & type_filter)
)
self._setting_providers = list(
NotificationSettingProvider.objects.filter(query & type_filter & provider_filter)
)
else:
self._setting_options = []
self._setting_providers = []
@property
def get_all_setting_options(self) -> Iterable[NotificationSettingOption]:
return self._setting_options
@property
def get_all_setting_providers(self) -> Iterable[NotificationSettingProvider]:
return self._setting_providers
def _get_query(self) -> Q:
"""
Generates a query for all settings for a project, org, user, or team.
Args:
recipients: The recipients of the notification settings (user or team).
projects_ids: The projects to get notification settings for.
organization_id: The organization to get notification settings for.
"""
if not self.recipients:
raise Exception("recipient, team_ids, or user_ids must be provided")
user_ids, team_ids = [], []
for recipient in self.recipients:
if recipient_is_user(recipient):
user_ids.append(recipient.id)
elif recipient_is_team(recipient):
team_ids.append(recipient.id)
if not user_ids and not team_ids:
raise Exception("recipients must be either user or team")
project_settings = (
Q(
(Q(user_id__in=user_ids) | Q(team_id__in=team_ids)),
scope_type=NotificationScopeEnum.PROJECT.value,
scope_identifier__in=self.project_ids,
)
if self.project_ids
else Q()
)
org_settings = (
Q(
(Q(user_id__in=user_ids) | Q(team_id__in=team_ids)),
scope_type=NotificationScopeEnum.ORGANIZATION.value,
scope_identifier=self.organization_id,
)
if self.organization_id
else Q()
)
user_settings = (
Q(
Q(user_id__in=user_ids),
scope_type=NotificationScopeEnum.USER.value,
scope_identifier__in=user_ids,
)
if user_ids
else Q()
)
team_settings = (
Q(
Q(team_id__in=team_ids),
scope_type=NotificationScopeEnum.TEAM.value,
scope_identifier__in=team_ids,
)
if team_ids
else Q()
)
return project_settings | org_settings | user_settings | team_settings
def _filter_options(
self,
**kwargs,
) -> list[NotificationSettingOption]:
return [
setting
for setting in self.get_all_setting_options
if all(getattr(setting, arg) == kwargs[arg] for arg in kwargs)
]
def _filter_providers(
self,
**kwargs,
) -> list[NotificationSettingProvider]:
return [
setting
for setting in self.get_all_setting_providers
if all(getattr(setting, arg) == kwargs[arg] for arg in kwargs)
]
def _get_layered_setting_options(
self,
project_id: int | None = None,
**kwargs,
) -> MutableMapping[
Recipient, MutableMapping[NotificationSettingEnum, NotificationSettingsOptionEnum]
]:
"""
Returns a mapping of the most specific notification setting options for the given recipients and scopes.
Note that this includes default settings for any notification types that are not set.
Args:
setting_type: If specified, only return settings of this type.
"""
if self.project_ids and len(list(self.project_ids)) > 1 and not project_id:
raise Exception("Must specify project_id if controller has more than 1 projects")
most_specific_setting_options: MutableMapping[
Recipient, MutableMapping[NotificationSettingEnum, NotificationSettingsOptionEnum]
] = defaultdict(
lambda: defaultdict(
lambda: NotificationSettingsOptionEnum.DEFAULT
) # Use lambda to return the default enum value
)
for recipient in self.recipients:
# get the settings for this user/team
filter_kwargs = kwargs.copy()
if recipient_is_user(recipient):
filter_kwargs["user_id"] = recipient.id
elif recipient_is_team(recipient):
filter_kwargs["team_id"] = recipient.id
local_settings = self._filter_options(**filter_kwargs)
local_settings.sort(key=sort_settings_by_scope)
most_specific_recipient_options = most_specific_setting_options[recipient]
for setting in local_settings:
# if we have a project_id, make sure the setting is for that project since
# the controller can be scoped for multiple projects
if (
project_id is not None
and setting.scope_type == NotificationScopeEnum.PROJECT.value
):
if setting.scope_identifier != project_id:
continue
# sort the settings by scope type, with the most specific scope last so we override with the most specific value
most_specific_recipient_options[NotificationSettingEnum(setting.type)] = (
NotificationSettingsOptionEnum(setting.value)
)
# if we have no settings for this user/team, use the defaults
for type, default in get_type_defaults().items():
if type not in most_specific_recipient_options:
most_specific_recipient_options[type] = default
return most_specific_setting_options
def _get_layered_setting_providers(
self,
project_id: int | None = None,
**kwargs,
) -> MutableMapping[
Recipient,
MutableMapping[
NotificationSettingEnum,
MutableMapping[str, NotificationSettingsOptionEnum],
],
]:
"""
Returns a mapping of the most specific notification setting providers for the given recipients and scopes.
Note that this includes default settings for any notification types that are not set.
"""
if self.project_ids and len(list(self.project_ids)) > 2 and not project_id:
raise Exception("Must specify project_id if controller has more than 2 projects")
# Now, define your variable using the outermost defaultdict
most_specific_setting_providers: MutableMapping[
Recipient,
MutableMapping[
NotificationSettingEnum,
MutableMapping[str, NotificationSettingsOptionEnum],
],
] = defaultdict(
lambda: defaultdict(
lambda: defaultdict(
lambda: NotificationSettingsOptionEnum.DEFAULT
) # Use lambda to return the default enum value
)
)
for recipient in self.recipients:
# get the settings for this user/team
filter_kwargs = kwargs.copy()
if recipient_is_user(recipient):
filter_kwargs["user_id"] = recipient.id
elif recipient_is_team(recipient):
filter_kwargs["team_id"] = recipient.id
local_settings = self._filter_providers(**filter_kwargs)
local_settings.sort(key=sort_settings_by_scope)
most_specific_recipient_providers = most_specific_setting_providers[recipient]
for setting in local_settings:
# if we have a project_id, make sure the setting is for that project since
# the controller can be scoped for multiple projects
if (
project_id is not None
and setting.scope_type == NotificationScopeEnum.PROJECT.value
):
if setting.scope_identifier != project_id:
continue
# sort the settings by scope type, with the most specific scope last so we override with the most specific value
most_specific_recipient_providers[NotificationSettingEnum(setting.type)][
ExternalProviderEnum(setting.provider).value
] = NotificationSettingsOptionEnum(setting.value)
# if we have no settings for this user, use the defaults
for type in NotificationSettingEnum:
for provider_str in PERSONAL_NOTIFICATION_PROVIDERS:
provider = ExternalProviderEnum(provider_str)
if provider_str not in most_specific_recipient_providers[type]:
if recipient_is_team(recipient):
most_specific_recipient_providers[type][
provider_str
] = NotificationSettingsOptionEnum.NEVER
else:
most_specific_recipient_providers[type][provider_str] = (
get_default_for_provider(type, provider)
)
return most_specific_setting_providers
def get_combined_settings(
self,
type: NotificationSettingEnum | None = None,
actor_type: ActorType | None = None,
project_id: int | None = None,
) -> MutableMapping[
Recipient,
MutableMapping[
NotificationSettingEnum,
MutableMapping[str, NotificationSettingsOptionEnum],
],
]:
"""
Returns the co-leaved settings between the setting options and setting providers
It is as nested as _get_layered_setting_providers by applying the value from the options
to the provider map.
"""
if self.type and type != self.type:
raise Exception("Type mismatch: the provided type differs from the controller type")
kwargs: MutableMapping[str, str] = {}
if type:
kwargs["type"] = type.value
types_to_search = [type] if type else list(NotificationSettingEnum)
setting_options_map = self._get_layered_setting_options(project_id=project_id, **kwargs)
setting_providers_map = self._get_layered_setting_providers(project_id=project_id, **kwargs)
result: MutableMapping[
Recipient,
MutableMapping[
NotificationSettingEnum,
MutableMapping[str, NotificationSettingsOptionEnum],
],
] = defaultdict(
lambda: defaultdict(
lambda: defaultdict(
lambda: NotificationSettingsOptionEnum.DEFAULT
) # Use lambda to return the default enum value
)
)
for recipient, recipient_options_map in setting_options_map.items():
# check actor type against recipient type
if actor_type:
if actor_type == ActorType.USER and recipient_is_team(recipient):
continue
if actor_type == ActorType.TEAM and recipient_is_user(recipient):
continue
for type in types_to_search:
option_value = recipient_options_map[type]
if option_value == NotificationSettingsOptionEnum.NEVER:
continue
provider_options_map = setting_providers_map[recipient][type]
for provider, provider_value in provider_options_map.items():
if provider_value == NotificationSettingsOptionEnum.NEVER:
continue
# use the option value here as it has more specific information
result[recipient][type][provider] = option_value
return result
def get_notification_recipients(
self,
type: NotificationSettingEnum,
actor_type: ActorType | None = None,
project_id: int | None = None,
) -> Mapping[ExternalProviders, set[Actor]]:
"""
Returns the recipients that should be notified for each provider,
filtered by the given notification type.
Args:
type: The notification type to filter providers and recipients by.
"""
combined_settings = self.get_combined_settings(
type=type, actor_type=actor_type, project_id=project_id
)
recipients: Mapping[ExternalProviders, set[Actor]] = defaultdict(set)
for recipient, type_map in combined_settings.items():
actor = Actor.from_object(recipient)
for type, provider_map in type_map.items():
for provider, value in provider_map.items():
if value == NotificationSettingsOptionEnum.NEVER:
continue
recipients[EXTERNAL_PROVIDERS_REVERSE_VALUES[provider]].add(actor)
return recipients
def get_settings_for_user_by_projects(
self,
user: Recipient,
type: NotificationSettingEnum | None = None,
) -> MutableMapping[
int,
MutableMapping[
NotificationSettingEnum,
MutableMapping[str, NotificationSettingsOptionEnum],
],
]:
"""
Returns a mapping of project IDs to enabled notification settings for the given user
with an optional type filter
"""
if not self.project_ids:
raise Exception("Must specify project_ids")
result: MutableMapping[
int,
MutableMapping[
NotificationSettingEnum,
MutableMapping[str, NotificationSettingsOptionEnum],
],
] = defaultdict(
lambda: defaultdict(
lambda: defaultdict(
lambda: NotificationSettingsOptionEnum.DEFAULT
) # Use lambda to return the default enum value
)
)
for project_id in self.project_ids:
if not isinstance(project_id, int):
raise Exception("project_ids must be a list of integers")
combined_settings = self.get_combined_settings(type=type, project_id=project_id)
# take the settings for this user and apply it to the project
result[project_id] = combined_settings[user]
return result
def get_subscriptions_status_for_projects(
self,
user: Recipient,
project_ids: Iterable[int],
type: NotificationSettingEnum | None = None,
) -> Mapping[int, GroupSubscriptionStatus]:
"""
Returns whether the user is subscribed for each project.
{project_id -> (is_disabled, is_active, has only inactive subscriptions)}
"""
setting_type = type or self.type
if not setting_type:
raise Exception("Must specify type")
enabled_settings = self.get_settings_for_user_by_projects(user, type=type)
subscription_status_for_projects = {}
for project, type_setting in enabled_settings.items():
has_setting = False
if project not in project_ids:
continue
for t, setting in type_setting.items():
if t != setting_type:
continue
has_setting = True
subscription_status_for_projects[project] = GroupSubscriptionStatus(
is_disabled=setting == {},
is_active=any(
value == NotificationSettingsOptionEnum.ALWAYS for value in setting.values()
),
has_only_inactive_subscriptions=all(
value == NotificationSettingsOptionEnum.NEVER for value in setting.values()
),
)
if not has_setting:
subscription_status_for_projects[project] = GroupSubscriptionStatus(
is_disabled=True, is_active=False, has_only_inactive_subscriptions=True
)
return subscription_status_for_projects
def get_participants(
self,
) -> MutableMapping[Actor, MutableMapping[ExternalProviders, NotificationSettingsOptionEnum]]:
"""
Returns a mapping of recipients to the providers they should be notified on.
Note that this returns the ExternalProviders int enum instead of the ExternalProviderEnum string.
This helper is intended to be used with ParticipantMap, which expected int values.
"""
if not self.type:
raise Exception("Must specify type")
combined_settings = self.get_combined_settings(type=self.type)
user_to_providers: MutableMapping[
Actor, MutableMapping[ExternalProviders, NotificationSettingsOptionEnum]
] = defaultdict(dict)
for recipient, setting_map in combined_settings.items():
actor = Actor.from_object(recipient)
provider_map = setting_map[self.type]
user_to_providers[actor] = {
EXTERNAL_PROVIDERS_REVERSE_VALUES[provider]: value
for provider, value in provider_map.items()
}
return user_to_providers
def user_has_any_provider_settings(self, provider: ExternalProviderEnum | None = None) -> bool:
"""
Returns whether the recipient has any notification settings for the given provider.
Args:
recipient: The recipient of the notification settings (user or team).
provider: The provider to check for.
"""
provider = provider or self.provider
if not provider:
raise Exception("Must specify provider")
settings = self.get_all_setting_providers
for setting in settings:
if setting.provider != provider.value:
continue
if setting.value == NotificationSettingsOptionEnum.ALWAYS.value:
return True
return False
def get_notification_value_for_recipient_and_type(
self, recipient: Recipient, type: NotificationSettingEnum
) -> NotificationSettingsOptionEnum:
"""
Returns the notification setting value for the given recipient and type.
Args:
recipient: The recipient of the notification settings (user or team).
type: The notification type to filter providers and recipients by.
"""
if self.type and type != self.type:
raise Exception("Type mismatch: the provided type differs from the controller type")
option_value_by_recipient_by_type = self._get_layered_setting_options(type=type.value)
option_value_by_type = option_value_by_recipient_by_type[recipient]
value = option_value_by_type[type]
return value
def get_notification_provider_value_for_recipient_and_type(
self, recipient: Recipient, type: NotificationSettingEnum, provider: ExternalProviderEnum
) -> NotificationSettingsOptionEnum:
"""
Returns the notification setting value for the given recipient and type.
Args:
recipient: The recipient of the notification settings (user or team).
type: The notification type to filter providers and recipients by.
"""
provider_str = provider.value
if self.type and type != self.type:
raise Exception("Type mismatch: the provided type differs from the controller type")
setting_providers = self._get_layered_setting_providers(type=type.value)
return setting_providers[recipient][type][provider_str]
def get_users_for_weekly_reports(self) -> list[int]:
if not self.organization_id:
raise Exception("Must specify organization_id")
if self.type != NotificationSettingEnum.REPORTS:
raise Exception(f"Type mismatch: the controller was initialized with type: {self.type}")
recipient_set = self.get_notification_recipients(
type=NotificationSettingEnum.REPORTS,
# only look at users
actor_type=ActorType.USER,
)[
ExternalProviders.EMAIL
] # email only
return [recipient.id for recipient in recipient_set]
| NotificationController |
python | realpython__materials | wordcount/tests/realpython/resources.py | {
"start": 181,
"end": 416
} | class ____:
url: str
title: str
@property
def title_pretty(self):
return self.title
def __str__(self) -> str:
return f"[{self.title_pretty}]({self.url})"
@dataclass(unsafe_hash=True)
| ExternalResource |
python | pytorch__pytorch | test/inductor/test_decompose_mem_bound_mm.py | {
"start": 1794,
"end": 16268
} | class ____(TestCase):
def __init__(self, method_name="runTest", methodName="runTest"):
super().__init__(method_name, methodName)
self.atol = 1e-3
self.rtol = 1e-3
def setup_tolerance(self, rtol=None, atol=None):
if rtol is None:
rtol = self.rtol
if atol is None:
atol = self.atol
self.rtol = rtol
self.atol = atol
def compare_dict_tensors(self, ref_dict, res_dict, rtol=None, atol=None):
self.setup_tolerance(rtol, atol)
if len(set(ref_dict.keys())) != len(set(res_dict.keys())):
return False
for key1 in ref_dict:
key2 = "_orig_mod." + key1
assert key2 in res_dict, f"{key1} does not exist in traced module"
if not torch.allclose(
ref_dict[key1], res_dict[key2], rtol=self.rtol, atol=self.atol
):
return False
return True
def compare_pred(self, module, traced, input, rtol=None, atol=None):
self.setup_tolerance(rtol, atol)
ref = module(*input)
res = traced(*input)
self.assertEqual(ref, res, rtol=self.rtol, atol=self.atol)
def compare_parameters(self, module, traced, rtol=None, atol=None):
self.setup_tolerance(rtol, atol)
ref_params = dict(module.named_parameters())
res_params = dict(traced.named_parameters())
self.assertTrue(
self.compare_dict_tensors(
ref_params, res_params, rtol=self.rtol, atol=self.atol
)
)
def compare_gradients(self, module, traced, rtol=None, atol=None):
self.setup_tolerance(rtol, atol)
ref_grad = {key: param.grad for key, param in module.named_parameters()}
res_grad = {key: param.grad for key, param in traced.named_parameters()}
self.assertTrue(
self.compare_dict_tensors(
ref_grad, res_grad, rtol=self.rtol, atol=self.atol
)
)
@parametrize(
"b,m,k,n,should_decompose",
[(10240, 2, 2, 2, True), (10240, 2, 32, 32, False), (2000, 2, 2, 2, False)],
)
def test_decompose_bmm(self, b, m, n, k, should_decompose):
torch._logging.set_logs(inductor=logging.DEBUG)
mat1 = torch.randn(b, m, k, device=GPU_TYPE).requires_grad_(True)
mat2 = torch.randn(b, k, n, device=GPU_TYPE).requires_grad_(True)
counters.clear()
module = MyModule2().to(GPU_TYPE)
traced = torch.compile(module)
input = [mat1, mat2]
ref = module(*input)
res = traced(*input)
self.compare_pred(module, traced, input)
expected_val = 1 if should_decompose and HAS_GPU_AND_TRITON else 0
self.assertEqual(
counters["inductor"]["decompose_bmm"],
expected_val,
)
ref.sum().backward()
res.sum().backward()
self.compare_parameters(module, traced)
self.compare_gradients(module, traced)
expected_val = 3 if should_decompose and HAS_GPU_AND_TRITON else 0
self.assertEqual(
counters["inductor"]["decompose_bmm"],
expected_val,
)
counters.clear()
@parametrize(
"b,m,k,n,should_decompose",
[(1, 2, 2, 2, True), (2, 2, 2, 2, False)],
)
def test_decompose_bmm_cpu(self, b, m, n, k, should_decompose):
torch._logging.set_logs(inductor=logging.DEBUG)
mat1 = torch.randn(b, m, k)
mat2 = torch.randn(b, k, n)
counters.clear()
module = MyModule2()
traced = torch.compile(module)
input = [mat1, mat2]
self.compare_pred(module, traced, input)
expected_val = 1 if should_decompose else 0
self.assertEqual(
counters["inductor"]["decompose_bmm"],
expected_val,
)
counters.clear()
@parametrize(
"m,k,n, should_decompose",
[(20480, 5, 2, True), (20480, 32, 2, False), (2048, 2, 2, False)],
)
@parametrize("has_bias", [True, False])
def test_decompose_linear(self, m, n, k, has_bias, should_decompose):
torch._logging.set_logs(inductor=logging.DEBUG)
input = torch.randn(m, k, device=GPU_TYPE).requires_grad_(True)
counters.clear()
module = MyModule(k, n, has_bias).to(GPU_TYPE)
traced = torch.compile(module)
input = [input]
ref = module(*input)
res = traced(*input)
self.compare_pred(module, traced, input)
expected_val = 1 if should_decompose and HAS_GPU_AND_TRITON else 0
if has_bias:
self.assertEqual(
counters["inductor"]["decompose_addmm"],
expected_val,
)
else:
self.assertEqual(
counters["inductor"]["decompose_mm"],
expected_val,
)
decompose_mm_fwd = counters["inductor"]["decompose_mm"]
ref.sum().backward()
res.sum().backward()
self.compare_parameters(module, traced)
self.compare_gradients(module, traced)
self.assertEqual(
counters["inductor"]["decompose_mm"] - decompose_mm_fwd,
expected_val,
)
counters.clear()
# We have to increase tolerance for navi3 because all fp16, bf16
# GEMMs operations have an accuracy issue caused by hardware limitation
@patch_test_members(
{
"atol": 2e-3 if is_navi3_arch() else 1e-3,
"rtol": 2e-3 if is_navi3_arch() else 1e-3,
}
)
@parametrize(
"m,k,n, should_decompose",
[(20480, 5, 2, True), (20480, 32, 2, False), (2048, 2, 2, False)],
)
@parametrize("has_bias", [True, False])
def test_decompose_linear_mixed_precision(
self, m, n, k, has_bias, should_decompose
):
with torch.amp.autocast(device_type=GPU_TYPE, dtype=torch.bfloat16):
torch._logging.set_logs(inductor=logging.DEBUG)
input = torch.randn(m, k, device=GPU_TYPE).requires_grad_(True)
counters.clear()
module = MyModule(k, n, has_bias).to(GPU_TYPE)
traced = torch.compile(module)
input = [input]
ref = module(*input)
res = traced(*input)
self.compare_pred(module, traced, input)
expected_val = 1 if should_decompose and HAS_GPU_AND_TRITON else 0
if has_bias:
self.assertEqual(
counters["inductor"]["decompose_addmm"],
expected_val,
)
else:
self.assertEqual(
counters["inductor"]["decompose_mm"],
expected_val,
)
decompose_mm_fwd = counters["inductor"]["decompose_mm"]
ref.sum().backward()
res.sum().backward()
self.compare_parameters(module, traced)
self.compare_gradients(module, traced)
self.assertEqual(
counters["inductor"]["decompose_mm"] - decompose_mm_fwd,
expected_val,
)
counters.clear()
@parametrize(
"m,k,n, should_decompose",
[(20480, 5, 2, True), (20480, 32, 2, False), (2048, 2, 2, False)],
)
@parametrize("has_bias", [True, False])
def test_decompose_mm(self, m, n, k, has_bias, should_decompose):
torch._logging.set_logs(inductor=logging.DEBUG)
mat1 = torch.randn(m, k, device=GPU_TYPE).requires_grad_(True)
mat2 = torch.randn(k, n, device=GPU_TYPE).requires_grad_(True)
counters.clear()
module = MyModule3().to(GPU_TYPE)
traced = torch.compile(module)
input = [mat1, mat2]
ref = module(*input)
res = traced(*input)
self.compare_pred(module, traced, input)
expected_val = 1 if should_decompose and HAS_GPU_AND_TRITON else 0
self.assertEqual(
counters["inductor"]["decompose_mm"],
expected_val,
)
decompose_mm_fwd = counters["inductor"]["decompose_mm"]
ref.sum().backward()
res.sum().backward()
self.compare_parameters(module, traced)
self.compare_gradients(module, traced)
expected_val = 1 if should_decompose and HAS_GPU_AND_TRITON else 0
self.assertEqual(
counters["inductor"]["decompose_mm"] - decompose_mm_fwd,
expected_val,
)
counters.clear()
@parametrize(
"m,k,n, should_decompose",
[(1, 64, 16, True), (2, 64, 16, False), (1, 64, 32, True)],
)
def test_decompose_mm_cpu(self, m, n, k, should_decompose):
torch._logging.set_logs(inductor=logging.DEBUG)
mat1 = torch.randn(m, k)
mat2 = torch.randn(k, n)
counters.clear()
module = MyModule3()
traced = torch.compile(module)
input = [mat1, mat2]
self.compare_pred(module, traced, input)
expected_val = 1 if should_decompose else 0
self.assertEqual(
counters["inductor"]["decompose_mm"],
expected_val,
)
counters.clear()
# We have to increase tolerance for navi3 because all fp16, bf16
# GEMMs operations have an accuracy issue caused by hardware limitation
@patch_test_members(
{
"atol": 3e-3 if is_navi3_arch() else 1e-3,
"rtol": 4e-3 if is_navi3_arch() else 1e-3,
}
)
@parametrize(
"m,k,n, should_decompose",
[(20480, 5, 2, True), (20480, 32, 2, False), (2048, 2, 2, False)],
)
@parametrize("has_bias", [True, False])
def test_decompose_mm_mixed_precision(self, m, n, k, has_bias, should_decompose):
with torch.amp.autocast(device_type=GPU_TYPE, dtype=torch.bfloat16):
torch._logging.set_logs(inductor=logging.DEBUG)
mat1 = torch.randn(m, k, device=GPU_TYPE).requires_grad_(True)
mat2 = torch.randn(k, n, device=GPU_TYPE).requires_grad_(True)
counters.clear()
module = MyModule3().to(GPU_TYPE)
traced = torch.compile(module)
input = [mat1, mat2]
ref = module(*input)
res = traced(*input)
self.compare_pred(module, traced, input)
expected_val = 1 if should_decompose and HAS_GPU_AND_TRITON else 0
self.assertEqual(
counters["inductor"]["decompose_mm"],
expected_val,
)
decompose_mm_fwd = counters["inductor"]["decompose_mm"]
ref.sum().backward()
res.sum().backward()
self.compare_parameters(module, traced)
self.compare_gradients(module, traced)
expected_val = 1 if should_decompose and HAS_GPU_AND_TRITON else 0
self.assertEqual(
counters["inductor"]["decompose_mm"] - decompose_mm_fwd,
expected_val,
)
counters.clear()
@unittest.skip
@parametrize("m,k,n, should_decompose", [(20480, 5, 2, True)])
@parametrize("has_bias", [True, False])
def test_dynamic_shape(self, m, n, k, has_bias, should_decompose):
torch._logging.set_logs(inductor=logging.DEBUG)
input = torch.randn(m, k, device=GPU_TYPE).requires_grad_(True)
counters.clear()
module = MyModule(k, n, has_bias).to(GPU_TYPE)
traced = torch.compile(module, dynamic=True)
input = [input]
ref = module(*input)
res = traced(*input)
self.compare_pred(module, traced, input)
expected_val = 1 if should_decompose and HAS_GPU_AND_TRITON else 0
if has_bias:
self.assertEqual(
counters["inductor"]["decompose_addmm"],
expected_val,
)
ref.sum().backward()
res.sum().backward()
self.compare_parameters(module, traced)
self.compare_gradients(module, traced)
expected_val = 0
if HAS_GPU_AND_TRITON:
expected_val = 1 if has_bias else 2
self.assertEqual(
counters["inductor"]["decompose_mm"],
expected_val,
)
counters.clear()
def test_realize_input(self):
m = 20480
k = 5
n = 2
torch._logging.set_logs(inductor=logging.DEBUG)
input1 = torch.randn(m, k, device=GPU_TYPE).T.contiguous()
input2 = torch.randn(k, n, device=GPU_TYPE)
@torch.compile()
def foo(x, y):
return x.T.contiguous() @ y
_, code = run_and_get_code(foo, input1, input2)
# two kernels generated
FileCheck().check_count(".run(", 2, exactly=True).run(code[0])
def test_check_device(self):
m = 5
k = 5
n = 2
torch._logging.set_logs(inductor=logging.DEBUG)
input1 = torch.randn(m, k, device=GPU_TYPE)
input2 = torch.randn(k, n, device=GPU_TYPE)
self.assertTrue(check_device(input1, input2, device=GPU_TYPE))
self.assertFalse(check_device(input1, input2, device="cpu"))
input1 = torch.randn(m, k)
input2 = torch.randn(k, n)
self.assertTrue(check_device(input1, input2, device="cpu"))
self.assertFalse(check_device(input1, input2))
input1 = torch.randn(m, k, device=GPU_TYPE)
input2 = torch.randn(k, n)
self.assertFalse(check_device(input1, input2, device="gpu"))
self.assertFalse(check_device(input1, input2, device="cpu"))
self.assertFalse(check_device(input1, input2, device="mtia"))
@torch._inductor.config.patch(
post_grad_fusion_options={
"decompose_mm_pass": {"skip_dynamic_shape_dim_check": True},
}
)
def test_dynamic_shape_decompose_addmm(self):
m, k, n = 19494144, 8, 8
input = torch.randn(m, k, device=GPU_TYPE).requires_grad_(False)
weight = torch.randn(k, n, device=GPU_TYPE).requires_grad_(False)
bias = torch.randn(n, device=GPU_TYPE).requires_grad_(False)
counters.clear()
module = TestDecomposeAddMM().to(GPU_TYPE)
traced = torch.compile(module, dynamic=True)
input = [bias, input, weight]
self.compare_pred(module, traced, input)
self.assertEqual(
counters["inductor"]["decompose_addmm"],
1,
)
counters.clear()
if __name__ == "__main__":
run_tests()
| TestDecomposeMemMM |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/orm/interfaces.py | {
"start": 4199,
"end": 4388
} | class ____(
roles.ColumnsClauseRole, roles.TypedColumnsClauseRole[_T]
):
__slots__ = ()
_role_name = "ORM mapped entity, aliased entity, or Column expression"
| ORMColumnsClauseRole |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/operators/dataplex.py | {
"start": 72627,
"end": 78650
} | class ____(GoogleCloudBaseOperator):
"""
Gets a DataScan Data Profile Job resource.
:param project_id: Required. The ID of the Google Cloud project that the lake belongs to.
:param region: Required. The ID of the Google Cloud region that the lake belongs to.
:param data_scan_id: Required. Data Profile scan identifier.
:param job_id: Optional. Data Profile scan job identifier.
:param api_version: The version of the api that will be requested for example 'v1'.
:param retry: A retry object used to retry requests. If `None` is specified, requests
will not be retried.
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
``retry`` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
:param gcp_conn_id: The connection ID to use when fetching connection info.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
:param wait_for_results: Flag indicating whether to wait for the result of a job execution
or to return the job in its current state.
:param result_timeout: Value in seconds for which operator will wait for the Data Profile scan result
when the flag `wait_for_results = True`.
Throws exception if there is no result found after specified amount of seconds.
:return: Dict representing DataScanJob.
When the job completes with a successful status, information about the Data Profile result
is available.
"""
template_fields = ("project_id", "data_scan_id", "impersonation_chain")
def __init__(
self,
project_id: str,
region: str,
data_scan_id: str,
job_id: str | None = None,
api_version: str = "v1",
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
wait_for_results: bool = True,
result_timeout: float = 60.0 * 10,
*args,
**kwargs,
) -> None:
super().__init__(*args, **kwargs)
self.project_id = project_id
self.region = region
self.data_scan_id = data_scan_id
self.job_id = job_id
self.api_version = api_version
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
self.wait_for_results = wait_for_results
self.result_timeout = result_timeout
def execute(self, context: Context) -> dict:
hook = DataplexHook(
gcp_conn_id=self.gcp_conn_id,
api_version=self.api_version,
impersonation_chain=self.impersonation_chain,
)
# fetch the last job
if not self.job_id:
jobs = hook.list_data_scan_jobs(
project_id=self.project_id,
region=self.region,
data_scan_id=self.data_scan_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
job_ids = [DataScanJob.to_dict(job) for job in jobs]
if not job_ids:
raise AirflowException("There are no jobs, you should create one before.")
job_id = job_ids[0]["name"]
self.job_id = job_id.split("/")[-1]
if self.wait_for_results:
job = hook.wait_for_data_scan_job(
job_id=self.job_id,
data_scan_id=self.data_scan_id,
project_id=self.project_id,
region=self.region,
result_timeout=self.result_timeout,
)
else:
job = hook.get_data_scan_job(
project_id=self.project_id,
region=self.region,
job_id=self.job_id,
data_scan_id=self.data_scan_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
if job.state == DataScanJob.State.SUCCEEDED:
self.log.info("Data Profile job executed successfully")
else:
self.log.info("Data Profile job execution returned status: %s", job.state)
result = DataScanJob.to_dict(job)
result["state"] = DataScanJob.State(result["state"]).name
return result
def execute_complete(self, context, event=None) -> None:
"""
Act as a callback for when the trigger fires - returns immediately.
Relies on trigger to throw an exception, otherwise it assumes execution was
successful.
"""
job_state = event["job_state"]
job_id = event["job_id"]
job = event["job"]
if job_state == DataScanJob.State.FAILED:
raise AirflowException(f"Job failed:\n{job_id}")
if job_state == DataScanJob.State.CANCELLED:
raise AirflowException(f"Job was cancelled:\n{job_id}")
if job_state == DataScanJob.State.SUCCEEDED:
self.log.info("Data Profile job executed successfully")
else:
self.log.info("Data Profile job execution returned status: %s", job_state)
return job
| DataplexGetDataProfileScanResultOperator |
python | lepture__mistune | tests/test_directives.py | {
"start": 2294,
"end": 2954
} | class ____(BaseTestCase):
md = create_markdown(escape=False, plugins=[RSTDirective([Include()])]) # type: ignore[list-item]
def test_html_include(self):
html = self.md.read(os.path.join(ROOT, "include/text.md"))[0]
self.assertIn("Could not include self", html)
self.assertIn("Could not find file", html)
self.assertIn("<div>include html</div>", html)
self.assertIn("<blockquote>", html)
self.assertIn("# Table of Contents", html)
def test_include_missing_source(self):
s = ".. include:: foo.txt"
html = self.md(s)
self.assertIn("Missing source file", html)
| TestDirectiveInclude |
python | python-markdown__markdown | markdown/extensions/sane_lists.py | {
"start": 1336,
"end": 1729
} | class ____(UListProcessor):
""" Override `SIBLING_TAGS` to not include `ol`. """
SIBLING_TAGS = ['ul']
""" Exclude `ol` from list of siblings. """
def __init__(self, parser: blockparser.BlockParser):
super().__init__(parser)
self.CHILD_RE = re.compile(r'^[ ]{0,%d}(([*+-]))[ ]+(.*)' %
(self.tab_length - 1))
| SaneUListProcessor |
python | tensorflow__tensorflow | tensorflow/python/framework/test_util.py | {
"start": 85154,
"end": 85739
} | class ____:
def __getattr__(self, attr):
raise AttributeError(
"Trying to access properties or call methods on the result of "
"self.session(), self.cached_session(), etc while eager execution "
"is enabled. If you're porting this test case to TF 2.0, either "
"adapt the test to work with eager execution or insert a call to "
"tf.disable_eager_execution() in the main() function of this test "
"file.")
# TODO(b/286583977): Set it to True and remove.
_ENABLE_AUTO_BOTH_MODES = False
@tf_export("test.TestCase")
| EagerSessionWarner |
python | py-pdf__pypdf | pypdf/_encryption.py | {
"start": 32710,
"end": 32821
} | class ____:
O: bytes # noqa: E741
U: bytes
OE: bytes
UE: bytes
Perms: bytes
| EncryptionValues |
python | pyinstaller__pyinstaller | PyInstaller/lib/modulegraph/modulegraph.py | {
"start": 24056,
"end": 24163
} | class ____(BaseModule):
"""
Graph node representing a non-namespace package.
"""
pass
| Package |
python | walkccc__LeetCode | solutions/477. Total Hamming Distance/477.py | {
"start": 0,
"end": 259
} | class ____:
def totalHammingDistance(self, nums: list[int]) -> int:
MAX_BIT = 30
ans = 0
for i in range(MAX_BIT):
ones = sum(num & (1 << i) > 0 for num in nums)
zeros = len(nums) - ones
ans += ones * zeros
return ans
| Solution |
python | pypa__pipenv | pipenv/vendor/tomlkit/exceptions.py | {
"start": 1528,
"end": 1766
} | class ____(ParseError):
"""
A date field was improperly specified.
"""
def __init__(self, line: int, col: int) -> None:
message = "Invalid date"
super().__init__(line, col, message=message)
| InvalidDateError |
python | pytorch__pytorch | torch/_export/wrappers.py | {
"start": 847,
"end": 12513
} | class ____(HigherOrderOperator):
def __init__(self):
super().__init__("_export_tracepoint")
def __call__(self, *args, **kwargs):
return super().__call__(*args, **kwargs)
_export_tracepoint = ExportTracepoint()
@_export_tracepoint.py_impl(ProxyTorchDispatchMode)
def export_tracepoint_dispatch_mode(mode, *args, **kwargs):
p_args, p_kwargs = pytree.tree_map(mode.tracer.unwrap_proxy, (args, kwargs))
proxy = mode.tracer.create_proxy(
"call_function", _export_tracepoint, p_args, p_kwargs
)
return track_tensor_tree(args, proxy, constant=None, tracer=mode.tracer)
@_export_tracepoint.py_impl(FakeTensorMode)
def export_tracepoint_fake_tensor_mode(mode, *args, **kwargs):
with mode:
return args
@_export_tracepoint.py_functionalize_impl
def export_tracepoint_functional(ctx, *args, **kwargs):
unwrapped_args = ctx.unwrap_tensors(args)
unwrapped_kwargs = ctx.unwrap_tensors(kwargs)
with ctx.redispatch_to_next():
_export_tracepoint(*unwrapped_args, **unwrapped_kwargs)
return args
_export_tracepoint.py_impl(DispatchKey.Autograd)(
autograd_not_implemented(_export_tracepoint, deferred_error=True)
)
@_export_tracepoint.py_impl(DispatchKey.CPU)
def export_tracepoint_cpu(*args, **kwargs):
return args
def _wrap_submodule(mod, path, module_call_specs):
assert isinstance(mod, torch.nn.Module)
assert path != ""
submodule = torch.fx.graph_module._get_attr(mod, path)
def update_module_call_signatures(path, in_spec, out_spec):
if path in module_call_specs:
assert module_call_specs[path]["in_spec"] == in_spec
assert module_call_specs[path]["out_spec"] == out_spec
module_call_specs[path] = {"in_spec": in_spec, "out_spec": out_spec}
def check_flattened(flat_args):
for a in flat_args:
if not (isinstance(a, (torch.Tensor, str, int, float, bool)) or a is None):
raise AssertionError(
f"Only Tensors or scalars are supported as pytree flattened inputs, got: {a}"
)
def pre_hook(module, args, kwargs):
flat_args, in_spec = pytree.tree_flatten((args, kwargs))
check_flattened(flat_args)
flat_args = _export_tracepoint(*flat_args, kind="module_call_inputs", path=path)
args, kwargs = pytree.tree_unflatten(flat_args, in_spec)
return args, kwargs
def post_hook(module, args, kwargs, res):
_, in_spec = pytree.tree_flatten((args, kwargs))
flat_res, out_spec = pytree.tree_flatten(res)
check_flattened(flat_res)
flat_res = _export_tracepoint(*flat_res, kind="module_call_outputs", path=path)
update_module_call_signatures(path, in_spec, out_spec)
return pytree.tree_unflatten(flat_res, out_spec)
pre_handle = submodule.register_forward_pre_hook(pre_hook, with_kwargs=True)
post_handle = submodule.register_forward_hook(post_hook, with_kwargs=True)
return pre_handle, post_handle
@contextmanager
def _wrap_submodules(f, preserve_signature, module_call_signatures):
handles = []
try:
for path in preserve_signature:
handles.extend(_wrap_submodule(f, path, module_call_signatures))
yield
finally:
for handle in handles:
handle.remove()
def _mark_strict_experimental(cls):
def call(self, *args):
return strict_mode(self, args)
cls.__call__ = call
return cls
def _register_func_spec_proxy_in_tracer(tracer, name, spec):
"""
This is a wrapper utility method on top of tracer to cache the
already registered subclass spec attribute. This is useful because
Subclass.__init__ will be same for each subclass. By default, fx will
create multiple attributes/proxies for given attribute.
"""
fx_name = name + "0"
if hasattr(tracer.root, fx_name):
assert getattr(tracer.root, fx_name) == spec
return tracer.create_proxy("get_attr", fx_name, (), {})
qualname = tracer.get_fresh_qualname(name)
setattr(tracer.root, qualname, spec)
return tracer.create_proxy("get_attr", qualname, (), {})
def _emit_flat_apply_call(
*,
tracer,
spec_name: str,
const_target_for_apply,
graphable_args,
track_value,
call_spec_cache_key: str,
):
# Flatten to graphable form and record the spec on the FX root
flat_args, in_spec = to_graphable(graphable_args)
qualname = tracer.get_fresh_qualname(spec_name) # type: ignore[union-attr]
setattr(tracer.root, qualname, in_spec) # type: ignore[union-attr]
spec_proxy = tracer.create_proxy("get_attr", qualname, (), {})
# Reuse/cached ConstantFunction spec on the root
_, func_spec = pytree.tree_flatten(_ConstantFunction(const_target_for_apply))
func_spec_proxy = _register_func_spec_proxy_in_tracer(
tracer, f"{call_spec_cache_key}_const_func_spec", func_spec
)
# Map runtime args -> proxies (always via tracer.unwrap_proxy now)
flat_proxy_args = pytree.tree_map(tracer.unwrap_proxy, flat_args)
# Emit flat_apply and track result structure
out_proxy = tracer.create_proxy(
"call_function", flat_apply, (func_spec_proxy, spec_proxy, *flat_proxy_args), {}
)
track_tensor_tree(track_value, out_proxy, constant=None, tracer=tracer)
def _is_init(fn):
return callable(fn) and fn.__name__ == "__init__"
def mark_subclass_constructor_exportable_experimental(constructor_subclass):
"""
Experimental decorator that makes subclass to be traceable in export
with pre-dispatch IR. To make your subclass traceble in export, you need to:
1. Implement __init__ method for your subclass (Look at DTensor implementation)
2. Decorate your __init__ method with _mark_constructor_exportable_experimental
3. Put torch._dynamo_disable decorator to prevent dynamo from peeking into its' impl
Example:
class FooTensor(torch.Tensor):
@staticmethod
def __new__(cls, elem, *, requires_grad=False):
# ...
return torch.Tensor._make_subclass(cls, elem, requires_grad=requires_grad)
@torch._dynamo_disable
@mark_subclass_constructor_exportable_experimental
def __init__(self, elem, ...):
# ...
"""
if not _is_init(constructor_subclass):
raise RuntimeError(
f"torch._export.wrappers.mark_constructor_exportable_experimental can only be applied on subclass tensor.__init__"
f"But, you are adding it on {constructor_subclass.__name__} which is not supported. "
f"If __init__ doesn't exist on your subclass, please add it. Look at DTensor.__init__ implementation for example"
)
def wrapper(*args, **kwargs):
constructor_subclass(*args, **kwargs)
if not torch.compiler.is_exporting():
return
if not is_traceable_wrapper_subclass_type(type(args[0])):
assert constructor_subclass.__qualname__.endswith("__init__")
obj_name = constructor_subclass.__qualname__[: -len("__init__")]
raise RuntimeError(
f"Can't intercept {obj_name} in export because this object is not a traceable "
f"tensor subclass. Please look at DTensor.__init__ implementation as an example of proper usage of this API."
)
mode = _maybe_find_pre_dispatch_tf_mode_for_export()
if mode is None:
return
assert isinstance(mode, PreDispatchTorchFunctionMode)
tracer = mode.tracer
subclass = args[0]
graphable = (tuple(args[1:]), kwargs)
spec_name = "_".join(constructor_subclass.__qualname__.lower().split("."))
call_spec_cache_key = type(subclass).__name__.lower()
_emit_flat_apply_call(
tracer=tracer,
spec_name=spec_name,
const_target_for_apply=type(subclass),
graphable_args=graphable,
track_value=subclass, # track the constructed subclass instance
call_spec_cache_key=call_spec_cache_key,
)
return
return wrapper
def allow_in_pre_dispatch_graph(func):
"""
Experimental decorator that adds user function to export pre-dispatch graph. Note that
we only support custom autograd function/subclass constructors today. To use this function:
1. For subclasses:
1. refer to instructions in mark_subclass_constructor_exportable_experimental
2. Define apply method on your custom autograd function and apply this decorator.
Example:
class MyCoolCustomAutogradFunc(autograd.Function):
@classmethod
@torch._export.wrappers.allow_in_pre_dispatch_graph
def apply(cls, *args, **kwargs):
return super(MyCoolCustomAutogradFunc, cls).apply(*args, **kwargs)
"""
if _is_init(func):
return mark_subclass_constructor_exportable_experimental(func)
if not (_is_init(func) or func.__name__ == "apply"):
raise RuntimeError(
f"torch._export.wrappers.allow_in_pre_dispatch_graph can only be applied on subclass tensor.__init_ "
f"or custom_autograd_function.apply. "
f"But, you are adding it on {func.__name__} which is not supported. "
f"If __init__ doesn't exist on your subclass, please add it. Look at DTensor.__init__ implementation for example. "
f"If you are adding it on custom autograd function, please add it on apply method. "
f"If anything else, file an issue on github and we may consider extending our support. "
)
@wraps(func)
def wrapper(*args, **kwargs):
if not torch.compiler.is_exporting():
return func(*args, **kwargs)
if not inspect.isclass(args[0]):
return func(*args, **kwargs)
if not issubclass(args[0], torch.autograd.Function):
return func(*args, **kwargs)
from torch._ops import _get_dispatch_mode_pre_dispatch
mode = _get_dispatch_mode_pre_dispatch(torch._C._TorchDispatchModeKey.PROXY)
if mode is None:
return func(*args, **kwargs)
# Sometimes custom autograd functions can call into HOPs that don't have proxy impl
# at PreDispatch level, so we just dispatch it below to get the concrete result.
include_to_set = torch._C._dispatch_tls_local_include_set().remove(
torch._C.DispatchKey.PreDispatch
)
exclude_to_set = (
torch._C._dispatch_tls_local_exclude_set()
| torch._C.DispatchKeySet(torch._C.DispatchKey.PreDispatch)
)
with torch._C._ForceDispatchKeyGuard(include_to_set, exclude_to_set):
out = func(*args, **kwargs)
assert mode.pre_dispatch, "Should only do this in predispatch"
tracer = mode.tracer
function_cls_name = f"{args[0].__module__}.{args[0].__qualname__}"
graphable = ((function_cls_name, *args[1:]), kwargs)
from torch.export.custom_ops import (
_call_custom_autograd_function_in_pre_dispatch,
)
spec_name = "_".join(function_cls_name.split("."))
call_spec_cache_key = type(
_call_custom_autograd_function_in_pre_dispatch
).__name__.lower()
_emit_flat_apply_call(
tracer=tracer,
spec_name=spec_name,
const_target_for_apply=_call_custom_autograd_function_in_pre_dispatch,
graphable_args=graphable,
track_value=out,
call_spec_cache_key=call_spec_cache_key,
)
return out
return wrapper
| ExportTracepoint |
python | django__django | tests/test_utils/tests.py | {
"start": 76008,
"end": 80868
} | class ____(TestCase):
databases = {"default", "other"}
callback_called = False
def enqueue_callback(self, using="default"):
def hook():
self.callback_called = True
transaction.on_commit(hook, using=using)
def test_no_arguments(self):
with self.captureOnCommitCallbacks() as callbacks:
self.enqueue_callback()
self.assertEqual(len(callbacks), 1)
self.assertIs(self.callback_called, False)
callbacks[0]()
self.assertIs(self.callback_called, True)
def test_using(self):
with self.captureOnCommitCallbacks(using="other") as callbacks:
self.enqueue_callback(using="other")
self.assertEqual(len(callbacks), 1)
self.assertIs(self.callback_called, False)
callbacks[0]()
self.assertIs(self.callback_called, True)
def test_different_using(self):
with self.captureOnCommitCallbacks(using="default") as callbacks:
self.enqueue_callback(using="other")
self.assertEqual(callbacks, [])
def test_execute(self):
with self.captureOnCommitCallbacks(execute=True) as callbacks:
self.enqueue_callback()
self.assertEqual(len(callbacks), 1)
self.assertIs(self.callback_called, True)
def test_pre_callback(self):
def pre_hook():
pass
transaction.on_commit(pre_hook, using="default")
with self.captureOnCommitCallbacks() as callbacks:
self.enqueue_callback()
self.assertEqual(len(callbacks), 1)
self.assertNotEqual(callbacks[0], pre_hook)
def test_with_rolled_back_savepoint(self):
with self.captureOnCommitCallbacks() as callbacks:
try:
with transaction.atomic():
self.enqueue_callback()
raise IntegrityError
except IntegrityError:
# Inner transaction.atomic() has been rolled back.
pass
self.assertEqual(callbacks, [])
def test_execute_recursive(self):
with self.captureOnCommitCallbacks(execute=True) as callbacks:
transaction.on_commit(self.enqueue_callback)
self.assertEqual(len(callbacks), 2)
self.assertIs(self.callback_called, True)
def test_execute_tree(self):
"""
A visualisation of the callback tree tested. Each node is expected to
be visited only once:
└─branch_1
├─branch_2
│ ├─leaf_1
│ └─leaf_2
└─leaf_3
"""
branch_1_call_counter = 0
branch_2_call_counter = 0
leaf_1_call_counter = 0
leaf_2_call_counter = 0
leaf_3_call_counter = 0
def leaf_1():
nonlocal leaf_1_call_counter
leaf_1_call_counter += 1
def leaf_2():
nonlocal leaf_2_call_counter
leaf_2_call_counter += 1
def leaf_3():
nonlocal leaf_3_call_counter
leaf_3_call_counter += 1
def branch_1():
nonlocal branch_1_call_counter
branch_1_call_counter += 1
transaction.on_commit(branch_2)
transaction.on_commit(leaf_3)
def branch_2():
nonlocal branch_2_call_counter
branch_2_call_counter += 1
transaction.on_commit(leaf_1)
transaction.on_commit(leaf_2)
with self.captureOnCommitCallbacks(execute=True) as callbacks:
transaction.on_commit(branch_1)
self.assertEqual(branch_1_call_counter, 1)
self.assertEqual(branch_2_call_counter, 1)
self.assertEqual(leaf_1_call_counter, 1)
self.assertEqual(leaf_2_call_counter, 1)
self.assertEqual(leaf_3_call_counter, 1)
self.assertEqual(callbacks, [branch_1, branch_2, leaf_3, leaf_1, leaf_2])
def test_execute_robust(self):
class MyException(Exception):
pass
def hook():
self.callback_called = True
raise MyException("robust callback")
with self.assertLogs("django.test", "ERROR") as cm:
with self.captureOnCommitCallbacks(execute=True) as callbacks:
transaction.on_commit(hook, robust=True)
self.assertEqual(len(callbacks), 1)
self.assertIs(self.callback_called, True)
log_record = cm.records[0]
self.assertEqual(
log_record.getMessage(),
"Error calling CaptureOnCommitCallbacksTests.test_execute_robust.<locals>."
"hook in on_commit() (robust callback).",
)
self.assertIsNotNone(log_record.exc_info)
raised_exception = log_record.exc_info[1]
self.assertIsInstance(raised_exception, MyException)
self.assertEqual(str(raised_exception), "robust callback")
| CaptureOnCommitCallbacksTests |
python | bottlepy__bottle | bottle.py | {
"start": 81096,
"end": 82787
} | class ____:
def __init__(self, name, impmask):
""" Create a virtual package that redirects imports (see PEP 302). """
self.name = name
self.impmask = impmask
self.module = sys.modules.setdefault(name, new_module(name))
self.module.__dict__.update({
'__file__': __file__,
'__path__': [],
'__all__': [],
'__loader__': self
})
sys.meta_path.append(self)
def find_spec(self, fullname, path, target=None):
if '.' not in fullname: return
if fullname.rsplit('.', 1)[0] != self.name: return
from importlib.util import spec_from_loader
return spec_from_loader(fullname, self)
def find_module(self, fullname, path=None):
if '.' not in fullname: return
if fullname.rsplit('.', 1)[0] != self.name: return
return self
def create_module(self, spec):
return self.load_module(spec.name)
def exec_module(self, module):
pass # This probably breaks importlib.reload() :/
def load_module(self, fullname):
if fullname in sys.modules: return sys.modules[fullname]
modname = fullname.rsplit('.', 1)[1]
realname = self.impmask % modname
__import__(realname)
module = sys.modules[fullname] = sys.modules[realname]
setattr(self.module, modname, module)
module.__loader__ = self
return module
###############################################################################
# Common Utilities #############################################################
###############################################################################
| _ImportRedirect |
python | doocs__leetcode | solution/1500-1599/1569.Number of Ways to Reorder Array to Get Same BST/Solution.py | {
"start": 0,
"end": 693
} | class ____:
def numOfWays(self, nums: List[int]) -> int:
def dfs(nums):
if len(nums) < 2:
return 1
left = [x for x in nums if x < nums[0]]
right = [x for x in nums if x > nums[0]]
m, n = len(left), len(right)
a, b = dfs(left), dfs(right)
return (((c[m + n][m] * a) % mod) * b) % mod
n = len(nums)
mod = 10**9 + 7
c = [[0] * n for _ in range(n)]
c[0][0] = 1
for i in range(1, n):
c[i][0] = 1
for j in range(1, i + 1):
c[i][j] = (c[i - 1][j] + c[i - 1][j - 1]) % mod
return (dfs(nums) - 1 + mod) % mod
| Solution |
python | wandb__wandb | wandb/vendor/pygments/lexers/sql.py | {
"start": 4984,
"end": 6878
} | class ____(PostgresBase, RegexLexer):
"""
Lexer for the PostgreSQL dialect of SQL.
.. versionadded:: 1.5
"""
name = 'PostgreSQL SQL dialect'
aliases = ['postgresql', 'postgres']
mimetypes = ['text/x-postgresql']
flags = re.IGNORECASE
tokens = {
'root': [
(r'\s+', Text),
(r'--.*\n?', Comment.Single),
(r'/\*', Comment.Multiline, 'multiline-comments'),
(r'(' + '|'.join(s.replace(" ", "\s+")
for s in DATATYPES + PSEUDO_TYPES)
+ r')\b', Name.Builtin),
(words(KEYWORDS, suffix=r'\b'), Keyword),
(r'[+*/<>=~!@#%^&|`?-]+', Operator),
(r'::', Operator), # cast
(r'\$\d+', Name.Variable),
(r'([0-9]*\.[0-9]*|[0-9]+)(e[+-]?[0-9]+)?', Number.Float),
(r'[0-9]+', Number.Integer),
(r"((?:E|U&)?)(')", bygroups(String.Affix, String.Single), 'string'),
# quoted identifier
(r'((?:U&)?)(")', bygroups(String.Affix, String.Name), 'quoted-ident'),
(r'(?s)(\$)([^$]*)(\$)(.*?)(\$)(\2)(\$)', language_callback),
(r'[a-z_]\w*', Name),
# psql variable in SQL
(r""":(['"]?)[a-z]\w*\b\1""", Name.Variable),
(r'[;:()\[\]{},.]', Punctuation),
],
'multiline-comments': [
(r'/\*', Comment.Multiline, 'multiline-comments'),
(r'\*/', Comment.Multiline, '#pop'),
(r'[^/*]+', Comment.Multiline),
(r'[/*]', Comment.Multiline)
],
'string': [
(r"[^']+", String.Single),
(r"''", String.Single),
(r"'", String.Single, '#pop'),
],
'quoted-ident': [
(r'[^"]+', String.Name),
(r'""', String.Name),
(r'"', String.Name, '#pop'),
],
}
| PostgresLexer |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 280797,
"end": 281259
} | class ____(sgqlc.types.Input):
"""Autogenerated input type of ReopenIssue"""
__schema__ = github_schema
__field_names__ = ("issue_id", "client_mutation_id")
issue_id = sgqlc.types.Field(sgqlc.types.non_null(ID), graphql_name="issueId")
"""ID of the issue to be opened."""
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
"""A unique identifier for the client performing the mutation."""
| ReopenIssueInput |
python | weaviate__weaviate-python-client | weaviate/collections/classes/config_vectorizers.py | {
"start": 19586,
"end": 20066
} | class ____(_Multi2VecBase):
vectorizer: Union[Vectorizers, _EnumLikeStr] = Field(
default=Vectorizers.MULTI2VEC_NVIDIA, frozen=True, exclude=True
)
baseURL: Optional[AnyHttpUrl]
model: Optional[str]
truncation: Optional[bool]
def _to_dict(self) -> Dict[str, Any]:
ret_dict = super()._to_dict()
if self.baseURL is not None:
ret_dict["baseURL"] = self.baseURL.unicode_string()
return ret_dict
| _Multi2VecNvidiaConfig |
python | walkccc__LeetCode | solutions/3232. Find if Digit Game Can Be Won/3232.py | {
"start": 0,
"end": 129
} | class ____:
def canAliceWin(self, nums: list[int]) -> bool:
return sum(num if num < 10 else -num for num in nums) != 0
| Solution |
python | kubernetes-client__python | kubernetes/client/models/v1_container_state_terminated.py | {
"start": 383,
"end": 9083
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'container_id': 'str',
'exit_code': 'int',
'finished_at': 'datetime',
'message': 'str',
'reason': 'str',
'signal': 'int',
'started_at': 'datetime'
}
attribute_map = {
'container_id': 'containerID',
'exit_code': 'exitCode',
'finished_at': 'finishedAt',
'message': 'message',
'reason': 'reason',
'signal': 'signal',
'started_at': 'startedAt'
}
def __init__(self, container_id=None, exit_code=None, finished_at=None, message=None, reason=None, signal=None, started_at=None, local_vars_configuration=None): # noqa: E501
"""V1ContainerStateTerminated - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._container_id = None
self._exit_code = None
self._finished_at = None
self._message = None
self._reason = None
self._signal = None
self._started_at = None
self.discriminator = None
if container_id is not None:
self.container_id = container_id
self.exit_code = exit_code
if finished_at is not None:
self.finished_at = finished_at
if message is not None:
self.message = message
if reason is not None:
self.reason = reason
if signal is not None:
self.signal = signal
if started_at is not None:
self.started_at = started_at
@property
def container_id(self):
    """Gets the container_id of this V1ContainerStateTerminated.

    Container's ID in the format '<type>://<container_id>'

    :return: The container_id of this V1ContainerStateTerminated.
    :rtype: str
    """
    return self._container_id

@container_id.setter
def container_id(self, container_id):
    """Sets the container_id of this V1ContainerStateTerminated.

    Container's ID in the format '<type>://<container_id>'

    :param container_id: The container_id of this V1ContainerStateTerminated.
    :type: str
    """
    self._container_id = container_id

@property
def exit_code(self):
    """Gets the exit_code of this V1ContainerStateTerminated.

    Exit status from the last termination of the container

    :return: The exit_code of this V1ContainerStateTerminated.
    :rtype: int
    """
    return self._exit_code

@exit_code.setter
def exit_code(self, exit_code):
    """Sets the exit_code of this V1ContainerStateTerminated.

    Exit status from the last termination of the container

    :param exit_code: The exit_code of this V1ContainerStateTerminated.
    :type: int
    """
    # exit_code is the only field of this model with client-side
    # validation: it is required and must not be None.
    if self.local_vars_configuration.client_side_validation and exit_code is None:  # noqa: E501
        raise ValueError("Invalid value for `exit_code`, must not be `None`")  # noqa: E501

    self._exit_code = exit_code

@property
def finished_at(self):
    """Gets the finished_at of this V1ContainerStateTerminated.

    Time at which the container last terminated

    :return: The finished_at of this V1ContainerStateTerminated.
    :rtype: datetime
    """
    return self._finished_at

@finished_at.setter
def finished_at(self, finished_at):
    """Sets the finished_at of this V1ContainerStateTerminated.

    Time at which the container last terminated

    :param finished_at: The finished_at of this V1ContainerStateTerminated.
    :type: datetime
    """
    self._finished_at = finished_at

@property
def message(self):
    """Gets the message of this V1ContainerStateTerminated.

    Message regarding the last termination of the container

    :return: The message of this V1ContainerStateTerminated.
    :rtype: str
    """
    return self._message

@message.setter
def message(self, message):
    """Sets the message of this V1ContainerStateTerminated.

    Message regarding the last termination of the container

    :param message: The message of this V1ContainerStateTerminated.
    :type: str
    """
    self._message = message

@property
def reason(self):
    """Gets the reason of this V1ContainerStateTerminated.

    (brief) reason from the last termination of the container

    :return: The reason of this V1ContainerStateTerminated.
    :rtype: str
    """
    return self._reason

@reason.setter
def reason(self, reason):
    """Sets the reason of this V1ContainerStateTerminated.

    (brief) reason from the last termination of the container

    :param reason: The reason of this V1ContainerStateTerminated.
    :type: str
    """
    self._reason = reason

@property
def signal(self):
    """Gets the signal of this V1ContainerStateTerminated.

    Signal from the last termination of the container

    :return: The signal of this V1ContainerStateTerminated.
    :rtype: int
    """
    return self._signal

@signal.setter
def signal(self, signal):
    """Sets the signal of this V1ContainerStateTerminated.

    Signal from the last termination of the container

    :param signal: The signal of this V1ContainerStateTerminated.
    :type: int
    """
    self._signal = signal

@property
def started_at(self):
    """Gets the started_at of this V1ContainerStateTerminated.

    Time at which previous execution of the container started

    :return: The started_at of this V1ContainerStateTerminated.
    :rtype: datetime
    """
    return self._started_at

@started_at.setter
def started_at(self, started_at):
    """Sets the started_at of this V1ContainerStateTerminated.

    Time at which previous execution of the container started

    :param started_at: The started_at of this V1ContainerStateTerminated.
    :type: datetime
    """
    self._started_at = started_at
def to_dict(self):
    """Returns the model properties as a dict.

    Nested model objects (anything exposing ``to_dict``) are serialized
    recursively, including model objects found inside list items and
    dict values.
    """
    result = {}

    # NOTE(review): `openapi_types` is declared on the class outside this
    # view; it is assumed to map the model's attribute names to their
    # declared types, so iterating it serializes exactly the declared
    # fields. Confirm against the class header.
    for attr, _ in six.iteritems(self.openapi_types):
        value = getattr(self, attr)
        if isinstance(value, list):
            # Serialize each list element that is itself a model object.
            result[attr] = list(map(
                lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                value
            ))
        elif hasattr(value, "to_dict"):
            result[attr] = value.to_dict()
        elif isinstance(value, dict):
            # Serialize model objects appearing as dict values.
            result[attr] = dict(map(
                lambda item: (item[0], item[1].to_dict())
                if hasattr(item[1], "to_dict") else item,
                value.items()
            ))
        else:
            result[attr] = value

    return result
def to_str(self):
    """Returns the string representation of the model."""
    # pprint gives a stable, readable rendering of the nested dict.
    return pprint.pformat(self.to_dict())

def __repr__(self):
    """For `print` and `pprint`."""
    return self.to_str()
def __eq__(self, other):
    """Two instances are equal when both are this model type and their
    serialized field dicts match."""
    return (
        isinstance(other, V1ContainerStateTerminated)
        and self.to_dict() == other.to_dict()
    )

def __ne__(self, other):
    """Inverse of `__eq__`: True on type mismatch or differing fields."""
    return not self.__eq__(other)
| V1ContainerStateTerminated |
python | facebookresearch__faiss | contrib/datasets.py | {
"start": 10311,
"end": 12275
class DatasetGIST1M(Dataset):
    """The GIST1M benchmark (ANN_GIST1M).

    The original dataset is available at: http://corpus-texmex.irisa.fr/
    """

    def __init__(self):
        Dataset.__init__(self)
        # 960-d vectors: 100k train, 1M database, 10k queries.
        self.d, self.nt, self.nb, self.nq = 960, 100000, 1000000, 10000
        self.basedir = dataset_basedir + 'gist1M/'

    def get_queries(self):
        """Return the query vectors."""
        return fvecs_read(self.basedir + "gist_query.fvecs")

    def get_train(self, maxtrain=None):
        """Return up to `maxtrain` training vectors (all of them by default)."""
        if maxtrain is None:
            maxtrain = self.nt
        return fvecs_read(self.basedir + "gist_learn.fvecs")[:maxtrain]

    def get_database(self):
        """Return the database vectors."""
        return fvecs_read(self.basedir + "gist_base.fvecs")

    def get_groundtruth(self, k=None):
        """Return ground-truth neighbor ids, truncated to top-k when given."""
        ground_truth = ivecs_read(self.basedir + "gist_groundtruth.ivecs")
        if k is None:
            return ground_truth
        # The stored ground truth only covers the 100 nearest neighbors.
        assert k <= 100
        return ground_truth[:, :k]
def dataset_from_name(dataset='deep1M', download=False):
    """Build a Dataset object from a string describing it.

    Supports sift1M, gist1M, bigann1M..bigann1B, deep1M..deep1B,
    music-100 and glove. Raises RuntimeError for unknown names.
    """
    if dataset == 'sift1M':
        return DatasetSIFT1M()
    if dataset == 'gist1M':
        return DatasetGIST1M()
    if dataset.startswith('bigann'):
        # "bigann1B" means 1000M; otherwise parse the leading digits
        # of e.g. "bigann10M".
        nb_M = 1000 if dataset == "bigann1B" else int(dataset[6:-1])
        return DatasetBigANN(nb_M=nb_M)
    if dataset.startswith("deep"):
        suffix = dataset[4:]
        if suffix == '1B':
            nb = 10 ** 9
        elif suffix[-1] == 'M':
            nb = 10 ** 6 * int(suffix[:-1])
        elif suffix[-1] == 'k':
            nb = 1000 * int(suffix[:-1])
        else:
            assert False, "did not recognize suffix " + suffix
        return DatasetDeep1B(nb=nb)
    if dataset == "music-100":
        return DatasetMusic100()
    if dataset == "glove":
        return DatasetGlove(download=download)
    raise RuntimeError("unknown dataset " + dataset)
| DatasetGIST1M |
python | ZoranPandovski__al-go-rithms | games/Python/paddleball.py | {
"start": 3535,
"end": 4695
class Score:
    """On-canvas score counter for the paddle-ball game."""

    def __init__(self, canvas, color):
        self.score = 0
        self.canvas = canvas
        # Draw the initial score ("0") near the top-right of the board.
        self.id = self.canvas.create_text(
            450, 40, text=self.score, fill=color, font=('Courier', 40))

    def hit(self):
        """Increment the score and refresh the on-canvas text item."""
        self.score += 1
        self.canvas.itemconfig(self.id, text=self.score)
# --- Window and board setup ---
tk = Tk()
tk.title("Game")
tk.resizable(0,0)
tk.wm_attributes("-topmost", 1)  # keep the game window above all others
canvas = Canvas(tk, width=500, height=400, bd=0, highlightthickness=0)
canvas.pack()
tk.update()

# Static "SCORE:" label; the Score object draws the changing number.
canvas.create_text(340, 40, text="SCORE:", fill='green', font=('Courier', 40))
score = Score(canvas, 'green')
paddle = Paddle(canvas, 'blue')
ball = Ball(canvas, paddle, score, 'red')
# Pre-created but hidden until the ball falls past the paddle.
game_over_text = canvas.create_text(250, 200, text='GAME OVER', font=('Georgia', 60),state='hidden')

# --- Main loop: redraw until the ball reaches the bottom of the board.
# NOTE(review): `paddle.started` appears to gate the first move until the
# player acts — confirm against the Paddle class.
while 1:
    if ball.hit_bottom == False and paddle.started == True:
        ball.draw()
        paddle.draw()
    if ball.hit_bottom == True:
        time.sleep(1)
        canvas.itemconfig(game_over_text, state='normal')
        break
    tk.update_idletasks()
    tk.update()
    time.sleep(0.01)  # ~10 ms frame delay
| Score |
python | scipy__scipy | benchmarks/benchmarks/optimize_linprog.py | {
"start": 6929,
"end": 8268
class Netlib_infeasible(Benchmark):
    """Benchmark `linprog` on infeasible Netlib problems.

    NOTE(review): the harness discovers `params`, `param_names`, `setup`
    and the `time_*`/`track_*` method names by convention — do not rename
    them.
    """

    params = [
        methods,
        infeasible_problems,
    ]
    param_names = ['method', 'problems']

    def setup(self, meth, prob):
        # Only run problems explicitly enabled for this suite.
        if prob not in enabled_infeasible_problems:
            raise NotImplementedError("skipped")

        here = os.path.dirname(os.path.realpath(__file__))
        datafile = os.path.join(
            here, "linprog_benchmark_files", "infeasible", prob + ".npz")
        data = np.load(datafile, allow_pickle=True)

        self.c = data["c"]
        self.A_eq = data["A_eq"]
        self.A_ub = data["A_ub"]
        self.b_ub = data["b_ub"]
        self.b_eq = data["b_eq"]
        self.bounds = np.squeeze(data["bounds"])
        self.status = None

    def time_netlib_infeasible(self, meth, prob):
        """Time one linprog solve; remember its status for tracking."""
        method, options = meth
        res = linprog(c=self.c,
                      A_ub=self.A_ub,
                      b_ub=self.b_ub,
                      A_eq=self.A_eq,
                      b_eq=self.b_eq,
                      bounds=self.bounds,
                      method=method,
                      options=options)
        self.status = res.status

    def track_netlib_infeasible(self, meth, prob):
        """Report the solver status (reuses the timed run when available)."""
        if self.status is None:
            self.time_netlib_infeasible(meth, prob)
        return self.status
| Netlib_infeasible |
python | keras-team__keras | keras/src/trainers/trainer.py | {
"start": 637,
"end": 52222
} | class ____:
def __init__(self):
    # NOTE(review): `_lock` and `_run_eagerly`/`_jit_compile` backing
    # fields are consumed elsewhere; only their initialization is
    # visible here.
    self._lock = False
    self._run_eagerly = False
    self._jit_compile = None
    self.compiled = False
    self.loss = None
    self.steps_per_execution = 1
    # Can be set by callbacks in on_train_begin
    self._initial_epoch = None
    # Whether the (possibly user-overridden) `compute_loss` accepts a
    # `training` argument; the argument was added in Keras 3.3, so
    # overrides written before that may lack it (see `_compute_loss`).
    self._compute_loss_has_training_arg = (
        "training" in inspect.signature(self.compute_loss).parameters
    )

    # Placeholders used in `compile`
    self._compile_loss = None
    self._compile_metrics = None
    self._loss_tracker = None
@traceback_utils.filter_traceback
@tracking.no_automatic_dependency_tracking
def compile(
    self,
    optimizer="rmsprop",
    loss=None,
    loss_weights=None,
    metrics=None,
    weighted_metrics=None,
    run_eagerly=False,
    steps_per_execution=1,
    jit_compile="auto",
    auto_scale_loss=True,
):
    """Configures the model for training.

    Args:
        optimizer: String name or `keras.optimizers` instance.
        loss: String name, `keras.losses.Loss` instance, or callable
            `loss = fn(y_true, y_pred)` returning a float tensor.
        loss_weights: Optional list or dict of Python floats weighting
            the loss contribution of each model output; the minimized
            loss is the weighted sum of the individual losses.
        metrics: Metrics evaluated during training and testing, without
            sample weighting. May be per-output lists/dicts
            (see `keras.metrics`).
        weighted_metrics: Metrics evaluated and weighted by
            `sample_weight` or `class_weight`.
        run_eagerly: If `True`, the forward pass is never compiled
            (slower; useful for debugging).
        steps_per_execution: Number of batches to run per compiled
            function call (at most one full epoch).
        jit_compile: Bool or `"auto"`; whether to use XLA compilation
            (or `torch.compile` on the torch backend).
        auto_scale_loss: If `True` and the dtype policy is
            `"mixed_float16"`, wrap the optimizer in a
            `LossScaleOptimizer` to prevent underflow.
    """
    resolved_optimizer = optimizers.get(optimizer)
    self.optimizer = resolved_optimizer
    # Mixed float16 training needs dynamic loss scaling to avoid
    # gradient underflow.
    if (
        auto_scale_loss
        and self.dtype_policy.name == "mixed_float16"
        and self.optimizer
        and not isinstance(self.optimizer, LossScaleOptimizer)
    ):
        self.optimizer = LossScaleOptimizer(
            self.optimizer, name="loss_scale_optimizer"
        )
    output_names = getattr(self, "output_names", None)
    if loss is not None:
        self._compile_loss = CompileLoss(
            loss, loss_weights, output_names=output_names
        )
        self.loss = loss
    if metrics is not None or weighted_metrics is not None:
        self._compile_metrics = CompileMetrics(
            metrics, weighted_metrics, output_names=output_names
        )
    if jit_compile == "auto":
        jit_compile = (
            False if run_eagerly else self._resolve_auto_jit_compile()
        )
    if jit_compile and run_eagerly:
        jit_compile = False
        warnings.warn(
            "If `run_eagerly` is True, then `jit_compile` "
            "cannot also be True. Disabling `jit_compile`.",
            stacklevel=2,
        )

    self.jit_compile = jit_compile
    self.run_eagerly = run_eagerly
    self.stop_training = False
    self.compiled = True
    self._loss_tracker = metrics_module.Mean(name="loss")
    self.steps_per_execution = steps_per_execution

    # Step functions are (re)built lazily on first use.
    self.train_function = None
    self.test_function = None
    self.predict_function = None

    # Record the (resolved, unwrapped) compile arguments for
    # serialization.
    self._compile_config = serialization_lib.SerializableDict(
        optimizer=resolved_optimizer,
        loss=loss,
        loss_weights=loss_weights,
        metrics=metrics,
        weighted_metrics=weighted_metrics,
        run_eagerly=run_eagerly,
        steps_per_execution=steps_per_execution,
        jit_compile=jit_compile,
    )
@property
def jit_compile(self):
    # Lazily resolve the backend-appropriate default the first time the
    # value is read without ever having been set.
    if self._jit_compile is None:
        # Value was never set. Resolve it now.
        self._jit_compile = self._resolve_auto_jit_compile()
    return self._jit_compile

@jit_compile.setter
def jit_compile(self, value):
    # Requesting JIT on an unsupported model degrades gracefully to
    # False with a warning instead of raising.
    if value and not model_supports_jit(self):
        warnings.warn(
            "Model doesn't support `jit_compile=True`. "
            "Proceeding with `jit_compile=False`."
        )
        self._jit_compile = False
    else:
        self._jit_compile = value
def _resolve_auto_jit_compile(self):
    """Decide whether `jit_compile="auto"` should enable XLA here."""
    if backend.backend() == "torch":
        # "auto" defaults to eager execution on the torch backend.
        return False

    if backend.backend() == "tensorflow":
        import tensorflow as tf

        devices = tf.config.list_physical_devices()
        accelerators = [d for d in devices if d.device_type != "CPU"]
        if not accelerators:
            # Disable XLA on CPU-only machines.
            return False

        if self._distribute_strategy:
            # Disable XLA with tf.distribute
            return False

    return bool(model_supports_jit(self))
@property
def run_eagerly(self):
    # When True, step functions run eagerly rather than compiled.
    return self._run_eagerly

@run_eagerly.setter
def run_eagerly(self, value):
    self._run_eagerly = value
@property
def metrics(self):
    """All metrics tracked by the model, in a deterministic order."""
    # Order: loss tracker, compiled metrics, metrics owned by the
    # compiled loss, custom metrics, sublayer metrics.
    metrics = []
    if self.compiled:
        if self._loss_tracker is not None:
            metrics.append(self._loss_tracker)
        if self._compile_metrics is not None:
            metrics.append(self._compile_metrics)
        if self._compile_loss is not None:
            metrics.extend(self._compile_loss.metrics)
    metrics.extend(self._metrics)
    for layer in self._flatten_layers(include_self=False):
        if isinstance(layer, Trainer):
            # All Trainer-related metrics in sublayers should be ignored
            # because a new Trainer has been instantiated.
            continue
        metrics.extend(layer.metrics)
    return metrics
@property
def metrics_names(self):
return [m.name for m in self.metrics]
def reset_metrics(self):
for m in self.metrics:
m.reset_state()
def _get_own_metrics(self):
metrics = []
if self._loss_tracker is not None:
metrics.append(self._loss_tracker)
if self._compile_metrics is not None:
metrics.append(self._compile_metrics)
if self._compile_loss is not None:
metrics.extend(self._compile_loss.metrics)
metrics.extend(self._metrics)
return metrics
def compute_loss(
    self,
    x=None,
    y=None,
    y_pred=None,
    sample_weight=None,
    training=True,
):
    """Compute, validate and return the total loss as a scalar tensor.

    The default implementation sums the compiled loss (configured in
    `compile()`) with every extra loss registered via `add_loss`,
    regularizers, or sublayers. Subclasses may override this method for
    custom loss logic; overrides that track their own metrics should
    also override `metrics` and `reset_metrics`.

    Args:
        x: Input data (ignored by the default implementation).
        y: Target data.
        y_pred: Predictions returned by the model (output of `model(x)`).
        sample_weight: Sample weights for weighting the loss function.
        training: Whether we are training or evaluating the model
            (ignored by the default implementation).

    Returns:
        The total loss as a scalar tensor.
    """
    # The default implementation does not use `x` or `training`.
    del x, training

    losses = []
    if self._compile_loss is not None:
        compiled_loss = self._compile_loss(y, y_pred, sample_weight)
        if compiled_loss is not None:
            losses.append(compiled_loss)
    for extra_loss in self.losses:
        losses.append(self._aggregate_additional_loss(extra_loss))

    # NOTE(review): an empty loss list is tolerated on the JAX backend
    # only — presumably handled upstream by its trainer; confirm there.
    if backend.backend() != "jax" and not losses:
        raise ValueError(
            "No loss to compute. Provide a `loss` argument in `compile()`."
        )
    if not losses:
        return ops.zeros(())
    if len(losses) == 1:
        return losses[0]
    return ops.sum(losses)
def _compute_loss(
self,
x=None,
y=None,
y_pred=None,
sample_weight=None,
training=True,
):
"""Backwards compatibility wrapper for `compute_loss`.
This should be used instead `compute_loss` within `train_step` and
`test_step` to support overrides of `compute_loss` that may not have
the `training` argument, as this argument was added in Keras 3.3.
"""
if self._compute_loss_has_training_arg:
return self.compute_loss(
x, y, y_pred, sample_weight, training=training
)
else:
return self.compute_loss(x, y, y_pred, sample_weight)
def _aggregate_additional_loss(self, loss):
    """Aggregate one `add_loss`/regularizer/sublayer loss into a scalar.

    Args:
        loss: A tensor representing the additional loss to aggregate.

    Returns:
        The summed loss, cast to the `floatx()` dtype when the input is
        not already a float tensor.
    """
    value = loss
    if not backend.is_float_dtype(value.dtype):
        value = ops.cast(value, dtype=backend.floatx())
    return ops.sum(value)
def stateless_compute_loss(
    self,
    trainable_variables,
    non_trainable_variables,
    metrics_variables,
    x=None,
    y=None,
    y_pred=None,
    sample_weight=None,
    training=True,
):
    """Stateless (functional) wrapper around `_compute_loss`.

    Runs the loss computation inside a `StatelessScope` so model
    variables are not mutated in place; any values updated during the
    computation are instead returned to the caller.

    Args:
        trainable_variables: Values for `self.trainable_variables`.
        non_trainable_variables: Values for
            `self.non_trainable_variables`.
        metrics_variables: Values for `self.metrics_variables`.
        x: Input data.
        y: Target data.
        y_pred: Model predictions.
        sample_weight: Optional loss weighting.
        training: Whether this is a training pass.

    Returns:
        A tuple `(loss, (trainable_variables, non_trainable_variables,
        metrics_variables))` with possibly-updated variable values.
    """
    # Pair each model variable with the caller-provided value. The zip
    # order must mirror the corresponding variable-list properties.
    var_mapping = list(zip(self.trainable_variables, trainable_variables))
    var_mapping.extend(
        zip(self.non_trainable_variables, non_trainable_variables)
    )
    var_mapping.extend(zip(self.metrics_variables, metrics_variables))
    with backend.StatelessScope(state_mapping=var_mapping) as scope:
        # Note that this is needed for the regularization loss, which need
        # the latest value of train/non-trainable variables.
        loss = self._compute_loss(
            x,
            y,
            y_pred,
            sample_weight=sample_weight,
            training=training,
        )

    # Update non trainable vars (may have been updated in compute_loss)
    non_trainable_variables = []
    for v in self.non_trainable_variables:
        new_v = scope.get_current_value(v)
        non_trainable_variables.append(new_v)

    # Update metrics vars (may have been updated in compute_loss)
    metrics_variables = []
    for v in self.metrics_variables:
        new_v = scope.get_current_value(v)
        metrics_variables.append(new_v)
    return loss, (
        trainable_variables,
        non_trainable_variables,
        metrics_variables,
    )
def compute_metrics(self, x, y, y_pred, sample_weight=None):
    """Update metric states and return all metric results as a dict.

    Subclasses may override this method to add custom metric-update
    logic; custom metrics created in `__init__` or `build` are tracked
    automatically (and returned by `self.metrics`), so an override
    typically calls `super().compute_metrics(...)`, updates its own
    metrics, and merges their results into the returned dict.

    Args:
        x: Input data (ignored by the default implementation).
        y: Target data.
        y_pred: Predictions returned by the model output of
            `model.call(x)`.
        sample_weight: Sample weights for weighting the loss function.

    Returns:
        A dict of metric results, e.g. `{'loss': 0.2, 'accuracy': 0.7}`,
        passed on to
        `keras.callbacks.CallbackList.on_train_batch_end()`.
    """
    del x  # unused by the default implementation
    if self._compile_metrics is not None:
        self._compile_metrics.update_state(y, y_pred, sample_weight)
    return self.get_metrics_result()
def get_metrics_result(self):
    """Return the current values of `self.metrics` as a flat dict.

    A metric whose `result()` is itself a dict (multiple sub-metrics)
    has its entries merged into the top level of the returned dict.

    Returns:
        A dict such as `{'loss': 0.2, 'accuracy': 0.7}`.
    """
    results = {}
    for metric in self.metrics:
        value = metric.result()
        if isinstance(value, dict):
            results.update(value)
        else:
            results[metric.name] = value
    return python_utils.pythonify_logs(results)
def fit(
    self,
    x=None,
    y=None,
    batch_size=None,
    epochs=1,
    verbose="auto",
    callbacks=None,
    validation_split=0.0,
    validation_data=None,
    shuffle=True,
    class_weight=None,
    sample_weight=None,
    initial_epoch=0,
    steps_per_epoch=None,
    validation_steps=None,
    validation_batch_size=None,
    validation_freq=1,
):
    """Trains the model for a fixed number of epochs (dataset iterations).

    Args:
        x: Input data: NumPy array(s)/backend tensor(s), a dict of named
            inputs, a `keras.utils.PyDataset`, a `tf.data.Dataset`, a
            `torch.utils.data.DataLoader`, or a Python generator
            function yielding `(inputs, targets)` or
            `(inputs, targets, sample_weights)`.
        y: Target data. Omit when `x` is a dataset/generator that
            already yields targets.
        batch_size: Samples per gradient update (defaults to 32). Do not
            set when `x` is a dataset/generator/`PyDataset`, since those
            already generate batches.
        epochs: Final epoch index to train until. Combined with
            `initial_epoch`, this means "train until epoch `epochs` is
            reached", not "train for `epochs` more iterations".
        verbose: `"auto"`, 0 (silent), 1 (progress bar), or 2 (one line
            per epoch; recommended when not running interactively).
        callbacks: List of `keras.callbacks.Callback` instances.
            `ProgbarLogger` and `History` are created automatically.
        validation_split: Fraction (0..1) of the training arrays to hold
            out for validation (taken from the end, before shuffling).
            Only supported for array/tensor inputs; overridden by
            `validation_data`.
        validation_data: `(x_val, y_val)`,
            `(x_val, y_val, val_sample_weights)`, or a
            dataset/generator, evaluated at the end of each epoch and
            never trained on.
        shuffle: Whether to shuffle training data before each epoch
            (ignored for dataset/generator inputs).
        class_weight: Optional dict mapping class indices to float loss
            weights (training only).
        sample_weight: Optional per-sample (1D) or per-timestep (2D)
            loss weights; unsupported for dataset/generator inputs —
            pass weights as the third element of `x` instead.
        initial_epoch: Epoch at which to start (useful for resuming).
        steps_per_epoch: Batches per epoch; required for infinitely
            repeating datasets, otherwise inferred.
        validation_steps: Batches drawn from `validation_data` per
            validation run (restarts from the beginning each epoch).
        validation_batch_size: Samples per validation batch (defaults to
            `batch_size`).
        validation_freq: Run validation every this many training epochs.

    Iterator-like `x` values must yield an unambiguous tuple of length
    1, 2 or 3, interpreted as `(x,)`, `(x, y)` or
    `(x, y, sample_weight)`; namedtuples are not supported.

    Returns:
        A `History` object whose `History.history` attribute records
        training (and, if applicable, validation) loss and metric values
        per epoch.
    """
    # The training loop itself is supplied by trainer subclasses.
    raise NotImplementedError
def evaluate(
self,
x=None,
y=None,
batch_size=None,
verbose="auto",
sample_weight=None,
steps=None,
callbacks=None,
return_dict=False,
**kwargs,
):
"""Returns the loss value & metrics values for the model in test mode.
Computation is done in batches (see the `batch_size` arg.)
Args:
x: Input data. It can be:
- A NumPy array (or array-like), or a list of arrays
(in case the model has multiple inputs).
- A backend-native tensor, or a list of tensors
(in case the model has multiple inputs).
- A dict mapping input names to the corresponding array/tensors,
if the model has named inputs.
- A `keras.utils.PyDataset` returning `(inputs, targets)` or
`(inputs, targets, sample_weights)`.
- A `tf.data.Dataset` yielding `(inputs, targets)` or
`(inputs, targets, sample_weights)`.
- A `torch.utils.data.DataLoader` yielding `(inputs, targets)`
or `(inputs, targets, sample_weights)`.
- A Python generator function yielding `(inputs, targets)` or
`(inputs, targets, sample_weights)`.
y: Target data. Like the input data `x`, it can be either NumPy
array(s) or backend-native tensor(s). If `x` is a
`keras.utils.PyDataset`, `tf.data.Dataset`,
`torch.utils.data.DataLoader` or a Python generator function,
`y` should not be specified since targets will be obtained from
`x`.
batch_size: Integer or `None`.
Number of samples per batch of computation.
If unspecified, `batch_size` will default to 32.
Do not specify the `batch_size` if your input data `x` is a
`keras.utils.PyDataset`, `tf.data.Dataset`,
`torch.utils.data.DataLoader` or Python generator function
since they generate batches.
verbose: `"auto"`, 0, 1, or 2. Verbosity mode.
0 = silent, 1 = progress bar, 2 = single line.
`"auto"` becomes 1 for most cases.
Note that the progress bar is not
particularly useful when logged to a file, so `verbose=2` is
recommended when not running interactively
(e.g. in a production environment). Defaults to `"auto"`.
sample_weight: Optional NumPy array or tensor of weights for
the training samples, used for weighting the loss function
(during training only). You can either pass a flat (1D)
NumPy array or tensor with the same length as the input samples
(1:1 mapping between weights and samples), or in the case of
temporal data, you can pass a 2D NumPy array or tensor with
shape `(samples, sequence_length)` to apply a different weight
to every timestep of every sample.
This argument is not supported when `x` is a
`keras.utils.PyDataset`, `tf.data.Dataset`,
`torch.utils.data.DataLoader` or Python generator function.
Instead, provide `sample_weights` as the third element of `x`.
Note that sample weighting does not apply to metrics specified
via the `metrics` argument in `compile()`. To apply sample
weighting to your metrics, you can specify them via the
`weighted_metrics` in `compile()` instead.
steps: Integer or `None`.
Total number of steps (batches of samples) to draw before
declaring the evaluation round finished. If `steps` is `None`,
it will run until `x` is exhausted. In the case of an infinitely
repeating dataset, it will run indefinitely.
callbacks: List of `keras.callbacks.Callback` instances.
List of callbacks to apply during evaluation.
return_dict: If `True`, loss and metric results are returned as a
dict, with each key being the name of the metric.
If `False`, they are returned as a list.
Returns:
Scalar test loss (if the model has a single output and no metrics)
or list of scalars (if the model has multiple outputs
and/or metrics).
Note: When using compiled metrics, `evaluate()` may return multiple
submetric values, while `model.metrics_names` often lists only
top-level names (e.g., 'loss', 'compile_metrics'), leading to a
length mismatch. The order of the `evaluate()` output corresponds
to the order of metrics specified during `model.compile()`. You can
use this order to map the `evaluate()` results to the intended
metric. `model.metrics_names` itself will still return only the
top-level names.
"""
raise NotImplementedError
def predict(
self, x, batch_size=None, verbose="auto", steps=None, callbacks=None
):
"""Generates output predictions for the input samples.
Computation is done in batches. This method is designed for batch
processing of large numbers of inputs. It is not intended for use inside
of loops that iterate over your data and process small numbers of inputs
at a time.
For small numbers of inputs that fit in one batch,
directly use `__call__()` for faster execution, e.g.,
`model(x)`, or `model(x, training=False)` if you have layers such as
`BatchNormalization` that behave differently during
inference.
Note: See [this FAQ entry](
https://keras.io/getting_started/faq/#whats-the-difference-between-model-methods-predict-and-call)
for more details about the difference between `Model` methods
`predict()` and `__call__()`.
Args:
x: Input data. It can be:
- A NumPy array (or array-like), or a list of arrays
(in case the model has multiple inputs).
- A backend-native tensor, or a list of tensors
(in case the model has multiple inputs).
- A dict mapping input names to the corresponding array/tensors,
if the model has named inputs.
- A `keras.utils.PyDataset`.
- A `tf.data.Dataset`.
- A `torch.utils.data.DataLoader`.
- A Python generator function.
batch_size: Integer or `None`.
Number of samples per batch of computation.
If unspecified, `batch_size` will default to 32.
Do not specify the `batch_size` if your input data `x` is a
`keras.utils.PyDataset`, `tf.data.Dataset`,
`torch.utils.data.DataLoader` or Python generator function
since they generate batches.
verbose: `"auto"`, 0, 1, or 2. Verbosity mode.
0 = silent, 1 = progress bar, 2 = single line.
`"auto"` becomes 1 for most cases. Note that the progress bar
is not particularly useful when logged to a file,
so `verbose=2` is recommended when not running interactively
(e.g. in a production environment). Defaults to `"auto"`.
steps: Total number of steps (batches of samples) to draw before
declaring the prediction round finished. If `steps` is `None`,
it will run until `x` is exhausted. In the case of an infinitely
repeating dataset, it will run indefinitely.
callbacks: List of `keras.callbacks.Callback` instances.
List of callbacks to apply during prediction.
Returns:
NumPy array(s) of predictions.
"""
raise NotImplementedError
def train_on_batch(
self,
x,
y=None,
sample_weight=None,
class_weight=None,
return_dict=False,
):
"""Runs a single gradient update on a single batch of data.
Args:
x: Input data. Must be array-like.
y: Target data. Must be array-like.
sample_weight: Optional array of the same length as x, containing
weights to apply to the model's loss for each sample.
In the case of temporal data, you can pass a 2D array
with shape `(samples, sequence_length)`, to apply a different
weight to every timestep of every sample.
class_weight: Optional dictionary mapping class indices (integers)
to a weight (float) to apply to the model's loss for the samples
from this class during training. This can be useful to tell the
model to "pay more attention" to samples from an
under-represented class. When `class_weight` is specified
and targets have a rank of 2 or greater, either `y` must
be one-hot encoded, or an explicit final dimension of 1
must be included for sparse class labels.
return_dict: If `True`, loss and metric results are returned as a
dict, with each key being the name of the metric. If `False`,
they are returned as a list.
Returns:
A scalar loss value (when no metrics and `return_dict=False`),
a list of loss and metric values
(if there are metrics and `return_dict=False`), or a dict of
metric and loss values (if `return_dict=True`).
"""
raise NotImplementedError
def test_on_batch(
self,
x,
y=None,
sample_weight=None,
return_dict=False,
):
"""Test the model on a single batch of samples.
Args:
x: Input data. Must be array-like.
y: Target data. Must be array-like.
sample_weight: Optional array of the same length as x, containing
weights to apply to the model's loss for each sample.
In the case of temporal data, you can pass a 2D array
with shape `(samples, sequence_length)`, to apply a different
weight to every timestep of every sample.
return_dict: If `True`, loss and metric results are returned as a
dict, with each key being the name of the metric. If `False`,
they are returned as a list.
Returns:
A scalar loss value (when no metrics and `return_dict=False`),
a list of loss and metric values
(if there are metrics and `return_dict=False`), or a dict of
metric and loss values (if `return_dict=True`).
"""
raise NotImplementedError
def predict_on_batch(self, x):
"""Returns predictions for a single batch of samples.
Args:
x: Input data. It must be array-like.
Returns:
NumPy array(s) of predictions.
"""
raise NotImplementedError
def get_compile_config(self):
"""Returns a serialized config with information for compiling the model.
This method returns a config dictionary containing all the information
(optimizer, loss, metrics, etc.) with which the model was compiled.
Returns:
A dict containing information for compiling the model.
"""
if self.compiled and hasattr(self, "_compile_config"):
return self._compile_config.serialize()
return {}
def compile_from_config(self, config):
"""Compiles the model with the information given in config.
This method uses the information in the config (optimizer, loss,
metrics, etc.) to compile the model.
Args:
config: Dict containing information for compiling the model.
"""
has_overridden_compile = self.__class__.compile != Trainer.compile
if has_overridden_compile:
warnings.warn(
"`compile()` was not called as part of model loading "
"because the model's `compile()` method is custom. "
"All subclassed Models that have `compile()` "
"overridden should also override "
"`get_compile_config()` and `compile_from_config(config)`. "
"Alternatively, you can "
"call `compile()` manually after loading.",
stacklevel=2,
)
return
config = serialization_lib.deserialize_keras_object(config)
self.compile(**config)
if hasattr(self, "optimizer") and self.built:
# Create optimizer variables.
self.optimizer.build(self.trainable_variables)
def _should_eval(self, epoch, validation_freq):
epoch = epoch + 1 # one-index the user-facing epoch.
if isinstance(validation_freq, int):
return epoch % validation_freq == 0
elif isinstance(validation_freq, list):
return epoch in validation_freq
else:
raise ValueError(
"Expected `validation_freq` to be a list or int. "
f"Received: validation_freq={validation_freq} of the "
f"type {type(validation_freq)}."
)
def _get_metrics_result_or_logs(self, logs):
"""Returns model metrics as a dict if the keys match with input logs.
When the training / evaluation is performed with an asynchronous steps,
the last scheduled `train / test_step` may not give the latest metrics
because it is not guaranteed to be executed the last. This method gets
metrics from the model directly instead of relying on the return from
last step function.
When the user has custom train / test step functions, the metrics
returned may be different from `Model.metrics`. In those instances,
this function will be no-op and return the logs passed in.
Args:
logs: A `dict` of metrics returned by train / test step function.
Returns:
A `dict` containing values of the metrics listed in `self.metrics`
when logs and model metrics keys match. Otherwise it returns input
`logs`.
"""
metric_logs = self.get_metrics_result()
# Verify that train / test step logs passed and metric logs have
# matching keys. It could be different when using custom step functions,
# in which case we return the logs from the last step.
if isinstance(logs, dict) and set(logs.keys()) == set(
metric_logs.keys()
):
return metric_logs
return logs
def _flatten_metrics_in_order(self, logs):
"""Turns `logs` dict into a list as per key order of `metrics_names`."""
metric_names = []
for metric in self.metrics:
if isinstance(metric, CompileMetrics):
metric_names += [
sub_metric.name for sub_metric in metric.metrics
]
else:
metric_names.append(metric.name)
results = []
for name in metric_names:
if name in logs:
results.append(logs[name])
for key in sorted(logs.keys()):
if key not in metric_names:
results.append(logs[key])
if len(results) == 1:
return results[0]
return results
def _assert_compile_called(self, method_name=None):
if not self.compiled:
msg = "You must call `compile()` before "
if metrics_module:
msg += "using the model."
else:
msg += f"calling `{method_name}()`."
raise ValueError(msg)
def _symbolic_build(self, iterator=None, data_batch=None):
model_unbuilt = not all(layer.built for layer in self._flatten_layers())
compile_metrics_unbuilt = (
self._compile_metrics is not None
and not self._compile_metrics.built
)
compile_loss_unbuilt = (
self._compile_loss is not None and not self._compile_loss.built
)
optimizer_unbuilt = (
self.optimizer is not None and not self.optimizer.built
)
if model_unbuilt or compile_metrics_unbuilt or compile_loss_unbuilt:
# Create symbolic tensors matching an input batch.
def to_symbolic_input(v):
if v is None:
return None
return backend.KerasTensor(
v.shape, backend.standardize_dtype(v.dtype)
)
if data_batch is None:
for _, _, data_or_iterator in iterator:
if isinstance(data_or_iterator, (list, tuple)):
data_batch = data_or_iterator[0]
else:
data_batch = next(data_or_iterator)
break
data_batch = tree.map_structure(to_symbolic_input, data_batch)
(
x,
y,
sample_weight,
) = data_adapter_utils.unpack_x_y_sample_weight(data_batch)
# Build all model state with `backend.compute_output_spec`.
try:
y_pred = backend.compute_output_spec(self, x, training=False)
except Exception as e:
raise RuntimeError(
"Unable to automatically build the model. "
"Please build it yourself before calling "
"fit/evaluate/predict. "
"A model is 'built' when its variables have "
"been created and its `self.built` attribute "
"is True. Usually, calling the model on a batch "
"of data is the right way to build it.\n"
"Exception encountered:\n"
f"'{e}'"
)
if compile_metrics_unbuilt:
# Build all metric state with `backend.compute_output_spec`.
backend.compute_output_spec(
self.compute_metrics,
x,
y,
y_pred,
sample_weight=sample_weight,
)
if compile_loss_unbuilt:
# Build `CompileLoss` state with `backend.compute_output_spec`.
backend.compute_output_spec(
self._compute_loss,
x,
y,
y_pred,
sample_weight=sample_weight,
training=False,
)
if optimizer_unbuilt:
# Build optimizer
self.optimizer.build(self.trainable_variables)
self._post_build()
def model_supports_jit(model):
# XLA not supported with TF on MacOS GPU
if platform.system() == "Darwin" and "arm" in platform.processor().lower():
if backend.backend() == "tensorflow":
from keras.src.utils.module_utils import tensorflow as tf
if tf.config.list_physical_devices("GPU"):
return False
# XLA not supported by some layers
if all(x.supports_jit for x in model._flatten_layers()):
if backend.backend() == "tensorflow":
from tensorflow.python.framework.config import (
is_op_determinism_enabled,
)
if is_op_determinism_enabled():
# disable XLA with determinism enabled since not all ops are
# supported by XLA with determinism enabled.
return False
return True
return False
| Trainer |
python | pytest-dev__pytest | doc/en/example/customdirectory/conftest.py | {
"start": 90,
"end": 1071
} | class ____(pytest.Directory):
def collect(self):
# The standard pytest behavior is to loop over all `test_*.py` files and
# call `pytest_collect_file` on each file. This collector instead reads
# the `manifest.json` file and only calls `pytest_collect_file` for the
# files defined there.
manifest_path = self.path / "manifest.json"
manifest = json.loads(manifest_path.read_text(encoding="utf-8"))
ihook = self.ihook
for file in manifest["files"]:
yield from ihook.pytest_collect_file(
file_path=self.path / file, parent=self
)
@pytest.hookimpl
def pytest_collect_directory(path, parent):
# Use our custom collector for directories containing a `manifest.json` file.
if path.joinpath("manifest.json").is_file():
return ManifestDirectory.from_parent(parent=parent, path=path)
# Otherwise fallback to the standard behavior.
return None
| ManifestDirectory |
python | doocs__leetcode | solution/0700-0799/0758.Bold Words in String/Solution.py | {
"start": 352,
"end": 1523
} | class ____:
def boldWords(self, words: List[str], s: str) -> str:
trie = Trie()
for w in words:
trie.insert(w)
n = len(s)
pairs = []
for i in range(n):
node = trie
for j in range(i, n):
idx = ord(s[j])
if node.children[idx] is None:
break
node = node.children[idx]
if node.is_end:
pairs.append([i, j])
if not pairs:
return s
st, ed = pairs[0]
t = []
for a, b in pairs[1:]:
if ed + 1 < a:
t.append([st, ed])
st, ed = a, b
else:
ed = max(ed, b)
t.append([st, ed])
ans = []
i = j = 0
while i < n:
if j == len(t):
ans.append(s[i:])
break
st, ed = t[j]
if i < st:
ans.append(s[i:st])
ans.append('<b>')
ans.append(s[st : ed + 1])
ans.append('</b>')
j += 1
i = ed + 1
return ''.join(ans)
| Solution |
python | kubernetes-client__python | kubernetes/client/api/resource_v1beta2_api.py | {
"start": 543,
"end": 450723
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def create_device_class(self, body, **kwargs): # noqa: E501
"""create_device_class # noqa: E501
create a DeviceClass # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_device_class(body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param V1beta2DeviceClass body: (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1beta2DeviceClass
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.create_device_class_with_http_info(body, **kwargs) # noqa: E501
def create_device_class_with_http_info(self, body, **kwargs): # noqa: E501
"""create_device_class # noqa: E501
create a DeviceClass # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_device_class_with_http_info(body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param V1beta2DeviceClass body: (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1beta2DeviceClass, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'body',
'pretty',
'dry_run',
'field_manager',
'field_validation'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method create_device_class" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'body' is set
if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
local_var_params['body'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `body` when calling `create_device_class`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/resource.k8s.io/v1beta2/deviceclasses', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1beta2DeviceClass', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def create_namespaced_resource_claim(self, namespace, body, **kwargs): # noqa: E501
"""create_namespaced_resource_claim # noqa: E501
create a ResourceClaim # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_namespaced_resource_claim(namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1beta2ResourceClaim body: (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1beta2ResourceClaim
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.create_namespaced_resource_claim_with_http_info(namespace, body, **kwargs) # noqa: E501
def create_namespaced_resource_claim_with_http_info(self, namespace, body, **kwargs): # noqa: E501
"""create_namespaced_resource_claim # noqa: E501
create a ResourceClaim # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_namespaced_resource_claim_with_http_info(namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1beta2ResourceClaim body: (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1beta2ResourceClaim, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'namespace',
'body',
'pretty',
'dry_run',
'field_manager',
'field_validation'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method create_namespaced_resource_claim" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'namespace' is set
if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
local_var_params['namespace'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `namespace` when calling `create_namespaced_resource_claim`") # noqa: E501
# verify the required parameter 'body' is set
if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
local_var_params['body'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `body` when calling `create_namespaced_resource_claim`") # noqa: E501
collection_formats = {}
path_params = {}
if 'namespace' in local_var_params:
path_params['namespace'] = local_var_params['namespace'] # noqa: E501
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/resource.k8s.io/v1beta2/namespaces/{namespace}/resourceclaims', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1beta2ResourceClaim', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def create_namespaced_resource_claim_template(self, namespace, body, **kwargs):  # noqa: E501
    """create_namespaced_resource_claim_template  # noqa: E501

    create a ResourceClaimTemplate  # noqa: E501
    Synchronous by default; pass async_req=True to perform the request
    asynchronously, in which case the returned thread's get() yields the
    result.

    >>> thread = api.create_namespaced_resource_claim_template(namespace, body, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param V1beta2ResourceClaimTemplate body: (required)
    :param str pretty: if 'true', the output is pretty printed
    :param str dry_run: when present, modifications are not persisted; valid value: All
    :param str field_manager: name associated with the actor or entity making these changes
    :param str field_validation: server handling of unknown/duplicate fields (Ignore, Warn or Strict)
    :param _preload_content: if False, the raw urllib3.HTTPResponse is returned without reading/decoding. Default is True.
    :param _request_timeout: total request timeout, or a (connection, read) tuple
    :return: V1beta2ResourceClaimTemplate
             If the method is called asynchronously,
             returns the request thread.
    """
    # Delegate to the *_with_http_info variant, forcing it to return only
    # the deserialized body rather than the (data, status, headers) tuple.
    call_kwargs = dict(kwargs, _return_http_data_only=True)
    return self.create_namespaced_resource_claim_template_with_http_info(namespace, body, **call_kwargs)  # noqa: E501
def create_namespaced_resource_claim_template_with_http_info(self, namespace, body, **kwargs):  # noqa: E501
    """create_namespaced_resource_claim_template  # noqa: E501

    create a ResourceClaimTemplate  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.create_namespaced_resource_claim_template_with_http_info(namespace, body, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param V1beta2ResourceClaimTemplate body: (required)
    :param str pretty: if 'true', the output is pretty printed
    :param str dry_run: when present, modifications are not persisted; valid value: All
    :param str field_manager: name associated with the actor or entity making these changes
    :param str field_validation: server handling of unknown/duplicate fields (Ignore, Warn or Strict)
    :param _return_http_data_only: response data without head status code and headers
    :param _preload_content: if False, the raw urllib3.HTTPResponse is returned without reading/decoding. Default is True.
    :param _request_timeout: total request timeout, or a (connection, read) tuple
    :return: tuple(V1beta2ResourceClaimTemplate, status_code(int), headers(HTTPHeaderDict))
             If the method is called asynchronously,
             returns the request thread.
    :raises ApiTypeError: if an unsupported keyword argument is supplied
    :raises ApiValueError: if a required parameter is missing (when client-side validation is on)
    """
    # Snapshot of the named arguments (namespace, body) plus the kwargs
    # dict itself; validated optional parameters are merged in below.
    local_var_params = locals()

    all_params = [
        'namespace',
        'body',
        'pretty',
        'dry_run',
        'field_manager',
        'field_validation'
    ]
    all_params.extend(
        [
            'async_req',
            '_return_http_data_only',
            '_preload_content',
            '_request_timeout'
        ]
    )

    # Reject unknown keyword arguments early so typos fail loudly.
    # dict.items() replaces the legacy six.iteritems() call; this code
    # only runs on Python 3, where the two are equivalent.
    for key, val in local_var_params['kwargs'].items():
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method create_namespaced_resource_claim_template" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']

    # verify the required parameter 'namespace' is set
    if self.api_client.client_side_validation and local_var_params.get('namespace') is None:  # noqa: E501
        raise ApiValueError("Missing the required parameter `namespace` when calling `create_namespaced_resource_claim_template`")  # noqa: E501
    # verify the required parameter 'body' is set
    if self.api_client.client_side_validation and local_var_params.get('body') is None:  # noqa: E501
        raise ApiValueError("Missing the required parameter `body` when calling `create_namespaced_resource_claim_template`")  # noqa: E501

    collection_formats = {}

    path_params = {}
    if 'namespace' in local_var_params:
        path_params['namespace'] = local_var_params['namespace']  # noqa: E501

    # Optional query-string parameters, mapped to their wire names in the
    # same order the generated code emits them.
    query_params = []
    for py_name, wire_name in (
            ('pretty', 'pretty'),
            ('dry_run', 'dryRun'),
            ('field_manager', 'fieldManager'),
            ('field_validation', 'fieldValidation')):
        if local_var_params.get(py_name) is not None:
            query_params.append((wire_name, local_var_params[py_name]))

    header_params = {}
    form_params = []
    local_var_files = {}

    # None when 'body' is absent (only possible with validation disabled).
    body_params = local_var_params.get('body')

    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor'])  # noqa: E501

    # Authentication setting
    auth_settings = ['BearerToken']  # noqa: E501

    return self.api_client.call_api(
        '/apis/resource.k8s.io/v1beta2/namespaces/{namespace}/resourceclaimtemplates', 'POST',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='V1beta2ResourceClaimTemplate',  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats)
def create_resource_slice(self, body, **kwargs):  # noqa: E501
    """create_resource_slice  # noqa: E501

    create a ResourceSlice  # noqa: E501
    Synchronous by default; pass async_req=True to perform the request
    asynchronously, in which case the returned thread's get() yields the
    result.

    >>> thread = api.create_resource_slice(body, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param V1beta2ResourceSlice body: (required)
    :param str pretty: if 'true', the output is pretty printed
    :param str dry_run: when present, modifications are not persisted; valid value: All
    :param str field_manager: name associated with the actor or entity making these changes
    :param str field_validation: server handling of unknown/duplicate fields (Ignore, Warn or Strict)
    :param _preload_content: if False, the raw urllib3.HTTPResponse is returned without reading/decoding. Default is True.
    :param _request_timeout: total request timeout, or a (connection, read) tuple
    :return: V1beta2ResourceSlice
             If the method is called asynchronously,
             returns the request thread.
    """
    # Delegate to the *_with_http_info variant, forcing it to return only
    # the deserialized body rather than the (data, status, headers) tuple.
    call_kwargs = dict(kwargs, _return_http_data_only=True)
    return self.create_resource_slice_with_http_info(body, **call_kwargs)  # noqa: E501
def create_resource_slice_with_http_info(self, body, **kwargs):  # noqa: E501
    """create_resource_slice  # noqa: E501

    create a ResourceSlice  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.create_resource_slice_with_http_info(body, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param V1beta2ResourceSlice body: (required)
    :param str pretty: if 'true', the output is pretty printed
    :param str dry_run: when present, modifications are not persisted; valid value: All
    :param str field_manager: name associated with the actor or entity making these changes
    :param str field_validation: server handling of unknown/duplicate fields (Ignore, Warn or Strict)
    :param _return_http_data_only: response data without head status code and headers
    :param _preload_content: if False, the raw urllib3.HTTPResponse is returned without reading/decoding. Default is True.
    :param _request_timeout: total request timeout, or a (connection, read) tuple
    :return: tuple(V1beta2ResourceSlice, status_code(int), headers(HTTPHeaderDict))
             If the method is called asynchronously,
             returns the request thread.
    :raises ApiTypeError: if an unsupported keyword argument is supplied
    :raises ApiValueError: if a required parameter is missing (when client-side validation is on)
    """
    # Snapshot of the named argument (body) plus the kwargs dict itself;
    # validated optional parameters are merged in below.
    local_var_params = locals()

    all_params = [
        'body',
        'pretty',
        'dry_run',
        'field_manager',
        'field_validation'
    ]
    all_params.extend(
        [
            'async_req',
            '_return_http_data_only',
            '_preload_content',
            '_request_timeout'
        ]
    )

    # Reject unknown keyword arguments early so typos fail loudly.
    # dict.items() replaces the legacy six.iteritems() call; this code
    # only runs on Python 3, where the two are equivalent.
    for key, val in local_var_params['kwargs'].items():
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method create_resource_slice" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']

    # verify the required parameter 'body' is set
    if self.api_client.client_side_validation and local_var_params.get('body') is None:  # noqa: E501
        raise ApiValueError("Missing the required parameter `body` when calling `create_resource_slice`")  # noqa: E501

    collection_formats = {}

    path_params = {}

    # Optional query-string parameters, mapped to their wire names in the
    # same order the generated code emits them.
    query_params = []
    for py_name, wire_name in (
            ('pretty', 'pretty'),
            ('dry_run', 'dryRun'),
            ('field_manager', 'fieldManager'),
            ('field_validation', 'fieldValidation')):
        if local_var_params.get(py_name) is not None:
            query_params.append((wire_name, local_var_params[py_name]))

    header_params = {}
    form_params = []
    local_var_files = {}

    # None when 'body' is absent (only possible with validation disabled).
    body_params = local_var_params.get('body')

    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor'])  # noqa: E501

    # Authentication setting
    auth_settings = ['BearerToken']  # noqa: E501

    return self.api_client.call_api(
        '/apis/resource.k8s.io/v1beta2/resourceslices', 'POST',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='V1beta2ResourceSlice',  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats)
def delete_collection_device_class(self, **kwargs):  # noqa: E501
    """delete_collection_device_class  # noqa: E501

    delete collection of DeviceClass  # noqa: E501
    Synchronous by default; pass async_req=True to perform the request
    asynchronously, in which case the returned thread's get() yields the
    result.

    >>> thread = api.delete_collection_device_class(async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str pretty: if 'true', the output is pretty printed
    :param str _continue: continuation token from a previous paged list call; must be used with identical query parameters (except continue). A 410 ResourceExpired response supplies a fresh token for an inconsistent continuation. Not supported when watch is true.
    :param str dry_run: when present, modifications are not persisted; valid value: All
    :param str field_selector: restrict returned objects by their fields. Defaults to everything.
    :param int grace_period_seconds: seconds before the object should be deleted; non-negative, 0 means delete immediately; per-object default when unset
    :param bool ignore_store_read_error_with_cluster_breaking_potential: if true, trigger an unsafe deletion when normal deletion fails with a corrupt-object error; ignores finalizers and precondition checks. Defaults to false.
    :param str label_selector: restrict returned objects by their labels. Defaults to everything.
    :param int limit: maximum number of responses for a list call; the server sets `continue` on the list metadata when more items exist. Not supported if watch is true.
    :param bool orphan_dependents: deprecated in favour of propagation_policy; whether dependents are orphaned. Mutually exclusive with propagation_policy.
    :param str propagation_policy: garbage-collection policy: 'Orphan', 'Background' or 'Foreground'. Mutually exclusive with orphan_dependents.
    :param str resource_version: constraint on which resource versions may serve the request. Defaults to unset.
    :param str resource_version_match: how resourceVersion is applied to list calls. Defaults to unset.
    :param bool send_initial_events: with watch=true, begin the stream with synthetic events for the current state, followed by a \"Bookmark\" event; requires resource_version_match to be set.
    :param int timeout_seconds: timeout for the list/watch call, regardless of activity
    :param V1DeleteOptions body:
    :param _preload_content: if False, the raw urllib3.HTTPResponse is returned without reading/decoding. Default is True.
    :param _request_timeout: total request timeout, or a (connection, read) tuple
    :return: V1Status
             If the method is called asynchronously,
             returns the request thread.
    """
    # Delegate to the *_with_http_info variant, forcing it to return only
    # the deserialized body rather than the (data, status, headers) tuple.
    call_kwargs = dict(kwargs, _return_http_data_only=True)
    return self.delete_collection_device_class_with_http_info(**call_kwargs)  # noqa: E501
def delete_collection_device_class_with_http_info(self, **kwargs): # noqa: E501
"""delete_collection_device_class # noqa: E501
delete collection of DeviceClass # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_collection_device_class_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
:param bool ignore_store_read_error_with_cluster_breaking_potential: if set to true, it will trigger an unsafe deletion of the resource in case the normal deletion flow fails with a corrupt object error. A resource is considered corrupt if it can not be retrieved from the underlying storage successfully because of a) its data can not be transformed e.g. decryption failure, or b) it fails to decode into an object. NOTE: unsafe deletion ignores finalizer constraints, skips precondition checks, and removes the object from the storage. WARNING: This may potentially break the cluster if the workload associated with the resource being unsafe-deleted relies on normal deletion flow. Use only if you REALLY know what you are doing. The default value is false, and the user must opt in to enable it
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
:param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
:param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
:param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param V1DeleteOptions body:
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1Status, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'pretty',
'_continue',
'dry_run',
'field_selector',
'grace_period_seconds',
'ignore_store_read_error_with_cluster_breaking_potential',
'label_selector',
'limit',
'orphan_dependents',
'propagation_policy',
'resource_version',
'resource_version_match',
'send_initial_events',
'timeout_seconds',
'body'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_collection_device_class" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if '_continue' in local_var_params and local_var_params['_continue'] is not None: # noqa: E501
query_params.append(('continue', local_var_params['_continue'])) # noqa: E501
if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None: # noqa: E501
query_params.append(('fieldSelector', local_var_params['field_selector'])) # noqa: E501
if 'grace_period_seconds' in local_var_params and local_var_params['grace_period_seconds'] is not None: # noqa: E501
query_params.append(('gracePeriodSeconds', local_var_params['grace_period_seconds'])) # noqa: E501
if 'ignore_store_read_error_with_cluster_breaking_potential' in local_var_params and local_var_params['ignore_store_read_error_with_cluster_breaking_potential'] is not None: # noqa: E501
query_params.append(('ignoreStoreReadErrorWithClusterBreakingPotential', local_var_params['ignore_store_read_error_with_cluster_breaking_potential'])) # noqa: E501
if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None: # noqa: E501
query_params.append(('labelSelector', local_var_params['label_selector'])) # noqa: E501
if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501
query_params.append(('limit', local_var_params['limit'])) # noqa: E501
if 'orphan_dependents' in local_var_params and local_var_params['orphan_dependents'] is not None: # noqa: E501
query_params.append(('orphanDependents', local_var_params['orphan_dependents'])) # noqa: E501
if 'propagation_policy' in local_var_params and local_var_params['propagation_policy'] is not None: # noqa: E501
query_params.append(('propagationPolicy', local_var_params['propagation_policy'])) # noqa: E501
if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None: # noqa: E501
query_params.append(('resourceVersion', local_var_params['resource_version'])) # noqa: E501
if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None: # noqa: E501
query_params.append(('resourceVersionMatch', local_var_params['resource_version_match'])) # noqa: E501
if 'send_initial_events' in local_var_params and local_var_params['send_initial_events'] is not None: # noqa: E501
query_params.append(('sendInitialEvents', local_var_params['send_initial_events'])) # noqa: E501
if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None: # noqa: E501
query_params.append(('timeoutSeconds', local_var_params['timeout_seconds'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/resource.k8s.io/v1beta2/deviceclasses', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Status', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def delete_collection_namespaced_resource_claim(self, namespace, **kwargs): # noqa: E501
"""delete_collection_namespaced_resource_claim # noqa: E501
delete collection of ResourceClaim # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_collection_namespaced_resource_claim(namespace, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
:param bool ignore_store_read_error_with_cluster_breaking_potential: if set to true, it will trigger an unsafe deletion of the resource in case the normal deletion flow fails with a corrupt object error. A resource is considered corrupt if it can not be retrieved from the underlying storage successfully because of a) its data can not be transformed e.g. decryption failure, or b) it fails to decode into an object. NOTE: unsafe deletion ignores finalizer constraints, skips precondition checks, and removes the object from the storage. WARNING: This may potentially break the cluster if the workload associated with the resource being unsafe-deleted relies on normal deletion flow. Use only if you REALLY know what you are doing. The default value is false, and the user must opt in to enable it
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
:param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
:param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
:param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param V1DeleteOptions body:
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1Status
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.delete_collection_namespaced_resource_claim_with_http_info(namespace, **kwargs) # noqa: E501
def delete_collection_namespaced_resource_claim_with_http_info(self, namespace, **kwargs): # noqa: E501
"""delete_collection_namespaced_resource_claim # noqa: E501
delete collection of ResourceClaim # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_collection_namespaced_resource_claim_with_http_info(namespace, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
:param bool ignore_store_read_error_with_cluster_breaking_potential: if set to true, it will trigger an unsafe deletion of the resource in case the normal deletion flow fails with a corrupt object error. A resource is considered corrupt if it can not be retrieved from the underlying storage successfully because of a) its data can not be transformed e.g. decryption failure, or b) it fails to decode into an object. NOTE: unsafe deletion ignores finalizer constraints, skips precondition checks, and removes the object from the storage. WARNING: This may potentially break the cluster if the workload associated with the resource being unsafe-deleted relies on normal deletion flow. Use only if you REALLY know what you are doing. The default value is false, and the user must opt in to enable it
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
:param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
:param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
:param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param V1DeleteOptions body:
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1Status, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'namespace',
'pretty',
'_continue',
'dry_run',
'field_selector',
'grace_period_seconds',
'ignore_store_read_error_with_cluster_breaking_potential',
'label_selector',
'limit',
'orphan_dependents',
'propagation_policy',
'resource_version',
'resource_version_match',
'send_initial_events',
'timeout_seconds',
'body'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_collection_namespaced_resource_claim" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'namespace' is set
if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
local_var_params['namespace'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `namespace` when calling `delete_collection_namespaced_resource_claim`") # noqa: E501
collection_formats = {}
path_params = {}
if 'namespace' in local_var_params:
path_params['namespace'] = local_var_params['namespace'] # noqa: E501
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if '_continue' in local_var_params and local_var_params['_continue'] is not None: # noqa: E501
query_params.append(('continue', local_var_params['_continue'])) # noqa: E501
if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None: # noqa: E501
query_params.append(('fieldSelector', local_var_params['field_selector'])) # noqa: E501
if 'grace_period_seconds' in local_var_params and local_var_params['grace_period_seconds'] is not None: # noqa: E501
query_params.append(('gracePeriodSeconds', local_var_params['grace_period_seconds'])) # noqa: E501
if 'ignore_store_read_error_with_cluster_breaking_potential' in local_var_params and local_var_params['ignore_store_read_error_with_cluster_breaking_potential'] is not None: # noqa: E501
query_params.append(('ignoreStoreReadErrorWithClusterBreakingPotential', local_var_params['ignore_store_read_error_with_cluster_breaking_potential'])) # noqa: E501
if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None: # noqa: E501
query_params.append(('labelSelector', local_var_params['label_selector'])) # noqa: E501
if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501
query_params.append(('limit', local_var_params['limit'])) # noqa: E501
if 'orphan_dependents' in local_var_params and local_var_params['orphan_dependents'] is not None: # noqa: E501
query_params.append(('orphanDependents', local_var_params['orphan_dependents'])) # noqa: E501
if 'propagation_policy' in local_var_params and local_var_params['propagation_policy'] is not None: # noqa: E501
query_params.append(('propagationPolicy', local_var_params['propagation_policy'])) # noqa: E501
if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None: # noqa: E501
query_params.append(('resourceVersion', local_var_params['resource_version'])) # noqa: E501
if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None: # noqa: E501
query_params.append(('resourceVersionMatch', local_var_params['resource_version_match'])) # noqa: E501
if 'send_initial_events' in local_var_params and local_var_params['send_initial_events'] is not None: # noqa: E501
query_params.append(('sendInitialEvents', local_var_params['send_initial_events'])) # noqa: E501
if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None: # noqa: E501
query_params.append(('timeoutSeconds', local_var_params['timeout_seconds'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/resource.k8s.io/v1beta2/namespaces/{namespace}/resourceclaims', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Status', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def delete_collection_namespaced_resource_claim_template(self, namespace, **kwargs): # noqa: E501
"""delete_collection_namespaced_resource_claim_template # noqa: E501
delete collection of ResourceClaimTemplate # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_collection_namespaced_resource_claim_template(namespace, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
:param bool ignore_store_read_error_with_cluster_breaking_potential: if set to true, it will trigger an unsafe deletion of the resource in case the normal deletion flow fails with a corrupt object error. A resource is considered corrupt if it can not be retrieved from the underlying storage successfully because of a) its data can not be transformed e.g. decryption failure, or b) it fails to decode into an object. NOTE: unsafe deletion ignores finalizer constraints, skips precondition checks, and removes the object from the storage. WARNING: This may potentially break the cluster if the workload associated with the resource being unsafe-deleted relies on normal deletion flow. Use only if you REALLY know what you are doing. The default value is false, and the user must opt in to enable it
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
:param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
:param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
:param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param V1DeleteOptions body:
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1Status
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.delete_collection_namespaced_resource_claim_template_with_http_info(namespace, **kwargs) # noqa: E501
def delete_collection_namespaced_resource_claim_template_with_http_info(self, namespace, **kwargs): # noqa: E501
"""delete_collection_namespaced_resource_claim_template # noqa: E501
delete collection of ResourceClaimTemplate # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_collection_namespaced_resource_claim_template_with_http_info(namespace, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
:param bool ignore_store_read_error_with_cluster_breaking_potential: if set to true, it will trigger an unsafe deletion of the resource in case the normal deletion flow fails with a corrupt object error. A resource is considered corrupt if it can not be retrieved from the underlying storage successfully because of a) its data can not be transformed e.g. decryption failure, or b) it fails to decode into an object. NOTE: unsafe deletion ignores finalizer constraints, skips precondition checks, and removes the object from the storage. WARNING: This may potentially break the cluster if the workload associated with the resource being unsafe-deleted relies on normal deletion flow. Use only if you REALLY know what you are doing. The default value is false, and the user must opt in to enable it
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
:param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
:param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
:param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param V1DeleteOptions body:
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1Status, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'namespace',
'pretty',
'_continue',
'dry_run',
'field_selector',
'grace_period_seconds',
'ignore_store_read_error_with_cluster_breaking_potential',
'label_selector',
'limit',
'orphan_dependents',
'propagation_policy',
'resource_version',
'resource_version_match',
'send_initial_events',
'timeout_seconds',
'body'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_collection_namespaced_resource_claim_template" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'namespace' is set
if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
local_var_params['namespace'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `namespace` when calling `delete_collection_namespaced_resource_claim_template`") # noqa: E501
collection_formats = {}
path_params = {}
if 'namespace' in local_var_params:
path_params['namespace'] = local_var_params['namespace'] # noqa: E501
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if '_continue' in local_var_params and local_var_params['_continue'] is not None: # noqa: E501
query_params.append(('continue', local_var_params['_continue'])) # noqa: E501
if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None: # noqa: E501
query_params.append(('fieldSelector', local_var_params['field_selector'])) # noqa: E501
if 'grace_period_seconds' in local_var_params and local_var_params['grace_period_seconds'] is not None: # noqa: E501
query_params.append(('gracePeriodSeconds', local_var_params['grace_period_seconds'])) # noqa: E501
if 'ignore_store_read_error_with_cluster_breaking_potential' in local_var_params and local_var_params['ignore_store_read_error_with_cluster_breaking_potential'] is not None: # noqa: E501
query_params.append(('ignoreStoreReadErrorWithClusterBreakingPotential', local_var_params['ignore_store_read_error_with_cluster_breaking_potential'])) # noqa: E501
if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None: # noqa: E501
query_params.append(('labelSelector', local_var_params['label_selector'])) # noqa: E501
if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501
query_params.append(('limit', local_var_params['limit'])) # noqa: E501
if 'orphan_dependents' in local_var_params and local_var_params['orphan_dependents'] is not None: # noqa: E501
query_params.append(('orphanDependents', local_var_params['orphan_dependents'])) # noqa: E501
if 'propagation_policy' in local_var_params and local_var_params['propagation_policy'] is not None: # noqa: E501
query_params.append(('propagationPolicy', local_var_params['propagation_policy'])) # noqa: E501
if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None: # noqa: E501
query_params.append(('resourceVersion', local_var_params['resource_version'])) # noqa: E501
if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None: # noqa: E501
query_params.append(('resourceVersionMatch', local_var_params['resource_version_match'])) # noqa: E501
if 'send_initial_events' in local_var_params and local_var_params['send_initial_events'] is not None: # noqa: E501
query_params.append(('sendInitialEvents', local_var_params['send_initial_events'])) # noqa: E501
if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None: # noqa: E501
query_params.append(('timeoutSeconds', local_var_params['timeout_seconds'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/resource.k8s.io/v1beta2/namespaces/{namespace}/resourceclaimtemplates', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Status', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def delete_collection_resource_slice(self, **kwargs): # noqa: E501
"""delete_collection_resource_slice # noqa: E501
delete collection of ResourceSlice # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_collection_resource_slice(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
:param bool ignore_store_read_error_with_cluster_breaking_potential: if set to true, it will trigger an unsafe deletion of the resource in case the normal deletion flow fails with a corrupt object error. A resource is considered corrupt if it can not be retrieved from the underlying storage successfully because of a) its data can not be transformed e.g. decryption failure, or b) it fails to decode into an object. NOTE: unsafe deletion ignores finalizer constraints, skips precondition checks, and removes the object from the storage. WARNING: This may potentially break the cluster if the workload associated with the resource being unsafe-deleted relies on normal deletion flow. Use only if you REALLY know what you are doing. The default value is false, and the user must opt in to enable it
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
:param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
:param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
:param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param V1DeleteOptions body:
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1Status
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.delete_collection_resource_slice_with_http_info(**kwargs) # noqa: E501
def delete_collection_resource_slice_with_http_info(self, **kwargs):  # noqa: E501
    """delete_collection_resource_slice  # noqa: E501

    delete collection of ResourceSlice  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.delete_collection_resource_slice_with_http_info(async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
    :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
    :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
    :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
    :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
    :param bool ignore_store_read_error_with_cluster_breaking_potential: if set to true, it will trigger an unsafe deletion of the resource in case the normal deletion flow fails with a corrupt object error. A resource is considered corrupt if it can not be retrieved from the underlying storage successfully because of a) its data can not be transformed e.g. decryption failure, or b) it fails to decode into an object. NOTE: unsafe deletion ignores finalizer constraints, skips precondition checks, and removes the object from the storage. WARNING: This may potentially break the cluster if the workload associated with the resource being unsafe-deleted relies on normal deletion flow. Use only if you REALLY know what you are doing. The default value is false, and the user must opt in to enable it
    :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
    :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
    :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
    :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
    :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
    :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
    :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
    :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
    :param V1DeleteOptions body:
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: tuple(V1Status, status_code(int), headers(HTTPHeaderDict))
             If the method is called asynchronously,
             returns the request thread.
    """

    # Snapshot of this frame's names ({'self': ..., 'kwargs': ...}); the
    # validated keyword arguments are flattened into this mapping below,
    # so this call must happen before any other local is bound.
    local_var_params = locals()

    # Endpoint-specific keyword arguments this method understands.
    all_params = [
        'pretty',
        '_continue',
        'dry_run',
        'field_selector',
        'grace_period_seconds',
        'ignore_store_read_error_with_cluster_breaking_potential',
        'label_selector',
        'limit',
        'orphan_dependents',
        'propagation_policy',
        'resource_version',
        'resource_version_match',
        'send_initial_events',
        'timeout_seconds',
        'body'
    ]
    # Framework-level options accepted by every generated API method.
    all_params.extend(
        [
            'async_req',
            '_return_http_data_only',
            '_preload_content',
            '_request_timeout'
        ]
    )

    # Reject unknown keyword arguments early, then flatten the accepted
    # ones into local_var_params so each option can be looked up by name.
    for key, val in six.iteritems(local_var_params['kwargs']):
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method delete_collection_resource_slice" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']

    collection_formats = {}

    # Collection-scoped endpoint: no path parameters.
    path_params = {}

    # Map snake_case options onto the camelCase query parameters the
    # Kubernetes API expects; unset (None) options are simply omitted.
    query_params = []
    if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
        query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
    if '_continue' in local_var_params and local_var_params['_continue'] is not None:  # noqa: E501
        query_params.append(('continue', local_var_params['_continue']))  # noqa: E501
    if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None:  # noqa: E501
        query_params.append(('dryRun', local_var_params['dry_run']))  # noqa: E501
    if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None:  # noqa: E501
        query_params.append(('fieldSelector', local_var_params['field_selector']))  # noqa: E501
    if 'grace_period_seconds' in local_var_params and local_var_params['grace_period_seconds'] is not None:  # noqa: E501
        query_params.append(('gracePeriodSeconds', local_var_params['grace_period_seconds']))  # noqa: E501
    if 'ignore_store_read_error_with_cluster_breaking_potential' in local_var_params and local_var_params['ignore_store_read_error_with_cluster_breaking_potential'] is not None:  # noqa: E501
        query_params.append(('ignoreStoreReadErrorWithClusterBreakingPotential', local_var_params['ignore_store_read_error_with_cluster_breaking_potential']))  # noqa: E501
    if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None:  # noqa: E501
        query_params.append(('labelSelector', local_var_params['label_selector']))  # noqa: E501
    if 'limit' in local_var_params and local_var_params['limit'] is not None:  # noqa: E501
        query_params.append(('limit', local_var_params['limit']))  # noqa: E501
    if 'orphan_dependents' in local_var_params and local_var_params['orphan_dependents'] is not None:  # noqa: E501
        query_params.append(('orphanDependents', local_var_params['orphan_dependents']))  # noqa: E501
    if 'propagation_policy' in local_var_params and local_var_params['propagation_policy'] is not None:  # noqa: E501
        query_params.append(('propagationPolicy', local_var_params['propagation_policy']))  # noqa: E501
    if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None:  # noqa: E501
        query_params.append(('resourceVersion', local_var_params['resource_version']))  # noqa: E501
    if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None:  # noqa: E501
        query_params.append(('resourceVersionMatch', local_var_params['resource_version_match']))  # noqa: E501
    if 'send_initial_events' in local_var_params and local_var_params['send_initial_events'] is not None:  # noqa: E501
        query_params.append(('sendInitialEvents', local_var_params['send_initial_events']))  # noqa: E501
    if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None:  # noqa: E501
        query_params.append(('timeoutSeconds', local_var_params['timeout_seconds']))  # noqa: E501

    header_params = {}

    form_params = []
    local_var_files = {}

    # Optional V1DeleteOptions payload is sent as the request body.
    body_params = None
    if 'body' in local_var_params:
        body_params = local_var_params['body']
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor'])  # noqa: E501

    # Authentication setting
    auth_settings = ['BearerToken']  # noqa: E501

    # Delegate transport, (de)serialization and async handling to the
    # shared ApiClient.
    return self.api_client.call_api(
        '/apis/resource.k8s.io/v1beta2/resourceslices', 'DELETE',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='V1Status',  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats)
def delete_device_class(self, name, **kwargs):  # noqa: E501
    """delete_device_class  # noqa: E501

    Delete a single DeviceClass by name and return the deleted object.

    The request is synchronous by default; pass ``async_req=True`` to get
    back a thread whose ``get()`` yields the result instead:

    >>> thread = api.delete_device_class(name, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the DeviceClass (required)
    :param str pretty: pretty-print the output if 'true'
    :param str dry_run: when present, modifications are not persisted; 'All' processes every dry-run stage
    :param int grace_period_seconds: non-negative seconds before deletion; zero means delete immediately
    :param bool ignore_store_read_error_with_cluster_breaking_potential: opt in to unsafe deletion of corrupt objects (skips finalizers and precondition checks)
    :param bool orphan_dependents: deprecated; use propagation_policy instead
    :param str propagation_policy: garbage-collection policy: 'Orphan', 'Background' or 'Foreground'
    :param V1DeleteOptions body: standard delete options
    :param _preload_content: if False, the raw urllib3.HTTPResponse is
                             returned without reading/decoding response data
    :param _request_timeout: total request timeout, or a
                             (connection, read) timeout pair
    :return: V1beta2DeviceClass
             If the method is called asynchronously,
             returns the request thread.
    """
    # Callers of this convenience wrapper only want the deserialized
    # object, so force the lower-level call to drop status and headers.
    kwargs.update(_return_http_data_only=True)
    return self.delete_device_class_with_http_info(name, **kwargs)  # noqa: E501
def delete_device_class_with_http_info(self, name, **kwargs):  # noqa: E501
    """delete_device_class  # noqa: E501

    delete a DeviceClass  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.delete_device_class_with_http_info(name, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the DeviceClass (required)
    :param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
    :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
    :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
    :param bool ignore_store_read_error_with_cluster_breaking_potential: if set to true, it will trigger an unsafe deletion of the resource in case the normal deletion flow fails with a corrupt object error. A resource is considered corrupt if it can not be retrieved from the underlying storage successfully because of a) its data can not be transformed e.g. decryption failure, or b) it fails to decode into an object. NOTE: unsafe deletion ignores finalizer constraints, skips precondition checks, and removes the object from the storage. WARNING: This may potentially break the cluster if the workload associated with the resource being unsafe-deleted relies on normal deletion flow. Use only if you REALLY know what you are doing. The default value is false, and the user must opt in to enable it
    :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
    :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
    :param V1DeleteOptions body:
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: tuple(V1beta2DeviceClass, status_code(int), headers(HTTPHeaderDict))
             If the method is called asynchronously,
             returns the request thread.
    """

    # Snapshot of this frame's names ({'self': ..., 'name': ...,
    # 'kwargs': ...}); validated keyword arguments are flattened into this
    # mapping below, so this call must precede any other local binding.
    local_var_params = locals()

    # Keyword arguments this endpoint understands ('name' is positional
    # but also validated through this list).
    all_params = [
        'name',
        'pretty',
        'dry_run',
        'grace_period_seconds',
        'ignore_store_read_error_with_cluster_breaking_potential',
        'orphan_dependents',
        'propagation_policy',
        'body'
    ]
    # Framework-level options accepted by every generated API method.
    all_params.extend(
        [
            'async_req',
            '_return_http_data_only',
            '_preload_content',
            '_request_timeout'
        ]
    )

    # Reject unknown keyword arguments early, then flatten the accepted
    # ones into local_var_params so each option can be looked up by name.
    for key, val in six.iteritems(local_var_params['kwargs']):
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method delete_device_class" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']
    # verify the required parameter 'name' is set
    if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
                                                   local_var_params['name'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `name` when calling `delete_device_class`")  # noqa: E501

    collection_formats = {}

    # 'name' is interpolated into the request path.
    path_params = {}
    if 'name' in local_var_params:
        path_params['name'] = local_var_params['name']  # noqa: E501

    # Map snake_case options onto the camelCase query parameters the
    # Kubernetes API expects; unset (None) options are simply omitted.
    query_params = []
    if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
        query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
    if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None:  # noqa: E501
        query_params.append(('dryRun', local_var_params['dry_run']))  # noqa: E501
    if 'grace_period_seconds' in local_var_params and local_var_params['grace_period_seconds'] is not None:  # noqa: E501
        query_params.append(('gracePeriodSeconds', local_var_params['grace_period_seconds']))  # noqa: E501
    if 'ignore_store_read_error_with_cluster_breaking_potential' in local_var_params and local_var_params['ignore_store_read_error_with_cluster_breaking_potential'] is not None:  # noqa: E501
        query_params.append(('ignoreStoreReadErrorWithClusterBreakingPotential', local_var_params['ignore_store_read_error_with_cluster_breaking_potential']))  # noqa: E501
    if 'orphan_dependents' in local_var_params and local_var_params['orphan_dependents'] is not None:  # noqa: E501
        query_params.append(('orphanDependents', local_var_params['orphan_dependents']))  # noqa: E501
    if 'propagation_policy' in local_var_params and local_var_params['propagation_policy'] is not None:  # noqa: E501
        query_params.append(('propagationPolicy', local_var_params['propagation_policy']))  # noqa: E501

    header_params = {}

    form_params = []
    local_var_files = {}

    # Optional V1DeleteOptions payload is sent as the request body.
    body_params = None
    if 'body' in local_var_params:
        body_params = local_var_params['body']
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor'])  # noqa: E501

    # Authentication setting
    auth_settings = ['BearerToken']  # noqa: E501

    # Delegate transport, (de)serialization and async handling to the
    # shared ApiClient.
    return self.api_client.call_api(
        '/apis/resource.k8s.io/v1beta2/deviceclasses/{name}', 'DELETE',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='V1beta2DeviceClass',  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats)
def delete_namespaced_resource_claim(self, name, namespace, **kwargs):  # noqa: E501
    """delete_namespaced_resource_claim  # noqa: E501

    Delete a single ResourceClaim in a namespace and return the deleted
    object.

    The request is synchronous by default; pass ``async_req=True`` to get
    back a thread whose ``get()`` yields the result instead:

    >>> thread = api.delete_namespaced_resource_claim(name, namespace, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the ResourceClaim (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param str pretty: pretty-print the output if 'true'
    :param str dry_run: when present, modifications are not persisted; 'All' processes every dry-run stage
    :param int grace_period_seconds: non-negative seconds before deletion; zero means delete immediately
    :param bool ignore_store_read_error_with_cluster_breaking_potential: opt in to unsafe deletion of corrupt objects (skips finalizers and precondition checks)
    :param bool orphan_dependents: deprecated; use propagation_policy instead
    :param str propagation_policy: garbage-collection policy: 'Orphan', 'Background' or 'Foreground'
    :param V1DeleteOptions body: standard delete options
    :param _preload_content: if False, the raw urllib3.HTTPResponse is
                             returned without reading/decoding response data
    :param _request_timeout: total request timeout, or a
                             (connection, read) timeout pair
    :return: V1beta2ResourceClaim
             If the method is called asynchronously,
             returns the request thread.
    """
    # Callers of this convenience wrapper only want the deserialized
    # object, so force the lower-level call to drop status and headers.
    kwargs.update(_return_http_data_only=True)
    return self.delete_namespaced_resource_claim_with_http_info(name, namespace, **kwargs)  # noqa: E501
def delete_namespaced_resource_claim_with_http_info(self, name, namespace, **kwargs):  # noqa: E501
    """delete_namespaced_resource_claim  # noqa: E501

    delete a ResourceClaim  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.delete_namespaced_resource_claim_with_http_info(name, namespace, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the ResourceClaim (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
    :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
    :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
    :param bool ignore_store_read_error_with_cluster_breaking_potential: if set to true, it will trigger an unsafe deletion of the resource in case the normal deletion flow fails with a corrupt object error. A resource is considered corrupt if it can not be retrieved from the underlying storage successfully because of a) its data can not be transformed e.g. decryption failure, or b) it fails to decode into an object. NOTE: unsafe deletion ignores finalizer constraints, skips precondition checks, and removes the object from the storage. WARNING: This may potentially break the cluster if the workload associated with the resource being unsafe-deleted relies on normal deletion flow. Use only if you REALLY know what you are doing. The default value is false, and the user must opt in to enable it
    :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
    :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
    :param V1DeleteOptions body:
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: tuple(V1beta2ResourceClaim, status_code(int), headers(HTTPHeaderDict))
             If the method is called asynchronously,
             returns the request thread.
    """

    # Snapshot of this frame's names ({'self': ..., 'name': ...,
    # 'namespace': ..., 'kwargs': ...}); validated keyword arguments are
    # flattened into this mapping below, so this call must precede any
    # other local binding.
    local_var_params = locals()

    # Keyword arguments this endpoint understands ('name'/'namespace' are
    # positional but also validated through this list).
    all_params = [
        'name',
        'namespace',
        'pretty',
        'dry_run',
        'grace_period_seconds',
        'ignore_store_read_error_with_cluster_breaking_potential',
        'orphan_dependents',
        'propagation_policy',
        'body'
    ]
    # Framework-level options accepted by every generated API method.
    all_params.extend(
        [
            'async_req',
            '_return_http_data_only',
            '_preload_content',
            '_request_timeout'
        ]
    )

    # Reject unknown keyword arguments early, then flatten the accepted
    # ones into local_var_params so each option can be looked up by name.
    for key, val in six.iteritems(local_var_params['kwargs']):
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method delete_namespaced_resource_claim" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']
    # verify the required parameter 'name' is set
    if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
                                                   local_var_params['name'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `name` when calling `delete_namespaced_resource_claim`")  # noqa: E501
    # verify the required parameter 'namespace' is set
    if self.api_client.client_side_validation and ('namespace' not in local_var_params or  # noqa: E501
                                                   local_var_params['namespace'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `namespace` when calling `delete_namespaced_resource_claim`")  # noqa: E501

    collection_formats = {}

    # 'name' and 'namespace' are interpolated into the request path.
    path_params = {}
    if 'name' in local_var_params:
        path_params['name'] = local_var_params['name']  # noqa: E501
    if 'namespace' in local_var_params:
        path_params['namespace'] = local_var_params['namespace']  # noqa: E501

    # Map snake_case options onto the camelCase query parameters the
    # Kubernetes API expects; unset (None) options are simply omitted.
    query_params = []
    if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
        query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
    if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None:  # noqa: E501
        query_params.append(('dryRun', local_var_params['dry_run']))  # noqa: E501
    if 'grace_period_seconds' in local_var_params and local_var_params['grace_period_seconds'] is not None:  # noqa: E501
        query_params.append(('gracePeriodSeconds', local_var_params['grace_period_seconds']))  # noqa: E501
    if 'ignore_store_read_error_with_cluster_breaking_potential' in local_var_params and local_var_params['ignore_store_read_error_with_cluster_breaking_potential'] is not None:  # noqa: E501
        query_params.append(('ignoreStoreReadErrorWithClusterBreakingPotential', local_var_params['ignore_store_read_error_with_cluster_breaking_potential']))  # noqa: E501
    if 'orphan_dependents' in local_var_params and local_var_params['orphan_dependents'] is not None:  # noqa: E501
        query_params.append(('orphanDependents', local_var_params['orphan_dependents']))  # noqa: E501
    if 'propagation_policy' in local_var_params and local_var_params['propagation_policy'] is not None:  # noqa: E501
        query_params.append(('propagationPolicy', local_var_params['propagation_policy']))  # noqa: E501

    header_params = {}

    form_params = []
    local_var_files = {}

    # Optional V1DeleteOptions payload is sent as the request body.
    body_params = None
    if 'body' in local_var_params:
        body_params = local_var_params['body']
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor'])  # noqa: E501

    # Authentication setting
    auth_settings = ['BearerToken']  # noqa: E501

    # Delegate transport, (de)serialization and async handling to the
    # shared ApiClient.
    return self.api_client.call_api(
        '/apis/resource.k8s.io/v1beta2/namespaces/{namespace}/resourceclaims/{name}', 'DELETE',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='V1beta2ResourceClaim',  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats)
def delete_namespaced_resource_claim_template(self, name, namespace, **kwargs):  # noqa: E501
    """Delete a ResourceClaimTemplate.  # noqa: E501

    Synchronous by default; pass ``async_req=True`` to run the request
    asynchronously, in which case the request thread is returned.

    >>> thread = api.delete_namespaced_resource_claim_template(name, namespace, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the ResourceClaimTemplate (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param str pretty: pretty-print the output when 'true'
    :param str dry_run: when present, modifications are not persisted; the only valid value is 'All'
    :param int grace_period_seconds: non-negative seconds before deletion; zero means delete immediately
    :param bool ignore_store_read_error_with_cluster_breaking_potential: opt in to unsafe deletion of corrupt objects (skips finalizers and precondition checks; dangerous)
    :param bool orphan_dependents: deprecated in favour of propagation_policy; orphan the dependents when true
    :param str propagation_policy: garbage-collection policy: 'Orphan', 'Background' or 'Foreground'
    :param V1DeleteOptions body: delete options
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
                             without reading/decoding the body. Default True.
    :param _request_timeout: total timeout in seconds, or a
                             (connection, read) tuple.
    :return: V1beta2ResourceClaimTemplate
             If the method is called asynchronously,
             returns the request thread.
    """
    # Delegate to the *_with_http_info variant, keeping only the payload.
    return self.delete_namespaced_resource_claim_template_with_http_info(
        name, namespace, **dict(kwargs, _return_http_data_only=True))  # noqa: E501
def delete_namespaced_resource_claim_template_with_http_info(self, name, namespace, **kwargs):  # noqa: E501
    """Delete a ResourceClaimTemplate, returning data, status and headers.  # noqa: E501

    Synchronous by default; pass ``async_req=True`` to run the request
    asynchronously, in which case the request thread is returned.

    >>> thread = api.delete_namespaced_resource_claim_template_with_http_info(name, namespace, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the ResourceClaimTemplate (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param str pretty: pretty-print the output when 'true'
    :param str dry_run: when present, modifications are not persisted; the only valid value is 'All'
    :param int grace_period_seconds: non-negative seconds before deletion; zero means delete immediately
    :param bool ignore_store_read_error_with_cluster_breaking_potential: opt in to unsafe deletion of corrupt objects (skips finalizers and precondition checks; dangerous)
    :param bool orphan_dependents: deprecated in favour of propagation_policy; orphan the dependents when true
    :param str propagation_policy: garbage-collection policy: 'Orphan', 'Background' or 'Foreground'
    :param V1DeleteOptions body: delete options
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
                             without reading/decoding the body. Default True.
    :param _request_timeout: total timeout in seconds, or a
                             (connection, read) tuple.
    :return: tuple(V1beta2ResourceClaimTemplate, status_code(int), headers(HTTPHeaderDict))
             If the method is called asynchronously,
             returns the request thread.
    """
    # Optional query parameters: python argument name -> wire name.
    query_names = [
        ('pretty', 'pretty'),
        ('dry_run', 'dryRun'),
        ('grace_period_seconds', 'gracePeriodSeconds'),
        ('ignore_store_read_error_with_cluster_breaking_potential',
         'ignoreStoreReadErrorWithClusterBreakingPotential'),
        ('orphan_dependents', 'orphanDependents'),
        ('propagation_policy', 'propagationPolicy'),
    ]
    accepted = (
        ['name', 'namespace', 'body']
        + [py_name for py_name, _ in query_names]
        + ['async_req', '_return_http_data_only', '_preload_content',
           '_request_timeout']
    )

    # Merge keyword arguments, rejecting anything this endpoint does not know.
    params = {'name': name, 'namespace': namespace}
    for key, val in six.iteritems(kwargs):
        if key not in accepted:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method delete_namespaced_resource_claim_template" % key
            )
        params[key] = val

    if self.api_client.client_side_validation:
        # Both path parameters are mandatory and may not be None.
        if params.get('name') is None:
            raise ApiValueError("Missing the required parameter `name` when calling `delete_namespaced_resource_claim_template`")  # noqa: E501
        if params.get('namespace') is None:
            raise ApiValueError("Missing the required parameter `namespace` when calling `delete_namespaced_resource_claim_template`")  # noqa: E501

    path_params = {'name': params['name'], 'namespace': params['namespace']}

    # Only parameters that were actually supplied go on the query string.
    query_params = [
        (wire, params[py_name])
        for py_name, wire in query_names
        if params.get(py_name) is not None
    ]

    header_params = {
        # Negotiate the response representation with the server.
        'Accept': self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor']),  # noqa: E501
    }

    return self.api_client.call_api(
        '/apis/resource.k8s.io/v1beta2/namespaces/{namespace}/resourceclaimtemplates/{name}', 'DELETE',
        path_params,
        query_params,
        header_params,
        body=params.get('body'),
        post_params=[],
        files={},
        response_type='V1beta2ResourceClaimTemplate',  # noqa: E501
        auth_settings=['BearerToken'],  # noqa: E501
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={})
def delete_resource_slice(self, name, **kwargs):  # noqa: E501
    """Delete a ResourceSlice.  # noqa: E501

    Synchronous by default; pass ``async_req=True`` to run the request
    asynchronously, in which case the request thread is returned.

    >>> thread = api.delete_resource_slice(name, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the ResourceSlice (required)
    :param str pretty: pretty-print the output when 'true'
    :param str dry_run: when present, modifications are not persisted; the only valid value is 'All'
    :param int grace_period_seconds: non-negative seconds before deletion; zero means delete immediately
    :param bool ignore_store_read_error_with_cluster_breaking_potential: opt in to unsafe deletion of corrupt objects (skips finalizers and precondition checks; dangerous)
    :param bool orphan_dependents: deprecated in favour of propagation_policy; orphan the dependents when true
    :param str propagation_policy: garbage-collection policy: 'Orphan', 'Background' or 'Foreground'
    :param V1DeleteOptions body: delete options
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
                             without reading/decoding the body. Default True.
    :param _request_timeout: total timeout in seconds, or a
                             (connection, read) tuple.
    :return: V1beta2ResourceSlice
             If the method is called asynchronously,
             returns the request thread.
    """
    # Delegate to the *_with_http_info variant, keeping only the payload.
    return self.delete_resource_slice_with_http_info(
        name, **dict(kwargs, _return_http_data_only=True))  # noqa: E501
def delete_resource_slice_with_http_info(self, name, **kwargs):  # noqa: E501
    """Delete a ResourceSlice, returning data, status and headers.  # noqa: E501

    Synchronous by default; pass ``async_req=True`` to run the request
    asynchronously, in which case the request thread is returned.

    >>> thread = api.delete_resource_slice_with_http_info(name, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the ResourceSlice (required)
    :param str pretty: pretty-print the output when 'true'
    :param str dry_run: when present, modifications are not persisted; the only valid value is 'All'
    :param int grace_period_seconds: non-negative seconds before deletion; zero means delete immediately
    :param bool ignore_store_read_error_with_cluster_breaking_potential: opt in to unsafe deletion of corrupt objects (skips finalizers and precondition checks; dangerous)
    :param bool orphan_dependents: deprecated in favour of propagation_policy; orphan the dependents when true
    :param str propagation_policy: garbage-collection policy: 'Orphan', 'Background' or 'Foreground'
    :param V1DeleteOptions body: delete options
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
                             without reading/decoding the body. Default True.
    :param _request_timeout: total timeout in seconds, or a
                             (connection, read) tuple.
    :return: tuple(V1beta2ResourceSlice, status_code(int), headers(HTTPHeaderDict))
             If the method is called asynchronously,
             returns the request thread.
    """
    # Optional query parameters: python argument name -> wire name.
    query_names = [
        ('pretty', 'pretty'),
        ('dry_run', 'dryRun'),
        ('grace_period_seconds', 'gracePeriodSeconds'),
        ('ignore_store_read_error_with_cluster_breaking_potential',
         'ignoreStoreReadErrorWithClusterBreakingPotential'),
        ('orphan_dependents', 'orphanDependents'),
        ('propagation_policy', 'propagationPolicy'),
    ]
    accepted = (
        ['name', 'body']
        + [py_name for py_name, _ in query_names]
        + ['async_req', '_return_http_data_only', '_preload_content',
           '_request_timeout']
    )

    # Merge keyword arguments, rejecting anything this endpoint does not know.
    params = {'name': name}
    for key, val in six.iteritems(kwargs):
        if key not in accepted:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method delete_resource_slice" % key
            )
        params[key] = val

    if self.api_client.client_side_validation:
        # The path parameter is mandatory and may not be None.
        if params.get('name') is None:
            raise ApiValueError("Missing the required parameter `name` when calling `delete_resource_slice`")  # noqa: E501

    path_params = {'name': params['name']}

    # Only parameters that were actually supplied go on the query string.
    query_params = [
        (wire, params[py_name])
        for py_name, wire in query_names
        if params.get(py_name) is not None
    ]

    header_params = {
        # Negotiate the response representation with the server.
        'Accept': self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor']),  # noqa: E501
    }

    return self.api_client.call_api(
        '/apis/resource.k8s.io/v1beta2/resourceslices/{name}', 'DELETE',
        path_params,
        query_params,
        header_params,
        body=params.get('body'),
        post_params=[],
        files={},
        response_type='V1beta2ResourceSlice',  # noqa: E501
        auth_settings=['BearerToken'],  # noqa: E501
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={})
def get_api_resources(self, **kwargs):  # noqa: E501
    """List the resources available in this API group version.  # noqa: E501

    Synchronous by default; pass ``async_req=True`` to run the request
    asynchronously, in which case the request thread is returned.

    >>> thread = api.get_api_resources(async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
                             without reading/decoding the body. Default True.
    :param _request_timeout: total timeout in seconds, or a
                             (connection, read) tuple.
    :return: V1APIResourceList
             If the method is called asynchronously,
             returns the request thread.
    """
    # Delegate to the *_with_http_info variant, keeping only the payload.
    return self.get_api_resources_with_http_info(
        **dict(kwargs, _return_http_data_only=True))  # noqa: E501
def get_api_resources_with_http_info(self, **kwargs):  # noqa: E501
    """List available API resources, returning data, status and headers.  # noqa: E501

    Synchronous by default; pass ``async_req=True`` to run the request
    asynchronously, in which case the request thread is returned.

    >>> thread = api.get_api_resources_with_http_info(async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
                             without reading/decoding the body. Default True.
    :param _request_timeout: total timeout in seconds, or a
                             (connection, read) tuple.
    :return: tuple(V1APIResourceList, status_code(int), headers(HTTPHeaderDict))
             If the method is called asynchronously,
             returns the request thread.
    """
    # This endpoint takes no API parameters, only client control knobs.
    accepted = ['async_req', '_return_http_data_only', '_preload_content',
                '_request_timeout']
    params = {}
    for key, val in six.iteritems(kwargs):
        if key not in accepted:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_api_resources" % key
            )
        params[key] = val

    header_params = {
        # Negotiate the response representation with the server.
        'Accept': self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor']),  # noqa: E501
    }

    return self.api_client.call_api(
        '/apis/resource.k8s.io/v1beta2/', 'GET',
        {},
        [],
        header_params,
        body=None,
        post_params=[],
        files={},
        response_type='V1APIResourceList',  # noqa: E501
        auth_settings=['BearerToken'],  # noqa: E501
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={})
def list_device_class(self, **kwargs):  # noqa: E501
    """List or watch objects of kind DeviceClass.  # noqa: E501

    Synchronous by default; pass ``async_req=True`` to run the request
    asynchronously, in which case the request thread is returned.

    >>> thread = api.list_device_class(async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str pretty: pretty-print the output when 'true'
    :param bool allow_watch_bookmarks: request BOOKMARK watch events; servers may ignore this and send bookmarks at their discretion (ignored when not watching)
    :param str _continue: continuation token from a previous list call; invalid/expired tokens yield a 410 ResourceExpired with a fresh token (unsupported when watch is true)
    :param str field_selector: restrict returned objects by field; defaults to everything
    :param str label_selector: restrict returned objects by label; defaults to everything
    :param int limit: maximum responses per list call; when more items exist the list metadata carries a `continue` token (unsupported when watch is true)
    :param str resource_version: constraint on which resource versions may serve the request; see the Kubernetes api-concepts docs; defaults to unset
    :param str resource_version_match: how resource_version is applied to list calls; recommended whenever resource_version is set; defaults to unset
    :param bool send_initial_events: with watch=true, begin the stream with synthetic events describing current state, then a synthetic Bookmark; requires resource_version_match when set
    :param int timeout_seconds: timeout for the list/watch call, regardless of activity
    :param bool watch: stream add/update/remove notifications instead of listing; specify resource_version
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
                             without reading/decoding the body. Default True.
    :param _request_timeout: total timeout in seconds, or a
                             (connection, read) tuple.
    :return: V1beta2DeviceClassList
             If the method is called asynchronously,
             returns the request thread.
    """
    # Delegate to the *_with_http_info variant, keeping only the payload.
    return self.list_device_class_with_http_info(
        **dict(kwargs, _return_http_data_only=True))  # noqa: E501
def list_device_class_with_http_info(self, **kwargs): # noqa: E501
"""list_device_class # noqa: E501
list or watch objects of kind DeviceClass # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_device_class_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
:param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
:param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1beta2DeviceClassList, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'pretty',
'allow_watch_bookmarks',
'_continue',
'field_selector',
'label_selector',
'limit',
'resource_version',
'resource_version_match',
'send_initial_events',
'timeout_seconds',
'watch'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method list_device_class" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if 'allow_watch_bookmarks' in local_var_params and local_var_params['allow_watch_bookmarks'] is not None: # noqa: E501
query_params.append(('allowWatchBookmarks', local_var_params['allow_watch_bookmarks'])) # noqa: E501
if '_continue' in local_var_params and local_var_params['_continue'] is not None: # noqa: E501
query_params.append(('continue', local_var_params['_continue'])) # noqa: E501
if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None: # noqa: E501
query_params.append(('fieldSelector', local_var_params['field_selector'])) # noqa: E501
if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None: # noqa: E501
query_params.append(('labelSelector', local_var_params['label_selector'])) # noqa: E501
if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501
query_params.append(('limit', local_var_params['limit'])) # noqa: E501
if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None: # noqa: E501
query_params.append(('resourceVersion', local_var_params['resource_version'])) # noqa: E501
if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None: # noqa: E501
query_params.append(('resourceVersionMatch', local_var_params['resource_version_match'])) # noqa: E501
if 'send_initial_events' in local_var_params and local_var_params['send_initial_events'] is not None: # noqa: E501
query_params.append(('sendInitialEvents', local_var_params['send_initial_events'])) # noqa: E501
if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None: # noqa: E501
query_params.append(('timeoutSeconds', local_var_params['timeout_seconds'])) # noqa: E501
if 'watch' in local_var_params and local_var_params['watch'] is not None: # noqa: E501
query_params.append(('watch', local_var_params['watch'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch', 'application/cbor-seq']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/resource.k8s.io/v1beta2/deviceclasses', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1beta2DeviceClassList', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def list_namespaced_resource_claim(self, namespace, **kwargs): # noqa: E501
"""list_namespaced_resource_claim # noqa: E501
list or watch objects of kind ResourceClaim # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_namespaced_resource_claim(namespace, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
:param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
:param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1beta2ResourceClaimList
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.list_namespaced_resource_claim_with_http_info(namespace, **kwargs) # noqa: E501
def list_namespaced_resource_claim_with_http_info(self, namespace, **kwargs): # noqa: E501
"""list_namespaced_resource_claim # noqa: E501
list or watch objects of kind ResourceClaim # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_namespaced_resource_claim_with_http_info(namespace, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
:param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
:param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1beta2ResourceClaimList, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'namespace',
'pretty',
'allow_watch_bookmarks',
'_continue',
'field_selector',
'label_selector',
'limit',
'resource_version',
'resource_version_match',
'send_initial_events',
'timeout_seconds',
'watch'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method list_namespaced_resource_claim" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'namespace' is set
if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
local_var_params['namespace'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `namespace` when calling `list_namespaced_resource_claim`") # noqa: E501
collection_formats = {}
path_params = {}
if 'namespace' in local_var_params:
path_params['namespace'] = local_var_params['namespace'] # noqa: E501
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if 'allow_watch_bookmarks' in local_var_params and local_var_params['allow_watch_bookmarks'] is not None: # noqa: E501
query_params.append(('allowWatchBookmarks', local_var_params['allow_watch_bookmarks'])) # noqa: E501
if '_continue' in local_var_params and local_var_params['_continue'] is not None: # noqa: E501
query_params.append(('continue', local_var_params['_continue'])) # noqa: E501
if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None: # noqa: E501
query_params.append(('fieldSelector', local_var_params['field_selector'])) # noqa: E501
if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None: # noqa: E501
query_params.append(('labelSelector', local_var_params['label_selector'])) # noqa: E501
if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501
query_params.append(('limit', local_var_params['limit'])) # noqa: E501
if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None: # noqa: E501
query_params.append(('resourceVersion', local_var_params['resource_version'])) # noqa: E501
if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None: # noqa: E501
query_params.append(('resourceVersionMatch', local_var_params['resource_version_match'])) # noqa: E501
if 'send_initial_events' in local_var_params and local_var_params['send_initial_events'] is not None: # noqa: E501
query_params.append(('sendInitialEvents', local_var_params['send_initial_events'])) # noqa: E501
if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None: # noqa: E501
query_params.append(('timeoutSeconds', local_var_params['timeout_seconds'])) # noqa: E501
if 'watch' in local_var_params and local_var_params['watch'] is not None: # noqa: E501
query_params.append(('watch', local_var_params['watch'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch', 'application/cbor-seq']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/resource.k8s.io/v1beta2/namespaces/{namespace}/resourceclaims', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1beta2ResourceClaimList', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def list_namespaced_resource_claim_template(self, namespace, **kwargs):  # noqa: E501
    """Return the deserialized ResourceClaimTemplate list for a namespace.

    list or watch objects of kind ResourceClaimTemplate.  This is a
    convenience wrapper around
    ``list_namespaced_resource_claim_template_with_http_info`` that
    discards the HTTP status code and headers and returns only the
    response body.

    The request is synchronous by default; pass ``async_req=True`` to get
    the request thread back instead, and call ``thread.get()`` for the
    result.

    :param str namespace: object name and auth scope, such as for teams
        and projects (required)
    :param kwargs: optional list options (``pretty``,
        ``allow_watch_bookmarks``, ``_continue``, ``field_selector``,
        ``label_selector``, ``limit``, ``resource_version``,
        ``resource_version_match``, ``send_initial_events``,
        ``timeout_seconds``, ``watch``) plus the client control flags
        ``async_req``, ``_preload_content`` and ``_request_timeout``.
        See ``list_namespaced_resource_claim_template_with_http_info``
        for the full option descriptions.
    :return: V1beta2ResourceClaimTemplateList, or the request thread when
        called asynchronously.
    """
    # Force body-only return; callers wanting status/headers use the
    # *_with_http_info variant directly.
    kwargs['_return_http_data_only'] = True
    return self.list_namespaced_resource_claim_template_with_http_info(
        namespace, **kwargs)
def list_namespaced_resource_claim_template_with_http_info(self, namespace, **kwargs):  # noqa: E501
    """List/watch ResourceClaimTemplate objects, returning full HTTP info.

    list or watch objects of kind ResourceClaimTemplate in the given
    namespace.  Synchronous by default; pass ``async_req=True`` for an
    asynchronous request (``thread.get()`` then yields the result).

    :param str namespace: object name and auth scope, such as for teams
        and projects (required)
    :param kwargs: optional Kubernetes list options, forwarded as query
        parameters when supplied and not None: ``pretty``,
        ``allow_watch_bookmarks``, ``_continue``, ``field_selector``,
        ``label_selector``, ``limit``, ``resource_version``,
        ``resource_version_match``, ``send_initial_events``,
        ``timeout_seconds``, ``watch``.  Client control flags
        ``async_req``, ``_return_http_data_only``, ``_preload_content``
        (default True) and ``_request_timeout`` are also accepted.
    :raises ApiTypeError: if an unrecognized keyword argument is passed.
    :raises ApiValueError: if ``namespace`` is None and client-side
        validation is enabled.
    :return: tuple(V1beta2ResourceClaimTemplateList, status_code(int),
        headers(HTTPHeaderDict)), or the request thread when called
        asynchronously.
    """
    # Python argument name -> wire (query string) name, in emission order.
    _query_names = (
        ('pretty', 'pretty'),
        ('allow_watch_bookmarks', 'allowWatchBookmarks'),
        ('_continue', 'continue'),
        ('field_selector', 'fieldSelector'),
        ('label_selector', 'labelSelector'),
        ('limit', 'limit'),
        ('resource_version', 'resourceVersion'),
        ('resource_version_match', 'resourceVersionMatch'),
        ('send_initial_events', 'sendInitialEvents'),
        ('timeout_seconds', 'timeoutSeconds'),
        ('watch', 'watch'),
    )
    _accepted = {name for name, _ in _query_names}
    _accepted.update(('async_req', '_return_http_data_only',
                      '_preload_content', '_request_timeout'))
    for key in kwargs:
        if key not in _accepted:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method list_namespaced_resource_claim_template" % key
            )
    # Verify the required parameter 'namespace' is set.
    if self.api_client.client_side_validation and namespace is None:
        raise ApiValueError("Missing the required parameter `namespace` when calling `list_namespaced_resource_claim_template`")  # noqa: E501

    path_params = {'namespace': namespace}
    # Forward only the options the caller actually supplied (None = unset).
    query_params = [(wire, kwargs[name])
                    for name, wire in _query_names
                    if kwargs.get(name) is not None]
    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch', 'application/cbor-seq'])  # noqa: E501
    }
    return self.api_client.call_api(
        '/apis/resource.k8s.io/v1beta2/namespaces/{namespace}/resourceclaimtemplates', 'GET',
        path_params,
        query_params,
        header_params,
        body=None,
        post_params=[],
        files={},
        response_type='V1beta2ResourceClaimTemplateList',  # noqa: E501
        auth_settings=['BearerToken'],
        async_req=kwargs.get('async_req'),
        _return_http_data_only=kwargs.get('_return_http_data_only'),  # noqa: E501
        _preload_content=kwargs.get('_preload_content', True),
        _request_timeout=kwargs.get('_request_timeout'),
        collection_formats={})
def list_resource_claim_for_all_namespaces(self, **kwargs):  # noqa: E501
    """Return the deserialized ResourceClaim list across all namespaces.

    list or watch objects of kind ResourceClaim.  This is a convenience
    wrapper around
    ``list_resource_claim_for_all_namespaces_with_http_info`` that
    discards the HTTP status code and headers and returns only the
    response body.

    The request is synchronous by default; pass ``async_req=True`` to get
    the request thread back instead, and call ``thread.get()`` for the
    result.

    :param kwargs: optional list options (``allow_watch_bookmarks``,
        ``_continue``, ``field_selector``, ``label_selector``, ``limit``,
        ``pretty``, ``resource_version``, ``resource_version_match``,
        ``send_initial_events``, ``timeout_seconds``, ``watch``) plus the
        client control flags ``async_req``, ``_preload_content`` and
        ``_request_timeout``.  See
        ``list_resource_claim_for_all_namespaces_with_http_info`` for the
        full option descriptions.
    :return: V1beta2ResourceClaimList, or the request thread when called
        asynchronously.
    """
    # Force body-only return; callers wanting status/headers use the
    # *_with_http_info variant directly.
    kwargs['_return_http_data_only'] = True
    return self.list_resource_claim_for_all_namespaces_with_http_info(
        **kwargs)
def list_resource_claim_for_all_namespaces_with_http_info(self, **kwargs):  # noqa: E501
    """List/watch ResourceClaim objects cluster-wide, with full HTTP info.

    list or watch objects of kind ResourceClaim across all namespaces.
    Synchronous by default; pass ``async_req=True`` for an asynchronous
    request (``thread.get()`` then yields the result).

    :param kwargs: optional Kubernetes list options, forwarded as query
        parameters when supplied and not None: ``allow_watch_bookmarks``,
        ``_continue``, ``field_selector``, ``label_selector``, ``limit``,
        ``pretty``, ``resource_version``, ``resource_version_match``,
        ``send_initial_events``, ``timeout_seconds``, ``watch``.  Client
        control flags ``async_req``, ``_return_http_data_only``,
        ``_preload_content`` (default True) and ``_request_timeout`` are
        also accepted.
    :raises ApiTypeError: if an unrecognized keyword argument is passed.
    :return: tuple(V1beta2ResourceClaimList, status_code(int),
        headers(HTTPHeaderDict)), or the request thread when called
        asynchronously.
    """
    # Python argument name -> wire (query string) name, in emission order
    # (note: 'pretty' sorts after 'limit' in this cluster-scoped variant).
    _query_names = (
        ('allow_watch_bookmarks', 'allowWatchBookmarks'),
        ('_continue', 'continue'),
        ('field_selector', 'fieldSelector'),
        ('label_selector', 'labelSelector'),
        ('limit', 'limit'),
        ('pretty', 'pretty'),
        ('resource_version', 'resourceVersion'),
        ('resource_version_match', 'resourceVersionMatch'),
        ('send_initial_events', 'sendInitialEvents'),
        ('timeout_seconds', 'timeoutSeconds'),
        ('watch', 'watch'),
    )
    _accepted = {name for name, _ in _query_names}
    _accepted.update(('async_req', '_return_http_data_only',
                      '_preload_content', '_request_timeout'))
    for key in kwargs:
        if key not in _accepted:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method list_resource_claim_for_all_namespaces" % key
            )

    # Forward only the options the caller actually supplied (None = unset).
    query_params = [(wire, kwargs[name])
                    for name, wire in _query_names
                    if kwargs.get(name) is not None]
    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch', 'application/cbor-seq'])  # noqa: E501
    }
    return self.api_client.call_api(
        '/apis/resource.k8s.io/v1beta2/resourceclaims', 'GET',
        {},
        query_params,
        header_params,
        body=None,
        post_params=[],
        files={},
        response_type='V1beta2ResourceClaimList',  # noqa: E501
        auth_settings=['BearerToken'],
        async_req=kwargs.get('async_req'),
        _return_http_data_only=kwargs.get('_return_http_data_only'),  # noqa: E501
        _preload_content=kwargs.get('_preload_content', True),
        _request_timeout=kwargs.get('_request_timeout'),
        collection_formats={})
def list_resource_claim_template_for_all_namespaces(self, **kwargs):  # noqa: E501
    """list_resource_claim_template_for_all_namespaces  # noqa: E501

    list or watch objects of kind ResourceClaimTemplate  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.list_resource_claim_template_for_all_namespaces(async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param bool allow_watch_bookmarks: request BOOKMARK events on a watch; servers may ignore this flag. Ignored unless this is a watch.
    :param str _continue: continuation token from a previous paginated list call; server may reject stale tokens with 410 ResourceExpired.
    :param str field_selector: restrict returned objects by their fields. Defaults to everything.
    :param str label_selector: restrict returned objects by their labels. Defaults to everything.
    :param int limit: maximum number of responses for a list call; pagination continues via the `continue` field. Not supported when watch is true.
    :param str pretty: if 'true', the output is pretty printed.
    :param str resource_version: constraint on what resource versions may serve the request. Defaults to unset.
    :param str resource_version_match: how resourceVersion is applied to list calls. Defaults to unset.
    :param bool send_initial_events: with `watch=true`, begin the stream with synthetic events for the current collection state, followed by a synthetic Bookmark event; requires resourceVersionMatch to be set.
    :param int timeout_seconds: timeout for the list/watch call, regardless of activity.
    :param bool watch: watch for changes and return them as a stream of add/update/remove notifications.
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
                             without reading/decoding. Default is True.
    :param _request_timeout: timeout for this request; one number for total
                             timeout or a (connection, read) pair.
    :return: V1beta2ResourceClaimTemplateList
             If the method is called asynchronously,
             returns the request thread.
    """
    # Delegate to the *_with_http_info variant, requesting only the
    # deserialized body (drop status code and headers).
    kwargs['_return_http_data_only'] = True
    return self.list_resource_claim_template_for_all_namespaces_with_http_info(**kwargs)  # noqa: E501
def list_resource_claim_template_for_all_namespaces_with_http_info(self, **kwargs):  # noqa: E501
    """list_resource_claim_template_for_all_namespaces  # noqa: E501

    list or watch objects of kind ResourceClaimTemplate  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.list_resource_claim_template_for_all_namespaces_with_http_info(async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param bool allow_watch_bookmarks: request BOOKMARK events on a watch; servers may ignore this flag. Ignored unless this is a watch.
    :param str _continue: continuation token from a previous paginated list call; server may reject stale tokens with 410 ResourceExpired.
    :param str field_selector: restrict returned objects by their fields. Defaults to everything.
    :param str label_selector: restrict returned objects by their labels. Defaults to everything.
    :param int limit: maximum number of responses for a list call; pagination continues via the `continue` field. Not supported when watch is true.
    :param str pretty: if 'true', the output is pretty printed.
    :param str resource_version: constraint on what resource versions may serve the request. Defaults to unset.
    :param str resource_version_match: how resourceVersion is applied to list calls. Defaults to unset.
    :param bool send_initial_events: with `watch=true`, begin the stream with synthetic events for the current collection state, followed by a synthetic Bookmark event; requires resourceVersionMatch to be set.
    :param int timeout_seconds: timeout for the list/watch call, regardless of activity.
    :param bool watch: watch for changes and return them as a stream of add/update/remove notifications.
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
                             without reading/decoding. Default is True.
    :param _request_timeout: timeout for this request; one number for total
                             timeout or a (connection, read) pair.
    :return: tuple(V1beta2ResourceClaimTemplateList, status_code(int), headers(HTTPHeaderDict))
             If the method is called asynchronously,
             returns the request thread.
    """
    # API query parameters, in the exact order they are emitted on the wire.
    # Maps python_name -> camelCase query-parameter name.
    _query_param_map = [
        ('allow_watch_bookmarks', 'allowWatchBookmarks'),
        ('_continue', 'continue'),
        ('field_selector', 'fieldSelector'),
        ('label_selector', 'labelSelector'),
        ('limit', 'limit'),
        ('pretty', 'pretty'),
        ('resource_version', 'resourceVersion'),
        ('resource_version_match', 'resourceVersionMatch'),
        ('send_initial_events', 'sendInitialEvents'),
        ('timeout_seconds', 'timeoutSeconds'),
        ('watch', 'watch'),
    ]
    all_params = {name for name, _ in _query_param_map}
    all_params.update((
        'async_req',
        '_return_http_data_only',
        '_preload_content',
        '_request_timeout',
    ))

    # Reject unknown keyword arguments up front.  Plain dict iteration
    # replaces the third-party six.iteritems() shim; they are equivalent
    # on Python 3.
    for key in kwargs:
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method list_resource_claim_template_for_all_namespaces" % key
            )
    local_var_params = kwargs

    collection_formats = {}
    path_params = {}

    # Emit only the query parameters the caller actually supplied
    # (None means "unset", exactly like the generated per-parameter checks).
    query_params = [
        (wire_name, local_var_params[name])
        for name, wire_name in _query_param_map
        if local_var_params.get(name) is not None
    ]

    # HTTP header `Accept`
    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch', 'application/cbor-seq'])  # noqa: E501
    }

    # Authentication setting
    auth_settings = ['BearerToken']  # noqa: E501

    return self.api_client.call_api(
        '/apis/resource.k8s.io/v1beta2/resourceclaimtemplates', 'GET',
        path_params,
        query_params,
        header_params,
        body=None,
        post_params=[],
        files={},
        response_type='V1beta2ResourceClaimTemplateList',  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats)
def list_resource_slice(self, **kwargs):  # noqa: E501
    """list_resource_slice  # noqa: E501

    list or watch objects of kind ResourceSlice  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.list_resource_slice(async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str pretty: if 'true', the output is pretty printed.
    :param bool allow_watch_bookmarks: request BOOKMARK events on a watch; servers may ignore this flag. Ignored unless this is a watch.
    :param str _continue: continuation token from a previous paginated list call; server may reject stale tokens with 410 ResourceExpired.
    :param str field_selector: restrict returned objects by their fields. Defaults to everything.
    :param str label_selector: restrict returned objects by their labels. Defaults to everything.
    :param int limit: maximum number of responses for a list call; pagination continues via the `continue` field. Not supported when watch is true.
    :param str resource_version: constraint on what resource versions may serve the request. Defaults to unset.
    :param str resource_version_match: how resourceVersion is applied to list calls. Defaults to unset.
    :param bool send_initial_events: with `watch=true`, begin the stream with synthetic events for the current collection state, followed by a synthetic Bookmark event; requires resourceVersionMatch to be set.
    :param int timeout_seconds: timeout for the list/watch call, regardless of activity.
    :param bool watch: watch for changes and return them as a stream of add/update/remove notifications.
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
                             without reading/decoding. Default is True.
    :param _request_timeout: timeout for this request; one number for total
                             timeout or a (connection, read) pair.
    :return: V1beta2ResourceSliceList
             If the method is called asynchronously,
             returns the request thread.
    """
    # Delegate to the *_with_http_info variant, requesting only the
    # deserialized body (drop status code and headers).
    kwargs['_return_http_data_only'] = True
    return self.list_resource_slice_with_http_info(**kwargs)  # noqa: E501
def list_resource_slice_with_http_info(self, **kwargs):  # noqa: E501
    """list_resource_slice  # noqa: E501

    list or watch objects of kind ResourceSlice  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.list_resource_slice_with_http_info(async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str pretty: if 'true', the output is pretty printed.
    :param bool allow_watch_bookmarks: request BOOKMARK events on a watch; servers may ignore this flag. Ignored unless this is a watch.
    :param str _continue: continuation token from a previous paginated list call; server may reject stale tokens with 410 ResourceExpired.
    :param str field_selector: restrict returned objects by their fields. Defaults to everything.
    :param str label_selector: restrict returned objects by their labels. Defaults to everything.
    :param int limit: maximum number of responses for a list call; pagination continues via the `continue` field. Not supported when watch is true.
    :param str resource_version: constraint on what resource versions may serve the request. Defaults to unset.
    :param str resource_version_match: how resourceVersion is applied to list calls. Defaults to unset.
    :param bool send_initial_events: with `watch=true`, begin the stream with synthetic events for the current collection state, followed by a synthetic Bookmark event; requires resourceVersionMatch to be set.
    :param int timeout_seconds: timeout for the list/watch call, regardless of activity.
    :param bool watch: watch for changes and return them as a stream of add/update/remove notifications.
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
                             without reading/decoding. Default is True.
    :param _request_timeout: timeout for this request; one number for total
                             timeout or a (connection, read) pair.
    :return: tuple(V1beta2ResourceSliceList, status_code(int), headers(HTTPHeaderDict))
             If the method is called asynchronously,
             returns the request thread.
    """
    # API query parameters, in the exact order they are emitted on the wire.
    # Maps python_name -> camelCase query-parameter name.
    _query_param_map = [
        ('pretty', 'pretty'),
        ('allow_watch_bookmarks', 'allowWatchBookmarks'),
        ('_continue', 'continue'),
        ('field_selector', 'fieldSelector'),
        ('label_selector', 'labelSelector'),
        ('limit', 'limit'),
        ('resource_version', 'resourceVersion'),
        ('resource_version_match', 'resourceVersionMatch'),
        ('send_initial_events', 'sendInitialEvents'),
        ('timeout_seconds', 'timeoutSeconds'),
        ('watch', 'watch'),
    ]
    all_params = {name for name, _ in _query_param_map}
    all_params.update((
        'async_req',
        '_return_http_data_only',
        '_preload_content',
        '_request_timeout',
    ))

    # Reject unknown keyword arguments up front.  Plain dict iteration
    # replaces the third-party six.iteritems() shim; they are equivalent
    # on Python 3.
    for key in kwargs:
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method list_resource_slice" % key
            )
    local_var_params = kwargs

    collection_formats = {}
    path_params = {}

    # Emit only the query parameters the caller actually supplied
    # (None means "unset", exactly like the generated per-parameter checks).
    query_params = [
        (wire_name, local_var_params[name])
        for name, wire_name in _query_param_map
        if local_var_params.get(name) is not None
    ]

    # HTTP header `Accept`
    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch', 'application/cbor-seq'])  # noqa: E501
    }

    # Authentication setting
    auth_settings = ['BearerToken']  # noqa: E501

    return self.api_client.call_api(
        '/apis/resource.k8s.io/v1beta2/resourceslices', 'GET',
        path_params,
        query_params,
        header_params,
        body=None,
        post_params=[],
        files={},
        response_type='V1beta2ResourceSliceList',  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats)
def patch_device_class(self, name, body, **kwargs):  # noqa: E501
    """patch_device_class  # noqa: E501

    Partially update the specified DeviceClass.  # noqa: E501

    Synchronous by default; pass ``async_req=True`` to issue the request
    asynchronously and receive a thread whose ``.get()`` yields the result.

    >>> thread = api.patch_device_class(name, body, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the DeviceClass (required)
    :param object body: (required)
    :param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
    :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
    :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
    :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: silently drop unknown fields and keep the last duplicate. - Warn: emit a warning header per dropped/duplicate field but still succeed. - Strict: fail with a BadRequest error on any unknown or duplicate field.
    :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: V1beta2DeviceClass
             If the method is called asynchronously,
             returns the request thread.
    """
    # Ask the lower-level call to hand back only the deserialized body,
    # dropping the (data, status, headers) tuple wrapper.
    kwargs.update(_return_http_data_only=True)
    return self.patch_device_class_with_http_info(name, body, **kwargs)  # noqa: E501
def patch_device_class_with_http_info(self, name, body, **kwargs):  # noqa: E501
    """patch_device_class  # noqa: E501

    partially update the specified DeviceClass  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.patch_device_class_with_http_info(name, body, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the DeviceClass (required)
    :param object body: (required)
    :param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
    :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
    :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
    :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
    :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: tuple(V1beta2DeviceClass, status_code(int), headers(HTTPHeaderDict))
             If the method is called asynchronously,
             returns the request thread.
    """
    # Snapshot the explicit arguments (self, name, body, kwargs) *before*
    # any other local is created — the validation below iterates this dict.
    local_var_params = locals()

    # Keyword arguments the caller is allowed to pass for this endpoint.
    all_params = [
        'name',
        'body',
        'pretty',
        'dry_run',
        'field_manager',
        'field_validation',
        'force'
    ]
    # Framework-level options accepted by every generated API method.
    all_params.extend(
        [
            'async_req',
            '_return_http_data_only',
            '_preload_content',
            '_request_timeout'
        ]
    )

    # Reject unknown keyword arguments, then fold the accepted ones into
    # local_var_params so the parameter-assembly code below sees them.
    for key, val in six.iteritems(local_var_params['kwargs']):
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method patch_device_class" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']
    # verify the required parameter 'name' is set
    if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
                                                    local_var_params['name'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `name` when calling `patch_device_class`")  # noqa: E501
    # verify the required parameter 'body' is set
    if self.api_client.client_side_validation and ('body' not in local_var_params or  # noqa: E501
                                                    local_var_params['body'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `body` when calling `patch_device_class`")  # noqa: E501

    # No array-typed parameters on this endpoint, so no collection formats.
    collection_formats = {}

    # URL template substitutions ({name} in the path below).
    path_params = {}
    if 'name' in local_var_params:
        path_params['name'] = local_var_params['name']  # noqa: E501

    # Query string: only include parameters the caller actually supplied.
    # Note the snake_case -> camelCase rename to match the Kubernetes API.
    query_params = []
    if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
        query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
    if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None:  # noqa: E501
        query_params.append(('dryRun', local_var_params['dry_run']))  # noqa: E501
    if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None:  # noqa: E501
        query_params.append(('fieldManager', local_var_params['field_manager']))  # noqa: E501
    if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None:  # noqa: E501
        query_params.append(('fieldValidation', local_var_params['field_validation']))  # noqa: E501
    if 'force' in local_var_params and local_var_params['force'] is not None:  # noqa: E501
        query_params.append(('force', local_var_params['force']))  # noqa: E501

    header_params = {}

    form_params = []
    local_var_files = {}

    # The patch document is sent as the request body.
    body_params = None
    if 'body' in local_var_params:
        body_params = local_var_params['body']
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor'])  # noqa: E501

    # HTTP header `Content-Type`: the patch flavors this endpoint accepts.
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json', 'application/apply-patch+yaml', 'application/apply-patch+cbor'])  # noqa: E501

    # Authentication setting
    auth_settings = ['BearerToken']  # noqa: E501

    # Delegate transport, (de)serialization, and async dispatch to ApiClient.
    return self.api_client.call_api(
        '/apis/resource.k8s.io/v1beta2/deviceclasses/{name}', 'PATCH',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='V1beta2DeviceClass',  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats)
def patch_namespaced_resource_claim(self, name, namespace, body, **kwargs):  # noqa: E501
    """patch_namespaced_resource_claim  # noqa: E501

    Partially update the specified ResourceClaim.  # noqa: E501

    Synchronous by default; pass ``async_req=True`` to issue the request
    asynchronously and receive a thread whose ``.get()`` yields the result.

    >>> thread = api.patch_namespaced_resource_claim(name, namespace, body, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the ResourceClaim (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param object body: (required)
    :param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
    :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
    :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
    :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: silently drop unknown fields and keep the last duplicate. - Warn: emit a warning header per dropped/duplicate field but still succeed. - Strict: fail with a BadRequest error on any unknown or duplicate field.
    :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: V1beta2ResourceClaim
             If the method is called asynchronously,
             returns the request thread.
    """
    # Ask the lower-level call to hand back only the deserialized body,
    # dropping the (data, status, headers) tuple wrapper.
    kwargs.update(_return_http_data_only=True)
    return self.patch_namespaced_resource_claim_with_http_info(name, namespace, body, **kwargs)  # noqa: E501
def patch_namespaced_resource_claim_with_http_info(self, name, namespace, body, **kwargs):  # noqa: E501
    """patch_namespaced_resource_claim  # noqa: E501

    partially update the specified ResourceClaim  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.patch_namespaced_resource_claim_with_http_info(name, namespace, body, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the ResourceClaim (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param object body: (required)
    :param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
    :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
    :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
    :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
    :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: tuple(V1beta2ResourceClaim, status_code(int), headers(HTTPHeaderDict))
             If the method is called asynchronously,
             returns the request thread.
    """
    # Snapshot the explicit arguments (self, name, namespace, body, kwargs)
    # *before* any other local is created — validation below reads this dict.
    local_var_params = locals()

    # Keyword arguments the caller is allowed to pass for this endpoint.
    all_params = [
        'name',
        'namespace',
        'body',
        'pretty',
        'dry_run',
        'field_manager',
        'field_validation',
        'force'
    ]
    # Framework-level options accepted by every generated API method.
    all_params.extend(
        [
            'async_req',
            '_return_http_data_only',
            '_preload_content',
            '_request_timeout'
        ]
    )

    # Reject unknown keyword arguments, then fold the accepted ones into
    # local_var_params so the parameter-assembly code below sees them.
    for key, val in six.iteritems(local_var_params['kwargs']):
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method patch_namespaced_resource_claim" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']
    # verify the required parameter 'name' is set
    if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
                                                    local_var_params['name'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `name` when calling `patch_namespaced_resource_claim`")  # noqa: E501
    # verify the required parameter 'namespace' is set
    if self.api_client.client_side_validation and ('namespace' not in local_var_params or  # noqa: E501
                                                    local_var_params['namespace'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `namespace` when calling `patch_namespaced_resource_claim`")  # noqa: E501
    # verify the required parameter 'body' is set
    if self.api_client.client_side_validation and ('body' not in local_var_params or  # noqa: E501
                                                    local_var_params['body'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `body` when calling `patch_namespaced_resource_claim`")  # noqa: E501

    # No array-typed parameters on this endpoint, so no collection formats.
    collection_formats = {}

    # URL template substitutions ({namespace} and {name} in the path below).
    path_params = {}
    if 'name' in local_var_params:
        path_params['name'] = local_var_params['name']  # noqa: E501
    if 'namespace' in local_var_params:
        path_params['namespace'] = local_var_params['namespace']  # noqa: E501

    # Query string: only include parameters the caller actually supplied.
    # Note the snake_case -> camelCase rename to match the Kubernetes API.
    query_params = []
    if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
        query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
    if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None:  # noqa: E501
        query_params.append(('dryRun', local_var_params['dry_run']))  # noqa: E501
    if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None:  # noqa: E501
        query_params.append(('fieldManager', local_var_params['field_manager']))  # noqa: E501
    if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None:  # noqa: E501
        query_params.append(('fieldValidation', local_var_params['field_validation']))  # noqa: E501
    if 'force' in local_var_params and local_var_params['force'] is not None:  # noqa: E501
        query_params.append(('force', local_var_params['force']))  # noqa: E501

    header_params = {}

    form_params = []
    local_var_files = {}

    # The patch document is sent as the request body.
    body_params = None
    if 'body' in local_var_params:
        body_params = local_var_params['body']
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor'])  # noqa: E501

    # HTTP header `Content-Type`: the patch flavors this endpoint accepts.
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json', 'application/apply-patch+yaml', 'application/apply-patch+cbor'])  # noqa: E501

    # Authentication setting
    auth_settings = ['BearerToken']  # noqa: E501

    # Delegate transport, (de)serialization, and async dispatch to ApiClient.
    return self.api_client.call_api(
        '/apis/resource.k8s.io/v1beta2/namespaces/{namespace}/resourceclaims/{name}', 'PATCH',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='V1beta2ResourceClaim',  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats)
def patch_namespaced_resource_claim_status(self, name, namespace, body, **kwargs):  # noqa: E501
    """patch_namespaced_resource_claim_status  # noqa: E501

    Partially update status of the specified ResourceClaim.  # noqa: E501

    Synchronous by default; pass ``async_req=True`` to issue the request
    asynchronously and receive a thread whose ``.get()`` yields the result.

    >>> thread = api.patch_namespaced_resource_claim_status(name, namespace, body, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the ResourceClaim (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param object body: (required)
    :param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
    :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
    :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
    :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: silently drop unknown fields and keep the last duplicate. - Warn: emit a warning header per dropped/duplicate field but still succeed. - Strict: fail with a BadRequest error on any unknown or duplicate field.
    :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: V1beta2ResourceClaim
             If the method is called asynchronously,
             returns the request thread.
    """
    # Ask the lower-level call to hand back only the deserialized body,
    # dropping the (data, status, headers) tuple wrapper.
    kwargs.update(_return_http_data_only=True)
    return self.patch_namespaced_resource_claim_status_with_http_info(name, namespace, body, **kwargs)  # noqa: E501
def patch_namespaced_resource_claim_status_with_http_info(self, name, namespace, body, **kwargs):  # noqa: E501
    """patch_namespaced_resource_claim_status  # noqa: E501

    partially update status of the specified ResourceClaim  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.patch_namespaced_resource_claim_status_with_http_info(name, namespace, body, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the ResourceClaim (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param object body: (required)
    :param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
    :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
    :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
    :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
    :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: tuple(V1beta2ResourceClaim, status_code(int), headers(HTTPHeaderDict))
             If the method is called asynchronously,
             returns the request thread.
    """
    # Snapshot the explicit arguments (self, name, namespace, body, kwargs)
    # *before* any other local is created — validation below reads this dict.
    local_var_params = locals()

    # Keyword arguments the caller is allowed to pass for this endpoint.
    all_params = [
        'name',
        'namespace',
        'body',
        'pretty',
        'dry_run',
        'field_manager',
        'field_validation',
        'force'
    ]
    # Framework-level options accepted by every generated API method.
    all_params.extend(
        [
            'async_req',
            '_return_http_data_only',
            '_preload_content',
            '_request_timeout'
        ]
    )

    # Reject unknown keyword arguments, then fold the accepted ones into
    # local_var_params so the parameter-assembly code below sees them.
    for key, val in six.iteritems(local_var_params['kwargs']):
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method patch_namespaced_resource_claim_status" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']
    # verify the required parameter 'name' is set
    if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
                                                    local_var_params['name'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `name` when calling `patch_namespaced_resource_claim_status`")  # noqa: E501
    # verify the required parameter 'namespace' is set
    if self.api_client.client_side_validation and ('namespace' not in local_var_params or  # noqa: E501
                                                    local_var_params['namespace'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `namespace` when calling `patch_namespaced_resource_claim_status`")  # noqa: E501
    # verify the required parameter 'body' is set
    if self.api_client.client_side_validation and ('body' not in local_var_params or  # noqa: E501
                                                    local_var_params['body'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `body` when calling `patch_namespaced_resource_claim_status`")  # noqa: E501

    # No array-typed parameters on this endpoint, so no collection formats.
    collection_formats = {}

    # URL template substitutions ({namespace} and {name} in the path below).
    path_params = {}
    if 'name' in local_var_params:
        path_params['name'] = local_var_params['name']  # noqa: E501
    if 'namespace' in local_var_params:
        path_params['namespace'] = local_var_params['namespace']  # noqa: E501

    # Query string: only include parameters the caller actually supplied.
    # Note the snake_case -> camelCase rename to match the Kubernetes API.
    query_params = []
    if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
        query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
    if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None:  # noqa: E501
        query_params.append(('dryRun', local_var_params['dry_run']))  # noqa: E501
    if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None:  # noqa: E501
        query_params.append(('fieldManager', local_var_params['field_manager']))  # noqa: E501
    if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None:  # noqa: E501
        query_params.append(('fieldValidation', local_var_params['field_validation']))  # noqa: E501
    if 'force' in local_var_params and local_var_params['force'] is not None:  # noqa: E501
        query_params.append(('force', local_var_params['force']))  # noqa: E501

    header_params = {}

    form_params = []
    local_var_files = {}

    # The patch document is sent as the request body.
    body_params = None
    if 'body' in local_var_params:
        body_params = local_var_params['body']
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor'])  # noqa: E501

    # HTTP header `Content-Type`: the patch flavors this endpoint accepts.
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json', 'application/apply-patch+yaml', 'application/apply-patch+cbor'])  # noqa: E501

    # Authentication setting
    auth_settings = ['BearerToken']  # noqa: E501

    # Delegate transport, (de)serialization, and async dispatch to ApiClient.
    # Note the '/status' subresource suffix: this patches status, not spec.
    return self.api_client.call_api(
        '/apis/resource.k8s.io/v1beta2/namespaces/{namespace}/resourceclaims/{name}/status', 'PATCH',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='V1beta2ResourceClaim',  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats)
def patch_namespaced_resource_claim_template(self, name, namespace, body, **kwargs):  # noqa: E501
    """patch_namespaced_resource_claim_template  # noqa: E501

    Partially update the specified ResourceClaimTemplate and return only the
    deserialized response body (status code and headers are dropped).

    Synchronous by default; pass ``async_req=True`` to get a request thread
    whose ``get()`` yields the result.

    :param async_req bool: execute request asynchronously
    :param str name: name of the ResourceClaimTemplate (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param object body: the patch document (required)
    :param str pretty: if 'true', the output is pretty printed
    :param str dry_run: when present, modifications are not persisted; valid value: 'All'
    :param str field_manager: name associated with the actor making these changes
    :param str field_validation: how the server handles unknown/duplicate fields (Ignore, Warn, Strict)
    :param bool force: re-acquire conflicting fields (apply patches only)
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
    :param _request_timeout: total timeout, or a (connect, read) tuple
    :return: V1beta2ResourceClaimTemplate, or the request thread when async
    """
    # Delegate to the *_with_http_info variant, forcing data-only return.
    options = dict(kwargs, _return_http_data_only=True)
    return self.patch_namespaced_resource_claim_template_with_http_info(name, namespace, body, **options)  # noqa: E501
def patch_namespaced_resource_claim_template_with_http_info(self, name, namespace, body, **kwargs):  # noqa: E501
    """patch_namespaced_resource_claim_template  # noqa: E501

    Partially update the specified ResourceClaimTemplate, returning the
    deserialized object together with the HTTP status code and headers.

    Synchronous by default; pass ``async_req=True`` for a request thread.

    :param async_req bool: execute request asynchronously
    :param str name: name of the ResourceClaimTemplate (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param object body: the patch document (required)
    :param str pretty: if 'true', the output is pretty printed
    :param str dry_run: when present, modifications are not persisted; valid value: 'All'
    :param str field_manager: name associated with the actor making these changes
    :param str field_validation: how the server handles unknown/duplicate fields (Ignore, Warn, Strict)
    :param bool force: re-acquire conflicting fields (apply patches only)
    :param _return_http_data_only: response data without status code and headers
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
    :param _request_timeout: total timeout, or a (connect, read) tuple
    :return: tuple(V1beta2ResourceClaimTemplate, status_code(int), headers(HTTPHeaderDict)),
             or the request thread when called asynchronously
    """
    # ``locals()`` is taken first so it contains exactly the call arguments;
    # later locals would otherwise leak into the parameter dict.
    local_var_params = locals()

    # Declared API parameters plus the generic request options.
    all_params = [
        'name', 'namespace', 'body',
        'pretty', 'dry_run', 'field_manager', 'field_validation', 'force',
        'async_req', '_return_http_data_only', '_preload_content', '_request_timeout',
    ]

    # Reject unknown keyword arguments, then merge the known ones.
    for key, val in local_var_params['kwargs'].items():
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method patch_namespaced_resource_claim_template" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']

    # Client-side validation of required parameters.
    if self.api_client.client_side_validation:
        for required in ('name', 'namespace', 'body'):
            if local_var_params.get(required) is None:
                raise ApiValueError("Missing the required parameter `%s` when calling `patch_namespaced_resource_claim_template`" % required)  # noqa: E501

    collection_formats = {}

    # Path substitution values for {name} and {namespace}.
    path_params = {k: local_var_params[k] for k in ('name', 'namespace') if k in local_var_params}

    # Map python_name -> wireName for the optional query parameters.
    query_params = [
        (wire, local_var_params[py])
        for py, wire in (
            ('pretty', 'pretty'),
            ('dry_run', 'dryRun'),
            ('field_manager', 'fieldManager'),
            ('field_validation', 'fieldValidation'),
            ('force', 'force'),
        )
        if local_var_params.get(py) is not None
    ]

    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor']),  # noqa: E501
        'Content-Type': self.api_client.select_header_content_type(  # noqa: E501
            ['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json', 'application/apply-patch+yaml', 'application/apply-patch+cbor']),  # noqa: E501
    }

    form_params = []
    local_var_files = {}
    body_params = local_var_params.get('body')

    # Authentication setting
    auth_settings = ['BearerToken']  # noqa: E501

    return self.api_client.call_api(
        '/apis/resource.k8s.io/v1beta2/namespaces/{namespace}/resourceclaimtemplates/{name}', 'PATCH',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='V1beta2ResourceClaimTemplate',  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats)
def patch_resource_slice(self, name, body, **kwargs):  # noqa: E501
    """patch_resource_slice  # noqa: E501

    Partially update the specified ResourceSlice and return only the
    deserialized response body (status code and headers are dropped).

    Synchronous by default; pass ``async_req=True`` to get a request thread
    whose ``get()`` yields the result.

    :param async_req bool: execute request asynchronously
    :param str name: name of the ResourceSlice (required)
    :param object body: the patch document (required)
    :param str pretty: if 'true', the output is pretty printed
    :param str dry_run: when present, modifications are not persisted; valid value: 'All'
    :param str field_manager: name associated with the actor making these changes
    :param str field_validation: how the server handles unknown/duplicate fields (Ignore, Warn, Strict)
    :param bool force: re-acquire conflicting fields (apply patches only)
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
    :param _request_timeout: total timeout, or a (connect, read) tuple
    :return: V1beta2ResourceSlice, or the request thread when async
    """
    # Delegate to the *_with_http_info variant, forcing data-only return.
    options = dict(kwargs, _return_http_data_only=True)
    return self.patch_resource_slice_with_http_info(name, body, **options)  # noqa: E501
def patch_resource_slice_with_http_info(self, name, body, **kwargs):  # noqa: E501
    """patch_resource_slice  # noqa: E501

    Partially update the specified ResourceSlice, returning the deserialized
    object together with the HTTP status code and headers.

    Synchronous by default; pass ``async_req=True`` for a request thread.

    :param async_req bool: execute request asynchronously
    :param str name: name of the ResourceSlice (required)
    :param object body: the patch document (required)
    :param str pretty: if 'true', the output is pretty printed
    :param str dry_run: when present, modifications are not persisted; valid value: 'All'
    :param str field_manager: name associated with the actor making these changes
    :param str field_validation: how the server handles unknown/duplicate fields (Ignore, Warn, Strict)
    :param bool force: re-acquire conflicting fields (apply patches only)
    :param _return_http_data_only: response data without status code and headers
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
    :param _request_timeout: total timeout, or a (connect, read) tuple
    :return: tuple(V1beta2ResourceSlice, status_code(int), headers(HTTPHeaderDict)),
             or the request thread when called asynchronously
    """
    # ``locals()`` is taken first so it contains exactly the call arguments.
    local_var_params = locals()

    # Declared API parameters plus the generic request options.
    all_params = [
        'name', 'body',
        'pretty', 'dry_run', 'field_manager', 'field_validation', 'force',
        'async_req', '_return_http_data_only', '_preload_content', '_request_timeout',
    ]

    # Reject unknown keyword arguments, then merge the known ones.
    for key, val in local_var_params['kwargs'].items():
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method patch_resource_slice" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']

    # Client-side validation of required parameters.
    if self.api_client.client_side_validation:
        for required in ('name', 'body'):
            if local_var_params.get(required) is None:
                raise ApiValueError("Missing the required parameter `%s` when calling `patch_resource_slice`" % required)  # noqa: E501

    collection_formats = {}

    # Path substitution value for {name}.
    path_params = {k: local_var_params[k] for k in ('name',) if k in local_var_params}

    # Map python_name -> wireName for the optional query parameters.
    query_params = [
        (wire, local_var_params[py])
        for py, wire in (
            ('pretty', 'pretty'),
            ('dry_run', 'dryRun'),
            ('field_manager', 'fieldManager'),
            ('field_validation', 'fieldValidation'),
            ('force', 'force'),
        )
        if local_var_params.get(py) is not None
    ]

    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor']),  # noqa: E501
        'Content-Type': self.api_client.select_header_content_type(  # noqa: E501
            ['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json', 'application/apply-patch+yaml', 'application/apply-patch+cbor']),  # noqa: E501
    }

    form_params = []
    local_var_files = {}
    body_params = local_var_params.get('body')

    # Authentication setting
    auth_settings = ['BearerToken']  # noqa: E501

    return self.api_client.call_api(
        '/apis/resource.k8s.io/v1beta2/resourceslices/{name}', 'PATCH',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='V1beta2ResourceSlice',  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats)
def read_device_class(self, name, **kwargs):  # noqa: E501
    """read_device_class  # noqa: E501

    Read the specified DeviceClass and return only the deserialized response
    body (status code and headers are dropped).

    Synchronous by default; pass ``async_req=True`` to get a request thread
    whose ``get()`` yields the result.

    :param async_req bool: execute request asynchronously
    :param str name: name of the DeviceClass (required)
    :param str pretty: if 'true', the output is pretty printed
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
    :param _request_timeout: total timeout, or a (connect, read) tuple
    :return: V1beta2DeviceClass, or the request thread when async
    """
    # Delegate to the *_with_http_info variant, forcing data-only return.
    options = dict(kwargs, _return_http_data_only=True)
    return self.read_device_class_with_http_info(name, **options)  # noqa: E501
def read_device_class_with_http_info(self, name, **kwargs):  # noqa: E501
    """read_device_class  # noqa: E501

    Read the specified DeviceClass, returning the deserialized object
    together with the HTTP status code and headers.

    Synchronous by default; pass ``async_req=True`` for a request thread.

    :param async_req bool: execute request asynchronously
    :param str name: name of the DeviceClass (required)
    :param str pretty: if 'true', the output is pretty printed
    :param _return_http_data_only: response data without status code and headers
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
    :param _request_timeout: total timeout, or a (connect, read) tuple
    :return: tuple(V1beta2DeviceClass, status_code(int), headers(HTTPHeaderDict)),
             or the request thread when called asynchronously
    """
    # ``locals()`` is taken first so it contains exactly the call arguments.
    local_var_params = locals()

    # Declared API parameters plus the generic request options.
    all_params = [
        'name', 'pretty',
        'async_req', '_return_http_data_only', '_preload_content', '_request_timeout',
    ]

    # Reject unknown keyword arguments, then merge the known ones.
    for key, val in local_var_params['kwargs'].items():
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method read_device_class" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']

    # Client-side validation of required parameters.
    if self.api_client.client_side_validation and local_var_params.get('name') is None:
        raise ApiValueError("Missing the required parameter `name` when calling `read_device_class`")  # noqa: E501

    collection_formats = {}

    # Path substitution value for {name}.
    path_params = {k: local_var_params[k] for k in ('name',) if k in local_var_params}

    query_params = [
        (wire, local_var_params[py])
        for py, wire in (('pretty', 'pretty'),)
        if local_var_params.get(py) is not None
    ]

    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor']),  # noqa: E501
    }

    form_params = []
    local_var_files = {}
    body_params = None  # GET request carries no body

    # Authentication setting
    auth_settings = ['BearerToken']  # noqa: E501

    return self.api_client.call_api(
        '/apis/resource.k8s.io/v1beta2/deviceclasses/{name}', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='V1beta2DeviceClass',  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats)
def read_namespaced_resource_claim(self, name, namespace, **kwargs):  # noqa: E501
    """read_namespaced_resource_claim  # noqa: E501

    Read the specified ResourceClaim and return only the deserialized
    response body (status code and headers are dropped).

    Synchronous by default; pass ``async_req=True`` to get a request thread
    whose ``get()`` yields the result.

    :param async_req bool: execute request asynchronously
    :param str name: name of the ResourceClaim (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param str pretty: if 'true', the output is pretty printed
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
    :param _request_timeout: total timeout, or a (connect, read) tuple
    :return: V1beta2ResourceClaim, or the request thread when async
    """
    # Delegate to the *_with_http_info variant, forcing data-only return.
    options = dict(kwargs, _return_http_data_only=True)
    return self.read_namespaced_resource_claim_with_http_info(name, namespace, **options)  # noqa: E501
def read_namespaced_resource_claim_with_http_info(self, name, namespace, **kwargs):  # noqa: E501
    """read_namespaced_resource_claim  # noqa: E501

    Read the specified ResourceClaim, returning the deserialized object
    together with the HTTP status code and headers.

    Synchronous by default; pass ``async_req=True`` for a request thread.

    :param async_req bool: execute request asynchronously
    :param str name: name of the ResourceClaim (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param str pretty: if 'true', the output is pretty printed
    :param _return_http_data_only: response data without status code and headers
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
    :param _request_timeout: total timeout, or a (connect, read) tuple
    :return: tuple(V1beta2ResourceClaim, status_code(int), headers(HTTPHeaderDict)),
             or the request thread when called asynchronously
    """
    # ``locals()`` is taken first so it contains exactly the call arguments.
    local_var_params = locals()

    # Declared API parameters plus the generic request options.
    all_params = [
        'name', 'namespace', 'pretty',
        'async_req', '_return_http_data_only', '_preload_content', '_request_timeout',
    ]

    # Reject unknown keyword arguments, then merge the known ones.
    for key, val in local_var_params['kwargs'].items():
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method read_namespaced_resource_claim" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']

    # Client-side validation of required parameters.
    if self.api_client.client_side_validation:
        for required in ('name', 'namespace'):
            if local_var_params.get(required) is None:
                raise ApiValueError("Missing the required parameter `%s` when calling `read_namespaced_resource_claim`" % required)  # noqa: E501

    collection_formats = {}

    # Path substitution values for {name} and {namespace}.
    path_params = {k: local_var_params[k] for k in ('name', 'namespace') if k in local_var_params}

    query_params = [
        (wire, local_var_params[py])
        for py, wire in (('pretty', 'pretty'),)
        if local_var_params.get(py) is not None
    ]

    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor']),  # noqa: E501
    }

    form_params = []
    local_var_files = {}
    body_params = None  # GET request carries no body

    # Authentication setting
    auth_settings = ['BearerToken']  # noqa: E501

    return self.api_client.call_api(
        '/apis/resource.k8s.io/v1beta2/namespaces/{namespace}/resourceclaims/{name}', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='V1beta2ResourceClaim',  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats)
def read_namespaced_resource_claim_status(self, name, namespace, **kwargs):  # noqa: E501
    """read_namespaced_resource_claim_status  # noqa: E501

    Read status of the specified ResourceClaim and return only the
    deserialized response body (status code and headers are dropped).

    Synchronous by default; pass ``async_req=True`` to get a request thread
    whose ``get()`` yields the result.

    :param async_req bool: execute request asynchronously
    :param str name: name of the ResourceClaim (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param str pretty: if 'true', the output is pretty printed
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
    :param _request_timeout: total timeout, or a (connect, read) tuple
    :return: V1beta2ResourceClaim, or the request thread when async
    """
    # Delegate to the *_with_http_info variant, forcing data-only return.
    options = dict(kwargs, _return_http_data_only=True)
    return self.read_namespaced_resource_claim_status_with_http_info(name, namespace, **options)  # noqa: E501
    def read_namespaced_resource_claim_status_with_http_info(self, name, namespace, **kwargs):  # noqa: E501
        """read_namespaced_resource_claim_status  # noqa: E501
        read status of the specified ResourceClaim  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.read_namespaced_resource_claim_status_with_http_info(name, namespace, async_req=True)
        >>> result = thread.get()
        :param async_req bool: execute request asynchronously
        :param str name: name of the ResourceClaim (required)
        :param str namespace: object name and auth scope, such as for teams and projects (required)
        :param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(V1beta2ResourceClaim, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # NOTE: locals() deliberately captures `name`, `namespace` and
        # `kwargs` in one dict; no new locals may be bound before this line.
        local_var_params = locals()
        # Names this endpoint accepts, plus the generic transport options.
        all_params = [
            'name',
            'namespace',
            'pretty'
        ]
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )
        # Reject unknown keyword arguments, then flatten kwargs into
        # local_var_params so everything is looked up from one dict.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method read_namespaced_resource_claim_status" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'name' is set
        if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
                                                        local_var_params['name'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `name` when calling `read_namespaced_resource_claim_status`")  # noqa: E501
        # verify the required parameter 'namespace' is set
        if self.api_client.client_side_validation and ('namespace' not in local_var_params or  # noqa: E501
                                                        local_var_params['namespace'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `namespace` when calling `read_namespaced_resource_claim_status`")  # noqa: E501
        collection_formats = {}
        # Path placeholders {name} / {namespace} filled by call_api.
        path_params = {}
        if 'name' in local_var_params:
            path_params['name'] = local_var_params['name']  # noqa: E501
        if 'namespace' in local_var_params:
            path_params['namespace'] = local_var_params['namespace']  # noqa: E501
        query_params = []
        if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
            query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
        header_params = {}
        form_params = []
        local_var_files = {}
        # GET request: no request body is sent.
        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor'])  # noqa: E501
        # Authentication setting
        auth_settings = ['BearerToken']  # noqa: E501
        # Dispatch the request through the shared ApiClient.
        return self.api_client.call_api(
            '/apis/resource.k8s.io/v1beta2/namespaces/{namespace}/resourceclaims/{name}/status', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='V1beta2ResourceClaim',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
def read_namespaced_resource_claim_template(self, name, namespace, **kwargs): # noqa: E501
"""read_namespaced_resource_claim_template # noqa: E501
read the specified ResourceClaimTemplate # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_namespaced_resource_claim_template(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the ResourceClaimTemplate (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1beta2ResourceClaimTemplate
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.read_namespaced_resource_claim_template_with_http_info(name, namespace, **kwargs) # noqa: E501
    def read_namespaced_resource_claim_template_with_http_info(self, name, namespace, **kwargs):  # noqa: E501
        """read_namespaced_resource_claim_template  # noqa: E501
        read the specified ResourceClaimTemplate  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.read_namespaced_resource_claim_template_with_http_info(name, namespace, async_req=True)
        >>> result = thread.get()
        :param async_req bool: execute request asynchronously
        :param str name: name of the ResourceClaimTemplate (required)
        :param str namespace: object name and auth scope, such as for teams and projects (required)
        :param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(V1beta2ResourceClaimTemplate, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # NOTE: locals() deliberately captures `name`, `namespace` and
        # `kwargs` in one dict; no new locals may be bound before this line.
        local_var_params = locals()
        # Names this endpoint accepts, plus the generic transport options.
        all_params = [
            'name',
            'namespace',
            'pretty'
        ]
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )
        # Reject unknown keyword arguments, then flatten kwargs into
        # local_var_params so everything is looked up from one dict.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method read_namespaced_resource_claim_template" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'name' is set
        if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
                                                        local_var_params['name'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `name` when calling `read_namespaced_resource_claim_template`")  # noqa: E501
        # verify the required parameter 'namespace' is set
        if self.api_client.client_side_validation and ('namespace' not in local_var_params or  # noqa: E501
                                                        local_var_params['namespace'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `namespace` when calling `read_namespaced_resource_claim_template`")  # noqa: E501
        collection_formats = {}
        # Path placeholders {name} / {namespace} filled by call_api.
        path_params = {}
        if 'name' in local_var_params:
            path_params['name'] = local_var_params['name']  # noqa: E501
        if 'namespace' in local_var_params:
            path_params['namespace'] = local_var_params['namespace']  # noqa: E501
        query_params = []
        if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
            query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
        header_params = {}
        form_params = []
        local_var_files = {}
        # GET request: no request body is sent.
        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor'])  # noqa: E501
        # Authentication setting
        auth_settings = ['BearerToken']  # noqa: E501
        # Dispatch the request through the shared ApiClient.
        return self.api_client.call_api(
            '/apis/resource.k8s.io/v1beta2/namespaces/{namespace}/resourceclaimtemplates/{name}', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='V1beta2ResourceClaimTemplate',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
def read_resource_slice(self, name, **kwargs): # noqa: E501
"""read_resource_slice # noqa: E501
read the specified ResourceSlice # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_resource_slice(name, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the ResourceSlice (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1beta2ResourceSlice
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.read_resource_slice_with_http_info(name, **kwargs) # noqa: E501
    def read_resource_slice_with_http_info(self, name, **kwargs):  # noqa: E501
        """read_resource_slice  # noqa: E501
        read the specified ResourceSlice  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.read_resource_slice_with_http_info(name, async_req=True)
        >>> result = thread.get()
        :param async_req bool: execute request asynchronously
        :param str name: name of the ResourceSlice (required)
        :param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(V1beta2ResourceSlice, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # NOTE: locals() deliberately captures `name` and `kwargs` in one
        # dict; no new locals may be bound before this line.
        local_var_params = locals()
        # Names this endpoint accepts, plus the generic transport options.
        all_params = [
            'name',
            'pretty'
        ]
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )
        # Reject unknown keyword arguments, then flatten kwargs into
        # local_var_params so everything is looked up from one dict.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method read_resource_slice" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'name' is set
        if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
                                                        local_var_params['name'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `name` when calling `read_resource_slice`")  # noqa: E501
        collection_formats = {}
        # ResourceSlice is cluster-scoped: only {name} appears in the path.
        path_params = {}
        if 'name' in local_var_params:
            path_params['name'] = local_var_params['name']  # noqa: E501
        query_params = []
        if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
            query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
        header_params = {}
        form_params = []
        local_var_files = {}
        # GET request: no request body is sent.
        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor'])  # noqa: E501
        # Authentication setting
        auth_settings = ['BearerToken']  # noqa: E501
        # Dispatch the request through the shared ApiClient.
        return self.api_client.call_api(
            '/apis/resource.k8s.io/v1beta2/resourceslices/{name}', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='V1beta2ResourceSlice',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
def replace_device_class(self, name, body, **kwargs): # noqa: E501
"""replace_device_class # noqa: E501
replace the specified DeviceClass # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_device_class(name, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the DeviceClass (required)
:param V1beta2DeviceClass body: (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1beta2DeviceClass
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.replace_device_class_with_http_info(name, body, **kwargs) # noqa: E501
    def replace_device_class_with_http_info(self, name, body, **kwargs):  # noqa: E501
        """replace_device_class  # noqa: E501
        replace the specified DeviceClass  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.replace_device_class_with_http_info(name, body, async_req=True)
        >>> result = thread.get()
        :param async_req bool: execute request asynchronously
        :param str name: name of the DeviceClass (required)
        :param V1beta2DeviceClass body: (required)
        :param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
        :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(V1beta2DeviceClass, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # NOTE: locals() deliberately captures `name`, `body` and `kwargs`
        # in one dict; no new locals may be bound before this line.
        local_var_params = locals()
        # Names this endpoint accepts, plus the generic transport options.
        all_params = [
            'name',
            'body',
            'pretty',
            'dry_run',
            'field_manager',
            'field_validation'
        ]
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )
        # Reject unknown keyword arguments, then flatten kwargs into
        # local_var_params so everything is looked up from one dict.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method replace_device_class" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'name' is set
        if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
                                                        local_var_params['name'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `name` when calling `replace_device_class`")  # noqa: E501
        # verify the required parameter 'body' is set
        if self.api_client.client_side_validation and ('body' not in local_var_params or  # noqa: E501
                                                        local_var_params['body'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `body` when calling `replace_device_class`")  # noqa: E501
        collection_formats = {}
        # DeviceClass is cluster-scoped: only {name} appears in the path.
        path_params = {}
        if 'name' in local_var_params:
            path_params['name'] = local_var_params['name']  # noqa: E501
        # Optional query parameters; snake_case kwargs map to the API's
        # camelCase query keys.
        query_params = []
        if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
            query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
        if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None:  # noqa: E501
            query_params.append(('dryRun', local_var_params['dry_run']))  # noqa: E501
        if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None:  # noqa: E501
            query_params.append(('fieldManager', local_var_params['field_manager']))  # noqa: E501
        if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None:  # noqa: E501
            query_params.append(('fieldValidation', local_var_params['field_validation']))  # noqa: E501
        header_params = {}
        form_params = []
        local_var_files = {}
        # PUT request: the replacement object is sent as the request body.
        body_params = None
        if 'body' in local_var_params:
            body_params = local_var_params['body']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor'])  # noqa: E501
        # Authentication setting
        auth_settings = ['BearerToken']  # noqa: E501
        # Dispatch the request through the shared ApiClient.
        return self.api_client.call_api(
            '/apis/resource.k8s.io/v1beta2/deviceclasses/{name}', 'PUT',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='V1beta2DeviceClass',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
def replace_namespaced_resource_claim(self, name, namespace, body, **kwargs): # noqa: E501
"""replace_namespaced_resource_claim # noqa: E501
replace the specified ResourceClaim # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_namespaced_resource_claim(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the ResourceClaim (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1beta2ResourceClaim body: (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1beta2ResourceClaim
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.replace_namespaced_resource_claim_with_http_info(name, namespace, body, **kwargs) # noqa: E501
    def replace_namespaced_resource_claim_with_http_info(self, name, namespace, body, **kwargs):  # noqa: E501
        """replace_namespaced_resource_claim  # noqa: E501
        replace the specified ResourceClaim  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.replace_namespaced_resource_claim_with_http_info(name, namespace, body, async_req=True)
        >>> result = thread.get()
        :param async_req bool: execute request asynchronously
        :param str name: name of the ResourceClaim (required)
        :param str namespace: object name and auth scope, such as for teams and projects (required)
        :param V1beta2ResourceClaim body: (required)
        :param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
        :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(V1beta2ResourceClaim, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # NOTE: locals() deliberately captures `name`, `namespace`, `body`
        # and `kwargs` in one dict; no new locals may be bound before this
        # line.
        local_var_params = locals()
        # Names this endpoint accepts, plus the generic transport options.
        all_params = [
            'name',
            'namespace',
            'body',
            'pretty',
            'dry_run',
            'field_manager',
            'field_validation'
        ]
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )
        # Reject unknown keyword arguments, then flatten kwargs into
        # local_var_params so everything is looked up from one dict.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method replace_namespaced_resource_claim" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'name' is set
        if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
                                                        local_var_params['name'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `name` when calling `replace_namespaced_resource_claim`")  # noqa: E501
        # verify the required parameter 'namespace' is set
        if self.api_client.client_side_validation and ('namespace' not in local_var_params or  # noqa: E501
                                                        local_var_params['namespace'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `namespace` when calling `replace_namespaced_resource_claim`")  # noqa: E501
        # verify the required parameter 'body' is set
        if self.api_client.client_side_validation and ('body' not in local_var_params or  # noqa: E501
                                                        local_var_params['body'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `body` when calling `replace_namespaced_resource_claim`")  # noqa: E501
        collection_formats = {}
        # Path placeholders {name} / {namespace} filled by call_api.
        path_params = {}
        if 'name' in local_var_params:
            path_params['name'] = local_var_params['name']  # noqa: E501
        if 'namespace' in local_var_params:
            path_params['namespace'] = local_var_params['namespace']  # noqa: E501
        # Optional query parameters; snake_case kwargs map to the API's
        # camelCase query keys.
        query_params = []
        if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
            query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
        if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None:  # noqa: E501
            query_params.append(('dryRun', local_var_params['dry_run']))  # noqa: E501
        if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None:  # noqa: E501
            query_params.append(('fieldManager', local_var_params['field_manager']))  # noqa: E501
        if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None:  # noqa: E501
            query_params.append(('fieldValidation', local_var_params['field_validation']))  # noqa: E501
        header_params = {}
        form_params = []
        local_var_files = {}
        # PUT request: the replacement object is sent as the request body.
        body_params = None
        if 'body' in local_var_params:
            body_params = local_var_params['body']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor'])  # noqa: E501
        # Authentication setting
        auth_settings = ['BearerToken']  # noqa: E501
        # Dispatch the request through the shared ApiClient.
        return self.api_client.call_api(
            '/apis/resource.k8s.io/v1beta2/namespaces/{namespace}/resourceclaims/{name}', 'PUT',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='V1beta2ResourceClaim',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
def replace_namespaced_resource_claim_status(self, name, namespace, body, **kwargs): # noqa: E501
"""replace_namespaced_resource_claim_status # noqa: E501
replace status of the specified ResourceClaim # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_namespaced_resource_claim_status(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the ResourceClaim (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1beta2ResourceClaim body: (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1beta2ResourceClaim
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.replace_namespaced_resource_claim_status_with_http_info(name, namespace, body, **kwargs) # noqa: E501
def replace_namespaced_resource_claim_status_with_http_info(self, name, namespace, body, **kwargs): # noqa: E501
"""replace_namespaced_resource_claim_status # noqa: E501
replace status of the specified ResourceClaim # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_namespaced_resource_claim_status_with_http_info(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the ResourceClaim (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1beta2ResourceClaim body: (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1beta2ResourceClaim, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'name',
'namespace',
'body',
'pretty',
'dry_run',
'field_manager',
'field_validation'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method replace_namespaced_resource_claim_status" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'name' is set
if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
local_var_params['name'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `name` when calling `replace_namespaced_resource_claim_status`") # noqa: E501
# verify the required parameter 'namespace' is set
if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
local_var_params['namespace'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `namespace` when calling `replace_namespaced_resource_claim_status`") # noqa: E501
# verify the required parameter 'body' is set
if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
local_var_params['body'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `body` when calling `replace_namespaced_resource_claim_status`") # noqa: E501
collection_formats = {}
path_params = {}
if 'name' in local_var_params:
path_params['name'] = local_var_params['name'] # noqa: E501
if 'namespace' in local_var_params:
path_params['namespace'] = local_var_params['namespace'] # noqa: E501
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/resource.k8s.io/v1beta2/namespaces/{namespace}/resourceclaims/{name}/status', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1beta2ResourceClaim', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def replace_namespaced_resource_claim_template(self, name, namespace, body, **kwargs): # noqa: E501
"""replace_namespaced_resource_claim_template # noqa: E501
replace the specified ResourceClaimTemplate # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_namespaced_resource_claim_template(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the ResourceClaimTemplate (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1beta2ResourceClaimTemplate body: (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1beta2ResourceClaimTemplate
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.replace_namespaced_resource_claim_template_with_http_info(name, namespace, body, **kwargs) # noqa: E501
def replace_namespaced_resource_claim_template_with_http_info(self, name, namespace, body, **kwargs): # noqa: E501
"""replace_namespaced_resource_claim_template # noqa: E501
replace the specified ResourceClaimTemplate # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_namespaced_resource_claim_template_with_http_info(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the ResourceClaimTemplate (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1beta2ResourceClaimTemplate body: (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1beta2ResourceClaimTemplate, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'name',
'namespace',
'body',
'pretty',
'dry_run',
'field_manager',
'field_validation'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method replace_namespaced_resource_claim_template" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'name' is set
if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
local_var_params['name'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `name` when calling `replace_namespaced_resource_claim_template`") # noqa: E501
# verify the required parameter 'namespace' is set
if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
local_var_params['namespace'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `namespace` when calling `replace_namespaced_resource_claim_template`") # noqa: E501
# verify the required parameter 'body' is set
if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
local_var_params['body'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `body` when calling `replace_namespaced_resource_claim_template`") # noqa: E501
collection_formats = {}
path_params = {}
if 'name' in local_var_params:
path_params['name'] = local_var_params['name'] # noqa: E501
if 'namespace' in local_var_params:
path_params['namespace'] = local_var_params['namespace'] # noqa: E501
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/resource.k8s.io/v1beta2/namespaces/{namespace}/resourceclaimtemplates/{name}', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1beta2ResourceClaimTemplate', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def replace_resource_slice(self, name, body, **kwargs): # noqa: E501
"""replace_resource_slice # noqa: E501
replace the specified ResourceSlice # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_resource_slice(name, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the ResourceSlice (required)
:param V1beta2ResourceSlice body: (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1beta2ResourceSlice
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.replace_resource_slice_with_http_info(name, body, **kwargs) # noqa: E501
def replace_resource_slice_with_http_info(self, name, body, **kwargs): # noqa: E501
"""replace_resource_slice # noqa: E501
replace the specified ResourceSlice # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_resource_slice_with_http_info(name, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the ResourceSlice (required)
:param V1beta2ResourceSlice body: (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1beta2ResourceSlice, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'name',
'body',
'pretty',
'dry_run',
'field_manager',
'field_validation'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method replace_resource_slice" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'name' is set
if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
local_var_params['name'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `name` when calling `replace_resource_slice`") # noqa: E501
# verify the required parameter 'body' is set
if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
local_var_params['body'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `body` when calling `replace_resource_slice`") # noqa: E501
collection_formats = {}
path_params = {}
if 'name' in local_var_params:
path_params['name'] = local_var_params['name'] # noqa: E501
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/resource.k8s.io/v1beta2/resourceslices/{name}', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1beta2ResourceSlice', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
| ResourceV1beta2Api |
python | readthedocs__readthedocs.org | readthedocs/organizations/managers.py | {
"start": 1242,
"end": 1324
} | class ____(SettingsOverrideObject):
_default_class = TeamManagerBase
| TeamManager |
python | django-haystack__django-haystack | haystack/generic_views.py | {
"start": 3865,
"end": 4027
} | class ____(FacetedSearchMixin, SearchView):
"""
A view class for searching a Haystack managed search index with
facets
"""
pass
| FacetedSearchView |
python | django__django | tests/mutually_referential/models.py | {
"start": 162,
"end": 401
} | class ____(models.Model):
name = models.CharField(max_length=100)
# Use a simple string for forward declarations.
bestchild = models.ForeignKey(
"Child", models.SET_NULL, null=True, related_name="favored_by"
)
| Parent |
python | modin-project__modin | modin/core/dataframe/algebra/default2pandas/cat.py | {
"start": 893,
"end": 1341
} | class ____(SeriesDefault):
"""Builder for default-to-pandas methods which is executed under category accessor."""
@classmethod
def frame_wrapper(cls, df):
"""
Get category accessor of the passed frame.
Parameters
----------
df : pandas.DataFrame
Returns
-------
pandas.core.arrays.categorical.CategoricalAccessor
"""
return df.squeeze(axis=1).cat
| CatDefault |
python | pypa__virtualenv | src/virtualenv/create/via_global_ref/builtin/pypy/common.py | {
"start": 266,
"end": 1716
} | class ____(ViaGlobalRefVirtualenvBuiltin, abc.ABC):
@classmethod
def can_describe(cls, interpreter):
return interpreter.implementation == "PyPy" and super().can_describe(interpreter)
@classmethod
def _executables(cls, interpreter):
host = Path(interpreter.system_executable)
targets = sorted(f"{name}{PyPy.suffix}" for name in cls.exe_names(interpreter))
yield host, targets, RefMust.NA, RefWhen.ANY
@classmethod
def executables(cls, interpreter):
yield from super().sources(interpreter)
@classmethod
def exe_names(cls, interpreter):
return {
cls.exe_stem(),
"python",
f"python{interpreter.version_info.major}",
f"python{interpreter.version_info.major}.{interpreter.version_info.minor}",
}
@classmethod
def sources(cls, interpreter):
yield from cls.executables(interpreter)
for host in cls._add_shared_libs(interpreter):
yield PathRefToDest(host, dest=lambda self, s: self.bin_dir / s.name)
@classmethod
def _add_shared_libs(cls, interpreter):
# https://bitbucket.org/pypy/pypy/issue/1922/future-proofing-virtualenv
python_dir = Path(interpreter.system_executable).resolve().parent
yield from cls._shared_libs(python_dir)
@classmethod
def _shared_libs(cls, python_dir):
raise NotImplementedError
__all__ = [
"PyPy",
]
| PyPy |
python | getsentry__sentry | src/sentry/workflow_engine/migrations/0072_add_detector_to_workflowfirehistory.py | {
"start": 222,
"end": 1711
} | class ____(CheckedMigration):
# This flag is used to mark that a migration shouldn't be automatically run in production.
# This should only be used for operations where it's safe to run the migration after your
# code has deployed. So this should not be used for most operations that alter the schema
# of a table.
# Here are some things that make sense to mark as post deployment:
# - Large data migrations. Typically we want these to be run manually so that they can be
# monitored and not block the deploy for a long period of time while they run.
# - Adding indexes to large tables. Since this can take a long time, we'd generally prefer to
# run this outside deployments so that we don't block them. Note that while adding an index
# is a schema change, it's completely safe to run the operation after the code has deployed.
# Once deployed, run these manually via: https://develop.sentry.dev/database-migrations/#migration-deployment
is_post_deployment = False
dependencies = [
("workflow_engine", "0071_migrate_remaining_metric_alerts"),
]
operations = [
migrations.AddField(
model_name="workflowfirehistory",
name="detector",
field=sentry.db.models.fields.foreignkey.FlexibleForeignKey(
null=True,
on_delete=django.db.models.deletion.SET_NULL,
to="workflow_engine.detector",
),
),
]
| Migration |
python | getsentry__sentry | tests/sentry/integrations/vercel/test_uninstall.py | {
"start": 3000,
"end": 12802
} | class ____(APITestCase):
def setUp(self) -> None:
self.url = "/extensions/vercel/delete/"
self.second_org = self.create_organization(name="Blah", owner=self.user)
metadata = {
"access_token": "my_access_token",
"installation_id": "my_config_id",
"installation_type": "team",
"webhook_id": "my_webhook_id",
"configurations": {
"my_config_id": {
"access_token": "my_access_token",
"webhook_id": "my_webhook_id",
"organization_id": self.organization.id,
},
"my_config_id2": {
"access_token": "my_access_token2",
"webhook_id": "my_webhook_id2",
"organization_id": self.second_org.id,
},
},
}
self.integration = self.create_provider_integration(
provider="vercel",
external_id="vercel_team_id",
name="My Vercel Team",
metadata=metadata,
)
self.integration.add_organization(self.organization)
self.integration.add_organization(self.second_org)
def test_uninstall_primary_configuration(self) -> None:
"""
Test uninstalling the configuration whose credentials
* access_token
* webhook_id
* installation_id
are used in the primary metadata for the integration.
"""
assert len(OrganizationIntegration.objects.all()) == 2
response = self.client.delete(
path=self.url,
data=PRIMARY_UNINSTALL_RESPONSE,
content_type="application/json",
)
assert response.status_code == 204
assert len(OrganizationIntegration.objects.all()) == 1
integration = Integration.objects.get(id=self.integration.id)
assert integration.metadata == {
"access_token": "my_access_token2",
"installation_id": "my_config_id2",
"installation_type": "team",
"webhook_id": "my_webhook_id2",
"configurations": {
"my_config_id2": {
"access_token": "my_access_token2",
"webhook_id": "my_webhook_id2",
"organization_id": self.second_org.id,
}
},
}
def test_uninstall_non_primary_configuration(self) -> None:
"""
Test uninstalling a configuration that is only stored
in the "configurations" metadata.
"""
assert len(OrganizationIntegration.objects.all()) == 2
response = self.client.delete(
path=self.url,
data=NONPRIMARY_UNINSTALL_RESPONSE,
content_type="application/json",
)
assert response.status_code == 204
assert len(OrganizationIntegration.objects.all()) == 1
integration = Integration.objects.get(id=self.integration.id)
assert integration.metadata == {
"access_token": "my_access_token",
"installation_id": "my_config_id",
"installation_type": "team",
"webhook_id": "my_webhook_id",
"configurations": {
"my_config_id": {
"access_token": "my_access_token",
"webhook_id": "my_webhook_id",
"organization_id": self.organization.id,
}
},
}
def test_uninstall_single_configuration(self) -> None:
"""
Test uninstalling an integration with only one organization
associated with it.
"""
org = self.create_organization(owner=self.user)
metadata = {
"access_token": "my_access_token",
"installation_id": "my_config_id",
"installation_type": "user",
"webhook_id": "my_webhook_id",
"configurations": {
"my_config_id": {
"access_token": "my_access_token",
"webhook_id": "my_webhook_id",
"organization_id": org.id,
}
},
}
integration = self.create_provider_integration(
provider="vercel",
external_id="vercel_user_id",
name="My Vercel Team",
metadata=metadata,
)
integration.add_organization(org)
response = self.client.delete(
path=self.url,
data=USERID_UNINSTALL_RESPONSE,
content_type="application/json",
)
assert response.status_code == 204
assert not Integration.objects.filter(id=integration.id).exists()
assert not OrganizationIntegration.objects.filter(
integration_id=integration.id, organization_id=org.id
).exists()
@responses.activate
def test_uninstall_from_sentry(self) -> None:
"""
Test flows of uninstalling from sentry first to make sure
that uninstall webhook is valid even if the OrganizationIntegration
was deleted prior.
1. Uninstall the primary configuration
2. Check that integration metadata still updated
3. Uninstall remaining configuration
4. Check that integration is removed
"""
self.login_as(self.user)
with self.tasks():
config_id = "my_config_id"
responses.add(
responses.DELETE,
f"{VercelClient.base_url}{VercelClient.UNINSTALL % config_id}",
json={},
)
path = (
f"/api/0/organizations/{self.organization.slug}/integrations/{self.integration.id}/"
)
response = self.client.delete(path, format="json")
assert response.status_code == 204
assert (
len(
OrganizationIntegration.objects.filter(
integration=self.integration,
status=ObjectStatus.ACTIVE,
)
)
== 1
)
response = self.client.delete(
path=self.url,
data=PRIMARY_UNINSTALL_RESPONSE,
content_type="application/json",
)
assert response.status_code == 204
integration = Integration.objects.get(id=self.integration.id)
assert integration.metadata == {
"access_token": "my_access_token2",
"installation_id": "my_config_id2",
"installation_type": "team",
"webhook_id": "my_webhook_id2",
"configurations": {
"my_config_id2": {
"access_token": "my_access_token2",
"webhook_id": "my_webhook_id2",
"organization_id": self.second_org.id,
}
},
}
with self.tasks():
config_id = "my_config_id2"
responses.add(
responses.DELETE,
f"{VercelClient.base_url}{VercelClient.UNINSTALL % config_id}",
json={},
)
path = (
f"/api/0/organizations/{self.second_org.slug}/integrations/{self.integration.id}/"
)
response = self.client.delete(path, format="json")
assert response.status_code == 204
assert (
len(
OrganizationIntegration.objects.filter(
integration=self.integration,
status=ObjectStatus.ACTIVE,
)
)
== 0
)
response = self.client.delete(
path=self.url,
data=NONPRIMARY_UNINSTALL_RESPONSE,
content_type="application/json",
)
assert response.status_code == 204
assert not Integration.objects.filter(id=self.integration.id).exists()
@responses.activate
def test_uninstall_from_sentry_error(self) -> None:
"""
Test that if we uninstall from Sentry and fail to remove the integration using Vercel's
delete integration endpoint, we continue and delete the integration in Sentry.
"""
org = self.create_organization(owner=self.user)
metadata = {
"access_token": "my_access_token",
"installation_id": "my_config_id",
"installation_type": "user",
"webhook_id": "my_webhook_id",
"configurations": {
"my_config_id": {
"access_token": "my_access_token",
"webhook_id": "my_webhook_id",
"organization_id": org.id,
}
},
}
integration = self.create_provider_integration(
provider="vercel",
external_id="vercel_user_id",
name="My Vercel Team",
metadata=metadata,
)
integration.add_organization(org)
oi = OrganizationIntegration.objects.get(integration=integration)
self.login_as(self.user)
with self.tasks():
config_id = "my_config_id"
responses.add(
responses.DELETE,
f"{VercelClient.base_url}{VercelClient.UNINSTALL % config_id}",
json={"error": {"message": "You don't have permission to access this resource."}},
status=403,
)
path = f"/api/0/organizations/{org.slug}/integrations/{integration.id}/"
response = self.client.delete(path, format="json")
assert response.status_code == 204
assert ScheduledDeletion.objects.filter(
model_name="OrganizationIntegration", object_id=oi.id
).exists()
| VercelUninstallWithConfigurationsTest |
python | dagster-io__dagster | python_modules/automation/automation/dagster_dev/commands/cache_manager.py | {
"start": 459,
"end": 4718
} | class ____:
"""Manages persistent cache for repository analysis."""
def __init__(self, cache_dir: Optional[Path] = None):
"""Initialize cache manager."""
if cache_dir is None:
# Default to .git directory
git_dir = Path(".git")
if not git_dir.exists():
raise ValueError("Not in a git repository - cannot create cache")
cache_dir = git_dir / "claude_analysis_cache"
self.cache_dir = cache_dir
self.cache_dir.mkdir(exist_ok=True)
self.cache_file = self.cache_dir / "repository_analysis.json"
def _get_current_commit_hash(self) -> str:
"""Get current commit hash."""
import subprocess
result = subprocess.run(
["git", "rev-parse", "HEAD"], capture_output=True, text=True, check=True
)
return result.stdout.strip()
def _get_current_branch(self) -> str:
"""Get current branch name."""
import subprocess
result = subprocess.run(
["git", "branch", "--show-current"], capture_output=True, text=True, check=True
)
return result.stdout.strip()
def get_cache_key(self, commit_hash: str, branch_name: str) -> str:
"""Generate cache key from commit and branch."""
return f"{branch_name}:{commit_hash[:12]}"
def is_cache_valid(self, entry: CacheEntry, max_age_seconds: int = 3600) -> bool:
"""Check if cache entry is still valid."""
current_time = time.time()
age = current_time - entry.analysis_timestamp
return (
entry.commit_hash == self._get_current_commit_hash()
and entry.branch_name == self._get_current_branch()
and age < max_age_seconds
)
def get_cached_analysis(self) -> Optional[CacheEntry]:
"""Get cached analysis for current repository state."""
if not self.cache_file.exists():
return None
try:
cache_data = json.loads(self.cache_file.read_text())
entry = CacheEntry(**cache_data)
if self.is_cache_valid(entry):
return entry
else:
# Cache is stale
return None
except (json.JSONDecodeError, TypeError, ValueError):
# Corrupted cache file
return None
def store_analysis(
self,
diff_summary: dict[str, Any],
smart_analysis: Optional[dict[str, Any]] = None,
pr_number: Optional[str] = None,
) -> None:
"""Store analysis results in cache."""
entry = CacheEntry(
commit_hash=self._get_current_commit_hash(),
branch_name=self._get_current_branch(),
pr_number=pr_number,
analysis_timestamp=time.time(),
diff_summary=diff_summary,
smart_analysis=smart_analysis,
)
# Write to cache file
self.cache_file.write_text(json.dumps(asdict(entry), indent=2))
def clear_cache(self) -> bool:
"""Clear all cached data."""
try:
if self.cache_file.exists():
self.cache_file.unlink()
return True
except OSError:
return False
def get_cache_status(self) -> dict[str, Any]:
"""Get information about cache state."""
if not self.cache_file.exists():
return {"exists": False, "size_bytes": 0, "entries": 0}
try:
stat = self.cache_file.stat()
cache_data = json.loads(self.cache_file.read_text())
entry = CacheEntry(**cache_data)
return {
"exists": True,
"size_bytes": stat.st_size,
"entries": 1,
"last_analysis": entry.analysis_timestamp,
"cached_commit": entry.commit_hash[:12],
"cached_branch": entry.branch_name,
"is_valid": self.is_cache_valid(entry),
}
except (json.JSONDecodeError, TypeError, ValueError):
return {
"exists": True,
"size_bytes": self.cache_file.stat().st_size,
"entries": 0,
"error": "Corrupted cache file",
}
| CacheManager |
python | plotly__plotly.py | plotly/graph_objs/scatter3d/_stream.py | {
"start": 233,
"end": 3521
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "scatter3d"
_path_str = "scatter3d.stream"
_valid_props = {"maxpoints", "token"}
@property
def maxpoints(self):
"""
Sets the maximum number of points to keep on the plots from an
incoming stream. If `maxpoints` is set to 50, only the newest
50 points will be displayed on the plot.
The 'maxpoints' property is a number and may be specified as:
- An int or float in the interval [0, 10000]
Returns
-------
int|float
"""
return self["maxpoints"]
@maxpoints.setter
def maxpoints(self, val):
self["maxpoints"] = val
@property
def token(self):
"""
The stream id number links a data trace on a plot with a
stream. See https://chart-studio.plotly.com/settings for more
details.
The 'token' property is a string and must be specified as:
- A non-empty string
Returns
-------
str
"""
return self["token"]
@token.setter
def token(self, val):
self["token"] = val
@property
def _prop_descriptions(self):
return """\
maxpoints
Sets the maximum number of points to keep on the plots
from an incoming stream. If `maxpoints` is set to 50,
only the newest 50 points will be displayed on the
plot.
token
The stream id number links a data trace on a plot with
a stream. See https://chart-studio.plotly.com/settings
for more details.
"""
def __init__(self, arg=None, maxpoints=None, token=None, **kwargs):
"""
Construct a new Stream object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.scatter3d.Stream`
maxpoints
Sets the maximum number of points to keep on the plots
from an incoming stream. If `maxpoints` is set to 50,
only the newest 50 points will be displayed on the
plot.
token
The stream id number links a data trace on a plot with
a stream. See https://chart-studio.plotly.com/settings
for more details.
Returns
-------
Stream
"""
super().__init__("stream")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.scatter3d.Stream
constructor must be a dict or
an instance of :class:`plotly.graph_objs.scatter3d.Stream`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("maxpoints", arg, maxpoints)
self._set_property("token", arg, token)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Stream |
python | getsentry__sentry | tests/sentry/preprod/api/endpoints/test_project_preprod_check_for_updates.py | {
"start": 385,
"end": 24234
} | class ____(APITestCase):
def setUp(self) -> None:
super().setUp()
self.org = self.create_organization()
self.project = self.create_project(organization=self.org)
# Create an integration token with project:distribution scope
token_str = generate_token(self.org.slug, "")
with assume_test_silo_mode(SiloMode.CONTROL):
OrgAuthToken.objects.create(
organization_id=self.org.id,
name="Test Integration Token",
token_hashed=hash_token(token_str),
scope_list=["project:distribution"],
)
self.api_token = token_str
self.file = self.create_file(name="test_artifact.apk", type="application/octet-stream")
# Enable the feature flag for all tests by default
self.feature_context = self.feature({"organizations:preprod-frontend-routes": True})
self.feature_context.__enter__()
def tearDown(self) -> None:
# Exit the feature flag context manager
self.feature_context.__exit__(None, None, None)
super().tearDown()
def _get_url(self):
return reverse(
"sentry-api-0-project-preprod-check-for-updates",
args=[self.org.slug, self.project.slug],
)
def _create_android_artifact(self, **kwargs):
"""Helper to create an Android artifact with default values"""
defaults = {
"project": self.project,
"file_id": self.file.id,
"state": PreprodArtifact.ArtifactState.PROCESSED,
"artifact_type": PreprodArtifact.ArtifactType.APK,
"app_id": "com.example.app",
"app_name": "TestApp",
"build_version": "1.0.0",
"build_number": 42,
"build_configuration": None,
"installable_app_file_id": self.file.id,
"main_binary_identifier": "test-identifier-123",
}
defaults.update(kwargs)
return PreprodArtifact.objects.create(**defaults)
def _create_ios_artifact(self, **kwargs):
"""Helper to create an iOS artifact with default values"""
defaults = {
"project": self.project,
"file_id": self.file.id,
"state": PreprodArtifact.ArtifactState.PROCESSED,
"artifact_type": PreprodArtifact.ArtifactType.XCARCHIVE,
"app_id": "com.example.app",
"app_name": "TestApp",
"build_version": "1.0.0",
"build_number": 42,
"build_configuration": None,
"installable_app_file_id": self.file.id,
"main_binary_identifier": "test-identifier-123",
}
defaults.update(kwargs)
return PreprodArtifact.objects.create(**defaults)
def test_missing_required_parameters(self):
"""Test that missing required parameters return 400"""
url = self._get_url()
response = self.client.get(
url, format="json", HTTP_AUTHORIZATION=f"Bearer {self.api_token}"
)
assert response.status_code == 400
assert "Missing required parameters" in response.json()["error"]
def test_current_artifact_not_found(self):
"""Test when main_binary_identifier is provided but artifact doesn't exist"""
url = self._get_url()
response = self.client.get(
url
+ "?app_id=com.example.app&platform=android&build_version=1.0.0&main_binary_identifier=nonexistent",
format="json",
HTTP_AUTHORIZATION=f"Bearer {self.api_token}",
)
assert response.status_code == 200
assert response.json()["current"] is None
def test_current_artifact_success_ios(self):
"""Test successful current artifact retrieval for iOS"""
self._create_ios_artifact(
main_binary_identifier="test-identifier",
build_version="1.0.0",
build_number=42,
)
url = self._get_url()
response = self.client.get(
url
+ "?app_id=com.example.app&platform=ios&build_version=1.0.0&main_binary_identifier=test-identifier",
format="json",
HTTP_AUTHORIZATION=f"Bearer {self.api_token}",
)
assert response.status_code == 200
data = response.json()
assert data["current"] is not None
assert data["current"]["build_version"] == "1.0.0"
assert data["current"]["build_number"] == 42
assert data["current"]["app_name"] == "TestApp"
assert data["current"]["download_url"] != ""
assert "created_date" in data["current"]
def test_update_detection_android(self):
"""Test update detection for Android with higher version"""
# Create current artifact
self._create_android_artifact(
main_binary_identifier="test-identifier",
build_version="1.0.0",
build_number=42,
)
# Create higher version artifact
self._create_android_artifact(
main_binary_identifier="different-identifier",
build_version="1.1.0",
build_number=1,
)
url = self._get_url()
response = self.client.get(
url
+ "?app_id=com.example.app&platform=android&build_version=1.0.0&main_binary_identifier=test-identifier",
format="json",
HTTP_AUTHORIZATION=f"Bearer {self.api_token}",
)
assert response.status_code == 200
data = response.json()
assert data["current"] is not None
assert data["current"]["build_version"] == "1.0.0"
assert data["update"] is not None
assert data["update"]["build_version"] == "1.1.0"
assert data["update"]["build_number"] == 1
assert data["update"]["app_name"] == "TestApp"
assert data["update"]["download_url"] != ""
assert "created_date" in data["update"]
def test_update_detection_ios(self):
"""Test update detection for iOS with higher version"""
# Create current artifact
self._create_ios_artifact(
main_binary_identifier="test-identifier",
build_version="1.0.0",
build_number=42,
)
# Create higher version artifact
self._create_ios_artifact(
main_binary_identifier="different-identifier",
build_version="1.1.0",
build_number=1,
)
url = self._get_url()
response = self.client.get(
url
+ "?app_id=com.example.app&platform=ios&build_version=1.0.0&main_binary_identifier=test-identifier",
format="json",
HTTP_AUTHORIZATION=f"Bearer {self.api_token}",
)
assert response.status_code == 200
data = response.json()
assert data["current"] is not None
assert data["current"]["build_version"] == "1.0.0"
assert data["update"] is not None
assert data["update"]["build_version"] == "1.1.0"
assert data["update"]["build_number"] == 1
def test_platform_specific_filtering_android(self):
"""Test that Android platform only returns AAB/APK artifacts"""
# Create Android artifact
self._create_android_artifact(
main_binary_identifier="test-identifier",
build_version="1.0.0",
build_number=42,
)
# Create iOS artifact with same app_id
self._create_ios_artifact(
main_binary_identifier="ios-identifier",
build_version="1.1.0",
build_number=50,
)
url = self._get_url()
response = self.client.get(
url
+ "?app_id=com.example.app&platform=android&build_version=1.0.0&main_binary_identifier=test-identifier",
format="json",
HTTP_AUTHORIZATION=f"Bearer {self.api_token}",
)
assert response.status_code == 200
data = response.json()
# Should only find Android artifacts, not iOS
assert data["update"] is None
assert data["current"] is not None
assert (
data["current"]["build_version"] == "1.0.0"
) # Should find the Android artifact, not iOS
def test_installable_artifact_filtering(self):
"""Test that only installable artifacts are considered for updates"""
# Create non-installable artifact (no installable_app_file_id)
self._create_android_artifact(
main_binary_identifier="test-identifier",
build_version="1.0.0",
build_number=42,
installable_app_file_id=None,
)
# Create installable artifact with higher version
self._create_android_artifact(
main_binary_identifier="different-identifier",
build_version="1.1.0",
build_number=50,
installable_app_file_id=self.file.id,
)
url = self._get_url()
response = self.client.get(
url
+ "?app_id=com.example.app&platform=android&build_version=1.0.0&main_binary_identifier=test-identifier",
format="json",
HTTP_AUTHORIZATION=f"Bearer {self.api_token}",
)
assert response.status_code == 200
data = response.json()
# Should find the installable artifact, not the non-installable one
assert data["update"] is not None
assert data["update"]["build_version"] == "1.1.0"
def test_highest_build_number_selection(self):
"""Test that the artifact with highest build_number is selected when versions are equal"""
# Create multiple artifacts with same version but different build numbers
self._create_android_artifact(
main_binary_identifier="test-identifier",
build_version="1.0.0",
build_number=42,
)
# Lower build number
self._create_android_artifact(
main_binary_identifier="different-identifier-1",
build_version="1.1.0",
build_number=40,
)
# Higher build number (should be selected)
self._create_android_artifact(
main_binary_identifier="different-identifier-2",
build_version="1.1.0",
build_number=60,
)
url = self._get_url()
response = self.client.get(
url
+ "?app_id=com.example.app&platform=android&build_version=1.0.0&main_binary_identifier=test-identifier",
format="json",
HTTP_AUTHORIZATION=f"Bearer {self.api_token}",
)
assert response.status_code == 200
data = response.json()
# Should select the artifact with highest build_number
assert data["update"] is not None
assert data["update"]["build_version"] == "1.1.0"
assert data["update"]["build_number"] == 60
def test_no_update_available(self):
"""Test when no higher version is available"""
# Create only current artifact
self._create_android_artifact(
main_binary_identifier="test-identifier",
build_version="1.0.0",
build_number=42,
)
url = self._get_url()
response = self.client.get(
url
+ "?app_id=com.example.app&platform=android&build_version=1.0.0&main_binary_identifier=test-identifier",
format="json",
HTTP_AUTHORIZATION=f"Bearer {self.api_token}",
)
assert response.status_code == 200
data = response.json()
assert data["current"] is not None
assert data["update"] is None # No update available
def test_multiple_artifacts_same_version_different_build_configurations(self):
"""Test handling of multiple artifacts with same version but different build configurations"""
debug_config, _ = PreprodBuildConfiguration.objects.get_or_create(
project=self.project, name="debug"
)
release_config, _ = PreprodBuildConfiguration.objects.get_or_create(
project=self.project, name="release"
)
# Create artifacts with same version but different build configurations
debug_artifact = self._create_android_artifact(
main_binary_identifier="test-identifier",
build_version="1.0.0",
build_number=42,
build_configuration=debug_config,
)
self._create_android_artifact(
main_binary_identifier="different-identifier",
build_version="1.0.0",
build_number=50,
build_configuration=release_config,
)
url = self._get_url()
response = self.client.get(
url
+ "?app_id=com.example.app&platform=android&build_version=1.0.0&main_binary_identifier=test-identifier&build_configuration=debug",
format="json",
HTTP_AUTHORIZATION=f"Bearer {self.api_token}",
)
assert response.status_code == 200
data = response.json()
# Should find the artifact with matching build configuration
assert data["current"] is not None
assert data["current"]["id"] == str(debug_artifact.id)
def test_build_number_filtering(self):
"""Test that build_number parameter filters correctly"""
# Create artifacts with same version but different build numbers
self._create_android_artifact(
main_binary_identifier="test-identifier",
build_version="1.0.0",
build_number=42,
)
self._create_android_artifact(
main_binary_identifier="test-identifier-2",
build_version="1.0.0",
build_number=50,
)
url = self._get_url()
response = self.client.get(
url
+ "?app_id=com.example.app&platform=android&build_version=1.0.0&build_number=42&main_binary_identifier=test-identifier",
format="json",
HTTP_AUTHORIZATION=f"Bearer {self.api_token}",
)
assert response.status_code == 200
data = response.json()
# Should find the artifact with matching build number
assert data["current"] is not None
assert data["current"]["build_number"] == 42
def test_invalid_build_number_format(self):
"""Test that invalid build_number format returns 400"""
url = self._get_url()
response = self.client.get(
url
+ "?app_id=com.example.app&platform=android&build_version=1.0.0&build_number=invalid&main_binary_identifier=test-identifier",
format="json",
HTTP_AUTHORIZATION=f"Bearer {self.api_token}",
)
assert response.status_code == 400
assert "Invalid build_number format" in response.json()["error"]
def test_without_main_binary_identifier_with_build_number(self):
"""Test that main_binary_identifier is optional when build_number is provided"""
self._create_android_artifact(
main_binary_identifier="test-identifier",
build_version="1.0.0",
build_number=42,
)
url = self._get_url()
response = self.client.get(
url + "?app_id=com.example.app&platform=android&build_version=1.0.0&build_number=42",
format="json",
HTTP_AUTHORIZATION=f"Bearer {self.api_token}",
)
assert response.status_code == 200
data = response.json()
assert data["current"] is not None
assert data["current"]["build_version"] == "1.0.0"
def test_missing_both_main_binary_identifier_and_build_number(self):
"""Test that either main_binary_identifier or build_number must be provided"""
url = self._get_url()
response = self.client.get(
url + "?app_id=com.example.app&platform=android&build_version=1.0.0",
format="json",
HTTP_AUTHORIZATION=f"Bearer {self.api_token}",
)
assert response.status_code == 400
assert (
"Either main_binary_identifier or build_number must be provided"
in response.json()["error"]
)
def test_codesigning_type_filters_current_artifact(self):
"""Test that codesigning_type parameter filters the current artifact correctly"""
# Create an iOS artifact with development codesigning
self._create_ios_artifact(
main_binary_identifier="test-identifier",
build_version="1.0.0",
build_number=42,
extras={"codesigning_type": "development"},
)
# Create another artifact with app-store codesigning
self._create_ios_artifact(
main_binary_identifier="test-identifier",
build_version="1.0.0",
build_number=42,
extras={"codesigning_type": "app-store"},
)
url = self._get_url()
response = self.client.get(
url
+ "?app_id=com.example.app&platform=ios&build_version=1.0.0&main_binary_identifier=test-identifier&codesigning_type=development",
format="json",
HTTP_AUTHORIZATION=f"Bearer {self.api_token}",
)
assert response.status_code == 200
data = response.json()
# Should only find the development artifact
assert data["current"] is not None
assert data["current"]["build_version"] == "1.0.0"
assert data["current"]["build_number"] == 42
def test_codesigning_type_filters_updates(self):
"""Test that updates are filtered by the same codesigning_type as the current artifact"""
# Create current iOS artifact with development codesigning
self._create_ios_artifact(
main_binary_identifier="test-identifier",
build_version="1.0.0",
build_number=42,
extras={"codesigning_type": "development"},
)
# Create update with development codesigning (should be returned)
self._create_ios_artifact(
main_binary_identifier="different-identifier",
build_version="1.1.0",
build_number=50,
extras={"codesigning_type": "development"},
)
# Create update with app-store codesigning (should NOT be returned)
self._create_ios_artifact(
main_binary_identifier="another-identifier",
build_version="1.2.0",
build_number=60,
extras={"codesigning_type": "app-store"},
)
url = self._get_url()
response = self.client.get(
url
+ "?app_id=com.example.app&platform=ios&build_version=1.0.0&main_binary_identifier=test-identifier",
format="json",
HTTP_AUTHORIZATION=f"Bearer {self.api_token}",
)
assert response.status_code == 200
data = response.json()
assert data["current"] is not None
assert data["current"]["build_version"] == "1.0.0"
# Should only return the development update (1.1.0), not app-store (1.2.0)
assert data["update"] is not None
assert data["update"]["build_version"] == "1.1.0"
assert data["update"]["build_number"] == 50
def test_codesigning_type_no_matching_update(self):
"""Test that no update is returned when codesigning_type doesn't match"""
# Create current iOS artifact with development codesigning
self._create_ios_artifact(
main_binary_identifier="test-identifier",
build_version="1.0.0",
build_number=42,
extras={"codesigning_type": "development"},
)
# Create update with app-store codesigning only
self._create_ios_artifact(
main_binary_identifier="different-identifier",
build_version="1.1.0",
build_number=50,
extras={"codesigning_type": "app-store"},
)
url = self._get_url()
response = self.client.get(
url
+ "?app_id=com.example.app&platform=ios&build_version=1.0.0&main_binary_identifier=test-identifier",
format="json",
HTTP_AUTHORIZATION=f"Bearer {self.api_token}",
)
assert response.status_code == 200
data = response.json()
assert data["current"] is not None
assert data["current"]["build_version"] == "1.0.0"
# Should not return update because codesigning_type doesn't match
assert data["update"] is None
def test_codesigning_type_with_build_configuration(self):
"""Test that codesigning_type works correctly with build configurations"""
debug_config, _ = PreprodBuildConfiguration.objects.get_or_create(
project=self.project, name="debug"
)
# Create current artifact with debug configuration and development codesigning
self._create_ios_artifact(
main_binary_identifier="test-identifier",
build_version="1.0.0",
build_number=42,
build_configuration=debug_config,
extras={"codesigning_type": "development"},
)
# Create update with same configuration and codesigning type
self._create_ios_artifact(
main_binary_identifier="different-identifier",
build_version="1.1.0",
build_number=50,
build_configuration=debug_config,
extras={"codesigning_type": "development"},
)
# Create update with same configuration but different codesigning type
self._create_ios_artifact(
main_binary_identifier="another-identifier",
build_version="1.2.0",
build_number=60,
build_configuration=debug_config,
extras={"codesigning_type": "app-store"},
)
url = self._get_url()
response = self.client.get(
url
+ "?app_id=com.example.app&platform=ios&build_version=1.0.0&main_binary_identifier=test-identifier&build_configuration=debug",
format="json",
HTTP_AUTHORIZATION=f"Bearer {self.api_token}",
)
assert response.status_code == 200
data = response.json()
assert data["current"] is not None
assert data["current"]["build_version"] == "1.0.0"
# Should return 1.1.0 (matching codesigning_type), not 1.2.0
assert data["update"] is not None
assert data["update"]["build_version"] == "1.1.0"
assert data["update"]["build_number"] == 50
def test_codesigning_type_provided_explicitly(self):
"""Test that explicitly provided codesigning_type parameter is used for filtering"""
# Create artifact with development codesigning
self._create_ios_artifact(
main_binary_identifier="test-identifier",
build_version="1.0.0",
build_number=42,
extras={"codesigning_type": "development"},
)
# Create artifact with app-store codesigning
self._create_ios_artifact(
main_binary_identifier="test-identifier",
build_version="1.0.0",
build_number=42,
extras={"codesigning_type": "app-store"},
)
# Request specifically for app-store
url = self._get_url()
response = self.client.get(
url
+ "?app_id=com.example.app&platform=ios&build_version=1.0.0&main_binary_identifier=test-identifier&codesigning_type=app-store",
format="json",
HTTP_AUTHORIZATION=f"Bearer {self.api_token}",
)
assert response.status_code == 200
data = response.json()
# Should find the app-store artifact
assert data["current"] is not None
| ProjectPreprodCheckForUpdatesEndpointTest |
python | pytorch__pytorch | torch/testing/_internal/common_device_type.py | {
"start": 12907,
"end": 21402
} | class ____(TestCase):
device_type: str = "generic_device_type"
# Flag to disable test suite early due to unrecoverable error such as CUDA error.
_stop_test_suite = False
# Precision is a thread-local setting since it may be overridden per test
_tls = threading.local()
_tls.precision = TestCase._precision
_tls.rel_tol = TestCase._rel_tol
@property
def precision(self):
return self._tls.precision
@precision.setter
def precision(self, prec):
self._tls.precision = prec
@property
def rel_tol(self):
return self._tls.rel_tol
@rel_tol.setter
def rel_tol(self, prec):
self._tls.rel_tol = prec
# Returns a string representing the device that single device tests should use.
# Note: single device tests use this device exclusively.
@classmethod
def get_primary_device(cls):
return cls.device_type
@classmethod
def _init_and_get_primary_device(cls):
try:
return cls.get_primary_device()
except Exception:
# For CUDATestBase, XPUTestBase, XLATestBase, and possibly others, the primary device won't be available
# until setUpClass() sets it. Call that manually here if needed.
if hasattr(cls, "setUpClass"):
cls.setUpClass()
return cls.get_primary_device()
# Returns a list of strings representing all available devices of this
# device type. The primary device must be the first string in the list
# and the list must contain no duplicates.
# Note: UNSTABLE API. Will be replaced once PyTorch has a device generic
# mechanism of acquiring all available devices.
@classmethod
def get_all_devices(cls):
return [cls.get_primary_device()]
# Returns the dtypes the test has requested.
# Prefers device-specific dtype specifications over generic ones.
@classmethod
def _get_dtypes(cls, test):
if not hasattr(test, "dtypes"):
return None
default_dtypes = test.dtypes.get("all")
msg = f"@dtypes is mandatory when using @dtypesIf however '{test.__name__}' didn't specify it"
assert default_dtypes is not None, msg
return test.dtypes.get(cls.device_type, default_dtypes)
def _get_precision_override(self, test, dtype):
if not hasattr(test, "precision_overrides"):
return self.precision
return test.precision_overrides.get(dtype, self.precision)
def _get_tolerance_override(self, test, dtype):
if not hasattr(test, "tolerance_overrides"):
return self.precision, self.rel_tol
return test.tolerance_overrides.get(dtype, tol(self.precision, self.rel_tol))
def _apply_precision_override_for_test(self, test, param_kwargs):
dtype = param_kwargs.get("dtype")
dtype = param_kwargs.get("dtypes", dtype)
if dtype:
self.precision = self._get_precision_override(test, dtype)
self.precision, self.rel_tol = self._get_tolerance_override(test, dtype)
# Creates device-specific tests.
@classmethod
def instantiate_test(cls, name, test, *, generic_cls=None):
def instantiate_test_helper(
cls, name, *, test, param_kwargs=None, decorator_fn=lambda _: []
):
# Add the device param kwarg if the test needs device or devices.
param_kwargs = {} if param_kwargs is None else param_kwargs
test_sig_params = inspect.signature(test).parameters
if "device" in test_sig_params or "devices" in test_sig_params:
device_arg: str = cls._init_and_get_primary_device()
if hasattr(test, "num_required_devices"):
device_arg = cls.get_all_devices()
_update_param_kwargs(param_kwargs, "device", device_arg)
# Apply decorators based on param kwargs.
for decorator in decorator_fn(param_kwargs):
test = decorator(test)
# Constructs the test
@wraps(test)
def instantiated_test(self, param_kwargs=param_kwargs):
# Sets precision and runs test
# Note: precision is reset after the test is run
guard_precision = self.precision
guard_rel_tol = self.rel_tol
try:
self._apply_precision_override_for_test(test, param_kwargs)
result = test(self, **param_kwargs)
except RuntimeError as rte:
# check if rte should stop entire test suite.
self._stop_test_suite = self._should_stop_test_suite()
# Check if test has been decorated with `@expectedFailure`
# Using `__unittest_expecting_failure__` attribute, see
# https://github.com/python/cpython/blob/ffa505b580464/Lib/unittest/case.py#L164
# In that case, make it fail with "unexpected success" by suppressing exception
if (
getattr(test, "__unittest_expecting_failure__", False)
and self._stop_test_suite
):
import sys
print(
"Suppressing fatal exception to trigger unexpected success",
file=sys.stderr,
)
return
# raise the runtime error as is for the test suite to record.
raise rte
finally:
self.precision = guard_precision
self.rel_tol = guard_rel_tol
return result
assert not hasattr(cls, name), f"Redefinition of test {name}"
setattr(cls, name, instantiated_test)
def default_parametrize_fn(test, generic_cls, device_cls):
# By default, no parametrization is needed.
yield (test, "", {}, lambda _: [])
# Parametrization decorators set the parametrize_fn attribute on the test.
parametrize_fn = getattr(test, "parametrize_fn", default_parametrize_fn)
# If one of the @dtypes* decorators is present, also parametrize over the dtypes set by it.
dtypes = cls._get_dtypes(test)
if dtypes is not None:
def dtype_parametrize_fn(test, generic_cls, device_cls, dtypes=dtypes):
for dtype in dtypes:
param_kwargs: dict[str, Any] = {}
_update_param_kwargs(param_kwargs, "dtype", dtype)
# Note that an empty test suffix is set here so that the dtype can be appended
# later after the device.
yield (test, "", param_kwargs, lambda _: [])
parametrize_fn = compose_parametrize_fns(
dtype_parametrize_fn, parametrize_fn
)
# Instantiate the parametrized tests.
for (
test, # noqa: B020
test_suffix,
param_kwargs,
decorator_fn,
) in parametrize_fn(test, generic_cls, cls):
test_suffix = "" if test_suffix == "" else "_" + test_suffix
cls_device_type = (
cls.device_type
if cls.device_type != "privateuse1"
else torch._C._get_privateuse1_backend_name()
)
device_suffix = "_" + cls_device_type
# Note: device and dtype suffix placement
# Special handling here to place dtype(s) after device according to test name convention.
dtype_kwarg = None
if "dtype" in param_kwargs or "dtypes" in param_kwargs:
dtype_kwarg = (
param_kwargs["dtypes"]
if "dtypes" in param_kwargs
else param_kwargs["dtype"]
)
test_name = (
f"{name}{test_suffix}{device_suffix}{_dtype_test_suffix(dtype_kwarg)}"
)
instantiate_test_helper(
cls=cls,
name=test_name,
test=test,
param_kwargs=param_kwargs,
decorator_fn=decorator_fn,
)
def run(self, result=None):
super().run(result=result)
# Early terminate test if _stop_test_suite is set.
if self._stop_test_suite:
result.stop()
| DeviceTypeTestBase |
python | donnemartin__interactive-coding-challenges | graphs_trees/bst/bst.py | {
"start": 0,
"end": 211
} | class ____(object):
def __init__(self, data):
self.data = data
self.left = None
self.right = None
self.parent = None
def __repr__(self):
return str(self.data)
| Node |
python | apache__airflow | task-sdk/src/airflow/sdk/definitions/context.py | {
"start": 1364,
"end": 4214
} | class ____(TypedDict, total=False):
"""Jinja2 template context for task rendering."""
conn: Any
dag: DAG
dag_run: DagRunProtocol
data_interval_end: DateTime | None
data_interval_start: DateTime | None
outlet_events: OutletEventAccessorsProtocol
ds: str
ds_nodash: str
expanded_ti_count: int | None
exception: None | str | BaseException
inlets: list
inlet_events: InletEventsAccessors
logical_date: DateTime
macros: Any
map_index_template: str | None
outlets: list
params: dict[str, Any]
prev_data_interval_start_success: DateTime | None
prev_data_interval_end_success: DateTime | None
prev_start_date_success: DateTime | None
prev_end_date_success: DateTime | None
reason: str | None
run_id: str
start_date: DateTime
# TODO: Remove Operator from below once we have MappedOperator to the Task SDK
# and once we can remove context related code from the Scheduler/models.TaskInstance
task: BaseOperator | Operator
task_reschedule_count: int
task_instance: RuntimeTaskInstanceProtocol
task_instance_key_str: str
# `templates_dict` is only set in PythonOperator
templates_dict: dict[str, Any] | None
test_mode: bool
ti: RuntimeTaskInstanceProtocol
# triggering_asset_events: Mapping[str, Collection[AssetEvent | AssetEventPydantic]]
triggering_asset_events: Any
try_number: int | None
ts: str
ts_nodash: str
ts_nodash_with_tz: str
var: Any
KNOWN_CONTEXT_KEYS: set[str] = set(Context.__annotations__.keys())
def context_merge(context: Context, *args: Any, **kwargs: Any) -> None:
"""
Merge parameters into an existing context.
Like ``dict.update()`` , this take the same parameters, and updates
``context`` in-place.
This is implemented as a free function because the ``Context`` type is
"faked" as a ``TypedDict`` in ``context.pyi``, which cannot have custom
functions.
:meta private:
"""
if not context:
context = Context()
context.update(*args, **kwargs)
def get_current_context() -> Context:
"""
Retrieve the execution context dictionary without altering user method's signature.
This is the simplest method of retrieving the execution context dictionary.
**Old style:**
.. code:: python
def my_task(**context):
ti = context["ti"]
**New style:**
.. code:: python
from airflow.sdk import get_current_context
def my_task():
context = get_current_context()
ti = context["ti"]
Current context will only have value if this method was called after an operator
was starting to execute.
"""
from airflow.sdk.definitions._internal.contextmanager import _get_current_context
return _get_current_context()
| Context |
python | tensorflow__tensorflow | tensorflow/python/tpu/async_checkpoint_test.py | {
"start": 4425,
"end": 9133
} | class ____(test.TestCase):
def testAsyncCheckpointHookEnabled(self):
resolver = tpu_cluster_resolver.TPUClusterResolver(
tpu=FLAGS.tpu, zone=FLAGS.zone, project=FLAGS.project)
checkpoint_interval = 5
config = tpu_config.RunConfig(
master=resolver.master(),
model_dir=os.path.join(FLAGS.model_dir, 'runconfig'),
save_checkpoints_steps=1000,
keep_checkpoint_max=11, # off by one
tpu_config=tpu_config.TPUConfig(
iterations_per_loop=checkpoint_interval,))
estimator = tpu_estimator.TPUEstimator(
use_tpu=True,
model_fn=model_fn,
config=config,
train_batch_size=32,
eval_batch_size=32,
predict_batch_size=1,
params={},
)
max_steps = 100
mock_listener = test.mock.create_autospec(
basic_session_run_hooks.CheckpointSaverListener)
estimator.train(
input_fn=input_fn,
max_steps=max_steps,
hooks=[
async_checkpoint.AsyncCheckpointSaverHook(
FLAGS.model_dir,
save_steps=checkpoint_interval,
listeners=[mock_listener])
])
current_step = estimator_lib._load_global_step_from_checkpoint_dir(
FLAGS.model_dir) # pylint: disable=protected-access
# TODO(power) -- identify a better way to count the number of checkpoints.
checkpoints = file_io.get_matching_files(
FLAGS.model_dir + '/model.ckpt*.meta')
checkpoint_count = len(checkpoints)
logging.info('Found %d checkpoints: %s', checkpoint_count, checkpoints)
self.assertLessEqual(checkpoint_count, 10)
self.assertEqual(current_step, max_steps)
mock_listener.before_save.assert_called()
mock_listener.after_save.assert_called()
# save called by hook in `after_create_session` and every `after_run`
num_save_calls = 1 + max_steps // checkpoint_interval
sync_count_1, async_count_1 = _get_checkpoint_metrics_counts()
# save might be called one extra time in `end` hook based on timing of
# `_last_checkpoint_step` update in the final `after_run` call
self.assertIn(sync_count_1, [num_save_calls, num_save_calls + 1])
self.assertLessEqual(async_count_1, num_save_calls)
training_time_saved = metrics.GetTrainingTimeSaved(
api_label=async_checkpoint._ASYNC_CHECKPOINT_V1)
self.assertGreater(training_time_saved, 0)
def testAsyncCheckpointHookWithoutListeners(self):
resolver = tpu_cluster_resolver.TPUClusterResolver(
tpu=FLAGS.tpu, zone=FLAGS.zone, project=FLAGS.project)
checkpoint_interval = 5
keep_checkpoint_max = 10
config = tpu_config.RunConfig(
master=resolver.master(),
model_dir=os.path.join(FLAGS.model_dir, 'runconfig'),
save_checkpoints_steps=1000,
keep_checkpoint_max=keep_checkpoint_max+1, # off by one
tpu_config=tpu_config.TPUConfig(
iterations_per_loop=checkpoint_interval,))
estimator = tpu_estimator.TPUEstimator(
use_tpu=True,
model_fn=model_fn,
config=config,
train_batch_size=32,
eval_batch_size=32,
predict_batch_size=1,
params={},
)
max_steps = 100
estimator.train(
input_fn=input_fn,
max_steps=max_steps,
hooks=[
async_checkpoint.AsyncCheckpointSaverHook(
FLAGS.model_dir,
save_steps=checkpoint_interval)
])
current_step = estimator_lib._load_global_step_from_checkpoint_dir(
FLAGS.model_dir) # pylint: disable=protected-access
# TODO(power) -- identify a better way to count the number of checkpoints.
checkpoints = file_io.get_matching_files(
FLAGS.model_dir + '/model.ckpt*.meta')
checkpoint_count = len(checkpoints)
logging.info('Found %d checkpoints: %s', checkpoint_count, checkpoints)
self.assertLessEqual(checkpoint_count, keep_checkpoint_max)
self.assertEqual(current_step, max_steps)
# save called by hook in `after_create_session` and every `after_run`
num_save_calls = 1 + max_steps // checkpoint_interval
sync_count_1, async_count_1 = _get_checkpoint_metrics_counts()
# save might be called one extra time in `end` hook based on timing of
# `_last_checkpoint_step` update in the final `after_run` call
self.assertIn(sync_count_1, [num_save_calls, num_save_calls + 1])
self.assertLessEqual(async_count_1, num_save_calls)
training_time_saved = metrics.GetTrainingTimeSaved(
api_label=async_checkpoint._ASYNC_CHECKPOINT_V1)
self.assertGreater(training_time_saved, 0)
if __name__ == '__main__':
v2_compat.disable_v2_behavior()
test.main()
| AsyncCheckpointingTest |
python | allegroai__clearml | clearml/backend_api/services/v2_13/tasks.py | {
"start": 164527,
"end": 165724
} | class ____(Response):
"""
Response of tasks.delete_artifacts endpoint.
:param deleted: Indicates if the task was updated successfully
:type deleted: int
"""
_service = "tasks"
_action = "delete_artifacts"
_version = "2.13"
_schema = {
"definitions": {},
"properties": {
"deleted": {
"description": "Indicates if the task was updated successfully",
"type": ["integer", "null"],
}
},
"type": "object",
}
def __init__(self, deleted: Optional[int] = None, **kwargs: Any) -> None:
super(DeleteArtifactsResponse, self).__init__(**kwargs)
self.deleted = deleted
@schema_property("deleted")
def deleted(self) -> Optional[int]:
return self._property_deleted
@deleted.setter
def deleted(self, value: Optional[int]) -> None:
if value is None:
self._property_deleted = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "deleted", six.integer_types)
self._property_deleted = value
| DeleteArtifactsResponse |
python | coleifer__peewee | peewee.py | {
"start": 163155,
"end": 163458
} | class ____(_StringField):
field_type = 'VARCHAR'
def __init__(self, max_length=255, *args, **kwargs):
self.max_length = max_length
super(CharField, self).__init__(*args, **kwargs)
def get_modifiers(self):
return self.max_length and [self.max_length] or None
| CharField |
python | aio-libs__aiohttp | aiohttp/web_exceptions.py | {
"start": 7033,
"end": 7096
} | class ____(HTTPClientError):
status_code = 400
| HTTPBadRequest |
python | qdrant__qdrant-client | qdrant_client/http/models/models.py | {
"start": 81747,
"end": 81845
} | class ____(BaseModel):
connection_pool_size: int = Field(..., description="")
| P2pConfigTelemetry |
python | doocs__leetcode | solution/2500-2599/2596.Check Knight Tour Configuration/Solution.py | {
"start": 0,
"end": 510
} | class ____:
def checkValidGrid(self, grid: List[List[int]]) -> bool:
if grid[0][0]:
return False
n = len(grid)
pos = [None] * (n * n)
for i in range(n):
for j in range(n):
pos[grid[i][j]] = (i, j)
for (x1, y1), (x2, y2) in pairwise(pos):
dx, dy = abs(x1 - x2), abs(y1 - y2)
ok = (dx == 1 and dy == 2) or (dx == 2 and dy == 1)
if not ok:
return False
return True
| Solution |
python | encode__httpx | httpx/_exceptions.py | {
"start": 6421,
"end": 7098
} | class ____(StreamError):
"""
Attempted to read or stream content, but the content has already
been streamed.
"""
def __init__(self) -> None:
message = (
"Attempted to read or stream some content, but the content has "
"already been streamed. For requests, this could be due to passing "
"a generator as request content, and then receiving a redirect "
"response or a secondary request as part of an authentication flow."
"For responses, this could be due to attempting to stream the response "
"content more than once."
)
super().__init__(message)
| StreamConsumed |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 15268,
"end": 15628
} | class ____(sgqlc.types.Enum):
"""Properties by which enterprise administrator invitation
connections can be ordered.
Enumeration Choices:
* `CREATED_AT`: Order enterprise administrator member invitations
by creation time
"""
__schema__ = github_schema
__choices__ = ("CREATED_AT",)
| EnterpriseAdministratorInvitationOrderField |
python | pytorch__pytorch | torch/ao/quantization/fx/_model_report/detector.py | {
"start": 4436,
"end": 8285
} | class ____(ABC):
r"""Base Detector Module
Any detector class should derive from this class.
Concrete detectors should follow the same general API, which includes:
- A method to calculate and return observer insertion points
- Should return both the fqns and the Observer class to insert
- A method to return a report based on the detector
- Should return a str-based report and dict info in Tuple[str,Dict] format
"""
def __init__(self) -> None:
super().__init__()
self.detector_config_info = None
@abstractmethod
def determine_observer_insert_points(self, model) -> dict:
r"""
Args
model (nn.Module or subclass): model to find observer insertion points
Returns a Dict mapping from unique observer fqns (where we want to insert them) to a Dict.
This dict maps string keys to detector specific information
"""
@abstractmethod
def get_detector_name(self) -> str:
r"""Returns the name of the current detector"""
@abstractmethod
def get_qconfig_info(self, model) -> dict[str, DetectorQConfigInfo]:
r"""Returns the DetectorQConfigInfo for each module_fqn relevant
Args
model (nn.Module or subclass): model to find observer insertion points
Returns a Dict mapping from unique observer fqns (where we want to insert them) to:
A DetectorQConfigInfo with the information to generate a QConfig for a specific module
"""
def _get_targeting_node(
self, prepared_fx_model: GraphModule, target_fqn: str
) -> torch.fx.node.Node:
r"""
Takes in a GraphModule and the target_fqn and finds the node whose target is this fqn.
If it's not found, it means it is most likely inside a fused layer
We just go one layer up in terms of the fqn we are searching for until we find parent node
If we get to empty string, then we know that it doesn't exist
The reason for the recursion is that if the model that we are looking for got fused,
we will have module fqn as e.g. x.linear.0 but the graph will only have a node for the fused module,
which would have fqn as x.linear so they will not match.
To handle this, if we don't match, we then take off the last bit of the fqn e.g. x.linear.0 -> x.linear,
or more generally foo.bar.baz -> foo.bar and search again, this will allow us to locate the correct module
even in cases with fusion
Args:
prepared_fx_model (GraphModule): The prepared Fx GraphModule
target_fqn (str): The fqn of the layer we are trying to target
Returns the node object we are trying to add observers around
"""
for node in prepared_fx_model.graph.nodes:
# if the node's target is our target, return it
if node.target == target_fqn:
return node
# getting here means node not found
# if no "." we are already at base and failed
parent_fqn_sep_index = target_fqn.rfind(".")
if parent_fqn_sep_index == -1:
raise ValueError("passed in target_fqn not found in graph's targets.")
else:
# recursively call it with parent fqn
return self._get_targeting_node(
prepared_fx_model, target_fqn[:parent_fqn_sep_index]
)
@abstractmethod
def generate_detector_report(self, model) -> tuple[str, dict[str, Any]]:
r"""
Args
model (nn.Module or subclass): model to find observer insertion points
Returns a Tuple of two elements:
Str: string report of the suggested improvements
Dict: contains useful data collected by the observer pertinent to this report
"""
| DetectorBase |
python | kennethreitz__tablib | tests/test_tablib.py | {
"start": 1016,
"end": 19619
} | class ____(BaseTestCase):
"""Tablib test cases."""
def _test_export_data_in_all_formats(self, dataset, exclude=()):
all_formats = [
'json', 'yaml', 'csv', 'tsv', 'xls', 'xlsx', 'ods', 'html', 'jira',
'latex', 'df', 'rst',
]
for format_ in all_formats:
if format_ in exclude:
continue
dataset.export(format_)
def test_unknown_format(self):
with self.assertRaises(UnsupportedFormat):
data.export('??')
# A known format but uninstalled
del registry._formats['ods']
msg = (r"The 'ods' format is not available. You may want to install the "
"odfpy package \\(or `pip install \"tablib\\[ods\\]\"`\\).")
with self.assertRaisesRegex(UnsupportedFormat, msg):
data.export('ods')
def test_empty_append(self):
"""Verify append() correctly adds tuple with no headers."""
new_row = (1, 2, 3)
data.append(new_row)
# Verify width/data
self.assertTrue(data.width == len(new_row))
self.assertTrue(data[0] == new_row)
def test_empty_append_with_headers(self):
"""Verify append() correctly detects mismatch of number of
headers and data.
"""
data.headers = ['first', 'second']
new_row = (1, 2, 3, 4)
self.assertRaises(tablib.InvalidDimensions, data.append, new_row)
def test_set_headers_with_incorrect_dimension(self):
"""Verify headers correctly detects mismatch of number of
headers and data.
"""
data.append(self.john)
def set_header_callable():
data.headers = ['first_name']
self.assertRaises(tablib.InvalidDimensions, set_header_callable)
def test_add_column(self):
"""Verify adding column works with/without headers."""
data.append(['kenneth'])
data.append(['bessie'])
new_col = ['reitz', 'monke']
data.append_col(new_col)
self.assertEqual(data[0], ('kenneth', 'reitz'))
self.assertEqual(data.width, 2)
# With Headers
data.headers = ('fname', 'lname')
new_col = [21, 22]
data.append_col(new_col, header='age')
self.assertEqual(data['age'], new_col)
def test_add_column_no_data_no_headers(self):
"""Verify adding new column with no headers."""
new_col = ('reitz', 'monke')
data.append_col(new_col)
self.assertEqual(data[0], tuple([new_col[0]]))
self.assertEqual(data.width, 1)
self.assertEqual(data.height, len(new_col))
def test_add_column_with_header_ignored(self):
"""Verify append_col() ignores the header if data.headers has
not previously been set
"""
new_col = ('reitz', 'monke')
data.append_col(new_col, header='first_name')
self.assertEqual(data[0], tuple([new_col[0]]))
self.assertEqual(data.width, 1)
self.assertEqual(data.height, len(new_col))
self.assertEqual(data.headers, None)
def test_add_column_with_header_and_headers_only_exist(self):
"""Verify append_col() with header correctly detects mismatch when
headers exist but there is no existing row data
"""
data.headers = ['first_name']
# no data
new_col = ('allen')
def append_col_callable():
data.append_col(new_col, header='middle_name')
self.assertRaises(tablib.InvalidDimensions, append_col_callable)
def test_add_column_with_header_and_data_exists(self):
"""Verify append_col() works when headers and rows exists"""
data.headers = self.headers
data.append(self.john)
new_col = [10]
data.append_col(new_col, header='age')
self.assertEqual(data.height, 1)
self.assertEqual(data.width, len(self.john) + 1)
self.assertEqual(data['age'], new_col)
self.assertEqual(len(data.headers), len(self.headers) + 1)
def test_add_callable_column(self):
"""Verify adding column with values specified as callable."""
def new_col(x):
return x[0]
self.founders.append_col(new_col, header='first_again')
def test_header_slicing(self):
"""Verify slicing by headers."""
self.assertEqual(self.founders['first_name'],
[self.john[0], self.george[0], self.tom[0]])
self.assertEqual(self.founders['last_name'],
[self.john[1], self.george[1], self.tom[1]])
self.assertEqual(self.founders['gpa'],
[self.john[2], self.george[2], self.tom[2]])
def test_get_col(self):
"""Verify getting columns by index"""
self.assertEqual(
self.founders.get_col(list(self.headers).index('first_name')),
[self.john[0], self.george[0], self.tom[0]])
self.assertEqual(
self.founders.get_col(list(self.headers).index('last_name')),
[self.john[1], self.george[1], self.tom[1]])
self.assertEqual(
self.founders.get_col(list(self.headers).index('gpa')),
[self.john[2], self.george[2], self.tom[2]])
def test_data_slicing(self):
"""Verify slicing by data."""
# Slice individual rows
self.assertEqual(self.founders[0], self.john)
self.assertEqual(self.founders[:1], [self.john])
self.assertEqual(self.founders[1:2], [self.george])
self.assertEqual(self.founders[-1], self.tom)
self.assertEqual(self.founders[3:], [])
# Slice multiple rows
self.assertEqual(self.founders[:], [self.john, self.george, self.tom])
self.assertEqual(self.founders[0:2], [self.john, self.george])
self.assertEqual(self.founders[1:3], [self.george, self.tom])
self.assertEqual(self.founders[2:], [self.tom])
def test_row_slicing(self):
"""Verify Row slicing. Issue #184."""
john = Row(self.john)
self.assertEqual(john[:], list(self.john[:]))
self.assertEqual(john[0:], list(self.john[0:]))
self.assertEqual(john[:2], list(self.john[:2]))
self.assertEqual(john[0:2], list(self.john[0:2]))
self.assertEqual(john[0:-1], list(self.john[0:-1]))
def test_delete(self):
"""Verify deleting from dataset works."""
# Delete from front of object
del self.founders[0]
self.assertEqual(self.founders[:], [self.george, self.tom])
# Verify dimensions, width should NOT change
self.assertEqual(self.founders.height, 2)
self.assertEqual(self.founders.width, 3)
# Delete from back of object
del self.founders[1]
self.assertEqual(self.founders[:], [self.george])
# Verify dimensions, width should NOT change
self.assertEqual(self.founders.height, 1)
self.assertEqual(self.founders.width, 3)
# Delete from invalid index
self.assertRaises(IndexError, self.founders.__delitem__, 3)
def test_str_no_columns(self):
d = tablib.Dataset(['a', 1], ['b', 2], ['c', 3])
output = '%s' % d
self.assertEqual(output.splitlines(), [
'a|1',
'b|2',
'c|3'
])
def test_unicode_append(self):
"""Passes in a single unicode character and exports."""
new_row = ('å', 'é')
data.append(new_row)
self._test_export_data_in_all_formats(data)
def test_datetime_append(self):
"""Passes in a single datetime and a single date and exports."""
new_row = (
datetime.datetime.now(),
datetime.datetime.today(),
)
data.append(new_row)
self._test_export_data_in_all_formats(data)
def test_separator_append(self):
for a in range(3):
data.append_separator('foobar')
for a in range(5):
data.append(['asdf', 'asdf', 'asdf'])
self._test_export_data_in_all_formats(data)
def test_book_export_no_exceptions(self):
"""Test that various exports don't error out."""
book = tablib.Databook()
book.add_sheet(data)
# These formats don't implement the book abstraction.
unsupported = ['csv', 'tsv', 'jira', 'latex', 'df']
self._test_export_data_in_all_formats(book, exclude=unsupported)
def test_book_unsupported_loading(self):
with self.assertRaises(UnsupportedFormat):
tablib.Databook().load('Any stream', 'csv')
def test_book_unsupported_export(self):
book = tablib.Databook().load('[{"title": "first", "data": [{"first_name": "John"}]}]', 'json')
with self.assertRaises(UnsupportedFormat):
book.export('csv')
def test_book_import_from_file(self):
xlsx_source = Path(__file__).parent / 'files' / 'founders.xlsx'
with open(str(xlsx_source), mode='rb') as fh:
book = tablib.Databook().load(fh, 'xlsx')
self.assertEqual(eval(book.json)[0]['title'], 'Feuille1')
def test_dataset_import_from_file(self):
xlsx_source = Path(__file__).parent / 'files' / 'founders.xlsx'
with open(str(xlsx_source), mode='rb') as fh:
dset = tablib.Dataset().load(fh, 'xlsx')
self.assertEqual(eval(dset.json)[0]['last_name'], 'Adams')
def test_auto_format_detect(self):
"""Test auto format detection."""
# html, jira, latex, rst are export only.
_xls = self.founders.export('xls')
self.assertEqual(tablib.detect_format(_xls), 'xls')
_xlsx = self.founders.export('xlsx')
self.assertEqual(tablib.detect_format(_xlsx), 'xlsx')
_ods = self.founders.export('ods')
self.assertEqual(tablib.detect_format(_ods), 'ods')
_df = self.founders.export('df')
self.assertEqual(tablib.detect_format(_df), 'df')
_yaml = '- {age: 90, first_name: John, last_name: Adams}'
self.assertEqual(tablib.detect_format(_yaml), 'yaml')
_json = '[{"last_name": "Adams","age": 90,"first_name": "John"}]'
self.assertEqual(tablib.detect_format(_json), 'json')
_csv = '1,2,3\n4,5,6\n7,8,9\n'
self.assertEqual(tablib.detect_format(_csv), 'csv')
_tsv = '1\t2\t3\n4\t5\t6\n7\t8\t9\n'
self.assertEqual(tablib.detect_format(_tsv), 'tsv')
_bunk = StringIO(
'¡¡¡¡¡¡---///\n\n\n¡¡£™∞¢£§∞§¶•¶ª∞¶•ªº••ª–º§•†•§º¶•†¥ª–º•§ƒø¥¨©πƒø†ˆ¥ç©¨√øˆ¥≈†ƒ¥ç©ø¨çˆ¥ƒçø¶'
)
self.assertEqual(tablib.detect_format(_bunk), None)
def test_transpose(self):
"""Transpose a dataset."""
transposed_founders = self.founders.transpose()
first_row = transposed_founders[0]
second_row = transposed_founders[1]
self.assertEqual(transposed_founders.headers,
["first_name", "John", "George", "Thomas"])
self.assertEqual(first_row,
("last_name", "Adams", "Washington", "Jefferson"))
self.assertEqual(second_row,
("gpa", 90, 67, 50))
def test_transpose_multiple_headers(self):
data = tablib.Dataset()
data.headers = ("first_name", "last_name", "age")
data.append(('John', 'Adams', 90))
data.append(('George', 'Washington', 67))
data.append(('John', 'Tyler', 71))
self.assertEqual(data.transpose().transpose().dict, data.dict)
def test_row_stacking(self):
"""Row stacking."""
to_join = tablib.Dataset(headers=self.founders.headers)
for row in self.founders:
to_join.append(row=row)
row_stacked = self.founders.stack(to_join)
for column in row_stacked.headers:
original_data = self.founders[column]
expected_data = original_data + original_data
self.assertEqual(row_stacked[column], expected_data)
def test_column_stacking(self):
"""Column stacking"""
to_join = tablib.Dataset(headers=self.founders.headers)
for row in self.founders:
to_join.append(row=row)
column_stacked = self.founders.stack_cols(to_join)
for index, row in enumerate(column_stacked):
original_data = self.founders[index]
expected_data = original_data + original_data
self.assertEqual(row, expected_data)
self.assertEqual(column_stacked[0],
("John", "Adams", 90, "John", "Adams", 90))
def test_sorting(self):
"""Sort columns."""
sorted_data = self.founders.sort(col="first_name")
self.assertEqual(sorted_data.title, 'Founders')
first_row = sorted_data[0]
second_row = sorted_data[2]
third_row = sorted_data[1]
expected_first = self.founders[1]
expected_second = self.founders[2]
expected_third = self.founders[0]
self.assertEqual(first_row, expected_first)
self.assertEqual(second_row, expected_second)
self.assertEqual(third_row, expected_third)
def test_remove_duplicates(self):
"""Unique Rows."""
self.founders.append(self.john)
self.founders.append(self.george)
self.founders.append(self.tom)
self.assertEqual(self.founders[0], self.founders[3])
self.assertEqual(self.founders[1], self.founders[4])
self.assertEqual(self.founders[2], self.founders[5])
self.assertEqual(self.founders.height, 6)
self.founders.remove_duplicates()
self.assertEqual(self.founders[0], self.john)
self.assertEqual(self.founders[1], self.george)
self.assertEqual(self.founders[2], self.tom)
self.assertEqual(self.founders.height, 3)
def test_wipe(self):
"""Purge a dataset."""
new_row = (1, 2, 3)
data.append(new_row)
# Verify width/data
self.assertTrue(data.width == len(new_row))
self.assertTrue(data[0] == new_row)
data.wipe()
new_row = (1, 2, 3, 4)
data.append(new_row)
self.assertTrue(data.width == len(new_row))
self.assertTrue(data[0] == new_row)
def test_subset(self):
"""Create a subset of a dataset"""
rows = (0, 2)
columns = ('first_name', 'gpa')
data.headers = self.headers
data.append(self.john)
data.append(self.george)
data.append(self.tom)
# Verify data is truncated
subset = data.subset(rows=rows, cols=columns)
self.assertEqual(type(subset), tablib.Dataset)
self.assertEqual(subset.headers, list(columns))
self.assertEqual(subset._data[0].list, ['John', 90])
self.assertEqual(subset._data[1].list, ['Thomas', 50])
def test_formatters(self):
"""Confirm formatters are being triggered."""
def _formatter(cell_value):
return str(cell_value).upper()
self.founders.add_formatter('last_name', _formatter)
for name in [r['last_name'] for r in self.founders.dict]:
self.assertTrue(name.isupper())
def test_unicode_renders_markdown_table(self):
# add another entry to test right field width for
# integer
self.founders.append(('Old', 'Man', 100500))
self.assertEqual('first_name|last_name |gpa ', str(self.founders).split('\n')[0])
def test_pickle_unpickle_dataset(self):
before_pickle = self.founders.export('json')
founders = pickle.loads(pickle.dumps(self.founders))
self.assertEqual(founders.export('json'), before_pickle)
def test_databook_add_sheet_accepts_only_dataset_instances(self):
class NotDataset:
def append(self, item):
pass
dataset = NotDataset()
dataset.append(self.john)
self.assertRaises(tablib.InvalidDatasetType, book.add_sheet, dataset)
def test_databook_add_sheet_accepts_dataset_subclasses(self):
class DatasetSubclass(tablib.Dataset):
pass
# just checking if subclass of tablib.Dataset can be added to Databook
dataset = DatasetSubclass()
dataset.append(self.john)
dataset.append(self.tom)
try:
book.add_sheet(dataset)
except tablib.InvalidDatasetType:
self.fail("Subclass of tablib.Dataset should be accepted by Databook.add_sheet")
def test_databook_formatter_support_kwargs(self):
"""Test XLSX export with formatter configuration."""
self.founders.export('xlsx', freeze_panes=False)
def test_databook_formatter_with_new_lines(self):
"""Test XLSX export with new line in content."""
self.founders.append(('First\nSecond', 'Name', 42))
self.founders.export('xlsx')
def test_row_repr(self):
"""Row repr."""
# Arrange
john = Row(self.john)
# Act
output = str(john)
# Assert
self.assertEqual(output, "['John', 'Adams', 90]")
def test_row_pickle_unpickle(self):
"""Row __setstate__ and __getstate__."""
# Arrange
before_pickle = Row(self.john)
# Act
output = pickle.loads(pickle.dumps(before_pickle))
# Assert
self.assertEqual(output[0], before_pickle[0])
self.assertEqual(output[1], before_pickle[1])
self.assertEqual(output[2], before_pickle[2])
def test_row_lpush(self):
"""Row lpush."""
john = Row(self.john)
john.lpush(53)
self.assertEqual(john.list, [53, 'John', 'Adams', 90])
def test_row_append(self):
"""Row append."""
john = Row(self.john)
john.append('stuff')
self.assertEqual(john.list, ['John', 'Adams', 90, 'stuff'])
def test_row_contains(self):
"""Row __contains__."""
# Arrange
john = Row(self.john)
# Act / Assert
self.assertIn("John", john)
def test_row_no_tag(self):
"""Row has_tag."""
# Arrange
john = Row(self.john)
# Act / Assert
self.assertFalse(john.has_tag("not found"))
self.assertFalse(john.has_tag(None))
def test_row_has_tag(self):
"""Row has_tag."""
# Arrange
john = Row(self.john, tags=["tag1"])
# Act / Assert
self.assertTrue(john.has_tag("tag1"))
def test_row_has_tags(self):
"""Row has_tag."""
# Arrange
john = Row(self.john, tags=["tag1", "tag2"])
# Act / Assert
self.assertTrue(john.has_tag(["tag2", "tag1"]))
| TablibTestCase |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/math_ops/cwise_ops_test.py | {
"start": 7716,
"end": 11876
} | class ____(test.TestCase):
def _compareBinary(self, x, y, np_func, tf_func, use_gpu=False):
np_ans = np_func(x, y)
with test_util.device(use_gpu=use_gpu):
inx = ops.convert_to_tensor(x)
iny = ops.convert_to_tensor(y)
out = tf_func(inx, iny)
tf_val = self.evaluate(out)
self.assertEqual(out.dtype, dtypes_lib.bool)
self.assertAllEqual(np_ans, tf_val)
self.assertShapeEqual(np_ans, out)
def _not(self, x, use_gpu=False):
np_ans = np.logical_not(x)
with test_util.device(use_gpu=use_gpu):
out = math_ops.logical_not(ops.convert_to_tensor(x))
tf_val = self.evaluate(out)
self.assertEqual(out.dtype, dtypes_lib.bool)
self.assertAllEqual(np_ans, tf_val)
self.assertShapeEqual(np_ans, out)
def testScalar(self):
data = [np.array([True]), np.array([False])]
for use_gpu in [True, False]:
for x in data:
with self.subTest(use_gpu=use_gpu, x=x):
self._not(x, use_gpu)
for x in data:
for y in data:
with self.subTest(use_gpu=use_gpu, x=x, y=y):
self._compareBinary(x, y, np.logical_and, math_ops.logical_and,
use_gpu)
self._compareBinary(x, y, np.logical_or, math_ops.logical_or,
use_gpu)
self._compareBinary(x, y, np.logical_xor, math_ops.logical_xor,
use_gpu)
def testTensor(self):
x = np.random.randint(0, 2, 6).astype(np.bool_).reshape(1, 3, 2) # pylint: disable=too-many-function-args
y = np.random.randint(0, 2, 6).astype(np.bool_).reshape(1, 3, 2) # pylint: disable=too-many-function-args
for use_gpu in [True, False]:
with self.subTest(use_gpu=use_gpu):
self._not(x, use_gpu)
self._compareBinary(x, y, np.logical_and, math_ops.logical_and, use_gpu)
self._compareBinary(x, y, np.logical_or, math_ops.logical_or, use_gpu)
self._compareBinary(x, y, np.logical_xor, math_ops.logical_xor, use_gpu)
def testBCast(self):
shapes = [
([1, 3, 2], [1]),
([1, 3, 2], [2]),
([1, 3, 2], [3, 2]),
([1, 3, 2], [3, 1]),
([1, 3, 2], [1, 3, 2]),
([1, 3, 2], [2, 3, 1]),
([1, 3, 2], [2, 1, 1]),
([1, 3, 2], [1, 3, 1]),
([2, 1, 5], [2, 3, 1]),
([2, 0, 5], [2, 0, 1]),
([2, 3, 0], [2, 3, 1]),
]
for (xs, ys) in shapes:
x = np.random.randint(0, 2, np.prod(xs)).astype(np.bool_).reshape(xs)
y = np.random.randint(0, 2, np.prod(ys)).astype(np.bool_).reshape(ys)
for use_gpu in [True, False]:
with self.subTest(xs=xs, ys=ys, use_gpu=use_gpu):
self._compareBinary(x, y, np.logical_and, math_ops.logical_and,
use_gpu)
self._compareBinary(x, y, np.logical_or, math_ops.logical_or, use_gpu)
self._compareBinary(x, y, np.logical_xor, math_ops.logical_xor,
use_gpu)
@test_util.run_deprecated_v1
def testShapeMismatch(self):
x = np.random.randint(0, 2, 6).astype(np.bool_).reshape(1, 3, 2) # pylint: disable=too-many-function-args
y = np.random.randint(0, 2, 6).astype(np.bool_).reshape(3, 2, 1) # pylint: disable=too-many-function-args
for f in [math_ops.logical_and, math_ops.logical_or, math_ops.logical_xor]:
with self.subTest(f=f):
with self.assertRaisesWithPredicateMatch(
ValueError, lambda e: "Dimensions must" in str(e)):
f(x, y)
@test_util.run_deprecated_v1
def testUsingAsPythonValueFails(self):
# Ensure that we raise an error when the user attempts to treat a
# `Tensor` as a Python `bool`.
b = constant_op.constant(False)
with self.assertRaises(TypeError):
if b:
pass
x = constant_op.constant(3)
y = constant_op.constant(4)
with self.assertRaises(TypeError):
if x > y:
pass
z = constant_op.constant(7)
# The chained comparison should fail because Python computes `x <
# y` and short-circuits the comparison with `z` if it is `False`.
with self.assertRaises(TypeError):
_ = x < y < z
| LogicalOpTest |
python | great-expectations__great_expectations | contrib/great_expectations_zipcode_expectations/great_expectations_zipcode_expectations/expectations/expect_column_values_to_be_valid_maryland_zip.py | {
"start": 747,
"end": 1751
} | class ____(ColumnMapMetricProvider):
# This is the id string that will be used to reference your metric.
condition_metric_name = "column_values.valid_maryland_zip"
# This method implements the core logic for the PandasExecutionEngine
@column_condition_partial(engine=PandasExecutionEngine)
def _pandas(cls, column, **kwargs):
return column.apply(lambda x: is_valid_maryland_zip(x))
# This method defines the business logic for evaluating your metric when using a SqlAlchemyExecutionEngine
# @column_condition_partial(engine=SqlAlchemyExecutionEngine)
# def _sqlalchemy(cls, column, _dialect, **kwargs):
# raise NotImplementedError
# This method defines the business logic for evaluating your metric when using a SparkDFExecutionEngine
# @column_condition_partial(engine=SparkDFExecutionEngine)
# def _spark(cls, column, **kwargs):
# raise NotImplementedError
# This class defines the Expectation itself
| ColumnValuesToBeValidMarylandZip |
python | getsentry__sentry | tests/sentry/relocation/api/endpoints/test_cancel.py | {
"start": 596,
"end": 9401
} | class ____(APITestCase):
endpoint = "sentry-api-0-relocations-cancel"
method = "put"
def setUp(self) -> None:
super().setUp()
self.owner = self.create_user(
email="owner", is_superuser=False, is_staff=True, is_active=True
)
self.superuser = self.create_user(is_superuser=True)
self.relocation: Relocation = Relocation.objects.create(
date_added=TEST_DATE_ADDED,
creator_id=self.superuser.id,
owner_id=self.owner.id,
status=Relocation.Status.IN_PROGRESS.value,
step=Relocation.Step.PREPROCESSING.value,
provenance=Relocation.Provenance.SELF_HOSTED.value,
want_org_slugs=["foo"],
want_usernames=["alice", "bob"],
latest_notified=Relocation.EmailKind.STARTED.value,
latest_task=OrderedTask.PREPROCESSING_SCAN.name,
latest_task_attempts=1,
)
@override_options({"staff.ga-rollout": True})
def test_good_staff_cancel_in_progress_at_next_step(self) -> None:
staff_user = self.create_user(is_staff=True)
self.login_as(user=staff_user, staff=True)
response = self.get_success_response(self.relocation.uuid, status_code=200)
assert response.data["status"] == Relocation.Status.IN_PROGRESS.name
assert response.data["step"] == Relocation.Step.PREPROCESSING.name
assert response.data["scheduledCancelAtStep"] == Relocation.Step.VALIDATING.name
def test_good_cancel_in_progress_at_next_step(self) -> None:
self.login_as(user=self.superuser, superuser=True)
response = self.get_success_response(self.relocation.uuid, status_code=200)
assert response.data["status"] == Relocation.Status.IN_PROGRESS.name
assert response.data["step"] == Relocation.Step.PREPROCESSING.name
assert response.data["scheduledCancelAtStep"] == Relocation.Step.VALIDATING.name
def test_good_cancel_paused_at_next_step(self) -> None:
self.login_as(user=self.superuser, superuser=True)
self.relocation.status = Relocation.Status.PAUSE.value
self.relocation.save()
response = self.get_success_response(self.relocation.uuid, status_code=200)
assert response.data["status"] == Relocation.Status.PAUSE.name
assert response.data["step"] == Relocation.Step.PREPROCESSING.name
assert response.data["scheduledCancelAtStep"] == Relocation.Step.VALIDATING.name
def test_good_cancel_in_progress_at_specified_step(self) -> None:
self.login_as(user=self.superuser, superuser=True)
response = self.get_success_response(
self.relocation.uuid, atStep=Relocation.Step.IMPORTING.name, status_code=200
)
assert response.data["status"] == Relocation.Status.IN_PROGRESS.name
assert response.data["step"] == Relocation.Step.PREPROCESSING.name
assert response.data["scheduledCancelAtStep"] == Relocation.Step.IMPORTING.name
def test_good_cancel_paused_at_specified_step(self) -> None:
self.login_as(user=self.superuser, superuser=True)
self.relocation.status = Relocation.Status.PAUSE.value
self.relocation.save()
response = self.get_success_response(
self.relocation.uuid, atStep=Relocation.Step.IMPORTING.name, status_code=200
)
assert response.data["status"] == Relocation.Status.PAUSE.name
assert response.data["step"] == Relocation.Step.PREPROCESSING.name
assert response.data["scheduledCancelAtStep"] == Relocation.Step.IMPORTING.name
def test_good_cancel_at_future_step(self) -> None:
self.login_as(user=self.superuser, superuser=True)
response = self.get_success_response(
self.relocation.uuid, atStep=Relocation.Step.NOTIFYING.name, status_code=200
)
assert response.data["status"] == Relocation.Status.IN_PROGRESS.name
assert response.data["step"] == Relocation.Step.PREPROCESSING.name
assert response.data["scheduledCancelAtStep"] == Relocation.Step.NOTIFYING.name
def test_good_already_cancelled(self) -> None:
self.login_as(user=self.superuser, superuser=True)
self.relocation.scheduled_cancel_at_step = Relocation.Step.IMPORTING.value
self.relocation.save()
response = self.get_success_response(
self.relocation.uuid, atStep=Relocation.Step.IMPORTING.name, status_code=200
)
assert response.data["status"] == Relocation.Status.IN_PROGRESS.name
assert response.data["step"] == Relocation.Step.PREPROCESSING.name
assert response.data["scheduledCancelAtStep"] == Relocation.Step.IMPORTING.name
def test_good_already_failed(self) -> None:
self.login_as(user=self.superuser, superuser=True)
self.relocation.status = Relocation.Status.FAILURE.value
self.relocation.save()
response = self.get_success_response(
self.relocation.uuid, atStep=Relocation.Step.PREPROCESSING.name, status_code=200
)
assert response.data["status"] == Relocation.Status.FAILURE.name
assert response.data["step"] == Relocation.Step.PREPROCESSING.name
assert not response.data["scheduledCancelAtStep"]
def test_bad_not_found(self) -> None:
self.login_as(user=self.superuser, superuser=True)
does_not_exist_uuid = uuid4().hex
self.get_error_response(does_not_exist_uuid, status_code=404)
def test_bad_already_succeeded(self) -> None:
self.login_as(user=self.superuser, superuser=True)
self.relocation.status = Relocation.Status.SUCCESS.value
self.relocation.save()
response = self.get_error_response(self.relocation.uuid, status_code=400)
assert response.data.get("detail") is not None
assert response.data.get("detail") == ERR_NOT_CANCELLABLE_STATUS
def test_bad_invalid_step(self) -> None:
self.login_as(user=self.superuser, superuser=True)
response = self.get_error_response(
self.relocation.uuid, atStep="nonexistent", status_code=400
)
assert response.data.get("detail") is not None
assert response.data.get("detail") == ERR_UNKNOWN_RELOCATION_STEP.substitute(
step="nonexistent"
)
def test_bad_unknown_step(self) -> None:
self.login_as(user=self.superuser, superuser=True)
response = self.get_error_response(
self.relocation.uuid, atStep=Relocation.Step.UNKNOWN.name, status_code=400
)
assert response.data.get("detail") is not None
assert response.data.get("detail") == ERR_COULD_NOT_CANCEL_RELOCATION_AT_STEP.substitute(
step=Relocation.Step.UNKNOWN.name
)
def test_bad_current_step(self) -> None:
self.login_as(user=self.superuser, superuser=True)
response = self.get_error_response(
self.relocation.uuid, atStep=Relocation.Step.PREPROCESSING.name, status_code=400
)
assert response.data.get("detail") is not None
assert response.data.get("detail") == ERR_COULD_NOT_CANCEL_RELOCATION
def test_bad_past_step(self) -> None:
self.login_as(user=self.superuser, superuser=True)
response = self.get_error_response(
self.relocation.uuid, atStep=Relocation.Step.UPLOADING.name, status_code=400
)
assert response.data.get("detail") is not None
assert response.data.get("detail") == ERR_COULD_NOT_CANCEL_RELOCATION_AT_STEP.substitute(
step=Relocation.Step.UPLOADING.name
)
def test_bad_last_step_specified(self) -> None:
self.login_as(user=self.superuser, superuser=True)
response = self.get_error_response(
self.relocation.uuid, atStep=Relocation.Step.COMPLETED.name, status_code=400
)
assert response.data.get("detail") is not None
assert response.data.get("detail") == ERR_COULD_NOT_CANCEL_RELOCATION_AT_STEP.substitute(
step=Relocation.Step.COMPLETED.name
)
def test_bad_last_step_automatic(self) -> None:
self.login_as(user=self.superuser, superuser=True)
self.relocation.step = Relocation.Step.NOTIFYING.value
self.relocation.save()
response = self.get_error_response(self.relocation.uuid, status_code=400)
assert response.data.get("detail") is not None
assert response.data.get("detail") == ERR_COULD_NOT_CANCEL_RELOCATION
def test_bad_no_auth(self) -> None:
self.get_error_response(self.relocation.uuid, status_code=401)
def test_bad_no_superuser(self) -> None:
self.login_as(user=self.superuser, superuser=False)
self.get_error_response(self.relocation.uuid, status_code=403)
| CancelRelocationTest |
python | coleifer__peewee | playhouse/sqlite_udf.py | {
"start": 1017,
"end": 6999
} | class ____(dict):
def __init__(self, *args, **kwargs):
super(synchronized_dict, self).__init__(*args, **kwargs)
self._lock = threading.Lock()
def __getitem__(self, key):
with self._lock:
return super(synchronized_dict, self).__getitem__(key)
def __setitem__(self, key, value):
with self._lock:
return super(synchronized_dict, self).__setitem__(key, value)
def __delitem__(self, key):
with self._lock:
return super(synchronized_dict, self).__delitem__(key)
STATE = synchronized_dict()
SETTINGS = synchronized_dict()
# Class and function decorators.
def aggregate(*groups):
def decorator(klass):
for group in groups:
AGGREGATE_COLLECTION.setdefault(group, [])
AGGREGATE_COLLECTION[group].append(klass)
return klass
return decorator
def table_function(*groups):
def decorator(klass):
for group in groups:
TABLE_FUNCTION_COLLECTION.setdefault(group, [])
TABLE_FUNCTION_COLLECTION[group].append(klass)
return klass
return decorator
def udf(*groups):
def decorator(fn):
for group in groups:
UDF_COLLECTION.setdefault(group, [])
UDF_COLLECTION[group].append(fn)
return fn
return decorator
# Register aggregates / functions with connection.
def register_aggregate_groups(db, *groups):
seen = set()
for group in groups:
klasses = AGGREGATE_COLLECTION.get(group, ())
for klass in klasses:
name = getattr(klass, 'name', klass.__name__)
if name not in seen:
seen.add(name)
db.register_aggregate(klass, name)
def register_table_function_groups(db, *groups):
seen = set()
for group in groups:
klasses = TABLE_FUNCTION_COLLECTION.get(group, ())
for klass in klasses:
if klass.name not in seen:
seen.add(klass.name)
db.register_table_function(klass)
def register_udf_groups(db, *groups):
seen = set()
for group in groups:
functions = UDF_COLLECTION.get(group, ())
for function in functions:
name = function.__name__
if name not in seen:
seen.add(name)
db.register_function(function, name)
def register_groups(db, *groups):
register_aggregate_groups(db, *groups)
register_table_function_groups(db, *groups)
register_udf_groups(db, *groups)
def register_all(db):
register_aggregate_groups(db, *AGGREGATE_COLLECTION)
register_table_function_groups(db, *TABLE_FUNCTION_COLLECTION)
register_udf_groups(db, *UDF_COLLECTION)
# Begin actual user-defined functions and aggregates.
# Scalar functions.
@udf(CONTROL_FLOW)
def if_then_else(cond, truthy, falsey=None):
if cond:
return truthy
return falsey
@udf(DATE)
def strip_tz(date_str):
date_str = date_str.replace('T', ' ')
tz_idx1 = date_str.find('+')
if tz_idx1 != -1:
return date_str[:tz_idx1]
tz_idx2 = date_str.find('-')
if tz_idx2 > 13:
return date_str[:tz_idx2]
return date_str
@udf(DATE)
def human_delta(nseconds, glue=', '):
parts = (
(86400 * 365, 'year'),
(86400 * 30, 'month'),
(86400 * 7, 'week'),
(86400, 'day'),
(3600, 'hour'),
(60, 'minute'),
(1, 'second'),
)
accum = []
for offset, name in parts:
val, nseconds = divmod(nseconds, offset)
if val:
suffix = val != 1 and 's' or ''
accum.append('%s %s%s' % (val, name, suffix))
if not accum:
return '0 seconds'
return glue.join(accum)
@udf(FILE)
def file_ext(filename):
try:
res = os.path.splitext(filename)
except ValueError:
return None
return res[1]
@udf(FILE)
def file_read(filename):
try:
with open(filename) as fh:
return fh.read()
except:
pass
if sys.version_info[0] == 2:
@udf(HELPER)
def gzip(data, compression=9):
return buffer(zlib.compress(data, compression))
@udf(HELPER)
def gunzip(data):
return zlib.decompress(data)
else:
@udf(HELPER)
def gzip(data, compression=9):
if isinstance(data, str):
data = bytes(data.encode('raw_unicode_escape'))
return zlib.compress(data, compression)
@udf(HELPER)
def gunzip(data):
return zlib.decompress(data)
@udf(HELPER)
def hostname(url):
parse_result = urlparse(url)
if parse_result:
return parse_result.netloc
@udf(HELPER)
def toggle(key):
key = key.lower()
STATE[key] = ret = not STATE.get(key)
return ret
@udf(HELPER)
def setting(key, value=None):
if value is None:
return SETTINGS.get(key)
else:
SETTINGS[key] = value
return value
@udf(HELPER)
def clear_settings():
SETTINGS.clear()
@udf(HELPER)
def clear_toggles():
STATE.clear()
@udf(MATH)
def randomrange(start, end=None, step=None):
if end is None:
start, end = 0, start
elif step is None:
step = 1
return random.randrange(start, end, step)
@udf(MATH)
def gauss_distribution(mean, sigma):
try:
return random.gauss(mean, sigma)
except ValueError:
return None
@udf(MATH)
def sqrt(n):
try:
return math.sqrt(n)
except ValueError:
return None
@udf(MATH)
def tonumber(s):
try:
return int(s)
except ValueError:
try:
return float(s)
except:
return None
@udf(STRING)
def substr_count(haystack, needle):
if not haystack or not needle:
return 0
return haystack.count(needle)
@udf(STRING)
def strip_chars(haystack, chars):
return haystack.strip(chars)
def _hash(constructor, *args):
hash_obj = constructor()
for arg in args:
hash_obj.update(arg)
return hash_obj.hexdigest()
# Aggregates.
| synchronized_dict |
python | huggingface__transformers | tests/pipelines/test_pipelines_keypoint_matching.py | {
"start": 1071,
"end": 7367
} | class ____(unittest.TestCase):
model_mapping = MODEL_FOR_KEYPOINT_MATCHING_MAPPING
_dataset = None
@classmethod
def _load_dataset(cls):
# Lazy loading of the dataset. Because it is a class method, it will only be loaded once per pytest process.
if cls._dataset is None:
cls._dataset = datasets.load_dataset("hf-internal-testing/image-matching-dataset", split="train")
def get_test_pipeline(
self,
model,
tokenizer=None,
image_processor=None,
feature_extractor=None,
processor=None,
torch_dtype="float32",
):
image_matcher = KeypointMatchingPipeline(
model=model,
tokenizer=tokenizer,
feature_extractor=feature_extractor,
image_processor=image_processor,
processor=processor,
torch_dtype=torch_dtype,
)
examples = [
Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"),
"http://images.cocodataset.org/val2017/000000039769.jpg",
]
return image_matcher, examples
def run_pipeline_test(self, image_matcher, examples):
self._load_dataset()
outputs = image_matcher(
[
Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"),
"http://images.cocodataset.org/val2017/000000039769.jpg",
]
)
self.assertEqual(
outputs,
[
{
"keypoint_image_0": {"x": ANY(float), "y": ANY(float)},
"keypoint_image_1": {"x": ANY(float), "y": ANY(float)},
"score": ANY(float),
}
]
* 2, # 2 matches per image pair
)
# Accepts URL + PIL.Image + lists
outputs = image_matcher(
[
[
Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"),
"http://images.cocodataset.org/val2017/000000039769.jpg",
],
[self._dataset[0]["image"], self._dataset[1]["image"]],
[self._dataset[1]["image"], self._dataset[2]["image"]],
[self._dataset[2]["image"], self._dataset[0]["image"]],
]
)
self.assertEqual(
outputs,
[
[
{
"keypoint_image_0": {"x": ANY(float), "y": ANY(float)},
"keypoint_image_1": {"x": ANY(float), "y": ANY(float)},
"score": ANY(float),
}
]
* 2 # 2 matches per image pair
]
* 4, # 4 image pairs
)
@require_torch
def test_single_image(self):
self._load_dataset()
small_model = "magic-leap-community/superglue_outdoor"
image_matcher = pipeline("keypoint-matching", model=small_model)
with self.assertRaises(ValueError):
image_matcher(
self._dataset[0]["image"],
threshold=0.0,
)
with self.assertRaises(ValueError):
image_matcher(
[self._dataset[0]["image"]],
threshold=0.0,
)
@require_torch
def test_single_pair(self):
self._load_dataset()
small_model = "magic-leap-community/superglue_outdoor"
image_matcher = pipeline("keypoint-matching", model=small_model)
image_0: Image.Image = self._dataset[0]["image"]
image_1: Image.Image = self._dataset[1]["image"]
outputs = image_matcher((image_0, image_1), threshold=0.0)
output = outputs[0] # first match from image pair
self.assertAlmostEqual(output["keypoint_image_0"]["x"], 698, places=1)
self.assertAlmostEqual(output["keypoint_image_0"]["y"], 469, places=1)
self.assertAlmostEqual(output["keypoint_image_1"]["x"], 434, places=1)
self.assertAlmostEqual(output["keypoint_image_1"]["y"], 440, places=1)
self.assertAlmostEqual(output["score"], 0.9905, places=3)
@require_torch
def test_multiple_pairs(self):
self._load_dataset()
small_model = "magic-leap-community/superglue_outdoor"
image_matcher = pipeline("keypoint-matching", model=small_model)
image_0: Image.Image = self._dataset[0]["image"]
image_1: Image.Image = self._dataset[1]["image"]
image_2: Image.Image = self._dataset[2]["image"]
outputs = image_matcher(
[
(image_0, image_1),
(image_1, image_2),
(image_2, image_0),
],
threshold=1e-4,
)
# Test first pair (image_0, image_1)
output_0 = outputs[0][0] # First match from first pair
self.assertAlmostEqual(output_0["keypoint_image_0"]["x"], 698, places=1)
self.assertAlmostEqual(output_0["keypoint_image_0"]["y"], 469, places=1)
self.assertAlmostEqual(output_0["keypoint_image_1"]["x"], 434, places=1)
self.assertAlmostEqual(output_0["keypoint_image_1"]["y"], 440, places=1)
self.assertAlmostEqual(output_0["score"], 0.9905, places=3)
# Test second pair (image_1, image_2)
output_1 = outputs[1][0] # First match from second pair
self.assertAlmostEqual(output_1["keypoint_image_0"]["x"], 272, places=1)
self.assertAlmostEqual(output_1["keypoint_image_0"]["y"], 310, places=1)
self.assertAlmostEqual(output_1["keypoint_image_1"]["x"], 228, places=1)
self.assertAlmostEqual(output_1["keypoint_image_1"]["y"], 568, places=1)
self.assertAlmostEqual(output_1["score"], 0.9890, places=3)
# Test third pair (image_2, image_0)
output_2 = outputs[2][0] # First match from third pair
self.assertAlmostEqual(output_2["keypoint_image_0"]["x"], 385, places=1)
self.assertAlmostEqual(output_2["keypoint_image_0"]["y"], 677, places=1)
self.assertAlmostEqual(output_2["keypoint_image_1"]["x"], 689, places=1)
self.assertAlmostEqual(output_2["keypoint_image_1"]["y"], 351, places=1)
self.assertAlmostEqual(output_2["score"], 0.9900, places=3)
| KeypointMatchingPipelineTests |
python | kamyu104__LeetCode-Solutions | Python/web-crawler-multithreaded.py | {
"start": 1918,
"end": 3991
} | class ____(object):
NUMBER_OF_WORKERS = 8
def __init__(self):
self.__cv = threading.Condition()
self.__q = collections.deque()
self.__working_count = 0
def crawl(self, startUrl, htmlParser):
"""
:type startUrl: str
:type htmlParser: HtmlParser
:rtype: List[str]
"""
SCHEME = "http://"
def hostname(url):
pos = url.find('/', len(SCHEME))
if pos == -1:
return url
return url[:pos]
def worker(htmlParser, lookup):
while True:
with self.__cv:
while not self.__q:
self.__cv.wait()
from_url = self.__q.popleft()
if from_url is None:
break
self.__working_count += 1
name = hostname(from_url)
for to_url in htmlParser.getUrls(from_url):
if name != hostname(to_url):
continue
with self.__cv:
if to_url not in lookup:
lookup.add(to_url)
self.__q.append(to_url)
self.__cv.notifyAll()
with self.__cv:
self.__working_count -= 1
if not self.__q and not self.__working_count:
self.__cv.notifyAll()
workers = []
self.__q = collections.deque([startUrl])
lookup = set([startUrl])
for i in xrange(self.NUMBER_OF_WORKERS):
t = threading.Thread(target=worker, args=(htmlParser, lookup))
t.start()
workers.append(t)
with self.__cv:
while self.__q or self.__working_count:
self.__cv.wait()
for i in xrange(self.NUMBER_OF_WORKERS):
self.__q.append(None)
self.__cv.notifyAll()
for t in workers:
t.join()
return list(lookup)
| Solution2 |
python | huggingface__transformers | src/transformers/models/fnet/modeling_fnet.py | {
"start": 37611,
"end": 39725
} | class ____(FNetPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.fnet = FNetModel(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, config.num_labels)
# Initialize weights and apply final processing
self.post_init()
@auto_docstring
def forward(
self,
input_ids: Optional[torch.Tensor] = None,
token_type_ids: Optional[torch.Tensor] = None,
position_ids: Optional[torch.Tensor] = None,
inputs_embeds: Optional[torch.Tensor] = None,
labels: Optional[torch.Tensor] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[tuple, TokenClassifierOutput]:
r"""
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.fnet(
input_ids,
token_type_ids=token_type_ids,
position_ids=position_ids,
inputs_embeds=inputs_embeds,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0]
sequence_output = self.dropout(sequence_output)
logits = self.classifier(sequence_output)
loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
# Only keep active parts of the loss
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
if not return_dict:
output = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TokenClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
@auto_docstring
| FNetForTokenClassification |
python | getsentry__sentry-python | tests/test_client.py | {
"start": 34139,
"end": 40221
} | class ____:
def __init__(
self,
expected_events,
sampler_function=None,
sample_rate=None,
exception_to_raise=Exception,
):
# type: (int, Optional[Callable[[Event], Union[float, bool]]], Optional[float], type[Exception]) -> None
self.sampler_function_mock = (
None
if sampler_function is None
else mock.MagicMock(side_effect=sampler_function)
)
self.expected_events = expected_events
self.sample_rate = sample_rate
self.exception_to_raise = exception_to_raise
def init_sdk(self, sentry_init):
# type: (Callable[[*Any], None]) -> None
sentry_init(
error_sampler=self.sampler_function_mock, sample_rate=self.sample_rate
)
def raise_exception(self):
# type: () -> None
raise self.exception_to_raise()
@mock.patch("sentry_sdk.client.random.random", return_value=0.618)
@pytest.mark.parametrize(
"test_config",
(
# Baseline test with error_sampler only, both floats and bools
IssuesSamplerTestConfig(sampler_function=lambda *_: 1.0, expected_events=1),
IssuesSamplerTestConfig(sampler_function=lambda *_: 0.7, expected_events=1),
IssuesSamplerTestConfig(sampler_function=lambda *_: 0.6, expected_events=0),
IssuesSamplerTestConfig(sampler_function=lambda *_: 0.0, expected_events=0),
IssuesSamplerTestConfig(sampler_function=lambda *_: True, expected_events=1),
IssuesSamplerTestConfig(sampler_function=lambda *_: False, expected_events=0),
# Baseline test with sample_rate only
IssuesSamplerTestConfig(sample_rate=1.0, expected_events=1),
IssuesSamplerTestConfig(sample_rate=0.7, expected_events=1),
IssuesSamplerTestConfig(sample_rate=0.6, expected_events=0),
IssuesSamplerTestConfig(sample_rate=0.0, expected_events=0),
# error_sampler takes precedence over sample_rate
IssuesSamplerTestConfig(
sampler_function=lambda *_: 1.0, sample_rate=0.0, expected_events=1
),
IssuesSamplerTestConfig(
sampler_function=lambda *_: 0.0, sample_rate=1.0, expected_events=0
),
# Different sample rates based on exception, retrieved both from event and hint
IssuesSamplerTestConfig(
sampler_function=lambda event, _: {
"ZeroDivisionError": 1.0,
"AttributeError": 0.0,
}[event["exception"]["values"][0]["type"]],
exception_to_raise=ZeroDivisionError,
expected_events=1,
),
IssuesSamplerTestConfig(
sampler_function=lambda event, _: {
"ZeroDivisionError": 1.0,
"AttributeError": 0.0,
}[event["exception"]["values"][0]["type"]],
exception_to_raise=AttributeError,
expected_events=0,
),
IssuesSamplerTestConfig(
sampler_function=lambda _, hint: {
ZeroDivisionError: 1.0,
AttributeError: 0.0,
}[hint["exc_info"][0]],
exception_to_raise=ZeroDivisionError,
expected_events=1,
),
IssuesSamplerTestConfig(
sampler_function=lambda _, hint: {
ZeroDivisionError: 1.0,
AttributeError: 0.0,
}[hint["exc_info"][0]],
exception_to_raise=AttributeError,
expected_events=0,
),
# If sampler returns invalid value, we should still send the event
IssuesSamplerTestConfig(
sampler_function=lambda *_: "This is an invalid return value for the sampler",
expected_events=1,
),
),
)
def test_error_sampler(_, sentry_init, capture_events, test_config):
test_config.init_sdk(sentry_init)
events = capture_events()
try:
test_config.raise_exception()
except Exception:
capture_exception()
assert len(events) == test_config.expected_events
if test_config.sampler_function_mock is not None:
assert test_config.sampler_function_mock.call_count == 1
# Ensure two arguments (the event and hint) were passed to the sampler function
assert len(test_config.sampler_function_mock.call_args[0]) == 2
@pytest.mark.parametrize(
"opt,missing_flags",
[
# lazy mode with enable-threads, no warning
[{"enable-threads": True, "lazy-apps": True}, []],
[{"enable-threads": "true", "lazy-apps": b"1"}, []],
# preforking mode with enable-threads and py-call-uwsgi-fork-hooks, no warning
[{"enable-threads": True, "py-call-uwsgi-fork-hooks": True}, []],
[{"enable-threads": b"true", "py-call-uwsgi-fork-hooks": b"on"}, []],
# lazy mode, no enable-threads, warning
[{"lazy-apps": True}, ["--enable-threads"]],
[{"enable-threads": b"false", "lazy-apps": True}, ["--enable-threads"]],
[{"enable-threads": b"0", "lazy": True}, ["--enable-threads"]],
# preforking mode, no enable-threads or py-call-uwsgi-fork-hooks, warning
[{}, ["--enable-threads", "--py-call-uwsgi-fork-hooks"]],
[{"processes": b"2"}, ["--enable-threads", "--py-call-uwsgi-fork-hooks"]],
[{"enable-threads": True}, ["--py-call-uwsgi-fork-hooks"]],
[{"enable-threads": b"1"}, ["--py-call-uwsgi-fork-hooks"]],
[
{"enable-threads": b"false"},
["--enable-threads", "--py-call-uwsgi-fork-hooks"],
],
[{"py-call-uwsgi-fork-hooks": True}, ["--enable-threads"]],
],
)
def test_uwsgi_warnings(sentry_init, recwarn, opt, missing_flags):
uwsgi = mock.MagicMock()
uwsgi.opt = opt
with mock.patch.dict("sys.modules", uwsgi=uwsgi):
sentry_init(profiles_sample_rate=1.0)
if missing_flags:
assert len(recwarn) == 1
record = recwarn.pop()
for flag in missing_flags:
assert flag in str(record.message)
else:
assert not recwarn
| IssuesSamplerTestConfig |
python | ray-project__ray | doc/source/serve/doc_code/resnet50_example.py | {
"start": 356,
"end": 1963
} | class ____:
def __init__(self):
self.resnet50 = (
models.resnet50(weights=ResNet50_Weights.DEFAULT).eval().to("cpu")
)
self.preprocess = transforms.Compose(
[
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize(
mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]
),
]
)
resp = requests.get(
"https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt"
)
self.categories = resp.content.decode("utf-8").split("\n")
async def __call__(self, request: starlette.requests.Request) -> str:
uri = (await request.json())["uri"]
image_bytes = requests.get(uri).content
image = Image.open(BytesIO(image_bytes)).convert("RGB")
# Batch size is 1
input_tensor = torch.cat([self.preprocess(image).unsqueeze(0)]).to("cpu")
with torch.no_grad():
output = self.resnet50(input_tensor)
sm_output = torch.nn.functional.softmax(output[0], dim=0)
ind = torch.argmax(sm_output)
return self.categories[ind]
app = Model.bind()
# __serve_example_end__
if __name__ == "__main__":
import requests # noqa
serve.run(app)
resp = requests.post(
"http://localhost:8000/",
json={
"uri": "https://serve-resnet-benchmark-data.s3.us-west-1.amazonaws.com/000000000019.jpeg" # noqa
},
) # noqa
assert resp.text == "ox"
| Model |
python | huggingface__transformers | src/transformers/models/clipseg/modeling_clipseg.py | {
"start": 20712,
"end": 24623
} | class ____(nn.Module):
"""
Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a
[`CLIPSegEncoderLayer`].
Args:
config: CLIPSegConfig
"""
def __init__(self, config: CLIPSegConfig):
super().__init__()
self.config = config
self.layers = nn.ModuleList([CLIPSegEncoderLayer(config) for _ in range(config.num_hidden_layers)])
self.gradient_checkpointing = False
@can_return_tuple
def forward(
self,
inputs_embeds,
attention_mask: Optional[torch.Tensor] = None,
causal_attention_mask: Optional[torch.Tensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[tuple, BaseModelOutput]:
r"""
Args:
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert `input_ids` indices into associated vectors
than the model's internal embedding lookup matrix.
attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
causal_attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Causal mask for the text model. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
for more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
encoder_states = () if output_hidden_states else None
all_attentions = () if output_attentions else None
hidden_states = inputs_embeds
for idx, encoder_layer in enumerate(self.layers):
if output_hidden_states:
encoder_states = encoder_states + (hidden_states,)
layer_outputs = encoder_layer(
hidden_states,
attention_mask,
causal_attention_mask,
output_attentions=output_attentions,
)
hidden_states = layer_outputs[0]
if output_attentions:
all_attentions = all_attentions + (layer_outputs[1],)
if output_hidden_states:
encoder_states = encoder_states + (hidden_states,)
return BaseModelOutput(
last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions
)
| CLIPSegEncoder |
python | graphql-python__graphene | graphene/relay/node.py | {
"start": 2226,
"end": 3039
} | class ____(Interface):
class Meta:
abstract = True
@classmethod
def __init_subclass_with_meta__(cls, global_id_type=DefaultGlobalIDType, **options):
assert issubclass(
global_id_type, BaseGlobalIDType
), "Custom ID type need to be implemented as a subclass of BaseGlobalIDType."
_meta = InterfaceOptions(cls)
_meta.global_id_type = global_id_type
_meta.fields = {
"id": GlobalID(
cls, global_id_type=global_id_type, description="The ID of the object"
)
}
super(AbstractNode, cls).__init_subclass_with_meta__(_meta=_meta, **options)
@classmethod
def resolve_global_id(cls, info, global_id):
return cls._meta.global_id_type.resolve_global_id(info, global_id)
| AbstractNode |
python | walkccc__LeetCode | solutions/2974. Minimum Number Game/2974.py | {
"start": 0,
"end": 192
} | class ____:
def numberGame(self, nums: list[int]) -> list[int]:
nums.sort()
return [nums[i + 1] if i % 2 == 0
else nums[i - 1]
for i in range(len(nums))]
| Solution |
python | numpy__numpy | numpy/_core/tests/test_deprecations.py | {
"start": 15166,
"end": 15733
} | class ____(_DeprecationTestCase):
# Deprecated in Numpy 2.4, 2025-07
message = r"Indexing flat iterators with a 0-dimensional boolean index"
def test_0d_boolean_index_deprecated(self):
arr = np.arange(3)
# 0d boolean indices on flat iterators are deprecated
self.assert_deprecated(lambda: arr.flat[True])
def test_0d_boolean_assign_index_deprecated(self):
arr = np.arange(3)
def assign_to_index():
arr.flat[True] = 10
self.assert_deprecated(assign_to_index)
| TestFlatiterIndexing0dBoolIndex |
python | django__django | tests/auth_tests/test_views.py | {
"start": 23789,
"end": 28580
} | class ____(AuthViewsTestCase):
def fail_login(self):
response = self.client.post(
"/login/",
{
"username": "testclient",
"password": "password",
},
)
self.assertFormError(
response,
AuthenticationForm.error_messages["invalid_login"]
% {"username": User._meta.get_field("username").verbose_name},
)
def logout(self):
self.client.post("/logout/")
def test_password_change_fails_with_invalid_old_password(self):
self.login()
response = self.client.post(
"/password_change/",
{
"old_password": "donuts",
"new_password1": "password1",
"new_password2": "password1",
},
)
self.assertFormError(
response, PasswordChangeForm.error_messages["password_incorrect"]
)
def test_password_change_fails_with_mismatched_passwords(self):
self.login()
response = self.client.post(
"/password_change/",
{
"old_password": "password",
"new_password1": "password1",
"new_password2": "donuts",
},
)
self.assertFormError(
response, SetPasswordForm.error_messages["password_mismatch"]
)
def test_password_change_succeeds(self):
self.login()
self.client.post(
"/password_change/",
{
"old_password": "password",
"new_password1": "password1",
"new_password2": "password1",
},
)
self.fail_login()
self.login(password="password1")
def test_password_change_done_succeeds(self):
self.login()
response = self.client.post(
"/password_change/",
{
"old_password": "password",
"new_password1": "password1",
"new_password2": "password1",
},
)
self.assertRedirects(
response, "/password_change/done/", fetch_redirect_response=False
)
@override_settings(LOGIN_URL="/login/")
def test_password_change_done_fails(self):
response = self.client.get("/password_change/done/")
self.assertRedirects(
response,
"/login/?next=/password_change/done/",
fetch_redirect_response=False,
)
def test_password_change_redirect_default(self):
self.login()
response = self.client.post(
"/password_change/",
{
"old_password": "password",
"new_password1": "password1",
"new_password2": "password1",
},
)
self.assertRedirects(
response, "/password_change/done/", fetch_redirect_response=False
)
def test_password_change_redirect_custom(self):
self.login()
response = self.client.post(
"/password_change/custom/",
{
"old_password": "password",
"new_password1": "password1",
"new_password2": "password1",
},
)
self.assertRedirects(response, "/custom/", fetch_redirect_response=False)
def test_password_change_redirect_custom_named(self):
self.login()
response = self.client.post(
"/password_change/custom/named/",
{
"old_password": "password",
"new_password1": "password1",
"new_password2": "password1",
},
)
self.assertRedirects(
response, "/password_reset/", fetch_redirect_response=False
)
@modify_settings(
MIDDLEWARE={"append": "django.contrib.auth.middleware.LoginRequiredMiddleware"}
)
def test_access_under_login_required_middleware(self):
response = self.client.post(
"/password_change/",
{
"old_password": "password",
"new_password1": "password1",
"new_password2": "password1",
},
)
self.assertRedirects(
response,
settings.LOGIN_URL + "?next=/password_change/",
fetch_redirect_response=False,
)
self.login()
response = self.client.post(
"/password_change/",
{
"old_password": "password",
"new_password1": "password1",
"new_password2": "password1",
},
)
self.assertRedirects(
response, "/password_change/done/", fetch_redirect_response=False
)
| ChangePasswordTest |
python | anthropics__anthropic-sdk-python | src/anthropic/resources/beta/files.py | {
"start": 24805,
"end": 25498
} | class ____:
def __init__(self, files: AsyncFiles) -> None:
self._files = files
self.list = _legacy_response.async_to_raw_response_wrapper(
files.list,
)
self.delete = _legacy_response.async_to_raw_response_wrapper(
files.delete,
)
self.download = async_to_custom_raw_response_wrapper(
files.download,
AsyncBinaryAPIResponse,
)
self.retrieve_metadata = _legacy_response.async_to_raw_response_wrapper(
files.retrieve_metadata,
)
self.upload = _legacy_response.async_to_raw_response_wrapper(
files.upload,
)
| AsyncFilesWithRawResponse |
python | apache__airflow | devel-common/src/tests_common/test_utils/system_tests_class.py | {
"start": 1964,
"end": 5363
} | class ____:
"""Base class for system tests."""
log: logging.Logger
@staticmethod
@pytest.fixture(autouse=True, scope="class")
def setup_logger(request):
klass = request.cls
klass.log = logging.getLogger(klass.__module__ + "." + klass.__name__)
@pytest.fixture(autouse=True)
def setup_system(self):
"""
Set up system tests.
We want to avoid random errors while database got reset - those
Are apparently triggered by parser trying to parse DAGs while
The tables are dropped. We move the dags temporarily out of the dags folder
and move them back after reset.
We also remove all logs from logs directory to have a clear log state and see only logs from this
test.
"""
print()
print("Removing all log files except previous_runs")
print()
logs_folder = resolve_logs_folder()
files = os.listdir(logs_folder)
for file in files:
file_path = os.path.join(logs_folder, file)
if os.path.isfile(file_path):
os.remove(file_path)
elif os.path.isdir(file) and file != "previous_runs":
shutil.rmtree(file_path, ignore_errors=True)
yield
# We save the logs to a separate directory so that we can see them later.
date_str = datetime.now().strftime("%Y-%m-%d_%H_%M_%S")
logs_folder = resolve_logs_folder()
print()
print(f"Saving all log files to {logs_folder}/previous_runs/{date_str}")
print()
target_dir = os.path.join(logs_folder, "previous_runs", date_str)
Path(target_dir).mkdir(parents=True, exist_ok=True, mode=0o755)
files = os.listdir(logs_folder)
for file in files:
if file != "previous_runs":
file_path = os.path.join(logs_folder, file)
shutil.move(file_path, target_dir)
@staticmethod
def execute_cmd(*args, **kwargs):
executor = get_executor()
return executor.execute_cmd(*args, **kwargs)
@staticmethod
def check_output(*args, **kwargs):
executor = get_executor()
return executor.check_output(*args, **kwargs)
@staticmethod
def _print_all_log_files():
print()
print("Printing all log files")
print()
logs_folder = resolve_logs_folder()
for dirpath, _, filenames in os.walk(logs_folder):
if "/previous_runs" not in dirpath:
for name in filenames:
filepath = os.path.join(dirpath, name)
print()
print(f" ================ Content of {filepath} ===============================")
print()
with open(filepath) as f:
print(f.read())
@staticmethod
def create_dummy_file(filename, dir_path="/tmp"):
os.makedirs(dir_path, exist_ok=True)
full_path = os.path.join(dir_path, filename)
with open(full_path, "wb") as f:
f.write(os.urandom(1 * 1024 * 1024))
@staticmethod
def delete_dummy_file(filename, dir_path):
full_path = os.path.join(dir_path, filename)
with contextlib.suppress(FileNotFoundError):
os.remove(full_path)
if dir_path != "/tmp":
shutil.rmtree(dir_path, ignore_errors=True)
| SystemTest |
python | keras-team__keras | keras/src/distribution/distribution_lib_test.py | {
"start": 4355,
"end": 5561
} | class ____(testing.TestCase):
def setUp(self):
super().setUp()
devices = [f"cpu:{i}" for i in range(8)]
shape = (4, 2)
axis_names = ["batch", "model"]
self.device_mesh = distribution_lib.DeviceMesh(
shape, axis_names, devices
)
def test_init_with_device_mesh(self):
distribution = distribution_lib.Distribution(self.device_mesh)
self.assertIs(distribution.device_mesh, self.device_mesh)
def test_scope(self):
distribution_1 = distribution_lib.Distribution(self.device_mesh)
distribution_2 = distribution_lib.Distribution(self.device_mesh)
self.assertIsNone(distribution_lib.distribution())
with distribution_1.scope():
self.assertIs(distribution_lib.distribution(), distribution_1)
with distribution_2.scope():
self.assertIs(distribution_lib.distribution(), distribution_2)
self.assertIs(distribution_lib.distribution(), distribution_1)
self.assertIsNone(distribution_lib.distribution())
@pytest.mark.skipif(
backend.backend() != "jax",
reason="Only JAX has the proper backend distribution lib",
)
| DistributionTest |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-firebase-realtime-database/source_firebase_realtime_database/source.py | {
"start": 433,
"end": 5787
} | class ____(Source):
DEFAULT_BUFFER_SIZE = 10000
DEFAULT_PATH = ""
@staticmethod
def stream_name_from(config):
path = config.get("path", SourceFirebaseRealtimeDatabase.DEFAULT_PATH)
node_name = path.rstrip("/").split("/")[-1]
if not node_name:
node_name = config["database_name"]
return node_name.replace("-", "_")
def check(self, logger: logging.Logger, config: json) -> AirbyteConnectionStatus:
"""
Tests if the input configuration can be used to successfully connect to Firebase Realtime Database
:param logger: Logging object to display debug/info/error to the logs
(logs will not be accessible via airbyte UI if they are not passed to this logger)
:param config: Json object containing the configuration of this source, content of this json is as specified in
the properties of the spec.yaml file
:return: AirbyteConnectionStatus indicating a Success or Failure
"""
try:
database_name = config["database_name"]
google_application_credentials = config["google_application_credentials"]
client = Client()
client.initialize(database_name, google_application_credentials)
# get root-node's keys to check connectivity
client.check_connection()
return AirbyteConnectionStatus(status=Status.SUCCEEDED)
except Exception as e:
return AirbyteConnectionStatus(status=Status.FAILED, message=f"An exception occurred: {str(e)}")
def discover(self, logger: logging.Logger, config: json) -> AirbyteCatalog:
"""
Returns an AirbyteCatalog representing the available streams and fields in the users's connection to Firebase.
This connector returns only one stream that is specified by `path` in the config.
:param logger: Logging object to display debug/info/error to the logs
(logs will not be accessible via airbyte UI if they are not passed to this logger)
:param config: Json object containing the configuration of this source, content of this json is as specified in
the properties of the spec.yaml file
:return: AirbyteCatalog is an object describing a list of all available streams in this source.
A stream is an AirbyteStream object that includes:
- its stream name (or table name in the case of Postgres)
- json_schema providing the specifications of expected schema for this stream (a list of columns described
by their names and types)
"""
streams = []
json_schema = {
"$schema": "http://json-schema.org/draft-07/schema#",
"type": "object",
"properties": {
"key": {"type": "string"},
"value": {"type": "string"},
},
}
stream_name = self.stream_name_from(config)
streams.append(AirbyteStream(name=stream_name, json_schema=json_schema, supported_sync_modes=["full_refresh"]))
return AirbyteCatalog(streams=streams)
def read(
self, logger: logging.Logger, config: json, catalog: ConfiguredAirbyteCatalog, state: Dict[str, any]
) -> Generator[AirbyteMessage, None, None]:
"""
Returns a generator of the AirbyteMessages generated by reading the source with the given configuration,
catalog, and state.
:param logger: Logging object to display debug/info/error to the logs
(logs will not be accessible via airbyte UI if they are not passed to this logger)
:param config: Json object containing the configuration of this source, content of this json is as specified in
the properties of the spec.yaml file
:param catalog: The input catalog is a ConfiguredAirbyteCatalog which is almost the same as AirbyteCatalog
returned by discover(), but
in addition, it's been configured in the UI! For each particular stream and field, there may have been provided
with extra modifications such as: filtering streams and/or columns out, renaming some entities, etc
:param state: When a Airbyte reads data from a source, it might need to keep a checkpoint cursor to resume
replication in the future from that saved checkpoint.
This is the object that is provided with state from previous runs and avoid replicating the entire set of
data everytime.
:return: A generator that produces a stream of AirbyteRecordMessage contained in AirbyteMessage object.
"""
stream = catalog.streams[0].stream
stream_name = stream.name
buffer_size = config.get("buffer_size", self.DEFAULT_BUFFER_SIZE)
path = config.get("path", self.DEFAULT_PATH)
database_name = config["database_name"]
google_application_credentials = config["google_application_credentials"]
client = Client(path, buffer_size)
client.initialize(database_name, google_application_credentials)
for data in client.extract():
yield AirbyteMessage(
type=Type.RECORD,
record=AirbyteRecordMessage(stream=stream_name, data=data, emitted_at=int(datetime.now().timestamp()) * 1000),
)
| SourceFirebaseRealtimeDatabase |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/operators/vertex_ai/model_service.py | {
"start": 26306,
"end": 29753
} | class ____(GoogleCloudBaseOperator):
"""
Adds version aliases for the Model.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param region: Required. The ID of the Google Cloud region that the service belongs to.
:param model_id: Required. The ID of the model to add version aliases for.
Should be in format `projects/{project}/locations/{location}/models/{model_id}@{version_id}` or
`projects/{project}/locations/{location}/models/{model_id}@{version_alias}`.
:param version_aliases: List of version aliases to be added to model version.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
:param gcp_conn_id: The connection ID to use connecting to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields = ("model_id", "project_id", "impersonation_chain")
operator_extra_links = (VertexAIModelLink(),)
def __init__(
self,
*,
region: str,
project_id: str,
model_id: str,
version_aliases: Sequence[str],
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.region = region
self.project_id = project_id
self.model_id = model_id
self.version_aliases = version_aliases
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
@property
def extra_links_params(self) -> dict[str, Any]:
return {
"region": self.region,
"project_id": self.project_id,
}
def execute(self, context: Context):
hook = ModelServiceHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
self.log.info(
"Adding aliases %s to model version %s", self.version_aliases, self.model_id.rpartition("@")[0]
)
updated_model = hook.add_version_aliases(
region=self.region,
model_id=self.model_id,
version_aliases=self.version_aliases,
project_id=self.project_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
VertexAIModelLink.persist(context=context, model_id=self.model_id)
return Model.to_dict(updated_model)
| AddVersionAliasesOnModelOperator |
python | encode__django-rest-framework | tests/test_relations_pk.py | {
"start": 1445,
"end": 1601
} | class ____(serializers.ModelSerializer):
class Meta:
model = ForeignKeySource
fields = ('id', 'name', 'target')
| ForeignKeySourceSerializer |
python | getsentry__sentry | src/sentry/utils/event_frames.py | {
"start": 1062,
"end": 7720
} | class ____:
frame_munger: FrameMunger
requires_sdk: bool = False
supported_sdks: set[str] = field(default_factory=set)
def java_frame_munger(frame: EventFrame) -> str | None:
stacktrace_path = None
if not frame.module or not frame.abs_path:
logger.warning("Module or absPath is missing", extra={"frame": frame})
return None
from sentry.issues.auto_source_code_config.errors import (
DoesNotFollowJavaPackageNamingConvention,
)
from sentry.issues.auto_source_code_config.frame_info import get_path_from_module
try:
_, stacktrace_path = get_path_from_module(frame.module, frame.abs_path)
except DoesNotFollowJavaPackageNamingConvention:
pass
except Exception:
# Report but continue
logger.exception("Investigate. Error munging java frame")
return stacktrace_path
def cocoa_frame_munger(frame: EventFrame) -> str | None:
if not frame.package or not frame.abs_path:
return None
rel_path = package_relative_path(frame.abs_path, frame.package)
if rel_path:
return rel_path
logger.warning(
"sentry.issues.frame_munging.failure",
extra={"platform": "cocoa", "frame": frame},
)
return None
def flutter_frame_munger(frame: EventFrame) -> str | None:
if not frame.abs_path:
return None
abs_path = str(frame.abs_path)
if abs_path.startswith("dart:"):
return None
elif abs_path.startswith("package:"):
if not frame.package:
return None
pkg = frame.package
if abs_path.find(f"package:{pkg}") == -1:
return None
else:
src_path = abs_path.replace(f"package:{pkg}", "", 1).strip("/")
if src_path:
return src_path
return None
def package_relative_path(abs_path: str | None, package: str | None) -> str | None:
"""
returns the left-biased shortened path relative to the package directory
"""
if not abs_path or not package:
return None
package = package.strip("/")
paths = abs_path.strip("/").split("/")
for idx, path in enumerate(paths):
if path == package:
return "/".join(paths[idx:])
return None
PLATFORM_FRAME_MUNGER: dict[str, SdkFrameMunger] = {
"java": SdkFrameMunger(java_frame_munger),
"cocoa": SdkFrameMunger(cocoa_frame_munger),
"other": SdkFrameMunger(flutter_frame_munger, True, {"sentry.dart.flutter"}),
}
def get_sdk_name(event_data: PathSearchable) -> str | None:
return get_path(event_data, "sdk", "name", filter=True) or None
def try_munge_frame_path(
frame: EventFrame,
platform: str | None = None,
sdk_name: str | None = None,
) -> str | None:
"""
Applies platform-specific frame munging for filename pathing.
If munging was successful, return the munged filename, otherwise return None.
"""
munger = platform and PLATFORM_FRAME_MUNGER.get(platform)
if not munger or (munger.requires_sdk and sdk_name not in munger.supported_sdks):
return None
munged_filename = munger.frame_munger(frame)
metrics.incr(
"sentry.issues.frame_munging",
tags={"platform": platform, "outcome": "success" if munged_filename else "failure"},
)
return munged_filename
def munged_filename_and_frames(
platform: str | None,
data_frames: Sequence[Mapping[str, Any]],
key: str = "munged_filename",
sdk_name: str | None = None,
) -> tuple[str, Sequence[Mapping[str, Any]]] | None:
"""
Applies platform-specific frame munging for filename pathing.
Returns the key used to insert into the frames and a deepcopy of the input data_frames with munging applied,
otherwise returns None.
"""
if platform is None:
return None
munger = PLATFORM_FRAME_MUNGER.get(platform)
if not munger or (munger.requires_sdk and sdk_name not in munger.supported_sdks):
return None
copy_frames: Sequence[MutableMapping[str, Any]] = cast(
Sequence[MutableMapping[str, Any]], deepcopy(data_frames)
)
frames_updated = False
for frame in copy_frames:
munged_filename = munger.frame_munger(EventFrame.from_dict(frame))
if munged_filename:
frame[key] = munged_filename
frames_updated = True
return (key, copy_frames) if frames_updated else None
def get_crashing_thread(
thread_frames: Sequence[Mapping[str, Any]] | None,
) -> Mapping[str, Any] | None:
if not thread_frames:
return None
if len(thread_frames) == 1:
return thread_frames[0]
filtered = [x for x in thread_frames if x and x.get("crashed")]
if len(filtered) == 1:
return filtered[0]
filtered = [x for x in thread_frames if x and x.get("current")]
if len(filtered) == 1:
return filtered[0]
return None
def find_stack_frames(
event_data: PathSearchable, consume_frame: Callable[[Any], None] = lambda _: None
) -> Sequence[Mapping[str, Any]]:
"""
See: https://develop.sentry.dev/sdk/event-payloads/#core-interfaces for event data payload format.
Waterfall logic for searching for stack frames in an event:
- `exception` interface for any 'stacktrace' frames.
- 'stacktrace' interface
- 'threads' interface for the relevant 'crashing' thread stack frames
"""
frames = []
stacktrace_in_exception = False
for exc in get_path(event_data, "exception", "values", filter=True) or ():
for frame in get_path(exc, "stacktrace", "frames", filter=True) or ():
consume_frame(frame)
frames.append(frame)
stacktrace_in_exception = True
if not stacktrace_in_exception:
# according to: https://develop.sentry.dev/sdk/event-payloads/stacktrace/
# stacktrace interface shouldn't be a top-level event property, so the next statement could be useless
# potentially here for backwards compatibility
frames = get_path(event_data, "stacktrace", "frames", filter=True) or []
if not frames:
threads = get_path(event_data, "threads", "values", filter=True) or get_path(
event_data, "threads", filter=True
)
# Handles edge case where the second call to get_path doesn't return a list of threads
if threads == {"values": None}:
threads = None
thread = get_crashing_thread(threads)
if thread is not None:
frames = get_path(thread, "stacktrace", "frames") or []
for frame in frames or ():
consume_frame(frame)
return frames
| SdkFrameMunger |
python | django__django | tests/validators/tests.py | {
"start": 29518,
"end": 31805
} | class ____(SimpleTestCase):
def test_validators(self):
for validator, value, expected in TEST_DATA:
name = (
validator.__name__
if isinstance(validator, types.FunctionType)
else validator.__class__.__name__
)
exception_expected = expected is not None and issubclass(
expected, Exception
)
with self.subTest(name, value=value):
if (
validator is validate_image_file_extension
and not PILLOW_IS_INSTALLED
):
self.skipTest(
"Pillow is required to test validate_image_file_extension."
)
if exception_expected:
with self.assertRaises(expected):
validator(value)
else:
self.assertEqual(expected, validator(value))
def test_single_message(self):
v = ValidationError("Not Valid")
self.assertEqual(str(v), "['Not Valid']")
self.assertEqual(repr(v), "ValidationError(['Not Valid'])")
def test_message_list(self):
v = ValidationError(["First Problem", "Second Problem"])
self.assertEqual(str(v), "['First Problem', 'Second Problem']")
self.assertEqual(
repr(v), "ValidationError(['First Problem', 'Second Problem'])"
)
def test_message_dict(self):
v = ValidationError({"first": ["First Problem"]})
self.assertEqual(str(v), "{'first': ['First Problem']}")
self.assertEqual(repr(v), "ValidationError({'first': ['First Problem']})")
def test_regex_validator_flags(self):
msg = "If the flags are set, regex must be a regular expression string."
with self.assertRaisesMessage(TypeError, msg):
RegexValidator(re.compile("a"), flags=re.IGNORECASE)
def test_max_length_validator_message(self):
v = MaxLengthValidator(
16, message='"%(value)s" has more than %(limit_value)d characters.'
)
with self.assertRaisesMessage(
ValidationError, '"djangoproject.com" has more than 16 characters.'
):
v("djangoproject.com")
| TestValidators |
python | sqlalchemy__sqlalchemy | test/orm/test_unitofwork.py | {
"start": 70951,
"end": 81508
} | class ____(_fixtures.FixtureTest):
run_inserts = None
def test_many_to_many(self):
keywords, items, item_keywords, Keyword, Item = (
self.tables.keywords,
self.tables.items,
self.tables.item_keywords,
self.classes.Keyword,
self.classes.Item,
)
self.mapper_registry.map_imperatively(Keyword, keywords)
self.mapper_registry.map_imperatively(
Item,
items,
properties=dict(
keywords=relationship(
Keyword,
item_keywords,
lazy="joined",
order_by=keywords.c.name,
)
),
)
data = [
Item,
{
"description": "mm_item1",
"keywords": (
Keyword,
[
{"name": "big"},
{"name": "green"},
{"name": "purple"},
{"name": "round"},
],
),
},
{
"description": "mm_item2",
"keywords": (
Keyword,
[
{"name": "blue"},
{"name": "imnew"},
{"name": "round"},
{"name": "small"},
],
),
},
{"description": "mm_item3", "keywords": (Keyword, [])},
{
"description": "mm_item4",
"keywords": (Keyword, [{"name": "big"}, {"name": "blue"}]),
},
{
"description": "mm_item5",
"keywords": (
Keyword,
[{"name": "big"}, {"name": "exacting"}, {"name": "green"}],
),
},
{
"description": "mm_item6",
"keywords": (
Keyword,
[{"name": "red"}, {"name": "round"}, {"name": "small"}],
),
},
]
session = fixture_session()
objects = []
_keywords = {k.name: k for k in session.query(Keyword)}
for elem in data[1:]:
item = Item(description=elem["description"])
objects.append(item)
for spec in elem["keywords"][1]:
keyword_name = spec["name"]
try:
kw = _keywords[keyword_name]
except KeyError:
_keywords[keyword_name] = kw = Keyword(name=keyword_name)
item.keywords.append(kw)
session.add_all(objects)
session.flush()
result = (
session.query(Item)
.filter(Item.description.in_([e["description"] for e in data[1:]]))
.order_by(Item.description)
.all()
)
self.assert_result(result, *data)
objects[4].description = "item4updated"
k = Keyword()
k.name = "yellow"
objects[5].keywords.append(k)
self.assert_sql_execution(
testing.db,
session.flush,
CompiledSQL(
"UPDATE items SET description=:description "
"WHERE items.id = :items_id",
{"description": "item4updated", "items_id": objects[4].id},
),
CompiledSQL(
"INSERT INTO keywords (name) VALUES (:name)",
{"name": "yellow"},
),
CompiledSQL(
"INSERT INTO item_keywords (item_id, keyword_id) "
"VALUES (:item_id, :keyword_id)",
lambda ctx: [{"item_id": objects[5].id, "keyword_id": k.id}],
),
)
objects[2].keywords.append(k)
dkid = objects[5].keywords[1].id
del objects[5].keywords[1]
self.assert_sql_execution(
testing.db,
session.flush,
CompiledSQL(
"DELETE FROM item_keywords "
"WHERE item_keywords.item_id = :item_id AND "
"item_keywords.keyword_id = :keyword_id",
[{"item_id": objects[5].id, "keyword_id": dkid}],
),
CompiledSQL(
"INSERT INTO item_keywords (item_id, keyword_id) "
"VALUES (:item_id, :keyword_id)",
lambda ctx: [{"item_id": objects[2].id, "keyword_id": k.id}],
),
)
session.delete(objects[3])
session.flush()
def test_many_to_many_remove(self):
"""Setting a collection to empty deletes many-to-many rows.
Tests that setting a list-based attribute to '[]' properly affects the
history and allows the many-to-many rows to be deleted
"""
keywords, items, item_keywords, Keyword, Item = (
self.tables.keywords,
self.tables.items,
self.tables.item_keywords,
self.classes.Keyword,
self.classes.Item,
)
self.mapper_registry.map_imperatively(Keyword, keywords)
self.mapper_registry.map_imperatively(
Item,
items,
properties=dict(
keywords=relationship(Keyword, item_keywords, lazy="joined")
),
)
i = Item(description="i1")
k1 = Keyword(name="k1")
k2 = Keyword(name="k2")
i.keywords.append(k1)
i.keywords.append(k2)
session = fixture_session()
session.add(i)
session.flush()
conn = session.connection()
eq_(conn.scalar(select(func.count("*")).select_from(item_keywords)), 2)
i.keywords = []
session.flush()
eq_(conn.scalar(select(func.count("*")).select_from(item_keywords)), 0)
def test_scalar(self):
"""sa.dependency won't delete an m2m relationship referencing None."""
keywords, items, item_keywords, Keyword, Item = (
self.tables.keywords,
self.tables.items,
self.tables.item_keywords,
self.classes.Keyword,
self.classes.Item,
)
self.mapper_registry.map_imperatively(Keyword, keywords)
self.mapper_registry.map_imperatively(
Item,
items,
properties=dict(
keyword=relationship(
Keyword, secondary=item_keywords, uselist=False
)
),
)
i = Item(description="x")
session = fixture_session()
session.add(i)
session.flush()
session.delete(i)
session.flush()
def test_many_to_many_update(self):
"""Assorted history operations on a many to many"""
keywords, items, item_keywords, Keyword, Item = (
self.tables.keywords,
self.tables.items,
self.tables.item_keywords,
self.classes.Keyword,
self.classes.Item,
)
self.mapper_registry.map_imperatively(Keyword, keywords)
self.mapper_registry.map_imperatively(
Item,
items,
properties=dict(
keywords=relationship(
Keyword,
secondary=item_keywords,
lazy="joined",
order_by=keywords.c.name,
)
),
)
k1 = Keyword(name="keyword 1")
k2 = Keyword(name="keyword 2")
k3 = Keyword(name="keyword 3")
item = Item(description="item 1")
item.keywords.extend([k1, k2, k3])
session = fixture_session()
session.add(item)
session.flush()
item.keywords = []
item.keywords.append(k1)
item.keywords.append(k2)
session.flush()
session.expunge_all()
item = session.get(Item, item.id)
eq_(item.keywords, [k1, k2])
def test_association(self):
"""Basic test of an association object"""
keywords, items, item_keywords, Keyword, Item = (
self.tables.keywords,
self.tables.items,
self.tables.item_keywords,
self.classes.Keyword,
self.classes.Item,
)
class IKAssociation(ComparableEntity):
pass
self.mapper_registry.map_imperatively(Keyword, keywords)
self.mapper_registry.map_imperatively(
IKAssociation,
item_keywords,
primary_key=[item_keywords.c.item_id, item_keywords.c.keyword_id],
properties=dict(
keyword=relationship(
Keyword,
lazy="joined",
uselist=False,
# note here is a valid place where
# order_by can be used on a scalar
# relationship(); to determine eager
# ordering of the parent object within
# its collection.
order_by=keywords.c.name,
)
),
)
self.mapper_registry.map_imperatively(
Item,
items,
properties=dict(
keywords=relationship(IKAssociation, lazy="joined")
),
)
session = fixture_session()
def fixture():
_kw = {k.name: k for k in session.query(Keyword)}
for n in (
"big",
"green",
"purple",
"round",
"huge",
"violet",
"yellow",
"blue",
):
if n not in _kw:
_kw[n] = Keyword(name=n)
def assocs(*names):
return [
IKAssociation(keyword=kw) for kw in [_kw[n] for n in names]
]
return [
Item(
description="a_item1",
keywords=assocs("big", "green", "purple", "round"),
),
Item(
description="a_item2",
keywords=assocs("huge", "violet", "yellow"),
),
Item(description="a_item3", keywords=assocs("big", "blue")),
]
session.add_all(fixture())
session.flush()
eq_(fixture(), session.query(Item).order_by(Item.description).all())
| ManyToManyTest |
python | walkccc__LeetCode | solutions/159. Longest Substring with At Most Two Distinct Characters/159.py | {
"start": 0,
"end": 426
} | class ____:
def lengthOfLongestSubstringTwoDistinct(self, s: str) -> int:
ans = 0
distinct = 0
count = [0] * 128
l = 0
for r, c in enumerate(s):
count[ord(c)] += 1
if count[ord(c)] == 1:
distinct += 1
while distinct == 3:
count[ord(s[l])] -= 1
if count[ord(s[l])] == 0:
distinct -= 1
l += 1
ans = max(ans, r - l + 1)
return ans
| Solution |
python | jmcnamara__XlsxWriter | xlsxwriter/test/worksheet/test_write_sheet_format_pr.py | {
"start": 301,
"end": 835
} | class ____(unittest.TestCase):
"""
Test the Worksheet _write_sheet_format_pr() method.
"""
def setUp(self):
self.fh = StringIO()
self.worksheet = Worksheet()
self.worksheet._set_filehandle(self.fh)
def test_write_sheet_format_pr(self):
"""Test the _write_sheet_format_pr() method"""
self.worksheet._write_sheet_format_pr()
exp = """<sheetFormatPr defaultRowHeight="15"/>"""
got = self.fh.getvalue()
self.assertEqual(exp, got)
| TestWriteSheetFormatPr |
python | weaviate__weaviate-python-client | weaviate/gql/filter.py | {
"start": 9513,
"end": 12064
} | class ____(Filter):
"""Ask class used to filter weaviate objects by asking a question."""
def __init__(self, content: dict):
"""Initialize a Ask class instance.
Args:
content: The content of the `ask` clause.
Raises:
TypeError: If 'content' is not of type dict.
ValueError: If 'content' has key "certainty"/"distance" but the value is not float.
TypeError: If 'content' has key "properties" but the type is not list or str.
"""
super().__init__(content)
if "question" not in self._content:
raise ValueError('Mandatory "question" key not present in the "content"!')
_check_type(var_name="question", value=self._content["question"], dtype=str)
if "certainty" in self._content:
if "distance" in self._content:
raise ValueError(
"Cannot have both 'certainty' and 'distance' at the same time. "
"Only one is accepted."
)
_check_type(var_name="certainty", value=self._content["certainty"], dtype=float)
if "distance" in self._content:
_check_type(var_name="distance", value=self._content["distance"], dtype=float)
if "autocorrect" in self._content:
_check_type(var_name="autocorrect", value=self._content["autocorrect"], dtype=bool)
if "rerank" in self._content:
_check_type(var_name="rerank", value=self._content["rerank"], dtype=bool)
if "properties" in self._content:
_check_type(
var_name="properties",
value=self._content["properties"],
dtype=(list, str),
)
if isinstance(self._content["properties"], str):
self._content["properties"] = [self._content["properties"]]
def __str__(self) -> str:
ask = f"ask: {{question: {dumps(self._content['question'])}"
if "certainty" in self._content:
ask += f" certainty: {self._content['certainty']}"
if "distance" in self._content:
ask += f" distance: {self._content['distance']}"
if "properties" in self._content:
ask += f" properties: {dumps(self._content['properties'])}"
if "autocorrect" in self._content:
ask += f" autocorrect: {_bool_to_str(self._content['autocorrect'])}"
if "rerank" in self._content:
ask += f" rerank: {_bool_to_str(self._content['rerank'])}"
return ask + "} "
| Ask |
python | pytorch__pytorch | torch/_prims_common/__init__.py | {
"start": 46389,
"end": 71053
} | class ____(Enum):
NEW = (0,)
VIEW = (1,)
INPLACE = (2,)
NONE = (3,)
# TODO: when NumberType contains the sym types, can simplify this
def number_type(
x: Union[NumberType, torch.SymInt, torch.SymFloat, torch.SymBool],
) -> type:
if isinstance(x, torch.SymInt):
return int
elif isinstance(x, torch.SymFloat):
return float
elif isinstance(x, torch.SymBool):
return bool
else:
return type(x)
def expr_type(x: sympy.Basic) -> type:
import sympy
if x.kind is sympy.core.kind.BooleanKind:
return bool
elif x.is_integer: # type: ignore[attr-defined]
return int
else:
# NB: Not strictly correct, but we don't support SymPy complex or bool.
return float
# TODO: document type promotion kinds
def elementwise_dtypes(
*_args,
type_promotion_kind: ELEMENTWISE_TYPE_PROMOTION_KIND,
) -> tuple[torch.dtype, torch.dtype]:
"""
Computes the computation and result dtypes for elementwise type promotion
on the given arguments and with the given elementwise type promotion kind.
Note that not all inputs to an elementwise operation necessarily participate in type promotion.
For example, the "alpha" parameter of torch.add does not participate in type promotion,
although it may be cast to the Python type corresponding to the computation dtype that
the type promotion algorithm determines.
Default elementwise type promotion, which all other type promotion kinds tweak (see below),
first decides which of four ordered types to use:
bool -> integer -> floating point -> complex
The selected type is the "lowest" type in the above list such that all number arguments
have a weakly "lower" type and all tensor arguments have a weakly lower corresponding
type for their dtype.
Once the type is determined, the particular result dtype is found. The dtypes are
partially ordered as follows:
bool -> uint8, int8 -> int16 -> int32 -> int64 ->
float16, bfloat16 -> float32 -> float64 -> complex32 -> complex64 -> complex128
The result dtype is selected by:
- if no tensor's dtype has the same corresponding type as the one selected,
then the result dtype is the (default) dtype corresponding to the selected type
(for example, 1.5 + an integer tensor has a result dtype of the default floating point dtype)
- if the result type is complex then the dtype is:
- the default complex dtype if there are no floating point or complex tensors
- if there are floating point or complex tensors with one or more dimensions, then
the complex dtype corresponding to the highest corresponding complex dtype among those tensors
(for example, double + cfloat -> cdouble)
- if there are only floating point or complex tensors with zero dimensions, then
the complex dtype corresponding to the highest corresponding complex dtype among those tensors
- if the first two cases do not apply, the result dtype is the highest dtype among
all tensors with one or more dimensions of the output type, and if there are no such
tensors then it's the highest dtype among all tensors with zero dimensions of the output type
(for example, long + half -> half, even if the half tensor has zero dimensions)
The "corresponding complex dtypes" are:
float16 -> complex32
bfloat16 -> complex64
float32 -> complex64
float64 -> complex128
complex32 -> complex32
complex64 -> complex64
complex128 -> complex128
The DEFAULT type promotion kind computes per above, and then uses the result dtype to pick a computation
dtype by mapping low precision floating point and complex dtypes as follows:
float16 -> float32
bfloat16 -> float32
complex32 -> complex64
This is referred to as "op math", and the NO_OPMATH type promotion kind disables this mapping, making the
computation dtype the same as the result dtype when it's selected. NO_OPMATH is appropriate for kernels
which perform no mathematical operations on their tensors (see below for examples).
The INT_TO_FLOAT type promotion kind maps boolean and integer result dtypes to the default floating point dtype,
and computation dtypes to the appropriate op math dtype.
The COMPLEX_TO_FLOAT type promotion kind maps complex result dtypes to the corresponding float dtype, following this
mapping:
complex32 -> float16
complex64 -> float32
complex128 -> float64
Note that COMPLEX_TO_FLOAT derives the computation dtype as the DEFAULT setting does.
The BOOL_TO_LONG type promotion kind maps boolean computation and result dtypes to long.
The ALWAYS_BOOL type promotion kind always sets the result dtype to bool.
Example operators for each type promotion option:
DEFAULT : add
NO_OPMATH : where, nextafter, cat
INT_TO_FLOAT : sin
COMPLEX_TO_FLOAT : abs
BOOL_TO_LONG : pow
ALWAYS_BOOL : eq
"""
args = tuple(x for x in _args if x is not None)
highest_type: type = bool
# Import sympy locally, as importing it eagerly at a module level is too slow
# See https://dev-discuss.pytorch.org/t/delving-into-what-happens-when-you-import-torch/1589
import sympy
for x in args:
if not isinstance(x, (Number, TensorLike, sympy.Basic)):
msg = f"Unexpected type {str(type(x))} when computing elementwise type promotion!"
raise ValueError(msg)
if isinstance(x, Number):
highest_type = get_higher_type(highest_type, number_type(x))
elif isinstance(x, sympy.Basic):
highest_type = get_higher_type(highest_type, expr_type(x))
else:
# x is a TensorLike
highest_type = get_higher_type(highest_type, dtype_to_type(x.dtype))
result_dtype = None
def _find_highest_dtype_filtered(
args, filter, *, float_as_complex=False
) -> Optional[torch.dtype]:
zero_dim_tensor_dtype = None
one_plus_dim_tensor_dtype = None
for x in args:
if isinstance(x, TensorLike) and filter(x.dtype):
_dtype = x.dtype
if float_as_complex and is_float_dtype(_dtype):
_dtype = corresponding_complex_dtype(_dtype)
if x.ndim == 0:
zero_dim_tensor_dtype = get_higher_dtype(
zero_dim_tensor_dtype, _dtype
)
else:
# x.ndim > 0
one_plus_dim_tensor_dtype = get_higher_dtype(
one_plus_dim_tensor_dtype, _dtype
)
# Prefers dtype of tensors with one or more dimensions
if one_plus_dim_tensor_dtype is not None:
# pyrefly: ignore [bad-return]
return one_plus_dim_tensor_dtype
# pyrefly: ignore [bad-return]
return zero_dim_tensor_dtype
if highest_type is float:
result_dtype = _find_highest_dtype_filtered(args, is_float_dtype)
result_dtype = (
torch.get_default_dtype() if result_dtype is None else result_dtype
)
elif highest_type is complex:
result_dtype = _find_highest_dtype_filtered(
args,
lambda x: is_float_dtype(x) or is_complex_dtype(x),
float_as_complex=True,
)
if result_dtype is None:
result_dtype = corresponding_complex_dtype(torch.get_default_dtype())
elif highest_type is int:
result_dtype = _find_highest_dtype_filtered(args, is_integer_dtype)
result_dtype = torch.long if result_dtype is None else result_dtype
else:
# highest_type is bool
result_dtype = torch.bool
if type_promotion_kind is ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT:
return get_computation_dtype(result_dtype), result_dtype
elif type_promotion_kind is ELEMENTWISE_TYPE_PROMOTION_KIND.NO_OPMATH:
return result_dtype, result_dtype
elif type_promotion_kind is ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT:
if is_integer_dtype(result_dtype) or is_boolean_dtype(result_dtype):
result_dtype = torch.get_default_dtype()
return get_computation_dtype(result_dtype), result_dtype
elif type_promotion_kind is ELEMENTWISE_TYPE_PROMOTION_KIND.COMPLEX_TO_FLOAT:
# NOTE: computation can still occur in a complex dtype
computation_dtype = get_computation_dtype(result_dtype)
if is_complex_dtype(result_dtype):
result_dtype = corresponding_real_dtype(result_dtype)
return computation_dtype, result_dtype
elif type_promotion_kind is ELEMENTWISE_TYPE_PROMOTION_KIND.BOOL_TO_LONG:
if is_boolean_dtype(result_dtype):
return torch.long, torch.long
return get_computation_dtype(result_dtype), result_dtype
elif type_promotion_kind is ELEMENTWISE_TYPE_PROMOTION_KIND.ALWAYS_BOOL:
return get_computation_dtype(result_dtype), torch.bool
else:
raise ValueError(f"Unknown type promotion kind {str(type_promotion_kind)}")
def reduction_dtypes(
arg,
output_dtype_kind: REDUCTION_OUTPUT_TYPE_KIND,
dtype: Optional[torch.dtype] = None,
) -> tuple[torch.dtype, Optional[torch.dtype]]:
# even though some reductions, like amin or amax, don't strictly require type promotion,
# all the math ops (including comparisons) are still defined only for a computation type,
# so promotion will still happen. We are doing it explicitly here
inp_dtype = dtype if dtype is not None else arg.dtype
computation_dtype = get_computation_dtype(inp_dtype)
if (
output_dtype_kind == REDUCTION_OUTPUT_TYPE_KIND.SAME
or output_dtype_kind == REDUCTION_OUTPUT_TYPE_KIND.COMPLEX_TO_FLOAT
):
result_dtype = dtype if dtype else arg.dtype
if (
output_dtype_kind == REDUCTION_OUTPUT_TYPE_KIND.COMPLEX_TO_FLOAT
and is_complex_dtype(result_dtype)
):
result_dtype = corresponding_real_dtype(result_dtype)
elif output_dtype_kind == REDUCTION_OUTPUT_TYPE_KIND.KEEP_PROMOTED_TYPE:
result_dtype = None
else: # ALWAYS_BOOL
result_dtype = torch.bool
return computation_dtype, result_dtype
# This function's logic is borrowed from the following functions defined in C++:
# batched_matrix_contiguous_strides and contiguous_strides
def make_contiguous_strides_for(
shape: ShapeType, row_major: bool = True
) -> tuple[Union[_IntLikeT, int], ...]:
"""
Returns the strides of a contiguous tensor if row_major
If row_major=True, it returns the strides of a contiguous batch of Fortran-contiguous matrices
This is often used when calling external libraries like BLAS/LAPACK/cuSolver...
"""
# contiguous_strides from c10/util/strides.h
validate_shape(shape)
if not shape:
return ()
from torch.fx.experimental.symbolic_shapes import is_nested_int
multiplier: Union[_IntLikeT, int] = 1
strides = []
for l in reversed(shape):
strides.append(multiplier)
multiplier *= l if is_nested_int(l) else sym_max(l, 1) # type:ignore[assignment]
result = tuple(reversed(strides))
# batched_matrix_contiguous_strides from aten/src/ATen/native/LinearAlgebraUtils.h
if row_major:
return result
else:
if len(shape) < 2:
return result
return result[:-2] + (1, max(shape[-2], 1))
def make_channels_last_1d_strides_for(
shape: Sequence[_IntLikeT],
) -> tuple[Union[_IntLikeT, int], ...]:
torch._check(
len(shape) == 3,
lambda: "Only tensors of rank 3 can use the channels_last_1d memory format",
)
multiplier: Union[_IntLikeT, int] = 1
strides: list[Union[_IntLikeT, int]] = [0] * 3
for idx in (1, -1, 0):
# NOTE: intentionally divergence from make_contiguous_strides_for
# This is consistent with eager
strides[idx] = multiplier
multiplier *= shape[idx]
return tuple(strides)
def make_channels_last_2d_strides_for(
shape: Sequence[_IntLikeT],
) -> tuple[Union[_IntLikeT, int], ...]:
# TODO: maybe inform the user of channels_last_3d if rank of the tensor is 5?
torch._check(
len(shape) == 4,
lambda: "Only tensors of rank 4 can use the channels_last memory format",
)
multiplier: Union[_IntLikeT, int] = 1
strides: list[Union[_IntLikeT, int]] = [0] * 4
for idx in (1, -1, -2, 0):
# NOTE: intentionally divergence from make_contiguous_strides_for
# This is consistent with eager
strides[idx] = multiplier
multiplier *= shape[idx]
return tuple(strides)
def make_channels_last_3d_strides_for(
shape: Sequence[_IntLikeT],
) -> tuple[Union[_IntLikeT, int], ...]:
torch._check(
len(shape) == 5,
lambda: "Only tensors of rank 5 can use the channels_last_3d memory format",
)
multiplier: Union[_IntLikeT, int] = 1
strides: list[Union[_IntLikeT, int]] = [0] * 5
for idx in (1, -1, -2, -3, 0):
# NOTE: intentionally divergence from make_contiguous_strides_for
# This is consistent with eager
strides[idx] = multiplier
multiplier *= shape[idx]
return tuple(strides)
def make_channels_last_strides_for(
shape: Sequence[_IntLikeT],
) -> tuple[Union[_IntLikeT, int], ...]:
ndim = len(shape) if isinstance(shape, Sequence) else 1
if ndim == 3:
return make_channels_last_1d_strides_for(shape)
elif ndim == 4:
return make_channels_last_2d_strides_for(shape)
elif ndim == 5:
return make_channels_last_3d_strides_for(shape)
else:
raise RuntimeError(
f"no channels last format strides exist in {ndim} dimensions"
)
def compute_reduction_output_shape(
shape: ShapeType, dimensions: Sequence
) -> tuple[int, ...]:
for idx in dimensions:
validate_idx(len(shape), idx)
new_shape = []
for idx in range(len(shape)):
if idx in dimensions:
continue
new_shape.append(shape[idx])
return tuple(new_shape)
def validate_no_repeating_dims(dims: Sequence):
if len(dims) != len(set(dims)):
raise RuntimeError("duplicate value in the list of dims")
def reduction_dims(shape: ShapeType, dims: Optional[Sequence]) -> tuple[int, ...]:
if dims is None:
return tuple(range(len(shape)))
dims = tuple(canonicalize_dim(len(shape), idx) for idx in dims)
validate_no_repeating_dims(dims)
return dims
def set_correction(
unbiased: Optional[bool] = None,
correction: Optional[NumberType] = None,
) -> float:
if correction is not None and unbiased is not None:
raise RuntimeError("cannot specify both correction and unbiased arguments")
elif correction is None and unbiased is None:
correction = 1.0
elif correction is None and unbiased is not None:
correction = 0.0 if unbiased is False else 1.0
# NB: we don't actually support symint here, but it's harmless to accept
if not isinstance(correction, (IntLike, FloatLike)):
raise ValueError("correction argument should be integer or float")
return sym_float(correction)
def compute_required_storage_length(
shape: ShapeType, strides: StrideType, storage_offset: int
) -> int:
"""Computes the minimum storage size to hold the given tensor geometry.
Example
=======
This is the size of a newly allocated tensor's storage, in units of elements
>>> t = torch.empty((10, 20))
>>> compute_required_storage_length(t.shape, t.stride(), t.storage_offset())
200
>>> # xdoctest: +SKIP(failing)
>>> t2 = torch.empty_strided((1, 2, 3), (5, 7, 11))
>>> size = compute_required_storage_length(
... t2.shape, t2.stride(), t2.storage_offset()
... )
>>> size == t.storage().size()
True
A valid tensor may have a larger storage size, but never smaller
>>> slice = torch.empty(100)[20:40]
>>> slice.storage().size()
100
>>> compute_required_storage_length(
... slice.shape, slice.stride(), slice.storage_offset()
... )
40
"""
from torch.fx.experimental.symbolic_shapes import guard_or_false
# Short-circuits if the shape has no elements
# Note: we are unsafely assuming tensor is not empty here, without
# runtime assertions.
if guard_or_false(reduce(operator.mul, shape, 1) == 0):
return 0
max_offset = sum((x - 1) * y for x, y in zip(shape, strides))
# +1 to account for the first element which offsets are taken from
return 1 + storage_offset + max_offset
def check_in_bounds_for_storage(
a: torch.TypedStorage, shape: ShapeType, strides: StrideType, storage_offset: int
):
"""
Determines if the given shape, strides, and offset are valid for the given storage.
"""
required_length = compute_required_storage_length(shape, strides, storage_offset)
if a.size() < required_length:
msg = (
f"Can't view a storage of size {a.size()} with an offset of {storage_offset}, "
f"shape of {str(shape)}, and strides of {str(strides)}, "
f"which requires a storage of size {required_length}"
)
raise ValueError(msg)
# NOTE: This function should ideally be removed, but some Meta internal models
# packaged with `torch.package` are using it, so it will have to be removed
# at some point in the future when those models no longer use this function.
@deprecated(
"`torch._prims_common.check` is deprecated and will be removed in the future. "
"Please use `torch._check*` functions instead.",
category=FutureWarning,
)
def check(
b: bool, s: Callable[[], str], exc_type: type[Exception] = RuntimeError
) -> None:
"""
Helper function for raising an error_type (default: RuntimeError) if a boolean condition fails.
Error message is a callable producing a string (to avoid wasting time
string formatting in non-error case, and also to make it easier for torchdynamo
to trace.)
.. note:: This function is planned for removal in the future. Please use
`torch._check*` functions instead.
"""
torch._check_with(exc_type, b, s)
# This combines is_channels_last_strides_2d and is_channels_last_strides_3d in
# c10/core/MemoryFormat.h into one function
# May return False when input sizes are data-dependent and the property is not
# determined.
def are_strides_like_channels_last_or_false(
shape: Sequence[int], strides: Sequence[int]
) -> bool:
from torch.fx.experimental.symbolic_shapes import (
guard_or_true,
statically_known_true,
)
ndim = len(shape)
if ndim == 4:
# Check for channels_last_2d
dim_order = [1, 3, 2, 0]
elif ndim == 5:
# Check for channels_last_3d
dim_order = [1, 4, 3, 2, 0]
else:
return False
if guard_or_true(strides[1] == 0):
return False
min = 0
for d in dim_order:
if guard_or_true(shape[d] == 0):
return False
if guard_or_true(strides[d] < min):
return False
if d == 0 and min == strides[1]:
return False
min = strides[d]
# Assume stride is not 1, the consequence is min could be larger than needed,
# which would result in returning False for this function but not vice versa,
# so it's ok.
if guard_or_true(strides[d] > 1):
min *= shape[d]
return True
def suggest_memory_format(x: TensorLikeType) -> torch.memory_format:
if x.layout != torch.strided:
return torch.contiguous_format
if are_strides_like_channels_last_or_false(x.shape, x.stride()):
return torch.channels_last if x.ndim == 4 else torch.channels_last_3d
return torch.contiguous_format
def prod(xs: Sequence[NumberType]) -> NumberType:
"""Product of elements in input sequence. Returns 1 for empty sequence"""
return reduce(operator.mul, xs, 1)
def is_expandable_to(shape: ShapeType, desired: ShapeType) -> bool:
"""Checks if a shape can be expanded to another shape.
This is equivalent to checking if the two shapes are broadcastable.
"""
# This is a Python implementation of
# aten/src/ATen/ExpandUtils.h:is_expandable_to
if len(shape) > len(desired):
return False
for i in range(len(shape)):
if shape[-i - 1] != desired[-i - 1] and shape[-i - 1] != 1:
return False
return True
def mask_tensor(mask: TensorLikeType, t: TensorLikeType):
"""
Similar to torch.where(mask, t, 0) but if t is boolean,
result is also boolean and not promoted to int.
"""
# torch.where(mask, t, False) is equivalent
# but feels hacky and might break in the future
if t.dtype is torch.bool:
return mask.logical_and(t)
else:
return torch.where(mask, t, 0)
def get_aten_op(fn: Callable, name: str):
"""
Given the __module__ of reference and its name, it returns
(our best guess of) the ATen name of the associated operation
Note: In ATen, the __name__ of a function within a module often
starts by the module name. E.g. linalg_eigh, or special_zeta
"""
module = fn.__module__
prefix = "torch._refs"
assert module.startswith(prefix)
module = module[len(prefix) :]
# We want to go from .special / .nn.functional
# to special and special_ / nn_functional_
if module:
module = module[1:]
module = module.replace(".", "_")
module = module + "_"
return getattr(torch._ops.ops.aten, f"{module}{name}")
def dtype_or_default(dtype: Optional[torch.dtype]) -> torch.dtype:
return dtype if dtype is not None else torch.get_default_dtype()
def device_or_default(device: Optional[DeviceLikeType]) -> DeviceLikeType:
return device if device is not None else torch.device("cpu")
def layout_or_default(layout: Optional[torch.layout]) -> torch.layout:
return layout if layout is not None else torch.strided
def clone_preserve_strides(x):
needed_size = compute_required_storage_length(
x.size(), x.stride(), x.storage_offset()
)
# Our eager implementations for *_scatter ops are all primitives w.r.t autograd,
# so these as_strided() calls are not seen by autograd.
# We need to mimic this behavior in our ref/prim implementations.
# TODO: a better way to handle this would be with a new op, "_unsafe_as_strided"
# We should revisit this when we add a compositional as_strided op,
# and also as part of https://github.com/pytorch/pytorch/issues/90507
try:
old = torch._C._dispatch_tls_is_dispatch_key_excluded(
torch._C.DispatchKey.ADInplaceOrView
)
torch._C._dispatch_tls_set_dispatch_key_excluded(
torch._C.DispatchKey.ADInplaceOrView, True
)
buffer = torch.as_strided(x, (needed_size,), (1,), 0).clone()
return torch.as_strided(buffer, x.size(), x.stride(), x.storage_offset())
finally:
torch._C._dispatch_tls_set_dispatch_key_excluded(
torch._C.DispatchKey.ADInplaceOrView, old
)
def alert_not_deterministic(caller: str):
if torch.are_deterministic_algorithms_enabled():
if torch.is_deterministic_algorithms_warn_only_enabled():
warnings.warn(
f"{caller} does not have a deterministic implementation, but you set "
f"'torch.use_deterministic_algorithms(True, warn_only=True)'. "
f"You can file an issue at https://github.com/pytorch/pytorch/issues "
f"to help us prioritize adding deterministic support for this operation.",
stacklevel=2,
)
else:
torch._check(
False,
lambda: (
f"{caller} does not have a deterministic implementation, but you set "
f"'torch.use_deterministic_algorithms(True)'. You can turn off "
f"determinism just for this operation, or you can use the "
f"'warn_only=True' option, if that's acceptable for your application. "
f"You can also file an issue at https://github.com/pytorch/pytorch/issues "
f"to help us prioritize adding deterministic support for this operation."
),
)
| RETURN_TYPE |
python | allegroai__clearml | clearml/backend_api/services/v2_20/events.py | {
"start": 131250,
"end": 132463
} | class ____(Response):
"""
Response of events.get_vector_metrics_and_variants endpoint.
:param metrics:
:type metrics: Sequence[dict]
"""
_service = "events"
_action = "get_vector_metrics_and_variants"
_version = "2.20"
_schema = {
"definitions": {},
"properties": {
"metrics": {
"description": "",
"items": {"type": "object"},
"type": ["array", "null"],
}
},
"type": "object",
}
def __init__(self, metrics: Optional[List[dict]] = None, **kwargs: Any) -> None:
super(GetVectorMetricsAndVariantsResponse, self).__init__(**kwargs)
self.metrics = metrics
@schema_property("metrics")
def metrics(self) -> Optional[List[dict]]:
return self._property_metrics
@metrics.setter
def metrics(self, value: Optional[List[dict]]) -> None:
if value is None:
self._property_metrics = None
return
self.assert_isinstance(value, "metrics", (list, tuple))
self.assert_isinstance(value, "metrics", (dict,), is_array=True)
self._property_metrics = value
| GetVectorMetricsAndVariantsResponse |
python | getsentry__sentry | tests/flagpole/test_conditions.py | {
"start": 855,
"end": 4088
} | class ____:
def test_is_in(self) -> None:
values = ["bar", "baz"]
condition = InCondition(property="foo", value=values)
assert condition.match(context=EvaluationContext({"foo": "bar"}), segment_name="test")
not_condition = NotInCondition(property="foo", value=values)
assert not not_condition.match(
context=EvaluationContext({"foo": "bar"}), segment_name="test"
)
int_values = [1, 2]
condition = InCondition(property="foo", value=int_values)
# Validation check to ensure no type coercion occurs
assert condition.value == int_values
assert condition.match(context=EvaluationContext({"foo": 2}), segment_name="test")
assert not condition.match(context=EvaluationContext({"foo": 3}), segment_name="test")
def test_is_in_numeric_string(self) -> None:
values = ["123", "456"]
condition = InCondition(property="foo", value=values, operator="in")
assert condition.value == values
assert not condition.match(context=EvaluationContext({"foo": 123}), segment_name="test")
assert condition.match(context=EvaluationContext({"foo": "123"}), segment_name="test")
def test_is_not_in(self) -> None:
values = ["bar", "baz"]
condition = InCondition(property="foo", value=values)
assert not condition.match(context=EvaluationContext({"foo": "foo"}), segment_name="test")
not_condition = NotInCondition(property="foo", value=values)
assert not_condition.match(context=EvaluationContext({"foo": "foo"}), segment_name="test")
def test_is_in_case_insensitivity(self) -> None:
values = ["bAr", "baz"]
condition = InCondition(property="foo", value=values)
assert condition.match(context=EvaluationContext({"foo": "BaR"}), segment_name="test")
not_condition = NotInCondition(property="foo", value=values)
assert not not_condition.match(
context=EvaluationContext({"foo": "BaR"}), segment_name="test"
)
def test_invalid_property_value(self) -> None:
values = ["bar", "baz"]
condition = InCondition(property="foo", value=values)
bad_context = ([1], {"k": "v"})
for attr_val in bad_context:
with pytest.raises(ConditionTypeMismatchException):
condition.match(context=EvaluationContext({"foo": attr_val}), segment_name="test")
not_condition = NotInCondition(property="foo", value=values)
for attr_val in bad_context:
with pytest.raises(ConditionTypeMismatchException):
not_condition.match(
context=EvaluationContext({"foo": attr_val}), segment_name="test"
)
def test_missing_context_property(self) -> None:
values = ["bar", "baz"]
in_condition = InCondition(property="foo", value=values)
assert not in_condition.match(
context=EvaluationContext({"bar": "bar"}), segment_name="test"
)
not_on_condition = NotInCondition(property="foo", value=values)
assert not_on_condition.match(
context=EvaluationContext({"bar": "bar"}), segment_name="test"
)
| TestInConditions |
python | great-expectations__great_expectations | contrib/great_expectations_geospatial_expectations/great_expectations_geospatial_expectations/expectations/expect_column_minimum_bounding_radius_to_be_between.py | {
"start": 523,
"end": 2371
} | class ____(ColumnAggregateMetricProvider):
# This is the id string that will be used to reference your Metric.
metric_name = "column.geometry.minimum_bounding_radius"
value_keys = (
"column_shape_format",
"diameter_flag",
)
# This method implements the core logic for the PandasExecutionEngine
@column_aggregate_value(engine=PandasExecutionEngine)
def _pandas(cls, column, **kwargs):
column_shape_format = kwargs.get("column_shape_format")
# Load the column into a pygeos Geometry vector from numpy array (Series not supported).
if column_shape_format == "wkt":
shape_test = geos.from_wkt(column.to_numpy(), on_invalid="ignore")
elif column_shape_format == "wkb":
shape_test = geos.from_wkb(column.to_numpy(), on_invalid="ignore")
elif column_shape_format == "xy":
shape_df = pd.DataFrame(column.to_list(), columns=("x", "y"))
shape_test = geos.points(shape_df.lon, y=shape_df.lat)
else:
raise NotImplementedError("Column values shape format not implemented.")
shape_test = geos.union_all(shape_test)
radius = geos.minimum_bounding_radius(shape_test)
return radius
# This method defines the business logic for evaluating your Metric when using a SqlAlchemyExecutionEngine
# @column_aggregate_partial(engine=SqlAlchemyExecutionEngine)
# def _sqlalchemy(cls, column, _dialect, **kwargs):
# raise NotImplementedError
#
# This method defines the business logic for evaluating your Metric when using a SparkDFExecutionEngine
# @column_aggregate_partial(engine=SparkDFExecutionEngine)
# def _spark(cls, column, **kwargs):
# raise NotImplementedError
# This class defines the Expectation itself
| ColumnAggregateGeometryBoundingRadius |
python | tiangolo__fastapi | tests/test_additional_responses_custom_validationerror.py | {
"start": 318,
"end": 2974
} | class ____(BaseModel):
errors: typing.List[Error]
@app.get(
"/a/{id}",
response_class=JsonApiResponse,
responses={422: {"description": "Error", "model": JsonApiError}},
)
async def a(id):
pass # pragma: no cover
client = TestClient(app)
def test_openapi_schema():
response = client.get("/openapi.json")
assert response.status_code == 200, response.text
assert response.json() == {
"openapi": "3.1.0",
"info": {"title": "FastAPI", "version": "0.1.0"},
"paths": {
"/a/{id}": {
"get": {
"responses": {
"422": {
"description": "Error",
"content": {
"application/vnd.api+json": {
"schema": {
"$ref": "#/components/schemas/JsonApiError"
}
}
},
},
"200": {
"description": "Successful Response",
"content": {"application/vnd.api+json": {"schema": {}}},
},
},
"summary": "A",
"operationId": "a_a__id__get",
"parameters": [
{
"required": True,
"schema": {"title": "Id"},
"name": "id",
"in": "path",
}
],
}
}
},
"components": {
"schemas": {
"Error": {
"title": "Error",
"required": ["status", "title"],
"type": "object",
"properties": {
"status": {"title": "Status", "type": "string"},
"title": {"title": "Title", "type": "string"},
},
},
"JsonApiError": {
"title": "JsonApiError",
"required": ["errors"],
"type": "object",
"properties": {
"errors": {
"title": "Errors",
"type": "array",
"items": {"$ref": "#/components/schemas/Error"},
}
},
},
}
},
}
| JsonApiError |
python | networkx__networkx | networkx/algorithms/approximation/tests/test_distance_measures.py | {
"start": 187,
"end": 2023
} | class ____:
"""Unit tests for the approximate diameter function
:func:`~networkx.algorithms.approximation.distance_measures.diameter`.
"""
def test_null_graph(self):
"""Test empty graph."""
G = nx.null_graph()
with pytest.raises(
nx.NetworkXError, match="Expected non-empty NetworkX graph!"
):
diameter(G)
def test_undirected_non_connected(self):
"""Test an undirected disconnected graph."""
graph = nx.path_graph(10)
graph.remove_edge(3, 4)
with pytest.raises(nx.NetworkXError, match="Graph not connected."):
diameter(graph)
def test_directed_non_strongly_connected(self):
"""Test a directed non strongly connected graph."""
graph = nx.path_graph(10, create_using=nx.DiGraph())
with pytest.raises(nx.NetworkXError, match="DiGraph not strongly connected."):
diameter(graph)
def test_complete_undirected_graph(self):
"""Test a complete undirected graph."""
graph = nx.complete_graph(10)
assert diameter(graph) == 1
def test_complete_directed_graph(self):
"""Test a complete directed graph."""
graph = nx.complete_graph(10, create_using=nx.DiGraph())
assert diameter(graph) == 1
def test_undirected_path_graph(self):
"""Test an undirected path graph with 10 nodes."""
graph = nx.path_graph(10)
assert diameter(graph) == 9
def test_directed_path_graph(self):
"""Test a directed path graph with 10 nodes."""
graph = nx.path_graph(10).to_directed()
assert diameter(graph) == 9
def test_single_node(self):
"""Test a graph which contains just a node."""
graph = nx.Graph()
graph.add_node(1)
assert diameter(graph) == 0
| TestDiameter |
python | huggingface__transformers | src/transformers/models/dots1/modeling_dots1.py | {
"start": 21702,
"end": 25533
} | class ____(Dots1PreTrainedModel):
def __init__(self, config: Dots1Config):
super().__init__(config)
self.padding_idx = config.pad_token_id
self.vocab_size = config.vocab_size
self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
self.layers = nn.ModuleList(
[Dots1DecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
)
self.norm = Dots1RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
self.rotary_emb = Dots1RotaryEmbedding(config=config)
self.gradient_checkpointing = False
self.has_sliding_layers = "sliding_attention" in self.config.layer_types
# Initialize weights and apply final processing
self.post_init()
@check_model_inputs()
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
use_cache: Optional[bool] = None,
cache_position: Optional[torch.LongTensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> BaseModelOutputWithPast:
if (input_ids is None) ^ (inputs_embeds is not None):
raise ValueError("You must specify exactly one of input_ids or inputs_embeds")
if inputs_embeds is None:
inputs_embeds = self.embed_tokens(input_ids)
if use_cache and past_key_values is None:
past_key_values = DynamicCache(config=self.config)
if cache_position is None:
past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
cache_position = torch.arange(
past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
)
if position_ids is None:
position_ids = cache_position.unsqueeze(0)
# It may already have been prepared by e.g. `generate`
if not isinstance(causal_mask_mapping := attention_mask, dict):
# Prepare mask arguments
mask_kwargs = {
"config": self.config,
"input_embeds": inputs_embeds,
"attention_mask": attention_mask,
"cache_position": cache_position,
"past_key_values": past_key_values,
"position_ids": position_ids,
}
# Create the masks
causal_mask_mapping = {
"full_attention": create_causal_mask(**mask_kwargs),
}
# The sliding window alternating layers are not always activated depending on the config
if self.has_sliding_layers:
causal_mask_mapping["sliding_attention"] = create_sliding_window_causal_mask(**mask_kwargs)
hidden_states = inputs_embeds
position_embeddings = self.rotary_emb(hidden_states, position_ids)
for decoder_layer in self.layers[: self.config.num_hidden_layers]:
hidden_states = decoder_layer(
hidden_states,
attention_mask=causal_mask_mapping[decoder_layer.attention_type],
position_embeddings=position_embeddings,
position_ids=position_ids,
past_key_values=past_key_values,
use_cache=use_cache,
cache_position=cache_position,
**kwargs,
)
hidden_states = self.norm(hidden_states)
return BaseModelOutputWithPast(
last_hidden_state=hidden_states,
past_key_values=past_key_values if use_cache else None,
)
@auto_docstring
| Dots1Model |
python | pypa__hatch | src/hatch/template/files_default.py | {
"start": 2264,
"end": 5947
} | class ____(File):
TEMPLATE = """\
[build-system]
requires = ["hatchling"]
build-backend = "hatchling.build"
[project]
name = "{project_name_normalized}"
dynamic = ["version"]
description = {description!r}
readme = "{readme_file_path}"
requires-python = ">=3.8"
license = "{license_expression}"{license_files}
keywords = []
authors = [
{{ name = "{name}", email = "{email}" }},
]
classifiers = [
"Development Status :: 4 - Beta",
"Programming Language :: Python",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"Programming Language :: Python :: 3.11",
"Programming Language :: Python :: 3.12",
"Programming Language :: Python :: Implementation :: CPython",
"Programming Language :: Python :: Implementation :: PyPy",
]
dependencies = {dependency_data}
[project.urls]{project_url_data}{cli_scripts}
[tool.hatch.version]
path = "{package_metadata_file_path}"{tests_section}
"""
def __init__(self, template_config: dict, plugin_config: dict):
template_config = dict(template_config)
template_config["name"] = repr(template_config["name"])[1:-1]
project_url_data = ""
project_urls = (
plugin_config["project_urls"]
if "project_urls" in plugin_config
else {
"Documentation": "https://github.com/{name}/{project_name_normalized}#readme",
"Issues": "https://github.com/{name}/{project_name_normalized}/issues",
"Source": "https://github.com/{name}/{project_name_normalized}",
}
)
if project_urls:
for label, url in project_urls.items():
normalized_label = f'"{label}"' if " " in label else label
project_url_data += f'\n{normalized_label} = "{url.format(**template_config)}"'
dependency_data = "["
if template_config["dependencies"]:
for dependency in sorted(template_config["dependencies"]):
dependency_data += f'\n "{dependency}",\n'
dependency_data += "]"
cli_scripts = ""
if template_config["args"]["cli"]:
cli_scripts = f"""
[project.scripts]
{template_config["project_name_normalized"]} = "{template_config["package_name"]}.cli:{template_config["package_name"]}"\
"""
tests_section = ""
if plugin_config["tests"]:
package_location = "src/" if plugin_config["src-layout"] else ""
tests_section = f"""
[tool.hatch.envs.types]
extra-dependencies = [
"mypy>=1.0.0",
]
[tool.hatch.envs.types.scripts]
check = "mypy --install-types --non-interactive {{args:{package_location}{template_config["package_name"]} tests}}"
[tool.coverage.run]
source_pkgs = ["{template_config["package_name"]}", "tests"]
branch = true
parallel = true
omit = [
"{package_location}{template_config["package_name"]}/__about__.py",
]
[tool.coverage.paths]
{template_config["package_name"]} = ["{package_location}{template_config["package_name"]}", "*/{template_config["project_name_normalized"]}/{package_location}{template_config["package_name"]}"]
tests = ["tests", "*/{template_config["project_name_normalized"]}/tests"]
[tool.coverage.report]
exclude_lines = [
"no cov",
"if __name__ == .__main__.:",
"if TYPE_CHECKING:",
]"""
super().__init__(
Path("pyproject.toml"),
self.TEMPLATE.format(
project_url_data=project_url_data,
dependency_data=dependency_data,
cli_scripts=cli_scripts,
tests_section=tests_section,
**template_config,
),
)
| PyProject |
python | Netflix__metaflow | metaflow/plugins/cards/card_client.py | {
"start": 3903,
"end": 9501
} | class ____:
"""
`CardContainer` is an immutable list-like object, returned by `get_cards`,
which contains individual `Card`s.
Notably, `CardContainer` contains a special
`_repr_html_` function which renders cards automatically in an output
cell of a notebook.
The following operations are supported:
```
cards = get_cards(MyTask)
# retrieve by index
first_card = cards[0]
# check length
if len(cards) > 1:
print('many cards present!')
# iteration
list_of_cards = list(cards)
```
"""
def __init__(self, card_paths, card_ds, origin_pathspec=None):
self._card_paths = card_paths
self._card_ds = card_ds
self._current = 0
self._high = len(card_paths)
self.from_resumed = origin_pathspec is not None
self.origin_pathspec = origin_pathspec
def __len__(self):
return self._high
def __iter__(self):
for idx in range(self._high):
yield self._get_card(idx)
def __getitem__(self, index):
return self._get_card(index)
def _get_card(self, index):
if index >= self._high:
raise IndexError
path = self._card_paths[index]
card_info = self._card_ds.info_from_path(path, suffix=CardNameSuffix.CARD)
# todo : find card creation date and put it in client.
return Card(
self._card_ds,
card_info.type,
path,
card_info.hash,
id=card_info.id,
html=None,
created_on=None,
)
def _make_heading(self, type):
return "<h1>Displaying Card Of Type : %s</h1>" % type.title()
def _repr_html_(self):
main_html = []
for idx, _ in enumerate(self._card_paths):
card = self._get_card(idx)
main_html.append(self._make_heading(card.type))
container_id = uuid.uuid4()
main_html.append(
"<script type='text/javascript'>var mfContainerId = '%s';</script>"
% container_id
)
main_html.append(
"<div class='embed' data-container='%s'>%s</div>"
% (container_id, card.get())
)
return "\n".join(main_html)
def get_cards(
task: Union[str, "metaflow.Task"],
id: Optional[str] = None,
type: Optional[str] = None,
follow_resumed: bool = True,
) -> CardContainer:
"""
Get cards related to a `Task`.
Note that `get_cards` resolves the cards contained by the task, but it doesn't actually
retrieve them from the datastore. Actual card contents are retrieved lazily either when
the card is rendered in a notebook to when you call `Card.get`. This means that
`get_cards` is a fast call even when individual cards contain a lot of data.
Parameters
----------
task : Union[str, `Task`]
A `Task` object or pathspec `{flow_name}/{run_id}/{step_name}/{task_id}` that
uniquely identifies a task.
id : str, optional, default None
The ID of card to retrieve if multiple cards are present.
type : str, optional, default None
The type of card to retrieve if multiple cards are present.
follow_resumed : bool, default True
If the task has been resumed, then setting this flag will resolve the card for
the origin task.
Returns
-------
CardContainer
A list-like object that holds `Card` objects.
"""
from metaflow.client import Task
from metaflow import namespace
card_id = id
if isinstance(task, str):
task_str = task
if len(task_str.split("/")) != 4:
# Exception that pathspec is not of correct form
raise IncorrectPathspecException(task_str)
# set namespace as None so that we don't face namespace mismatch error.
namespace(None)
task = Task(task_str)
elif not isinstance(task, Task):
# Exception that the task argument should be of form `Task` or `str`
raise IncorrectArgumentException(_TYPE(task))
origin_taskpathspec = None
if follow_resumed:
origin_taskpathspec = resumed_info(task)
if origin_taskpathspec:
task = Task(origin_taskpathspec)
card_paths, card_ds = resolve_paths_from_task(
_get_flow_datastore(task), pathspec=task.pathspec, type=type, card_id=card_id
)
return CardContainer(
card_paths,
card_ds,
origin_pathspec=origin_taskpathspec,
)
def _get_flow_datastore(task):
flow_name = task.pathspec.split("/")[0]
# Resolve datastore type
ds_type = None
# We need to set the correct datastore root here so that
# we can ensure that the card client picks up the correct path to the cards
meta_dict = task.metadata_dict
ds_type = meta_dict.get("ds-type", None)
if ds_type is None:
raise UnresolvableDatastoreException(task)
ds_root = meta_dict.get("ds-root", None)
if ds_root:
ds_root = os.path.join(ds_root, CARD_SUFFIX)
else:
ds_root = CardDatastore.get_storage_root(ds_type)
# Delay load to prevent circular dep
from metaflow.plugins import DATASTORES
storage_impl = [d for d in DATASTORES if d.TYPE == ds_type][0]
return FlowDataStore(
flow_name=flow_name,
environment=None, # TODO: Add environment here
storage_impl=storage_impl,
# ! ds root cannot be none otherwise `list_content`
# ! method fails in the datastore abstraction.
ds_root=ds_root,
)
| CardContainer |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/dialects/sqlite/base.py | {
"start": 39616,
"end": 43859
} | class ____(_DateTimeMixin, sqltypes.DateTime):
r"""Represent a Python datetime object in SQLite using a string.
The default string storage format is::
"%(year)04d-%(month)02d-%(day)02d %(hour)02d:%(minute)02d:%(second)02d.%(microsecond)06d"
e.g.:
.. sourcecode:: text
2021-03-15 12:05:57.105542
The incoming storage format is by default parsed using the
Python ``datetime.fromisoformat()`` function.
.. versionchanged:: 2.0 ``datetime.fromisoformat()`` is used for default
datetime string parsing.
The storage format can be customized to some degree using the
``storage_format`` and ``regexp`` parameters, such as::
import re
from sqlalchemy.dialects.sqlite import DATETIME
dt = DATETIME(
storage_format=(
"%(year)04d/%(month)02d/%(day)02d %(hour)02d:%(minute)02d:%(second)02d"
),
regexp=r"(\d+)/(\d+)/(\d+) (\d+)-(\d+)-(\d+)",
)
:param truncate_microseconds: when ``True`` microseconds will be truncated
from the datetime. Can't be specified together with ``storage_format``
or ``regexp``.
:param storage_format: format string which will be applied to the dict
with keys year, month, day, hour, minute, second, and microsecond.
:param regexp: regular expression which will be applied to incoming result
rows, replacing the use of ``datetime.fromisoformat()`` to parse incoming
strings. If the regexp contains named groups, the resulting match dict is
applied to the Python datetime() constructor as keyword arguments.
Otherwise, if positional groups are used, the datetime() constructor
is called with positional arguments via
``*map(int, match_obj.groups(0))``.
""" # noqa
_storage_format = (
"%(year)04d-%(month)02d-%(day)02d "
"%(hour)02d:%(minute)02d:%(second)02d.%(microsecond)06d"
)
def __init__(self, *args, **kwargs):
truncate_microseconds = kwargs.pop("truncate_microseconds", False)
super().__init__(*args, **kwargs)
if truncate_microseconds:
assert "storage_format" not in kwargs, (
"You can specify only "
"one of truncate_microseconds or storage_format."
)
assert "regexp" not in kwargs, (
"You can specify only one of "
"truncate_microseconds or regexp."
)
self._storage_format = (
"%(year)04d-%(month)02d-%(day)02d "
"%(hour)02d:%(minute)02d:%(second)02d"
)
def bind_processor(
self, dialect: Dialect
) -> Optional[_BindProcessorType[Any]]:
datetime_datetime = datetime.datetime
datetime_date = datetime.date
format_ = self._storage_format
def process(value):
if value is None:
return None
elif isinstance(value, datetime_datetime):
return format_ % {
"year": value.year,
"month": value.month,
"day": value.day,
"hour": value.hour,
"minute": value.minute,
"second": value.second,
"microsecond": value.microsecond,
}
elif isinstance(value, datetime_date):
return format_ % {
"year": value.year,
"month": value.month,
"day": value.day,
"hour": 0,
"minute": 0,
"second": 0,
"microsecond": 0,
}
else:
raise TypeError(
"SQLite DateTime type only accepts Python "
"datetime and date objects as input."
)
return process
def result_processor(
self, dialect: Dialect, coltype: object
) -> Optional[_ResultProcessorType[Any]]:
if self._reg:
return processors.str_to_datetime_processor_factory(
self._reg, datetime.datetime
)
else:
return processors.str_to_datetime
| DATETIME |
python | ray-project__ray | python/ray/tests/test_coordinator_server.py | {
"start": 872,
"end": 12520
} | class ____(unittest.TestCase):
def setUp(self):
self.list_of_node_ips = ["0.0.0.0:1", "0.0.0.0:2"]
self.host, self.port = socket.gethostbyname(socket.gethostname()), 1234
self.server = OnPremCoordinatorServer(
list_of_node_ips=self.list_of_node_ips,
host=self.host,
port=self.port,
)
self.coordinator_address = build_address(self.host, self.port)
def tearDown(self):
self.server.shutdown()
state_save_path = "/tmp/coordinator.state"
if os.path.exists(state_save_path):
os.remove(state_save_path)
def testImportingCorrectClass(self):
"""Check correct import when coordinator_address is in config yaml."""
provider_config = {"coordinator_address": "fake_address:1234"}
coordinator_node_provider = _NODE_PROVIDERS.get("local")(provider_config)
assert coordinator_node_provider is CoordinatorSenderNodeProvider
local_node_provider = _NODE_PROVIDERS.get("local")({})
assert local_node_provider is LocalNodeProvider
@pytest.fixture(autouse=True)
def _set_monkeypatch(self, monkeypatch):
self._monkeypatch = monkeypatch
@pytest.fixture(autouse=True)
def _set_tmpdir(self, tmpdir):
self._tmpdir = tmpdir
def testClusterStateInit(self):
"""Check ClusterState __init__ func generates correct state file.
Test the general use case and if num_workers increase/decrease.
"""
# Use a random head_ip so that the state file is regenerated each time
# this test is run. (Otherwise the test will fail spuriously when run a
# second time.)
self._monkeypatch.setenv("RAY_TMPDIR", self._tmpdir)
# ensure that a new cluster can start up if RAY_TMPDIR doesn't exist yet
assert not os.path.exists(get_ray_temp_dir())
head_ip = ".".join(str(random.randint(0, 255)) for _ in range(4))
cluster_config = {
"cluster_name": "random_name",
"min_workers": 0,
"max_workers": 0,
"provider": {
"type": "local",
"head_ip": head_ip,
"worker_ips": ["0.0.0.0:1"],
"external_head_ip": "0.0.0.0.3",
},
}
provider_config = cluster_config["provider"]
node_provider = _get_node_provider(
provider_config, cluster_config["cluster_name"], use_cache=False
)
assert os.path.exists(get_ray_temp_dir())
assert node_provider.external_ip(head_ip) == "0.0.0.0.3"
assert isinstance(node_provider, LocalNodeProvider)
expected_workers = {}
expected_workers[provider_config["head_ip"]] = {
"tags": {TAG_RAY_NODE_KIND: NODE_KIND_HEAD},
"state": "terminated",
"external_ip": "0.0.0.0.3",
}
expected_workers[provider_config["worker_ips"][0]] = {
"tags": {TAG_RAY_NODE_KIND: NODE_KIND_WORKER},
"state": "terminated",
}
state_save_path = local_config.get_state_path(cluster_config["cluster_name"])
assert os.path.exists(state_save_path)
workers = json.loads(open(state_save_path).read())
assert workers == expected_workers
# Test removing workers updates the cluster state.
del expected_workers[provider_config["worker_ips"][0]]
removed_ip = provider_config["worker_ips"].pop()
node_provider = _get_node_provider(
provider_config, cluster_config["cluster_name"], use_cache=False
)
workers = json.loads(open(state_save_path).read())
assert workers == expected_workers
# Test adding back workers updates the cluster state.
expected_workers[removed_ip] = {
"tags": {TAG_RAY_NODE_KIND: NODE_KIND_WORKER},
"state": "terminated",
}
provider_config["worker_ips"].append(removed_ip)
node_provider = _get_node_provider(
provider_config, cluster_config["cluster_name"], use_cache=False
)
workers = json.loads(open(state_save_path).read())
assert workers == expected_workers
# Test record_local_head_state_if_needed
head_ip = cluster_config["provider"]["head_ip"]
cluster_name = cluster_config["cluster_name"]
node_provider = _get_node_provider(
provider_config, cluster_config["cluster_name"], use_cache=False
)
assert head_ip not in node_provider.non_terminated_nodes({})
record_local_head_state_if_needed(node_provider)
assert head_ip in node_provider.non_terminated_nodes({})
expected_head_tags = {
TAG_RAY_NODE_KIND: NODE_KIND_HEAD,
TAG_RAY_USER_NODE_TYPE: local_config.LOCAL_CLUSTER_NODE_TYPE,
TAG_RAY_NODE_NAME: "ray-{}-head".format(cluster_name),
TAG_RAY_NODE_STATUS: STATUS_UP_TO_DATE,
}
assert node_provider.node_tags(head_ip) == expected_head_tags
# Repeat and verify nothing has changed.
record_local_head_state_if_needed(node_provider)
assert head_ip in node_provider.non_terminated_nodes({})
assert node_provider.node_tags(head_ip) == expected_head_tags
def testOnPremCoordinatorStateInit(self):
"""If OnPremCoordinatorState __init__ generates correct state file.
Test the general use case and if the coordinator server crashes or
updates the list of node ips with more/less nodes.
"""
expected_nodes = {}
for ip in self.list_of_node_ips:
expected_nodes[ip] = {
"tags": {},
"state": "terminated",
}
state_save_path = "/tmp/coordinator.state"
assert os.path.exists(state_save_path)
nodes = json.loads(open(state_save_path).read())
assert nodes == expected_nodes
# Test removing workers updates the cluster state.
del expected_nodes[self.list_of_node_ips[1]]
self.server.shutdown()
self.server = OnPremCoordinatorServer(
list_of_node_ips=self.list_of_node_ips[0:1],
host=self.host,
port=self.port,
)
nodes = json.loads(open(state_save_path).read())
assert nodes == expected_nodes
# Test adding back workers updates the cluster state.
expected_nodes[self.list_of_node_ips[1]] = {
"tags": {},
"state": "terminated",
}
self.server.shutdown()
self.server = OnPremCoordinatorServer(
list_of_node_ips=self.list_of_node_ips,
host=self.host,
port=self.port,
)
nodes = json.loads(open(state_save_path).read())
assert nodes == expected_nodes
def testCoordinatorSenderNodeProvider(self):
"""Integration test of CoordinatorSenderNodeProvider."""
cluster_config = {
"cluster_name": "random_name",
"min_workers": 0,
"max_workers": 0,
"provider": {
"type": "local",
"coordinator_address": self.coordinator_address,
},
"head_node": {},
"worker_nodes": {},
}
provider_config = cluster_config["provider"]
node_provider_1 = _get_node_provider(
provider_config, cluster_config["cluster_name"], use_cache=False
)
assert isinstance(node_provider_1, CoordinatorSenderNodeProvider)
assert not node_provider_1.non_terminated_nodes({})
assert not node_provider_1.is_running(self.list_of_node_ips[0])
assert node_provider_1.is_terminated(self.list_of_node_ips[0])
assert not node_provider_1.node_tags(self.list_of_node_ips[0])
head_node_tags = {
TAG_RAY_NODE_KIND: NODE_KIND_HEAD,
}
assert not node_provider_1.non_terminated_nodes(head_node_tags)
head_node_tags[TAG_RAY_NODE_NAME] = "ray-{}-head".format(
cluster_config["cluster_name"]
)
node_provider_1.create_node(cluster_config["head_node"], head_node_tags, 1)
assert node_provider_1.non_terminated_nodes({}) == [self.list_of_node_ips[0]]
head_node_tags[TAG_RAY_CLUSTER_NAME] = cluster_config["cluster_name"]
assert node_provider_1.node_tags(self.list_of_node_ips[0]) == head_node_tags
assert node_provider_1.is_running(self.list_of_node_ips[0])
assert not node_provider_1.is_terminated(self.list_of_node_ips[0])
# Add another cluster.
cluster_config["cluster_name"] = "random_name_2"
provider_config = cluster_config["provider"]
node_provider_2 = _get_node_provider(
provider_config, cluster_config["cluster_name"], use_cache=False
)
assert not node_provider_2.non_terminated_nodes({})
assert not node_provider_2.is_running(self.list_of_node_ips[1])
assert node_provider_2.is_terminated(self.list_of_node_ips[1])
assert not node_provider_2.node_tags(self.list_of_node_ips[1])
assert not node_provider_2.non_terminated_nodes(head_node_tags)
head_node_tags[TAG_RAY_NODE_NAME] = "ray-{}-head".format(
cluster_config["cluster_name"]
)
node_provider_2.create_node(cluster_config["head_node"], head_node_tags, 1)
assert node_provider_2.non_terminated_nodes({}) == [self.list_of_node_ips[1]]
head_node_tags[TAG_RAY_CLUSTER_NAME] = cluster_config["cluster_name"]
assert node_provider_2.node_tags(self.list_of_node_ips[1]) == head_node_tags
assert node_provider_2.is_running(self.list_of_node_ips[1])
assert not node_provider_2.is_terminated(self.list_of_node_ips[1])
# Add another cluster (should fail because we only have two nodes).
cluster_config["cluster_name"] = "random_name_3"
provider_config = cluster_config["provider"]
node_provider_3 = _get_node_provider(
provider_config, cluster_config["cluster_name"], use_cache=False
)
assert not node_provider_3.non_terminated_nodes(head_node_tags)
head_node_tags[TAG_RAY_NODE_NAME] = "ray-{}-head".format(
cluster_config["cluster_name"]
)
node_provider_3.create_node(cluster_config["head_node"], head_node_tags, 1)
assert not node_provider_3.non_terminated_nodes({})
# Terminate all nodes.
node_provider_1.terminate_node(self.list_of_node_ips[0])
assert not node_provider_1.non_terminated_nodes({})
node_provider_2.terminate_node(self.list_of_node_ips[1])
assert not node_provider_2.non_terminated_nodes({})
# Check if now we can create more clusters/nodes.
node_provider_3.create_node(cluster_config["head_node"], head_node_tags, 1)
worker_node_tags = {
TAG_RAY_NODE_NAME: "ray-{}-worker".format(cluster_config["cluster_name"]),
TAG_RAY_NODE_KIND: NODE_KIND_WORKER,
}
node_provider_3.create_node(cluster_config["worker_nodes"], worker_node_tags, 1)
assert node_provider_3.non_terminated_nodes({}) == self.list_of_node_ips
worker_filter = {TAG_RAY_NODE_KIND: NODE_KIND_WORKER}
assert node_provider_3.non_terminated_nodes(worker_filter) == [
self.list_of_node_ips[1]
]
head_filter = {TAG_RAY_NODE_KIND: NODE_KIND_HEAD}
assert node_provider_3.non_terminated_nodes(head_filter) == [
self.list_of_node_ips[0]
]
if __name__ == "__main__":
sys.exit(pytest.main(["-sv", __file__]))
| OnPremCoordinatorServerTest |
python | pytorch__pytorch | torch/testing/_internal/common_utils.py | {
"start": 118747,
"end": 119246
} | class ____(UnittestPair):
CLS = object
# This implements a variant of assertRaises/assertRaisesRegex where we first test
# if the exception is NotImplementedError, and if so just skip the test instead
# of failing it.
#
# This is implemented by inheriting from the (private) implementation of
# assertRaises from unittest.case, and slightly tweaking it for this new
# behavior. The year is 2021: this private class hierarchy hasn't changed since
# 2010, seems low risk to inherit from.
| ObjectPair |
python | getsentry__sentry-python | tests/conftest.py | {
"start": 8634,
"end": 16739
} | class ____:
def __init__(self, read_file, write_file):
self.read_file = read_file
self.write_file = write_file
def read_event(self):
return json.loads(self.read_file.readline().decode("utf-8"))
def read_flush(self):
assert self.read_file.readline() == b"flush\n"
# scope=session ensures that fixture is run earlier
@pytest.fixture(
scope="session",
params=[None, "eventlet", "gevent"],
ids=("threads", "eventlet", "greenlet"),
)
def maybe_monkeypatched_threading(request):
if request.param == "eventlet":
if eventlet is None:
pytest.skip("no eventlet installed")
try:
eventlet.monkey_patch()
except AttributeError as e:
if "'thread.RLock' object has no attribute" in str(e):
# https://bitbucket.org/pypy/pypy/issues/2962/gevent-cannot-patch-rlock-under-pypy-27-7
pytest.skip("https://github.com/eventlet/eventlet/issues/546")
else:
raise
elif request.param == "gevent":
if gevent is None:
pytest.skip("no gevent installed")
try:
gevent.monkey.patch_all()
except Exception as e:
if "_RLock__owner" in str(e):
pytest.skip("https://github.com/gevent/gevent/issues/1380")
else:
raise
else:
assert request.param is None
return request.param
@pytest.fixture
def render_span_tree():
def inner(event):
assert event["type"] == "transaction"
by_parent = {}
for span in event["spans"]:
by_parent.setdefault(span["parent_span_id"], []).append(span)
def render_span(span):
yield "- op={}: description={}".format(
json.dumps(span.get("op")), json.dumps(span.get("description"))
)
for subspan in by_parent.get(span["span_id"]) or ():
for line in render_span(subspan):
yield " {}".format(line)
root_span = event["contexts"]["trace"]
return "\n".join(render_span(root_span))
return inner
@pytest.fixture(name="StringContaining")
def string_containing_matcher():
"""
An object which matches any string containing the substring passed to the
object at instantiation time.
Useful for assert_called_with, assert_any_call, etc.
Used like this:
>>> f = mock.Mock()
>>> f("dogs are great")
>>> f.assert_any_call("dogs") # will raise AssertionError
Traceback (most recent call last):
...
AssertionError: mock('dogs') call not found
>>> f.assert_any_call(StringContaining("dogs")) # no AssertionError
"""
class StringContaining:
def __init__(self, substring):
self.substring = substring
self.valid_types = (str, bytes)
def __eq__(self, test_string):
if not isinstance(test_string, self.valid_types):
return False
# this is safe even in py2 because as of 2.6, `bytes` exists in py2
# as an alias for `str`
if isinstance(test_string, bytes):
test_string = test_string.decode()
if len(self.substring) > len(test_string):
return False
return self.substring in test_string
def __ne__(self, test_string):
return not self.__eq__(test_string)
return StringContaining
def _safe_is_equal(x, y):
"""
Compares two values, preferring to use the first's __eq__ method if it
exists and is implemented.
Accounts for py2/py3 differences (like ints in py2 not having a __eq__
method), as well as the incomparability of certain types exposed by using
raw __eq__ () rather than ==.
"""
# Prefer using __eq__ directly to ensure that examples like
#
# maisey = Dog()
# maisey.name = "Maisey the Dog"
# maisey == ObjectDescribedBy(attrs={"name": StringContaining("Maisey")})
#
# evaluate to True (in other words, examples where the values in self.attrs
# might also have custom __eq__ methods; this makes sure those methods get
# used if possible)
try:
is_equal = x.__eq__(y)
except AttributeError:
is_equal = NotImplemented
# this can happen on its own, too (i.e. without an AttributeError being
# thrown), which is why this is separate from the except block above
if is_equal == NotImplemented:
# using == smoothes out weird variations exposed by raw __eq__
return x == y
return is_equal
@pytest.fixture(name="DictionaryContaining")
def dictionary_containing_matcher():
"""
An object which matches any dictionary containing all key-value pairs from
the dictionary passed to the object at instantiation time.
Useful for assert_called_with, assert_any_call, etc.
Used like this:
>>> f = mock.Mock()
>>> f({"dogs": "yes", "cats": "maybe"})
>>> f.assert_any_call({"dogs": "yes"}) # will raise AssertionError
Traceback (most recent call last):
...
AssertionError: mock({'dogs': 'yes'}) call not found
>>> f.assert_any_call(DictionaryContaining({"dogs": "yes"})) # no AssertionError
"""
class DictionaryContaining:
def __init__(self, subdict):
self.subdict = subdict
def __eq__(self, test_dict):
if not isinstance(test_dict, dict):
return False
if len(self.subdict) > len(test_dict):
return False
for key, value in self.subdict.items():
try:
test_value = test_dict[key]
except KeyError: # missing key
return False
if not _safe_is_equal(value, test_value):
return False
return True
def __ne__(self, test_dict):
return not self.__eq__(test_dict)
return DictionaryContaining
@pytest.fixture(name="ObjectDescribedBy")
def object_described_by_matcher():
"""
An object which matches any other object with the given properties.
Available properties currently are "type" (a type object) and "attrs" (a
dictionary).
Useful for assert_called_with, assert_any_call, etc.
Used like this:
>>> class Dog:
... pass
...
>>> maisey = Dog()
>>> maisey.name = "Maisey"
>>> maisey.age = 7
>>> f = mock.Mock()
>>> f(maisey)
>>> f.assert_any_call(ObjectDescribedBy(type=Dog)) # no AssertionError
>>> f.assert_any_call(ObjectDescribedBy(attrs={"name": "Maisey"})) # no AssertionError
"""
class ObjectDescribedBy:
def __init__(self, type=None, attrs=None):
self.type = type
self.attrs = attrs
def __eq__(self, test_obj):
if self.type:
if not isinstance(test_obj, self.type):
return False
if self.attrs:
for attr_name, attr_value in self.attrs.items():
try:
test_value = getattr(test_obj, attr_name)
except AttributeError: # missing attribute
return False
if not _safe_is_equal(attr_value, test_value):
return False
return True
def __ne__(self, test_obj):
return not self.__eq__(test_obj)
return ObjectDescribedBy
@pytest.fixture
def teardown_profiling():
# Make sure that a previous test didn't leave the profiler running
teardown_profiler()
teardown_continuous_profiler()
yield
# Make sure that to shut down the profiler after the test
teardown_profiler()
teardown_continuous_profiler()
@pytest.fixture()
def suppress_deprecation_warnings():
"""
Use this fixture to suppress deprecation warnings in a test.
Useful for testing deprecated SDK features.
"""
with warnings.catch_warnings():
warnings.simplefilter("ignore", DeprecationWarning)
yield
| EventStreamReader |
python | joke2k__faker | tests/providers/test_phone_number.py | {
"start": 18955,
"end": 19331
} | class ____:
"""Test ta_IN phone number provider methods"""
def test_phone_number(self, faker, num_samples):
pattern: Pattern = re.compile(
r"\+91 \d{3} ?\d{7}|" r"0\d{2}(-)?\d{2}(?(1)| ?)\d{6}",
)
for _ in range(num_samples):
phone_number = faker.phone_number()
assert pattern.fullmatch(phone_number)
| TestTaIn |
python | huggingface__transformers | src/transformers/models/patchtsmixer/modeling_patchtsmixer.py | {
"start": 36001,
"end": 37858
} | class ____(nn.Module):
"""
Standardize features by calculating the mean and scaling along the first dimension, and then normalizes it by
subtracting from the mean and dividing by the standard deviation.
"""
def __init__(self, config: PatchTSMixerConfig):
super().__init__()
self.dim = config.scaling_dim if hasattr(config, "scaling_dim") else 1
self.keepdim = config.keepdim if hasattr(config, "keepdim") else True
self.minimum_scale = config.minimum_scale if hasattr(config, "minimum_scale") else 1e-5
def forward(
self, data: torch.Tensor, observed_indicator: torch.Tensor
) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
"""
Parameters:
data (`torch.Tensor` of shape `(batch_size, sequence_length, num_input_channels)`):
input for Batch norm calculation
observed_indicator (`torch.BoolTensor` of shape `(batch_size, sequence_length, num_input_channels)`):
Calculating the scale on the observed indicator.
Returns:
tuple of `torch.Tensor` of shapes
(`(batch_size, sequence_length, num_input_channels)`,`(batch_size, 1, num_input_channels)`,
`(batch_size, 1, num_input_channels)`)
"""
denominator = observed_indicator.sum(self.dim, keepdim=self.keepdim)
denominator = denominator.clamp_min(1.0)
loc = (data * observed_indicator).sum(self.dim, keepdim=self.keepdim) / denominator
variance = (((data - loc) * observed_indicator) ** 2).sum(self.dim, keepdim=self.keepdim) / denominator
scale = torch.sqrt(variance + self.minimum_scale)
return (data - loc) / scale, loc, scale
# Copied from transformers.models.patchtst.modeling_patchtst.PatchTSTMeanScaler with PatchTST->PatchTSMixer
| PatchTSMixerStdScaler |
python | django-compressor__django-compressor | compressor/management/commands/compress.py | {
"start": 933,
"end": 15906
} | class ____(BaseCommand):
help = "Compress content outside of the request/response cycle"
requires_system_checks = []
def add_arguments(self, parser):
parser.add_argument(
"--extension",
"-e",
action="append",
dest="extensions",
help='The file extension(s) to examine (default: ".html", '
"separate multiple extensions with commas, or use -e "
"multiple times)",
)
parser.add_argument(
"-f",
"--force",
default=False,
action="store_true",
help="Force the generation of compressed content even if the "
"COMPRESS_ENABLED setting is not True.",
dest="force",
)
parser.add_argument(
"--follow-links",
default=False,
action="store_true",
help="Follow symlinks when traversing the COMPRESS_ROOT "
"(which defaults to STATIC_ROOT). Be aware that using this "
"can lead to infinite recursion if a link points to a parent "
"directory of itself.",
dest="follow_links",
)
parser.add_argument(
"--engine",
default=[],
action="append",
help="Specifies the templating engine. jinja2 and django are "
"supported. It may be a specified more than once for "
"multiple engines. If not specified, django engine is used.",
dest="engines",
)
def get_loaders(self):
template_source_loaders = []
for e in engines.all():
if hasattr(e, "engine"):
template_source_loaders.extend(
e.engine.get_template_loaders(e.engine.loaders)
)
loaders = []
# If template loader is CachedTemplateLoader, return the loaders
# that it wraps around. So if we have
# TEMPLATE_LOADERS = (
# ('django.template.loaders.cached.Loader', (
# 'django.template.loaders.filesystem.Loader',
# 'django.template.loaders.app_directories.Loader',
# )),
# )
# The loaders will return django.template.loaders.filesystem.Loader
# and django.template.loaders.app_directories.Loader
# The cached Loader and similar ones include a 'loaders' attribute
# so we look for that.
for loader in template_source_loaders:
if hasattr(loader, "loaders"):
loaders.extend(loader.loaders)
else:
loaders.append(loader)
return loaders
def __get_parser(self, engine):
charset = (
settings.FILE_CHARSET if settings.is_overridden("FILE_CHARSET") else "utf-8"
)
if engine == "jinja2":
from compressor.offline.jinja2 import Jinja2Parser
env = settings.COMPRESS_JINJA2_GET_ENVIRONMENT()
parser = Jinja2Parser(charset=charset, env=env)
elif engine == "django":
from compressor.offline.django import DjangoParser
parser = DjangoParser(charset=charset)
else:
raise OfflineGenerationError("Invalid templating engine specified.")
return parser
def compress(self, engine, extensions, verbosity, follow_links, log):
"""
Searches templates containing 'compress' nodes and compresses them
"offline" -- outside of the request/response cycle.
The result is cached with a cache-key derived from the content of the
compress nodes (not the content of the possibly linked files!).
"""
if not self.get_loaders():
raise OfflineGenerationError(
"No template loaders defined. You "
"must set TEMPLATE_LOADERS in your "
"settings or set 'loaders' in your "
"TEMPLATES dictionary."
)
templates = set()
if engine == "django":
paths = set()
for loader in self.get_loaders():
try:
module = import_module(loader.__module__)
get_template_sources = getattr(module, "get_template_sources", None)
if get_template_sources is None:
get_template_sources = loader.get_template_sources
paths.update(
smart_str(origin) for origin in get_template_sources("")
)
except (ImportError, AttributeError, TypeError):
# Yeah, this didn't work out so well, let's move on
pass
if not paths:
raise OfflineGenerationError(
"No template paths found. None of "
"the configured template loaders "
"provided template paths. See "
"https://docs.djangoproject.com/en/2.1/topics/templates/ "
"for more information on template "
"loaders."
)
if verbosity >= 2:
log.write("Considering paths:\n\t" + "\n\t".join(paths) + "\n")
for path in paths:
for root, dirs, files in os.walk(path, followlinks=follow_links):
templates.update(
os.path.relpath(os.path.join(root, name), path)
for name in files
if not name.startswith(".")
and any(fnmatch(name, "*%s" % glob) for glob in extensions)
)
elif engine == "jinja2":
env = settings.COMPRESS_JINJA2_GET_ENVIRONMENT()
if env and hasattr(env, "list_templates"):
templates |= set(
[
env.loader.get_source(env, template)[1]
for template in env.list_templates(
filter_func=lambda _path: os.path.splitext(_path)[-1]
in extensions
)
]
)
if not templates:
raise OfflineGenerationError(
"No templates found. Make sure your "
"TEMPLATE_LOADERS and TEMPLATE_DIRS "
"settings are correct."
)
if verbosity >= 2:
log.write("Found templates:\n\t" + "\n\t".join(templates) + "\n")
contexts = settings.COMPRESS_OFFLINE_CONTEXT
if isinstance(contexts, str):
try:
module, function = get_mod_func(contexts)
contexts = getattr(import_module(module), function)()
except (AttributeError, ImportError, TypeError) as e:
raise ImportError(
"Couldn't import offline context function %s: %s"
% (settings.COMPRESS_OFFLINE_CONTEXT, e)
)
elif not isinstance(contexts, (list, tuple)):
contexts = [contexts]
parser = self.__get_parser(engine)
fine_templates = []
if verbosity >= 1:
log.write("Compressing... ")
for template_name in templates:
try:
template = parser.parse(template_name)
template.template_name = template_name
fine_templates.append(template)
except IOError: # unreadable file -> ignore
if verbosity >= 1:
log.write("Unreadable template at: %s\n" % template_name)
continue
except TemplateSyntaxError as e: # broken template -> ignore
if verbosity >= 1:
log.write(
"Invalid template %s: %s\n" % (template_name, smart_str(e))
)
continue
except TemplateDoesNotExist: # non existent template -> ignore
if verbosity >= 1:
log.write("Non-existent template at: %s\n" % template_name)
continue
except UnicodeDecodeError:
if verbosity >= 1:
log.write(
"UnicodeDecodeError while trying to read "
"template %s\n" % template_name
)
continue
contexts_count = 0
nodes_count = 0
offline_manifest = OrderedDict()
errors = []
for context_dict in contexts:
compressor_nodes = OrderedDict()
for template in fine_templates:
context = Context(parser.get_init_context(context_dict))
try:
nodes = list(parser.walk_nodes(template, context=context))
except (TemplateDoesNotExist, TemplateSyntaxError) as e:
# Could be an error in some base template
if verbosity >= 1:
log.write(
"Error parsing template %s: %s\n"
% (template.template_name, smart_str(e))
)
continue
if nodes:
template_nodes = compressor_nodes.setdefault(
template, OrderedDict()
)
for node in nodes:
nodes_count += 1
template_nodes.setdefault(node, []).append(context)
pool = concurrent.futures.ThreadPoolExecutor(max_workers=4)
for template, nodes in compressor_nodes.items():
template._log = log
template._log_verbosity = verbosity
pool.submit(
self._compress_template,
offline_manifest,
nodes,
parser,
template,
errors,
)
pool.shutdown(wait=True)
contexts_count += 1
# If errors exist, raise the first one in the list
if errors:
raise errors[0]
elif not nodes_count:
raise OfflineGenerationError(
"No 'compress' template tags found in templates."
"Try running compress command with --follow-links and/or"
"--extension=EXTENSIONS"
)
if verbosity >= 1:
log.write(
"done\nCompressed %d block(s) from %d template(s) for %d context(s).\n"
% (len(offline_manifest), nodes_count, contexts_count)
)
return offline_manifest, len(offline_manifest), offline_manifest.values()
@staticmethod
def _compress_template(offline_manifest, nodes, parser, template, errors):
for node, node_contexts in nodes.items():
for context in node_contexts:
context.push()
if not parser.process_template(template, context):
continue
parser.process_node(template, context, node)
rendered = parser.render_nodelist(template, context, node)
key = get_offline_hexdigest(rendered)
# Atomically check if the key exists in offline manifest.
# If it doesn't, set a placeholder key (None). This is to prevent
# concurrent _compress_template instances from rendering the
# same node, and then writing to the same file.
with offline_manifest_lock:
if key in offline_manifest:
continue
offline_manifest[key] = None
node_id = id(node)
with node_locks_lock:
node_lock = node_locks[node_id]
try:
with node_lock:
result = parser.render_node(template, context, node)
except Exception as e:
errors.append(
CommandError(
"An error occurred during rendering %s: "
"%s" % (template.template_name, smart_str(e))
)
)
del offline_manifest[key]
return
finally:
with node_locks_lock:
node_locks.pop(node_id, None)
result = result.replace(
settings.COMPRESS_URL, settings.COMPRESS_URL_PLACEHOLDER
)
offline_manifest[key] = result
context.pop()
def handle_extensions(self, extensions=("html",)):
"""
organizes multiple extensions that are separated with commas or
passed by using --extension/-e multiple times.
for example: running 'django-admin compress -e js,txt -e xhtml -a'
would result in an extension list: ['.js', '.txt', '.xhtml']
>>> handle_extensions(['.html', 'html,js,py,py,py,.py', 'py,.py'])
['.html', '.js']
>>> handle_extensions(['.html, txt,.tpl'])
['.html', '.tpl', '.txt']
"""
ext_list = []
for ext in extensions:
ext_list.extend(ext.replace(" ", "").split(","))
for i, ext in enumerate(ext_list):
if not ext.startswith("."):
ext_list[i] = ".%s" % ext_list[i]
return set(ext_list)
def handle(self, **options):
self.handle_inner(**options)
def handle_inner(self, **options):
if not settings.COMPRESS_ENABLED and not options.get("force"):
raise CommandError(
"Compressor is disabled. Set the COMPRESS_ENABLED "
"setting or use --force to override."
)
if not settings.COMPRESS_OFFLINE:
if not options.get("force"):
raise CommandError(
"Offline compression is disabled. Set "
"COMPRESS_OFFLINE or use the --force to override."
)
log = options.get("log", sys.stdout)
verbosity = options.get("verbosity", 1)
follow_links = options.get("follow_links", False)
extensions = self.handle_extensions(options.get("extensions") or ["html"])
engines = [e.strip() for e in options.get("engines", [])] or ["django"]
final_offline_manifest = {}
final_block_count = 0
final_results = []
for engine in engines:
offline_manifest, block_count, results = self.compress(
engine, extensions, verbosity, follow_links, log
)
final_results.extend(results)
final_block_count += block_count
final_offline_manifest.update(offline_manifest)
write_offline_manifest(final_offline_manifest)
return final_block_count, final_results
| Command |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/loader.py | {
"start": 1791,
"end": 3148
} | class ____(ABC):
"""A scoped object in which Loadable objects will be fetched in batches and cached.
Expected to be implemented by request scoped context objects that have access to the DagsterInstance.
"""
@property
@abstractmethod
def instance(self) -> "DagsterInstance":
raise NotImplementedError()
@property
@abstractmethod
def loaders(self) -> dict[type, tuple[DataLoader, BlockingDataLoader]]:
raise NotImplementedError()
def get_loaders_for(self, ttype: type) -> tuple[DataLoader, BlockingDataLoader]:
if ttype not in self.loaders:
if not issubclass(ttype, LoadableBy):
check.failed(f"{ttype} is not Loadable")
batch_load_fn = partial(ttype._batch_load, context=self) # noqa
blocking_batch_load_fn = partial(ttype._blocking_batch_load, context=self) # noqa
self.loaders[ttype] = (
DataLoader(batch_load_fn=batch_load_fn),
BlockingDataLoader(batch_load_fn=blocking_batch_load_fn),
)
return self.loaders[ttype]
def clear_loaders(self) -> None:
for ttype in self.loaders:
del self.loaders[ttype]
TResult = TypeVar("TResult")
TKey = TypeVar("TKey")
TContext = TypeVar("TContext", bound=LoadingContext, default=LoadingContext)
| LoadingContext |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.