language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | pennersr__django-allauth | tests/mocking.py | {
"start": 89,
"end": 739
} | class ____:
def __init__(self, status_code, content, headers=None):
if headers is None:
headers = {}
self.status_code = status_code
if isinstance(content, dict):
content = json.dumps(content)
headers["content-type"] = "application/json"
self.content = content.encode("utf8")
self.headers = headers
def json(self):
return json.loads(self.text)
def raise_for_status(self):
pass
@property
def ok(self):
return self.status_code // 100 == 2
@property
def text(self):
return self.content.decode("utf8")
| MockedResponse |
python | pennersr__django-allauth | allauth/socialaccount/providers/bitly/provider.py | {
"start": 436,
"end": 809
} | class ____(OAuth2Provider):
id = "bitly"
name = "Bitly"
account_class = BitlyAccount
oauth2_adapter_class = BitlyOAuth2Adapter
def extract_uid(self, data):
return str(data["login"])
def extract_common_fields(self, data):
return dict(username=data["login"], name=data.get("full_name"))
provider_classes = [BitlyProvider]
| BitlyProvider |
python | spack__spack | lib/spack/spack/builder.py | {
"start": 19430,
"end": 24096
} | class ____(BaseBuilder, collections.abc.Sequence):
"""A builder is a class that, given a package object (i.e. associated with concrete spec),
knows how to install it.
The builder behaves like a sequence, and when iterated over return the ``phases`` of the
installation in the correct order.
"""
#: Sequence of phases. Must be defined in derived classes
phases: Tuple[str, ...] = ()
#: Build system name. Must also be defined in derived classes.
build_system: Optional[str] = None
#: Methods, with no arguments, that the adapter can find in Package classes,
#: if a builder is not defined.
package_methods: Tuple[str, ...]
# Use :attr:`package_methods` instead of this attribute, which is deprecated
legacy_methods: Tuple[str, ...] = ()
#: Methods with the same signature as phases, that the adapter can find in Package classes,
#: if a builder is not defined.
package_long_methods: Tuple[str, ...]
# Use :attr:`package_long_methods` instead of this attribute, which is deprecated
legacy_long_methods: Tuple[str, ...]
#: Attributes that the adapter can find in Package classes, if a builder is not defined
package_attributes: Tuple[str, ...]
# Use :attr:`package_attributes` instead of this attribute, which is deprecated
legacy_attributes: Tuple[str, ...] = ()
# type hints for some of the legacy methods
build_time_test_callbacks: List[str]
install_time_test_callbacks: List[str]
#: List of glob expressions. Each expression must either be absolute or relative to the package
#: source path. Matching artifacts found at the end of the build process will be copied in the
#: same directory tree as _spack_build_logfile and _spack_build_envfile.
@property
def archive_files(self) -> List[str]:
return []
def __init__(self, pkg: spack.package_base.PackageBase) -> None:
super().__init__(pkg)
self.callbacks = {}
for phase in self.phases:
self.callbacks[phase] = InstallationPhase(phase, self)
def __getitem__(self, idx):
key = self.phases[idx]
return self.callbacks[key]
def __len__(self):
return len(self.phases)
def package_methods(builder: Type[Builder]) -> Tuple[str, ...]:
"""Returns the list of methods, taking no arguments, that are defined in the package
class and are associated with the builder.
"""
if hasattr(builder, "package_methods"):
# Package API v2.2
return builder.package_methods
return builder.legacy_methods
def package_attributes(builder: Type[Builder]) -> Tuple[str, ...]:
"""Returns the list of attributes that are defined in the package class and are associated
with the builder.
"""
if hasattr(builder, "package_attributes"):
# Package API v2.2
return builder.package_attributes
return builder.legacy_attributes
def package_long_methods(builder: Type[Builder]) -> Tuple[str, ...]:
"""Returns the list of methods, with the same signature as phases, that are defined in
the package class and are associated with the builder.
"""
if hasattr(builder, "package_long_methods"):
# Package API v2.2
return builder.package_long_methods
return getattr(builder, "legacy_long_methods", tuple())
def sanity_check_prefix(builder: Builder):
"""Check that specific directories and files are created after installation.
The files to be checked are in the ``sanity_check_is_file`` attribute of the
package object, while the directories are in the ``sanity_check_is_dir``.
Args:
builder: builder that installed the package
"""
pkg = builder.pkg
def check_paths(path_list: List[str], filetype: str, predicate: Callable[[str], bool]) -> None:
if isinstance(path_list, str):
path_list = [path_list]
for path in path_list:
if not predicate(os.path.join(pkg.prefix, path)):
raise spack.error.InstallError(
f"Install failed for {pkg.name}. No such {filetype} in prefix: {path}"
)
check_paths(pkg.sanity_check_is_file, "file", os.path.isfile)
check_paths(pkg.sanity_check_is_dir, "directory", os.path.isdir)
# Check that the prefix is not empty apart from the .spack/ directory
with os.scandir(pkg.prefix) as entries:
f = next(
(f for f in entries if not (f.name == ".spack" and f.is_dir(follow_symlinks=False))),
None,
)
if f is None:
raise spack.error.InstallError(f"Install failed for {pkg.name}. Nothing was installed!")
| Builder |
python | huggingface__transformers | src/transformers/models/mistral3/modular_mistral3.py | {
"start": 4215,
"end": 4279
} | class ____(LlavaPreTrainedModel):
pass
| Mistral3PreTrainedModel |
python | getsentry__sentry | src/sentry/testutils/helpers/apigateway.py | {
"start": 1009,
"end": 1334
} | class ____(OrganizationEndpoint):
permission_classes: tuple[type[BasePermission], ...] = (AllowAny,)
def get(self, request, organization):
return Response({"proxy": False})
def post(self, request, organization):
return HttpResponseRedirect("https://zombo.com")
@region_silo_endpoint
| RegionEndpoint |
python | kamyu104__LeetCode-Solutions | Python/taking-maximum-energy-from-the-mystic-dungeon.py | {
"start": 37,
"end": 492
} | class ____(object):
def maximumEnergy(self, energy, k):
"""
:type energy: List[int]
:type k: int
:rtype: int
"""
result = float("-inf")
for i in xrange(k):
curr = 0
for j in reversed(xrange(((len(energy)-i)-1)%k, len(energy)-i, k)): # xrange(len(energy)-1-i, -1, -k)
curr += energy[j]
result = max(result, curr)
return result
| Solution |
python | getsentry__sentry | src/sentry/codecov/endpoints/repositories/serializers.py | {
"start": 505,
"end": 2421
} | class ____(serializers.Serializer):
"""
Serializer for repositories response
"""
results = RepositoryNodeSerializer(many=True)
pageInfo = PageInfoSerializer()
totalCount = serializers.IntegerField()
def to_representation(self, graphql_response):
"""
Transform the GraphQL response to the serialized format
"""
try:
repository_data = graphql_response["data"]["owner"]["repositories"]
repositories = repository_data["edges"]
page_info = repository_data.get("pageInfo", {})
nodes = []
for edge in repositories:
node = edge["node"]
nodes.append(node)
response_data = {
"results": nodes,
"pageInfo": repository_data.get(
"pageInfo",
{
"hasNextPage": page_info.get("hasNextPage", False),
"hasPreviousPage": page_info.get("hasPreviousPage", False),
"startCursor": page_info.get("startCursor"),
"endCursor": page_info.get("endCursor"),
},
),
"totalCount": repository_data.get("totalCount", len(nodes)),
}
return super().to_representation(response_data)
except (KeyError, TypeError) as e:
sentry_sdk.capture_exception(e)
logger.exception(
"Error parsing GraphQL response",
extra={
"error": str(e),
"endpoint": "repositories",
"response_keys": (
list(graphql_response.keys())
if isinstance(graphql_response, dict)
else None
),
},
)
raise
| RepositoriesSerializer |
python | pymupdf__PyMuPDF | src/__init__.py | {
"start": 834098,
"end": 834593
} | class ____(mupdf.PdfFilterOptions2):
def __init__(self):
super().__init__()
self.use_virtual_image_filter()
def image_filter( self, ctx, ctm, name, image):
assert isinstance(ctm, mupdf.fz_matrix)
JM_image_filter(self, mupdf.FzMatrix(ctm), name, image)
if mupdf_cppyy:
# cppyy doesn't appear to treat returned None as nullptr,
# resulting in obscure 'python exception' exception.
return 0
| JM_image_reporter_Filter |
python | aio-libs__aiohttp | aiohttp/web_exceptions.py | {
"start": 11170,
"end": 11237
} | class ____(HTTPServerError):
status_code = 501
| HTTPNotImplemented |
python | spyder-ide__spyder | spyder/plugins/editor/extensions/docstring.py | {
"start": 29586,
"end": 38117
} | class ____(object):
"""Parse function definition text."""
def __init__(self):
"""."""
self.has_info = False
self.func_text = ''
self.args_text = ''
self.func_indent = ''
self.arg_name_list = []
self.arg_type_list = []
self.arg_value_list = []
self.return_type_annotated = None
self.return_value_in_body = []
self.raise_list = None
self.has_yield = False
@staticmethod
def is_char_in_pairs(pos_char, pairs):
"""Return True if the character is in pairs of brackets or quotes."""
for pos_left, pos_right in pairs.items():
if pos_left < pos_char < pos_right:
return True
return False
@staticmethod
def _find_quote_position(text):
"""Return the start and end position of pairs of quotes."""
pos = {}
is_found_left_quote = False
for idx, character in enumerate(text):
if is_found_left_quote is False:
if character == "'" or character == '"':
is_found_left_quote = True
quote = character
left_pos = idx
else:
if character == quote and text[idx - 1] != '\\':
pos[left_pos] = idx
is_found_left_quote = False
if is_found_left_quote:
raise IndexError("No matching close quote at: " + str(left_pos))
return pos
def _find_bracket_position(self, text, bracket_left, bracket_right,
pos_quote):
"""Return the start and end position of pairs of brackets.
https://stackoverflow.com/questions/29991917/
indices-of-matching-parentheses-in-python
"""
pos = {}
pstack = []
for idx, character in enumerate(text):
if character == bracket_left and \
not self.is_char_in_pairs(idx, pos_quote):
pstack.append(idx)
elif character == bracket_right and \
not self.is_char_in_pairs(idx, pos_quote):
if len(pstack) == 0:
raise IndexError(
"No matching closing parens at: " + str(idx))
pos[pstack.pop()] = idx
if len(pstack) > 0:
raise IndexError(
"No matching opening parens at: " + str(pstack.pop()))
return pos
def split_arg_to_name_type_value(self, args_list):
"""Split argument text to name, type, value."""
for arg in args_list:
arg_type = None
arg_value = None
has_type = False
has_value = False
pos_colon = arg.find(':')
pos_equal = arg.find('=')
if pos_equal > -1:
has_value = True
if pos_colon > -1:
if not has_value:
has_type = True
elif pos_equal > pos_colon: # exception for def foo(arg1=":")
has_type = True
if has_value and has_type:
arg_name = arg[0:pos_colon].strip()
arg_type = arg[pos_colon + 1:pos_equal].strip()
arg_value = arg[pos_equal + 1:].strip()
elif not has_value and has_type:
arg_name = arg[0:pos_colon].strip()
arg_type = arg[pos_colon + 1:].strip()
elif has_value and not has_type:
arg_name = arg[0:pos_equal].strip()
arg_value = arg[pos_equal + 1:].strip()
else:
arg_name = arg.strip()
self.arg_name_list.append(arg_name)
self.arg_type_list.append(arg_type)
self.arg_value_list.append(arg_value)
def split_args_text_to_list(self, args_text):
"""Split the text including multiple arguments to list.
This function uses a comma to separate arguments and ignores a comma in
brackets and quotes.
"""
args_list = []
idx_find_start = 0
idx_arg_start = 0
try:
pos_quote = self._find_quote_position(args_text)
pos_round = self._find_bracket_position(args_text, '(', ')',
pos_quote)
pos_curly = self._find_bracket_position(args_text, '{', '}',
pos_quote)
pos_square = self._find_bracket_position(args_text, '[', ']',
pos_quote)
except IndexError:
return None
while True:
pos_comma = args_text.find(',', idx_find_start)
if pos_comma == -1:
break
idx_find_start = pos_comma + 1
if self.is_char_in_pairs(pos_comma, pos_round) or \
self.is_char_in_pairs(pos_comma, pos_curly) or \
self.is_char_in_pairs(pos_comma, pos_square) or \
self.is_char_in_pairs(pos_comma, pos_quote):
continue
args_list.append(args_text[idx_arg_start:pos_comma])
idx_arg_start = pos_comma + 1
if idx_arg_start < len(args_text):
args_list.append(args_text[idx_arg_start:])
return args_list
def parse_def(self, text):
"""Parse the function definition text."""
self.__init__()
if not is_start_of_function(text):
return
self.func_indent = get_indent(text)
text = text.strip()
return_type_re = re.search(
r'->[ ]*([\"\'a-zA-Z0-9_,()\[\] ]*):$', text)
if return_type_re:
self.return_type_annotated = return_type_re.group(1).strip(" ()\\")
if is_tuple_strings(self.return_type_annotated):
self.return_type_annotated = (
"(" + self.return_type_annotated + ")"
)
text_end = text.rfind(return_type_re.group(0))
else:
self.return_type_annotated = None
text_end = len(text)
pos_args_start = text.find('(') + 1
pos_args_end = text.rfind(')', pos_args_start, text_end)
self.args_text = text[pos_args_start:pos_args_end]
args_list = self.split_args_text_to_list(self.args_text)
if args_list is not None:
self.has_info = True
self.split_arg_to_name_type_value(args_list)
def parse_body(self, text):
"""Parse the function body text."""
re_raise = re.findall(r'[ \t]raise ([a-zA-Z0-9_]*)', text)
if len(re_raise) > 0:
self.raise_list = [x.strip() for x in re_raise]
# remove duplicates from list while keeping it in the order
# in python 2.7
# stackoverflow.com/questions/7961363/removing-duplicates-in-lists
self.raise_list = list(OrderedDict.fromkeys(self.raise_list))
re_yield = re.search(r'[ \t]yield ', text)
if re_yield:
self.has_yield = True
# get return value
pattern_return = r'return |yield '
line_list = text.split('\n')
is_found_return = False
line_return_tmp = ''
for line in line_list:
line = line.strip()
if is_found_return is False:
if re.match(pattern_return, line):
is_found_return = True
if is_found_return:
line_return_tmp += line
# check the integrity of line
try:
pos_quote = self._find_quote_position(line_return_tmp)
if line_return_tmp[-1] == '\\':
line_return_tmp = line_return_tmp[:-1]
continue
self._find_bracket_position(line_return_tmp, '(', ')',
pos_quote)
self._find_bracket_position(line_return_tmp, '{', '}',
pos_quote)
self._find_bracket_position(line_return_tmp, '[', ']',
pos_quote)
except IndexError:
continue
return_value = re.sub(pattern_return, '', line_return_tmp)
self.return_value_in_body.append(return_value)
is_found_return = False
line_return_tmp = ''
| FunctionInfo |
python | gevent__gevent | src/gevent/tests/test__pywsgi.py | {
"start": 45626,
"end": 45926
} | class ____(TestErrorInApplication):
def application(self, env, start_response):
self.error = greentest.ExpectedException('TestError_after_start_response.application')
start_response('200 OK', [('Content-Type', 'text/plain')])
raise self.error
| TestError_after_start_response |
python | pydata__xarray | xarray/core/dtypes.py | {
"start": 379,
"end": 556
} | class ____:
def __gt__(self, other):
return True
def __eq__(self, other):
return isinstance(other, type(self))
@functools.total_ordering
| AlwaysGreaterThan |
python | sphinx-doc__sphinx | sphinx/ext/autodoc/_dynamic/_preserve_defaults.py | {
"start": 505,
"end": 5458
} | class ____:
def __init__(self, name: str) -> None:
self.name = name
def __repr__(self) -> str:
return self.name
def _get_arguments(obj: Any, /) -> ast.arguments | None:
"""Parse 'ast.arguments' from an object.
This tries to parse the original code for an object and returns
an 'ast.arguments' node.
"""
try:
source = inspect.getsource(obj)
if source.startswith((' ', '\t')):
# 'obj' is in some indented block.
module = ast.parse('if True:\n' + source)
subject = module.body[0].body[0] # type: ignore[attr-defined]
else:
module = ast.parse(source)
subject = module.body[0]
except (OSError, TypeError):
# bail; failed to load source for 'obj'.
return None
except SyntaxError:
if _is_lambda(obj):
# Most likely a multi-line arising from detecting a lambda, e.g.:
#
# class Egg:
# x = property(
# lambda self: 1, doc="...")
return None
# Other syntax errors that are not due to the fact that we are
# documenting a lambda function are propagated
# (in particular if a lambda is renamed by the user).
raise
return _get_arguments_inner(subject)
def _is_lambda(x: Any, /) -> bool:
return isinstance(x, types.LambdaType) and x.__name__ == _LAMBDA_NAME
def _get_arguments_inner(x: Any, /) -> ast.arguments | None:
if isinstance(x, (ast.AsyncFunctionDef, ast.FunctionDef, ast.Lambda)):
return x.args
if isinstance(x, (ast.Assign, ast.AnnAssign)):
return _get_arguments_inner(x.value)
return None
def get_default_value(lines: list[str], position: ast.expr) -> str | None:
try:
if position.lineno == position.end_lineno:
line = lines[position.lineno - 1]
return line[position.col_offset : position.end_col_offset]
else:
# multiline value is not supported now
return None
except (AttributeError, IndexError):
return None
def update_default_value(obj: Any, bound_method: bool) -> None:
"""Update default value info of *obj* using type_comments."""
try:
lines = inspect.getsource(obj).splitlines()
if lines[0].startswith((' ', '\t')):
# insert a dummy line to follow what _get_arguments() does.
lines.insert(0, '')
except (OSError, TypeError):
lines = []
try:
args = _get_arguments(obj)
except SyntaxError:
return
if args is None:
# If the object is a built-in, we won't be always able to recover
# the function definition and its arguments. This happens if *obj*
# is the `__init__` method generated automatically for dataclasses.
return
if not args.defaults and not args.kw_defaults:
return
try:
if bound_method and inspect.ismethod(obj) and hasattr(obj, '__func__'):
sig = inspect.signature(obj.__func__)
else:
sig = inspect.signature(obj)
defaults = list(args.defaults)
kw_defaults = list(args.kw_defaults)
parameters = list(sig.parameters.values())
for i, param in enumerate(parameters):
if param.default is param.empty:
if param.kind == param.KEYWORD_ONLY:
# Consume kw_defaults for kwonly args
kw_defaults.pop(0)
else:
if param.kind in {param.POSITIONAL_ONLY, param.POSITIONAL_OR_KEYWORD}:
default = defaults.pop(0)
value = get_default_value(lines, default)
if value is None:
value = ast_unparse(default)
parameters[i] = param.replace(default=DefaultValue(value))
else:
default = kw_defaults.pop(0) # type: ignore[assignment]
value = get_default_value(lines, default)
if value is None:
value = ast_unparse(default)
parameters[i] = param.replace(default=DefaultValue(value))
sig = sig.replace(parameters=parameters)
try:
obj.__signature__ = sig
except AttributeError:
# __signature__ can't be set directly on bound methods.
obj.__dict__['__signature__'] = sig
except (AttributeError, TypeError):
# Failed to update signature (e.g. built-in or extension types).
# For user-defined functions, "obj" may not have __dict__,
# e.g. when decorated with a class that defines __slots__.
# In this case, we can't set __signature__.
return
except NotImplementedError as exc: # failed to ast_unparse()
logger.warning(
__('Failed to parse a default argument value for %r: %s'), obj, exc
)
| DefaultValue |
python | ray-project__ray | rllib/connectors/env_to_module/prev_actions_prev_rewards.py | {
"start": 513,
"end": 6813
} | class ____(ConnectorV2):
"""A connector piece that adds previous rewards and actions to the input obs.
- Requires Columns.OBS to be already a part of the batch.
- This connector makes the assumption that under the Columns.OBS key in batch,
there is either a list of individual env observations to be flattened (single-agent
case) or a dict mapping (AgentID, ModuleID)-tuples to lists of data items to be
flattened (multi-agent case).
- Converts Columns.OBS data into a dict (or creates a sub-dict if obs are
already a dict), and adds "prev_rewards" and "prev_actions"
to this dict. The original observations are stored under the self.ORIG_OBS_KEY in
that dict.
- If your RLModule does not handle dict inputs, you will have to plug in an
`FlattenObservations` connector piece after this one.
- Does NOT work in a Learner pipeline as it operates on individual observation
items (as opposed to batched/time-ranked data).
- Therefore, assumes that the altered (flattened) observations will be written
back into the episode by a later connector piece in the env-to-module pipeline
(which this piece is part of as well).
- Only reads reward- and action information from the given list of Episode objects.
- Does NOT write any observations (or other data) to the given Episode objects.
"""
ORIG_OBS_KEY = "_orig_obs"
PREV_ACTIONS_KEY = "prev_n_actions"
PREV_REWARDS_KEY = "prev_n_rewards"
@override(ConnectorV2)
def recompute_output_observation_space(
self,
input_observation_space: gym.Space,
input_action_space: gym.Space,
) -> gym.Space:
if self._multi_agent:
ret = {}
for agent_id, obs_space in input_observation_space.spaces.items():
act_space = input_action_space[agent_id]
ret[agent_id] = self._convert_individual_space(obs_space, act_space)
return gym.spaces.Dict(ret)
else:
return self._convert_individual_space(
input_observation_space, input_action_space
)
def __init__(
self,
input_observation_space: Optional[gym.Space] = None,
input_action_space: Optional[gym.Space] = None,
*,
multi_agent: bool = False,
n_prev_actions: int = 1,
n_prev_rewards: int = 1,
**kwargs,
):
"""Initializes a PrevActionsPrevRewards instance.
Args:
multi_agent: Whether this is a connector operating on a multi-agent
observation space mapping AgentIDs to individual agents' observations.
n_prev_actions: The number of previous actions to include in the output
data. Discrete actions are ont-hot'd. If > 1, will concatenate the
individual action tensors.
n_prev_rewards: The number of previous rewards to include in the output
data.
"""
super().__init__(
input_observation_space=input_observation_space,
input_action_space=input_action_space,
**kwargs,
)
self._multi_agent = multi_agent
self.n_prev_actions = n_prev_actions
self.n_prev_rewards = n_prev_rewards
# TODO: Move into input_observation_space setter
# Thus far, this connector piece only operates on discrete action spaces.
# act_spaces = [self.input_action_space]
# if self._multi_agent:
# act_spaces = self.input_action_space.spaces.values()
# if not all(isinstance(s, gym.spaces.Discrete) for s in act_spaces):
# raise ValueError(
# f"{type(self).__name__} only works on Discrete action spaces "
# f"thus far (or, for multi-agent, on Dict spaces mapping AgentIDs to "
# f"the individual agents' Discrete action spaces)!"
# )
@override(ConnectorV2)
def __call__(
self,
*,
rl_module: RLModule,
batch: Optional[Dict[str, Any]],
episodes: List[EpisodeType],
explore: Optional[bool] = None,
shared_data: Optional[dict] = None,
**kwargs,
) -> Any:
for sa_episode in self.single_agent_episode_iterator(
episodes, agents_that_stepped_only=True
):
# Episode is not numpy'ized yet and thus still operates on lists of items.
assert not sa_episode.is_numpy
augmented_obs = {self.ORIG_OBS_KEY: sa_episode.get_observations(-1)}
if self.n_prev_actions:
augmented_obs[self.PREV_ACTIONS_KEY] = flatten_to_single_ndarray(
batch_fn(
sa_episode.get_actions(
indices=slice(-self.n_prev_actions, None),
fill=0.0,
one_hot_discrete=True,
)
)
)
if self.n_prev_rewards:
augmented_obs[self.PREV_REWARDS_KEY] = np.array(
sa_episode.get_rewards(
indices=slice(-self.n_prev_rewards, None),
fill=0.0,
)
)
# Write new observation directly back into the episode.
sa_episode.set_observations(at_indices=-1, new_data=augmented_obs)
# We set the Episode's observation space to ours so that we can safely
# set the last obs to the new value (without causing a space mismatch
# error).
sa_episode.observation_space = self.observation_space
return batch
def _convert_individual_space(self, obs_space, act_space):
return gym.spaces.Dict(
{
self.ORIG_OBS_KEY: obs_space,
# Currently only works for Discrete action spaces.
self.PREV_ACTIONS_KEY: Box(
0.0, 1.0, (act_space.n * self.n_prev_actions,), np.float32
),
self.PREV_REWARDS_KEY: Box(
float("-inf"),
float("inf"),
(self.n_prev_rewards,),
np.float32,
),
}
)
| PrevActionsPrevRewards |
python | jazzband__django-polymorphic | src/polymorphic/tests/models.py | {
"start": 5102,
"end": 5218
} | class ____(models.Model):
articles = models.ManyToManyField(ModelArticle, related_name="orderline")
| ModelOrderLine |
python | pypa__pip | src/pip/_vendor/rich/errors.py | {
"start": 271,
"end": 344
} | class ____(ConsoleError):
"""Style stack is invalid."""
| StyleStackError |
python | joke2k__faker | tests/providers/test_address.py | {
"start": 41259,
"end": 41991
} | class ____:
"""Test fi_FI address provider methods"""
def test_city(self, faker, num_samples):
for _ in range(num_samples):
city = faker.city()
assert isinstance(city, str)
assert city in FiFiAddressProvider.cities
def test_street_suffix(self, faker, num_samples):
for _ in range(num_samples):
suffix = faker.street_suffix()
assert isinstance(suffix, str)
assert suffix in FiFiAddressProvider.street_suffixes
def test_state(self, faker, num_samples):
for _ in range(num_samples):
state = faker.state()
assert isinstance(state, str)
assert state in FiFiAddressProvider.states
| TestFiFi |
python | apache__airflow | airflow-ctl/src/airflowctl/api/datamodels/generated.py | {
"start": 17911,
"end": 17960
} | class ____(RootModel[Any]):
root: Any
| JsonValue |
python | docker__docker-py | tests/integration/api_client_test.py | {
"start": 478,
"end": 912
} | class ____(unittest.TestCase):
def test_client_init(self):
client = docker.APIClient(version='auto', **kwargs_from_env())
client_version = client._version
api_version = client.version(api_version=False)['ApiVersion']
assert client_version == api_version
api_version_2 = client.version()['ApiVersion']
assert client_version == api_version_2
client.close()
| AutoDetectVersionTest |
python | django__django | tests/test_client_regress/models.py | {
"start": 104,
"end": 357
} | class ____(AbstractBaseUser):
email = models.EmailField(verbose_name="email address", max_length=255, unique=True)
custom_objects = BaseUserManager()
USERNAME_FIELD = "email"
class Meta:
app_label = "test_client_regress"
| CustomUser |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/definitions/declarative_automation/legacy/legacy_context.py | {
"start": 2033,
"end": 18188
} | class ____:
"""Context object containing methods and properties used for evaluating the entire state of an
asset's automation rules.
"""
asset_key: AssetKey
condition: "AutomationCondition"
cursor: Optional[AutomationConditionCursor]
node_cursor: Optional[AutomationConditionNodeCursor]
candidate_subset: ValidAssetSubset
instance_queryer: "CachingInstanceQueryer"
data_time_resolver: "CachingDataTimeResolver"
request_subsets_by_key: Mapping[EntityKey, EntitySubset]
expected_data_time_mapping: Mapping[AssetKey, Optional[datetime.datetime]]
start_timestamp: float
respect_materialization_data_versions: bool
auto_materialize_run_tags: Mapping[str, str]
logger: logging.Logger
root_ref: Optional["LegacyRuleEvaluationContext"] = None
@staticmethod
def create(asset_key: AssetKey, evaluator: "AutomationConditionEvaluator"):
instance_queryer = evaluator.asset_graph_view.get_inner_queryer_for_back_compat()
cursor = evaluator.cursor.get_previous_condition_cursor(asset_key)
condition = check.not_none(evaluator.asset_graph.get(asset_key).automation_condition)
partitions_def = evaluator.asset_graph.get(asset_key).partitions_def
return LegacyRuleEvaluationContext(
asset_key=asset_key,
condition=condition,
cursor=cursor,
node_cursor=cursor.node_cursors_by_unique_id.get(
condition.get_node_unique_id(parent_unique_id=None, index=0, target_key=None)
)
if cursor
else None,
candidate_subset=ValidAssetSubset.all(asset_key, partitions_def),
data_time_resolver=evaluator.legacy_data_time_resolver,
instance_queryer=instance_queryer,
request_subsets_by_key=evaluator.request_subsets_by_key,
expected_data_time_mapping=evaluator.legacy_expected_data_time_by_key,
start_timestamp=get_current_timestamp(),
respect_materialization_data_versions=evaluator.legacy_respect_materialization_data_versions,
auto_materialize_run_tags=evaluator.legacy_auto_materialize_run_tags,
logger=evaluator.logger,
)
def for_child(
self,
child_condition: "AutomationCondition",
child_unique_id: str,
candidate_subset: EntitySubset,
) -> "LegacyRuleEvaluationContext":
return dataclasses.replace(
self,
condition=child_condition,
node_cursor=self.cursor.node_cursors_by_unique_id.get(child_unique_id)
if self.cursor
else None,
candidate_subset=ValidAssetSubset(
key=candidate_subset.key, value=candidate_subset.get_internal_value()
),
root_ref=self.root_context,
start_timestamp=get_current_timestamp(),
)
@property
def root_context(self) -> "LegacyRuleEvaluationContext":
"""A reference to the context of the root condition for this evaluation."""
return self.root_ref or self
@property
def asset_graph(self) -> "BaseAssetGraph":
return self.instance_queryer.asset_graph
@property
def partitions_def(self) -> Optional[PartitionsDefinition]:
return self.asset_graph.get(self.asset_key).partitions_def
@property
def evaluation_time(self) -> datetime.datetime:
"""Returns the time at which this rule is being evaluated."""
return self.instance_queryer.evaluation_time
@property
def previous_max_storage_id(self) -> Optional[int]:
return self.cursor.temporal_context.last_event_id if self.cursor else None
@property
def previous_evaluation_timestamp(self) -> Optional[float]:
return self.cursor.effective_timestamp if self.cursor else None
@property
def previous_true_subset(self) -> SerializableEntitySubset:
if self.node_cursor is None:
return self.empty_subset()
return self.node_cursor.true_subset
@property
def previous_candidate_subset(self) -> SerializableEntitySubset:
if self.node_cursor is None:
return self.empty_subset()
candidate_subset = self.node_cursor.candidate_subset
if isinstance(candidate_subset, HistoricalAllPartitionsSubsetSentinel):
return ValidAssetSubset.all(self.asset_key, self.partitions_def)
else:
return candidate_subset
@property
def previous_subsets_with_metadata(self) -> Sequence[AssetSubsetWithMetadata]:
if self.node_cursor is None:
return []
return self.node_cursor.subsets_with_metadata
@functools.cached_property
@root_property
def parent_will_update_subset(self) -> ValidAssetSubset:
"""Returns the set of asset partitions whose parents will be updated on this tick, and which
can be materialized in the same run as this asset.
"""
subset = self.empty_subset()
for parent_key in self.asset_graph.get(self.asset_key).parent_keys:
if not self.materializable_in_same_run(self.asset_key, parent_key):
continue
parent_subset = self.request_subsets_by_key.get(parent_key)
if not parent_subset:
continue
parent_subset = ValidAssetSubset.coerce_from_subset(
parent_subset.convert_to_serializable_subset(), self.partitions_def
)
subset |= replace(parent_subset, key=self.asset_key)
return subset
@functools.cached_property
@root_property
def materialized_since_previous_tick_subset(self) -> ValidAssetSubset:
"""Returns the set of asset partitions that were materialized since the previous tick."""
return ValidAssetSubset.from_asset_partitions_set(
self.asset_key,
self.partitions_def,
self.instance_queryer.get_asset_partitions_updated_after_cursor(
self.asset_key,
asset_partitions=None,
after_cursor=self.previous_max_storage_id,
respect_materialization_data_versions=False,
),
)
@property
@root_property
def _previous_tick_discarded_subset(self) -> Optional[SerializableEntitySubset[AssetKey]]:
"""Fetches the unique id corresponding to the DiscardOnMaxMaterializationsExceededRule, if
that rule is part of the broader condition.
"""
from dagster._core.definitions.auto_materialize_rule_impls import (
DiscardOnMaxMaterializationsExceededRule,
)
from dagster._core.definitions.declarative_automation.legacy.rule_condition import (
RuleCondition,
)
from dagster._core.definitions.declarative_automation.operators import (
NotAutomationCondition,
)
# if you have a discard condition, it'll be part of a structure of the form
# Or(MaterializeCond, Not(SkipCond), Not(DiscardCond))
if len(self.condition.children) != 3:
return None
unique_id = self.condition.get_node_unique_id(
parent_unique_id=None, index=None, target_key=None
)
# get Not(DiscardCond)
not_discard_condition = self.condition.children[2]
unique_id = not_discard_condition.get_node_unique_id(
parent_unique_id=unique_id, index=2, target_key=None
)
if not isinstance(not_discard_condition, NotAutomationCondition):
return None
# get DiscardCond
discard_condition = not_discard_condition.children[0]
unique_id = discard_condition.get_node_unique_id(
parent_unique_id=unique_id, index=0, target_key=None
)
if not isinstance(discard_condition, RuleCondition) or not isinstance(
discard_condition.rule, DiscardOnMaxMaterializationsExceededRule
):
return None
# grab the stored cursor value for the discard condition
discard_cursor = (
self.cursor.node_cursors_by_unique_id.get(unique_id) if self.cursor else None
)
return discard_cursor.true_subset if discard_cursor else None
@property
@root_property
def previous_tick_requested_subset(self) -> SerializableEntitySubset:
"""The set of asset partitions that were requested (or discarded) on the previous tick."""
if self.cursor is None:
return self.empty_subset()
discarded_subset = self._previous_tick_discarded_subset
requested_subset = self.cursor.previous_requested_subset
return (
ValidAssetSubset.coerce_from_subset(requested_subset, self.partitions_def)
| discarded_subset
if discarded_subset
else requested_subset
)
@property
def materialized_requested_or_discarded_since_previous_tick_subset(self) -> ValidAssetSubset:
"""Returns the set of asset partitions that were materialized since the previous tick."""
return self.materialized_since_previous_tick_subset | self.previous_tick_requested_subset
@functools.cached_property
@root_property
def _parent_has_updated_subset_and_new_latest_storage_id(
self,
) -> tuple[ValidAssetSubset, Optional[int]]:
"""Returns the set of asset partitions whose parents have updated since the last time this
condition was evaluated.
"""
max_child_partitions_str = os.getenv("DAGSTER_MAX_AMP_CHILD_PARTITIONS", None)
(
asset_partitions,
cursor,
) = self.root_context.instance_queryer.asset_partitions_with_newly_updated_parents_and_new_cursor(
latest_storage_id=self.previous_max_storage_id,
child_asset_key=self.root_context.asset_key,
map_old_time_partitions=False,
max_child_partitions=int(max_child_partitions_str)
if max_child_partitions_str
else None,
)
return ValidAssetSubset.from_asset_partitions_set(
self.asset_key, self.partitions_def, asset_partitions
), cursor
@property
@root_property
def parent_has_updated_subset(self) -> ValidAssetSubset:
subset, _ = self._parent_has_updated_subset_and_new_latest_storage_id
return subset
@property
@root_property
def new_max_storage_id(self) -> Optional[int]:
_, storage_id = self._parent_has_updated_subset_and_new_latest_storage_id
return storage_id
@property
def parent_has_or_will_update_subset(self) -> ValidAssetSubset:
"""Returns the set of asset partitions whose parents have updated since the last time this
condition was evaluated, or will update on this tick.
"""
return self.parent_has_updated_subset | self.root_context.parent_will_update_subset
@property
def candidate_parent_has_or_will_update_subset(self) -> ValidAssetSubset:
"""Returns the set of candidates for this tick which have parents that have updated since
the previous tick, or will update on this tick.
"""
return self.candidate_subset & self.parent_has_or_will_update_subset
@property
def candidates_not_evaluated_on_previous_tick_subset(self) -> ValidAssetSubset:
"""Returns the set of candidates for this tick which were not candidates on the previous
tick.
"""
from dagster._core.definitions.declarative_automation.serialized_objects import (
HistoricalAllPartitionsSubsetSentinel,
)
if not self.node_cursor:
return self.candidate_subset
# when the candidate_subset is HistoricalAllPartitionsSubsetSentinel, this indicates that the
# entire asset was evaluated for this condition on the previous tick, and so no candidates
# were *not* evaluated on the previous tick
elif isinstance(self.node_cursor.candidate_subset, HistoricalAllPartitionsSubsetSentinel):
return self.empty_subset()
return self.candidate_subset - self.node_cursor.candidate_subset
def materializable_in_same_run(self, child_key: AssetKey, parent_key: AssetKey) -> bool:
"""Returns whether a child asset can be materialized in the same run as a parent asset."""
from dagster._core.definitions.assets.graph.asset_graph import executable_in_same_run
return executable_in_same_run(self.asset_graph, child_key, parent_key)
def get_parents_that_will_not_be_materialized_on_current_tick(
self, *, asset_partition: AssetKeyPartitionKey
) -> AbstractSet[AssetKeyPartitionKey]:
"""Returns the set of parent asset partitions that will not be updated in the same run of
this asset partition if a run is launched for this asset partition on this tick.
"""
return {
parent
for parent in self.asset_graph.get_parents_partitions(
asset_key=asset_partition.asset_key, partition_key=asset_partition.partition_key
).parent_partitions
if not self.will_update_asset_partition(parent)
or not self.materializable_in_same_run(asset_partition.asset_key, parent.asset_key)
}
def will_update_asset_partition(self, asset_partition: AssetKeyPartitionKey) -> bool:
parent_subset = self.request_subsets_by_key.get(asset_partition.asset_key)
if not parent_subset:
return False
return asset_partition in parent_subset.convert_to_serializable_subset()
def add_evaluation_data_from_previous_tick(
self,
asset_partitions_by_frozen_metadata: Mapping[
frozenset[tuple[str, MetadataValue]], AbstractSet[AssetKeyPartitionKey]
],
ignore_subset: SerializableEntitySubset,
) -> tuple[ValidAssetSubset, Sequence[AssetSubsetWithMetadata]]:
"""Combines information calculated on this tick with information from the previous tick,
returning a tuple of the combined true subset and the combined subsets with metadata.
Args:
asset_partitions_by_frozen_metadata: A mapping from metadata to the set of asset
partitions that the rule applies to.
ignore_subset: An EntitySubset which represents information that we should *not* carry
forward from the previous tick.
"""
from dagster._core.definitions.declarative_automation.serialized_objects import (
AssetSubsetWithMetadata,
)
mapping = defaultdict(lambda: self.empty_subset())
has_new_metadata_subset = self.empty_subset()
for frozen_metadata, asset_partitions in asset_partitions_by_frozen_metadata.items():
mapping[frozen_metadata] = ValidAssetSubset.from_asset_partitions_set(
self.asset_key, self.partitions_def, asset_partitions
)
has_new_metadata_subset |= mapping[frozen_metadata]
# don't use information from the previous tick if we have explicit metadata for it or
# we've explicitly said to ignore it
ignore_subset = has_new_metadata_subset | ignore_subset
for elt in self.previous_subsets_with_metadata:
carry_forward_subset = (
ValidAssetSubset.coerce_from_subset(elt.subset, self.partitions_def) - ignore_subset
)
if carry_forward_subset.size > 0:
mapping[elt.frozen_metadata] |= carry_forward_subset
# for now, an asset is in the "true" subset if and only if we have some metadata for it
true_subset = self.empty_subset()
for subset in mapping.values():
true_subset |= subset
return (
self.candidate_subset & true_subset,
[
AssetSubsetWithMetadata(subset=subset, metadata=dict(metadata))
for metadata, subset in mapping.items()
],
)
def empty_subset(self) -> ValidAssetSubset:
return ValidAssetSubset.empty(self.asset_key, self.partitions_def)
| LegacyRuleEvaluationContext |
python | scipy__scipy | scipy/signal/tests/test_filter_design.py | {
"start": 64395,
"end": 64774
} | class ____:
def test_basic(self, xp):
b = xp.asarray([1])
a = xp.asarray([1, 1])
b_bs, a_bs = lp2bs(b, a, 0.41722257286366754, 0.18460575326152251)
assert_array_almost_equal(b_bs, xp.asarray([1, 0, 0.17407]), decimal=5)
assert_array_almost_equal(a_bs, xp.asarray([1, 0.18461, 0.17407]), decimal=5)
@make_xp_test_case(bilinear)
| TestLp2bs |
python | huggingface__transformers | src/transformers/models/mobilevit/modeling_mobilevit.py | {
"start": 9738,
"end": 10364
} | class ____(nn.Module):
def __init__(self, config: MobileViTConfig, hidden_size: int, intermediate_size: int) -> None:
super().__init__()
self.dense = nn.Linear(hidden_size, intermediate_size)
if isinstance(config.hidden_act, str):
self.intermediate_act_fn = ACT2FN[config.hidden_act]
else:
self.intermediate_act_fn = config.hidden_act
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.intermediate_act_fn(hidden_states)
return hidden_states
| MobileViTIntermediate |
python | modin-project__modin | modin/core/storage_formats/pandas/parsers.py | {
"start": 14808,
"end": 22672
} | class ____(PandasParser):
@classmethod
def get_sheet_data(cls, sheet, convert_float):
"""
Get raw data from the excel sheet.
Parameters
----------
sheet : openpyxl.worksheet.worksheet.Worksheet
Sheet to get data from.
convert_float : bool
Whether to convert floats to ints or not.
Returns
-------
list
List with sheet data.
"""
return [
[cls._convert_cell(cell, convert_float) for cell in row]
for row in sheet.rows
]
@classmethod
def _convert_cell(cls, cell, convert_float):
"""
Convert excel cell to value.
Parameters
----------
cell : openpyxl.cell.cell.Cell
Excel cell to convert.
convert_float : bool
Whether to convert floats to ints or not.
Returns
-------
list
Value that was converted from the excel cell.
"""
if cell.is_date:
return cell.value
elif cell.data_type == "e":
return np.nan
elif cell.data_type == "b":
return bool(cell.value)
elif cell.value is None:
return ""
elif cell.data_type == "n":
if convert_float:
val = int(cell.value)
if val == cell.value:
return val
else:
return float(cell.value)
return cell.value
@staticmethod
def need_rich_text_param():
"""
Determine whether a required `rich_text` parameter should be specified for the ``WorksheetReader`` constructor.
Returns
-------
bool
"""
import openpyxl
from packaging import version
return version.parse(openpyxl.__version__) >= version.parse("3.1.0")
@staticmethod
@doc(_doc_parse_func, parameters=_doc_parse_parameters_common)
def parse(fname, **kwargs):
num_splits = kwargs.pop("num_splits", None)
start = kwargs.pop("start", None)
end = kwargs.pop("end", None)
excel_header = kwargs.get("_header")
sheet_name = kwargs.get("sheet_name", 0)
footer = b"</sheetData></worksheet>"
# Default to pandas case, where we are not splitting or partitioning
if start is None or end is None:
return pandas.read_excel(fname, **kwargs)
_skiprows = kwargs.pop("skiprows")
import re
from zipfile import ZipFile
import openpyxl
from openpyxl.reader.excel import ExcelReader
from openpyxl.worksheet._reader import WorksheetReader
from openpyxl.worksheet.worksheet import Worksheet
from pandas.core.dtypes.common import is_list_like
from pandas.io.excel._util import fill_mi_header, maybe_convert_usecols
from pandas.io.parsers import TextParser
wb = openpyxl.load_workbook(filename=fname, read_only=True)
# Get shared strings
ex = ExcelReader(fname, read_only=True)
ex.read_manifest()
ex.read_strings()
# Convert string name 0 to string
if sheet_name == 0:
sheet_name = wb.sheetnames[sheet_name]
# get the worksheet to use with the worksheet reader
ws = Worksheet(wb)
# Read the raw data
with ZipFile(fname) as z:
with z.open("xl/worksheets/{}.xml".format(sheet_name)) as file:
file.seek(start)
bytes_data = file.read(end - start)
def update_row_nums(match):
"""
Update the row numbers to start at 1.
Parameters
----------
match : re.Match object
The match from the origin `re.sub` looking for row number tags.
Returns
-------
str
The updated string with new row numbers.
Notes
-----
This is needed because the parser we are using does not scale well if
the row numbers remain because empty rows are inserted for all "missing"
rows.
"""
b = match.group(0)
return re.sub(
rb"\d+",
lambda c: str(int(c.group(0).decode("utf-8")) - _skiprows).encode(
"utf-8"
),
b,
)
bytes_data = re.sub(rb'r="[A-Z]*\d+"', update_row_nums, bytes_data)
bytesio = BytesIO(excel_header + bytes_data + footer)
# Use openpyxl to read/parse sheet data
common_args = (ws, bytesio, ex.shared_strings, False)
if PandasExcelParser.need_rich_text_param():
reader = WorksheetReader(*common_args, rich_text=False)
else:
reader = WorksheetReader(*common_args)
# Attach cells to worksheet object
reader.bind_cells()
data = PandasExcelParser.get_sheet_data(ws, kwargs.pop("convert_float", True))
usecols = maybe_convert_usecols(kwargs.pop("usecols", None))
header = kwargs.pop("header", 0)
index_col = kwargs.pop("index_col", None)
# skiprows is handled externally
skiprows = None
# Handle header and create MultiIndex for columns if necessary
if is_list_like(header) and len(header) == 1:
header = header[0]
if header is not None and is_list_like(header):
control_row = [True] * len(data[0])
for row in header:
data[row], control_row = fill_mi_header(data[row], control_row)
# Handle MultiIndex for row Index if necessary
if is_list_like(index_col):
# Forward fill values for MultiIndex index.
if not is_list_like(header):
offset = 1 + header
else:
offset = 1 + max(header)
# Check if dataset is empty
if offset < len(data):
for col in index_col:
last = data[offset][col]
for row in range(offset + 1, len(data)):
if data[row][col] == "" or data[row][col] is None:
data[row][col] = last
else:
last = data[row][col]
parser = TextParser(
data,
header=header,
index_col=index_col,
has_index_names=is_list_like(header) and len(header) > 1,
skiprows=skiprows,
usecols=usecols,
skip_blank_lines=False,
**kwargs,
)
pandas_df = parser.read()
if (
len(pandas_df) > 1
and len(pandas_df.columns) != 0
and pandas_df.isnull().all().all()
):
# Drop NaN rows at the end of the DataFrame
pandas_df = pandas.DataFrame(columns=pandas_df.columns)
# Since we know the number of rows that occur before this partition, we can
# correctly assign the index in cases of RangeIndex. If it is not a RangeIndex,
# the index is already correct because it came from the data.
if isinstance(pandas_df.index, pandas.RangeIndex):
pandas_df.index = pandas.RangeIndex(
start=_skiprows, stop=len(pandas_df.index) + _skiprows
)
# We return the length if it is a RangeIndex (common case) to reduce
# serialization cost.
if index_col is not None:
index = pandas_df.index
else:
# The lengths will become the RangeIndex
index = len(pandas_df)
return _split_result_for_readers(1, num_splits, pandas_df) + [
index,
pandas_df.dtypes,
]
@doc(_doc_pandas_parser_class, data_type="JSON files")
| PandasExcelParser |
python | pytorch__pytorch | test/test_dataloader.py | {
"start": 25865,
"end": 26082
} | class ____(torch.utils.data.Dataset):
def __init__(self, len):
self.len = len
def __len__(self):
return self.len
def __getitem__(self, any):
return torch.empty(0)
| EmptyTensorDataset |
python | kamyu104__LeetCode-Solutions | Python/minimum-moves-to-capture-the-queen.py | {
"start": 36,
"end": 630
} | class ____(object):
def minMovesToCaptureTheQueen(self, a, b, c, d, e, f):
"""
:type a: int
:type b: int
:type c: int
:type d: int
:type e: int
:type f: int
:rtype: int
"""
if a == e and not (a == c and (b-d)*(f-d) < 0):
return 1
if b == f and not (b == d and (a-c)*(e-c) < 0):
return 1
if c+d == e+f and not (c+d == a+b and (c-a)*(e-a) < 0):
return 1
if c-d == e-f and not (c-d == a-b and (d-b)*(f-b) < 0):
return 1
return 2
| Solution |
python | getsentry__sentry | tests/sentry/relocation/tasks/test_process.py | {
"start": 105022,
"end": 105899
} | class ____(RelocationTaskTestCase):
def setUp(self) -> None:
RelocationTaskTestCase.setUp(self)
TransactionTestCase.setUp(self)
self.relocation.step = Relocation.Step.NOTIFYING.value
self.relocation.latest_task = OrderedTask.NOTIFYING_OWNER.name
self.relocation.save()
def test_success(self) -> None:
completed(self.uuid)
relocation = Relocation.objects.get(uuid=self.uuid)
assert relocation.status == Relocation.Status.SUCCESS.value
assert not relocation.failure_reason
@patch("sentry.backup.crypto.KeyManagementServiceClient")
@patch("sentry.relocation.tasks.process.CloudBuildClient")
@patch("sentry.relocation.utils.MessageBuilder")
@patch("sentry.signals.relocated.send_robust")
@patch("sentry.signals.relocation_redeem_promo_code.send_robust")
@patch("sentry.analytics.record")
| CompletedTest |
python | doocs__leetcode | solution/3100-3199/3112.Minimum Time to Visit Disappearing Nodes/Solution.py | {
"start": 0,
"end": 680
} | class ____:
def minimumTime(
self, n: int, edges: List[List[int]], disappear: List[int]
) -> List[int]:
g = defaultdict(list)
for u, v, w in edges:
g[u].append((v, w))
g[v].append((u, w))
dist = [inf] * n
dist[0] = 0
pq = [(0, 0)]
while pq:
du, u = heappop(pq)
if du > dist[u]:
continue
for v, w in g[u]:
if dist[v] > dist[u] + w and dist[u] + w < disappear[v]:
dist[v] = dist[u] + w
heappush(pq, (dist[v], v))
return [a if a < b else -1 for a, b in zip(dist, disappear)]
| Solution |
python | numba__numba | numba/core/typing/builtins.py | {
"start": 6894,
"end": 7240
} | class ____(ConcreteTemplate):
cases = [signature(types.float64, op1, op2)
for op1, op2 in itertools.product(machine_ints, machine_ints)]
cases += [signature(op, op, op) for op in sorted(types.real_domain)]
cases += [signature(op, op, op) for op in sorted(types.complex_domain)]
@infer_global(operator.floordiv)
| BinOpTrueDiv |
python | PyCQA__pylint | tests/functional/s/singledispatch/singledispatchmethod_function.py | {
"start": 721,
"end": 1113
} | class ____:
@singledispatchmethod
def convert_position(self, position):
pass
@convert_position.register
def _(self, position: str) -> tuple:
position_a, position_b = position.split(",")
return (int(position_a), int(position_b))
@convert_position.register
def _(self, position: tuple) -> str:
return f"{position[0]},{position[1]}"
| Board1 |
python | numpy__numpy | benchmarks/benchmarks/bench_shape_base.py | {
"start": 1938,
"end": 2536
} | class ____(Benchmark):
params = [[(16, 16), (64, 64), (256, 256), (1024, 1024)],
['uint8', 'uint16', 'uint32', 'uint64'],
[(2, 2), (4, 4)]]
param_names = ['shape', 'dtype', 'n_chunks']
def setup(self, shape, dtype, n_chunks):
self.block_list = [
[np.full(shape=[s // n_chunk for s, n_chunk in zip(shape, n_chunks)],
fill_value=1, dtype=dtype) for _ in range(n_chunks[1])]
for _ in range(n_chunks[0])
]
def time_block2d(self, shape, dtype, n_chunks):
np.block(self.block_list)
| Block2D |
python | kamyu104__LeetCode-Solutions | Python/number-of-pairs-of-interchangeable-rectangles.py | {
"start": 67,
"end": 464
} | class ____(object):
def interchangeableRectangles(self, rectangles):
"""
:type rectangles: List[List[int]]
:rtype: int
"""
count = collections.defaultdict(int)
for w, h in rectangles:
g = fractions.gcd(w, h) # Time: O(logx) ~= O(1)
count[(w//g, h//g)] += 1
return sum(c*(c-1)//2 for c in count.itervalues())
| Solution |
python | PyCQA__pylint | doc/data/messages/c/catching-non-exception/good.py | {
"start": 0,
"end": 79
} | class ____(Exception):
pass
try:
1 / 0
except FooError:
pass
| FooError |
python | HypothesisWorks__hypothesis | hypothesis-python/src/hypothesis/internal/conjecture/choice.py | {
"start": 1921,
"end": 2141
} | class ____:
type: Literal["simplest"]
count: int | None
def __post_init__(self) -> None:
if self.count is not None:
assert self.count > 0
@dataclass(slots=True, frozen=False)
| ChoiceTemplate |
python | django__django | django/contrib/gis/db/models/functions.py | {
"start": 15318,
"end": 17037
} | class ____(DistanceResultMixin, OracleToleranceMixin, GeoFunc):
def __init__(self, expr1, spheroid=True, **extra):
self.spheroid = spheroid
super().__init__(expr1, **extra)
def as_sql(self, compiler, connection, **extra_context):
if (
self.geo_field.geodetic(connection)
and not connection.features.supports_length_geodetic
):
raise NotSupportedError(
"This backend doesn't support Length on geodetic fields"
)
return super().as_sql(compiler, connection, **extra_context)
def as_postgresql(self, compiler, connection, **extra_context):
clone = self.copy()
function = None
if self.source_is_geography():
clone.source_expressions.append(Value(self.spheroid))
elif self.geo_field.geodetic(connection):
# Geometry fields with geodetic (lon/lat) coordinates need
# length_spheroid
function = connection.ops.spatial_function_name("LengthSpheroid")
clone.source_expressions.append(Value(self.geo_field.spheroid(connection)))
else:
dim = min(f.dim for f in self.get_source_fields() if f)
if dim > 2:
function = connection.ops.length3d
return super(Length, clone).as_sql(
compiler, connection, function=function, **extra_context
)
def as_sqlite(self, compiler, connection, **extra_context):
function = None
if self.geo_field.geodetic(connection):
function = "GeodesicLength" if self.spheroid else "GreatCircleLength"
return super().as_sql(compiler, connection, function=function, **extra_context)
| Length |
python | google__pytype | pytype/attribute_test.py | {
"start": 4925,
"end": 8028
} | class ____(test_base.UnitTest):
def setUp(self):
super().setUp()
options = config.Options.create(python_version=self.python_version)
self._ctx = test_utils.make_context(options)
def test_type_parameter_instance(self):
t = abstract.TypeParameter(abstract_utils.T, self._ctx)
t_instance = abstract.TypeParameterInstance(
t, self._ctx.convert.primitive_instances[str], self._ctx
)
node, var = self._ctx.attribute_handler.get_attribute(
self._ctx.root_node, t_instance, "upper"
)
self.assertIs(node, self._ctx.root_node)
(attr,) = var.data
self.assertIsInstance(attr, abstract.PyTDFunction)
def test_type_parameter_instance_bad_attribute(self):
t = abstract.TypeParameter(abstract_utils.T, self._ctx)
t_instance = abstract.TypeParameterInstance(
t, self._ctx.convert.primitive_instances[str], self._ctx
)
node, var = self._ctx.attribute_handler.get_attribute(
self._ctx.root_node, t_instance, "rumpelstiltskin"
)
self.assertIs(node, self._ctx.root_node)
self.assertIsNone(var)
def test_empty_type_parameter_instance(self):
t = abstract.TypeParameter(
abstract_utils.T, self._ctx, bound=self._ctx.convert.int_type
)
instance = abstract.Instance(self._ctx.convert.list_type, self._ctx)
t_instance = abstract.TypeParameterInstance(t, instance, self._ctx)
node, var = self._ctx.attribute_handler.get_attribute(
self._ctx.root_node, t_instance, "real"
)
self.assertIs(node, self._ctx.root_node)
(attr,) = var.data
self.assertIs(attr, self._ctx.convert.primitive_instances[int])
def test_type_parameter_instance_set_attribute(self):
t = abstract.TypeParameter(abstract_utils.T, self._ctx)
t_instance = abstract.TypeParameterInstance(
t, self._ctx.convert.primitive_instances[str], self._ctx
)
node = self._ctx.attribute_handler.set_attribute(
self._ctx.root_node,
t_instance,
"rumpelstiltskin",
self._ctx.new_unsolvable(self._ctx.root_node),
)
self.assertIs(node, self._ctx.root_node)
self.assertEqual(
str(self._ctx.errorlog).strip(),
"Can't assign attribute 'rumpelstiltskin' on str [not-writable]",
)
def test_union_set_attribute(self):
list_instance = abstract.Instance(self._ctx.convert.list_type, self._ctx)
cls = abstract.InterpreterClass("obj", [], {}, None, None, (), self._ctx)
cls_instance = abstract.Instance(cls, self._ctx)
union = abstract.Union([cls_instance, list_instance], self._ctx)
node = self._ctx.attribute_handler.set_attribute(
self._ctx.root_node,
union,
"rumpelstiltskin",
self._ctx.convert.none_type.to_variable(self._ctx.root_node),
)
self.assertEqual(
cls_instance.members["rumpelstiltskin"].data.pop(),
self._ctx.convert.none_type,
)
self.assertIs(node, self._ctx.root_node)
(error,) = self._ctx.errorlog.unique_sorted_errors()
self.assertEqual(error.name, "not-writable")
if __name__ == "__main__":
unittest.main()
| AttributeTest |
python | huggingface__transformers | src/transformers/models/gpt_bigcode/configuration_gpt_bigcode.py | {
"start": 777,
"end": 6375
} | class ____(PreTrainedConfig):
"""
This is the configuration class to store the configuration of a [`GPTBigCodeModel`]. It is used to instantiate a
GPTBigCode model according to the specified arguments, defining the model architecture. Instantiating a
configuration with the defaults will yield a similar configuration to that of the GPTBigCode
[gpt_bigcode](https://huggingface.co/gpt_bigcode) architecture.
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 50257):
Vocabulary size of the GPT-2 model. Defines the number of different tokens that can be represented by the
`inputs_ids` passed when calling [`GPTBigCodeModel`].
n_positions (`int`, *optional*, defaults to 1024):
The maximum sequence length that this model might ever be used with. Typically set this to something large
just in case (e.g., 512 or 1024 or 2048).
n_embd (`int`, *optional*, defaults to 768):
Dimensionality of the embeddings and hidden states.
n_layer (`int`, *optional*, defaults to 12):
Number of hidden layers in the Transformer encoder.
n_head (`int`, *optional*, defaults to 12):
Number of attention heads for each attention layer in the Transformer encoder.
n_inner (`int`, *optional*, defaults to None):
Dimensionality of the inner feed-forward layers. `None` will set it to 4 times n_embd
activation_function (`str`, *optional*, defaults to `"gelu_pytorch_tanh"`):
Activation function, to be selected in the list `["relu", "silu", "gelu", "tanh", "gelu_new",
"gelu_pytorch_tanh"]`.
resid_pdrop (`float`, *optional*, defaults to 0.1):
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
embd_pdrop (`float`, *optional*, defaults to 0.1):
The dropout ratio for the embeddings.
attn_pdrop (`float`, *optional*, defaults to 0.1):
The dropout ratio for the attention.
layer_norm_epsilon (`float`, *optional*, defaults to 1e-5):
The epsilon to use in the layer normalization layers.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
scale_attn_weights (`bool`, *optional*, defaults to `True`):
Scale attention weights by dividing by sqrt(hidden_size)..
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models).
attention_softmax_in_fp32 (`bool`, *optional*, defaults to `True`):
Whether to call the fused softmax in float32.
scale_attention_softmax_in_fp32 (`bool`, *optional*, defaults to `True`):
Whether to scale the attention softmax in float32.
attention_type (`bool`, *optional*, defaults to `True`):
Whether to use Multi-Query Attion (`True`) or Multi-Head Attention (`False`).
Example:
```python
>>> from transformers import GPTBigCodeConfig, GPTBigCodeModel
>>> # Initializing a GPTBigCode configuration
>>> configuration = GPTBigCodeConfig()
>>> # Initializing a model (with random weights) from the configuration
>>> model = GPTBigCodeModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "gpt_bigcode"
keys_to_ignore_at_inference = ["past_key_values"]
attribute_map = {
"hidden_size": "n_embd",
"max_position_embeddings": "n_positions",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__(
self,
vocab_size=50257,
n_positions=1024,
n_embd=768,
n_layer=12,
n_head=12,
n_inner=None,
activation_function="gelu_pytorch_tanh",
resid_pdrop=0.1,
embd_pdrop=0.1,
attn_pdrop=0.1,
layer_norm_epsilon=1e-5,
initializer_range=0.02,
scale_attn_weights=True,
use_cache=True,
bos_token_id=50256,
eos_token_id=50256,
attention_softmax_in_fp32=True,
scale_attention_softmax_in_fp32=True,
multi_query=True,
**kwargs,
):
self.vocab_size = vocab_size
self.n_positions = n_positions
self.n_embd = n_embd
self.n_layer = n_layer
self.n_head = n_head
self.n_inner = n_inner
self.activation_function = activation_function
self.resid_pdrop = resid_pdrop
self.embd_pdrop = embd_pdrop
self.attn_pdrop = attn_pdrop
self.layer_norm_epsilon = layer_norm_epsilon
self.initializer_range = initializer_range
self.scale_attn_weights = scale_attn_weights
self.use_cache = use_cache
self.attention_softmax_in_fp32 = attention_softmax_in_fp32
self.scale_attention_softmax_in_fp32 = scale_attention_softmax_in_fp32
self.multi_query = multi_query
self.num_key_value_heads = 1 if multi_query else n_head
self.bos_token_id = bos_token_id
self.eos_token_id = eos_token_id
super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
__all__ = ["GPTBigCodeConfig"]
| GPTBigCodeConfig |
python | ray-project__ray | doc/source/serve/doc_code/whisper_example.py | {
"start": 1239,
"end": 2736
} | class ____:
def __init__(self, model_size="large-v2"):
# Load model
from faster_whisper import WhisperModel
# Run on GPU with FP16
self.model = WhisperModel(model_size, device="cuda", compute_type="float16")
async def transcribe(self, file_path: str):
subprocess.check_call(["curl", "-o", "audio.mp3", "-sSfLO", file_path])
segments, info = self.model.transcribe(
"audio.mp3",
language="en",
initial_prompt="Here is the um, uh, Um, Uh, transcript.",
best_of=5,
beam_size=5,
word_timestamps=True,
)
whisper_alignments = []
transcript_text = ""
for seg in segments:
transcript_text += seg.text
whisper_alignments += clean_whisper_alignments(seg.words)
# Transcript change required to match gentle's tokenization with
# Whisper's word alignments
transcript_text = transcript_text.replace("% ", " percent ")
return {
"language": info.language,
"language_probability": info.language_probability,
"duration": info.duration,
"transcript_text": transcript_text,
"whisper_alignments": whisper_alignments,
}
async def __call__(self, req: starlette.requests.Request):
request = await req.json()
return await self.transcribe(file_path=request["filepath"])
entrypoint = WhisperModel.bind()
| WhisperModel |
python | networkx__networkx | networkx/algorithms/tests/test_regular.py | {
"start": 115,
"end": 1624
} | class ____:
@pytest.mark.parametrize("n", [3, 4, 5])
def test_k_factor_cycle(self, n):
g = nx.cycle_graph(n)
kf = nx.k_factor(g, 2)
assert g.edges == kf.edges
assert g.nodes == kf.nodes
@pytest.mark.parametrize("k", range(3))
def test_k_factor_grid(self, k):
g = nx.grid_2d_graph(4, 4)
kf = nx.k_factor(g, k)
assert g.nodes == kf.nodes
assert all(g.has_edge(*e) for e in kf.edges)
assert nx.is_k_regular(kf, k)
@pytest.mark.parametrize("k", range(6))
def test_k_factor_complete(self, k):
g = nx.complete_graph(6)
kf = nx.k_factor(g, k)
assert g.nodes == kf.nodes
assert all(g.has_edge(*e) for e in kf.edges)
assert nx.is_k_regular(kf, k)
def test_k_factor_degree(self):
g = nx.grid_2d_graph(4, 4)
with pytest.raises(nx.NetworkXUnfeasible, match=r"degree less than"):
nx.k_factor(g, 3)
def test_k_factor_no_matching(self):
g = nx.hexagonal_lattice_graph(4, 4)
# Perfect matching doesn't exist for 4,4 hexagonal lattice graph
with pytest.raises(nx.NetworkXUnfeasible, match=r"no perfect matching"):
nx.k_factor(g, 2)
@pytest.mark.parametrize("graph_type", [nx.DiGraph, nx.MultiGraph, nx.MultiDiGraph])
def test_k_factor_not_implemented(self, graph_type):
with pytest.raises(nx.NetworkXNotImplemented, match=r"not implemented for"):
nx.k_factor(graph_type(), 2)
| TestKFactor |
python | tensorflow__tensorflow | tensorflow/python/distribute/multi_worker_util_test.py | {
"start": 5183,
"end": 6888
} | class ____(test.TestCase):
def testChiefId(self):
cluster_spec = {
"chief": ["127.0.0.1:1234"],
"worker": ["127.0.0.1:8964", "127.0.0.1:2333"],
"ps": ["127.0.0.1:1926", "127.0.0.1:3141"]
}
self.assertEqual(
multi_worker_util.id_in_cluster(cluster_spec, "chief", 0), 0)
def testWorkerId(self):
cluster_spec = {
"chief": ["127.0.0.1:1234"],
"worker": ["127.0.0.1:8964", "127.0.0.1:2333"],
"ps": ["127.0.0.1:1926", "127.0.0.1:3141"]
}
self.assertEqual(
multi_worker_util.id_in_cluster(cluster_spec, "worker", 1), 2)
cluster_spec = {
"worker": ["127.0.0.1:8964", "127.0.0.1:2333"],
"ps": ["127.0.0.1:1926", "127.0.0.1:3141"]
}
self.assertEqual(
multi_worker_util.id_in_cluster(cluster_spec, "worker", 1), 1)
def testEvaluatorId(self):
cluster_spec = {
"chief": ["127.0.0.1:1234"],
"worker": ["127.0.0.1:8964", "127.0.0.1:2333"],
"evaluator": ["127.0.0.1:7566"]
}
self.assertEqual(
multi_worker_util.id_in_cluster(cluster_spec, "evaluator", 0), 0)
def testPsId(self):
cluster_spec = {"chief": ["127.0.0.1:1234"], "ps": ["127.0.0.1:7566"]}
with self.assertRaisesRegex(ValueError,
"There is no id for task_type 'ps'"):
multi_worker_util.id_in_cluster(cluster_spec, "ps", 0)
def testMultipleChiefs(self):
cluster_spec = {
"chief": ["127.0.0.1:8258", "127.0.0.1:7566"],
}
with self.assertRaisesRegex(ValueError,
"There must be at most one 'chief' job."):
multi_worker_util.id_in_cluster(cluster_spec, "chief", 0)
| IdInClusterTest |
python | sqlalchemy__sqlalchemy | test/ext/test_mutable.py | {
"start": 31113,
"end": 31938
} | class ____(_MutableListTestBase, fixtures.MappedTest):
__requires__ = ("array_type",)
@classmethod
def define_tables(cls, metadata):
from sqlalchemy.sql.sqltypes import ARRAY
MutableList = cls._type_fixture()
Base = declarative_base(metadata=metadata)
class Mixin:
data = Column(MutableList.as_mutable(ARRAY(Integer)))
class Foo(Mixin, Base):
__tablename__ = "foo"
id = Column(Integer, primary_key=True)
def test_in_place_mutation_str(self):
"""this test is hardcoded to integer, skip strings"""
def test_in_place_slice_mutation_str(self):
"""this test is hardcoded to integer, skip strings"""
def test_insert2(self):
"""This test does not work with arrays, skip it"""
| MutableColumnCopyArrayTest |
python | airbytehq__airbyte | airbyte-integrations/connectors/destination-qdrant/destination_qdrant/indexer.py | {
"start": 937,
"end": 5921
} | class ____(Indexer):
config: QdrantIndexingConfigModel
def __init__(self, config: QdrantIndexingConfigModel, embedding_dimensions: int):
super().__init__(config)
self.embedding_dimensions = embedding_dimensions
def check(self) -> Optional[str]:
auth_method_mode = self.config.auth_method.mode
if auth_method_mode == "api_key_auth" and not self.config.url.startswith("https://"):
return "Host must start with https://"
try:
self._create_client()
if not self._client:
return "Qdrant client is not alive."
available_collections = [collection.name for collection in self._client.get_collections().collections]
distance_metric = DISTANCE_METRIC_MAP[self.config.distance_metric]
if self.config.collection in available_collections:
collection_info = self._client.get_collection(collection_name=self.config.collection)
assert (
collection_info.config.params.vectors.size == self.embedding_dimensions
), "The collection's vector's size must match the embedding dimensions"
assert (
collection_info.config.params.vectors.distance == distance_metric
), "The colection's vector's distance metric must match the selected distance metric option"
else:
self._client.recreate_collection(
collection_name=self.config.collection,
vectors_config=VectorParams(size=self.embedding_dimensions, distance=distance_metric),
)
except Exception as e:
return format_exception(e)
finally:
if self._client:
self._client.close()
def pre_sync(self, catalog: ConfiguredAirbyteCatalog) -> None:
self._create_client()
streams_to_overwrite = [
create_stream_identifier(stream.stream)
for stream in catalog.streams
if stream.destination_sync_mode == DestinationSyncMode.overwrite
]
if streams_to_overwrite:
self._delete_for_filter(
models.FilterSelector(
filter=models.Filter(
should=[
models.FieldCondition(key=METADATA_STREAM_FIELD, match=models.MatchValue(value=stream))
for stream in streams_to_overwrite
]
)
)
)
for field in [METADATA_RECORD_ID_FIELD, METADATA_STREAM_FIELD]:
self._client.create_payload_index(
collection_name=self.config.collection, field_name=field, field_schema=PayloadSchemaType.KEYWORD
)
def delete(self, delete_ids, namespace, stream):
if len(delete_ids) > 0:
self._delete_for_filter(
models.FilterSelector(
filter=models.Filter(
should=[
models.FieldCondition(key=METADATA_RECORD_ID_FIELD, match=models.MatchValue(value=_id)) for _id in delete_ids
]
)
)
)
def index(self, document_chunks, namespace, stream):
entities = []
for i in range(len(document_chunks)):
chunk = document_chunks[i]
payload = chunk.metadata
if chunk.page_content is not None:
payload[self.config.text_field] = chunk.page_content
entities.append(
models.Record(
id=str(uuid.uuid4()),
payload=payload,
vector=chunk.embedding,
)
)
self._client.upload_records(collection_name=self.config.collection, records=entities)
def post_sync(self) -> List[AirbyteMessage]:
try:
self._client.close()
return [
AirbyteMessage(
type=Type.LOG, log=AirbyteLogMessage(level=Level.INFO, message="Qdrant Database Client has been closed successfully")
)
]
except Exception as e:
return [AirbyteMessage(type=Type.LOG, log=AirbyteLogMessage(level=Level.ERROR, message=format_exception(e)))]
def _create_client(self):
auth_method = self.config.auth_method
url = self.config.url
prefer_grpc = self.config.prefer_grpc
if auth_method.mode == "no_auth":
self._client = QdrantClient(url=url, prefer_grpc=prefer_grpc)
elif auth_method.mode == "api_key_auth":
api_key = auth_method.api_key
self._client = QdrantClient(url=url, prefer_grpc=prefer_grpc, api_key=api_key)
def _delete_for_filter(self, selector: PointsSelector) -> None:
self._client.delete(collection_name=self.config.collection, points_selector=selector)
| QdrantIndexer |
python | pypa__hatch | src/hatch/project/config.py | {
"start": 515,
"end": 22113
} | class ____:
def __init__(self, root, config, plugin_manager=None):
self.root = root
self.config = config
self.plugin_manager = plugin_manager
self._matrices = None
self._env = None
self._env_requires_complex = None
self._env_requires = None
self._env_collectors = None
self._envs = None
self._internal_envs = None
self._internal_matrices = None
self._matrix_variables = None
self._publish = None
self._scripts = None
self._cached_env_overrides = {}
@cached_property
def build(self):
config = self.config.get("build", {})
if not isinstance(config, dict):
message = "Field `tool.hatch.build` must be a table"
raise TypeError(message)
return BuildConfig(config)
@property
def env(self):
if self._env is None:
config = self.config.get("env", {})
if not isinstance(config, dict):
message = "Field `tool.hatch.env` must be a table"
raise TypeError(message)
self._env = config
return self._env
@property
def env_requires_complex(self) -> list[Dependency]:
if self._env_requires_complex is None:
from hatch.dep.core import Dependency, InvalidDependencyError
requires = self.env.get("requires", [])
if not isinstance(requires, list):
message = "Field `tool.hatch.env.requires` must be an array"
raise TypeError(message)
requires_complex = []
for i, entry in enumerate(requires, 1):
if not isinstance(entry, str):
message = f"Requirement #{i} in `tool.hatch.env.requires` must be a string"
raise TypeError(message)
try:
requires_complex.append(Dependency(entry))
except InvalidDependencyError as e:
message = f"Requirement #{i} in `tool.hatch.env.requires` is invalid: {e}"
raise ValueError(message) from None
self._env_requires_complex = requires_complex
return self._env_requires_complex
@property
def env_requires(self):
if self._env_requires is None:
self._env_requires = [str(r) for r in self.env_requires_complex]
return self._env_requires
@property
def env_collectors(self):
if self._env_collectors is None:
collectors = self.env.get("collectors", {})
if not isinstance(collectors, dict):
message = "Field `tool.hatch.env.collectors` must be a table"
raise TypeError(message)
final_config = {"default": {}}
for collector, config in collectors.items():
if not isinstance(config, dict):
message = f"Field `tool.hatch.env.collectors.{collector}` must be a table"
raise TypeError(message)
final_config[collector] = config
self._env_collectors = final_config
return self._env_collectors
@property
def matrices(self):
if self._matrices is None:
_ = self.envs
return self._matrices
@property
def matrix_variables(self):
if self._matrix_variables is None:
_ = self.envs
return self._matrix_variables
@property
def internal_envs(self):
if self._internal_envs is None:
_ = self.envs
return self._internal_envs
@property
def internal_matrices(self):
if self._internal_matrices is None:
_ = self.envs
return self._internal_matrices
@property
def envs(self):
from hatch.env.internal import get_internal_env_config
from hatch.utils.platform import get_platform_name
if self._envs is None:
env_config = self.config.get("envs", {})
if not isinstance(env_config, dict):
message = "Field `tool.hatch.envs` must be a table"
raise TypeError(message)
config = {}
environment_collectors = []
for collector, collector_config in self.env_collectors.items():
collector_class = self.plugin_manager.environment_collector.get(collector)
if collector_class is None:
message = f"Unknown environment collector: {collector}"
raise ValueError(message)
environment_collector = collector_class(self.root, collector_config)
environment_collectors.append(environment_collector)
for env_name, data in environment_collector.get_initial_config().items():
config.setdefault(env_name, data)
for env_name, data in env_config.items():
if not isinstance(data, dict):
message = f"Field `tool.hatch.envs.{env_name}` must be a table"
raise TypeError(message)
config.setdefault(env_name, {}).update(data)
for environment_collector in environment_collectors:
environment_collector.finalize_config(config)
# Prevent plugins from removing the default environment
ensure_valid_environment(config.setdefault("default", {}))
seen = set()
active = []
for env_name, data in config.items():
_populate_default_env_values(env_name, data, config, seen, active)
current_platform = get_platform_name()
all_matrices = {}
generated_envs = {}
final_config = {}
cached_overrides = {}
for env_name, raw_initial_config in config.items():
current_cached_overrides = cached_overrides[env_name] = {
"platform": [],
"env": [],
"matrix": [],
"name": [],
}
# Only shallow copying is necessary since we just want to modify keys
initial_config = raw_initial_config.copy()
matrix_name_format = initial_config.pop("matrix-name-format", "{value}")
if not isinstance(matrix_name_format, str):
message = f"Field `tool.hatch.envs.{env_name}.matrix-name-format` must be a string"
raise TypeError(message)
if "{value}" not in matrix_name_format:
message = (
f"Field `tool.hatch.envs.{env_name}.matrix-name-format` must "
f"contain at least the `{{value}}` placeholder"
)
raise ValueError(message)
overrides = initial_config.pop("overrides", {})
if not isinstance(overrides, dict):
message = f"Field `tool.hatch.envs.{env_name}.overrides` must be a table"
raise TypeError(message)
# Apply any configuration based on the current platform
platform_overrides = overrides.get("platform", {})
if not isinstance(platform_overrides, dict):
message = f"Field `tool.hatch.envs.{env_name}.overrides.platform` must be a table"
raise TypeError(message)
for platform, options in platform_overrides.items():
if not isinstance(options, dict):
message = f"Field `tool.hatch.envs.{env_name}.overrides.platform.{platform}` must be a table"
raise TypeError(message)
if platform != current_platform:
continue
apply_overrides(env_name, "platform", platform, current_platform, options, initial_config)
current_cached_overrides["platform"].append((platform, current_platform, options))
# Apply any configuration based on environment variables
env_var_overrides = overrides.get("env", {})
if not isinstance(env_var_overrides, dict):
message = f"Field `tool.hatch.envs.{env_name}.overrides.env` must be a table"
raise TypeError(message)
for env_var, options in env_var_overrides.items():
if not isinstance(options, dict):
message = f"Field `tool.hatch.envs.{env_name}.overrides.env.{env_var}` must be a table"
raise TypeError(message)
if env_var not in environ:
continue
apply_overrides(env_name, "env", env_var, environ[env_var], options, initial_config)
current_cached_overrides["env"].append((env_var, environ[env_var], options))
if "matrix" not in initial_config:
final_config[env_name] = initial_config
continue
matrices = initial_config.pop("matrix")
if not isinstance(matrices, list):
message = f"Field `tool.hatch.envs.{env_name}.matrix` must be an array"
raise TypeError(message)
matrix_overrides = overrides.get("matrix", {})
if not isinstance(matrix_overrides, dict):
message = f"Field `tool.hatch.envs.{env_name}.overrides.matrix` must be a table"
raise TypeError(message)
name_overrides = overrides.get("name", {})
if not isinstance(name_overrides, dict):
message = f"Field `tool.hatch.envs.{env_name}.overrides.name` must be a table"
raise TypeError(message)
matrix_data = all_matrices[env_name] = {"config": deepcopy(initial_config)}
all_envs = matrix_data["envs"] = {}
for i, raw_matrix in enumerate(matrices, 1):
matrix = raw_matrix
if not isinstance(matrix, dict):
message = f"Entry #{i} in field `tool.hatch.envs.{env_name}.matrix` must be a table"
raise TypeError(message)
if not matrix:
message = f"Matrix #{i} in field `tool.hatch.envs.{env_name}.matrix` cannot be empty"
raise ValueError(message)
for j, (variable, values) in enumerate(matrix.items(), 1):
if not variable:
message = (
f"Variable #{j} in matrix #{i} in field `tool.hatch.envs.{env_name}.matrix` "
f"cannot be an empty string"
)
raise ValueError(message)
if not isinstance(values, list):
message = (
f"Variable `{variable}` in matrix #{i} in field `tool.hatch.envs.{env_name}.matrix` "
f"must be an array"
)
raise TypeError(message)
if not values:
message = (
f"Variable `{variable}` in matrix #{i} in field `tool.hatch.envs.{env_name}.matrix` "
f"cannot be empty"
)
raise ValueError(message)
existing_values = set()
for k, value in enumerate(values, 1):
if not isinstance(value, str):
message = (
f"Value #{k} of variable `{variable}` in matrix #{i} in field "
f"`tool.hatch.envs.{env_name}.matrix` must be a string"
)
raise TypeError(message)
if not value:
message = (
f"Value #{k} of variable `{variable}` in matrix #{i} in field "
f"`tool.hatch.envs.{env_name}.matrix` cannot be an empty string"
)
raise ValueError(message)
if value in existing_values:
message = (
f"Value #{k} of variable `{variable}` in matrix #{i} in field "
f"`tool.hatch.envs.{env_name}.matrix` is a duplicate"
)
raise ValueError(message)
existing_values.add(value)
variables = {}
# Ensure that any Python variable comes first
python_selected = False
for variable in ("py", "python"):
if variable in matrix:
if python_selected:
message = (
f"Matrix #{i} in field `tool.hatch.envs.{env_name}.matrix` "
f"cannot contain both `py` and `python` variables"
)
raise ValueError(message)
python_selected = True
# Only shallow copying is necessary since we just want to remove a key
matrix = matrix.copy()
variables[variable] = matrix.pop(variable)
variables.update(matrix)
for result in product(*variables.values()):
# Make a value mapping for easy referencing
variable_values = dict(zip(variables, result, strict=False))
# Create the environment's initial configuration
new_config = deepcopy(initial_config)
cached_matrix_overrides = []
# Apply any configuration based on matrix variables
for variable, options in matrix_overrides.items():
if not isinstance(options, dict):
message = (
f"Field `tool.hatch.envs.{env_name}.overrides.matrix.{variable}` must be a table"
)
raise TypeError(message)
if variable not in variables:
continue
apply_overrides(
env_name, "matrix", variable, variable_values[variable], options, new_config
)
cached_matrix_overrides.append((variable, variable_values[variable], options))
# Construct the environment name
final_matrix_name_format = new_config.pop("matrix-name-format", matrix_name_format)
env_name_parts = []
for j, (variable, value) in enumerate(variable_values.items()):
if j == 0 and python_selected:
new_config["python"] = value
env_name_parts.append(value if value.startswith("py") else f"py{value}")
else:
env_name_parts.append(final_matrix_name_format.format(variable=variable, value=value))
new_env_name = "-".join(env_name_parts)
cached_name_overrides = []
# Apply any configuration based on the final name, minus the prefix for non-default environments
for pattern, options in name_overrides.items():
if not isinstance(options, dict):
message = f"Field `tool.hatch.envs.{env_name}.overrides.name.{pattern}` must be a table"
raise TypeError(message)
if not re.search(pattern, new_env_name):
continue
apply_overrides(env_name, "name", pattern, new_env_name, options, new_config)
cached_name_overrides.append((pattern, new_env_name, options))
if env_name != "default":
new_env_name = f"{env_name}.{new_env_name}"
# Save the generated environment
final_config[new_env_name] = new_config
cached_overrides[new_env_name] = {
"platform": current_cached_overrides["platform"],
"env": current_cached_overrides["env"],
"matrix": cached_matrix_overrides,
"name": cached_name_overrides,
}
all_envs[new_env_name] = variable_values
if "py" in variable_values:
all_envs[new_env_name] = {"python": variable_values.pop("py"), **variable_values}
# Remove the root matrix generator
del cached_overrides[env_name]
# Save the variables used to generate the environments
generated_envs.update(all_envs)
for environment_collector in environment_collectors:
environment_collector.finalize_environments(final_config)
self._matrices = all_matrices
self._internal_matrices = {}
self._envs = final_config
self._matrix_variables = generated_envs
self._cached_env_overrides.update(cached_overrides)
# Extract the internal environments
self._internal_envs = {}
for internal_name in get_internal_env_config():
try:
self._internal_envs[internal_name] = self._envs.pop(internal_name)
# Matrix
except KeyError:
self._internal_matrices[internal_name] = self._matrices.pop(internal_name)
for env_name in [env_name for env_name in self._envs if env_name.startswith(f"{internal_name}.")]:
self._internal_envs[env_name] = self._envs.pop(env_name)
return self._envs
@property
def publish(self):
if self._publish is None:
config = self.config.get("publish", {})
if not isinstance(config, dict):
message = "Field `tool.hatch.publish` must be a table"
raise TypeError(message)
for publisher, data in config.items():
if not isinstance(data, dict):
message = f"Field `tool.hatch.publish.{publisher}` must be a table"
raise TypeError(message)
self._publish = config
return self._publish
@property
def scripts(self):
if self._scripts is None:
script_config = self.config.get("scripts", {})
if not isinstance(script_config, dict):
message = "Field `tool.hatch.scripts` must be a table"
raise TypeError(message)
config = {}
for name, data in script_config.items():
if " " in name:
message = f"Script name `{name}` in field `tool.hatch.scripts` must not contain spaces"
raise ValueError(message)
commands = []
if isinstance(data, str):
commands.append(data)
elif isinstance(data, list):
for i, command in enumerate(data, 1):
if not isinstance(command, str):
message = f"Command #{i} in field `tool.hatch.scripts.{name}` must be a string"
raise TypeError(message)
commands.append(command)
else:
message = f"Field `tool.hatch.scripts.{name}` must be a string or an array of strings"
raise TypeError(message)
config[name] = commands
seen = {}
active = []
for script_name, commands in config.items():
commands[:] = expand_script_commands(script_name, commands, config, seen, active)
self._scripts = config
return self._scripts
def finalize_env_overrides(self, option_types):
# We lazily apply overrides because we need type information potentially defined by
# environment plugins for their options
if not self._cached_env_overrides:
return
for environments in (self.envs, self.internal_envs):
for env_name, config in environments.items():
for override_name, data in self._cached_env_overrides.get(env_name, {}).items():
for condition, condition_value, options in data:
apply_overrides(
env_name, override_name, condition, condition_value, options, config, option_types
)
self._cached_env_overrides.clear()
| ProjectConfig |
python | numpy__numpy | numpy/lib/_index_tricks_impl.py | {
"start": 10299,
"end": 14875
} | class ____:
"""
Translates slice objects to concatenation along an axis.
For detailed documentation on usage, see `r_`.
"""
__slots__ = ('axis', 'matrix', 'ndmin', 'trans1d')
# allow ma.mr_ to override this
concatenate = staticmethod(_nx.concatenate)
makemat = staticmethod(matrixlib.matrix)
def __init__(self, axis=0, matrix=False, ndmin=1, trans1d=-1):
self.axis = axis
self.matrix = matrix
self.trans1d = trans1d
self.ndmin = ndmin
def __getitem__(self, key):
# handle matrix builder syntax
if isinstance(key, str):
frame = sys._getframe().f_back
mymat = matrixlib.bmat(key, frame.f_globals, frame.f_locals)
return mymat
if not isinstance(key, tuple):
key = (key,)
# copy attributes, since they can be overridden in the first argument
trans1d = self.trans1d
ndmin = self.ndmin
matrix = self.matrix
axis = self.axis
objs = []
# dtypes or scalars for weak scalar handling in result_type
result_type_objs = []
for k, item in enumerate(key):
scalar = False
if isinstance(item, slice):
step = item.step
start = item.start
stop = item.stop
if start is None:
start = 0
if step is None:
step = 1
if isinstance(step, (_nx.complexfloating, complex)):
size = int(abs(step))
newobj = linspace(start, stop, num=size)
else:
newobj = _nx.arange(start, stop, step)
if ndmin > 1:
newobj = array(newobj, copy=None, ndmin=ndmin)
if trans1d != -1:
newobj = newobj.swapaxes(-1, trans1d)
elif isinstance(item, str):
if k != 0:
raise ValueError("special directives must be the "
"first entry.")
if item in ('r', 'c'):
matrix = True
col = (item == 'c')
continue
if ',' in item:
vec = item.split(',')
try:
axis, ndmin = [int(x) for x in vec[:2]]
if len(vec) == 3:
trans1d = int(vec[2])
continue
except Exception as e:
raise ValueError(
f"unknown special directive {item!r}"
) from e
try:
axis = int(item)
continue
except (ValueError, TypeError) as e:
raise ValueError("unknown special directive") from e
elif type(item) in ScalarType:
scalar = True
newobj = item
else:
item_ndim = np.ndim(item)
newobj = array(item, copy=None, subok=True, ndmin=ndmin)
if trans1d != -1 and item_ndim < ndmin:
k2 = ndmin - item_ndim
k1 = trans1d
if k1 < 0:
k1 += k2 + 1
defaxes = list(range(ndmin))
axes = defaxes[:k1] + defaxes[k2:] + defaxes[k1:k2]
newobj = newobj.transpose(axes)
objs.append(newobj)
if scalar:
result_type_objs.append(item)
else:
result_type_objs.append(newobj.dtype)
# Ensure that scalars won't up-cast unless warranted, for 0, drops
# through to error in concatenate.
if len(result_type_objs) != 0:
final_dtype = _nx.result_type(*result_type_objs)
# concatenate could do cast, but that can be overridden:
objs = [array(obj, copy=None, subok=True,
ndmin=ndmin, dtype=final_dtype) for obj in objs]
res = self.concatenate(tuple(objs), axis=axis)
if matrix:
oldndim = res.ndim
res = self.makemat(res)
if oldndim == 1 and col:
res = res.T
return res
def __len__(self):
return 0
# separate classes are used here instead of just making r_ = concatenator(0),
# etc. because otherwise we couldn't get the doc string to come out right
# in help(r_)
| AxisConcatenator |
python | optuna__optuna | optuna/visualization/_optimization_history.py | {
"start": 787,
"end": 923
} | class ____(NamedTuple):
values: list[float]
stds: list[float] | None
label_name: str
states: list[_ValueState]
| _ValuesInfo |
python | huggingface__transformers | src/transformers/models/ovis2/processing_ovis2.py | {
"start": 966,
"end": 1145
} | class ____(ProcessingKwargs, total=False):
_defaults = {
"text_kwargs": {
"padding": False,
},
"image_kwargs": {},
}
| Ovis2ProcessorKwargs |
python | apache__airflow | devel-common/src/tests_common/test_utils/timetables.py | {
"start": 1235,
"end": 2107
} | class ____(Timetable):
"""Custom timetable for testing serialization."""
def __init__(self, value: str):
self.value = value
@classmethod
def deserialize(cls, data):
return cls(data["value"])
def __eq__(self, other) -> bool:
"""Only for testing purposes."""
if not isinstance(other, CustomSerializationTimetable):
return False
return self.value == other.value
def __hash__(self):
return hash(self.value)
def serialize(self):
return {"value": self.value}
@property
def summary(self):
return f"{type(self).__name__}({self.value!r})"
def infer_manual_data_interval(self, *, run_after):
raise DataInterval.exact(run_after)
def next_dagrun_info(self, *, last_automated_data_interval, restriction):
return None
| CustomSerializationTimetable |
python | ray-project__ray | rllib/algorithms/bc/torch/default_bc_torch_rl_module.py | {
"start": 432,
"end": 1712
} | class ____(TorchRLModule, abc.ABC):
"""The default TorchRLModule used, if no custom RLModule is provided.
Builds an encoder net based on the observation space.
Builds a pi head based on the action space.
Passes observations from the input batch through the encoder, then the pi head to
compute action logits.
"""
def __init__(self, *args, **kwargs):
catalog_class = kwargs.pop("catalog_class", None)
if catalog_class is None:
catalog_class = BCCatalog
super().__init__(*args, **kwargs, catalog_class=catalog_class)
@override(RLModule)
def setup(self):
# Build model components (encoder and pi head) from catalog.
super().setup()
self._encoder = self.catalog.build_encoder(framework=self.framework)
self._pi_head = self.catalog.build_pi_head(framework=self.framework)
@override(TorchRLModule)
def _forward(self, batch: Dict, **kwargs) -> Dict[str, Any]:
"""Generic BC forward pass (for all phases of training/evaluation)."""
# Encoder embeddings.
encoder_outs = self._encoder(batch)
# Action dist inputs.
return {
Columns.ACTION_DIST_INPUTS: self._pi_head(encoder_outs[ENCODER_OUT]),
}
| DefaultBCTorchRLModule |
python | Netflix__metaflow | metaflow/metadata_provider/metadata.py | {
"start": 1448,
"end": 2170
} | class ____:
# Consider this list a constant that should never change.
# Lots of code depend on the membership of this list as
# well as exact ordering
_order_as_list = [
"root",
"flow",
"run",
"step",
"task",
"artifact",
"metadata",
"self",
]
_order_as_dict = {v: i for i, v in enumerate(_order_as_list)}
@staticmethod
def order_to_type(order):
if order < len(ObjectOrder._order_as_list):
return ObjectOrder._order_as_list[order]
return None
@staticmethod
def type_to_order(obj_type):
return ObjectOrder._order_as_dict.get(obj_type)
@with_metaclass(MetadataProviderMeta)
| ObjectOrder |
python | allegroai__clearml | clearml/backend_api/services/v2_23/projects.py | {
"start": 102134,
"end": 105880
} | class ____(Request):
"""
Get a list of distinct values for the chosen model metadata key
:param projects: Project IDs
:type projects: Sequence[str]
:param key: Metadata key
:type key: str
:param allow_public: If set to 'true' then collect values from both company and
public models otherwise company modeels only. The default is 'true'
:type allow_public: bool
:param include_subprojects: If set to 'true' and the project field is set then
the result includes metadata values from the subproject models
:type include_subprojects: bool
"""
_service = "projects"
_action = "get_model_metadata_values"
_version = "2.23"
_schema = {
"definitions": {},
"properties": {
"allow_public": {
"description": "If set to 'true' then collect values from both company and public models otherwise company modeels only. The default is 'true'",
"type": "boolean",
},
"include_subprojects": {
"default": True,
"description": "If set to 'true' and the project field is set then the result includes metadata values from the subproject models",
"type": "boolean",
},
"key": {"description": "Metadata key", "type": "string"},
"projects": {
"description": "Project IDs",
"items": {"type": "string"},
"type": ["array", "null"],
},
},
"required": ["key"],
"type": "object",
}
def __init__(
self,
key: str,
projects: Optional[List[str]] = None,
allow_public: Optional[bool] = None,
include_subprojects: Optional[bool] = True,
**kwargs: Any
) -> None:
super(GetModelMetadataValuesRequest, self).__init__(**kwargs)
self.projects = projects
self.key = key
self.allow_public = allow_public
self.include_subprojects = include_subprojects
@schema_property("projects")
def projects(self) -> Optional[List[str]]:
return self._property_projects
@projects.setter
def projects(self, value: Optional[List[str]]) -> None:
if value is None:
self._property_projects = None
return
self.assert_isinstance(value, "projects", (list, tuple))
self.assert_isinstance(value, "projects", six.string_types, is_array=True)
self._property_projects = value
@schema_property("key")
def key(self) -> str:
return self._property_key
@key.setter
def key(self, value: str) -> None:
if value is None:
self._property_key = None
return
self.assert_isinstance(value, "key", six.string_types)
self._property_key = value
@schema_property("allow_public")
def allow_public(self) -> Optional[bool]:
return self._property_allow_public
@allow_public.setter
def allow_public(self, value: Optional[bool]) -> None:
if value is None:
self._property_allow_public = None
return
self.assert_isinstance(value, "allow_public", (bool,))
self._property_allow_public = value
@schema_property("include_subprojects")
def include_subprojects(self) -> Optional[bool]:
return self._property_include_subprojects
@include_subprojects.setter
def include_subprojects(self, value: Optional[bool]) -> None:
if value is None:
self._property_include_subprojects = None
return
self.assert_isinstance(value, "include_subprojects", (bool,))
self._property_include_subprojects = value
| GetModelMetadataValuesRequest |
python | django__django | tests/composite_pk/test_delete.py | {
"start": 78,
"end": 3235
} | class ____(TestCase):
maxDiff = None
@classmethod
def setUpTestData(cls):
cls.tenant_1 = Tenant.objects.create()
cls.tenant_2 = Tenant.objects.create()
cls.user_1 = User.objects.create(
tenant=cls.tenant_1,
id=1,
email="user0001@example.com",
)
cls.user_2 = User.objects.create(
tenant=cls.tenant_2,
id=2,
email="user0002@example.com",
)
cls.comment_1 = Comment.objects.create(id=1, user=cls.user_1)
cls.comment_2 = Comment.objects.create(id=2, user=cls.user_2)
cls.comment_3 = Comment.objects.create(id=3, user=cls.user_2)
def test_delete_tenant_by_pk(self):
result = Tenant.objects.filter(pk=self.tenant_1.pk).delete()
self.assertEqual(
result,
(
3,
{
"composite_pk.Comment": 1,
"composite_pk.User": 1,
"composite_pk.Tenant": 1,
},
),
)
self.assertIs(Tenant.objects.filter(pk=self.tenant_1.pk).exists(), False)
self.assertIs(Tenant.objects.filter(pk=self.tenant_2.pk).exists(), True)
self.assertIs(User.objects.filter(pk=self.user_1.pk).exists(), False)
self.assertIs(User.objects.filter(pk=self.user_2.pk).exists(), True)
self.assertIs(Comment.objects.filter(pk=self.comment_1.pk).exists(), False)
self.assertIs(Comment.objects.filter(pk=self.comment_2.pk).exists(), True)
self.assertIs(Comment.objects.filter(pk=self.comment_3.pk).exists(), True)
def test_delete_user_by_pk(self):
result = User.objects.filter(pk=self.user_1.pk).delete()
self.assertEqual(
result, (2, {"composite_pk.User": 1, "composite_pk.Comment": 1})
)
self.assertIs(User.objects.filter(pk=self.user_1.pk).exists(), False)
self.assertIs(User.objects.filter(pk=self.user_2.pk).exists(), True)
self.assertIs(Comment.objects.filter(pk=self.comment_1.pk).exists(), False)
self.assertIs(Comment.objects.filter(pk=self.comment_2.pk).exists(), True)
self.assertIs(Comment.objects.filter(pk=self.comment_3.pk).exists(), True)
def test_delete_comments_by_user(self):
result = Comment.objects.filter(user=self.user_2).delete()
self.assertEqual(result, (2, {"composite_pk.Comment": 2}))
self.assertIs(Comment.objects.filter(pk=self.comment_1.pk).exists(), True)
self.assertIs(Comment.objects.filter(pk=self.comment_2.pk).exists(), False)
self.assertIs(Comment.objects.filter(pk=self.comment_3.pk).exists(), False)
def test_delete_without_pk(self):
    """delete() raises when the composite pk is unset or only partially set."""
    msg = (
        "Comment object can't be deleted because its pk attribute is set "
        "to None."
    )
    # Missing both parts, missing `id`, and missing `tenant_id` all count as
    # an unset composite pk.
    for instance in (Comment(), Comment(tenant_id=1), Comment(id=1)):
        with self.assertRaisesMessage(ValueError, msg):
            instance.delete()
| CompositePKDeleteTests |
python | huggingface__transformers | tests/models/gemma3/test_modeling_gemma3.py | {
"start": 2180,
"end": 11747
} | class ____(CausalLMModelTest, unittest.TestCase):
model_tester_class = Gemma3TextModelTester
_is_stateful = True
model_split_percents = [0.5, 0.6]
@unittest.skip("Gemma3 applies key/query norm which doesn't work with packing")
def test_flash_attention_2_padding_matches_padding_free_with_position_ids(self):
    # Key/query normalization makes packed and padded attention diverge, so the
    # shared padding-free equivalence check cannot hold for Gemma3.
    pass
@unittest.skip("Gemma3 applies key/query norm which doesn't work with packing")
def test_flash_attention_2_padding_matches_padding_free_with_position_ids_and_fa_kwargs(self):
    # Same packing incompatibility as above, here with flash-attention kwargs.
    pass
@unittest.skip("Gemma3 applies key/query norm which doesn't work with packing")
def test_eager_padding_matches_padding_free_with_position_ids(self):
    # Eager attention path; skipped for the same key/query-norm packing reason.
    pass
@unittest.skip("Gemma3 applies key/query norm which doesn't work with packing")
def test_sdpa_padding_matches_padding_free_with_position_ids(self):
    # SDPA attention path; skipped for the same key/query-norm packing reason.
    pass
@unittest.skip(
    "Gemma3 has no base model prefix which causes issues when loading base model from saved task model checkpoint"
)
def test_load_with_mismatched_shapes(self):
    # Without a base-model prefix the base weights cannot be remapped from a
    # task-model checkpoint, so this common loading test does not apply.
    pass
def test_generation_beyond_sliding_window_tiny_model(self):
    """Test generation with a tiny randomly initialised model whose input length is larger than the `sliding_window`.

    The model is configured with both `full_attention` and `sliding_attention` layers to make sure the hybrid cache
    and mask slicing logic is covered.
    """
    config = Gemma3TextConfig.from_pretrained("hf-internal-testing/tiny-random-Gemma3ForCausalLM")
    config.attn_implementation = "eager"
    # One global and one local layer so both hybrid-cache/mask branches run.
    config.layer_types = ["full_attention", "sliding_attention"]
    # The window (8) is deliberately shorter than the 10-token prompt below.
    config.sliding_window = 8
    config.max_position_embeddings = 128
    config.rope_parameters = {
        "full_attention": {"rope_type": "default", "rope_theta": 1000000},
        "sliding_attention": {"rope_type": "default", "rope_theta": 10000},
    }
    model = AutoModelForCausalLM.from_pretrained(
        "hf-internal-testing/tiny-random-Gemma3ForCausalLM", config=config
    ).to(torch_device)

    input_len = 10
    input_ids = torch.tensor(
        [
            [42300, 241087, 255445, 81315, 193760, 184471, 67719, 98191, 210651, 124725],
            [102294, 205314, 226646, 62020, 60245, 68025, 251839, 114053, 4695, 175511],
        ],
        device=torch_device,
    )
    attention_mask = torch.ones_like(input_ids).to(torch_device)

    # First call warms up the model cache with a single new token.
    with torch.no_grad():
        _ = model.generate(
            input_ids,
            attention_mask=attention_mask,
            max_new_tokens=1,
            do_sample=False,
            use_cache=True,
            disable_compile=True,
        )
    # 2 generations are needed to trigger https://github.com/huggingface/transformers/issues/39711
    # Since it requires model._cache to have been previously initialized
    output = model.generate(
        input_ids,
        attention_mask=attention_mask,
        max_new_tokens=5,
        do_sample=False,
        use_cache=True,
        disable_compile=True,
    )
    generated_sequences = output[:, input_len:].cpu()

    # Greedy decoding is deterministic, so the continuation is pinned exactly.
    EXPECTED_OUTPUT = torch.tensor([[90109, 90109, 90109, 83191, 83191], [246901, 69832, 69832, 69832, 62288]])
    torch.testing.assert_close(generated_sequences, EXPECTED_OUTPUT)
@parameterized.expand([("linear",), ("dynamic",), ("yarn",)])
@unittest.skip("TODO (joao): check why this is failing")
def test_model_rope_scaling_from_config(self):
    # Skipped for every scaling type until the underlying failure is diagnosed.
    pass
def test_model_rope_scaling_frequencies(self):
    """Tests the frequency properties of the different RoPE scaling types on the model RoPE layer."""
    config, _ = self.model_tester.prepare_config_and_inputs_for_common()

    # Retrieves the RoPE layer class from the base model class. Uses `.named_modules()` to avoid hardcoding the
    # named location of the RoPE layer class.
    base_model = self.model_tester.base_model_class(config)
    possible_rope_attributes = [
        "pos_emb",
        "rotary_emb",  # most common case
        "global_rotary_emb",
        "local_rotary_emb",
    ]
    for name, module in base_model.named_modules():
        if any(potential_name in name for potential_name in possible_rope_attributes):
            rope_class = type(module)
            break

    scaling_factor = 10
    short_input_length = 10
    long_input_length = int(config.max_position_embeddings * 1.5)

    # Inputs
    x = torch.randn(
        1, dtype=torch.float32, device=torch_device
    )  # used exclusively to get the dtype and the device
    position_ids_short = torch.arange(short_input_length, dtype=torch.long, device=torch_device)
    position_ids_short = position_ids_short.unsqueeze(0)
    position_ids_long = torch.arange(long_input_length, dtype=torch.long, device=torch_device)
    position_ids_long = position_ids_long.unsqueeze(0)

    # Sanity check original RoPE: short positions must be a prefix of long ones.
    rope_params = {"rope_type": "default", "rope_theta": 10_000.0}
    config.rope_parameters = {"full_attention": rope_params, "sliding_attention": rope_params}
    original_rope = rope_class(config=config).to(torch_device)
    original_cos_short, original_sin_short = original_rope(x, position_ids_short, layer_type="sliding_attention")
    original_cos_long, original_sin_long = original_rope(x, position_ids_long, layer_type="sliding_attention")
    torch.testing.assert_close(original_cos_short, original_cos_long[:, :short_input_length, :])
    torch.testing.assert_close(original_sin_short, original_sin_long[:, :short_input_length, :])

    # Sanity check linear RoPE scaling
    # New position "x" should match original position with index "x/scaling_factor"
    rope_params = {"rope_type": "linear", "factor": scaling_factor, "rope_theta": 10_000.0}
    config.rope_parameters = {"full_attention": rope_params, "sliding_attention": rope_params}
    linear_scaling_rope = rope_class(config=config).to(torch_device)
    linear_cos_short, linear_sin_short = linear_scaling_rope(x, position_ids_short, layer_type="sliding_attention")
    linear_cos_long, linear_sin_long = linear_scaling_rope(x, position_ids_long, layer_type="sliding_attention")
    torch.testing.assert_close(linear_cos_short, linear_cos_long[:, :short_input_length, :])
    torch.testing.assert_close(linear_sin_short, linear_sin_long[:, :short_input_length, :])
    for new_position in range(0, long_input_length, scaling_factor):
        original_position = int(new_position // scaling_factor)
        torch.testing.assert_close(linear_cos_long[:, new_position, :], original_cos_long[:, original_position, :])
        torch.testing.assert_close(linear_sin_long[:, new_position, :], original_sin_long[:, original_position, :])

    # Sanity check Dynamic NTK RoPE scaling
    # Scaling should only be observed after a long input is fed. We can observe that the frequencies increase
    # with scaling_factor (or that `inv_freq` decreases)
    rope_params = {"rope_type": "dynamic", "factor": scaling_factor, "rope_theta": 10_000.0}
    config.rope_parameters = {"full_attention": rope_params, "sliding_attention": rope_params}
    ntk_scaling_rope = rope_class(config=config).to(torch_device)
    ntk_cos_short, ntk_sin_short = ntk_scaling_rope(x, position_ids_short, layer_type="sliding_attention")
    ntk_cos_long, ntk_sin_long = ntk_scaling_rope(x, position_ids_long, layer_type="sliding_attention")
    torch.testing.assert_close(ntk_cos_short, original_cos_short)
    torch.testing.assert_close(ntk_sin_short, original_sin_short)
    with self.assertRaises(AssertionError):
        torch.testing.assert_close(ntk_cos_long, original_cos_long)
    with self.assertRaises(AssertionError):
        torch.testing.assert_close(ntk_sin_long, original_sin_long)
    self.assertTrue(
        (ntk_scaling_rope.sliding_attention_inv_freq <= original_rope.sliding_attention_inv_freq).all()
    )

    # Sanity check Yarn RoPE scaling
    # Scaling should be over the entire input
    rope_params = {"rope_type": "yarn", "factor": scaling_factor, "rope_theta": 10_000.0}
    config.rope_parameters = {"full_attention": rope_params, "sliding_attention": rope_params}
    yarn_scaling_rope = rope_class(config=config).to(torch_device)
    yarn_cos_short, yarn_sin_short = yarn_scaling_rope(x, position_ids_short, layer_type="sliding_attention")
    yarn_cos_long, yarn_sin_long = yarn_scaling_rope(x, position_ids_long, layer_type="sliding_attention")
    torch.testing.assert_close(yarn_cos_short, yarn_cos_long[:, :short_input_length, :])
    torch.testing.assert_close(yarn_sin_short, yarn_sin_long[:, :short_input_length, :])
    # Unlike dynamic NTK, yarn alters even the short-input frequencies.
    with self.assertRaises(AssertionError):
        torch.testing.assert_close(yarn_cos_short, original_cos_short)
    with self.assertRaises(AssertionError):
        torch.testing.assert_close(yarn_sin_short, original_sin_short)
    with self.assertRaises(AssertionError):
        torch.testing.assert_close(yarn_cos_long, original_cos_long)
    with self.assertRaises(AssertionError):
        torch.testing.assert_close(yarn_sin_long, original_sin_long)
| Gemma3TextModelTest |
python | apache__airflow | providers/databricks/tests/unit/databricks/utils/test_mixins.py | {
"start": 1008,
"end": 2184
} | class ____(DatabricksSQLStatementsMixin):
def __init__(self):
    # Connection / retry / polling configuration the mixin's methods read.
    self.databricks_conn_id = "databricks_conn_id"
    self.databricks_retry_limit = 3
    self.databricks_retry_delay = 60
    self.databricks_retry_args = None
    self.polling_period_seconds = 10
    self.statement_id = "statement_id"
    self.task_id = "task_id"
    self.timeout = 60

    # Utilities
    # Mocked collaborators so tests can assert on hook calls, deferrals,
    # and log output without touching a real Databricks workspace.
    self._hook = MagicMock()
    self.defer = MagicMock()
    self.log = MagicMock()
@pytest.fixture
def databricks_sql_statements():
    """Return a fresh DatabricksSQLStatements stub for each test."""
    return DatabricksSQLStatements()
@pytest.fixture
def terminal_success_state():
    """A mocked statement state that is terminal and successful."""
    terminal_success_state = MagicMock()
    terminal_success_state.is_terminal = True
    terminal_success_state.is_successful = True
    return terminal_success_state
@pytest.fixture
def terminal_failure_state():
    """A mocked statement state that is terminal and failed, with error details."""
    terminal_fail_state = MagicMock()
    terminal_fail_state.is_terminal = True
    terminal_fail_state.is_successful = False
    terminal_fail_state.state = "FAILED"
    terminal_fail_state.error_code = "123"
    terminal_fail_state.error_message = "Query failed"
    return terminal_fail_state
| DatabricksSQLStatements |
python | kubernetes-client__python | kubernetes/client/api/resource_v1_api.py | {
"start": 543,
"end": 450263
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, api_client=None):
    # Fall back to a default-configured ApiClient when the caller supplies none.
    if api_client is None:
        api_client = ApiClient()
    self.api_client = api_client
def create_device_class(self, body, **kwargs):  # noqa: E501
    """Create a DeviceClass.

    Synchronous by default; pass ``async_req=True`` to get a thread whose
    ``.get()`` returns the result.

    :param V1DeviceClass body: the DeviceClass to create (required)
    :param str pretty: pretty-print the output when 'true'
    :param str dry_run: validate the request without persisting ('All')
    :param str field_manager: name of the actor making this change
    :param str field_validation: Ignore/Warn/Strict handling of unknown fields
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
    :param _request_timeout: total timeout, or a (connect, read) tuple
    :return: V1DeviceClass (or the request thread when ``async_req=True``)
    """
    kwargs = dict(kwargs, _return_http_data_only=True)
    return self.create_device_class_with_http_info(body, **kwargs)  # noqa: E501
def create_device_class_with_http_info(self, body, **kwargs):  # noqa: E501
    """Create a DeviceClass, returning full HTTP response info.

    Same parameters as :meth:`create_device_class`, plus
    ``_return_http_data_only`` to strip the status code and headers.

    :return: tuple(V1DeviceClass, status_code(int), headers(HTTPHeaderDict)),
        or the request thread when ``async_req=True``
    """
    params = {'body': body}
    accepted = (
        'body', 'pretty', 'dry_run', 'field_manager', 'field_validation',
        'async_req', '_return_http_data_only', '_preload_content',
        '_request_timeout',
    )
    for key, val in kwargs.items():
        if key not in accepted:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method create_device_class" % key
            )
        params[key] = val

    # Client-side validation of required parameters, when enabled.
    if self.api_client.client_side_validation and params.get('body') is None:
        raise ApiValueError("Missing the required parameter `body` when calling `create_device_class`")  # noqa: E501

    # Map python argument names onto their wire-format query keys.
    query_params = []
    for py_name, wire_name in (
        ('pretty', 'pretty'),
        ('dry_run', 'dryRun'),
        ('field_manager', 'fieldManager'),
        ('field_validation', 'fieldValidation'),
    ):
        if params.get(py_name) is not None:
            query_params.append((wire_name, params[py_name]))

    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor'])  # noqa: E501
    }

    return self.api_client.call_api(
        '/apis/resource.k8s.io/v1/deviceclasses', 'POST',
        {},  # no path templating for a cluster-scoped create
        query_params,
        header_params,
        body=params.get('body'),
        post_params=[],
        files={},
        response_type='V1DeviceClass',  # noqa: E501
        auth_settings=['BearerToken'],
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={})
def create_namespaced_resource_claim(self, namespace, body, **kwargs):  # noqa: E501
    """Create a ResourceClaim in the given namespace.

    Synchronous by default; pass ``async_req=True`` to get a thread whose
    ``.get()`` returns the result.

    :param str namespace: object name and auth scope (required)
    :param ResourceV1ResourceClaim body: the ResourceClaim to create (required)
    :param str pretty: pretty-print the output when 'true'
    :param str dry_run: validate the request without persisting ('All')
    :param str field_manager: name of the actor making this change
    :param str field_validation: Ignore/Warn/Strict handling of unknown fields
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
    :param _request_timeout: total timeout, or a (connect, read) tuple
    :return: ResourceV1ResourceClaim (or the request thread when ``async_req=True``)
    """
    kwargs = dict(kwargs, _return_http_data_only=True)
    return self.create_namespaced_resource_claim_with_http_info(namespace, body, **kwargs)  # noqa: E501
def create_namespaced_resource_claim_with_http_info(self, namespace, body, **kwargs):  # noqa: E501
    """Create a ResourceClaim, returning full HTTP response info.

    Same parameters as :meth:`create_namespaced_resource_claim`, plus
    ``_return_http_data_only`` to strip the status code and headers.

    :return: tuple(ResourceV1ResourceClaim, status_code(int), headers(HTTPHeaderDict)),
        or the request thread when ``async_req=True``
    """
    params = {'namespace': namespace, 'body': body}
    accepted = (
        'namespace', 'body', 'pretty', 'dry_run', 'field_manager',
        'field_validation', 'async_req', '_return_http_data_only',
        '_preload_content', '_request_timeout',
    )
    for key, val in kwargs.items():
        if key not in accepted:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method create_namespaced_resource_claim" % key
            )
        params[key] = val

    # Client-side validation of required parameters, when enabled.
    if self.api_client.client_side_validation:
        if params.get('namespace') is None:
            raise ApiValueError("Missing the required parameter `namespace` when calling `create_namespaced_resource_claim`")  # noqa: E501
        if params.get('body') is None:
            raise ApiValueError("Missing the required parameter `body` when calling `create_namespaced_resource_claim`")  # noqa: E501

    path_params = {'namespace': params['namespace']}

    # Map python argument names onto their wire-format query keys.
    query_params = []
    for py_name, wire_name in (
        ('pretty', 'pretty'),
        ('dry_run', 'dryRun'),
        ('field_manager', 'fieldManager'),
        ('field_validation', 'fieldValidation'),
    ):
        if params.get(py_name) is not None:
            query_params.append((wire_name, params[py_name]))

    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor'])  # noqa: E501
    }

    return self.api_client.call_api(
        '/apis/resource.k8s.io/v1/namespaces/{namespace}/resourceclaims', 'POST',
        path_params,
        query_params,
        header_params,
        body=params.get('body'),
        post_params=[],
        files={},
        response_type='ResourceV1ResourceClaim',  # noqa: E501
        auth_settings=['BearerToken'],
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={})
def create_namespaced_resource_claim_template(self, namespace, body, **kwargs):  # noqa: E501
    """Create a ResourceClaimTemplate in the given namespace.

    Synchronous by default; pass ``async_req=True`` to get a thread whose
    ``.get()`` returns the result.

    :param str namespace: object name and auth scope (required)
    :param V1ResourceClaimTemplate body: the template to create (required)
    :param str pretty: pretty-print the output when 'true'
    :param str dry_run: validate the request without persisting ('All')
    :param str field_manager: name of the actor making this change
    :param str field_validation: Ignore/Warn/Strict handling of unknown fields
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
    :param _request_timeout: total timeout, or a (connect, read) tuple
    :return: V1ResourceClaimTemplate (or the request thread when ``async_req=True``)
    """
    kwargs = dict(kwargs, _return_http_data_only=True)
    return self.create_namespaced_resource_claim_template_with_http_info(namespace, body, **kwargs)  # noqa: E501
def create_namespaced_resource_claim_template_with_http_info(self, namespace, body, **kwargs): # noqa: E501
"""create_namespaced_resource_claim_template # noqa: E501
create a ResourceClaimTemplate # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_namespaced_resource_claim_template_with_http_info(namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1ResourceClaimTemplate body: (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1ResourceClaimTemplate, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'namespace',
'body',
'pretty',
'dry_run',
'field_manager',
'field_validation'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method create_namespaced_resource_claim_template" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'namespace' is set
if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
local_var_params['namespace'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `namespace` when calling `create_namespaced_resource_claim_template`") # noqa: E501
# verify the required parameter 'body' is set
if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
local_var_params['body'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `body` when calling `create_namespaced_resource_claim_template`") # noqa: E501
collection_formats = {}
path_params = {}
if 'namespace' in local_var_params:
path_params['namespace'] = local_var_params['namespace'] # noqa: E501
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/resource.k8s.io/v1/namespaces/{namespace}/resourceclaimtemplates', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1ResourceClaimTemplate', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def create_resource_slice(self, body, **kwargs): # noqa: E501
"""create_resource_slice # noqa: E501
create a ResourceSlice # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_resource_slice(body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param V1ResourceSlice body: (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1ResourceSlice
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.create_resource_slice_with_http_info(body, **kwargs) # noqa: E501
def create_resource_slice_with_http_info(self, body, **kwargs): # noqa: E501
"""create_resource_slice # noqa: E501
create a ResourceSlice # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_resource_slice_with_http_info(body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param V1ResourceSlice body: (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1ResourceSlice, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'body',
'pretty',
'dry_run',
'field_manager',
'field_validation'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method create_resource_slice" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'body' is set
if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
local_var_params['body'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `body` when calling `create_resource_slice`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/resource.k8s.io/v1/resourceslices', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1ResourceSlice', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def delete_collection_device_class(self, **kwargs): # noqa: E501
"""delete_collection_device_class # noqa: E501
delete collection of DeviceClass # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_collection_device_class(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
:param bool ignore_store_read_error_with_cluster_breaking_potential: if set to true, it will trigger an unsafe deletion of the resource in case the normal deletion flow fails with a corrupt object error. A resource is considered corrupt if it can not be retrieved from the underlying storage successfully because of a) its data can not be transformed e.g. decryption failure, or b) it fails to decode into an object. NOTE: unsafe deletion ignores finalizer constraints, skips precondition checks, and removes the object from the storage. WARNING: This may potentially break the cluster if the workload associated with the resource being unsafe-deleted relies on normal deletion flow. Use only if you REALLY know what you are doing. The default value is false, and the user must opt in to enable it
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
:param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
:param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
:param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param V1DeleteOptions body:
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1Status
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.delete_collection_device_class_with_http_info(**kwargs) # noqa: E501
def delete_collection_device_class_with_http_info(self, **kwargs): # noqa: E501
"""delete_collection_device_class # noqa: E501
delete collection of DeviceClass # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_collection_device_class_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
:param bool ignore_store_read_error_with_cluster_breaking_potential: if set to true, it will trigger an unsafe deletion of the resource in case the normal deletion flow fails with a corrupt object error. A resource is considered corrupt if it can not be retrieved from the underlying storage successfully because of a) its data can not be transformed e.g. decryption failure, or b) it fails to decode into an object. NOTE: unsafe deletion ignores finalizer constraints, skips precondition checks, and removes the object from the storage. WARNING: This may potentially break the cluster if the workload associated with the resource being unsafe-deleted relies on normal deletion flow. Use only if you REALLY know what you are doing. The default value is false, and the user must opt in to enable it
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
:param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
:param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
:param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param V1DeleteOptions body:
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1Status, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'pretty',
'_continue',
'dry_run',
'field_selector',
'grace_period_seconds',
'ignore_store_read_error_with_cluster_breaking_potential',
'label_selector',
'limit',
'orphan_dependents',
'propagation_policy',
'resource_version',
'resource_version_match',
'send_initial_events',
'timeout_seconds',
'body'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_collection_device_class" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if '_continue' in local_var_params and local_var_params['_continue'] is not None: # noqa: E501
query_params.append(('continue', local_var_params['_continue'])) # noqa: E501
if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None: # noqa: E501
query_params.append(('fieldSelector', local_var_params['field_selector'])) # noqa: E501
if 'grace_period_seconds' in local_var_params and local_var_params['grace_period_seconds'] is not None: # noqa: E501
query_params.append(('gracePeriodSeconds', local_var_params['grace_period_seconds'])) # noqa: E501
if 'ignore_store_read_error_with_cluster_breaking_potential' in local_var_params and local_var_params['ignore_store_read_error_with_cluster_breaking_potential'] is not None: # noqa: E501
query_params.append(('ignoreStoreReadErrorWithClusterBreakingPotential', local_var_params['ignore_store_read_error_with_cluster_breaking_potential'])) # noqa: E501
if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None: # noqa: E501
query_params.append(('labelSelector', local_var_params['label_selector'])) # noqa: E501
if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501
query_params.append(('limit', local_var_params['limit'])) # noqa: E501
if 'orphan_dependents' in local_var_params and local_var_params['orphan_dependents'] is not None: # noqa: E501
query_params.append(('orphanDependents', local_var_params['orphan_dependents'])) # noqa: E501
if 'propagation_policy' in local_var_params and local_var_params['propagation_policy'] is not None: # noqa: E501
query_params.append(('propagationPolicy', local_var_params['propagation_policy'])) # noqa: E501
if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None: # noqa: E501
query_params.append(('resourceVersion', local_var_params['resource_version'])) # noqa: E501
if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None: # noqa: E501
query_params.append(('resourceVersionMatch', local_var_params['resource_version_match'])) # noqa: E501
if 'send_initial_events' in local_var_params and local_var_params['send_initial_events'] is not None: # noqa: E501
query_params.append(('sendInitialEvents', local_var_params['send_initial_events'])) # noqa: E501
if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None: # noqa: E501
query_params.append(('timeoutSeconds', local_var_params['timeout_seconds'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/resource.k8s.io/v1/deviceclasses', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Status', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def delete_collection_namespaced_resource_claim(self, namespace, **kwargs):  # noqa: E501
    """delete_collection_namespaced_resource_claim  # noqa: E501

    Delete a collection of ResourceClaim resources in *namespace*.  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.delete_collection_namespaced_resource_claim(namespace, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param str pretty: pretty-print the output when 'true'
    :param str _continue: continuation token from a previous, possibly partial, list result
    :param str dry_run: when present, modifications are not persisted; 'All' is the only valid stage
    :param str field_selector: restrict the returned objects by their fields
    :param int grace_period_seconds: seconds before the object should be deleted; zero means immediately
    :param bool ignore_store_read_error_with_cluster_breaking_potential: opt in to unsafe deletion of corrupt objects
    :param str label_selector: restrict the returned objects by their labels
    :param int limit: maximum number of responses to return for a list call
    :param bool orphan_dependents: deprecated; use propagation_policy instead
    :param str propagation_policy: garbage collection mode: 'Orphan', 'Background' or 'Foreground'
    :param str resource_version: resource version constraint for the request
    :param str resource_version_match: how resource_version is applied to list calls
    :param bool send_initial_events: with watch=true, begin the stream with synthetic initial events
    :param int timeout_seconds: timeout for the list/watch call
    :param V1DeleteOptions body:
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: total request timeout, or a (connection, read)
                             timeout pair.
    :return: V1Status
             If the method is called asynchronously,
             returns the request thread.
    """
    # Delegate to the *_with_http_info variant, forcing it to strip the
    # (status_code, headers) portion so only the decoded body is returned.
    call_kwargs = dict(kwargs, _return_http_data_only=True)
    return self.delete_collection_namespaced_resource_claim_with_http_info(
        namespace, **call_kwargs)  # noqa: E501
def delete_collection_namespaced_resource_claim_with_http_info(self, namespace, **kwargs):  # noqa: E501
    """delete_collection_namespaced_resource_claim  # noqa: E501

    Delete a collection of ResourceClaim resources in *namespace*.  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.delete_collection_namespaced_resource_claim_with_http_info(namespace, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param str pretty: pretty-print the output when 'true'
    :param str _continue: continuation token from a previous, possibly partial, list result
    :param str dry_run: when present, modifications are not persisted; 'All' is the only valid stage
    :param str field_selector: restrict the returned objects by their fields
    :param int grace_period_seconds: seconds before the object should be deleted; zero means immediately
    :param bool ignore_store_read_error_with_cluster_breaking_potential: opt in to unsafe deletion of corrupt objects
    :param str label_selector: restrict the returned objects by their labels
    :param int limit: maximum number of responses to return for a list call
    :param bool orphan_dependents: deprecated; use propagation_policy instead
    :param str propagation_policy: garbage collection mode: 'Orphan', 'Background' or 'Foreground'
    :param str resource_version: resource version constraint for the request
    :param str resource_version_match: how resource_version is applied to list calls
    :param bool send_initial_events: with watch=true, begin the stream with synthetic initial events
    :param int timeout_seconds: timeout for the list/watch call
    :param V1DeleteOptions body:
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: total request timeout, or a (connection, read)
                             timeout pair.
    :return: tuple(V1Status, status_code(int), headers(HTTPHeaderDict))
             If the method is called asynchronously,
             returns the request thread.
    """
    # Accepted python-style kwargs mapped to their wire-format query names,
    # in the exact order they must be emitted on the request line.
    query_param_map = [
        ('pretty', 'pretty'),
        ('_continue', 'continue'),
        ('dry_run', 'dryRun'),
        ('field_selector', 'fieldSelector'),
        ('grace_period_seconds', 'gracePeriodSeconds'),
        ('ignore_store_read_error_with_cluster_breaking_potential', 'ignoreStoreReadErrorWithClusterBreakingPotential'),  # noqa: E501
        ('label_selector', 'labelSelector'),
        ('limit', 'limit'),
        ('orphan_dependents', 'orphanDependents'),
        ('propagation_policy', 'propagationPolicy'),
        ('resource_version', 'resourceVersion'),
        ('resource_version_match', 'resourceVersionMatch'),
        ('send_initial_events', 'sendInitialEvents'),
        ('timeout_seconds', 'timeoutSeconds'),
    ]

    recognized = {'namespace', 'body', 'async_req', '_return_http_data_only',
                  '_preload_content', '_request_timeout'}
    recognized.update(py_name for py_name, _ in query_param_map)

    # Validate keyword arguments and collect them into one parameter dict.
    local_var_params = {'namespace': namespace}
    for key, val in kwargs.items():
        if key not in recognized:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method delete_collection_namespaced_resource_claim" % key
            )
        local_var_params[key] = val

    # verify the required parameter 'namespace' is set
    if self.api_client.client_side_validation and local_var_params.get('namespace') is None:  # noqa: E501
        raise ApiValueError("Missing the required parameter `namespace` when calling `delete_collection_namespaced_resource_claim`")  # noqa: E501

    path_params = {'namespace': local_var_params['namespace']}

    # Emit only the query parameters that were actually supplied, keeping
    # the canonical ordering defined by query_param_map.
    query_params = [
        (wire_name, local_var_params[py_name])
        for py_name, wire_name in query_param_map
        if local_var_params.get(py_name) is not None
    ]

    # HTTP header `Accept`
    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor'])  # noqa: E501
    }

    body_params = local_var_params.get('body')

    # Authentication setting
    auth_settings = ['BearerToken']  # noqa: E501

    return self.api_client.call_api(
        '/apis/resource.k8s.io/v1/namespaces/{namespace}/resourceclaims', 'DELETE',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=[],
        files={},
        response_type='V1Status',  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats={})
def delete_collection_namespaced_resource_claim_template(self, namespace, **kwargs):  # noqa: E501
    """delete_collection_namespaced_resource_claim_template  # noqa: E501

    Delete a collection of ResourceClaimTemplate resources in *namespace*.  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.delete_collection_namespaced_resource_claim_template(namespace, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param str pretty: pretty-print the output when 'true'
    :param str _continue: continuation token from a previous, possibly partial, list result
    :param str dry_run: when present, modifications are not persisted; 'All' is the only valid stage
    :param str field_selector: restrict the returned objects by their fields
    :param int grace_period_seconds: seconds before the object should be deleted; zero means immediately
    :param bool ignore_store_read_error_with_cluster_breaking_potential: opt in to unsafe deletion of corrupt objects
    :param str label_selector: restrict the returned objects by their labels
    :param int limit: maximum number of responses to return for a list call
    :param bool orphan_dependents: deprecated; use propagation_policy instead
    :param str propagation_policy: garbage collection mode: 'Orphan', 'Background' or 'Foreground'
    :param str resource_version: resource version constraint for the request
    :param str resource_version_match: how resource_version is applied to list calls
    :param bool send_initial_events: with watch=true, begin the stream with synthetic initial events
    :param int timeout_seconds: timeout for the list/watch call
    :param V1DeleteOptions body:
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: total request timeout, or a (connection, read)
                             timeout pair.
    :return: V1Status
             If the method is called asynchronously,
             returns the request thread.
    """
    # Delegate to the *_with_http_info variant, forcing it to strip the
    # (status_code, headers) portion so only the decoded body is returned.
    call_kwargs = dict(kwargs, _return_http_data_only=True)
    return self.delete_collection_namespaced_resource_claim_template_with_http_info(
        namespace, **call_kwargs)  # noqa: E501
def delete_collection_namespaced_resource_claim_template_with_http_info(self, namespace, **kwargs): # noqa: E501
"""delete_collection_namespaced_resource_claim_template # noqa: E501
delete collection of ResourceClaimTemplate # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_collection_namespaced_resource_claim_template_with_http_info(namespace, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
:param bool ignore_store_read_error_with_cluster_breaking_potential: if set to true, it will trigger an unsafe deletion of the resource in case the normal deletion flow fails with a corrupt object error. A resource is considered corrupt if it can not be retrieved from the underlying storage successfully because of a) its data can not be transformed e.g. decryption failure, or b) it fails to decode into an object. NOTE: unsafe deletion ignores finalizer constraints, skips precondition checks, and removes the object from the storage. WARNING: This may potentially break the cluster if the workload associated with the resource being unsafe-deleted relies on normal deletion flow. Use only if you REALLY know what you are doing. The default value is false, and the user must opt in to enable it
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
:param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
:param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
:param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param V1DeleteOptions body:
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1Status, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'namespace',
'pretty',
'_continue',
'dry_run',
'field_selector',
'grace_period_seconds',
'ignore_store_read_error_with_cluster_breaking_potential',
'label_selector',
'limit',
'orphan_dependents',
'propagation_policy',
'resource_version',
'resource_version_match',
'send_initial_events',
'timeout_seconds',
'body'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_collection_namespaced_resource_claim_template" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'namespace' is set
if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
local_var_params['namespace'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `namespace` when calling `delete_collection_namespaced_resource_claim_template`") # noqa: E501
collection_formats = {}
path_params = {}
if 'namespace' in local_var_params:
path_params['namespace'] = local_var_params['namespace'] # noqa: E501
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if '_continue' in local_var_params and local_var_params['_continue'] is not None: # noqa: E501
query_params.append(('continue', local_var_params['_continue'])) # noqa: E501
if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None: # noqa: E501
query_params.append(('fieldSelector', local_var_params['field_selector'])) # noqa: E501
if 'grace_period_seconds' in local_var_params and local_var_params['grace_period_seconds'] is not None: # noqa: E501
query_params.append(('gracePeriodSeconds', local_var_params['grace_period_seconds'])) # noqa: E501
if 'ignore_store_read_error_with_cluster_breaking_potential' in local_var_params and local_var_params['ignore_store_read_error_with_cluster_breaking_potential'] is not None: # noqa: E501
query_params.append(('ignoreStoreReadErrorWithClusterBreakingPotential', local_var_params['ignore_store_read_error_with_cluster_breaking_potential'])) # noqa: E501
if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None: # noqa: E501
query_params.append(('labelSelector', local_var_params['label_selector'])) # noqa: E501
if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501
query_params.append(('limit', local_var_params['limit'])) # noqa: E501
if 'orphan_dependents' in local_var_params and local_var_params['orphan_dependents'] is not None: # noqa: E501
query_params.append(('orphanDependents', local_var_params['orphan_dependents'])) # noqa: E501
if 'propagation_policy' in local_var_params and local_var_params['propagation_policy'] is not None: # noqa: E501
query_params.append(('propagationPolicy', local_var_params['propagation_policy'])) # noqa: E501
if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None: # noqa: E501
query_params.append(('resourceVersion', local_var_params['resource_version'])) # noqa: E501
if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None: # noqa: E501
query_params.append(('resourceVersionMatch', local_var_params['resource_version_match'])) # noqa: E501
if 'send_initial_events' in local_var_params and local_var_params['send_initial_events'] is not None: # noqa: E501
query_params.append(('sendInitialEvents', local_var_params['send_initial_events'])) # noqa: E501
if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None: # noqa: E501
query_params.append(('timeoutSeconds', local_var_params['timeout_seconds'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/resource.k8s.io/v1/namespaces/{namespace}/resourceclaimtemplates', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Status', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def delete_collection_resource_slice(self, **kwargs): # noqa: E501
"""delete_collection_resource_slice # noqa: E501
delete collection of ResourceSlice # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_collection_resource_slice(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
:param bool ignore_store_read_error_with_cluster_breaking_potential: if set to true, it will trigger an unsafe deletion of the resource in case the normal deletion flow fails with a corrupt object error. A resource is considered corrupt if it can not be retrieved from the underlying storage successfully because of a) its data can not be transformed e.g. decryption failure, or b) it fails to decode into an object. NOTE: unsafe deletion ignores finalizer constraints, skips precondition checks, and removes the object from the storage. WARNING: This may potentially break the cluster if the workload associated with the resource being unsafe-deleted relies on normal deletion flow. Use only if you REALLY know what you are doing. The default value is false, and the user must opt in to enable it
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
:param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
:param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
:param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param V1DeleteOptions body:
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1Status
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.delete_collection_resource_slice_with_http_info(**kwargs) # noqa: E501
def delete_collection_resource_slice_with_http_info(self, **kwargs): # noqa: E501
"""delete_collection_resource_slice # noqa: E501
delete collection of ResourceSlice # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_collection_resource_slice_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
:param bool ignore_store_read_error_with_cluster_breaking_potential: if set to true, it will trigger an unsafe deletion of the resource in case the normal deletion flow fails with a corrupt object error. A resource is considered corrupt if it can not be retrieved from the underlying storage successfully because of a) its data can not be transformed e.g. decryption failure, or b) it fails to decode into an object. NOTE: unsafe deletion ignores finalizer constraints, skips precondition checks, and removes the object from the storage. WARNING: This may potentially break the cluster if the workload associated with the resource being unsafe-deleted relies on normal deletion flow. Use only if you REALLY know what you are doing. The default value is false, and the user must opt in to enable it
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
:param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
:param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
:param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param V1DeleteOptions body:
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1Status, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'pretty',
'_continue',
'dry_run',
'field_selector',
'grace_period_seconds',
'ignore_store_read_error_with_cluster_breaking_potential',
'label_selector',
'limit',
'orphan_dependents',
'propagation_policy',
'resource_version',
'resource_version_match',
'send_initial_events',
'timeout_seconds',
'body'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_collection_resource_slice" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if '_continue' in local_var_params and local_var_params['_continue'] is not None: # noqa: E501
query_params.append(('continue', local_var_params['_continue'])) # noqa: E501
if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None: # noqa: E501
query_params.append(('fieldSelector', local_var_params['field_selector'])) # noqa: E501
if 'grace_period_seconds' in local_var_params and local_var_params['grace_period_seconds'] is not None: # noqa: E501
query_params.append(('gracePeriodSeconds', local_var_params['grace_period_seconds'])) # noqa: E501
if 'ignore_store_read_error_with_cluster_breaking_potential' in local_var_params and local_var_params['ignore_store_read_error_with_cluster_breaking_potential'] is not None: # noqa: E501
query_params.append(('ignoreStoreReadErrorWithClusterBreakingPotential', local_var_params['ignore_store_read_error_with_cluster_breaking_potential'])) # noqa: E501
if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None: # noqa: E501
query_params.append(('labelSelector', local_var_params['label_selector'])) # noqa: E501
if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501
query_params.append(('limit', local_var_params['limit'])) # noqa: E501
if 'orphan_dependents' in local_var_params and local_var_params['orphan_dependents'] is not None: # noqa: E501
query_params.append(('orphanDependents', local_var_params['orphan_dependents'])) # noqa: E501
if 'propagation_policy' in local_var_params and local_var_params['propagation_policy'] is not None: # noqa: E501
query_params.append(('propagationPolicy', local_var_params['propagation_policy'])) # noqa: E501
if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None: # noqa: E501
query_params.append(('resourceVersion', local_var_params['resource_version'])) # noqa: E501
if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None: # noqa: E501
query_params.append(('resourceVersionMatch', local_var_params['resource_version_match'])) # noqa: E501
if 'send_initial_events' in local_var_params and local_var_params['send_initial_events'] is not None: # noqa: E501
query_params.append(('sendInitialEvents', local_var_params['send_initial_events'])) # noqa: E501
if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None: # noqa: E501
query_params.append(('timeoutSeconds', local_var_params['timeout_seconds'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/resource.k8s.io/v1/resourceslices', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Status', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def delete_device_class(self, name, **kwargs): # noqa: E501
"""delete_device_class # noqa: E501
delete a DeviceClass # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_device_class(name, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the DeviceClass (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
:param bool ignore_store_read_error_with_cluster_breaking_potential: if set to true, it will trigger an unsafe deletion of the resource in case the normal deletion flow fails with a corrupt object error. A resource is considered corrupt if it can not be retrieved from the underlying storage successfully because of a) its data can not be transformed e.g. decryption failure, or b) it fails to decode into an object. NOTE: unsafe deletion ignores finalizer constraints, skips precondition checks, and removes the object from the storage. WARNING: This may potentially break the cluster if the workload associated with the resource being unsafe-deleted relies on normal deletion flow. Use only if you REALLY know what you are doing. The default value is false, and the user must opt in to enable it
:param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
:param V1DeleteOptions body:
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1DeviceClass
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.delete_device_class_with_http_info(name, **kwargs) # noqa: E501
def delete_device_class_with_http_info(self, name, **kwargs): # noqa: E501
"""delete_device_class # noqa: E501
delete a DeviceClass # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_device_class_with_http_info(name, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the DeviceClass (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
:param bool ignore_store_read_error_with_cluster_breaking_potential: if set to true, it will trigger an unsafe deletion of the resource in case the normal deletion flow fails with a corrupt object error. A resource is considered corrupt if it can not be retrieved from the underlying storage successfully because of a) its data can not be transformed e.g. decryption failure, or b) it fails to decode into an object. NOTE: unsafe deletion ignores finalizer constraints, skips precondition checks, and removes the object from the storage. WARNING: This may potentially break the cluster if the workload associated with the resource being unsafe-deleted relies on normal deletion flow. Use only if you REALLY know what you are doing. The default value is false, and the user must opt in to enable it
:param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
:param V1DeleteOptions body:
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1DeviceClass, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'name',
'pretty',
'dry_run',
'grace_period_seconds',
'ignore_store_read_error_with_cluster_breaking_potential',
'orphan_dependents',
'propagation_policy',
'body'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_device_class" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'name' is set
if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
local_var_params['name'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `name` when calling `delete_device_class`") # noqa: E501
collection_formats = {}
path_params = {}
if 'name' in local_var_params:
path_params['name'] = local_var_params['name'] # noqa: E501
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
if 'grace_period_seconds' in local_var_params and local_var_params['grace_period_seconds'] is not None: # noqa: E501
query_params.append(('gracePeriodSeconds', local_var_params['grace_period_seconds'])) # noqa: E501
if 'ignore_store_read_error_with_cluster_breaking_potential' in local_var_params and local_var_params['ignore_store_read_error_with_cluster_breaking_potential'] is not None: # noqa: E501
query_params.append(('ignoreStoreReadErrorWithClusterBreakingPotential', local_var_params['ignore_store_read_error_with_cluster_breaking_potential'])) # noqa: E501
if 'orphan_dependents' in local_var_params and local_var_params['orphan_dependents'] is not None: # noqa: E501
query_params.append(('orphanDependents', local_var_params['orphan_dependents'])) # noqa: E501
if 'propagation_policy' in local_var_params and local_var_params['propagation_policy'] is not None: # noqa: E501
query_params.append(('propagationPolicy', local_var_params['propagation_policy'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/resource.k8s.io/v1/deviceclasses/{name}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1DeviceClass', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def delete_namespaced_resource_claim(self, name, namespace, **kwargs): # noqa: E501
"""delete_namespaced_resource_claim # noqa: E501
delete a ResourceClaim # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_namespaced_resource_claim(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the ResourceClaim (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
:param bool ignore_store_read_error_with_cluster_breaking_potential: if set to true, it will trigger an unsafe deletion of the resource in case the normal deletion flow fails with a corrupt object error. A resource is considered corrupt if it can not be retrieved from the underlying storage successfully because of a) its data can not be transformed e.g. decryption failure, or b) it fails to decode into an object. NOTE: unsafe deletion ignores finalizer constraints, skips precondition checks, and removes the object from the storage. WARNING: This may potentially break the cluster if the workload associated with the resource being unsafe-deleted relies on normal deletion flow. Use only if you REALLY know what you are doing. The default value is false, and the user must opt in to enable it
:param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
:param V1DeleteOptions body:
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: ResourceV1ResourceClaim
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.delete_namespaced_resource_claim_with_http_info(name, namespace, **kwargs) # noqa: E501
def delete_namespaced_resource_claim_with_http_info(self, name, namespace, **kwargs): # noqa: E501
"""delete_namespaced_resource_claim # noqa: E501
delete a ResourceClaim # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_namespaced_resource_claim_with_http_info(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the ResourceClaim (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
:param bool ignore_store_read_error_with_cluster_breaking_potential: if set to true, it will trigger an unsafe deletion of the resource in case the normal deletion flow fails with a corrupt object error. A resource is considered corrupt if it can not be retrieved from the underlying storage successfully because of a) its data can not be transformed e.g. decryption failure, or b) it fails to decode into an object. NOTE: unsafe deletion ignores finalizer constraints, skips precondition checks, and removes the object from the storage. WARNING: This may potentially break the cluster if the workload associated with the resource being unsafe-deleted relies on normal deletion flow. Use only if you REALLY know what you are doing. The default value is false, and the user must opt in to enable it
:param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
:param V1DeleteOptions body:
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(ResourceV1ResourceClaim, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'name',
'namespace',
'pretty',
'dry_run',
'grace_period_seconds',
'ignore_store_read_error_with_cluster_breaking_potential',
'orphan_dependents',
'propagation_policy',
'body'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_namespaced_resource_claim" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'name' is set
if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
local_var_params['name'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `name` when calling `delete_namespaced_resource_claim`") # noqa: E501
# verify the required parameter 'namespace' is set
if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
local_var_params['namespace'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `namespace` when calling `delete_namespaced_resource_claim`") # noqa: E501
collection_formats = {}
path_params = {}
if 'name' in local_var_params:
path_params['name'] = local_var_params['name'] # noqa: E501
if 'namespace' in local_var_params:
path_params['namespace'] = local_var_params['namespace'] # noqa: E501
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
if 'grace_period_seconds' in local_var_params and local_var_params['grace_period_seconds'] is not None: # noqa: E501
query_params.append(('gracePeriodSeconds', local_var_params['grace_period_seconds'])) # noqa: E501
if 'ignore_store_read_error_with_cluster_breaking_potential' in local_var_params and local_var_params['ignore_store_read_error_with_cluster_breaking_potential'] is not None: # noqa: E501
query_params.append(('ignoreStoreReadErrorWithClusterBreakingPotential', local_var_params['ignore_store_read_error_with_cluster_breaking_potential'])) # noqa: E501
if 'orphan_dependents' in local_var_params and local_var_params['orphan_dependents'] is not None: # noqa: E501
query_params.append(('orphanDependents', local_var_params['orphan_dependents'])) # noqa: E501
if 'propagation_policy' in local_var_params and local_var_params['propagation_policy'] is not None: # noqa: E501
query_params.append(('propagationPolicy', local_var_params['propagation_policy'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/resource.k8s.io/v1/namespaces/{namespace}/resourceclaims/{name}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ResourceV1ResourceClaim', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def delete_namespaced_resource_claim_template(self, name, namespace, **kwargs): # noqa: E501
"""delete_namespaced_resource_claim_template # noqa: E501
delete a ResourceClaimTemplate # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_namespaced_resource_claim_template(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the ResourceClaimTemplate (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
:param bool ignore_store_read_error_with_cluster_breaking_potential: if set to true, it will trigger an unsafe deletion of the resource in case the normal deletion flow fails with a corrupt object error. A resource is considered corrupt if it can not be retrieved from the underlying storage successfully because of a) its data can not be transformed e.g. decryption failure, or b) it fails to decode into an object. NOTE: unsafe deletion ignores finalizer constraints, skips precondition checks, and removes the object from the storage. WARNING: This may potentially break the cluster if the workload associated with the resource being unsafe-deleted relies on normal deletion flow. Use only if you REALLY know what you are doing. The default value is false, and the user must opt in to enable it
:param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
:param V1DeleteOptions body:
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1ResourceClaimTemplate
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.delete_namespaced_resource_claim_template_with_http_info(name, namespace, **kwargs) # noqa: E501
def delete_namespaced_resource_claim_template_with_http_info(self, name, namespace, **kwargs): # noqa: E501
"""delete_namespaced_resource_claim_template # noqa: E501
delete a ResourceClaimTemplate # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_namespaced_resource_claim_template_with_http_info(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the ResourceClaimTemplate (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
:param bool ignore_store_read_error_with_cluster_breaking_potential: if set to true, it will trigger an unsafe deletion of the resource in case the normal deletion flow fails with a corrupt object error. A resource is considered corrupt if it can not be retrieved from the underlying storage successfully because of a) its data can not be transformed e.g. decryption failure, or b) it fails to decode into an object. NOTE: unsafe deletion ignores finalizer constraints, skips precondition checks, and removes the object from the storage. WARNING: This may potentially break the cluster if the workload associated with the resource being unsafe-deleted relies on normal deletion flow. Use only if you REALLY know what you are doing. The default value is false, and the user must opt in to enable it
:param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
:param V1DeleteOptions body:
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1ResourceClaimTemplate, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'name',
'namespace',
'pretty',
'dry_run',
'grace_period_seconds',
'ignore_store_read_error_with_cluster_breaking_potential',
'orphan_dependents',
'propagation_policy',
'body'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_namespaced_resource_claim_template" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'name' is set
if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
local_var_params['name'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `name` when calling `delete_namespaced_resource_claim_template`") # noqa: E501
# verify the required parameter 'namespace' is set
if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
local_var_params['namespace'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `namespace` when calling `delete_namespaced_resource_claim_template`") # noqa: E501
collection_formats = {}
path_params = {}
if 'name' in local_var_params:
path_params['name'] = local_var_params['name'] # noqa: E501
if 'namespace' in local_var_params:
path_params['namespace'] = local_var_params['namespace'] # noqa: E501
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
if 'grace_period_seconds' in local_var_params and local_var_params['grace_period_seconds'] is not None: # noqa: E501
query_params.append(('gracePeriodSeconds', local_var_params['grace_period_seconds'])) # noqa: E501
if 'ignore_store_read_error_with_cluster_breaking_potential' in local_var_params and local_var_params['ignore_store_read_error_with_cluster_breaking_potential'] is not None: # noqa: E501
query_params.append(('ignoreStoreReadErrorWithClusterBreakingPotential', local_var_params['ignore_store_read_error_with_cluster_breaking_potential'])) # noqa: E501
if 'orphan_dependents' in local_var_params and local_var_params['orphan_dependents'] is not None: # noqa: E501
query_params.append(('orphanDependents', local_var_params['orphan_dependents'])) # noqa: E501
if 'propagation_policy' in local_var_params and local_var_params['propagation_policy'] is not None: # noqa: E501
query_params.append(('propagationPolicy', local_var_params['propagation_policy'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/resource.k8s.io/v1/namespaces/{namespace}/resourceclaimtemplates/{name}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1ResourceClaimTemplate', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def delete_resource_slice(self, name, **kwargs):  # noqa: E501
    """Delete a ResourceSlice.  # noqa: E501

    Synchronous by default; pass async_req=True to get a thread whose
    .get() yields the result instead.

    >>> thread = api.delete_resource_slice(name, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the ResourceSlice (required)
    :param str pretty: pretty-print the output when 'true'
    :param str dry_run: when present, modifications are not persisted; only valid value is 'All'
    :param int grace_period_seconds: non-negative seconds before deletion; zero means delete immediately
    :param bool ignore_store_read_error_with_cluster_breaking_potential: opt in to unsafe deletion of corrupt objects (skips finalizers and precondition checks; use with extreme care)
    :param bool orphan_dependents: deprecated; use propagation_policy instead
    :param str propagation_policy: garbage-collection policy: 'Orphan', 'Background' or 'Foreground'
    :param V1DeleteOptions body:
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
        without reading/decoding. Default is True.
    :param _request_timeout: total timeout, or a (connection, read) tuple
    :return: V1ResourceSlice
        If the method is called asynchronously, returns the request thread.
    """
    # Delegate to the *_with_http_info variant, instructing it to strip
    # the status code and headers from the returned tuple.
    forwarded = dict(kwargs, _return_http_data_only=True)
    return self.delete_resource_slice_with_http_info(name, **forwarded)
def delete_resource_slice_with_http_info(self, name, **kwargs):  # noqa: E501
    """Delete a ResourceSlice.  # noqa: E501

    Synchronous by default; pass async_req=True to get a thread whose
    .get() yields the result instead.

    >>> thread = api.delete_resource_slice_with_http_info(name, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the ResourceSlice (required)
    :param str pretty: pretty-print the output when 'true'
    :param str dry_run: when present, modifications are not persisted; only valid value is 'All'
    :param int grace_period_seconds: non-negative seconds before deletion; zero means delete immediately
    :param bool ignore_store_read_error_with_cluster_breaking_potential: opt in to unsafe deletion of corrupt objects (skips finalizers and precondition checks; use with extreme care)
    :param bool orphan_dependents: deprecated; use propagation_policy instead
    :param str propagation_policy: garbage-collection policy: 'Orphan', 'Background' or 'Foreground'
    :param V1DeleteOptions body:
    :param _return_http_data_only: response data without head status code
        and headers
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
        without reading/decoding. Default is True.
    :param _request_timeout: total timeout, or a (connection, read) tuple
    :return: tuple(V1ResourceSlice, status_code(int), headers(HTTPHeaderDict))
        If the method is called asynchronously, returns the request thread.
    """
    # Snapshot of the call frame: {'self', 'name', 'kwargs'} at this point.
    local_var_params = locals()

    all_params = [
        'name',
        'pretty',
        'dry_run',
        'grace_period_seconds',
        'ignore_store_read_error_with_cluster_breaking_potential',
        'orphan_dependents',
        'propagation_policy',
        'body'
    ]
    all_params.extend(
        [
            'async_req',
            '_return_http_data_only',
            '_preload_content',
            '_request_timeout'
        ]
    )

    # Flatten **kwargs into local_var_params, rejecting unknown keys.
    # NOTE: dict.items() replaces the Python-2-era six.iteritems() shim.
    for key, val in local_var_params['kwargs'].items():
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method delete_resource_slice" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']
    # verify the required parameter 'name' is set
    if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
                                                   local_var_params['name'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `name` when calling `delete_resource_slice`")  # noqa: E501

    collection_formats = {}

    path_params = {}
    if 'name' in local_var_params:
        path_params['name'] = local_var_params['name']  # noqa: E501

    # Build the query string from the optional parameters, in the same
    # order the generated code used.  (python_name, wire_name) pairs.
    query_params = []
    for param, wire_name in (
        ('pretty', 'pretty'),
        ('dry_run', 'dryRun'),
        ('grace_period_seconds', 'gracePeriodSeconds'),
        ('ignore_store_read_error_with_cluster_breaking_potential',
         'ignoreStoreReadErrorWithClusterBreakingPotential'),
        ('orphan_dependents', 'orphanDependents'),
        ('propagation_policy', 'propagationPolicy'),
    ):
        if local_var_params.get(param) is not None:
            query_params.append((wire_name, local_var_params[param]))

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = None
    if 'body' in local_var_params:
        body_params = local_var_params['body']
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor'])  # noqa: E501

    # Authentication setting
    auth_settings = ['BearerToken']  # noqa: E501

    return self.api_client.call_api(
        '/apis/resource.k8s.io/v1/resourceslices/{name}', 'DELETE',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='V1ResourceSlice',  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats)
def get_api_resources(self, **kwargs):  # noqa: E501
    """Get the available resources of this API group.  # noqa: E501

    Synchronous by default; pass async_req=True to get a thread whose
    .get() yields the result instead.

    >>> thread = api.get_api_resources(async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
        without reading/decoding. Default is True.
    :param _request_timeout: total timeout, or a (connection, read) tuple
    :return: V1APIResourceList
        If the method is called asynchronously, returns the request thread.
    """
    # Delegate to the *_with_http_info variant, instructing it to strip
    # the status code and headers from the returned tuple.
    forwarded = dict(kwargs, _return_http_data_only=True)
    return self.get_api_resources_with_http_info(**forwarded)
def get_api_resources_with_http_info(self, **kwargs):  # noqa: E501
    """Get the available resources of this API group.  # noqa: E501

    Synchronous by default; pass async_req=True to get a thread whose
    .get() yields the result instead.

    >>> thread = api.get_api_resources_with_http_info(async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param _return_http_data_only: response data without head status code
        and headers
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
        without reading/decoding. Default is True.
    :param _request_timeout: total timeout, or a (connection, read) tuple
    :return: tuple(V1APIResourceList, status_code(int), headers(HTTPHeaderDict))
        If the method is called asynchronously, returns the request thread.
    """
    # Snapshot of the call frame: {'self', 'kwargs'} at this point.
    local_var_params = locals()

    # This endpoint takes no operation-specific parameters; only the
    # generic request-control keywords are accepted.
    all_params = [
    ]
    all_params.extend(
        [
            'async_req',
            '_return_http_data_only',
            '_preload_content',
            '_request_timeout'
        ]
    )

    # Flatten **kwargs into local_var_params, rejecting unknown keys.
    # NOTE: dict.items() replaces the Python-2-era six.iteritems() shim.
    for key, val in local_var_params['kwargs'].items():
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_api_resources" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']

    collection_formats = {}

    path_params = {}

    query_params = []

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor'])  # noqa: E501

    # Authentication setting
    auth_settings = ['BearerToken']  # noqa: E501

    return self.api_client.call_api(
        '/apis/resource.k8s.io/v1/', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='V1APIResourceList',  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats)
def list_device_class(self, **kwargs):  # noqa: E501
    """List or watch objects of kind DeviceClass.  # noqa: E501

    Synchronous by default; pass async_req=True to get a thread whose
    .get() yields the result instead.

    >>> thread = api.list_device_class(async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str pretty: pretty-print the output when 'true'
    :param bool allow_watch_bookmarks: request BOOKMARK watch events (watch only)
    :param str _continue: continuation token from a previous list result
    :param str field_selector: restrict returned objects by their fields
    :param str label_selector: restrict returned objects by their labels
    :param int limit: maximum number of responses for a list call
    :param str resource_version: resource-version constraint for the read
    :param str resource_version_match: how resource_version is applied to list calls
    :param bool send_initial_events: with watch=True, emit synthetic initial events followed by a bookmark
    :param int timeout_seconds: timeout for the list/watch call
    :param bool watch: stream add/update/remove notifications instead of listing
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
        without reading/decoding. Default is True.
    :param _request_timeout: total timeout, or a (connection, read) tuple
    :return: V1DeviceClassList
        If the method is called asynchronously, returns the request thread.
    """
    # Delegate to the *_with_http_info variant, instructing it to strip
    # the status code and headers from the returned tuple.
    forwarded = dict(kwargs, _return_http_data_only=True)
    return self.list_device_class_with_http_info(**forwarded)
def list_device_class_with_http_info(self, **kwargs):  # noqa: E501
    """List or watch objects of kind DeviceClass.  # noqa: E501

    Synchronous by default; pass async_req=True to get a thread whose
    .get() yields the result instead.

    >>> thread = api.list_device_class_with_http_info(async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str pretty: pretty-print the output when 'true'
    :param bool allow_watch_bookmarks: request BOOKMARK watch events (watch only)
    :param str _continue: continuation token from a previous list result
    :param str field_selector: restrict returned objects by their fields
    :param str label_selector: restrict returned objects by their labels
    :param int limit: maximum number of responses for a list call
    :param str resource_version: resource-version constraint for the read
    :param str resource_version_match: how resource_version is applied to list calls
    :param bool send_initial_events: with watch=True, emit synthetic initial events followed by a bookmark
    :param int timeout_seconds: timeout for the list/watch call
    :param bool watch: stream add/update/remove notifications instead of listing
    :param _return_http_data_only: response data without head status code
        and headers
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
        without reading/decoding. Default is True.
    :param _request_timeout: total timeout, or a (connection, read) tuple
    :return: tuple(V1DeviceClassList, status_code(int), headers(HTTPHeaderDict))
        If the method is called asynchronously, returns the request thread.
    """
    # Snapshot of the call frame: {'self', 'kwargs'} at this point.
    local_var_params = locals()

    all_params = [
        'pretty',
        'allow_watch_bookmarks',
        '_continue',
        'field_selector',
        'label_selector',
        'limit',
        'resource_version',
        'resource_version_match',
        'send_initial_events',
        'timeout_seconds',
        'watch'
    ]
    all_params.extend(
        [
            'async_req',
            '_return_http_data_only',
            '_preload_content',
            '_request_timeout'
        ]
    )

    # Flatten **kwargs into local_var_params, rejecting unknown keys.
    # NOTE: dict.items() replaces the Python-2-era six.iteritems() shim.
    for key, val in local_var_params['kwargs'].items():
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method list_device_class" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']

    collection_formats = {}

    path_params = {}

    # Build the query string from the optional parameters, in the same
    # order the generated code used.  (python_name, wire_name) pairs.
    query_params = []
    for param, wire_name in (
        ('pretty', 'pretty'),
        ('allow_watch_bookmarks', 'allowWatchBookmarks'),
        ('_continue', 'continue'),
        ('field_selector', 'fieldSelector'),
        ('label_selector', 'labelSelector'),
        ('limit', 'limit'),
        ('resource_version', 'resourceVersion'),
        ('resource_version_match', 'resourceVersionMatch'),
        ('send_initial_events', 'sendInitialEvents'),
        ('timeout_seconds', 'timeoutSeconds'),
        ('watch', 'watch'),
    ):
        if local_var_params.get(param) is not None:
            query_params.append((wire_name, local_var_params[param]))

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch', 'application/cbor-seq'])  # noqa: E501

    # Authentication setting
    auth_settings = ['BearerToken']  # noqa: E501

    return self.api_client.call_api(
        '/apis/resource.k8s.io/v1/deviceclasses', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='V1DeviceClassList',  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats)
def list_namespaced_resource_claim(self, namespace, **kwargs): # noqa: E501
"""list_namespaced_resource_claim # noqa: E501
list or watch objects of kind ResourceClaim # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_namespaced_resource_claim(namespace, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
:param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
:param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1ResourceClaimList
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.list_namespaced_resource_claim_with_http_info(namespace, **kwargs) # noqa: E501
def list_namespaced_resource_claim_with_http_info(self, namespace, **kwargs): # noqa: E501
"""list_namespaced_resource_claim # noqa: E501
list or watch objects of kind ResourceClaim # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_namespaced_resource_claim_with_http_info(namespace, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
:param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
:param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1ResourceClaimList, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'namespace',
'pretty',
'allow_watch_bookmarks',
'_continue',
'field_selector',
'label_selector',
'limit',
'resource_version',
'resource_version_match',
'send_initial_events',
'timeout_seconds',
'watch'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method list_namespaced_resource_claim" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'namespace' is set
if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
local_var_params['namespace'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `namespace` when calling `list_namespaced_resource_claim`") # noqa: E501
collection_formats = {}
path_params = {}
if 'namespace' in local_var_params:
path_params['namespace'] = local_var_params['namespace'] # noqa: E501
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if 'allow_watch_bookmarks' in local_var_params and local_var_params['allow_watch_bookmarks'] is not None: # noqa: E501
query_params.append(('allowWatchBookmarks', local_var_params['allow_watch_bookmarks'])) # noqa: E501
if '_continue' in local_var_params and local_var_params['_continue'] is not None: # noqa: E501
query_params.append(('continue', local_var_params['_continue'])) # noqa: E501
if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None: # noqa: E501
query_params.append(('fieldSelector', local_var_params['field_selector'])) # noqa: E501
if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None: # noqa: E501
query_params.append(('labelSelector', local_var_params['label_selector'])) # noqa: E501
if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501
query_params.append(('limit', local_var_params['limit'])) # noqa: E501
if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None: # noqa: E501
query_params.append(('resourceVersion', local_var_params['resource_version'])) # noqa: E501
if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None: # noqa: E501
query_params.append(('resourceVersionMatch', local_var_params['resource_version_match'])) # noqa: E501
if 'send_initial_events' in local_var_params and local_var_params['send_initial_events'] is not None: # noqa: E501
query_params.append(('sendInitialEvents', local_var_params['send_initial_events'])) # noqa: E501
if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None: # noqa: E501
query_params.append(('timeoutSeconds', local_var_params['timeout_seconds'])) # noqa: E501
if 'watch' in local_var_params and local_var_params['watch'] is not None: # noqa: E501
query_params.append(('watch', local_var_params['watch'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch', 'application/cbor-seq']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/resource.k8s.io/v1/namespaces/{namespace}/resourceclaims', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1ResourceClaimList', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def list_namespaced_resource_claim_template(self, namespace, **kwargs): # noqa: E501
"""list_namespaced_resource_claim_template # noqa: E501
list or watch objects of kind ResourceClaimTemplate # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_namespaced_resource_claim_template(namespace, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
:param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
:param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1ResourceClaimTemplateList
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.list_namespaced_resource_claim_template_with_http_info(namespace, **kwargs) # noqa: E501
def list_namespaced_resource_claim_template_with_http_info(self, namespace, **kwargs):  # noqa: E501
    """list_namespaced_resource_claim_template  # noqa: E501

    list or watch objects of kind ResourceClaimTemplate  # noqa: E501

    Synchronous by default; pass ``async_req=True`` to issue the request
    asynchronously, in which case a thread is returned and ``thread.get()``
    yields the result.

    :param async_req bool: execute request asynchronously
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param str pretty: pretty-print the output when 'true'.
    :param bool allow_watch_bookmarks: request BOOKMARK watch events; servers may ignore this.
    :param str _continue: continuation token from a previous paginated list result.
    :param str field_selector: restrict the returned objects by their fields.
    :param str label_selector: restrict the returned objects by their labels.
    :param int limit: maximum number of responses to return for a list call.
    :param str resource_version: constrain which resource versions may serve the request.
    :param str resource_version_match: how resource_version is applied to list calls.
    :param bool send_initial_events: with watch=true, begin the stream with synthetic
        events describing the current collection state, ending with a Bookmark event.
    :param int timeout_seconds: timeout for the list/watch call.
    :param bool watch: stream change notifications instead of a one-shot list.
    :param _return_http_data_only: response data without head status code
        and headers
    :param _preload_content: if False, the urllib3.HTTPResponse object will
        be returned without reading/decoding response data. Default is True.
    :param _request_timeout: total request timeout, or a (connection, read) tuple.
    :return: tuple(V1ResourceClaimTemplateList, status_code(int), headers(HTTPHeaderDict))
        If the method is called asynchronously, returns the request thread.
    """
    # Keyword arguments this endpoint accepts, plus the transport-control
    # options common to every generated API method.
    recognized = [
        'namespace',
        'pretty',
        'allow_watch_bookmarks',
        '_continue',
        'field_selector',
        'label_selector',
        'limit',
        'resource_version',
        'resource_version_match',
        'send_initial_events',
        'timeout_seconds',
        'watch',
        'async_req',
        '_return_http_data_only',
        '_preload_content',
        '_request_timeout',
    ]

    params = {'namespace': namespace}
    for arg_name, arg_value in six.iteritems(kwargs):
        if arg_name not in recognized:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method list_namespaced_resource_claim_template" % arg_name
            )
        params[arg_name] = arg_value

    # Client-side check of the required path parameter.
    if self.api_client.client_side_validation and params.get('namespace') is None:  # noqa: E501
        raise ApiValueError("Missing the required parameter `namespace` when calling `list_namespaced_resource_claim_template`")  # noqa: E501

    collection_formats = {}

    path_params = {'namespace': params['namespace']}

    # (python_name, wire_name) pairs; order mirrors the generated client.
    query_name_map = [
        ('pretty', 'pretty'),
        ('allow_watch_bookmarks', 'allowWatchBookmarks'),
        ('_continue', 'continue'),
        ('field_selector', 'fieldSelector'),
        ('label_selector', 'labelSelector'),
        ('limit', 'limit'),
        ('resource_version', 'resourceVersion'),
        ('resource_version_match', 'resourceVersionMatch'),
        ('send_initial_events', 'sendInitialEvents'),
        ('timeout_seconds', 'timeoutSeconds'),
        ('watch', 'watch'),
    ]
    # Only explicitly supplied (non-None) values travel on the query string.
    query_params = [
        (wire_name, params[py_name])
        for py_name, wire_name in query_name_map
        if params.get(py_name) is not None
    ]

    # HTTP header `Accept`
    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch', 'application/cbor-seq'])  # noqa: E501
    }

    # Authentication setting
    auth_settings = ['BearerToken']  # noqa: E501

    return self.api_client.call_api(
        '/apis/resource.k8s.io/v1/namespaces/{namespace}/resourceclaimtemplates', 'GET',
        path_params,
        query_params,
        header_params,
        body=None,
        post_params=[],
        files={},
        response_type='V1ResourceClaimTemplateList',  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def list_resource_claim_for_all_namespaces(self, **kwargs):  # noqa: E501
    """list_resource_claim_for_all_namespaces  # noqa: E501

    list or watch objects of kind ResourceClaim  # noqa: E501

    Synchronous by default; pass ``async_req=True`` to issue the request
    asynchronously, in which case a thread is returned and ``thread.get()``
    yields the result.

    :param async_req bool: execute request asynchronously
    :param bool allow_watch_bookmarks: request BOOKMARK watch events; servers may ignore this.
    :param str _continue: continuation token from a previous paginated list result.
    :param str field_selector: restrict the returned objects by their fields.
    :param str label_selector: restrict the returned objects by their labels.
    :param int limit: maximum number of responses to return for a list call.
    :param str pretty: pretty-print the output when 'true'.
    :param str resource_version: constrain which resource versions may serve the request.
    :param str resource_version_match: how resource_version is applied to list calls.
    :param bool send_initial_events: with watch=true, begin the stream with synthetic
        events describing the current collection state, ending with a Bookmark event.
    :param int timeout_seconds: timeout for the list/watch call.
    :param bool watch: stream change notifications instead of a one-shot list.
    :param _preload_content: if False, the urllib3.HTTPResponse object will
        be returned without reading/decoding response data. Default is True.
    :param _request_timeout: total request timeout, or a (connection, read) tuple.
    :return: V1ResourceClaimList
        If the method is called asynchronously, returns the request thread.
    """
    # Convenience wrapper: callers receive only the deserialized body; the
    # *_with_http_info variant additionally returns status code and headers.
    kwargs['_return_http_data_only'] = True
    return self.list_resource_claim_for_all_namespaces_with_http_info(**kwargs)  # noqa: E501
def list_resource_claim_for_all_namespaces_with_http_info(self, **kwargs):  # noqa: E501
    """list_resource_claim_for_all_namespaces  # noqa: E501

    list or watch objects of kind ResourceClaim  # noqa: E501

    Synchronous by default; pass ``async_req=True`` to issue the request
    asynchronously, in which case a thread is returned and ``thread.get()``
    yields the result.

    :param async_req bool: execute request asynchronously
    :param bool allow_watch_bookmarks: request BOOKMARK watch events; servers may ignore this.
    :param str _continue: continuation token from a previous paginated list result.
    :param str field_selector: restrict the returned objects by their fields.
    :param str label_selector: restrict the returned objects by their labels.
    :param int limit: maximum number of responses to return for a list call.
    :param str pretty: pretty-print the output when 'true'.
    :param str resource_version: constrain which resource versions may serve the request.
    :param str resource_version_match: how resource_version is applied to list calls.
    :param bool send_initial_events: with watch=true, begin the stream with synthetic
        events describing the current collection state, ending with a Bookmark event.
    :param int timeout_seconds: timeout for the list/watch call.
    :param bool watch: stream change notifications instead of a one-shot list.
    :param _return_http_data_only: response data without head status code
        and headers
    :param _preload_content: if False, the urllib3.HTTPResponse object will
        be returned without reading/decoding response data. Default is True.
    :param _request_timeout: total request timeout, or a (connection, read) tuple.
    :return: tuple(V1ResourceClaimList, status_code(int), headers(HTTPHeaderDict))
        If the method is called asynchronously, returns the request thread.
    """
    # Keyword arguments this endpoint accepts, plus the transport-control
    # options common to every generated API method.
    recognized = [
        'allow_watch_bookmarks',
        '_continue',
        'field_selector',
        'label_selector',
        'limit',
        'pretty',
        'resource_version',
        'resource_version_match',
        'send_initial_events',
        'timeout_seconds',
        'watch',
        'async_req',
        '_return_http_data_only',
        '_preload_content',
        '_request_timeout',
    ]

    params = {}
    for arg_name, arg_value in six.iteritems(kwargs):
        if arg_name not in recognized:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method list_resource_claim_for_all_namespaces" % arg_name
            )
        params[arg_name] = arg_value

    collection_formats = {}

    path_params = {}

    # (python_name, wire_name) pairs; order mirrors the generated client.
    # NOTE: 'pretty' sits mid-list here, unlike the namespaced variants.
    query_name_map = [
        ('allow_watch_bookmarks', 'allowWatchBookmarks'),
        ('_continue', 'continue'),
        ('field_selector', 'fieldSelector'),
        ('label_selector', 'labelSelector'),
        ('limit', 'limit'),
        ('pretty', 'pretty'),
        ('resource_version', 'resourceVersion'),
        ('resource_version_match', 'resourceVersionMatch'),
        ('send_initial_events', 'sendInitialEvents'),
        ('timeout_seconds', 'timeoutSeconds'),
        ('watch', 'watch'),
    ]
    # Only explicitly supplied (non-None) values travel on the query string.
    query_params = [
        (wire_name, params[py_name])
        for py_name, wire_name in query_name_map
        if params.get(py_name) is not None
    ]

    # HTTP header `Accept`
    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch', 'application/cbor-seq'])  # noqa: E501
    }

    # Authentication setting
    auth_settings = ['BearerToken']  # noqa: E501

    return self.api_client.call_api(
        '/apis/resource.k8s.io/v1/resourceclaims', 'GET',
        path_params,
        query_params,
        header_params,
        body=None,
        post_params=[],
        files={},
        response_type='V1ResourceClaimList',  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def list_resource_claim_template_for_all_namespaces(self, **kwargs): # noqa: E501
"""list_resource_claim_template_for_all_namespaces # noqa: E501
list or watch objects of kind ResourceClaimTemplate # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_resource_claim_template_for_all_namespaces(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
:param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
:param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1ResourceClaimTemplateList
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.list_resource_claim_template_for_all_namespaces_with_http_info(**kwargs) # noqa: E501
def list_resource_claim_template_for_all_namespaces_with_http_info(self, **kwargs): # noqa: E501
"""list_resource_claim_template_for_all_namespaces # noqa: E501
list or watch objects of kind ResourceClaimTemplate # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_resource_claim_template_for_all_namespaces_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
:param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
:param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1ResourceClaimTemplateList, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'allow_watch_bookmarks',
'_continue',
'field_selector',
'label_selector',
'limit',
'pretty',
'resource_version',
'resource_version_match',
'send_initial_events',
'timeout_seconds',
'watch'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method list_resource_claim_template_for_all_namespaces" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'allow_watch_bookmarks' in local_var_params and local_var_params['allow_watch_bookmarks'] is not None: # noqa: E501
query_params.append(('allowWatchBookmarks', local_var_params['allow_watch_bookmarks'])) # noqa: E501
if '_continue' in local_var_params and local_var_params['_continue'] is not None: # noqa: E501
query_params.append(('continue', local_var_params['_continue'])) # noqa: E501
if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None: # noqa: E501
query_params.append(('fieldSelector', local_var_params['field_selector'])) # noqa: E501
if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None: # noqa: E501
query_params.append(('labelSelector', local_var_params['label_selector'])) # noqa: E501
if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501
query_params.append(('limit', local_var_params['limit'])) # noqa: E501
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None: # noqa: E501
query_params.append(('resourceVersion', local_var_params['resource_version'])) # noqa: E501
if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None: # noqa: E501
query_params.append(('resourceVersionMatch', local_var_params['resource_version_match'])) # noqa: E501
if 'send_initial_events' in local_var_params and local_var_params['send_initial_events'] is not None: # noqa: E501
query_params.append(('sendInitialEvents', local_var_params['send_initial_events'])) # noqa: E501
if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None: # noqa: E501
query_params.append(('timeoutSeconds', local_var_params['timeout_seconds'])) # noqa: E501
if 'watch' in local_var_params and local_var_params['watch'] is not None: # noqa: E501
query_params.append(('watch', local_var_params['watch'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch', 'application/cbor-seq']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/resource.k8s.io/v1/resourceclaimtemplates', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1ResourceClaimTemplateList', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def list_resource_slice(self, **kwargs): # noqa: E501
"""list_resource_slice # noqa: E501
list or watch objects of kind ResourceSlice # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_resource_slice(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
:param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
:param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1ResourceSliceList
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.list_resource_slice_with_http_info(**kwargs) # noqa: E501
def list_resource_slice_with_http_info(self, **kwargs): # noqa: E501
"""list_resource_slice # noqa: E501
list or watch objects of kind ResourceSlice # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_resource_slice_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
:param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
:param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1ResourceSliceList, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'pretty',
'allow_watch_bookmarks',
'_continue',
'field_selector',
'label_selector',
'limit',
'resource_version',
'resource_version_match',
'send_initial_events',
'timeout_seconds',
'watch'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method list_resource_slice" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if 'allow_watch_bookmarks' in local_var_params and local_var_params['allow_watch_bookmarks'] is not None: # noqa: E501
query_params.append(('allowWatchBookmarks', local_var_params['allow_watch_bookmarks'])) # noqa: E501
if '_continue' in local_var_params and local_var_params['_continue'] is not None: # noqa: E501
query_params.append(('continue', local_var_params['_continue'])) # noqa: E501
if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None: # noqa: E501
query_params.append(('fieldSelector', local_var_params['field_selector'])) # noqa: E501
if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None: # noqa: E501
query_params.append(('labelSelector', local_var_params['label_selector'])) # noqa: E501
if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501
query_params.append(('limit', local_var_params['limit'])) # noqa: E501
if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None: # noqa: E501
query_params.append(('resourceVersion', local_var_params['resource_version'])) # noqa: E501
if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None: # noqa: E501
query_params.append(('resourceVersionMatch', local_var_params['resource_version_match'])) # noqa: E501
if 'send_initial_events' in local_var_params and local_var_params['send_initial_events'] is not None: # noqa: E501
query_params.append(('sendInitialEvents', local_var_params['send_initial_events'])) # noqa: E501
if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None: # noqa: E501
query_params.append(('timeoutSeconds', local_var_params['timeout_seconds'])) # noqa: E501
if 'watch' in local_var_params and local_var_params['watch'] is not None: # noqa: E501
query_params.append(('watch', local_var_params['watch'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch', 'application/cbor-seq']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/resource.k8s.io/v1/resourceslices', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1ResourceSliceList', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def patch_device_class(self, name, body, **kwargs): # noqa: E501
"""patch_device_class # noqa: E501
partially update the specified DeviceClass # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_device_class(name, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the DeviceClass (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
:param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
:param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1DeviceClass
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.patch_device_class_with_http_info(name, body, **kwargs) # noqa: E501
def patch_device_class_with_http_info(self, name, body, **kwargs): # noqa: E501
"""patch_device_class # noqa: E501
partially update the specified DeviceClass # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_device_class_with_http_info(name, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the DeviceClass (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
:param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
:param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1DeviceClass, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'name',
'body',
'pretty',
'dry_run',
'field_manager',
'field_validation',
'force'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method patch_device_class" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'name' is set
if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
local_var_params['name'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `name` when calling `patch_device_class`") # noqa: E501
# verify the required parameter 'body' is set
if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
local_var_params['body'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `body` when calling `patch_device_class`") # noqa: E501
collection_formats = {}
path_params = {}
if 'name' in local_var_params:
path_params['name'] = local_var_params['name'] # noqa: E501
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
if 'force' in local_var_params and local_var_params['force'] is not None: # noqa: E501
query_params.append(('force', local_var_params['force'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json', 'application/apply-patch+yaml', 'application/apply-patch+cbor']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/resource.k8s.io/v1/deviceclasses/{name}', 'PATCH',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1DeviceClass', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def patch_namespaced_resource_claim(self, name, namespace, body, **kwargs): # noqa: E501
"""patch_namespaced_resource_claim # noqa: E501
partially update the specified ResourceClaim # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_namespaced_resource_claim(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the ResourceClaim (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
:param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
:param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: ResourceV1ResourceClaim
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.patch_namespaced_resource_claim_with_http_info(name, namespace, body, **kwargs) # noqa: E501
def patch_namespaced_resource_claim_with_http_info(self, name, namespace, body, **kwargs): # noqa: E501
"""patch_namespaced_resource_claim # noqa: E501
partially update the specified ResourceClaim # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_namespaced_resource_claim_with_http_info(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the ResourceClaim (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
:param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
:param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(ResourceV1ResourceClaim, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'name',
'namespace',
'body',
'pretty',
'dry_run',
'field_manager',
'field_validation',
'force'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method patch_namespaced_resource_claim" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'name' is set
if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
local_var_params['name'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `name` when calling `patch_namespaced_resource_claim`") # noqa: E501
# verify the required parameter 'namespace' is set
if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
local_var_params['namespace'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `namespace` when calling `patch_namespaced_resource_claim`") # noqa: E501
# verify the required parameter 'body' is set
if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
local_var_params['body'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `body` when calling `patch_namespaced_resource_claim`") # noqa: E501
collection_formats = {}
path_params = {}
if 'name' in local_var_params:
path_params['name'] = local_var_params['name'] # noqa: E501
if 'namespace' in local_var_params:
path_params['namespace'] = local_var_params['namespace'] # noqa: E501
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
if 'force' in local_var_params and local_var_params['force'] is not None: # noqa: E501
query_params.append(('force', local_var_params['force'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json', 'application/apply-patch+yaml', 'application/apply-patch+cbor']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/resource.k8s.io/v1/namespaces/{namespace}/resourceclaims/{name}', 'PATCH',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ResourceV1ResourceClaim', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def patch_namespaced_resource_claim_status(self, name, namespace, body, **kwargs): # noqa: E501
"""patch_namespaced_resource_claim_status # noqa: E501
partially update status of the specified ResourceClaim # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_namespaced_resource_claim_status(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the ResourceClaim (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
:param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
:param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: ResourceV1ResourceClaim
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.patch_namespaced_resource_claim_status_with_http_info(name, namespace, body, **kwargs) # noqa: E501
def patch_namespaced_resource_claim_status_with_http_info(self, name, namespace, body, **kwargs): # noqa: E501
"""patch_namespaced_resource_claim_status # noqa: E501
partially update status of the specified ResourceClaim # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_namespaced_resource_claim_status_with_http_info(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the ResourceClaim (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
:param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
:param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(ResourceV1ResourceClaim, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'name',
'namespace',
'body',
'pretty',
'dry_run',
'field_manager',
'field_validation',
'force'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method patch_namespaced_resource_claim_status" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'name' is set
if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
local_var_params['name'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `name` when calling `patch_namespaced_resource_claim_status`") # noqa: E501
# verify the required parameter 'namespace' is set
if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
local_var_params['namespace'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `namespace` when calling `patch_namespaced_resource_claim_status`") # noqa: E501
# verify the required parameter 'body' is set
if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
local_var_params['body'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `body` when calling `patch_namespaced_resource_claim_status`") # noqa: E501
collection_formats = {}
path_params = {}
if 'name' in local_var_params:
path_params['name'] = local_var_params['name'] # noqa: E501
if 'namespace' in local_var_params:
path_params['namespace'] = local_var_params['namespace'] # noqa: E501
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
if 'force' in local_var_params and local_var_params['force'] is not None: # noqa: E501
query_params.append(('force', local_var_params['force'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json', 'application/apply-patch+yaml', 'application/apply-patch+cbor']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/resource.k8s.io/v1/namespaces/{namespace}/resourceclaims/{name}/status', 'PATCH',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ResourceV1ResourceClaim', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def patch_namespaced_resource_claim_template(self, name, namespace, body, **kwargs): # noqa: E501
"""patch_namespaced_resource_claim_template # noqa: E501
partially update the specified ResourceClaimTemplate # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_namespaced_resource_claim_template(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the ResourceClaimTemplate (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
:param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
:param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1ResourceClaimTemplate
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.patch_namespaced_resource_claim_template_with_http_info(name, namespace, body, **kwargs) # noqa: E501
    def patch_namespaced_resource_claim_template_with_http_info(self, name, namespace, body, **kwargs):  # noqa: E501
        """patch_namespaced_resource_claim_template  # noqa: E501
        partially update the specified ResourceClaimTemplate  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.patch_namespaced_resource_claim_template_with_http_info(name, namespace, body, async_req=True)
        >>> result = thread.get()
        :param async_req bool: execute request asynchronously
        :param str name: name of the ResourceClaimTemplate (required)
        :param str namespace: object name and auth scope, such as for teams and projects (required)
        :param object body: (required)
        :param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
        :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
        :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(V1ResourceClaimTemplate, status_code(int), headers(HTTPHeaderDict))
            If the method is called asynchronously,
            returns the request thread.
        """
        # Gather positional args and kwargs into one dict for uniform handling.
        local_var_params = locals()
        # Keywords this endpoint accepts, plus the client-side control options.
        all_params = [
            'name',
            'namespace',
            'body',
            'pretty',
            'dry_run',
            'field_manager',
            'field_validation',
            'force'
        ]
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )
        # Reject unknown keyword arguments, then fold kwargs into local_var_params.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method patch_namespaced_resource_claim_template" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'name' is set
        if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
                                                        local_var_params['name'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `name` when calling `patch_namespaced_resource_claim_template`")  # noqa: E501
        # verify the required parameter 'namespace' is set
        if self.api_client.client_side_validation and ('namespace' not in local_var_params or  # noqa: E501
                                                        local_var_params['namespace'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `namespace` when calling `patch_namespaced_resource_claim_template`")  # noqa: E501
        # verify the required parameter 'body' is set
        if self.api_client.client_side_validation and ('body' not in local_var_params or  # noqa: E501
                                                        local_var_params['body'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `body` when calling `patch_namespaced_resource_claim_template`")  # noqa: E501
        # No multi-valued (collection-format) parameters for this endpoint.
        collection_formats = {}
        # Values substituted into the URL path template below.
        path_params = {}
        if 'name' in local_var_params:
            path_params['name'] = local_var_params['name']  # noqa: E501
        if 'namespace' in local_var_params:
            path_params['namespace'] = local_var_params['namespace']  # noqa: E501
        # Optional query-string parameters; only sent when explicitly provided.
        query_params = []
        if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
            query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
        if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None:  # noqa: E501
            query_params.append(('dryRun', local_var_params['dry_run']))  # noqa: E501
        if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None:  # noqa: E501
            query_params.append(('fieldManager', local_var_params['field_manager']))  # noqa: E501
        if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None:  # noqa: E501
            query_params.append(('fieldValidation', local_var_params['field_validation']))  # noqa: E501
        if 'force' in local_var_params and local_var_params['force'] is not None:  # noqa: E501
            query_params.append(('force', local_var_params['force']))  # noqa: E501
        header_params = {}
        form_params = []
        local_var_files = {}
        # The patch document itself is sent as the request body.
        body_params = None
        if 'body' in local_var_params:
            body_params = local_var_params['body']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor'])  # noqa: E501
        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
            ['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json', 'application/apply-patch+yaml', 'application/apply-patch+cbor'])  # noqa: E501
        # Authentication setting
        auth_settings = ['BearerToken']  # noqa: E501
        # Delegate the HTTP exchange (and async handling) to the shared ApiClient.
        return self.api_client.call_api(
            '/apis/resource.k8s.io/v1/namespaces/{namespace}/resourceclaimtemplates/{name}', 'PATCH',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='V1ResourceClaimTemplate',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
def patch_resource_slice(self, name, body, **kwargs): # noqa: E501
"""patch_resource_slice # noqa: E501
partially update the specified ResourceSlice # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_resource_slice(name, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the ResourceSlice (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
:param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
:param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1ResourceSlice
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.patch_resource_slice_with_http_info(name, body, **kwargs) # noqa: E501
    def patch_resource_slice_with_http_info(self, name, body, **kwargs):  # noqa: E501
        """patch_resource_slice  # noqa: E501
        partially update the specified ResourceSlice  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.patch_resource_slice_with_http_info(name, body, async_req=True)
        >>> result = thread.get()
        :param async_req bool: execute request asynchronously
        :param str name: name of the ResourceSlice (required)
        :param object body: (required)
        :param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
        :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
        :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(V1ResourceSlice, status_code(int), headers(HTTPHeaderDict))
            If the method is called asynchronously,
            returns the request thread.
        """
        # Gather positional args and kwargs into one dict for uniform handling.
        local_var_params = locals()
        # Keywords this endpoint accepts, plus the client-side control options.
        all_params = [
            'name',
            'body',
            'pretty',
            'dry_run',
            'field_manager',
            'field_validation',
            'force'
        ]
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )
        # Reject unknown keyword arguments, then fold kwargs into local_var_params.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method patch_resource_slice" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'name' is set
        if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
                                                        local_var_params['name'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `name` when calling `patch_resource_slice`")  # noqa: E501
        # verify the required parameter 'body' is set
        if self.api_client.client_side_validation and ('body' not in local_var_params or  # noqa: E501
                                                        local_var_params['body'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `body` when calling `patch_resource_slice`")  # noqa: E501
        # No multi-valued (collection-format) parameters for this endpoint.
        collection_formats = {}
        # ResourceSlice is cluster-scoped: only {name} appears in the path.
        path_params = {}
        if 'name' in local_var_params:
            path_params['name'] = local_var_params['name']  # noqa: E501
        # Optional query-string parameters; only sent when explicitly provided.
        query_params = []
        if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
            query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
        if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None:  # noqa: E501
            query_params.append(('dryRun', local_var_params['dry_run']))  # noqa: E501
        if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None:  # noqa: E501
            query_params.append(('fieldManager', local_var_params['field_manager']))  # noqa: E501
        if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None:  # noqa: E501
            query_params.append(('fieldValidation', local_var_params['field_validation']))  # noqa: E501
        if 'force' in local_var_params and local_var_params['force'] is not None:  # noqa: E501
            query_params.append(('force', local_var_params['force']))  # noqa: E501
        header_params = {}
        form_params = []
        local_var_files = {}
        # The patch document itself is sent as the request body.
        body_params = None
        if 'body' in local_var_params:
            body_params = local_var_params['body']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor'])  # noqa: E501
        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
            ['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json', 'application/apply-patch+yaml', 'application/apply-patch+cbor'])  # noqa: E501
        # Authentication setting
        auth_settings = ['BearerToken']  # noqa: E501
        # Delegate the HTTP exchange (and async handling) to the shared ApiClient.
        return self.api_client.call_api(
            '/apis/resource.k8s.io/v1/resourceslices/{name}', 'PATCH',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='V1ResourceSlice',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
def read_device_class(self, name, **kwargs): # noqa: E501
"""read_device_class # noqa: E501
read the specified DeviceClass # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_device_class(name, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the DeviceClass (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1DeviceClass
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.read_device_class_with_http_info(name, **kwargs) # noqa: E501
    def read_device_class_with_http_info(self, name, **kwargs):  # noqa: E501
        """read_device_class  # noqa: E501
        read the specified DeviceClass  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.read_device_class_with_http_info(name, async_req=True)
        >>> result = thread.get()
        :param async_req bool: execute request asynchronously
        :param str name: name of the DeviceClass (required)
        :param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(V1DeviceClass, status_code(int), headers(HTTPHeaderDict))
            If the method is called asynchronously,
            returns the request thread.
        """
        # Gather positional args and kwargs into one dict for uniform handling.
        local_var_params = locals()
        # Keywords this endpoint accepts, plus the client-side control options.
        all_params = [
            'name',
            'pretty'
        ]
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )
        # Reject unknown keyword arguments, then fold kwargs into local_var_params.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method read_device_class" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'name' is set
        if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
                                                        local_var_params['name'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `name` when calling `read_device_class`")  # noqa: E501
        # No multi-valued (collection-format) parameters for this endpoint.
        collection_formats = {}
        # DeviceClass is cluster-scoped: only {name} appears in the path.
        path_params = {}
        if 'name' in local_var_params:
            path_params['name'] = local_var_params['name']  # noqa: E501
        # Optional query-string parameters; only sent when explicitly provided.
        query_params = []
        if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
            query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
        header_params = {}
        form_params = []
        local_var_files = {}
        # GET request: no body is sent.
        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor'])  # noqa: E501
        # Authentication setting
        auth_settings = ['BearerToken']  # noqa: E501
        # Delegate the HTTP exchange (and async handling) to the shared ApiClient.
        return self.api_client.call_api(
            '/apis/resource.k8s.io/v1/deviceclasses/{name}', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='V1DeviceClass',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
def read_namespaced_resource_claim(self, name, namespace, **kwargs): # noqa: E501
"""read_namespaced_resource_claim # noqa: E501
read the specified ResourceClaim # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_namespaced_resource_claim(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the ResourceClaim (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: ResourceV1ResourceClaim
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.read_namespaced_resource_claim_with_http_info(name, namespace, **kwargs) # noqa: E501
    def read_namespaced_resource_claim_with_http_info(self, name, namespace, **kwargs):  # noqa: E501
        """read_namespaced_resource_claim  # noqa: E501
        read the specified ResourceClaim  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.read_namespaced_resource_claim_with_http_info(name, namespace, async_req=True)
        >>> result = thread.get()
        :param async_req bool: execute request asynchronously
        :param str name: name of the ResourceClaim (required)
        :param str namespace: object name and auth scope, such as for teams and projects (required)
        :param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(ResourceV1ResourceClaim, status_code(int), headers(HTTPHeaderDict))
            If the method is called asynchronously,
            returns the request thread.
        """
        # Gather positional args and kwargs into one dict for uniform handling.
        local_var_params = locals()
        # Keywords this endpoint accepts, plus the client-side control options.
        all_params = [
            'name',
            'namespace',
            'pretty'
        ]
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )
        # Reject unknown keyword arguments, then fold kwargs into local_var_params.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method read_namespaced_resource_claim" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'name' is set
        if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
                                                        local_var_params['name'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `name` when calling `read_namespaced_resource_claim`")  # noqa: E501
        # verify the required parameter 'namespace' is set
        if self.api_client.client_side_validation and ('namespace' not in local_var_params or  # noqa: E501
                                                        local_var_params['namespace'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `namespace` when calling `read_namespaced_resource_claim`")  # noqa: E501
        # No multi-valued (collection-format) parameters for this endpoint.
        collection_formats = {}
        # Values substituted into the URL path template below.
        path_params = {}
        if 'name' in local_var_params:
            path_params['name'] = local_var_params['name']  # noqa: E501
        if 'namespace' in local_var_params:
            path_params['namespace'] = local_var_params['namespace']  # noqa: E501
        # Optional query-string parameters; only sent when explicitly provided.
        query_params = []
        if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
            query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
        header_params = {}
        form_params = []
        local_var_files = {}
        # GET request: no body is sent.
        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor'])  # noqa: E501
        # Authentication setting
        auth_settings = ['BearerToken']  # noqa: E501
        # Delegate the HTTP exchange (and async handling) to the shared ApiClient.
        return self.api_client.call_api(
            '/apis/resource.k8s.io/v1/namespaces/{namespace}/resourceclaims/{name}', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='ResourceV1ResourceClaim',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
def read_namespaced_resource_claim_status(self, name, namespace, **kwargs): # noqa: E501
"""read_namespaced_resource_claim_status # noqa: E501
read status of the specified ResourceClaim # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_namespaced_resource_claim_status(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the ResourceClaim (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: ResourceV1ResourceClaim
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.read_namespaced_resource_claim_status_with_http_info(name, namespace, **kwargs) # noqa: E501
def read_namespaced_resource_claim_status_with_http_info(self, name, namespace, **kwargs): # noqa: E501
"""read_namespaced_resource_claim_status # noqa: E501
read status of the specified ResourceClaim # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_namespaced_resource_claim_status_with_http_info(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the ResourceClaim (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(ResourceV1ResourceClaim, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'name',
'namespace',
'pretty'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method read_namespaced_resource_claim_status" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'name' is set
if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
local_var_params['name'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `name` when calling `read_namespaced_resource_claim_status`") # noqa: E501
# verify the required parameter 'namespace' is set
if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
local_var_params['namespace'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `namespace` when calling `read_namespaced_resource_claim_status`") # noqa: E501
collection_formats = {}
path_params = {}
if 'name' in local_var_params:
path_params['name'] = local_var_params['name'] # noqa: E501
if 'namespace' in local_var_params:
path_params['namespace'] = local_var_params['namespace'] # noqa: E501
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/resource.k8s.io/v1/namespaces/{namespace}/resourceclaims/{name}/status', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ResourceV1ResourceClaim', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def read_namespaced_resource_claim_template(self, name, namespace, **kwargs): # noqa: E501
"""read_namespaced_resource_claim_template # noqa: E501
read the specified ResourceClaimTemplate # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_namespaced_resource_claim_template(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the ResourceClaimTemplate (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1ResourceClaimTemplate
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.read_namespaced_resource_claim_template_with_http_info(name, namespace, **kwargs) # noqa: E501
def read_namespaced_resource_claim_template_with_http_info(self, name, namespace, **kwargs): # noqa: E501
"""read_namespaced_resource_claim_template # noqa: E501
read the specified ResourceClaimTemplate # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_namespaced_resource_claim_template_with_http_info(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the ResourceClaimTemplate (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1ResourceClaimTemplate, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'name',
'namespace',
'pretty'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method read_namespaced_resource_claim_template" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'name' is set
if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
local_var_params['name'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `name` when calling `read_namespaced_resource_claim_template`") # noqa: E501
# verify the required parameter 'namespace' is set
if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
local_var_params['namespace'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `namespace` when calling `read_namespaced_resource_claim_template`") # noqa: E501
collection_formats = {}
path_params = {}
if 'name' in local_var_params:
path_params['name'] = local_var_params['name'] # noqa: E501
if 'namespace' in local_var_params:
path_params['namespace'] = local_var_params['namespace'] # noqa: E501
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/resource.k8s.io/v1/namespaces/{namespace}/resourceclaimtemplates/{name}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1ResourceClaimTemplate', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def read_resource_slice(self, name, **kwargs): # noqa: E501
"""read_resource_slice # noqa: E501
read the specified ResourceSlice # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_resource_slice(name, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the ResourceSlice (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1ResourceSlice
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.read_resource_slice_with_http_info(name, **kwargs) # noqa: E501
def read_resource_slice_with_http_info(self, name, **kwargs): # noqa: E501
"""read_resource_slice # noqa: E501
read the specified ResourceSlice # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_resource_slice_with_http_info(name, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the ResourceSlice (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1ResourceSlice, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'name',
'pretty'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method read_resource_slice" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'name' is set
if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
local_var_params['name'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `name` when calling `read_resource_slice`") # noqa: E501
collection_formats = {}
path_params = {}
if 'name' in local_var_params:
path_params['name'] = local_var_params['name'] # noqa: E501
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/resource.k8s.io/v1/resourceslices/{name}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1ResourceSlice', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def replace_device_class(self, name, body, **kwargs): # noqa: E501
"""replace_device_class # noqa: E501
replace the specified DeviceClass # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_device_class(name, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the DeviceClass (required)
:param V1DeviceClass body: (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1DeviceClass
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.replace_device_class_with_http_info(name, body, **kwargs) # noqa: E501
def replace_device_class_with_http_info(self, name, body, **kwargs): # noqa: E501
"""replace_device_class # noqa: E501
replace the specified DeviceClass # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_device_class_with_http_info(name, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the DeviceClass (required)
:param V1DeviceClass body: (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1DeviceClass, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'name',
'body',
'pretty',
'dry_run',
'field_manager',
'field_validation'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method replace_device_class" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'name' is set
if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
local_var_params['name'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `name` when calling `replace_device_class`") # noqa: E501
# verify the required parameter 'body' is set
if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
local_var_params['body'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `body` when calling `replace_device_class`") # noqa: E501
collection_formats = {}
path_params = {}
if 'name' in local_var_params:
path_params['name'] = local_var_params['name'] # noqa: E501
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/resource.k8s.io/v1/deviceclasses/{name}', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1DeviceClass', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def replace_namespaced_resource_claim(self, name, namespace, body, **kwargs): # noqa: E501
"""replace_namespaced_resource_claim # noqa: E501
replace the specified ResourceClaim # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_namespaced_resource_claim(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the ResourceClaim (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param ResourceV1ResourceClaim body: (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: ResourceV1ResourceClaim
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.replace_namespaced_resource_claim_with_http_info(name, namespace, body, **kwargs) # noqa: E501
def replace_namespaced_resource_claim_with_http_info(self, name, namespace, body, **kwargs): # noqa: E501
"""replace_namespaced_resource_claim # noqa: E501
replace the specified ResourceClaim # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_namespaced_resource_claim_with_http_info(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the ResourceClaim (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param ResourceV1ResourceClaim body: (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(ResourceV1ResourceClaim, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'name',
'namespace',
'body',
'pretty',
'dry_run',
'field_manager',
'field_validation'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method replace_namespaced_resource_claim" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'name' is set
if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
local_var_params['name'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `name` when calling `replace_namespaced_resource_claim`") # noqa: E501
# verify the required parameter 'namespace' is set
if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
local_var_params['namespace'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `namespace` when calling `replace_namespaced_resource_claim`") # noqa: E501
# verify the required parameter 'body' is set
if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
local_var_params['body'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `body` when calling `replace_namespaced_resource_claim`") # noqa: E501
collection_formats = {}
path_params = {}
if 'name' in local_var_params:
path_params['name'] = local_var_params['name'] # noqa: E501
if 'namespace' in local_var_params:
path_params['namespace'] = local_var_params['namespace'] # noqa: E501
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/resource.k8s.io/v1/namespaces/{namespace}/resourceclaims/{name}', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ResourceV1ResourceClaim', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def replace_namespaced_resource_claim_status(self, name, namespace, body, **kwargs): # noqa: E501
"""replace_namespaced_resource_claim_status # noqa: E501
replace status of the specified ResourceClaim # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_namespaced_resource_claim_status(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the ResourceClaim (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param ResourceV1ResourceClaim body: (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: ResourceV1ResourceClaim
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.replace_namespaced_resource_claim_status_with_http_info(name, namespace, body, **kwargs) # noqa: E501
def replace_namespaced_resource_claim_status_with_http_info(self, name, namespace, body, **kwargs): # noqa: E501
"""replace_namespaced_resource_claim_status # noqa: E501
replace status of the specified ResourceClaim # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_namespaced_resource_claim_status_with_http_info(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the ResourceClaim (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param ResourceV1ResourceClaim body: (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(ResourceV1ResourceClaim, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'name',
'namespace',
'body',
'pretty',
'dry_run',
'field_manager',
'field_validation'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method replace_namespaced_resource_claim_status" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'name' is set
if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
local_var_params['name'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `name` when calling `replace_namespaced_resource_claim_status`") # noqa: E501
# verify the required parameter 'namespace' is set
if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
local_var_params['namespace'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `namespace` when calling `replace_namespaced_resource_claim_status`") # noqa: E501
# verify the required parameter 'body' is set
if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
local_var_params['body'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `body` when calling `replace_namespaced_resource_claim_status`") # noqa: E501
collection_formats = {}
path_params = {}
if 'name' in local_var_params:
path_params['name'] = local_var_params['name'] # noqa: E501
if 'namespace' in local_var_params:
path_params['namespace'] = local_var_params['namespace'] # noqa: E501
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/resource.k8s.io/v1/namespaces/{namespace}/resourceclaims/{name}/status', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ResourceV1ResourceClaim', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def replace_namespaced_resource_claim_template(self, name, namespace, body, **kwargs): # noqa: E501
"""replace_namespaced_resource_claim_template # noqa: E501
replace the specified ResourceClaimTemplate # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_namespaced_resource_claim_template(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the ResourceClaimTemplate (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1ResourceClaimTemplate body: (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1ResourceClaimTemplate
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.replace_namespaced_resource_claim_template_with_http_info(name, namespace, body, **kwargs) # noqa: E501
def replace_namespaced_resource_claim_template_with_http_info(self, name, namespace, body, **kwargs): # noqa: E501
"""replace_namespaced_resource_claim_template # noqa: E501
replace the specified ResourceClaimTemplate # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_namespaced_resource_claim_template_with_http_info(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the ResourceClaimTemplate (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1ResourceClaimTemplate body: (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1ResourceClaimTemplate, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'name',
'namespace',
'body',
'pretty',
'dry_run',
'field_manager',
'field_validation'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method replace_namespaced_resource_claim_template" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'name' is set
if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
local_var_params['name'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `name` when calling `replace_namespaced_resource_claim_template`") # noqa: E501
# verify the required parameter 'namespace' is set
if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
local_var_params['namespace'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `namespace` when calling `replace_namespaced_resource_claim_template`") # noqa: E501
# verify the required parameter 'body' is set
if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
local_var_params['body'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `body` when calling `replace_namespaced_resource_claim_template`") # noqa: E501
collection_formats = {}
path_params = {}
if 'name' in local_var_params:
path_params['name'] = local_var_params['name'] # noqa: E501
if 'namespace' in local_var_params:
path_params['namespace'] = local_var_params['namespace'] # noqa: E501
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/resource.k8s.io/v1/namespaces/{namespace}/resourceclaimtemplates/{name}', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1ResourceClaimTemplate', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def replace_resource_slice(self, name, body, **kwargs): # noqa: E501
"""replace_resource_slice # noqa: E501
replace the specified ResourceSlice # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_resource_slice(name, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the ResourceSlice (required)
:param V1ResourceSlice body: (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1ResourceSlice
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.replace_resource_slice_with_http_info(name, body, **kwargs) # noqa: E501
def replace_resource_slice_with_http_info(self, name, body, **kwargs): # noqa: E501
"""replace_resource_slice # noqa: E501
replace the specified ResourceSlice # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_resource_slice_with_http_info(name, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the ResourceSlice (required)
:param V1ResourceSlice body: (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1ResourceSlice, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'name',
'body',
'pretty',
'dry_run',
'field_manager',
'field_validation'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method replace_resource_slice" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'name' is set
if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
local_var_params['name'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `name` when calling `replace_resource_slice`") # noqa: E501
# verify the required parameter 'body' is set
if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
local_var_params['body'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `body` when calling `replace_resource_slice`") # noqa: E501
collection_formats = {}
path_params = {}
if 'name' in local_var_params:
path_params['name'] = local_var_params['name'] # noqa: E501
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/resource.k8s.io/v1/resourceslices/{name}', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1ResourceSlice', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
| ResourceV1Api |
python | tiangolo__fastapi | docs_src/sql_databases/tutorial002.py | {
"start": 406,
"end": 448
} | class ____(HeroBase):
id: int
| HeroPublic |
python | pytorch__pytorch | test/dynamo/test_higher_order_ops.py | {
"start": 223216,
"end": 226498
} | class ____(torch.nn.Module):
def forward(self, L_x_: "f32[3, 3]", L_v_: "f32[3, 3]"):
l_x_ = L_x_
l_v_ = L_v_
_set_fwd_grad_enabled = torch._C._set_fwd_grad_enabled(False); _set_fwd_grad_enabled = None
_set_fwd_grad_enabled_1 = torch._C._set_fwd_grad_enabled(True); _set_fwd_grad_enabled_1 = None
_set_fwd_grad_enabled_2 = torch._C._set_fwd_grad_enabled(False); _set_fwd_grad_enabled_2 = None
_jvp_increment_nesting = torch._C._functorch._jvp_increment_nesting(); _jvp_increment_nesting = None
_set_fwd_grad_enabled_3 = torch._C._set_fwd_grad_enabled(True); _set_fwd_grad_enabled_3 = None
_enter_dual_level = torch._C._enter_dual_level(); _enter_dual_level = None
_maybe_load_decompositions = torch.autograd.forward_ad._maybe_load_decompositions(); _maybe_load_decompositions = None
_make_dual: "f32[3, 3]" = torch._make_dual(l_x_, l_v_, level = 0); l_x_ = l_v_ = None
sin: "f32[3, 3]" = _make_dual.sin(); _make_dual = None
result_duals: "f32[]" = sin.sum(); sin = None
_unpack_dual = torch._unpack_dual(result_duals, level = 0); result_duals = None
primal: "f32[]" = _unpack_dual[0]
dual: "f32[]" = _unpack_dual[1]; _unpack_dual = None
primals_out_unflatten: "f32[]" = torch._C._functorch._unwrap_for_grad(primal, 1); primal = None
tangents_out_unflatten: "f32[]" = torch._C._functorch._unwrap_for_grad(dual, 1); dual = None
_exit_dual_level = torch._C._exit_dual_level(0); _exit_dual_level = None
_set_fwd_grad_enabled_4 = torch._C._set_fwd_grad_enabled(False); _set_fwd_grad_enabled_4 = None
_jvp_decrement_nesting = torch._C._functorch._jvp_decrement_nesting(); _jvp_decrement_nesting = None
_set_fwd_grad_enabled_5 = torch._C._set_fwd_grad_enabled(True); _set_fwd_grad_enabled_5 = None
_set_fwd_grad_enabled_6 = torch._C._set_fwd_grad_enabled(False); _set_fwd_grad_enabled_6 = None
_set_fwd_grad_enabled_7 = torch._C._set_fwd_grad_enabled(True); _set_fwd_grad_enabled_7 = None
return (primals_out_unflatten, tangents_out_unflatten)
""",
)
def test_jvp_freevar_tensor(self):
counters.clear()
y = torch.randn(3, 3)
def fn(x):
return (x.sin() + y).sum()
def wrapper_fn(x):
return torch.func.jvp(fn, (x,), (x,))
x = torch.randn(3, 3)
expected = wrapper_fn(x)
actual = torch.compile(wrapper_fn, backend="aot_eager", fullgraph=True)(x)
self.assertEqual(actual, expected)
def test_jvp_jvp(self):
counters.clear()
if check_dynamic_shape_capture():
self.skipTest("test fails with dynamic shapes")
def fn(x):
return torch.func.jvp(torch.sin, (x,), (x,))
def wrapper_fn(x):
return torch.func.jvp(fn, (x,), (x,))
x = torch.randn(3, 3, 3)
wrapped_gm = self._compile_check(wrapper_fn, (x,))
# Dynamic shapes produce a slightly different graph.
if check_dynamic_shape_capture():
return
actual = normalize_gm(wrapped_gm.print_readable(print_output=False))
self.assertExpectedInline(
actual,
"""\
| GraphModule |
python | pennersr__django-allauth | allauth/socialaccount/providers/spotify/provider.py | {
"start": 581,
"end": 1129
} | class ____(OAuth2Provider):
id = "spotify"
name = "Spotify"
account_class = SpotifyAccount
oauth2_adapter_class = SpotifyOAuth2Adapter
def extract_uid(self, data):
return data["id"]
def extract_common_fields(self, data):
return dict(name=data.get("display_name"), email=data.get("email"))
def get_default_scope(self):
scope = []
if app_settings.QUERY_EMAIL:
scope.append("user-read-email")
return scope
provider_classes = [SpotifyOAuth2Provider]
| SpotifyOAuth2Provider |
python | mlflow__mlflow | mlflow/store/model_registry/dbmodels/models.py | {
"start": 926,
"end": 2364
} | class ____(Base):
__tablename__ = "registered_models"
name = Column(String(256), unique=True, nullable=False)
creation_time = Column(BigInteger, default=get_current_time_millis)
last_updated_time = Column(BigInteger, nullable=True, default=None)
description = Column(String(5000), nullable=True)
__table_args__ = (PrimaryKeyConstraint("name", name="registered_model_pk"),)
def __repr__(self):
return (
f"<SqlRegisteredModel ({self.name}, {self.description}, "
f"{self.creation_time}, {self.last_updated_time})>"
)
def to_mlflow_entity(self):
# SqlRegisteredModel has backref to all "model_versions". Filter latest for each stage.
latest_versions = {}
for mv in self.model_versions:
stage = mv.current_stage
if stage != STAGE_DELETED_INTERNAL and (
stage not in latest_versions or latest_versions[stage].version < mv.version
):
latest_versions[stage] = mv
return RegisteredModel(
self.name,
self.creation_time,
self.last_updated_time,
self.description,
[mvd.to_mlflow_entity() for mvd in latest_versions.values()],
[tag.to_mlflow_entity() for tag in self.registered_model_tags],
[alias.to_mlflow_entity() for alias in self.registered_model_aliases],
)
| SqlRegisteredModel |
python | ApeWorX__ape | src/ape/exceptions.py | {
"start": 1130,
"end": 1286
} | class ____(ApeException, IndexError):
"""
An exception that is also an IndexError.
Useful for nicely displaying IndexErrors.
"""
| ApeIndexError |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_image28.py | {
"start": 315,
"end": 900
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("image28.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file with image(s)."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
worksheet.insert_image(
0, 6, self.image_dir + "red_208.png", {"x_offset": 46, "y_offset": 1}
)
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | getsentry__sentry | tests/sentry/tasks/test_clear_expired_rulesnoozes.py | {
"start": 262,
"end": 3213
} | class ____(APITestCase):
def setUp(self) -> None:
self.issue_alert_rule = Rule.objects.create(
label="test rule", project=self.project, owner_team=self.team
)
self.metric_alert_rule = self.create_alert_rule(
organization=self.project.organization, projects=[self.project]
)
self.until = datetime.now(timezone.utc) - timedelta(minutes=1)
self.login_as(user=self.user)
def test_task_persistent_name(self) -> None:
assert clear_expired_rulesnoozes.name == "sentry.tasks.clear_expired_rulesnoozes"
def test_simple(self) -> None:
"""Test that expired rulesnoozes are deleted, and ones that still have time left are left alone"""
issue_alert_rule_snooze = self.snooze_rule(
user_id=self.user.id,
rule=self.issue_alert_rule,
owner_id=self.user.id,
until=self.until,
date_added=datetime.now(timezone.utc),
)
issue_alert_rule_snooze2 = self.snooze_rule(
rule=self.issue_alert_rule,
owner_id=self.user.id,
until=datetime.now(timezone.utc) + timedelta(minutes=1),
date_added=datetime.now(timezone.utc),
)
metric_alert_rule_snooze = self.snooze_rule(
user_id=self.user.id,
alert_rule=self.metric_alert_rule,
owner_id=self.user.id,
until=self.until,
date_added=datetime.now(timezone.utc),
)
metric_alert_rule_snooze2 = self.snooze_rule(
alert_rule=self.metric_alert_rule,
owner_id=self.user.id,
until=datetime.now(timezone.utc) + timedelta(minutes=1),
date_added=datetime.now(timezone.utc),
)
clear_expired_rulesnoozes()
assert not RuleSnooze.objects.filter(id=issue_alert_rule_snooze.id).exists()
assert RuleSnooze.objects.filter(id=issue_alert_rule_snooze2.id).exists()
assert not RuleSnooze.objects.filter(id=metric_alert_rule_snooze.id).exists()
assert RuleSnooze.objects.filter(id=metric_alert_rule_snooze2.id).exists()
def test_snooze_forever(self) -> None:
"""Test that if an issue alert rule is snoozed forever, the task doesn't remove it."""
issue_alert_rule_snooze = self.snooze_rule(
user_id=self.user.id,
rule=self.issue_alert_rule,
owner_id=self.user.id,
date_added=datetime.now(timezone.utc),
)
metric_alert_rule_snooze = self.snooze_rule(
user_id=self.user.id,
alert_rule=self.metric_alert_rule,
owner_id=self.user.id,
date_added=datetime.now(timezone.utc),
)
clear_expired_rulesnoozes()
assert RuleSnooze.objects.filter(id=issue_alert_rule_snooze.id).exists()
assert RuleSnooze.objects.filter(id=metric_alert_rule_snooze.id).exists()
| ClearExpiredRuleSnoozesTest |
python | bokeh__bokeh | src/bokeh/models/glyphs.py | {
"start": 51732,
"end": 51963
} | class ____(Text):
""" Base class for math text glyphs.
"""
# explicit __init__ to support Init signatures
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
| MathTextGlyph |
python | django__django | tests/fixtures_regress/models.py | {
"start": 7460,
"end": 7609
} | class ____(BaseNKModel):
c_set = models.ManyToManyField(
"M2MComplexCircular1C", through="M2MCircular1ThroughBC"
)
| M2MComplexCircular1B |
python | Netflix__metaflow | metaflow/plugins/gcp/gs_storage_client_factory.py | {
"start": 540,
"end": 2110
} | class ____(object):
name = "gcp-default"
@staticmethod
def get_gs_storage_client(*args, **kwargs):
return _get_gs_storage_client_default()
@staticmethod
def get_credentials(scopes, *args, **kwargs):
import google.auth
return google.auth.default(scopes=scopes)
cached_provider_class = None
def get_gs_storage_client():
global cached_provider_class
if cached_provider_class is None:
from metaflow.metaflow_config import DEFAULT_GCP_CLIENT_PROVIDER
from metaflow.plugins import GCP_CLIENT_PROVIDERS
for p in GCP_CLIENT_PROVIDERS:
if p.name == DEFAULT_GCP_CLIENT_PROVIDER:
cached_provider_class = p
break
else:
raise ValueError(
"Cannot find GCP Client provider %s" % DEFAULT_GCP_CLIENT_PROVIDER
)
return cached_provider_class.get_gs_storage_client()
def get_credentials(scopes, *args, **kwargs):
global cached_provider_class
if cached_provider_class is None:
from metaflow.metaflow_config import DEFAULT_GCP_CLIENT_PROVIDER
from metaflow.plugins import GCP_CLIENT_PROVIDERS
for p in GCP_CLIENT_PROVIDERS:
if p.name == DEFAULT_GCP_CLIENT_PROVIDER:
cached_provider_class = p
break
else:
raise ValueError(
"Cannot find GCP Client provider %s" % DEFAULT_GCP_CLIENT_PROVIDER
)
return cached_provider_class.get_credentials(scopes, *args, **kwargs)
| GcpDefaultClientProvider |
python | spulec__freezegun | freezegun/api.py | {
"start": 19178,
"end": 40478
} | class ____:
"""
A class to freeze time for testing purposes.
This class can be used as a context manager or a decorator to freeze time
during the execution of a block of code or a function. It provides various
options to customize the behavior of the frozen time.
Attributes:
time_to_freeze (datetime.datetime): The datetime to freeze time at.
tz_offset (datetime.timedelta): The timezone offset to apply to the frozen time.
ignore (List[str]): A list of module names to ignore when freezing time.
tick (bool): Whether to allow time to tick forward.
auto_tick_seconds (float): The number of seconds to auto-tick the frozen time.
undo_changes (List[Tuple[types.ModuleType, str, Any]]): A list of changes to undo when stopping the frozen time.
modules_at_start (Set[str]): A set of module names that were loaded at the start of freezing time.
as_arg (bool): Whether to pass the frozen time as an argument to the decorated function.
as_kwarg (str): The name of the keyword argument to pass the frozen time to the decorated function.
real_asyncio (Optional[bool]): Whether to allow asyncio event loops to see real monotonic time.
Methods:
__call__(func): Decorates a function or class to freeze time during its execution.
decorate_class(klass): Decorates a class to freeze time during its execution.
__enter__(): Starts freezing time and returns the time factory.
__exit__(*args): Stops freezing time.
start(): Starts freezing time and returns the time factory.
stop(): Stops freezing time and restores the original time functions.
decorate_coroutine(coroutine): Decorates a coroutine to freeze time during its execution.
decorate_callable(func): Decorates a callable to freeze time during its execution.
"""
def __init__(
self,
time_to_freeze_str: Optional[_Freezable],
tz_offset: Union[int, datetime.timedelta],
ignore: List[str],
tick: bool,
as_arg: bool,
as_kwarg: str,
auto_tick_seconds: float,
real_asyncio: Optional[bool],
):
self.time_to_freeze = _parse_time_to_freeze(time_to_freeze_str)
self.tz_offset = _parse_tz_offset(tz_offset)
self.ignore = tuple(ignore)
self.tick = tick
self.auto_tick_seconds = auto_tick_seconds
self.undo_changes: List[Tuple[types.ModuleType, str, Any]] = []
self.modules_at_start: Set[str] = set()
self.as_arg = as_arg
self.as_kwarg = as_kwarg
self.real_asyncio = real_asyncio
# mypy objects to this because Type is Callable, but Pytype needs it because
# (unlike mypy's) its inference does not assume class decorators always leave
# the type unchanged.
@overload
def __call__(self, func: Type[T2]) -> Type[T2]: # type: ignore[overload-overlap]
...
@overload
def __call__(self, func: "Callable[P, Awaitable[Any]]") -> "Callable[P, Awaitable[Any]]":
...
@overload
def __call__(self, func: "Callable[P, T]") -> "Callable[P, T]":
...
def __call__(self, func: Union[Type[T2], "Callable[P, Awaitable[Any]]", "Callable[P, T]"]) -> Union[Type[T2], "Callable[P, Awaitable[Any]]", "Callable[P, T]"]: # type: ignore
if inspect.isclass(func):
return self.decorate_class(func)
elif inspect.iscoroutinefunction(func):
return self.decorate_coroutine(func)
elif inspect.isgeneratorfunction(func):
return self.decorate_generator_function(func) # type: ignore
return self.decorate_callable(func) # type: ignore
def decorate_class(self, klass: Type[T2]) -> Type[T2]:
if issubclass(klass, unittest.TestCase):
# If it's a TestCase, we freeze time around setup and teardown, as well
# as for every test case. This requires some care to avoid freezing
# the time pytest sees, as otherwise this would distort the reported
# timings.
orig_setUpClass = klass.setUpClass
orig_tearDownClass = klass.tearDownClass
# noinspection PyDecorator
@classmethod # type: ignore
def setUpClass(cls: type) -> None:
self.start()
if orig_setUpClass is not None:
orig_setUpClass()
self.stop()
# noinspection PyDecorator
@classmethod # type: ignore
def tearDownClass(cls: type) -> None:
self.start()
if orig_tearDownClass is not None:
orig_tearDownClass()
self.stop()
klass.setUpClass = setUpClass # type: ignore
klass.tearDownClass = tearDownClass # type: ignore
orig_setUp = klass.setUp
orig_tearDown = klass.tearDown
def setUp(*args: Any, **kwargs: Any) -> None:
self.start()
if orig_setUp is not None:
orig_setUp(*args, **kwargs)
def tearDown(*args: Any, **kwargs: Any) -> None:
if orig_tearDown is not None:
orig_tearDown(*args, **kwargs)
self.stop()
klass.setUp = setUp # type: ignore[method-assign]
klass.tearDown = tearDown # type: ignore[method-assign]
else:
seen = set()
klasses = klass.mro()
for base_klass in klasses:
for (attr, attr_value) in base_klass.__dict__.items():
if attr.startswith('_') or attr in seen:
continue
seen.add(attr)
if not callable(attr_value) or inspect.isclass(attr_value) or isinstance(attr_value, staticmethod):
continue
try:
if attr_value.__dict__.get("_pytestfixturefunction") and hasattr(attr_value, "__pytest_wrapped__"):
# PYTEST==8.2.x (and maybe others)
# attr_value is a pytest fixture
# In other words: attr_value == fixture(original_method)
# We need to keep the fixture itself intact to ensure pytest still treats it as a fixture
# We still want to freeze time inside the original_method though
attr_value.__pytest_wrapped__.obj = self(attr_value.__pytest_wrapped__.obj)
elif attr_value.__dict__.get("_fixture_function"):
# PYTEST==8.4.x
# Same
attr_value._fixture_function = self(attr_value._fixture_function)
else:
# Wrap the entire method inside 'freeze_time'
setattr(klass, attr, self(attr_value))
except (AttributeError, TypeError):
# Sometimes we can't set this for built-in types and custom callables
continue
return klass
def __enter__(self) -> Union[StepTickTimeFactory, TickingDateTimeFactory, FrozenDateTimeFactory]:
return self.start()
def __exit__(self, *args: Any) -> None:
self.stop()
def start(self) -> Union[StepTickTimeFactory, TickingDateTimeFactory, FrozenDateTimeFactory]:
if self.auto_tick_seconds:
freeze_factory: Union[StepTickTimeFactory, TickingDateTimeFactory, FrozenDateTimeFactory] = StepTickTimeFactory(self.time_to_freeze, self.auto_tick_seconds)
elif self.tick:
freeze_factory = TickingDateTimeFactory(self.time_to_freeze, real_datetime.now())
else:
freeze_factory = FrozenDateTimeFactory(self.time_to_freeze)
is_already_started = len(freeze_factories) > 0
freeze_factories.append(freeze_factory)
tz_offsets.append(self.tz_offset)
ignore_lists.append(self.ignore)
tick_flags.append(self.tick)
if is_already_started:
return freeze_factory
# Change the modules
datetime.datetime = FakeDatetime # type: ignore[misc]
datetime.date = FakeDate # type: ignore[misc]
time.time = fake_time
time.monotonic = fake_monotonic
time.perf_counter = fake_perf_counter
time.localtime = fake_localtime # type: ignore
time.gmtime = fake_gmtime # type: ignore
time.strftime = fake_strftime # type: ignore
if uuid_generate_time_attr:
setattr(uuid, uuid_generate_time_attr, None)
uuid._UuidCreate = None # type: ignore[attr-defined]
uuid._last_timestamp = None # type: ignore[attr-defined]
copyreg.dispatch_table[real_datetime] = pickle_fake_datetime
copyreg.dispatch_table[real_date] = pickle_fake_date
# Change any place where the module had already been imported
to_patch = [
('real_date', real_date, FakeDate),
('real_datetime', real_datetime, FakeDatetime),
('real_gmtime', real_gmtime, fake_gmtime),
('real_localtime', real_localtime, fake_localtime),
('real_monotonic', real_monotonic, fake_monotonic),
('real_perf_counter', real_perf_counter, fake_perf_counter),
('real_strftime', real_strftime, fake_strftime),
('real_time', real_time, fake_time),
]
if _TIME_NS_PRESENT:
time.time_ns = fake_time_ns
to_patch.append(('real_time_ns', real_time_ns, fake_time_ns))
if _MONOTONIC_NS_PRESENT:
time.monotonic_ns = fake_monotonic_ns
to_patch.append(('real_monotonic_ns', real_monotonic_ns, fake_monotonic_ns))
if _PERF_COUNTER_NS_PRESENT:
time.perf_counter_ns = fake_perf_counter_ns
to_patch.append(('real_perf_counter_ns', real_perf_counter_ns, fake_perf_counter_ns))
if real_clock is not None:
# time.clock is deprecated and was removed in Python 3.8
time.clock = fake_clock # type: ignore[attr-defined]
to_patch.append(('real_clock', real_clock, fake_clock))
self.fake_names = tuple(fake.__name__ for real_name, real, fake in to_patch) # type: ignore
self.reals = {id(fake): real for real_name, real, fake in to_patch}
fakes = {id(real): fake for real_name, real, fake in to_patch}
add_change = self.undo_changes.append
# Save the current loaded modules
self.modules_at_start = set(sys.modules.keys())
with warnings.catch_warnings():
warnings.filterwarnings('ignore')
for mod_name, module in list(sys.modules.items()):
if mod_name is None or module is None or mod_name == __name__:
continue
elif mod_name.startswith(self.ignore) or mod_name.endswith('.six.moves'):
continue
elif (not hasattr(module, "__name__") or module.__name__ in ('datetime', 'time')):
continue
module_attrs = _get_cached_module_attributes(module)
for attribute_name, attribute_value in module_attrs:
fake = fakes.get(id(attribute_value))
if fake:
setattr(module, attribute_name, fake)
add_change((module, attribute_name, attribute_value))
if self.real_asyncio:
# To avoid breaking `asyncio.sleep()`, let asyncio event loops see real
# monotonic time even though we've just frozen `time.monotonic()` which
# is normally used there. If we didn't do this, `await asyncio.sleep()`
# would be hanging forever breaking many tests that use `freeze_time`.
#
# Note that we cannot statically tell the class of asyncio event loops
# because it is not officially documented and can actually be changed
# at run time using `asyncio.set_event_loop_policy`. That's why we check
# the type by creating a loop here and destroying it immediately.
event_loop = asyncio.new_event_loop()
event_loop.close()
EventLoopClass = type(event_loop)
add_change((EventLoopClass, "time", EventLoopClass.time)) # type: ignore
EventLoopClass.time = lambda self: real_monotonic() # type: ignore[method-assign]
return freeze_factory
def stop(self) -> None:
freeze_factories.pop()
ignore_lists.pop()
tick_flags.pop()
tz_offsets.pop()
if not freeze_factories:
datetime.datetime = real_datetime # type: ignore[misc]
datetime.date = real_date # type: ignore[misc]
copyreg.dispatch_table.pop(real_datetime)
copyreg.dispatch_table.pop(real_date)
for module_or_object, attribute, original_value in self.undo_changes:
setattr(module_or_object, attribute, original_value)
self.undo_changes = []
# Restore modules loaded after start()
modules_to_restore = set(sys.modules.keys()) - self.modules_at_start
self.modules_at_start = set()
with warnings.catch_warnings():
warnings.simplefilter('ignore')
for mod_name in modules_to_restore:
module = sys.modules.get(mod_name, None)
if mod_name is None or module is None:
continue
elif mod_name.startswith(self.ignore) or mod_name.endswith('.six.moves'):
continue
elif not hasattr(module, "__name__") or module.__name__ in ('datetime', 'time'):
continue
for module_attribute in dir(module):
if module_attribute in self.fake_names:
continue
try:
attribute_value = getattr(module, module_attribute)
except (ImportError, AttributeError, TypeError):
# For certain libraries, this can result in ImportError(_winreg) or AttributeError (celery)
continue
real = self.reals.get(id(attribute_value))
if real:
setattr(module, module_attribute, real)
time.time = real_time
time.monotonic = real_monotonic
time.perf_counter = real_perf_counter
time.gmtime = real_gmtime
time.localtime = real_localtime
time.strftime = real_strftime
time.clock = real_clock # type: ignore[attr-defined]
if _TIME_NS_PRESENT:
time.time_ns = real_time_ns
if _MONOTONIC_NS_PRESENT:
time.monotonic_ns = real_monotonic_ns
if _PERF_COUNTER_NS_PRESENT:
time.perf_counter_ns = real_perf_counter_ns
if uuid_generate_time_attr:
setattr(uuid, uuid_generate_time_attr, real_uuid_generate_time)
uuid._UuidCreate = real_uuid_create # type: ignore[attr-defined]
uuid._last_timestamp = None # type: ignore[attr-defined]
def decorate_coroutine(self, coroutine: "Callable[P, Awaitable[T]]") -> "Callable[P, Awaitable[T]]":
return wrap_coroutine(self, coroutine)
def _call_with_time_factory(self, time_factory: Union[StepTickTimeFactory, TickingDateTimeFactory, FrozenDateTimeFactory], func: "Callable[P, T]", args: Any, kwargs: Any) -> T:
"""
Invoke a function and pass in the TimeFactory if necessary
:args: Original arguments to the function.
:kwargs: Original keyword arguments. Passed in as a dict in case the keys conflict with the other arguments to this function ('time_factory' or 'func')
"""
if self.as_arg and self.as_kwarg:
assert False, "You can't specify both as_arg and as_kwarg at the same time. Pick one."
if self.as_arg:
result = func(time_factory, *args, **kwargs) # type: ignore
elif self.as_kwarg:
kwargs[self.as_kwarg] = time_factory
result = func(*args, **kwargs)
else:
result = func(*args, **kwargs)
return result
def decorate_generator_function(self, func: "Callable[P, Iterator[T]]") -> "Callable[P, Iterator[T]]":
@functools.wraps(func)
def wrapper(*args: "P.args", **kwargs: "P.kwargs") -> Iterator[T]:
with self as time_factory:
yield from self._call_with_time_factory(time_factory, func=func, args=args, kwargs=kwargs)
return wrapper
def decorate_callable(self, func: "Callable[P, T]") -> "Callable[P, T]":
@functools.wraps(func)
def wrapper(*args: "P.args", **kwargs: "P.kwargs") -> T:
with self as time_factory:
return self._call_with_time_factory(time_factory, func=func, args=args, kwargs=kwargs)
return wrapper
def freeze_time(time_to_freeze: Optional[_Freezable]=None, tz_offset: Union[int, datetime.timedelta]=0, ignore: Optional[List[str]]=None, tick: bool=False, as_arg: bool=False, as_kwarg: str='',
auto_tick_seconds: float=0, real_asyncio: bool=False) -> _freeze_time:
"""
Freezes time for testing purposes.
This function can be used as a decorator or a context manager to freeze time
during the execution of a block of code or a function. It provides various
options to customize the behavior of the frozen time.
Args:
time_to_freeze (Optional[_Freezable]): The datetime to freeze time at.
tz_offset (Union[int, datetime.timedelta]): The timezone offset to apply to the frozen time.
ignore (Optional[List[str]]): A list of module names to ignore when freezing time.
tick (bool): Whether to allow time to tick forward.
as_arg (bool): Whether to pass the frozen time as an argument to the decorated function.
as_kwarg (str): The name of the keyword argument to pass the frozen time to the decorated function.
auto_tick_seconds (float): The number of seconds to auto-tick the frozen time.
real_asyncio (bool): Whether to allow asyncio event loops to see real monotonic time.
Returns:
_freeze_time: An instance of the _freeze_time class.
"""
acceptable_times: Any = (type(None), str, datetime.date, datetime.timedelta,
types.FunctionType, types.GeneratorType)
if MayaDT is not None:
acceptable_times += MayaDT,
if not isinstance(time_to_freeze, acceptable_times):
raise TypeError(('freeze_time() expected None, a string, date instance, datetime '
'instance, MayaDT, timedelta instance, function or a generator, but got '
'type {}.').format(type(time_to_freeze)))
if tick and not _is_cpython:
raise SystemError('Calling freeze_time with tick=True is only compatible with CPython')
if isinstance(time_to_freeze, types.FunctionType):
return freeze_time(time_to_freeze(), tz_offset, ignore, tick, as_arg, as_kwarg, auto_tick_seconds, real_asyncio=real_asyncio)
if isinstance(time_to_freeze, types.GeneratorType):
return freeze_time(next(time_to_freeze), tz_offset, ignore, tick, as_arg, as_kwarg, auto_tick_seconds, real_asyncio=real_asyncio)
if MayaDT is not None and isinstance(time_to_freeze, MayaDT):
return freeze_time(time_to_freeze.datetime(), tz_offset, ignore,
tick, as_arg, as_kwarg, auto_tick_seconds, real_asyncio=real_asyncio)
if ignore is None:
ignore = []
ignore = ignore[:]
if config.settings.default_ignore_list:
ignore.extend(config.settings.default_ignore_list)
return _freeze_time(
time_to_freeze_str=time_to_freeze,
tz_offset=tz_offset,
ignore=ignore,
tick=tick,
as_arg=as_arg,
as_kwarg=as_kwarg,
auto_tick_seconds=auto_tick_seconds,
real_asyncio=real_asyncio,
)
# Setup adapters for sqlite
try:
# noinspection PyUnresolvedReferences
import sqlite3
except ImportError:
# Some systems have trouble with this
pass
else:
# These are copied from Python sqlite3.dbapi2
def adapt_date(val: datetime.date) -> str:
return val.isoformat()
def adapt_datetime(val: datetime.datetime) -> str:
return val.isoformat(" ")
sqlite3.register_adapter(FakeDate, adapt_date)
sqlite3.register_adapter(FakeDatetime, adapt_datetime)
# Setup converters for pymysql
try:
import pymysql.converters
except ImportError:
pass
else:
pymysql.converters.encoders[FakeDate] = pymysql.converters.encoders[real_date]
pymysql.converters.conversions[FakeDate] = pymysql.converters.encoders[real_date]
pymysql.converters.encoders[FakeDatetime] = pymysql.converters.encoders[real_datetime]
pymysql.converters.conversions[FakeDatetime] = pymysql.converters.encoders[real_datetime]
| _freeze_time |
python | crytic__slither | slither/detectors/naming_convention/naming_convention.py | {
"start": 278,
"end": 8860
} | class ____(AbstractDetector):
"""
Check if naming conventions are followed
https://solidity.readthedocs.io/en/v0.4.25/style-guide.html?highlight=naming_convention%20convention#naming_convention-conventions
Exceptions:
- Allow constant variables name/symbol/decimals to be lowercase (ERC20)
- Allow '_' at the beginning of the mixed_case match for private variables and unused parameters
- Ignore echidna properties (functions with names starting 'echidna_' or 'crytic_'
"""
ARGUMENT = "naming-convention"
HELP = "Conformity to Solidity naming conventions"
IMPACT = DetectorClassification.INFORMATIONAL
CONFIDENCE = DetectorClassification.HIGH
LANGUAGE = "solidity"
WIKI = "https://github.com/crytic/slither/wiki/Detector-Documentation#conformance-to-solidity-naming-conventions"
WIKI_TITLE = "Conformance to Solidity naming conventions"
# region wiki_description
WIKI_DESCRIPTION = """
Solidity defines a [naming convention](https://solidity.readthedocs.io/en/v0.4.25/style-guide.html#naming-conventions) that should be followed.
#### Rule exceptions
- Allow constant variable name/symbol/decimals to be lowercase (`ERC20`).
- Allow `_` at the beginning of the `mixed_case` match for private variables and unused parameters."""
# endregion wiki_description
WIKI_RECOMMENDATION = "Follow the Solidity [naming convention](https://solidity.readthedocs.io/en/v0.4.25/style-guide.html#naming-conventions)."
STANDARD_JSON = False
@staticmethod
def is_cap_words(name: str) -> bool:
return re.search("^[A-Z]([A-Za-z0-9]+)?_?$", name) is not None
@staticmethod
def is_immutable_naming(name: str) -> bool:
return re.search("^i_[a-z]([A-Za-z0-9]+)?_?$", name) is not None
@staticmethod
def is_state_naming(name: str) -> bool:
return re.search("^s_[a-z]([A-Za-z0-9]+)?_?$", name) is not None
@staticmethod
def is_mixed_case(name: str) -> bool:
return re.search("^[a-z]([A-Za-z0-9]+)?_?$", name) is not None
@staticmethod
def is_mixed_case_with_underscore(name: str) -> bool:
# Allow _ at the beginning to represent private variable
# or unused parameters
return re.search("^[_]?[a-z]([A-Za-z0-9]+)?_?$", name) is not None
@staticmethod
def is_upper_case_with_underscores(name: str) -> bool:
return re.search("^[A-Z0-9_]+_?$", name) is not None
@staticmethod
def should_avoid_name(name: str) -> bool:
return re.search("^[lOI]$", name) is not None
# pylint: disable=too-many-branches,too-many-statements
def _detect(self) -> List[Output]:
results = []
info: DETECTOR_INFO
for contract in self.contracts:
if not self.is_cap_words(contract.name):
info = ["Contract ", contract, " is not in CapWords\n"]
res = self.generate_result(info)
res.add(contract, {"target": "contract", "convention": "CapWords"})
results.append(res)
for struct in contract.structures_declared:
if not self.is_cap_words(struct.name):
info = ["Struct ", struct, " is not in CapWords\n"]
res = self.generate_result(info)
res.add(struct, {"target": "structure", "convention": "CapWords"})
results.append(res)
for event in contract.events_declared:
if not self.is_cap_words(event.name):
info = ["Event ", event, " is not in CapWords\n"]
res = self.generate_result(info)
res.add(event, {"target": "event", "convention": "CapWords"})
results.append(res)
for func in contract.functions_declared:
if func.is_constructor:
continue
if not self.is_mixed_case(func.name):
if func.visibility in [
"internal",
"private",
] and self.is_mixed_case_with_underscore(func.name):
continue
if func.name.startswith(("echidna_", "crytic_")):
continue
info = ["Function ", func, " is not in mixedCase\n"]
res = self.generate_result(info)
res.add(func, {"target": "function", "convention": "mixedCase"})
results.append(res)
for argument in func.parameters:
# Ignore parameter names that are not specified i.e. empty strings
if argument.name == "":
continue
if argument in func.variables_read_or_written:
correct_naming = self.is_mixed_case(argument.name)
else:
correct_naming = self.is_mixed_case_with_underscore(argument.name)
if not correct_naming:
info = ["Parameter ", argument, " is not in mixedCase\n"]
res = self.generate_result(info)
res.add(argument, {"target": "parameter", "convention": "mixedCase"})
results.append(res)
for var in contract.state_variables_declared:
if self.should_avoid_name(var.name):
info = [
"Variable ",
var,
" is single letter l, O, or I, which should not be used\n",
]
res = self.generate_result(info)
res.add(
var,
{
"target": "variable",
"convention": "l_O_I_should_not_be_used",
},
)
results.append(res)
if var.is_constant is True:
# For ERC20 compatibility
if var.name in ["symbol", "name", "decimals"]:
continue
if var.visibility == "public":
continue
if not self.is_upper_case_with_underscores(var.name):
info = [
"Constant ",
var,
" is not in UPPER_CASE_WITH_UNDERSCORES\n",
]
res = self.generate_result(info)
res.add(
var,
{
"target": "variable_constant",
"convention": "UPPER_CASE_WITH_UNDERSCORES",
},
)
results.append(res)
else:
if var.visibility in ["private", "internal"]:
correct_naming = self.is_mixed_case_with_underscore(
var.name
) or self.is_state_naming(var.name)
if not correct_naming and var.is_immutable:
correct_naming = self.is_immutable_naming(var.name)
else:
correct_naming = self.is_mixed_case(var.name)
if not correct_naming:
info = ["Variable ", var, " is not in mixedCase\n"]
res = self.generate_result(info)
res.add(var, {"target": "variable", "convention": "mixedCase"})
results.append(res)
for enum in contract.enums_declared:
if not self.is_cap_words(enum.name):
info = ["Enum ", enum, " is not in CapWords\n"]
res = self.generate_result(info)
res.add(enum, {"target": "enum", "convention": "CapWords"})
results.append(res)
for modifier in contract.modifiers_declared:
if not self.is_mixed_case(modifier.name):
info = ["Modifier ", modifier, " is not in mixedCase\n"]
res = self.generate_result(info)
res.add(modifier, {"target": "modifier", "convention": "mixedCase"})
results.append(res)
return results
@staticmethod
def _format(slither, result):
custom_format(slither, result)
| NamingConvention |
python | getsentry__sentry | src/sentry/sentry_apps/services/app/model.py | {
"start": 2346,
"end": 3875
} | class ____(RpcModel):
id: int = -1
scope_list: list[str] = Field(default_factory=list)
application_id: int = -1
application: RpcApiApplication | None = None
proxy_user_id: int | None = None # can be null on deletion.
owner_id: int = -1 # relation to an organization
name: str = ""
slug: str = ""
uuid: str = ""
events: list[str] = Field(default_factory=list)
webhook_url: str | None = None
is_alertable: bool = False
is_published: bool = False
is_unpublished: bool = False
is_internal: bool = True
is_publish_request_inprogress: bool = False
status: str = ""
metadata: dict[str, Any] = Field(default_factory=dict)
avatars: list[RpcSentryAppAvatar] = Field(default_factory=list)
def show_auth_info(self, access: Any) -> bool:
encoded_scopes = set({"%s" % scope for scope in list(access.scopes)})
return set(self.scope_list).issubset(encoded_scopes)
def build_signature(self, body: str) -> str:
assert self.application, "Cannot build_signature without an application"
secret = self.application.client_secret
return hmac.new(
key=secret.encode("utf-8"), msg=body.encode("utf-8"), digestmod=sha256
).hexdigest()
# Properties are copied from the sentry app ORM model.
@property
def slug_for_metrics(self) -> str:
if self.is_internal:
return "internal"
if self.is_unpublished:
return "unpublished"
return self.slug
| RpcSentryApp |
python | kamyu104__LeetCode-Solutions | Python/longest-happy-string.py | {
"start": 1144,
"end": 1731
} | class ____(object):
def longestDiverseString(self, a, b, c):
"""
:type a: int
:type b: int
:type c: int
:rtype: str
"""
choices = [[a, 'a'], [b, 'b'], [c, 'c']]
result = []
for _ in xrange(a+b+c):
choices.sort(reverse=True)
for i, (x, c) in enumerate(choices):
if x and result[-2:] != [c, c]:
result.append(c)
choices[i][0] -= 1
break
else:
break
return "".join(result)
| Solution2 |
python | tensorflow__tensorflow | tensorflow/python/distribute/coordinator/fault_tolerance_test_base.py | {
"start": 2182,
"end": 4634
} | class ____(object):
def __init__(self, coordinator):
self.cluster_coord = coordinator
self.strategy = self.cluster_coord.strategy
with self.cluster_coord.strategy.scope():
self.build()
def build(self):
self.w = variables.Variable(
initial_value=random_ops.random_uniform((10, 10)), dtype=dtypes.float32)
self.iterations = variables.Variable(initial_value=0, dtype=dtypes.int32)
# Allow external control to make the model run its train_fn in an infinite
# loop. This allows us to reliably test worker preemption in the middle of
# function execution.
self.do_infinite_step = variables.Variable(False)
self.rebuild_iterators()
def rebuild_iterators(self, use_dataset_fn=True):
if use_dataset_fn:
def dataset_fn():
data = random_ops.random_uniform((10, 10))
dataset = dataset_ops.DatasetV2.from_tensors([data]).repeat()
return dataset
def distribute_dataset_fn():
return self.cluster_coord.strategy.distribute_datasets_from_function(
lambda _: dataset_fn())
self.iterator = iter(
self.cluster_coord.create_per_worker_dataset(distribute_dataset_fn))
self.iterator2 = iter(
self.cluster_coord.create_per_worker_dataset(distribute_dataset_fn))
else:
data = random_ops.random_uniform((10, 10))
dataset = dataset_ops.DatasetV2.from_tensors([data]).repeat()
self.iterator = iter(
self.cluster_coord.create_per_worker_dataset(dataset))
self.iterator2 = iter(
self.cluster_coord.create_per_worker_dataset(dataset))
def _train_fn_internal(self, iterator, iterator2):
x = math_ops.matmul(array_ops.squeeze(next(iterator)), self.w)
x = math_ops.matmul(array_ops.squeeze(next(iterator2)), x)
x = math_ops.matmul(random_ops.random_uniform((10, 10)), x)
self.w.assign_add(x)
@def_function.function
def train_fn(self, iterator, iterator2):
self._train_fn_internal(iterator, iterator2)
while self.do_infinite_step:
self._train_fn_internal(iterator, iterator2)
self.iterations.assign_add(1)
def schedule_training_functions(self, num_steps):
with self.strategy.scope():
for _ in range(num_steps):
self.cluster_coord.schedule(
self.train_fn, args=(self.iterator, self.iterator2))
def join_training_functions(self):
self.do_infinite_step.assign(False)
self.cluster_coord.join()
| Model |
python | tensorflow__tensorflow | tensorflow/python/tpu/tpu_embedding_base.py | {
"start": 1082,
"end": 6084
} | class ____(autotrackable.AutoTrackable):
"""The TPUEmbedding Base class.
This class only contains the basic logic to check the feature config and table
config for the tpu embedding mid level APIs.
"""
def __init__(
self,
feature_config: Union[tpu_embedding_v2_utils.FeatureConfig, Iterable], # pylint:disable=g-bare-generic
optimizer: Optional[tpu_embedding_v2_utils._Optimizer] = None): # pylint:disable=protected-access
"""Creates the TPUEmbeddingBase object."""
self._feature_config = feature_config
self._output_shapes = []
for feature in nest.flatten(feature_config):
self._output_shapes.append(feature.output_shape)
# Set table order here to the order of the first occurrence of the table in
# a feature provided by the user. The order of this struct must be fixed
# to provide the user with deterministic behavior over multiple
# instantiations.
self._table_config = []
for feature in nest.flatten(feature_config):
if feature.table not in self._table_config:
self._table_config.append(feature.table)
# Ensure tables have unique names. Also error check the optimizer as we
# specifically don't do that in the TableConfig class to allow high level
# APIs that are built on this to use strings/other classes to represent
# optimizers (before they are passed to this class).
table_names = []
for i, table in enumerate(self._table_config):
if table.optimizer is None:
# TODO(bfontain) Should we allow some sort of optimizer merging here?
table.optimizer = optimizer
if (table.optimizer is not None and
not isinstance(table.optimizer, tpu_embedding_v2_utils._Optimizer)): # pylint: disable=protected-access
raise ValueError("{} is an unsupported optimizer class. Please pass an "
"instance of one of the optimizer classes under "
"tf.tpu.experimental.embedding.".format(
type(table.optimizer)))
if table.name is None:
table.name = "table_{}".format(i)
if table.name in table_names:
raise ValueError("Tables must have a unique name. "
f"Multiple tables with name {table.name} found.")
table_names.append(table.name)
self._built = False
@property
def embedding_tables(self):
"""Returns a dict of embedding tables, keyed by `TableConfig`."""
raise NotImplementedError
def _create_variables(self, table: tpu_embedding_v2_utils.TableConfig,
trainable: bool) -> Dict[Text, tf_variables.Variable]:
"""Create all variables including table variables and slot variables."""
variable_shape = (table.vocabulary_size, table.dim)
def getter(name, shape, dtype, initializer, trainable):
del shape
# _add_variable_with_custom_getter clears the shape sometimes, so we
# take the global shape from outside the getter.
initial_value = functools.partial(
initializer, variable_shape, dtype=dtype)
return tf_variables.Variable(
name=name,
initial_value=initial_value,
shape=variable_shape,
dtype=dtype,
trainable=trainable)
def variable_creator(name, initializer, trainable=True):
# Use add_variable_with_custom_getter here so that we take advantage of
# the checkpoint loading to allow restore before the variables get
# created which avoids double initialization.
return self._add_variable_with_custom_getter(
name=name,
initializer=initializer,
shape=variable_shape,
dtype=dtypes.float32,
getter=getter,
trainable=trainable)
parameters = variable_creator(
table.name, table.initializer, trainable=trainable)
def slot_creator(name, initializer):
return variable_creator(table.name + "/" + name, initializer, False)
if table.optimizer is not None:
slot_vars = table.optimizer._create_slots(parameters, slot_creator) # pylint: disable=protected-access
else:
slot_vars = {}
slot_vars["parameters"] = parameters
return slot_vars
def _create_variables_and_slots(self):
"""Create variables and slots variables for TPU embeddings."""
raise NotImplementedError
def build(self):
"""Create variables and slots variables for TPU embeddings."""
if self._built:
return
self._variables = self._create_variables_and_slots()
self._built = True
def __call__(self, features: Any, weights: Optional[Any] = None) -> Any:
"""Call the mid level api to do embedding lookup."""
if not self._built:
self.build()
return self.embedding_lookup(features, weights)
def embedding_lookup(self,
features: Any,
weights: Optional[Any] = None) -> Any:
"""Lookup the embedding table using the input features."""
raise NotImplementedError
| TPUEmbeddingBase |
python | nedbat__coveragepy | tests/test_process.py | {
"start": 40303,
"end": 43243
} | class ____(CoverageTest):
"""Tests of sys.excepthook support."""
# TODO: do we need these as process tests if we have test_execfile.py:RunFileTest?
def test_excepthook(self) -> None:
self.make_file(
"excepthook.py",
"""\
import sys
def excepthook(*args):
print('in excepthook')
if maybe == 2:
print('definitely')
sys.excepthook = excepthook
maybe = 1
raise RuntimeError('Error Outside')
""",
)
cov_st, cov_out = self.run_command_status("coverage run excepthook.py")
py_st, py_out = self.run_command_status("python excepthook.py")
assert cov_st == py_st
assert cov_st == 1
assert "in excepthook" in py_out
assert cov_out == py_out
# Read the coverage file and see that excepthook.py has 7 lines
# executed.
data = coverage.CoverageData()
data.read()
print(f"{line_counts(data) = }")
print(f"{data = }")
print("data.lines excepthook.py:", data.lines(os.path.abspath("excepthook.py")))
assert line_counts(data)["excepthook.py"] == 7
@pytest.mark.skipif(
not env.CPYTHON,
reason="non-CPython handles excepthook exits differently, punt for now.",
)
def test_excepthook_exit(self) -> None:
self.make_file(
"excepthook_exit.py",
"""\
import sys
def excepthook(*args):
print('in excepthook')
sys.exit(0)
sys.excepthook = excepthook
raise RuntimeError('Error Outside')
""",
)
cov_st, cov_out = self.run_command_status("coverage run excepthook_exit.py")
py_st, py_out = self.run_command_status("python excepthook_exit.py")
assert cov_st == py_st
assert cov_st == 0
assert py_out == "in excepthook\n"
assert cov_out == py_out
@pytest.mark.skipif(env.PYPY, reason="PyPy handles excepthook throws differently.")
def test_excepthook_throw(self) -> None:
self.make_file(
"excepthook_throw.py",
"""\
import sys
def excepthook(*args):
# Write this message to stderr so that we don't have to deal
# with interleaved stdout/stderr comparisons in the assertions
# in the test.
sys.stderr.write('in excepthook\\n')
raise RuntimeError('Error Inside')
sys.excepthook = excepthook
raise RuntimeError('Error Outside')
""",
)
cov_out = self.run_command("coverage run excepthook_throw.py", status=1)
py_out = self.run_command("python excepthook_throw.py", status=1)
assert "in excepthook" in py_out
assert cov_out == py_out
| ExcepthookTest |
python | scipy__scipy | benchmarks/benchmarks/stats.py | {
"start": 22639,
"end": 22961
} | class ____(Benchmark):
params = [
[1, 2, 3, 8],
[100, 1000, 10000],
]
param_names = ["order", "size"]
def setup(self, order, size):
np.random.random(1234)
self.x = np.random.random(size)
def time_moment(self, order, size):
stats.moment(self.x, order)
| BenchMoment |
python | google__pytype | pytype/vm.py | {
"start": 2096,
"end": 151676
} | class ____:
"""A bytecode VM that generates a cfg as it executes."""
# This class is defined inside VirtualMachine so abstract.py can use it.
class VirtualMachineRecursionError(Exception):
pass
def __init__(self, ctx):
"""Construct a TypegraphVirtualMachine."""
self.ctx = ctx # context.Context
# The call stack of frames.
self.frames: list[frame_state.Frame] = []
# The current frame.
self.frame: frame_state.Frame = None
# A map from names to the late annotations that depend on them. Every
# LateAnnotation depends on a single undefined name, so once that name is
# defined, we immediately resolve the annotation.
self.late_annotations: dict[str, list[abstract.LateAnnotation]] = (
collections.defaultdict(list)
)
# Memoize which overlays are loaded.
self.loaded_overlays: dict[str, overlay_lib.Overlay | None] = {}
self.has_unknown_wildcard_imports: bool = False
# pyformat: disable
self.opcode_traces: list[tuple[
opcodes.Opcode | None,
Any,
tuple[list[abstract.BaseValue] | None, ...]
]] = []
# pyformat: enable
# Store the ordered bytecode after all preprocessing is done
self.block_graph = None
# Track the order of creation of local vars, for attrs and dataclasses.
self.local_ops: dict[str, list[LocalOp]] = {}
# Record the annotated and original values of locals.
self.annotated_locals: dict[str, dict[str, abstract_utils.Local]] = {}
self.filename: str = None
self.functions_type_params_check: list[
tuple[abstract.InterpreterFunction, opcodes.Opcode]
] = []
self._maximum_depth = None # set by run_program() and analyze()
self._director: directors.Director = None
self._analyzing = False # Are we in self.analyze()?
self._importing = False # Are we importing another file?
self._trace_opcodes = True # whether to trace opcodes
# If set, we will generate LateAnnotations with this stack rather than
# logging name errors.
self._late_annotations_stack = None
# Mapping of Variables to python variable names. {id: int -> name: str}
# Note that we don't need to scope this to the frame because we don't reuse
# variable ids.
self._var_names = {}
self._branch_tracker: pattern_matching.BranchTracker = None
# Locals attached to the block graph
self.block_env = block_environment.Environment()
# Function kwnames are stored in the vm by KW_NAMES and retrieved by CALL
self._kw_names = ()
# Cache for _import_module.
self._imported_modules_cache = {}
@property
def current_local_ops(self):
return self.local_ops[self.frame.f_code.name]
@property
def current_annotated_locals(self):
return self.annotated_locals[self.frame.f_code.name]
@property
def current_opcode(self) -> opcodes.Opcode | None:
return self.frame and self.frame.current_opcode
@property
def current_line(self) -> int | None:
current_opcode = self.current_opcode
return current_opcode and current_opcode.line
@contextlib.contextmanager
def _suppress_opcode_tracing(self):
old_trace_opcodes = self._trace_opcodes
self._trace_opcodes = False
try:
yield
finally:
self._trace_opcodes = old_trace_opcodes
@contextlib.contextmanager
def generate_late_annotations(self, stack):
old_late_annotations_stack = self._late_annotations_stack
self._late_annotations_stack = stack
try:
yield
finally:
self._late_annotations_stack = old_late_annotations_stack
def trace_opcode(self, op, symbol, val):
"""Record trace data for other tools to use."""
if not self._trace_opcodes:
return
if self.frame and not op:
op = self.frame.current_opcode
if not op:
# If we don't have a current opcode, don't emit a trace.
return
def get_data(v):
data = getattr(v, "data", None)
# Sometimes v is a binding.
return [data] if data and not isinstance(data, list) else data
if isinstance(val, tuple):
assert val
data = tuple(get_data(v) for v in val)
else:
data = (get_data(val),)
rec = (op, symbol, data)
self.opcode_traces.append(rec)
def remaining_depth(self):
assert self._maximum_depth is not None
return self._maximum_depth - len(self.frames)
def is_at_maximum_depth(self):
return len(self.frames) > self._maximum_depth
def _is_match_case_op(self, op):
"""Should we handle case matching for this opcode."""
# A case statement generates multiple opcodes on the same line. Since the
# director matches on line numbers, we only trigger the case handler on a
# specific opcode (which varies depending on the type of match)
opname = op.__class__.__name__
# Opcodes generated by class and sequence matches.
is_match = opname.startswith("MATCH_")
# Opcodes generated by various matches against constant literals.
is_cmp_match = opname in ("COMPARE_OP", "IS_OP", "CONTAINS_OP")
is_none_match = opname in (
"POP_JUMP_FORWARD_IF_NOT_NONE", # 3.11
"POP_JUMP_IF_NOT_NONE", # 3.12
)
# `case _:` (match not captured) generates a NOP.
# `case _ as x:` (match captured) generates a STORE_FAST. (In 3.11 it also
# generates other opcodes. We ignore them.) The match itself does not
# generate any specific opcode, just stack manipulations.
is_default_match = opname == "NOP" or (
isinstance(op, opcodes.STORE_FAST)
and op.line in self._branch_tracker.matches.defaults
)
return is_match or is_cmp_match or is_default_match or is_none_match
def _handle_match_case(self, state, op):
"""Track type narrowing and default cases in a match statement."""
if not self._is_match_case_op(op):
return state
if op.line in self._branch_tracker.matches.defaults:
node_label = "MatchDefault"
self._branch_tracker.add_default_branch(op)
else:
node_label = "MatchCase"
type_trackers = self._branch_tracker.get_current_type_trackers(op)
if not type_trackers:
return state
for type_tracker in type_trackers:
match_var = type_tracker.match_var
name = self._var_names.get(match_var.id)
if name and not isinstance(op, opcodes.MATCH_CLASS):
# The match statement generates a linear "main path" through the cfg
# (since it checks every branch sequentially), so if we have MATCH_CLASS
# branches we narrow the type as we progress. Since MATCH_CLASS
# positively narrows the type within its own branch, this negatively
# narrowed type only applies to non-class-match branches.
state = state.forward_cfg_node(node_label)
obj_var = type_tracker.get_narrowed_match_var(state.node)
state = self._store_local_or_cellvar(state, name, obj_var)
return state
@functools.cached_property
def _typings_type_var(self):
return typing_overlay.TypeVar.make(self.ctx, None).to_variable(
self.ctx.root_node
)
@functools.cached_property
def _typings_paramspec(self):
return typing_overlay.ParamSpec.make(self.ctx, None).to_variable(
self.ctx.root_node
)
@functools.cached_property
def _typings_generic(self):
return typing_overlay.Generic("Generic", self.ctx).to_variable(
self.ctx.root_node
)
def run_instruction(
self, op: opcodes.Opcode, state: frame_state.FrameState
) -> frame_state.FrameState:
"""Run a single bytecode instruction.
Args:
op: An opcode.
state: The state just before running this instruction.
Returns:
The state right after this instruction that should roll over to the
subsequent instruction. If this opcode aborts this function (e.g. through
a 'raise'), then the state's "why" attribute is set to the abort reason.
Raises:
VirtualMachineError: if a fatal error occurs.
"""
_opcode_counter.inc(op.name)
self.frame.current_opcode = op
self._importing = "IMPORT" in op.__class__.__name__
if log.isEnabledFor(logging.INFO):
vm_utils.log_opcode(op, state, self.frame, len(self.frames))
# Track type and enum case narrowing in match statements (we need to do this
# before we run the opcode).
if op.line in self._branch_tracker.matches.match_cases:
state = self._handle_match_case(state, op)
# dispatch
bytecode_fn = getattr(self, f"byte_{op.name}", None)
if bytecode_fn is None:
raise VirtualMachineError(f"Unknown opcode: {op.name}")
state = bytecode_fn(state, op)
if state.why in ("reraise", "Never"):
state = state.set_why("exception")
implicit_return = (
op.name in ("RETURN_VALUE", "RETURN_CONST")
and op.line not in self._director.return_lines
)
if len(self.frames) <= 2:
# We do exhaustiveness checking only when doing a top-level analysis of
# the match code.
for err in self._branch_tracker.check_ending(op, implicit_return):
self.ctx.errorlog.incomplete_match(self.frames, err.line, err.cases)
self.frame.current_opcode = None
return state
def _run_frame_blocks(self, frame, node, annotated_locals):
"""Runs a frame's code blocks."""
frame.states[frame.f_code.get_first_opcode()] = frame_state.FrameState.init(
node, self.ctx
)
frame_name = frame.f_code.name
if frame_name not in self.local_ops or frame_name != "<module>":
# abstract_utils.eval_expr creates a temporary frame called "<module>". We
# don't care to track locals for this frame and don't want it to overwrite
# the locals of the actual module frame.
self.local_ops[frame_name] = []
self.annotated_locals[frame_name] = annotated_locals or {}
else:
assert annotated_locals is None
can_return = False
return_nodes = []
finally_tracker = vm_utils.FinallyStateTracker()
process_blocks.adjust_returns(frame.f_code, self._director.block_returns)
for block in frame.f_code.order:
state = frame.states.get(block[0])
if not state:
log.warning("Skipping block %d, nothing connects to it.", block.id)
continue
self.block_env.add_block(frame, block)
self.frame.current_block = block
op = None
for op in block:
state = self.run_instruction(op, state)
# Check if we have to carry forward the return state from an except
# block to the END_FINALLY opcode.
new_why = finally_tracker.process(op, state, self.ctx)
if new_why:
state = state.set_why(new_why)
if state.why:
# we can't process this block any further
break
assert op
if state.why:
# If we raise an exception or return in an except block do not
# execute any target blocks it has added.
if finally_tracker.check_early_exit(state):
m_frame = self.frame
assert m_frame is not None
for target in m_frame.targets[block.id]:
del frame.states[target]
self.block_env.mark_dead_end(block)
# return, raise, or yield. Leave the current frame.
can_return |= state.why in ("return", "yield")
return_nodes.append(state.node)
elif op.carry_on_to_next():
# We're starting a new block, so start a new CFG node. We don't want
# nodes to overlap the boundary of blocks.
state = state.forward_cfg_node("NewBlock")
frame.states[op.next] = state.merge_into(frame.states.get(op.next))
vm_utils.update_excluded_types(node, self.ctx)
return can_return, return_nodes
def run_frame(self, frame, node, annotated_locals=None):
"""Run a frame (typically belonging to a method)."""
self.push_frame(frame)
try:
can_return, return_nodes = self._run_frame_blocks(
frame, node, annotated_locals
)
finally:
self.pop_frame(frame)
if not return_nodes:
# Happens if the function never returns. (E.g. an infinite loop)
assert not frame.return_variable.bindings
frame.return_variable.AddBinding(self.ctx.convert.unsolvable, [], node)
else:
node = self.ctx.join_cfg_nodes(return_nodes)
if not can_return:
assert not frame.return_variable.bindings
# We purposely don't check Never against this function's
# annotated return type. Raising an error in an unimplemented function
# and documenting the intended return type in an annotation is a
# common pattern.
self._set_frame_return(
node, frame, self.ctx.convert.never.to_variable(node)
)
return node, frame.return_variable
def push_frame(self, frame):
self.frames.append(frame)
self.frame = frame
def pop_frame(self, frame):
popped_frame = self.frames.pop()
assert popped_frame == frame
if self.frames:
self.frame = self.frames[-1]
else:
self.frame = None
def _call(
self, state, obj, method_name, args
) -> tuple[frame_state.FrameState, cfg.Variable]:
state, method = self.load_attr(state, obj, method_name)
return self.call_function_with_state(state, method, args)
def make_frame(
self,
node,
code,
f_globals,
f_locals,
callargs=None,
closure=None,
new_locals=False,
func=None,
first_arg=None,
substs=(),
):
"""Create a new frame object, using the given args, globals and locals."""
if any(code is f.f_code for f in self.frames):
log.info("Detected recursion in %s", code.name or code.filename)
raise self.VirtualMachineRecursionError()
log.info(
"make_frame: callargs=%s, f_globals=[%s@%x], f_locals=[%s@%x]",
vm_utils.repper(callargs),
type(f_globals).__name__,
id(f_globals),
type(f_locals).__name__,
id(f_locals),
)
# Implement NEWLOCALS flag. See Objects/frameobject.c in CPython.
# (Also allow to override this with a parameter, Python 3 doesn't always set
# it to the right value, e.g. for class-level code.)
if code.has_newlocals() or new_locals:
f_locals = abstract.LazyConcreteDict("locals", {}, self.ctx)
return frame_state.Frame(
node,
self.ctx,
code,
f_globals,
f_locals,
self.frame,
callargs or {},
closure,
func,
first_arg,
substs,
)
def simple_stack(self, opcode=None):
"""Get a stack of simple frames.
Args:
opcode: Optionally, an opcode to create a stack for.
Returns:
If an opcode is provided, a stack with a single frame at that opcode.
Otherwise, the VM's current stack converted to simple frames.
"""
if opcode is not None:
return (frame_state.SimpleFrame(opcode),)
elif self.frame:
# Simple stacks are used for things like late annotations, which don't
# need tracebacks in their errors, so we convert just the current frame.
return (frame_state.SimpleFrame(self.frame.current_opcode),)
else:
return ()
def stack(self, func=None):
"""Get a frame stack for the given function for error reporting."""
if (
isinstance(func, abstract.INTERPRETER_FUNCTION_TYPES)
and not self.current_opcode
):
return self.simple_stack(func.get_first_opcode())
else:
return self.frames
def push_abstract_exception(self, state):
"""Push an exception onto the data stack."""
if self.ctx.python_version >= (3, 11):
# In 3.11+, exceptions are represented as one item rather than the three
# items in 3.10-. Additionally, this item is used only for pytype's
# internal bookkeeping, so it can be Any.
state = state.push(self.ctx.new_unsolvable(state.node))
else:
# I have no idea why we need to push the exception twice! See
# test_exceptions.TestExceptions.test_reuse_name for a test that fails if
# we don't do this.
for _ in range(2):
tb = self.ctx.convert.build_list(state.node, [])
value = self.ctx.convert.create_new_unknown(state.node)
exctype = self.ctx.convert.create_new_unknown(state.node)
state = state.push(tb, value, exctype)
return state
def pop_abstract_exception(self, state):
# We don't push the special except-handler block, so we don't need to
# pop it, either.
if self.ctx.python_version >= (3, 11):
state, _ = state.pop()
else:
state, _ = state.popn(3)
return state
def resume_frame(self, node, frame):
frame.f_back = self.frame
log.info("resume_frame: %r", frame)
node, val = self.run_frame(frame, node)
frame.f_back = None
return node, val
def compile_src(
self, src, filename=None, mode="exec", store_blockgraph=False
) -> blocks.OrderedCode:
"""Compile the given source code."""
code = pyc.compile_src(
src,
python_version=self.ctx.python_version,
python_exe=self.ctx.options.python_exe,
filename=filename,
mode=mode,
)
code, block_graph = blocks.process_code(code)
if store_blockgraph:
self.block_graph = block_graph
return code
def run_bytecode(self, node, code, f_globals=None, f_locals=None):
"""Run the given bytecode."""
if f_globals is not None:
assert f_locals
else:
assert not self.frames
assert f_locals is None
# __name__, __doc__, and __package__ are unused placeholder values.
f_globals = f_locals = abstract.LazyConcreteDict(
"globals",
{
"__builtins__": self.ctx.loader.builtins,
"__name__": "__main__",
"__file__": code.filename,
"__doc__": None,
"__package__": None,
},
self.ctx,
)
# __name__ is retrieved by class bodies. So make sure that it's preloaded,
# otherwise we won't properly cache the first class initialization.
f_globals.load_lazy_attribute("__name__")
frame = self.make_frame(node, code, f_globals=f_globals, f_locals=f_locals)
node, return_var = self.run_frame(frame, node)
return node, frame.f_globals, frame.f_locals, return_var
def run_program(self, src, filename, maximum_depth):
"""Run the code and return the CFG nodes.
Args:
src: The program source code.
filename: The filename the source is from.
maximum_depth: Maximum depth to follow call chains.
Returns:
A tuple (CFGNode, set) containing the last CFGNode of the program as
well as all the top-level names defined by it.
"""
self.filename = filename
self._maximum_depth = maximum_depth
src = preprocess.augment_annotations(src)
src_tree = directors.parse_src(src, self.ctx.python_version)
code = self.compile_src(src, filename=filename, store_blockgraph=True)
# In Python 3.8+, opcodes are consistently at the first line of the
# corresponding source code. Before 3.8, they are on one of the last lines
# but the exact positioning is unpredictable, so we pass the bytecode to the
# director to make adjustments based on the opcodes' observed line numbers.
director = directors.Director(
src_tree, self.ctx.errorlog, filename, self.ctx.options.disable
)
# This modifies the errorlog passed to the constructor. Kind of ugly,
# but there isn't a better way to wire both pieces together.
self.ctx.errorlog.set_error_filter(director.filter_error)
self._director = director
self.ctx.options.set_feature_flags(director.features)
self._branch_tracker = pattern_matching.BranchTracker(
director.matches, self.ctx
)
code = process_blocks.merge_annotations(
code, self._director.annotations, self._director.param_annotations
)
visitor = vm_utils.FindIgnoredTypeComments(self._director.type_comments)
pyc.visit(code, visitor)
for line in visitor.ignored_lines():
self.ctx.errorlog.ignored_type_comment(
self.filename, line, self._director.type_comments[line]
)
if self.ctx.options.debug_constant_folding:
before = _bytecode_to_string(code)
code = constant_folding.fold_constants(code)
after = _bytecode_to_string(code)
print(
"\n".join(
difflib.unified_diff(before.splitlines(), after.splitlines())
)
)
else:
code = constant_folding.fold_constants(code)
process_blocks.adjust_returns(code, self._director.block_returns)
node, f_globals, f_locals, _ = self.run_bytecode(self.ctx.root_node, code)
logging.info("Done running bytecode, postprocessing globals")
for annot in itertools.chain.from_iterable(self.late_annotations.values()):
# If `annot` has already been resolved, this is a no-op. Otherwise, it
# contains a real name error that will be logged when we resolve it now.
annot.resolve(node, f_globals, f_locals)
self.flatten_late_annotation(node, annot, f_globals)
self.late_annotations = None # prevent adding unresolvable annotations
assert not self.frames, "Frames left over!"
log.info("Final node: <%d>%s", node.id, node.name)
return node, f_globals.members
def flatten_late_annotation(self, node, annot, f_globals):
flattened_expr = annot.flatten_expr()
if flattened_expr != annot.expr:
annot.expr = flattened_expr
f_globals.members[flattened_expr] = annot.to_variable(node)
def set_var_name(self, var, name):
self._var_names[var.id] = name
def get_var_name(self, var):
"""Get the python variable name corresponding to a Variable."""
# Variables in _var_names correspond to LOAD_* opcodes, which means they
# have been retrieved from a symbol table like locals() directly by name.
if var.id in self._var_names:
return self._var_names[var.id]
# Look through the source set of a variable's bindings to find the variable
# created by a LOAD operation. If a variable has multiple sources, don't try
# to match it to a name.
sources = set()
for b in var.bindings:
for o in b.origins:
for s in o.source_sets:
sources |= s
names = {self._var_names.get(s.variable.id) for s in sources}
return next(iter(names)) if len(names) == 1 else None
def get_all_named_vars(self):
# Make a shallow copy of the dict so callers aren't touching internal data.
return dict(self._var_names)
def binary_operator(self, state, name, report_errors=True):
state, (x, y) = state.popn(2)
with self._suppress_opcode_tracing(): # don't trace the magic method call
state, ret = vm_utils.call_binary_operator(
state, name, x, y, report_errors=report_errors, ctx=self.ctx
)
self.trace_opcode(None, name, ret)
return state.push(ret)
def inplace_operator(self, state, name):
state, (x, y) = state.popn(2)
state, ret = vm_utils.call_inplace_operator(state, name, x, y, self.ctx)
return state.push(ret)
def trace_unknown(self, *args):
"""Fired whenever we create a variable containing 'Unknown'."""
return NotImplemented
def trace_call(self, *args):
"""Fired whenever we call a builtin using unknown parameters."""
return NotImplemented
def trace_functiondef(self, *args):
return NotImplemented
def trace_classdef(self, *args):
return NotImplemented
def call_init(self, node, unused_instance):
# This dummy implementation is overwritten in tracer_vm.py.
return node
def init_class(self, node, cls, container=None, extra_key=None):
# This dummy implementation is overwritten in tracer_vm.py.
del cls, container, extra_key
return NotImplemented
def call_function_with_state(
self,
state: frame_state.FrameState,
funcv: cfg.Variable,
posargs: tuple[cfg.Variable, ...],
namedargs: dict[str, cfg.Variable] | None = None,
starargs: cfg.Variable | None = None,
starstarargs: cfg.Variable | None = None,
fallback_to_unsolvable: bool = True,
):
"""Call a function with the given state."""
assert starargs is None or isinstance(starargs, cfg.Variable)
assert starstarargs is None or isinstance(starstarargs, cfg.Variable)
args = function.Args(
posargs=posargs,
namedargs=namedargs,
starargs=starargs,
starstarargs=starstarargs,
)
node, ret = function.call_function(
self.ctx,
state.node,
funcv,
args,
fallback_to_unsolvable,
allow_never=True,
)
if ret.data == [self.ctx.convert.never]:
state = state.set_why("Never")
state = state.change_cfg_node(node)
if len(funcv.data) == 1:
# Check for test assertions that narrow the type of a variable.
state = self._check_test_assert(state, funcv, posargs)
return state, ret
def call_with_fake_args(self, node0, funcv):
"""Attempt to call the given function with made-up arguments."""
return node0, self.ctx.new_unsolvable(node0)
@contextlib.contextmanager
def _reset_overloads(self, func):
with contextlib.ExitStack() as stack:
for f in func.data:
if isinstance(f, abstract.INTERPRETER_FUNCTION_TYPES):
stack.enter_context(f.reset_overloads())
yield
def _call_function_from_stack_helper(
self, state, funcv, posargs, namedargs, starargs, starstarargs
):
"""Helper for call_function_from_stack."""
for f in funcv.data:
if isinstance(f, abstract.Function):
if "typing.dataclass_transform" in f.decorators:
func = dataclass_overlay.Dataclass.transform(self.ctx, f)
funcv_to_call = func.to_variable(state.node)
break
else:
funcv_to_call = funcv
with self._reset_overloads(funcv):
state, ret = self.call_function_with_state(
state, funcv_to_call, posargs, namedargs, starargs, starstarargs
)
return state.push(ret)
def call_function_from_stack(self, state, num, starargs, starstarargs):
"""Pop arguments for a function and call it."""
namedargs = {}
def set_named_arg(node, key, val):
# If we have no bindings for val, fall back to unsolvable.
# See test_closures.ClosuresTest.test_undefined_var
namedargs[key] = val if val.bindings else self.ctx.new_unsolvable(node)
state, args = state.popn(num)
if starstarargs:
kwnames = abstract_utils.get_atomic_python_constant(starstarargs, tuple)
n = len(args) - len(kwnames)
for key, arg in zip(kwnames, args[n:]):
key = self.ctx.convert.value_to_constant(key.data[0], str)
set_named_arg(state.node, key, arg)
posargs = args[0:n]
starstarargs = None
else:
posargs = args
state, func = state.pop()
return self._call_function_from_stack_helper(
state, func, posargs, namedargs, starargs, starstarargs
)
  def call_function_from_stack_311(self, state, num):
    """Pop arguments for a function and call it (3.11+ calling convention).

    Args:
      state: The current VM state.
      num: Number of arguments on the stack.

    Returns:
      The state with the call's return value pushed.
    """
    # We need a separate version of call_function_from_stack for 3.11+
    # Stack top is either
    #   function: [NULL, function, num * arg]
    #   method:   [method, self, num * arg]
    m = state.peek(num + 2)
    is_meth = not (m.data and isinstance(m.data[0], abstract.Null))
    if is_meth:
      # For a bound-method call, `self` counts as an extra positional arg.
      num += 1
    state, args = state.popn(num)
    state, func = state.pop()
    if not is_meth:
      state = state.pop_and_discard()  # pop off the NULL
    if self._kw_names:
      # KW_NAMES told us the names of the trailing keyword arguments.
      n_kw = len(self._kw_names)
      posargs = args[:-n_kw]
      kw_vals = args[-n_kw:]
      namedargs = dict(zip(self._kw_names, kw_vals))
    else:
      posargs = args
      namedargs = {}
    starargs = starstarargs = None
    self._kw_names = ()  # consume the names so later calls are unaffected
    return self._call_function_from_stack_helper(
        state, func, posargs, namedargs, starargs, starstarargs
    )
def get_globals_dict(self):
"""Get a real python dict of the globals."""
return self.frame.f_globals
  def load_from(
      self,
      state: frame_state.FrameState,
      store: abstract.LazyConcreteDict,
      name: str,
      discard_concrete_values: bool = False,
  ) -> tuple[frame_state.FrameState, cfg.Variable]:
    """Load an item out of locals, globals, or builtins.

    Args:
      state: The current VM state.
      store: The lazy member store to look the name up in.
      name: The name to load.
      discard_concrete_values: Whether to replace concrete constants (other
        than strings) with abstract instances of their types.

    Returns:
      A tuple of the (possibly advanced) state and the loaded variable.

    Raises:
      KeyError: If the name is not present in the store (via
        _load_annotation, which raises when no annotation exists either).
    """
    if isinstance(store, mixin.LazyMembers):
      store.load_lazy_attribute(name)
      try:
        member = store.members[name]
      except KeyError:
        # No value; fall back to an annotation-only entry, if any.
        return state, self._load_annotation(state.node, name, store)
    else:
      # The only non-lazy store we expect here is the unsolvable sentinel.
      assert store == self.ctx.convert.unsolvable
      return state, self.ctx.new_unsolvable(state.node)
    bindings = member.Bindings(state.node)
    if (
        not bindings
        and self._late_annotations_stack
        and member.bindings
        and all(isinstance(v, abstract.Module) for v in member.data)
    ):
      # Hack: Evaluation of late annotations may create new nodes not within the
      # normal program flow, causing imports to not be visible, so we pretend
      # that modules are always visible.
      bindings = member.bindings
    if not bindings:
      return state, self._load_annotation(state.node, name, store)
    ret = self.ctx.program.NewVariable()
    # Filter out false-positive None bindings before exposing the value.
    self._filter_none_and_paste_bindings(
        state.node,
        bindings,
        ret,
        discard_concrete_values=discard_concrete_values,
    )
    self.set_var_name(ret, name)
    return state, ret
  def load_local(self, state, name):
    """Called when a local is loaded onto the stack.

    Uses the name to retrieve the value from the current locals().

    Args:
      state: The current VM state.
      name: Name of the local

    Returns:
      A tuple of the state and the value (cfg.Variable)

    Raises:
      KeyError: If the name is determined to be undefined
    """
    var = self.block_env.get_local(self.frame.current_block, name)
    # When the block cfg code is more complete, we can simply create a new
    # variable at the current node with var's bindings and return that. For now,
    # we just use this as a reachability check to make sure `name` is defined in
    # every path through the code.
    if (
        self.ctx.options.strict_undefined_checks
        and self.ctx.python_version >= (3, 10)
        and not var
    ):
      # `name` is unreachable on some path; treat it as undefined.
      raise KeyError()
    return self.load_from(state, self.frame.f_locals, name)
def load_global(self, state, name):
# The concrete value of typing.TYPE_CHECKING should be preserved; otherwise,
# concrete values are converted to abstract instances of their types, as we
# generally can't assume that globals are constant.
return self.load_from(
state,
self.frame.f_globals,
name,
discard_concrete_values=name != "TYPE_CHECKING",
)
def load_special_builtin(self, name):
if name == "__any_object__":
# For type_inferencer/tests/test_pgms/*.py, must be a new object
# each time.
return abstract.Unknown(self.ctx)
else:
return self.ctx.special_builtins.get(name)
def load_builtin(self, state, name):
if name == "__undefined__":
# For values that don't exist. (Unlike None, which is a valid object)
return state, self.ctx.convert.empty.to_variable(self.ctx.root_node)
special = self.load_special_builtin(name)
if special:
return state, special.to_variable(state.node)
else:
return self.load_from(state, self.frame.f_builtins, name)
def load_constant(self, state, op, raw_const):
const = self.ctx.convert.constant_to_var(raw_const, node=state.node)
self.trace_opcode(op, raw_const, const)
return state.push(const)
  def _load_annotation(self, node, name, store):
    """Load `name` from the store's annotations, instantiating its type.

    Used when a name has an annotation (e.g. `x: int`) but no assigned value.
    On success the instantiated value is cached back into the store's members.

    Raises:
      KeyError: If `name` has no annotation in the store.
    """
    annots = abstract_utils.get_annotations_dict(store.members)
    if annots:
      typ = annots.get_type(node, name)
      if typ:
        # Create an instance of the annotated type to serve as the value.
        _, ret = self.ctx.annotation_utils.init_annotation(node, name, typ)
        store.members[name] = ret
        self.set_var_name(ret, name)
        return ret
    raise KeyError(name)
  def _record_local(self, node, op, name, typ, orig_val=None, final=None):
    """Record a type annotation on a local variable.

    This method records three types of local operations:
      - An annotation, e.g., `x: int`. In this case, `typ` is PyTDClass(int) and
        `orig_val` is None.
      - An assignment, e.g., `x = 0`. In this case, `typ` is None and `orig_val`
        is Instance(int).
      - An annotated assignment, e.g., `x: int = None`. In this case, `typ` is
        PyTDClass(int) and `orig_val` is Instance(None).

    Args:
      node: The current node.
      op: The current opcode.
      name: The variable name.
      typ: The annotation.
      orig_val: The original value, if any.
      final: Whether the annotation is tagged Final (None to preserve any
        existing Final tag when updating an existing annotation).
    """
    if orig_val:
      self.current_local_ops.append(LocalOp(name, LocalOp.Op.ASSIGN))
    if typ:
      self.current_local_ops.append(LocalOp(name, LocalOp.Op.ANNOTATE))
    # Fold the assignment and/or annotation into a single Local entry for
    # `name` in the current frame's annotated locals.
    self._update_annotations_dict(
        node,
        op,
        name,
        typ,
        orig_val,
        self.current_annotated_locals,
        final=final,
    )
def _update_annotations_dict(
self, node, op, name, typ, orig_val, annotations_dict, final=None
):
if name in annotations_dict:
annotations_dict[name].update(node, op, typ, orig_val)
else:
annotations_dict[name] = abstract_utils.Local(
node, op, typ, orig_val, self.ctx
)
if final is not None:
annotations_dict[name].final = final
  def _store_value(self, state, name, value, local):
    """Store 'value' under 'name'.

    Args:
      state: The current VM state.
      name: The name to bind.
      value: The cfg.Variable to store.
      local: If True, store into f_locals; otherwise into f_globals.

    Returns:
      The state advanced to the node produced by the attribute write.
    """
    m_frame = self.frame
    assert m_frame is not None
    if local:
      # Track the assignment in the block environment for reachability checks.
      self.block_env.store_local(self.frame.current_block, name, value)
      target = m_frame.f_locals
    else:
      target = m_frame.f_globals
    node = self.ctx.attribute_handler.set_attribute(
        state.node, target, name, value
    )
    if target is m_frame.f_globals and self.late_annotations:
      # Storing a global may make a pending late annotation resolvable.
      # We sort the annotations so that a parameterized class's base class is
      # resolved before the parameterized class itself.
      for annot in sorted(self.late_annotations[name], key=lambda t: t.expr):
        annot.resolve(node, m_frame.f_globals, m_frame.f_locals)
    return state.change_cfg_node(node)
def store_local(self, state, name, value):
"""Called when a local is written."""
return self._store_value(state, name, value, local=True)
  def _process_annotations(self, node, name, value):
    """Process any type annotations in the named value.

    If every binding of `value` is a nested annotation (e.g. a parameterized
    type expression), extract the annotation and return it as a variable;
    otherwise return `value` unchanged.
    """
    if not value.data or any(
        not isinstance(v, mixin.NestedAnnotation) for v in value.data
    ):
      return value
    stack = self.simple_stack()
    typ = self.ctx.annotation_utils.extract_annotation(node, value, name, stack)
    return typ.to_variable(node)
  def _apply_annotation(
      self, state, op, name, orig_val, annotations_dict, check_types
  ):
    """Applies the type annotation, if any, associated with this object.

    Args:
      state: The current VM state.
      op: The current opcode.
      name: The name being assigned to.
      orig_val: The value being assigned.
      annotations_dict: The annotations dict to record/read annotations from,
        or None to skip annotation tracking.
      check_types: Whether to report Final violations and annotation/value
        type mismatches.

    Returns:
      The value to store: the original value, or one derived from the
      annotation (e.g. Any for an Any-annotated name).
    """
    ann = self.ctx.annotation_utils.apply_annotation(
        state.node, op, name, orig_val
    )
    typ, value = ann.typ, ann.value
    final_violation = False
    local = False
    if annotations_dict is not None:
      # If we are assigning to a member that is in the class annotation dict as
      # Final, don't raise an error if we are simply analysing the same method
      # repeatedly and have hit the STORE_ opcode a second time.
      final_violation = (
          name in annotations_dict
          and annotations_dict[name].final
          and op != annotations_dict[name].last_update_op
      )
      if annotations_dict is self.current_annotated_locals:
        local = True
        self._record_local(state.node, op, name, typ, orig_val, ann.final)
      elif name not in annotations_dict or not annotations_dict[name].typ:
        # When updating non-local annotations, we only record the first one
        # encountered so that if, say, an instance attribute is annotated in
        # both __init__ and another method, the __init__ annotation is used.
        self._update_annotations_dict(
            state.node, op, name, typ, orig_val, annotations_dict, ann.final
        )
      if typ is None and name in annotations_dict:
        # Fall back to a previously recorded annotation for this name.
        typ = annotations_dict[name].get_type(state.node, name)
    if typ == self.ctx.convert.unsolvable:
      # An Any annotation can be used to essentially turn off inference in
      # cases where it is causing false positives or other issues.
      value = self.ctx.new_unsolvable(state.node)
    if check_types:
      if final_violation:
        self.ctx.errorlog.assigning_to_final(self.frames, name, local)
      else:
        self.ctx.check_annotation_type_mismatch(
            state.node, name, typ, orig_val, self.frames, allow_none=True
        )
    return value
def _get_value_from_annotations(self, state, op, name, local, orig_val):
annotations_dict = self.current_annotated_locals if local else None
value = self._apply_annotation(
state, op, name, orig_val, annotations_dict, check_types=True
)
value = self._process_annotations(state.node, name, value)
return value
  def _pop_and_store(self, state, op, name, local):
    """Pop a value off the stack and store it in a variable.

    Args:
      state: The current VM state.
      op: The current opcode.
      name: The name to bind.
      local: Whether to store into locals (True) or globals (False).

    Returns:
      The updated state.
    """
    state, orig_val = state.pop()
    if self._branch_tracker.is_current_as_name(
        op, name
    ) and self._branch_tracker.get_current_type_tracker(op, orig_val):
      # If we are storing the as name in a case match, i.e.
      #   case <class-expr> as <name>:
      # we need to store the type of <class-expr>, not of the original match
      # object (due to the way match statements are compiled into bytecode, the
      # match object will be on the stack and retrieved as orig_val)
      value = self._branch_tracker.instantiate_case_var(
          op, orig_val, state.node
      )
    else:
      # Normal store: run the value through any annotation on `name`.
      value = self._get_value_from_annotations(state, op, name, local, orig_val)
    state = state.forward_cfg_node(f"Store:{name}")
    state = self._store_value(state, name, value, local)
    self.trace_opcode(op, name, value)
    return state
def _del_name(self, op, state, name, local):
"""Called when a local or global is deleted."""
value = abstract.Deleted(op.line, self.ctx).to_variable(state.node)
state = state.forward_cfg_node(f"Del:{name}")
state = self._store_value(state, name, value, local)
self.trace_opcode(op, name, value)
return state
  def _retrieve_attr(
      self, node: cfg.CFGNode, obj: cfg.Variable, attr: str
  ) -> tuple[cfg.CFGNode, cfg.Variable | None, list[cfg.Binding]]:
    """Load an attribute from an object.

    Args:
      node: The current CFG node.
      obj: The object to load the attribute from.
      attr: The attribute name.

    Returns:
      A (node, result, errors) tuple: the node after the lookup, the combined
      attribute variable (None if no binding had the attribute), and the list
      of bindings of `obj` on which the attribute was missing.
    """
    if (
        attr == "__class__"
        and self.ctx.callself_stack
        and obj.data == self.ctx.callself_stack[-1].data
    ):
      # `self.__class__` could be any subclass of the analyzed class, so Any.
      return node, self.ctx.new_unsolvable(node), []
    # Resolve the value independently for each value of obj
    result = self.ctx.program.NewVariable()
    log.debug("getting attr %s from %r", attr, obj)
    nodes = []
    values_without_attribute = []
    for val in obj.bindings:
      node2, attr_var = self.ctx.attribute_handler.get_attribute(
          node, val.data, attr, val
      )
      if attr_var is None or not attr_var.bindings:
        log.debug("No %s on %s", attr, val.data.__class__)
        values_without_attribute.append(val)
        continue
      log.debug(
          "got choice for attr %s from %r of %r (0x%x): %r",
          attr,
          obj,
          val.data,
          id(val.data),
          attr_var,
      )
      # Merge results, dropping false-positive None bindings.
      self._filter_none_and_paste_bindings(node2, attr_var.bindings, result)
      nodes.append(node2)
    if nodes:
      return self.ctx.join_cfg_nodes(nodes), result, values_without_attribute
    else:
      return node, None, values_without_attribute
def _data_is_none(self, x: abstract.BaseValue) -> bool:
return x.cls == self.ctx.convert.none_type
def _var_is_none(self, v: cfg.Variable) -> bool:
return bool(v.bindings) and all(
self._data_is_none(b.data) for b in v.bindings
)
def _delete_item(self, state, obj, arg):
state, _ = self._call(state, obj, "__delitem__", (arg,))
return state
def load_attr(self, state, obj, attr):
"""Try loading an attribute, and report errors."""
node, result, errors = self._retrieve_attr(state.node, obj, attr)
self._attribute_error_detection(state, attr, errors)
if result is None:
result = self.ctx.new_unsolvable(node)
return state.change_cfg_node(node), result
  def _attribute_error_detection(self, state, attr, errors):
    """Report attribute errors for bindings on which `attr` was missing.

    An error is only reported if the failing binding is actually visible at
    the current node (together with the current function, when known).
    """
    if not self.ctx.options.report_errors:
      return
    for error in errors:
      combination = [error]
      if self.frame.func:
        combination.append(self.frame.func)
      if state.node.HasCombination(combination):
        self.ctx.errorlog.attribute_error(self.frames, error, attr)
  def _filter_none_and_paste_bindings(
      self, node, bindings, var, discard_concrete_values=False
  ):
    """Paste the bindings into var, filtering out false positives on None.

    Args:
      node: The current CFG node.
      bindings: The bindings to copy into `var`.
      var: The destination variable.
      discard_concrete_values: If True, non-string constants are replaced by
        abstract instances of their types.
    """
    for b in bindings:
      if self._has_strict_none_origins(b):
        if (
            discard_concrete_values
            and isinstance(b.data, abstract.PythonConstant)
            and not isinstance(b.data.pyval, str)
        ):
          # We need to keep constant strings as they may be forward references.
          var.AddBinding(
              self.ctx.convert.get_maybe_abstract_instance(b.data), [b], node
          )
        else:
          var.PasteBinding(b, node)
      elif self.ctx.options.strict_none_binding:
        var.PasteBinding(b, node)
      else:
        # TODO(rechen): Remove once --strict-none-binding is fully enabled.
        # A possibly-spurious None binding is replaced with Any.
        var.AddBinding(self.ctx.convert.unsolvable, [b], node)
  def _has_strict_none_origins(self, binding):
    """Whether the binding has any possible origins, with None filtering.

    Determines whether the binding has any possibly visible origins at the
    current node once we've filtered out false positives on None. The caller
    still must call HasCombination() to find out whether these origins are
    actually reachable.

    Args:
      binding: A cfg.Binding.

    Returns:
      True if there are possibly visible origins, else False.
    """
    if not self._analyzing:
      # Outside the analysis phase, every binding counts as visible.
      return True
    has_any_none_origin = False
    # Walk the binding's origin graph, descending only through None bindings.
    walker = cfg_utils.walk_binding(
        binding, keep_binding=lambda b: self._data_is_none(b.data)
    )
    origin = None
    while True:
      try:
        origin = walker.send(origin)
      except StopIteration:
        break
      for source_set in origin.source_sets:
        if not source_set:
          if self.ctx.program.is_reachable(
              src=self.frame.node, dst=origin.where
          ):
            # Checking for reachability works because the current part of the
            # graph hasn't been connected back to the analyze node yet. Since
            # the walker doesn't preserve information about the relationship
            # among origins, we pretend we have a disjunction over source sets.
            return True
          has_any_none_origin = True
    return not has_any_none_origin
def load_attr_noerror(self, state, obj, attr):
"""Try loading an attribute, ignore errors."""
node, result, _ = self._retrieve_attr(state.node, obj, attr)
return state.change_cfg_node(node), result
  def store_attr(
      self,
      state: frame_state.FrameState,
      obj: cfg.Variable,
      attr: str,
      value: cfg.Variable,
  ) -> frame_state.FrameState:
    """Set an attribute on an object.

    The attribute is set on every visible binding of `obj`; the resulting
    CFG nodes are joined into the returned state.
    """
    if not obj.bindings:
      log.info("Ignoring setattr on %r", obj)
      return state
    nodes = []
    for val in obj.Filter(state.node, strict=False):
      # TODO(b/172045608): Check whether val.data is a descriptor (i.e. has
      # "__set__")
      nodes.append(
          self.ctx.attribute_handler.set_attribute(
              state.node, val.data, attr, value
          )
      )
    if nodes:
      return state.change_cfg_node(self.ctx.join_cfg_nodes(nodes))
    else:
      # All bindings were filtered out at this node; nothing to do.
      return state
def del_attr(self, state, obj, attr):
"""Delete an attribute."""
log.info(
"Attribute removal does not do anything in the abstract interpreter"
)
return state
  def _handle_311_pattern_match_on_dict(self, state, op, obj, ret):
    """Handle DELETE_SUBSCR within a pattern match in 3.11.

    Returns the state with the narrowed dict (`ret`) swapped in for the
    original match object when the special 3.11 pattern applies; otherwise
    returns the state unchanged.
    """
    # Very specific hack for pattern matching in 3.11. When cpython
    # compiles a match statement it dups the match object onto the stack
    # several times, which makes type narrowing complicated. Here we check for
    # the compilation pattern of `case {key: val, ..., **rest}` which calls
    # DELETE_SUBSCR on the concrete keys and binds the remaining dict to `rest`
    # (3.10 had a specific COPY_DICT_WITHOUT_KEYS opcode to handle this but it
    # was removed in 3.11).
    if not (
        self.ctx.python_version >= (3, 11)
        and op.line in self._branch_tracker.matches.match_cases
    ):
      return state
    if state.top() == obj:
      # Replace the duplicated match object on the stack with the narrowed
      # dict so later bindings (e.g. of `rest`) see the reduced type.
      state = state.pop_and_discard()
      return state.push(ret)
    return state
  def del_subscr(self, state, op, obj, subscr):
    """Implementation of del obj[subscr]."""
    # Handle the special case of deleting a concrete key from a concrete dict.
    try:
      d = abstract_utils.get_atomic_python_constant(obj, dict)
      k = abstract_utils.get_atomic_python_constant(subscr, str)
    except abstract_utils.ConversionError:
      # obj/subscr are not concrete; fall through to the generic __delitem__.
      pass
    else:
      if k in d:
        # Model the deletion precisely by rebinding the name (if any) to a
        # copy of the dict without the deleted key.
        keys = self.ctx.convert.build_tuple(state.node, [subscr])
        ret = vm_utils.copy_dict_without_keys(state.node, obj, keys, self.ctx)
        state = self._store_new_var_in_local(state, obj, ret)
        state = self._handle_311_pattern_match_on_dict(state, op, obj, ret)
    return self._delete_item(state, obj, subscr)
def pop_varargs(self, state):
"""Retrieve a varargs tuple from the stack. Used by call_function."""
return state.pop()
def pop_kwargs(self, state):
"""Retrieve a kwargs dictionary from the stack. Used by call_function."""
return state.pop()
  def import_module(self, name, full_name, level, bypass_strict=False):
    """Import a module and return the module object or None.

    Args:
      name: The name used in the import statement.
      full_name: The fully qualified module name.
      level: Relative-import level (see _import_module).
      bypass_strict: If True, import even when --strict-import would
        otherwise forbid it.

    Returns:
      The module value, Any on pyi errors, or None if the import is skipped.
    """
    if self.ctx.options.strict_import:
      # Do not import new modules if we aren't in an IMPORT statement.
      # The exception is if we have an implicit "package" module (e.g.
      # `import a.b.c` adds `a.b` to the list of instantiable modules.)
      if not (
          self._importing
          or self.ctx.loader.has_module_prefix(full_name)
          or bypass_strict
      ):
        return None
    try:
      module = self._import_module(name, level)
      # Since we have explicitly imported full_name, add it to the prefix list.
      self.ctx.loader.add_module_prefixes(full_name)
    except (
        parser.ParseError,
        load_pytd.BadDependencyError,
        visitors.ContainerError,
        visitors.SymbolLookupError,
        visitors.LiteralValueError,
    ) as e:
      # A broken .pyi file: report it and treat the module as Any.
      self.ctx.errorlog.pyi_error(self.frames, full_name, e)
      module = self.ctx.convert.unsolvable
    return module
  def _maybe_load_overlay(self, name):
    """Check if a module path is in the overlay dictionary.

    Returns the (cached) overlay for `name`, or None if there is no overlay
    or its underlying pyi is unavailable.
    """
    if name not in overlay_dict.overlays:
      return None
    if name in self.loaded_overlays:
      # Cache hit (the cached value may be None for an unavailable overlay).
      overlay = self.loaded_overlays[name]
    else:
      overlay = overlay_dict.overlays[name](self.ctx)
      # The overlay should be available only if the underlying pyi is.
      if overlay.ast:
        self.loaded_overlays[name] = overlay
      else:
        overlay = self.loaded_overlays[name] = None
    return overlay
def _import_module(self, name, level):
"""Import the module and return the module object.
Args:
name: Name of the module. E.g. "sys".
level: Specifies whether to use absolute or relative imports. -1: (Python
<= 3.1) "Normal" import. Try both relative and absolute.
0: Absolute import.
1: "from . import abc"
2: "from .. import abc" etc.
Returns:
An instance of abstract.Module or None if we couldn't find the module.
"""
key = (name, level)
if key not in self._imported_modules_cache:
self._imported_modules_cache[key] = self._do_import_module(name, level)
return self._imported_modules_cache[key]
  def _do_import_module(self, name, level):
    """Uncached implementation of _import_module.

    Resolves overlays first, then loads the pyi AST via the loader and
    converts it to an abstract module value. Returns None when the module
    cannot be found.
    """
    if name:
      if level <= 0:
        assert level in [-1, 0]
        overlay = self._maybe_load_overlay(name)
        if overlay:
          return overlay
        if level == -1 and self.ctx.loader.base_module:
          # Python 2 tries relative imports first.
          ast = self.ctx.loader.import_relative_name(
              name
          ) or self.ctx.loader.import_name(name)
        else:
          ast = self.ctx.loader.import_name(name)
      else:
        # "from .x import *"
        base = self.ctx.loader.import_relative(level)
        if base is None:
          return None
        full_name = base.name + "." + name
        overlay = self._maybe_load_overlay(full_name)
        if overlay:
          return overlay
        ast = self.ctx.loader.import_name(full_name)
    else:
      # A bare relative import: "from . import ..." with no module name.
      assert level > 0
      ast = self.ctx.loader.import_relative(level)
    if ast:
      return self.ctx.convert.constant_to_value(
          ast, subst=datatypes.AliasingDict(), node=self.ctx.root_node
      )
    else:
      return None
def unary_operator(self, state, name):
state, x = state.pop()
state, result = self._call(state, x, name, ())
state = state.push(result)
return state
def _is_classmethod_cls_arg(self, var):
"""True if var is the first arg of a class method in the current frame."""
if not (self.frame.func and self.frame.first_arg):
return False
func = self.frame.func.data
if func.is_classmethod or func.name.rsplit(".")[-1] == "__new__":
is_cls = not set(var.data) - set(self.frame.first_arg.data)
return is_cls
return False
  def expand_bool_result(self, node, left, right, name, maybe_predicate):
    """Common functionality for 'is' and 'is not'.

    Args:
      node: The current CFG node.
      left: Variable for the lhs value.
      right: Variable for the rhs value.
      name: The operator name (unused here; kept for the common signature).
      maybe_predicate: A function of two abstract values returning
        True/False/None (None meaning "can't tell").

    Returns:
      A variable of bool values, one per (left, right) binding pair.
    """
    if self._is_classmethod_cls_arg(left) or self._is_classmethod_cls_arg(
        right
    ):
      # If cls is the first argument of a classmethod, it could be bound to
      # either the defining class or one of its subclasses, so `is` is
      # ambiguous.
      return self.ctx.new_unsolvable(node)
    result = self.ctx.program.NewVariable()
    for x in left.bindings:
      for y in right.bindings:
        # Each result binding records which (x, y) pair produced it.
        pyval = maybe_predicate(x.data, y.data)
        result.AddBinding(
            self.ctx.convert.bool_values[pyval], source_set=(x, y), where=node
        )
    return result
  def _get_aiter(self, state, obj):
    """Get an async iterator from an object.

    Calls obj.__aiter__(); if the attribute lookup yields nothing useful the
    result is Any.
    """
    state, func = self.load_attr(state, obj, "__aiter__")
    if func:
      return self.call_function_with_state(state, func, ())
    else:
      # NOTE(review): load_attr appears to always return a variable; this
      # branch presumably guards an empty variable — confirm.
      return state, self.ctx.new_unsolvable(state.node)
  def _get_iter(self, state, seq, report_errors=True):
    """Get an iterator from a sequence.

    Prefers __iter__; falls back to the legacy __getitem__ protocol. When
    neither exists, returns an empty variable and (optionally) reports an
    attribute error on __iter__.
    """
    # TODO(b/201603421): We should iterate through seq's bindings, in order to
    # fetch the attribute on the sequence's class, but two problems prevent us
    # from doing so:
    # - Iterating through individual bindings causes a performance regression.
    # - Because __getitem__ is used for annotations, pytype sometime thinks the
    #   class attribute is AnnotationClass.getitem_slot.
    state, func = self.load_attr_noerror(state, seq, "__iter__")
    if func:
      # Call __iter__()
      state, itr = self.call_function_with_state(state, func, ())
    else:
      node, func, missing = self._retrieve_attr(state.node, seq, "__getitem__")
      state = state.change_cfg_node(node)
      if func:
        # Call __getitem__(int).
        state, item = self.call_function_with_state(
            state, func, (self.ctx.convert.build_int(state.node),)
        )
        # Create a new iterator from the returned value.
        itr = abstract.Iterator(self.ctx, item).to_variable(state.node)
      else:
        itr = self.ctx.program.NewVariable()
        if report_errors and self.ctx.options.report_errors:
          for m in missing:
            if state.node.HasCombination([m]):
              self.ctx.errorlog.attribute_error(self.frames, m, "__iter__")
    return state, itr
def byte_NOP(self, state, op):
return state
  def byte_UNARY_NOT(self, state, op):
    """Implement the UNARY_NOT bytecode."""
    state, var = state.pop()
    # Partition the bindings by whether their data can be truthy / falsy.
    # A binding may appear in both lists if its truthiness is ambiguous.
    true_bindings = [
        b for b in var.bindings if compare.compatible_with(b.data, True)
    ]
    false_bindings = [
        b for b in var.bindings if compare.compatible_with(b.data, False)
    ]
    if len(true_bindings) == len(false_bindings) == len(var.bindings):
      # No useful information from bindings, use a generic bool value.
      # This is merely an optimization rather than building separate True/False
      # values each with the same bindings as var.
      result = self.ctx.convert.build_bool(state.node)
    else:
      # Build a result with True/False values, each bound to appropriate
      # bindings. Note that bindings that are True get attached to a result
      # that is False and vice versa because this is a NOT operation.
      result = self.ctx.program.NewVariable()
      for b in true_bindings:
        result.AddBinding(
            self.ctx.convert.bool_values[False],
            source_set=(b,),
            where=state.node,
        )
      for b in false_bindings:
        result.AddBinding(
            self.ctx.convert.bool_values[True],
            source_set=(b,),
            where=state.node,
        )
    state = state.push(result)
    return state
def byte_UNARY_NEGATIVE(self, state, op):
return self.unary_operator(state, "__neg__")
def byte_UNARY_POSITIVE(self, state, op):
return self.unary_operator(state, "__pos__")
def byte_UNARY_INVERT(self, state, op):
return self.unary_operator(state, "__invert__")
def byte_BINARY_MATRIX_MULTIPLY(self, state, op):
return self.binary_operator(state, "__matmul__")
def byte_BINARY_ADD(self, state, op):
return self.binary_operator(state, "__add__")
def byte_BINARY_SUBTRACT(self, state, op):
return self.binary_operator(state, "__sub__")
def byte_BINARY_MULTIPLY(self, state, op):
return self.binary_operator(state, "__mul__")
def byte_BINARY_MODULO(self, state, op):
return self.binary_operator(state, "__mod__")
def byte_BINARY_LSHIFT(self, state, op):
return self.binary_operator(state, "__lshift__")
def byte_BINARY_RSHIFT(self, state, op):
return self.binary_operator(state, "__rshift__")
def byte_BINARY_AND(self, state, op):
return self.binary_operator(state, "__and__")
def byte_BINARY_XOR(self, state, op):
return self.binary_operator(state, "__xor__")
def byte_BINARY_OR(self, state, op):
return self.binary_operator(state, "__or__")
def byte_BINARY_FLOOR_DIVIDE(self, state, op):
return self.binary_operator(state, "__floordiv__")
def byte_BINARY_TRUE_DIVIDE(self, state, op):
return self.binary_operator(state, "__truediv__")
def byte_BINARY_POWER(self, state, op):
return self.binary_operator(state, "__pow__")
def byte_BINARY_SUBSCR(self, state, op):
return self.binary_operator(state, "__getitem__")
def byte_INPLACE_MATRIX_MULTIPLY(self, state, op):
return self.inplace_operator(state, "__imatmul__")
def byte_INPLACE_ADD(self, state, op):
return self.inplace_operator(state, "__iadd__")
def byte_INPLACE_SUBTRACT(self, state, op):
return self.inplace_operator(state, "__isub__")
def byte_INPLACE_MULTIPLY(self, state, op):
return self.inplace_operator(state, "__imul__")
def byte_INPLACE_MODULO(self, state, op):
return self.inplace_operator(state, "__imod__")
def byte_INPLACE_POWER(self, state, op):
return self.inplace_operator(state, "__ipow__")
def byte_INPLACE_LSHIFT(self, state, op):
return self.inplace_operator(state, "__ilshift__")
def byte_INPLACE_RSHIFT(self, state, op):
return self.inplace_operator(state, "__irshift__")
def byte_INPLACE_AND(self, state, op):
return self.inplace_operator(state, "__iand__")
def byte_INPLACE_XOR(self, state, op):
return self.inplace_operator(state, "__ixor__")
def byte_INPLACE_OR(self, state, op):
return self.inplace_operator(state, "__ior__")
def byte_INPLACE_FLOOR_DIVIDE(self, state, op):
return self.inplace_operator(state, "__ifloordiv__")
def byte_INPLACE_TRUE_DIVIDE(self, state, op):
return self.inplace_operator(state, "__itruediv__")
  def byte_LOAD_CONST(self, state, op):
    """LOAD_CONST opcode: push a constant from the code object's consts."""
    try:
      raw_const = self.frame.f_code.consts[op.arg]
    except IndexError:
      # We have tried to access an undefined closure variable.
      # There is an associated LOAD_DEREF failure where the error will be
      # raised, so we just return unsolvable here.
      # See test_closures.ClosuresTest.test_undefined_var
      return state.push(self.ctx.new_unsolvable(state.node))
    return self.load_constant(state, op, raw_const)
def byte_LOAD_FOLDED_CONST(self, state, op):
const = op.arg
state, var = constant_folding.build_folded_type(self.ctx, state, const)
return state.push(var)
def byte_SETUP_EXCEPT_311(self, state, op):
return self._setup_except(state, op)
def byte_POP_TOP(self, state, op):
return state.pop_and_discard()
def byte_DUP_TOP(self, state, op):
return state.push(state.top())
def byte_DUP_TOP_TWO(self, state, op):
state, (a, b) = state.popn(2)
return state.push(a, b, a, b)
def byte_ROT_TWO(self, state, op):
return state.rotn(2)
def byte_ROT_THREE(self, state, op):
return state.rotn(3)
def byte_ROT_FOUR(self, state, op):
return state.rotn(4)
def byte_ROT_N(self, state, op):
return state.rotn(op.arg)
def _is_private(self, name):
return name.startswith("_") and not name.startswith("__")
  def _name_error_or_late_annotation(self, state, name):
    """Returns a late annotation or returns Any and logs a name error."""
    if self._late_annotations_stack and self.late_annotations is not None:
      # We're inside an annotation, so the undefined name may simply be a
      # forward reference; create a LateAnnotation to resolve later.
      annot = abstract.LateAnnotation(
          name, self._late_annotations_stack, self.ctx
      )
      log.info("Created %r", annot)
      self.late_annotations[name].append(annot)
      return annot
    else:
      details = vm_utils.get_name_error_details(state, name, self.ctx)
      if details:
        details = "Note: " + details.to_error_message()
      self.ctx.errorlog.name_error(self.frames, name, details=details)
      return self.ctx.convert.unsolvable
  def byte_LOAD_NAME(self, state, op):
    """Load a name. Can be a local, global, or builtin.

    Tries locals, then globals, then builtins, in that order. A name found
    but marked Deleted is remembered so a use-after-delete error can be
    reported if no other scope provides the name.
    """
    name = op.argval
    deleted_var = None
    try:
      state, val = self.load_local(state, name)
      if vm_utils.is_deleted_name(state, val):
        # Remember the deleted local but keep searching outer scopes.
        deleted_var = val
        raise KeyError()
    except KeyError as e_local:
      try:
        state, val = self.load_global(state, name)
        if vm_utils.is_deleted_name(state, val):
          deleted_var = val
          raise e_local
      except KeyError as e_global:
        try:
          if self._is_private(name):
            # Private names must be explicitly imported.
            self.trace_opcode(op, name, None)
            raise KeyError(name) from e_global
          state, val = self.load_builtin(state, name)
          if vm_utils.is_deleted_name(state, val):
            deleted_var = val
            raise e_global
        except KeyError:
          if deleted_var is not None:
            # The name existed but was deleted: report use-after-delete.
            vm_utils.check_for_deleted(state, name, deleted_var, self.ctx)
            self.trace_opcode(op, name, deleted_var)
            return state.push(deleted_var)
          if self._is_private(name) or not self.has_unknown_wildcard_imports:
            one_val = self._name_error_or_late_annotation(state, name)
          else:
            # A wildcard import might have provided the name; assume Any.
            one_val = self.ctx.convert.unsolvable
          self.trace_opcode(op, name, None)
          return state.push(one_val.to_variable(state.node))
    vm_utils.check_for_deleted(state, name, val, self.ctx)
    self.trace_opcode(op, name, val)
    return state.push(val)
def byte_STORE_NAME(self, state, op):
name = op.argval
return self._pop_and_store(state, op, name, local=True)
def byte_DELETE_NAME(self, state, op):
name = op.argval
return self._del_name(op, state, name, local=True)
  def _load_fast(
      self, state, op, name, on_uninitialized=_UninitializedBehavior.ERROR
  ):
    """Load a local. Unlike LOAD_NAME, it doesn't fall back to globals.

    Args:
      state: The current VM state.
      op: The current opcode.
      name: The local variable name.
      on_uninitialized: What to push when the local is unbound: report a name
        error (ERROR) or push a NULL sentinel (PUSH_NULL, for 3.12's
        LOAD_FAST_AND_CLEAR semantics).

    Returns:
      The state with the local's value pushed.
    """
    try:
      state, val = self.load_local(state, name)
    except KeyError:
      # Variables with a ".n" naming scheme are created by the interpreter under
      # the hood to store things like iterators for list comprehensions. Even if
      # something goes wrong, we should not expose this implementation detail to
      # the user.
      if re.fullmatch(r"\.\d+", name):
        val = self.ctx.new_unsolvable(state.node)
      elif on_uninitialized == _UninitializedBehavior.PUSH_NULL:
        val = abstract.Null(self.ctx).to_variable(state.node)
      else:
        val = self._name_error_or_late_annotation(state, name).to_variable(
            state.node
        )
    if on_uninitialized == _UninitializedBehavior.PUSH_NULL:
      # In PUSH_NULL mode, a deleted local is also represented as NULL rather
      # than reported as an error.
      if any(isinstance(x, abstract.Deleted) for x in val.Data(state.node)):
        val = abstract.Null(self.ctx).to_variable(state.node)
    else:
      vm_utils.check_for_deleted(state, name, val, self.ctx)
    self.trace_opcode(op, name, val)
    return state.push(val)
def byte_LOAD_FAST(self, state, op):
name = op.argval
return self._load_fast(state, op, name)
def byte_LOAD_FAST_CHECK(self, state, op):
name = op.argval
return self._load_fast(state, op, name)
def byte_LOAD_FAST_AND_CLEAR(self, state, op):
name = op.argval
state = self._load_fast(state, op, name, _UninitializedBehavior.PUSH_NULL)
# According to the docs, we need to set the value to NULL. Since this is
# accessing "fast locals", setting to NULL is equalivent to deleting the
# value in f_locals.
return self._del_name(op, state, name, local=True)
  def byte_STORE_FAST(self, state, op):
    """STORE_FAST opcode: pop the TOS and bind it to a fast local."""
    name = op.argval
    top = state.top()
    if top.data and isinstance(top.data[0], abstract.Null):
      # Storing NULL in a "fast local" is equivalent to deleting the value in
      # f_locals.
      return self._del_name(op, state.pop_and_discard(), name, local=True)
    else:
      return self._pop_and_store(state, op, name, local=True)
def byte_DELETE_FAST(self, state, op):
name = op.argval
return self._del_name(op, state, name, local=True)
  def byte_LOAD_GLOBAL(self, state, op):
    """Load a global variable, or fall back to trying to load a builtin."""
    if self.ctx.python_version >= (3, 11) and op.arg & 1:
      # Compiler-generated marker that will be consumed in byte_CALL
      # We are loading a global and calling it as a function.
      state = self._push_null(state)
    name = op.argval
    if name == "None":
      # Load None itself as a constant to avoid the None filtering done on
      # variables. This workaround is safe because assigning to None is a
      # syntax error.
      return self.load_constant(state, op, None)
    try:
      state, val = self.load_global(state, name)
    except KeyError:
      try:
        state, val = self.load_builtin(state, name)
      except KeyError:
        # Not a global or builtin: report (or create a late annotation).
        self.trace_opcode(op, name, None)
        ret = self._name_error_or_late_annotation(state, name)
        return state.push(ret.to_variable(state.node))
    vm_utils.check_for_deleted(state, name, val, self.ctx)
    self.trace_opcode(op, name, val)
    return state.push(val)
def byte_STORE_GLOBAL(self, state, op):
name = op.argval
return self._pop_and_store(state, op, name, local=False)
def byte_DELETE_GLOBAL(self, state, op):
name = op.argval
return self._del_name(op, state, name, local=False)
def byte_LOAD_CLOSURE(self, state, op):
    """Retrieves a value out of a cell."""
    return vm_utils.load_closure_cell(state, op, False, self.ctx)

def byte_LOAD_DEREF(self, state, op):
    """Retrieves a value out of a cell."""
    return vm_utils.load_closure_cell(state, op, True, self.ctx)

def byte_STORE_DEREF(self, state, op):
    """Stores a value in a closure cell."""
    state, value = state.pop()
    assert isinstance(value, cfg.Variable)
    name = op.argval
    # Check the stored value against any annotation on the name.
    value = self._apply_annotation(
        state, op, name, value, self.current_annotated_locals, check_types=True
    )
    state = state.forward_cfg_node(f"StoreDeref:{name}")
    self.frame.get_cell_by_name(name).PasteVariable(value, state.node)
    self.trace_opcode(op, name, value)
    return state

def byte_DELETE_DEREF(self, state, op):
    """Deletes a closure cell's value by storing an abstract Deleted marker."""
    value = abstract.Deleted(op.line, self.ctx).to_variable(state.node)
    name = op.argval
    state = state.forward_cfg_node(f"DelDeref:{name}")
    self.frame.get_cell_by_name(name).PasteVariable(value, state.node)
    self.trace_opcode(op, name, value)
    return state

def byte_LOAD_CLASSDEREF(self, state, op):
    """Retrieves a value out of either locals or a closure cell."""
    name = op.argval
    try:
        state, val = self.load_local(state, name)
        self.trace_opcode(op, name, val)
        return state.push(val)
    except KeyError:
        # Not in locals; fall back to the enclosing closure cell.
        return vm_utils.load_closure_cell(state, op, False, self.ctx)
def _cmp_rel(self, state, op_name, x, y):
    """Implementation of relational operators CMP_(LT|LE|EQ|NE|GE|GT).

    Args:
      state: Initial FrameState.
      op_name: An operator name, e.g., "EQ".
      x: A variable of the lhs value.
      y: A variable of the rhs value.

    Returns:
      A tuple of the new FrameState and the return variable.
    """
    ret = self.ctx.program.NewVariable()
    # A variable of the values without a special cmp_rel implementation. Needed
    # because overloaded __eq__ implementations do not necessarily return a
    # bool; see, e.g., test_overloaded in test_cmp.
    leftover_x = self.ctx.program.NewVariable()
    leftover_y = self.ctx.program.NewVariable()
    op_not_eq = op_name not in ("EQ", "NE")
    reported = False
    # Compare every pair of bindings of the two operands.
    for b1 in x.bindings:
        for b2 in y.bindings:
            op = getattr(slots, op_name)
            try:
                err = False
                val = compare.cmp_rel(self.ctx, op, b1.data, b2.data)
            except compare.CmpTypeError:
                val = None
                if state.node.HasCombination([b1, b2]):
                    err = True
                    reported = True  # do not report the wrong-arg-types as well
                    self.ctx.errorlog.unsupported_operands(self.frames, op, x, y)
            if val is None:
                # We have to special-case classes here, since getattribute(class, op)
                # gets the class method, not the instance method of the metaclass, and
                # raises an error message referring to the comparator method on
                # `object` in addition to the error thrown by compare.
                # TODO(b/205755440): We fail (with the aforementioned bad error
                # message) when the comparator method is defined on a metaclass, since
                # compare only raises an error for classes with metaclass=type.
                if op_not_eq and isinstance(b1.data, abstract.Class) and err:
                    ret.AddBinding(self.ctx.convert.unsolvable, {b1, b2}, state.node)
                elif isinstance(b1.data, abstract.SequenceLength):
                    # `None` is a meaningful return value when pattern matching
                    ret.AddBinding(
                        self.ctx.convert.bool_values[val], {b1, b2}, state.node
                    )
                else:
                    # No special implementation; retry via the dunder method below.
                    leftover_x.PasteBinding(b1, state.node)
                    leftover_y.PasteBinding(b2, state.node)
            else:
                ret.AddBinding(
                    self.ctx.convert.bool_values[val], {b1, b2}, state.node
                )
    if leftover_x.bindings:
        op = f"__{op_name.lower()}__"
        # If we do not already have a return value, raise any errors caught by the
        # overloaded comparison method.
        report_errors = op_not_eq and not bool(ret.bindings) and not reported
        state, leftover_ret = vm_utils.call_binary_operator(
            state,
            op,
            leftover_x,
            leftover_y,
            report_errors=report_errors,
            ctx=self.ctx,
        )
        ret.PasteVariable(leftover_ret, state.node)
    return state, ret
def _coerce_to_bool(self, var, true_val=True):
    """Coerce the values in a variable to bools.

    Args:
      var: The variable whose bindings are coerced.
      true_val: The bool that a truthy value maps to (inverted when False).

    Returns:
      A new variable with each binding's data replaced by a bool value.
    """

    def as_bool(v):
        # A concrete bool constant maps directly; otherwise decide by
        # compatibility with True/False, falling back to "ambiguous" (None).
        if isinstance(v, abstract.PythonConstant) and isinstance(v.pyval, bool):
            return v.pyval is true_val
        if not compare.compatible_with(v, True):
            return not true_val
        if not compare.compatible_with(v, False):
            return true_val
        return None

    result = self.ctx.program.NewVariable()
    for binding in var.bindings:
        coerced = self.ctx.convert.bool_values[as_bool(binding.data)]
        result.PasteBindingWithNewData(binding, coerced)
    return result
def _cmp_in(self, state, item, seq, true_val=True):
    """Implementation of CMP_IN/CMP_NOT_IN.

    Args:
      state: The current FrameState.
      item: The variable being tested for membership.
      seq: The container variable.
      true_val: True for `in`, False for `not in` (inverts the result).

    Returns:
      A (state, result-variable) tuple.
    """
    state, has_contains = self.load_attr_noerror(state, seq, "__contains__")
    if has_contains:
        state, ret = vm_utils.call_binary_operator(
            state, "__contains__", seq, item, report_errors=True, ctx=self.ctx
        )
        if ret.bindings:
            ret = self._coerce_to_bool(ret, true_val=true_val)
    else:
        # For an object without a __contains__ method, cmp_in falls back to
        # checking item against the items produced by seq's iterator.
        state, itr = self._get_iter(state, seq, report_errors=False)
        if len(itr.bindings) < len(seq.bindings):
            # seq does not have any of __contains__, __iter__, and __getitem__.
            # (The last two are checked by _get_iter.)
            self.ctx.errorlog.unsupported_operands(
                self.frames, "__contains__", seq, item
            )
        # The membership result itself is indefinite: just a bool.
        ret = self.ctx.convert.build_bool(state.node)
    return state, ret

def _cmp_is_always_supported(self, op_arg):
    """Checks if the comparison should always succeed."""
    return op_arg in slots.CMP_ALWAYS_SUPPORTED
def _instantiate_exception(self, node, exc_type):
    """Instantiate an exception type.

    Args:
      node: The current node.
      exc_type: A cfg.Variable of the exception type.

    Returns:
      A tuple of a cfg.Variable of the instantiated type and a list of
      the flattened exception types in the data of exc_type. None takes the
      place of invalid types.
    """
    value = self.ctx.program.NewVariable()
    types = []
    # Worklist: flatten tuples/unions of exception types as we go.
    stack = list(exc_type.data)
    while stack:
        e = stack.pop()
        if isinstance(e, abstract.Tuple):
            # Heterogeneous tuple of exception types: recurse on each element.
            for sub_exc_type in e.pyval:
                sub_value, sub_types = self._instantiate_exception(node, sub_exc_type)
                value.PasteVariable(sub_value)
                types.extend(sub_types)
        elif (
            isinstance(e, abstract.Instance)
            and e.cls.full_name == "builtins.tuple"
        ):
            # Homogeneous tuple: recurse on its content type parameter.
            sub_exc_type = e.get_instance_type_parameter(abstract_utils.T)
            sub_value, sub_types = self._instantiate_exception(node, sub_exc_type)
            value.PasteVariable(sub_value)
            types.extend(sub_types)
        elif isinstance(e, abstract.Class) and any(
            base.full_name == "builtins.BaseException"
            or isinstance(base, abstract.AMBIGUOUS_OR_EMPTY)
            for base in e.mro
        ):
            # A genuine exception class: instantiate it.
            value.PasteVariable(self.init_class(node, e))
            types.append(e)
        elif isinstance(e, abstract.Union):
            stack.extend(e.options)
        else:
            if not isinstance(e, abstract.AMBIGUOUS_OR_EMPTY):
                # Invalid except clause: report and record None for the type.
                if isinstance(e, abstract.Class):
                    mro_seqs = [e.mro] if isinstance(e, abstract.Class) else []
                    msg = f"{e.name} does not inherit from BaseException"
                else:
                    mro_seqs = []
                    msg = "Not a class"
                self.ctx.errorlog.mro_error(
                    self.frames, e.name, mro_seqs, details=msg
                )
            value.AddBinding(self.ctx.convert.unsolvable, [], node)
            types.append(None)
    return value, types
def _replace_abstract_exception(self, state, exc_type):
    """Replace unknowns added by push_abstract_exception with precise values."""
    # When the `try` block is set up, push_abstract_exception pushes on
    # unknowns for the value and exception type. At the beginning of the
    # `except` block, when we know the exception being caught, we can replace
    # the unknowns with more useful variables.
    value, types = self._instantiate_exception(state.node, exc_type)
    if self.ctx.python_version >= (3, 11):
        # 3.11+ keeps a single exception entry on the stack.
        state, _ = state.pop()
        state = state.push(value)
    else:
        if None in types:
            # At least one invalid exception type: fall back to unsolvable.
            exc_type = self.ctx.new_unsolvable(state.node)
        # See SETUP_FINALLY: we push the exception on twice.
        state, (_, _, tb, _, _) = state.popn(5)
        state = state.push(value, exc_type, tb, value, exc_type)
    return state
def _compare_op(self, state, op_arg, op):
    """Pops and compares the top two stack values and pushes a boolean."""
    state, (x, y) = state.popn(2)
    self._branch_tracker.register_match_type(op)
    match_enum = self._branch_tracker.add_cmp_branch(op, op_arg, x, y)
    if match_enum is not None:
        # The match always succeeds/fails.
        ret = self.ctx.convert.bool_values[match_enum].to_variable(state.node)
        if match_enum is False:  # pylint: disable=g-bool-id-comparison
            # A match case that can never succeed: report it as redundant.
            case_val = abstract_utils.get_atomic_value(y)
            if isinstance(case_val, abstract.ConcreteValue):
                # This is a Literal match
                name = repr(case_val.pyval)
            else:
                # This is an enum match
                name = case_val.name
            self.ctx.errorlog.redundant_match(self.frames, name)
        return state.push(ret)
    # Explicit, redundant, switch statement, to make it easier to address the
    # behavior of individual compare operations:
    if op_arg == slots.CMP_LT:
        state, ret = self._cmp_rel(state, "LT", x, y)
    elif op_arg == slots.CMP_LE:
        state, ret = self._cmp_rel(state, "LE", x, y)
    elif op_arg == slots.CMP_EQ:
        state, ret = self._cmp_rel(state, "EQ", x, y)
    elif op_arg == slots.CMP_NE:
        state, ret = self._cmp_rel(state, "NE", x, y)
    elif op_arg == slots.CMP_GT:
        state, ret = self._cmp_rel(state, "GT", x, y)
    elif op_arg == slots.CMP_GE:
        state, ret = self._cmp_rel(state, "GE", x, y)
    elif op_arg == slots.CMP_IS:
        ret = self.expand_bool_result(
            state.node, x, y, "is_cmp", frame_state.is_cmp
        )
    elif op_arg == slots.CMP_IS_NOT:
        ret = self.expand_bool_result(
            state.node, x, y, "is_not_cmp", frame_state.is_not_cmp
        )
    elif op_arg == slots.CMP_NOT_IN:
        state, ret = self._cmp_in(state, x, y, true_val=False)
    elif op_arg == slots.CMP_IN:
        state, ret = self._cmp_in(state, x, y)
    elif op_arg == slots.CMP_EXC_MATCH:
        # Refines the exception on the stack; result is an indefinite bool.
        state = self._replace_abstract_exception(state, y)
        ret = self.ctx.convert.build_bool(state.node)
    else:
        raise VirtualMachineError("Invalid argument to COMPARE_OP: %d" % op_arg)
    if not ret.bindings and self._cmp_is_always_supported(op_arg):
        # Some comparison operations are always supported, depending on the target
        # Python version. In this case, always return a (boolean) value.
        # (https://docs.python.org/2/library/stdtypes.html#comparisons or
        # (https://docs.python.org/3/library/stdtypes.html#comparisons)
        ret.AddBinding(self.ctx.convert.primitive_instances[bool], [], state.node)
    return state.push(ret)
def byte_COMPARE_OP(self, state, op):
    """Dispatches a rich comparison based on the opcode's argval."""
    return self._compare_op(state, op.argval, op)

def byte_IS_OP(self, state, op):
    """Implements `is` / `is not`; op.arg selects the negated form."""
    cmp_arg = slots.CMP_IS_NOT if op.arg else slots.CMP_IS
    return self._compare_op(state, cmp_arg, op)

def byte_CONTAINS_OP(self, state, op):
    """Implements `in` / `not in`; op.arg selects the negated form."""
    cmp_arg = slots.CMP_NOT_IN if op.arg else slots.CMP_IN
    return self._compare_op(state, cmp_arg, op)
def byte_LOAD_ATTR(self, state, op):
    """Pop an object, and retrieve a named attribute from it."""
    name = op.argval
    state, obj = state.pop()
    log.debug("LOAD_ATTR: %r %r", obj, name)
    if self.ctx.python_version >= (3, 12) and op.arg & 1:
        # In 3.12+, the low bit of op.arg marks a method load (replaces the
        # old LOAD_METHOD opcode).
        state, val = self._load_method(state, obj, name)
    else:
        with self._suppress_opcode_tracing():
            # LOAD_ATTR for @property methods generates an extra opcode trace for
            # the implicit function call, which we do not want.
            state, val = self.load_attr(state, obj, name)
    state = state.push(val)
    # We need to trace both the object and the attribute.
    self.trace_opcode(op, name, (obj, val))
    return state
def _get_type_of_attr_to_store(self, node, op, obj, name):
    """Grabs the __annotations__ dict, if any, with the attribute type.

    Args:
      node: The current CFG node.
      op: The STORE_ATTR opcode being executed.
      obj: The variable holding the object whose attribute is being stored.
      name: The attribute name.

    Returns:
      A (node, annotations_dict, check_type) tuple; annotations_dict is None
      when no annotation information is available, and check_type is False
      when the type should be applied but not enforced.
    """
    check_type = True
    annotations_dict = None
    for obj_val in obj.data:
        # Stores can target either a class object itself or an instance.
        if isinstance(obj_val, abstract.InterpreterClass):
            maybe_cls = obj_val
        else:
            maybe_cls = obj_val.cls
        if isinstance(maybe_cls, abstract.InterpreterClass):
            if (
                "__annotations__" not in maybe_cls.members
                and op.line in self._director.annotations
            ):
                # The class has no annotated class attributes but does have an
                # annotated instance attribute.
                cur_annotations_dict = abstract.AnnotationsDict({}, self.ctx)
                maybe_cls.members["__annotations__"] = (
                    cur_annotations_dict.to_variable(self.ctx.root_node)
                )
            cur_annotations_dict = abstract_utils.get_annotations_dict(
                maybe_cls.members
            )
            if cur_annotations_dict:
                cur_annotations_dict = cur_annotations_dict.annotated_locals
        elif (
            isinstance(maybe_cls, abstract.PyTDClass)
            and maybe_cls != self.ctx.convert.type_type
        ):
            node, attr = self.ctx.attribute_handler.get_attribute(
                node, obj_val, name, obj_val.to_binding(node)
            )
            if attr:
                typ = self.ctx.convert.merge_classes(attr.data)
                cur_annotations_dict = {
                    name: abstract_utils.Local(node, op, typ, None, self.ctx)
                }
            else:
                cur_annotations_dict = None
            # In a PyTDClass, we can't distinguish between an inferred type and an
            # annotation. Even though we don't check against the attribute type, we
            # still apply it so that setting an attribute value on an instance of a
            # class doesn't affect the attribute type in other instances.
            check_type = False
            # We can still check for final members being assigned to.
            if name in maybe_cls.final_members:
                self.ctx.errorlog.assigning_to_final(self.frames, name, local=False)
        elif (
            isinstance(obj_val, abstract.Instance)
            and "__annotations__" in obj_val.members
        ):
            # Some overlays add an __annotations__ dict to an abstract.Instance to
            # replicate runtime type checks on individual instances.
            annot = abstract_utils.get_annotations_dict(obj_val.members)
            assert annot is not None
            cur_annotations_dict = annot.annotated_locals
        else:
            cur_annotations_dict = None
        if cur_annotations_dict is not None:
            # Merge annotation dicts gathered from multiple bindings of obj.
            if annotations_dict is None:
                annotations_dict = cur_annotations_dict
            else:
                for k, v in cur_annotations_dict.items():
                    # pylint: disable=unsupported-assignment-operation,unsupported-membership-test
                    if k in annotations_dict:
                        annotations_dict[k] = abstract_utils.Local.merge(
                            node, op, annotations_dict[k], v
                        )
                    else:
                        annotations_dict[k] = v
                # pylint: enable=unsupported-assignment-operation,unsupported-membership-test
    return node, annotations_dict, check_type
def byte_STORE_ATTR(self, state, op):
    """Store an attribute."""
    name = op.argval
    state, (val, obj) = state.popn(2)
    # Look up any annotation for the attribute so the stored value can be
    # checked (or silently widened, for PyTD classes) against it.
    node, annotations_dict, check_attribute_types = (
        self._get_type_of_attr_to_store(state.node, op, obj, name)
    )
    state = state.change_cfg_node(node)
    val = self._apply_annotation(
        state, op, name, val, annotations_dict, check_attribute_types
    )
    state = state.forward_cfg_node(f"StoreAttr:{name}")
    state = self.store_attr(state, obj, name, val)
    # We need to trace both the object and the attribute.
    self.trace_opcode(op, name, (obj, val))
    return state

def byte_DELETE_ATTR(self, state, op):
    """Deletes the attribute op.argval from the popped object."""
    name = op.argval
    state, obj = state.pop()
    return self.del_attr(state, obj, name)

def store_subscr(self, state, obj, key, val):
    """Implements obj[key] = val via the object's __setitem__."""
    state, _ = self._call(state, obj, "__setitem__", (key, val))
    return state
def _record_annotation_dict_store(self, state, obj, subscr, val, op):
    """Record a store_subscr to an __annotations__ dict."""
    try:
        name = abstract_utils.get_atomic_python_constant(subscr, str)
    except abstract_utils.ConversionError:
        # Non-constant key: nothing to record.
        pass
    else:
        typ = self.ctx.annotation_utils.extract_annotation(
            state.node,
            val,
            name,
            self.simple_stack(),
            allowed_type_params=self.frame.type_params,
        )
        self._record_annotation(state.node, op, name, typ)

def byte_STORE_SUBSCR(self, state, op):
    """Implement obj[subscr] = val."""
    state, (val, obj, subscr) = state.popn(3)
    state = state.forward_cfg_node("StoreSubscr")
    # Check whether obj is the __annotations__ dict.
    if abstract_utils.match_atomic_value(obj, abstract.AnnotationsDict):
        if all(abstract_utils.is_ellipsis(v) for v in val.data):
            # '...' is an experimental "inferred type": see b/213607272.
            pass
        else:
            if abstract_utils.match_atomic_value(val, abstract.FinalAnnotation):
                # Unwrap Final[...] to the underlying annotation.
                val = val.data[0].annotation.to_variable(state.node)
            self._record_annotation_dict_store(state, obj, subscr, val, op)
    state = self.store_subscr(state, obj, subscr, val)
    return state

def byte_DELETE_SUBSCR(self, state, op):
    """Implements del obj[subscr]."""
    state, (obj, subscr) = state.popn(2)
    return self.del_subscr(state, op, obj, subscr)
def byte_BUILD_TUPLE(self, state, op):
    """Pops op.arg elements and pushes a tuple containing them."""
    state, elements = state.popn(op.arg)
    return state.push(self.ctx.convert.build_tuple(state.node, elements))

def byte_BUILD_LIST(self, state, op):
    """Pops op.arg elements and pushes a list containing them."""
    state, elements = state.popn(op.arg)
    pushed = state.push(self.ctx.convert.build_list(state.node, elements))
    return pushed.forward_cfg_node("BuildList")

def byte_BUILD_SET(self, state, op):
    """Pops op.arg elements and pushes a set containing them."""
    state, elements = state.popn(op.arg)
    return state.push(self.ctx.convert.build_set(state.node, elements))

def byte_BUILD_MAP(self, state, op):
    """Pops 2 * op.arg values (alternating key, value) and pushes a dict."""
    new_map = self.ctx.convert.build_map(state.node)
    state, flat = state.popn(2 * op.arg)
    # flat is [k0, v0, k1, v1, ...]; pair up keys and values.
    for key, val in zip(flat[::2], flat[1::2]):
        state = self.store_subscr(state, new_map, key, val)
    return state.push(new_map)
def _get_literal_sequence(self, data):
    """Helper function for _unpack_sequence.

    Returns the value as a literal tuple of variables, or None if it is not
    a literal sequence.
    """
    try:
        return self.ctx.convert.value_to_constant(data, tuple)
    except abstract_utils.ConversionError:
        # Fall back to looking for a literal list and converting to a tuple
        try:
            return tuple(self.ctx.convert.value_to_constant(data, list))
        except abstract_utils.ConversionError:
            for base in data.cls.mro:
                if isinstance(base, abstract.TupleClass) and not base.formal:
                    # We've found a TupleClass with concrete parameters, which means
                    # we're a subclass of a heterogeneous tuple (usually a
                    # typing.NamedTuple instance).
                    new_data = self.ctx.convert.merge_values(
                        base.instantiate(self.ctx.root_node).data
                    )
                    return self._get_literal_sequence(new_data)
            return None
def _restructure_tuple(self, state, tup, pre, post):
    """Collapses tup's middle (between pre and post elements) into a list var."""
    head = tup[:pre]
    # Negative slicing handles the "no trailing elements" case separately,
    # since tup[pre:-0] would be empty.
    tail = tup[-post:] if post > 0 else ()
    middle = tup[pre:-post] if post > 0 else tup[pre:]
    packed = self.ctx.convert.build_list(state.node, middle)
    return head + (packed,) + tail
def _unpack_sequence(self, state, n_before, n_after=-1):
    """Pops a tuple (or other iterable) and pushes it onto the VM's stack.

    Supports destructuring assignment with potentially a single list variable
    that slurps up the remaining elements:
    1. a, b, c = ...  # UNPACK_SEQUENCE
    2. a, *b, c = ...  # UNPACK_EX

    Args:
      state: The current VM state
      n_before: Number of elements before the list (n_elements for case 1)
      n_after: Number of elements after the list (-1 for case 1)

    Returns:
      The new state.
    """
    assert n_after >= -1
    state, seq = state.pop()
    options = []
    nontuple_seq = self.ctx.program.NewVariable()
    has_slurp = n_after > -1
    count = n_before + max(n_after, 0)
    nondeterministic_iterable = False
    for b in abstract_utils.expand_type_parameter_instances(seq.bindings):
        if b.data.full_name in ("builtins.set", "builtins.frozenset"):
            # Unpacking a set yields elements in arbitrary order.
            nondeterministic_iterable = True
        tup = self._get_literal_sequence(b.data)
        if tup is not None:
            # Literal sequence: check the element count against the targets.
            if has_slurp and len(tup) >= count:
                options.append(self._restructure_tuple(state, tup, n_before, n_after))
                continue
            elif len(tup) == count:
                options.append(tup)
                continue
            else:
                self.ctx.errorlog.bad_unpacking(self.frames, len(tup), count)
        if b.IsVisible(state.node):
            nontuple_seq.PasteBinding(b, state.node)
    if nontuple_seq.bindings:
        # Non-literal iterable: rely on its iterator protocol.
        state, itr = self._get_iter(state, nontuple_seq)
        state, itr_result = self._call(state, itr, "__next__", ())
    elif not options:
        itr_result = self.ctx.new_unsolvable(state.node)
    else:
        itr_result = None
    if itr_result:
        # For a non-literal iterable, next() should always return the same type T,
        # so we can iterate `count` times in both UNPACK_SEQUENCE and UNPACK_EX,
        # and assign the slurp variable type List[T].
        option = [itr_result for _ in range(count)]
        if has_slurp:
            slurp = self.ctx.convert.build_list_of_type(state.node, itr_result)
            option = option[:n_before] + [slurp] + option[n_before:]
        options.append(option)
    values = tuple(
        self.ctx.convert.build_content(value, discard_concrete_values=False)
        for value in zip(*options)
    )
    if len(values) > 1 and nondeterministic_iterable:
        self.ctx.errorlog.nondeterministic_unpacking(self.frames)
    for value in reversed(values):
        if not value.bindings:
            # For something like
            #   for i, j in enumerate(()):
            #     print j
            # there are no bindings for j, so we have to add an empty binding
            # to avoid a name error on the print statement.
            value = self.ctx.convert.empty.to_variable(state.node)
        state = state.push(value)
    return state
def byte_UNPACK_SEQUENCE(self, state, op):
    """Unpacks an iterable into exactly op.arg targets."""
    return self._unpack_sequence(state, op.arg)

def byte_UNPACK_EX(self, state, op):
    """Unpacks with a starred target: low byte = before, high byte = after."""
    return self._unpack_sequence(state, op.arg & 0xFF, op.arg >> 8)

def byte_BUILD_SLICE(self, state, op):
    """Pops 2 or 3 values and pushes a slice built from them."""
    if op.arg not in (2, 3):  # pragma: no cover
        raise VirtualMachineError(f"Strange BUILD_SLICE count: {op.arg!r}")
    state, bounds = state.popn(op.arg)
    return state.push(self.ctx.convert.build_slice(state.node, *bounds))
def byte_LIST_APPEND(self, state, op):
    """Appends TOS to the list at stack depth op.arg (list comprehensions)."""
    state, element = state.pop()
    target_list = state.peek(op.arg)
    state, _ = self._call(state, target_list, "append", (element,))
    return state
def byte_LIST_EXTEND(self, state, op):
    """Pops top-of-stack and uses it to extend the list at stack[op.arg]."""
    state, update = state.pop()
    target = state.peek(op.arg)
    if not all(abstract_utils.is_concrete_list(v) for v in target.data):
        # Non-concrete target: just call list.extend.
        state, _ = self._call(state, target, "extend", (update,))
        return state
    # Is the list we're constructing going to be the argument list for a
    # function call? If so, we will keep any abstract.Splat objects around so we
    # can unpack the function arguments precisely. Otherwise, splats will be
    # converted to indefinite iterables.
    keep_splats = False
    next_op = op
    # Before Python 3.9, BUILD_TUPLE_UNPACK took care of tuple unpacking. In
    # 3.9+, this opcode is replaced by LIST_EXTEND+LIST_TO_TUPLE+CALL_FUNCTION,
    # so CALL_FUNCTION needs to be considered as consuming the list.
    if self.ctx.python_version >= (3, 11):
        call_consumers = (opcodes.CALL,)
    else:
        call_consumers = (opcodes.CALL_FUNCTION,)
    stop_classes = blocks.STORE_OPCODES + call_consumers
    # Scan forward for the opcode that consumes the list being built.
    while next_op:
        next_op = next_op.next
        if isinstance(next_op, opcodes.CALL_FUNCTION_EX):
            keep_splats = True
            break
        elif next_op.__class__ in stop_classes:
            break
    update_elements = vm_utils.unpack_iterable(state.node, update, self.ctx)
    if not keep_splats and any(
        abstract_utils.is_var_splat(x) for x in update_elements
    ):
        # Splats that won't be consumed by CALL_FUNCTION_EX are merged in as
        # indefinite iterables.
        for target_value in target.data:
            vm_utils.merge_indefinite_iterables(
                state.node, target_value, update_elements
            )
    else:
        for target_value in target.data:
            target_value.pyval.extend(update_elements)
            for update_value in update.data:
                update_param = update_value.get_instance_type_parameter(
                    abstract_utils.T, state.node
                )
                # We use Instance.merge_instance_type_parameter because the List
                # implementation also sets is_concrete to False.
                abstract.Instance.merge_instance_type_parameter(
                    target_value, state.node, abstract_utils.T, update_param
                )
    return state
def byte_SET_ADD(self, state, op):
    """Adds TOS to the set at stack depth op.arg (set comprehensions)."""
    state, element = state.pop()
    target_set = state.peek(op.arg)
    state, _ = self._call(state, target_set, "add", (element,))
    return state

def byte_SET_UPDATE(self, state, op):
    """Pops TOS and set-updates the set at stack depth op.arg with it."""
    state, update = state.pop()
    target_set = state.peek(op.arg)
    state, _ = self._call(state, target_set, "update", (update,))
    return state

def byte_MAP_ADD(self, state, op):
    """Stores a key/value pair into the dict at stack depth op.arg."""
    # TOS is the value and TOS1 the key (dict comprehensions).
    state, (key, val) = state.popn(2)
    target_map = state.peek(op.arg)
    state, _ = self._call(state, target_map, "__setitem__", (key, val))
    return state
def byte_DICT_MERGE(self, state, op):
    # DICT_MERGE is like DICT_UPDATE but raises an exception for duplicate keys.
    # For type analysis the duplicate-key check is irrelevant, so delegate.
    return self.byte_DICT_UPDATE(state, op)

def byte_DICT_UPDATE(self, state, op):
    """Pops top-of-stack and uses it to update the dict at stack[op.arg]."""
    state, update = state.pop()
    target = state.peek(op.arg)

    def pytd_update(state):
        # Fallback: model the update as a call to dict.update.
        state, _ = self._call(state, target, "update", (update,))
        return state

    if not all(abstract_utils.is_concrete_dict(v) for v in target.data):
        return pytd_update(state)
    try:
        update_value = abstract_utils.get_atomic_python_constant(update, dict)
    except abstract_utils.ConversionError:
        return pytd_update(state)
    # Both sides are concrete: copy the entries directly.
    for abstract_target_value in target.data:
        for k, v in update_value.items():
            abstract_target_value.set_str_item(state.node, k, v)
    return state
def byte_PRINT_EXPR(self, state, op):
    # Only used in the interactive interpreter, not in modules.
    return state.pop_and_discard()

def byte_JUMP_IF_TRUE_OR_POP(self, state, op):
    """Jumps if TOS is truthy (keeping it); otherwise pops it."""
    return vm_utils.jump_if(
        state, op, self.ctx, jump_if_val=True, pop=vm_utils.PopBehavior.OR
    )

def byte_JUMP_IF_FALSE_OR_POP(self, state, op):
    """Jumps if TOS is falsy (keeping it); otherwise pops it."""
    return vm_utils.jump_if(
        state, op, self.ctx, jump_if_val=False, pop=vm_utils.PopBehavior.OR
    )

def byte_JUMP_IF_TRUE(self, state, op):
    """Jumps if TOS is truthy, without popping."""
    return vm_utils.jump_if(state, op, self.ctx, jump_if_val=True)

def byte_JUMP_IF_FALSE(self, state, op):
    """Jumps if TOS is falsy, without popping."""
    return vm_utils.jump_if(state, op, self.ctx, jump_if_val=False)

def byte_POP_JUMP_IF_TRUE(self, state, op):
    """Pops TOS and jumps if it is truthy."""
    return vm_utils.jump_if(
        state, op, self.ctx, jump_if_val=True, pop=vm_utils.PopBehavior.ALWAYS
    )

def byte_POP_JUMP_IF_FALSE(self, state, op):
    """Pops TOS and jumps if it is falsy."""
    return vm_utils.jump_if(
        state, op, self.ctx, jump_if_val=False, pop=vm_utils.PopBehavior.ALWAYS
    )

def byte_JUMP_FORWARD(self, state, op):
    """Unconditional relative jump."""
    self.store_jump(op.target, state.forward_cfg_node("JumpForward"))
    return state

def byte_JUMP_ABSOLUTE(self, state, op):
    """Unconditional absolute jump."""
    self.store_jump(op.target, state.forward_cfg_node("JumpAbsolute"))
    return state
def _check_exc_match(self, state):
    """Refines the caught exception and pushes an ambiguous bool result."""
    if self.ctx.python_version >= (3, 11):
        state, exc_type = state.pop()
    else:
        state, (unused_exc, exc_type) = state.popn(2)
    # At runtime, this opcode calls isinstance(exc, exc_type) and pushes the
    # result onto the stack. Instead, we use exc_type to refine the type of the
    # exception instance still on the stack and push on an indefinite result for
    # the isinstance call.
    state = self._replace_abstract_exception(state, exc_type)
    return state.push(
        self.ctx.convert.bool_values[None].to_variable(state.node)
    )

def byte_JUMP_IF_NOT_EXC_MATCH(self, state, op):
    # Opcode for exception type matching in Python 3.10-. In 3.11+, this is
    # replaced by CHECK_EXC_MATCH followed by POP_JUMP_FORWARD_IF_FALSE.
    state = self._check_exc_match(state)
    return vm_utils.jump_if(
        state, op, self.ctx, jump_if_val=False, pop=vm_utils.PopBehavior.ALWAYS
    )

def byte_CHECK_EXC_MATCH(self, state, op):
    # Opcode for exception type matching in Python 3.11+. For 3.10-, see
    # JUMP_IF_NOT_EXC_MATCH.
    del op  # unused
    return self._check_exc_match(state)
def byte_SETUP_LOOP(self, state, op):
    # We ignore the implicit jump in SETUP_LOOP; the interpreter never takes it.
    return vm_utils.push_block(state, "loop")

def byte_GET_ITER(self, state, op):
    """Get the iterator for an object."""
    state, seq = state.pop()
    state, itr = self._get_iter(state, seq)
    # Push the iterator onto the stack and return.
    return state.push(itr)
def store_jump(self, target, state):
    """Stores a jump to the target opcode.

    Records the branch in the frame's CFG targets and merges the given state
    into whatever state has already been stored for the target.
    """
    assert target
    assert self.frame is not None
    current_block = self.frame.current_block
    current_opcode = self.frame.current_opcode
    assert current_block is not None
    assert current_opcode is not None
    self.frame.targets[current_block.id].append(target)
    # Jumps can implicitly enter or leave an exception handler block.
    if current_opcode.push_exc_block:
        state = vm_utils.push_block(
            state, "setup-except", index=current_opcode.index
        )
    elif current_opcode.pop_exc_block:
        state, _ = state.pop_block()
    self.frame.states[target] = state.merge_into(self.frame.states.get(target))
def byte_FOR_ITER(self, state, op):
    # In 3.12+, FOR_ITER pops the iterator off the stack conditionally, see
    # https://github.com/python/cpython/issues/121399.
    # Pytype doesn't actually execute the loop, so we need to handle this
    # differently. We always pop the iterator here, same as in <=3.11. END_FOR
    # is a no-op. Since we don't execute the loop, we never have a situation
    # where at the end of the loop the top of the stack is `[iter, iter()]`, so
    # the double-pop of END_FOR is not needed.
    # NOTE: `state` is deliberately not reassigned here — the loop-exit edge
    # gets the popped state, while the loop-body path below keeps the iterator
    # on the stack and calls its __next__.
    self.store_jump(op.target, state.pop_and_discard())
    state, f = self.load_attr(state, state.top(), "__next__")
    state = state.push(f)
    return self.call_function_from_stack(state, 0, None, None)
def _revert_state_to(self, state, name):
    """Pops blocks (and their stack entries) until a block of type `name`."""
    while state.block_stack[-1].type != name:
        state, block = state.pop_block()
        while block.level < len(state.data_stack):
            state = state.pop_and_discard()
    return state

def byte_BREAK_LOOP(self, state, op):
    """Jumps past the loop; the fall-through state is returned unchanged."""
    new_state, block = self._revert_state_to(state, "loop").pop_block()
    while block.level < len(new_state.data_stack):
        new_state = new_state.pop_and_discard()
    self.store_jump(op.block_target, new_state)
    return state

def byte_CONTINUE_LOOP(self, state, op):
    """Jumps back to the loop head; the fall-through state is unchanged."""
    new_state = self._revert_state_to(state, "loop")
    self.store_jump(op.target, new_state)
    return state
def _setup_except(self, state, op):
    """Sets up an except block."""
    if isinstance(op, opcodes.SETUP_EXCEPT_311):
        # 3.11 encodes the handler's expected stack depth; trim to match.
        jump_state, _ = state.popn(len(state.data_stack) - op.stack_depth)
    else:
        jump_state = state
    # Assume that it's possible to throw the exception at the first
    # instruction of the code:
    jump_state = self.push_abstract_exception(jump_state)
    self.store_jump(op.target, jump_state)
    return vm_utils.push_block(state, "setup-except", index=op.index)

def is_setup_except(self, op):
    """Check whether op is setting up an except block."""
    if isinstance(op, opcodes.SETUP_FINALLY):
        # A SETUP_FINALLY whose target block is not preceded by BEGIN_FINALLY
        # is actually setting up an except handler.
        for i, block in enumerate(self.frame.f_code.order):
            if block.id == op.arg:
                if not any(
                    isinstance(o, opcodes.BEGIN_FINALLY)
                    for o in self.frame.f_code.order[i - 1]
                ):
                    return True
                break
    return False
def byte_SETUP_FINALLY(self, state, op):
    """Implements the SETUP_FINALLY opcode."""
    # SETUP_FINALLY handles setup for both except and finally blocks. Examine
    # the targeted block to determine which setup to do.
    if self.is_setup_except(op):
        return self._setup_except(state, op)
    # Emulate finally by connecting the try to the finally block (with
    # empty reason/why/continuation):
    self.store_jump(
        op.target, state.push(self.ctx.convert.build_none(state.node))
    )
    return vm_utils.push_block(state, "finally")
# New python3.8+ exception handling opcodes:
# BEGIN_FINALLY, END_ASYNC_FOR, CALL_FINALLY, POP_FINALLY

def byte_BEGIN_FINALLY(self, state, op):
    """Pushes None as the 'no exception' marker for the finally block."""
    return state.push(self.ctx.convert.build_none(state.node))

def byte_CALL_FINALLY(self, state, op):
    # No-op for analysis: the finally body is reached via the stored jump.
    return state

def byte_END_ASYNC_FOR(self, state, op):
    """Cleans up the stack at the end of an async for loop."""
    if self.ctx.python_version < (3, 11):
        state, _ = state.popn(7)
    else:
        # The cpython docs say this pops two values, the iterable and an
        # exception. Since we have not pushed an exception in GET_ANEXT, we don't
        # need to pop one here.
        state, _ = state.pop()
    return state
def byte_POP_FINALLY(self, state, op):
    """Implements POP_FINALLY."""
    preserve_tos = op.arg
    if preserve_tos:
        state, saved_tos = state.pop()
    state, tos = state.pop()
    # If TOS is neither None nor an int, a full exception entry (6 values
    # total) was pushed; discard the remaining five.
    if any(
        d != self.ctx.convert.none and d.cls != self.ctx.convert.int_type
        for d in tos.data
    ):
        state, _ = state.popn(5)
    if preserve_tos:
        state = state.push(saved_tos)  # pytype: disable=name-error
    return state

def byte_POP_BLOCK(self, state, op):
    """Pops the innermost block off the block stack."""
    state, _ = state.pop_block()
    return state

def byte_RAISE_VARARGS(self, state, op):
    """Raise an exception."""
    argc = op.arg
    state, _ = state.popn(argc)
    if argc == 0 and state.exception:
        # Bare `raise` inside a handler re-raises the active exception.
        return state.set_why("reraise")
    else:
        state = state.set_exception()
        return state.set_why("exception")

def byte_POP_EXCEPT(self, state, op):  # Python 3 only
    return self.pop_abstract_exception(state)
def byte_SETUP_WITH(self, state, op):
    """Starts a 'with' statement. Will push a block."""
    state, ctxmgr = state.pop()
    level = len(state.data_stack)
    # __exit__ is looked up eagerly and kept on the stack below the block,
    # mirroring CPython's runtime behavior.
    state, exit_method = self.load_attr(state, ctxmgr, "__exit__")
    state = state.push(exit_method)
    state, ctxmgr_obj = self._call(state, ctxmgr, "__enter__", ())
    state = vm_utils.push_block(state, "finally", level)
    return state.push(ctxmgr_obj)
def _with_cleanup_start_none(self, state, op):
    """Implements WITH_CLEANUP_START when TOS is None (no exception raised)."""
    state, u = state.pop()  # pop 'None'
    state, exit_func = state.pop()
    state = state.push(u)
    state = state.push(self.ctx.convert.build_none(state.node))
    # __exit__(None, None, None) — the no-exception call signature.
    v = self.ctx.convert.build_none(state.node)
    w = self.ctx.convert.build_none(state.node)
    state, suppress_exception = self.call_function_with_state(
        state, exit_func, (u, v, w)
    )
    return state.push(suppress_exception)

def _with_cleanup_start(self, state, op):
    """Implements WITH_CLEANUP_START."""
    tos = state.top()
    if tos.data == [self.ctx.convert.none]:
        return self._with_cleanup_start_none(state, op)
    # An exception is active: call __exit__(exc_type, exc_value, traceback).
    state, (w, v, u, *rest, exit_func) = state.popn(7)
    state = state.push(*rest)
    state = state.push(self.ctx.convert.build_none(state.node))
    state = state.push(w, v, u)
    state, suppress_exception = self.call_function_with_state(
        state, exit_func, (u, v, w)
    )
    return state.push(suppress_exception)

def byte_WITH_CLEANUP_START(self, state, op):
    """Called to start cleaning up a with block. Calls the exit handlers etc."""
    return self._with_cleanup_start(state, op)

def byte_WITH_CLEANUP_FINISH(self, state, op):
    """Called to finish cleaning up a with block."""
    state, suppress_exception = state.pop()
    state, second = state.pop()
    # If __exit__ returned True while an exception was active, the exception
    # is suppressed: push None as the 'why' marker.
    if suppress_exception.data == [self.ctx.convert.true] and second.data != [
        self.ctx.convert.none
    ]:
        state = state.push(self.ctx.convert.build_none(state.node))
    return state
def _convert_kw_defaults(self, values):
    """Converts a flat (key, value, key, value, ...) sequence to a dict."""
    kw_defaults = {}
    for i in range(0, len(values), 2):
        key_var, value = values[i : i + 2]
        key = abstract_utils.get_atomic_python_constant(key_var)
        kw_defaults[key] = value
    return kw_defaults

def _get_extra_closure_args(self, state, arg):
    """Get closure annotations and defaults from the stack."""
    # MAKE_CLOSURE packs three counts into one integer argument.
    num_pos_defaults = arg & 0xFF
    num_kw_defaults = (arg >> 8) & 0xFF
    state, raw_annotations = state.popn((arg >> 16) & 0x7FFF)
    state, kw_defaults = state.popn(2 * num_kw_defaults)
    state, pos_defaults = state.popn(num_pos_defaults)
    free_vars = None  # Python < 3.6 does not handle closure vars here.
    kw_defaults = self._convert_kw_defaults(kw_defaults)
    annot = self.ctx.annotation_utils.convert_function_annotations(
        state.node, raw_annotations
    )
    return state, pos_defaults, kw_defaults, annot, free_vars
def _get_extra_function_args(self, state, arg):
    """Get function annotations and defaults from the stack."""
    free_vars = None
    pos_defaults = ()
    kw_defaults = {}
    annot = {}
    Flags = pyc_marshal.Flags
    # The MAKE_FUNCTION flag bits say which optional operands were pushed;
    # pop them in the reverse of the order CPython pushes them.
    if arg & Flags.MAKE_FUNCTION_HAS_FREE_VARS:
        state, free_vars = state.pop()
    if arg & Flags.MAKE_FUNCTION_HAS_ANNOTATIONS:
        state, packed_annot = state.pop()
        # In Python 3.10+, packed_annot is a tuple of variables:
        # (param_name1, param_type1, param_name2, param_type2, ...)
        # Previously, it was a name->param_type dictionary.
        if self.ctx.python_version >= (3, 10):
            annot_seq = abstract_utils.get_atomic_python_constant(
                packed_annot, tuple
            )
            double_num_annots = len(annot_seq)
            assert not double_num_annots % 2
            annot = {}
            for i in range(double_num_annots // 2):
                name = abstract_utils.get_atomic_python_constant(
                    annot_seq[i * 2], str
                )
                annot[name] = annot_seq[i * 2 + 1]
        else:
            annot = abstract_utils.get_atomic_python_constant(packed_annot, dict)
        for k in annot:
            annot[k] = self.ctx.annotation_utils.convert_function_type_annotation(
                k, annot[k]
            )
    if arg & Flags.MAKE_FUNCTION_HAS_KW_DEFAULTS:
        state, packed_kw_def = state.pop()
        kw_defaults = abstract_utils.get_atomic_python_constant(
            packed_kw_def, dict
        )
    if arg & Flags.MAKE_FUNCTION_HAS_POS_DEFAULTS:
        state, packed_pos_def = state.pop()
        pos_defaults = abstract_utils.get_atomic_python_constant(
            packed_pos_def, tuple
        )
    annot = self.ctx.annotation_utils.convert_annotations_list(
        state.node, annot.items()
    )
    return state, pos_defaults, kw_defaults, annot, free_vars
def byte_MAKE_FUNCTION(self, state, op):
    """Create a function and push it onto the stack."""
    if self.ctx.python_version >= (3, 11):
        name = None  # the name will be read from the code object
    else:
        state, name_var = state.pop()
        name = abstract_utils.get_atomic_python_constant(name_var)
    state, code = state.pop()
    state, defaults, kw_defaults, annot, free_vars = (
        self._get_extra_function_args(state, op.arg)
    )
    globs = self.get_globals_dict()
    func_var = vm_utils.make_function(
        name,
        state.node,
        code,
        globs,
        defaults,
        kw_defaults,
        annotations=annot,
        closure=free_vars,
        opcode=op,
        ctx=self.ctx,
    )
    func = func_var.data[0]
    # Decorators and pragmas are recorded per source line by the director.
    func.decorators = self._director.decorators[op.line]
    func.cache_return = self._director.has_pragma("cache-return", op.line)
    vm_utils.process_function_type_comment(state.node, op, func, self.ctx)
    self.trace_opcode(op, func.name, func_var)
    self.trace_functiondef(func_var)
    return state.push(func_var)

def byte_MAKE_CLOSURE(self, state, op):
    """Make a function that binds local variables."""
    state, name_var = state.pop()
    name = abstract_utils.get_atomic_python_constant(name_var)
    state, (closure, code) = state.popn(2)
    state, defaults, kw_defaults, annot, _ = self._get_extra_closure_args(
        state, op.arg
    )
    globs = self.get_globals_dict()
    fn = vm_utils.make_function(
        name,
        state.node,
        code,
        globs,
        defaults,
        kw_defaults,
        annotations=annot,
        closure=closure,
        opcode=op,
        ctx=self.ctx,
    )
    self.trace_functiondef(fn)
    return state.push(fn)
def byte_CALL_FUNCTION(self, state, op):
    """Calls a function with op.arg positional arguments from the stack."""
    return self.call_function_from_stack(state, op.arg, None, None)

def byte_CALL_FUNCTION_VAR(self, state, op):
    """Calls a function with *args."""
    state, starargs = self.pop_varargs(state)
    starargs = vm_utils.ensure_unpacked_starargs(state.node, starargs, self.ctx)
    return self.call_function_from_stack(state, op.arg, starargs, None)

def byte_CALL_FUNCTION_KW(self, state, op):
    """Calls a function with keyword arguments."""
    state, kwargs = self.pop_kwargs(state)
    return self.call_function_from_stack(state, op.arg, None, kwargs)

def byte_CALL_FUNCTION_VAR_KW(self, state, op):
    """Calls a function with both *args and **kwargs."""
    state, kwargs = self.pop_kwargs(state)
    state, starargs = self.pop_varargs(state)
    starargs = vm_utils.ensure_unpacked_starargs(state.node, starargs, self.ctx)
    return self.call_function_from_stack(state, op.arg, starargs, kwargs)

def byte_CALL_FUNCTION_EX(self, state, op):
    """Call a function."""
    if op.arg & pyc_marshal.Flags.CALL_FUNCTION_EX_HAS_KWARGS:
        state, starstarargs = state.pop()
    else:
        starstarargs = None
    state, starargs = state.pop()
    starargs = vm_utils.ensure_unpacked_starargs(state.node, starargs, self.ctx)
    state, fn = state.pop()
    if self.ctx.python_version >= (3, 11):
        # Discard the NULL pushed by the 3.11 calling convention.
        state = state.pop_and_discard()
    with self._reset_overloads(fn):
        state, ret = self.call_function_with_state(
            state,
            fn,
            (),
            namedargs=None,
            starargs=starargs,
            starstarargs=starstarargs,
        )
    return state.push(ret)
def _check_frame_yield(self, state, yield_value):
    """Checks a yielded value against the frame's declared generator type.

    Returns the declared generator type if the frame checks returns, else None.
    """
    if not self.frame.check_return:
        return None
    generator_type = self.frame.allowed_returns
    assert generator_type is not None
    self._check_return(
        state.node,
        yield_value,
        generator_type.get_formal_type_parameter(abstract_utils.T),
    )
    return generator_type

def byte_YIELD_VALUE(self, state, op):
    """Yield a value from a generator."""
    state, yield_value = state.pop()
    # Accumulate every yielded value into the frame's yield_variable.
    yield_variable = self.frame.yield_variable.AssignToNewVariable(state.node)
    yield_variable.PasteVariable(yield_value, state.node)
    self.frame.yield_variable = yield_variable
    generator_type = self._check_frame_yield(state, yield_value)
    # The value pushed back is what send() passes in: the declared send type
    # (Generator's second parameter) if known, otherwise unsolvable.
    if generator_type:
        send_type = generator_type.get_formal_type_parameter(abstract_utils.T2)
        send_var = self.init_class(state.node, send_type)
    else:
        send_var = self.ctx.new_unsolvable(state.node)
    return state.push(send_var)
def byte_IMPORT_NAME(self, state, op):
    """Import a single module."""
    full_name = op.argval
    # The identifiers in the (unused) fromlist are repeated in IMPORT_FROM.
    state, (level_var, fromlist) = state.popn(2)
    if op.line in self._director.ignore:
        # "import name  # type: ignore"
        self.trace_opcode(op, full_name, None)
        return state.push(self.ctx.new_unsolvable(state.node))
    # The IMPORT_NAME for an "import a.b.c" will push the module "a".
    # However, for "from a.b.c import Foo" it'll push the module "a.b.c". Those
    # two cases are distinguished by whether fromlist is None or not.
    if self._var_is_none(fromlist):
        name = full_name.split(".", 1)[0]  # "a.b.c" -> "a"
    else:
        name = full_name
    level = abstract_utils.get_atomic_python_constant(level_var)
    module = self.import_module(name, full_name, level)
    if module is None:
        log.warning("Couldn't find module %r", name)
        self.ctx.errorlog.import_error(self.frames, name)
        module = self.ctx.convert.unsolvable
    mod = module.to_variable(state.node)
    self.trace_opcode(op, full_name, mod)
    return state.push(mod)

def byte_IMPORT_FROM(self, state, op):
    """IMPORT_FROM is mostly like LOAD_ATTR but doesn't pop the container."""
    name = op.argval
    if op.line in self._director.ignore:
        # "from x import y  # type: ignore"
        # TODO(mdemello): Should we add some sort of signal data to indicate that
        # this should be treated as resolvable even though there is no module?
        self.trace_opcode(op, name, None)
        return state.push(self.ctx.new_unsolvable(state.node))
    module = state.top()
    state, attr = self.load_attr_noerror(state, module, name)
    if attr is None:
        full_name = module.data[0].name + "." + name
        self.ctx.errorlog.import_error(self.frames, full_name)
        attr = self.ctx.new_unsolvable(state.node)
    self.trace_opcode(op, name, attr)
    return state.push(attr)

def byte_LOAD_BUILD_CLASS(self, state, op):
    """Pushes the class-building callable used by `class` statements."""
    cls = abstract.BuildClass(self.ctx).to_variable(state.node)
    # Will be copied into the abstract.InterpreterClass
    cls.data[0].decorators = self._director.decorators[op.line]
    self.trace_opcode(op, "", cls)
    return state.push(cls)
def byte_END_FINALLY(self, state, op):
    """Implementation of the END_FINALLY opcode."""
    state, exc = state.pop()
    if self._var_is_none(exc):
        # No pending exception; fall through normally.
        return state
    else:
        log.info("Popping exception %r", exc)
        state = state.pop_and_discard()
        state = state.pop_and_discard()
        # If a pending exception makes it all the way out of an "except" block,
        # no handler matched, hence Python re-raises the exception.
        return state.set_why("reraise")
def _check_return(self, node, actual, formal):
    """Checks `actual` against the `formal` return type. Base is a no-op."""
    return False  # overwritten in tracer_vm.py

def _set_frame_return(self, node, frame, var):
    """Merges var (or the annotated return type) into frame.return_variable."""
    if frame.allowed_returns is not None:
        # Trust the annotation over the inferred value.
        retvar = self.init_class(node, frame.allowed_returns)
    else:
        retvar = var
    frame.return_variable.PasteVariable(retvar, node)

def _return_value(self, state, var):
    """Get and check the return value."""
    if self.frame.check_return:
        if (
            self.frame.f_code.has_generator()
            or self.frame.f_code.has_coroutine()
            or self.frame.f_code.has_iterable_coroutine()
        ):
            # For generators/coroutines the value of a `return` statement is
            # the V type parameter, not the annotation itself.
            ret_type = self.frame.allowed_returns
            assert ret_type is not None
            allowed_return = ret_type.get_formal_type_parameter(abstract_utils.V)
        elif not self.frame.f_code.has_async_generator():
            allowed_return = self.frame.allowed_returns
        else:
            allowed_return = None
        if allowed_return:
            self._check_return(state.node, var, allowed_return)
    if self.ctx.options.no_return_any and any(
        d == self.ctx.convert.unsolvable for d in var.data
    ):
        self.ctx.errorlog.any_return_type(self.frames)
    self._set_frame_return(state.node, self.frame, var)
    return state.set_why("return")

def byte_RETURN_VALUE(self, state, op):
    """Returns TOS from the current function."""
    state, var = state.pop()
    return self._return_value(state, var)

def byte_RETURN_CONST(self, state, op):
    """Returns a constant without pushing it first (3.12+)."""
    const = self.ctx.convert.constant_to_var(op.argval, node=state.node)
    self.trace_opcode(op, op.argval, const)
    return self._return_value(state, const)
def _import_star(self, state):
    """Pops a module and stores all its contents in locals()."""
    # TODO(b/159041010): this doesn't use __all__ properly.
    state, mod_var = state.pop()
    mod = abstract_utils.get_atomic_value(mod_var)
    if isinstance(mod, (abstract.Unknown, abstract.Unsolvable)):
        # Unknown module: remember that locals may contain anything.
        self.has_unknown_wildcard_imports = True
        return state
    log.info("%r", mod)
    for name, var in mod.items():
        # Only public names (plus __getattr__) are copied into locals.
        if name[0] != "_" or name == "__getattr__":
            state = self.store_local(state, name, var)
    return state

def byte_IMPORT_STAR(self, state, op):
    """Implements `from module import *`."""
    return self._import_star(state)

def byte_SETUP_ANNOTATIONS(self, state, op):
    """Sets up variable annotations in locals()."""
    annotations = abstract.AnnotationsDict(
        self.current_annotated_locals, self.ctx
    ).to_variable(state.node)
    return self.store_local(state, "__annotations__", annotations)

def _record_annotation(self, node, op, name, typ):
    # Annotations in self._director are handled by _apply_annotation.
    if self.current_line not in self._director.annotations:
        self._record_local(node, op, name, typ)

def byte_STORE_ANNOTATION(self, state, op):
    """Implementation of the STORE_ANNOTATION opcode."""
    state, annotations_var = self.load_local(state, "__annotations__")
    name = op.argval
    state, value = state.pop()
    typ = self.ctx.annotation_utils.extract_annotation(
        state.node,
        value,
        name,
        self.simple_stack(),
        allowed_type_params=self.frame.type_params,
    )
    self._record_annotation(state.node, op, name, typ)
    key = self.ctx.convert.primitive_instances[str]
    state = self.store_subscr(
        state, annotations_var, key.to_variable(state.node), value
    )
    return self.store_local(state, "__annotations__", annotations_var)
def byte_GET_YIELD_FROM_ITER(self, state, op):
    """Implementation of the GET_YIELD_FROM_ITER opcode."""
    # Do nothing with TOS bindings that are generator iterators or coroutines;
    # call GET_ITER on the rest.
    get_iter = self.ctx.program.NewVariable()
    unchanged = self.ctx.program.NewVariable()
    state, tos = state.pop()
    for b in tos.bindings:
        if b.data.full_name in ("typing.Generator", "typing.Coroutine"):
            unchanged.PasteBinding(b)
        else:
            get_iter.PasteBinding(b)
    if get_iter.bindings:
        state = state.push(get_iter)
        state = self.byte_GET_ITER(state, op)
        # Merge the untouched generator/coroutine bindings back into TOS.
        state.peek(0).PasteVariable(unchanged)
    else:
        state = state.push(unchanged)
    return state

def byte_BUILD_LIST_UNPACK(self, state, op):
    """Builds a list from op.arg iterables, unpacking each (e.g. [*a, *b])."""
    return vm_utils.unpack_and_build(
        state,
        op.arg,
        self.ctx.convert.build_list,
        self.ctx.convert.list_type,
        self.ctx,
    )

def _list_to_tuple(self, state):
    """Convert the list at the top of the stack to a tuple."""
    state, lst_var = state.pop()
    tup_var = self.ctx.program.NewVariable()
    for b in lst_var.bindings:
        if abstract_utils.is_concrete_list(b.data):
            # A concrete list converts to a concrete (heterogeneous) tuple.
            tup_var.AddBinding(
                self.ctx.convert.tuple_to_value(b.data.pyval), {b}, state.node
            )
        else:
            # Otherwise build tuple[T] from the list's element type parameter.
            param = b.data.get_instance_type_parameter(abstract_utils.T)
            tup = abstract.Instance(self.ctx.convert.tuple_type, self.ctx)
            tup.merge_instance_type_parameter(state.node, abstract_utils.T, param)
            tup_var.AddBinding(tup, {b}, state.node)
    return state.push(tup_var)

def byte_LIST_TO_TUPLE(self, state, op):
    """Converts the list at TOS to a tuple."""
    del op  # unused
    return self._list_to_tuple(state)
def byte_BUILD_MAP_UNPACK(self, state, op):
    """Builds a dict by merging op.arg maps (e.g. {**a, **b})."""
    state, maps = state.popn(op.arg)
    args = vm_utils.build_map_unpack(state, maps, self.ctx)
    return state.push(args)

def byte_BUILD_MAP_UNPACK_WITH_CALL(self, state, op):
    """Like BUILD_MAP_UNPACK, but for the **kwargs of a function call."""
    state, maps = state.popn(op.arg)
    args = vm_utils.build_map_unpack(state, maps, self.ctx)
    return state.push(args)

def byte_BUILD_TUPLE_UNPACK(self, state, op):
    """Builds a tuple from op.arg iterables, unpacking each."""
    return vm_utils.unpack_and_build(
        state,
        op.arg,
        self.ctx.convert.build_tuple,
        self.ctx.convert.tuple_type,
        self.ctx,
    )

def byte_BUILD_TUPLE_UNPACK_WITH_CALL(self, state, op):
    """Like BUILD_TUPLE_UNPACK, but for the *args of a function call."""
    state, seq = vm_utils.pop_and_unpack_list(state, op.arg, self.ctx)
    ret = vm_utils.build_function_args_tuple(state.node, seq, self.ctx)
    return state.push(ret)

def byte_BUILD_SET_UNPACK(self, state, op):
    """Builds a set from op.arg iterables, unpacking each."""
    return vm_utils.unpack_and_build(
        state,
        op.arg,
        self.ctx.convert.build_set,
        self.ctx.convert.set_type,
        self.ctx,
    )

def byte_SETUP_ASYNC_WITH(self, state, op):
    """Sets up an `async with` block around the awaited __aenter__ result."""
    state, res = state.pop()
    level = len(state.data_stack)
    state = vm_utils.push_block(state, "finally", level)
    return state.push(res)

def byte_FORMAT_VALUE(self, state, op):
    """Implements f-string formatting of a single value."""
    if op.arg & pyc_marshal.Flags.FVS_MASK:
        # A format spec was pushed as well; discard it.
        state = state.pop_and_discard()
    # FORMAT_VALUE pops, formats and pushes back a string, so we just need to
    # push a new string onto the stack.
    state = state.pop_and_discard()
    ret = abstract.Instance(self.ctx.convert.str_type, self.ctx).to_variable(
        state.node
    )
    self.trace_opcode(None, "__mod__", ret)
    return state.push(ret)

def byte_BUILD_CONST_KEY_MAP(self, state, op):
    """Builds a dict whose keys are a constant tuple; values come off the stack."""
    state, keys = state.pop()
    keys = abstract_utils.get_atomic_python_constant(keys, tuple)
    the_map = self.ctx.convert.build_map(state.node)
    assert len(keys) == op.arg
    # Values were pushed in key order, so pop them in reverse.
    for key in reversed(keys):
        state, val = state.pop()
        state = self.store_subscr(state, the_map, key, val)
    return state.push(the_map)

def byte_BUILD_STRING(self, state, op):
    """Concatenates op.arg strings (f-string assembly); result is a str."""
    # TODO(mdemello): Test this.
    state, _ = state.popn(op.arg)
    ret = abstract.Instance(self.ctx.convert.str_type, self.ctx)
    return state.push(ret.to_variable(state.node))
def byte_GET_AITER(self, state, op):
    """Implementation of the GET_AITER opcode."""
    state, obj = state.pop()
    state, itr = self._get_aiter(state, obj)
    # Push the iterator onto the stack and return.
    state = state.push(itr)
    return state

def byte_GET_ANEXT(self, state, op):
    """Implementation of the GET_ANEXT opcode."""
    state, ret = self._call(state, state.top(), "__anext__", ())
    if not self._check_return(state.node, ret, self.ctx.convert.awaitable_type):
        # __anext__ did not return an awaitable; fall back to unsolvable.
        ret = self.ctx.new_unsolvable(state.node)
    return state.push(ret)

def byte_BEFORE_ASYNC_WITH(self, state, op):
    """Implementation of the BEFORE_ASYNC_WITH opcode."""
    # Pop a context manager and push its `__aexit__` and `__aenter__()`.
    state, ctxmgr = state.pop()
    state, aexit_method = self.load_attr(state, ctxmgr, "__aexit__")
    state = state.push(aexit_method)
    state, ctxmgr_obj = self._call(state, ctxmgr, "__aenter__", ())
    return state.push(ctxmgr_obj)

def byte_GET_AWAITABLE(self, state, op):
    """Implementation of the GET_AWAITABLE opcode."""
    state, obj = state.pop()
    state, ret = vm_utils.to_coroutine(state, obj, True, self.ctx)
    if not self._check_return(state.node, ret, self.ctx.convert.awaitable_type):
        ret = self.ctx.new_unsolvable(state.node)
    return state.push(ret)

def _get_generator_yield(self, node, generator_var):
    """Collects the yield type (Generator's T parameter) of generator_var."""
    yield_var = self.frame.yield_variable.AssignToNewVariable(node)
    for generator in generator_var.data:
        if generator.full_name == "typing.Generator":
            yield_value = generator.get_instance_type_parameter(abstract_utils.T)
            yield_var.PasteVariable(yield_value, node)
    return yield_var

def _get_generator_return(self, node, generator_var):
    """Gets generator_var's return value."""
    ret_var = self.ctx.program.NewVariable()
    for b in generator_var.bindings:
        generator = b.data
        if isinstance(
            generator,
            (abstract.Generator, abstract.Coroutine, abstract.Unsolvable),
        ):
            ret = generator.get_instance_type_parameter(abstract_utils.V)
            ret_var.PasteVariable(ret, node, {b})
        elif (
            isinstance(generator, abstract.Instance)
            and isinstance(
                generator.cls, (abstract.ParameterizedClass, abstract.PyTDClass)
            )
            and generator.cls.full_name
            in ("typing.Awaitable", "typing.Coroutine", "typing.Generator")
        ):
            # Awaitable carries its result in T; Coroutine/Generator in V.
            if generator.cls.full_name == "typing.Awaitable":
                ret = generator.get_instance_type_parameter(abstract_utils.T)
            else:
                ret = generator.get_instance_type_parameter(abstract_utils.V)
            if ret.bindings:
                ret_var.PasteVariable(ret, node, {b})
            else:
                ret_var.AddBinding(self.ctx.convert.unsolvable, {b}, node)
        else:
            ret_var.AddBinding(generator, {b}, node)
    if not ret_var.bindings:
        ret_var.AddBinding(self.ctx.convert.unsolvable, [], node)
    return ret_var

def byte_YIELD_FROM(self, state, op):
    """Implementation of the YIELD_FROM opcode."""
    state, (generator, unused_send) = state.popn(2)
    yield_var = self._get_generator_yield(state.node, generator)
    if yield_var.bindings:
        self.frame.yield_variable = yield_var
        _ = self._check_frame_yield(state, yield_var)
    ret_var = self._get_generator_return(state.node, generator)
    return state.push(ret_var)
def _load_method(self, state, self_obj, name):
    """Loads and pushes a method on the stack.

    Args:
      state: the current VM state.
      self_obj: the `self` object of the method.
      name: the name of the method.

    Returns:
      (state, method) where `state` is the updated VM state and `method` is the
      method that was loaded. The method is already pushed onto the stack,
      either at the top or below the `self` object.
    """
    state, result = self.load_attr(state, self_obj, name)
    # https://docs.python.org/3.11/library/dis.html#opcode-LOAD_METHOD says that
    # this opcode should push two values onto the stack: either the unbound
    # method and its `self` or NULL and the bound method. Since we always
    # retrieve a bound method, we push the NULL
    state = self._push_null(state)
    return state.push(result), result

def byte_LOAD_METHOD(self, state, op):
    """Implementation of the LOAD_METHOD opcode."""
    name = op.argval
    state, self_obj = state.pop()
    state, method = self._load_method(state, self_obj, name)
    self.trace_opcode(op, name, (self_obj, method))
    return state

def _store_new_var_in_local(self, state, var, new_var):
    """Assign a new var to a variable in locals."""
    varname = self.get_var_name(var)
    if not varname or varname not in self.frame.f_locals.pyval:
        # We cannot store the new value back in locals.
        return state
    state = state.forward_cfg_node(f"ReplaceLocal:{varname}")
    state = self._store_value(state, varname, new_var, local=True)
    return state

def _narrow(self, state, var, pred):
    """Narrow a variable by removing bindings that do not satisfy pred."""
    keep = [b for b in var.bindings if pred(b.data)]
    if len(keep) == len(var.bindings):
        # Nothing to narrow.
        return state
    out = self.ctx.program.NewVariable()
    for b in keep:
        out.PasteBinding(b, state.node)
    return self._store_new_var_in_local(state, var, out)

def _set_type_from_assert_isinstance(self, state, var, class_spec):
    """Set type of var from an assertIsInstance(var, class_spec) call."""
    # TODO(mdemello): If we want to cast var to typ via an assertion, should
    # we require that at least one binding of var is compatible with typ?
    classes = []
    abstract_utils.flatten(class_spec, classes)
    ret = []
    # First try to narrow `var` based on `classes`.
    for c in classes:
        m = self.ctx.matcher(state.node).compute_one_match(
            var, c, keep_all_views=True, match_all_views=False
        )
        if m.success:
            for matched in m.good_matches:
                d = matched.view[var]
                if isinstance(d.data, abstract.Instance):
                    ret.append(d.data.cls)
    # If we don't have bindings from `classes` in `var`, instantiate the
    # original class spec.
    ret = ret or classes
    instance = self.init_class(state.node, self.ctx.convert.merge_values(ret))
    return self._store_new_var_in_local(state, var, instance)

def _check_test_assert(self, state, func, args):
    """Narrow the types of variables based on test assertions."""
    # We need a variable to narrow
    if not args:
        return state
    var = args[0]
    f = func.data[0]
    # Only bound methods of unittest-style test classes are recognized.
    if not isinstance(f, abstract.BoundFunction) or len(f.callself.data) != 1:
        return state
    cls = f.callself.data[0].cls
    if not (isinstance(cls, abstract.Class) and cls.is_test_class()):
        return state
    if f.name == "assertIsNotNone":
        pred = lambda v: not self._data_is_none(v)
        state = self._narrow(state, var, pred)
    elif f.name == "assertIsInstance":
        if len(args) >= 2:
            class_spec = args[1].data[0]
            state = self._set_type_from_assert_isinstance(state, var, class_spec)
    return state
def byte_CALL_METHOD(self, state, op):
    """Calls the method loaded by LOAD_METHOD with op.arg positional args."""
    state, args = state.popn(op.arg)
    state, func = state.pop()
    # pop the NULL off the stack (see LOAD_METHOD)
    state, _ = state.pop()
    with self._reset_overloads(func):
        state, result = self.call_function_with_state(state, func, args)
    return state.push(result)

def byte_RERAISE(self, state, op):
    """Re-raises the currently handled exception."""
    del op  # unused
    state = self.pop_abstract_exception(state)
    return state.set_why("reraise")

def byte_WITH_EXCEPT_START(self, state, op):
    """Calls __exit__ with the exception info at the top of the stack."""
    del op  # unused
    # The stack layout (and thus the position of __exit__) changed in 3.11.
    if self.ctx.python_version < (3, 11):
        func = state.peek(7)
    else:
        func = state.peek(4)
    args = state.topn(3)
    state, result = self.call_function_with_state(state, func, args)
    return state.push(result)

def byte_LOAD_ASSERTION_ERROR(self, state, op):
    """Pushes the AssertionError class (used by `assert` statements)."""
    del op  # unused
    assert_error = self.ctx.convert.lookup_value("builtins", "AssertionError")
    return state.push(assert_error.to_variable(state.node))

def byte_GET_LEN(self, state, op):
    """Pushes len(TOS) without popping TOS (match-statement support)."""
    del op
    var = state.top()
    elts = vm_utils.unpack_iterable(state.node, var, self.ctx)
    length = abstract.SequenceLength(elts, self.ctx)
    log.debug("get_len: %r", length)
    return state.push(length.instantiate(state.node))

def byte_MATCH_MAPPING(self, state, op):
    """Pushes whether TOS is a mapping (match-statement support)."""
    self._branch_tracker.register_match_type(op)
    obj_var = state.top()
    is_map = vm_utils.match_mapping(state.node, obj_var, self.ctx)
    ret = self.ctx.convert.bool_values[is_map]
    log.debug("match_mapping: %r", ret)
    return state.push(ret.to_variable(state.node))

def byte_MATCH_SEQUENCE(self, state, op):
    """Pushes whether TOS is a sequence (match-statement support)."""
    self._branch_tracker.register_match_type(op)
    obj_var = state.top()
    is_seq = vm_utils.match_sequence(obj_var)
    ret = self.ctx.convert.bool_values[is_seq]
    log.debug("match_sequence: %r", ret)
    return state.push(ret.to_variable(state.node))

def byte_MATCH_KEYS(self, state, op):
    """Implementation of the MATCH_KEYS opcode."""
    self._branch_tracker.register_match_type(op)
    obj_var, keys_var = state.topn(2)
    ret = vm_utils.match_keys(state.node, obj_var, keys_var, self.ctx)
    vals = ret or self.ctx.convert.none.to_variable(state.node)
    state = state.push(vals)
    if self.ctx.python_version == (3, 10):
        # 3.10 additionally pushes a success flag.
        succ = self.ctx.convert.bool_values[bool(ret)]
        state = state.push(succ.to_variable(state.node))
    return state

def _store_local_or_cellvar(self, state, name, var):
    """Stores var under name in locals, or in the matching closure cell."""
    if name in self.frame.f_locals.pyval:
        return self.store_local(state, name, var)
    try:
        idx = self.frame.f_code.get_cell_index(name)
    except ValueError:
        # Not a cell variable either; store as a regular local.
        return self.store_local(state, name, var)
    self.frame.cells[idx].PasteVariable(var)
    return state
def byte_MATCH_CLASS(self, state, op):
    """Implementation of the MATCH_CLASS opcode."""
    # NOTE: 3.10 specific; stack effects change somewhere en route to 3.12
    self._branch_tracker.register_match_type(op)
    posarg_count = op.arg
    state, keys_var = state.pop()
    state, (obj_var, cls_var) = state.popn(2)
    orig_node = state.node
    ret = vm_utils.match_class(
        state.node, obj_var, cls_var, keys_var, posarg_count, self.ctx
    )
    state = state.forward_cfg_node("MatchClass")
    success = ret.success
    vals = ret.values or self.ctx.convert.none.to_variable(state.node)
    if ret.matched:
        # Narrow the type of the match variable since we are in a case branch
        # where it has matched the given class. The branch tracker will store the
        # original (unnarrowed) type, since the new variable shadows it.
        complete = self._branch_tracker.add_class_branch(op, obj_var, cls_var)
        success = success or complete
        var_name = self._var_names.get(obj_var.id)
        if var_name:
            narrowed_type = self._branch_tracker.instantiate_case_var(
                op, obj_var, state.node
            )
            state = self._store_local_or_cellvar(state, var_name, narrowed_type)
    if self.ctx.python_version == (3, 10):
        state = state.push(vals)
        succ = self.ctx.convert.bool_values[success].to_variable(state.node)
        state = state.push(succ)
    else:
        if success is None:
            # In 3.11 we only have a single return value on the stack. If the match
            # is ambiguous, we need to add a second binding so the subsequent
            # JUMP_IF will take both branches.
            vals.AddBinding(self.ctx.convert.none, [], orig_node)
        state = state.push(vals)
    return state

def byte_COPY_DICT_WITHOUT_KEYS(self, state, op):
    """Pushes a copy of the dict below TOS, minus the keys in TOS."""
    del op
    state, keys_var = state.pop()
    obj_var = state.top()
    ret = vm_utils.copy_dict_without_keys(
        state.node, obj_var, keys_var, self.ctx
    )
    return state.push(ret)

def byte_GEN_START(self, state, op):
    """GEN_START (3.10): pops the value sent to a just-started generator."""
    del op
    return state.pop_and_discard()
def byte_CACHE(self, state, op):
    """CACHE (3.11+) is an inline-cache placeholder; nothing to model."""
    _ = op  # the opcode argument carries no information we use
    return state
def _push_null(self, state):
    """Pushes a NULL marker (3.11 calling convention) onto the stack."""
    null = abstract.Null(self.ctx).to_variable(state.node)
    return state.push(null)

def byte_PUSH_NULL(self, state, op):
    """Implementation of the PUSH_NULL opcode (3.11+)."""
    return self._push_null(state)

def byte_PUSH_EXC_INFO(self, state, op):
    """Pushes exception info below TOS on entry to an exception handler."""
    del op
    state, top = state.pop()
    # We do not model concrete exception objects, so push unsolvable.
    exc = self.ctx.new_unsolvable(state.node)
    state = state.push(exc)
    return state.push(top)
def _exc_type_to_group(self, node, exc_type):
    """Creates an ExceptionGroup from an exception type."""
    exc_group_base = self.ctx.convert.lookup_value("builtins", "ExceptionGroup")
    flattened_exc_type = []
    # In `except* exc_type`, exc_type can be a single exception class or a tuple
    # of exception classes.
    for v in exc_type.data:
        if isinstance(v, abstract.Tuple):
            # Concrete tuple: collect every element's bindings.
            for element in v.pyval:
                flattened_exc_type.extend(element.data)
        elif (
            isinstance(v.cls, abstract.ParameterizedClass)
            and v.cls.base_cls == self.ctx.convert.tuple_type
        ):
            # tuple[T, ...]: the element type is the T parameter.
            flattened_exc_type.extend(
                v.get_instance_type_parameter(abstract_utils.T).data
            )
        elif v.cls == self.ctx.convert.tuple_type:
            # A plain tuple with unknown contents.
            flattened_exc_type.append(self.ctx.convert.unsolvable)
        else:
            flattened_exc_type.append(v)
    exc_group_type = abstract.ParameterizedClass(
        exc_group_base,
        {abstract_utils.T: self.ctx.convert.merge_values(flattened_exc_type)},
        self.ctx,
    )
    return exc_group_type.instantiate(node)

def byte_CHECK_EG_MATCH(self, state, op):
    """Implements `except*` matching: pushes an ExceptionGroup instance."""
    del op
    state, exc_type = state.pop()
    return state.push(self._exc_type_to_group(state.node, exc_type))

def byte_BEFORE_WITH(self, state, op):
    """3.11+ replacement for SETUP_WITH: pushes __exit__ and __enter__()."""
    del op
    state, ctxmgr = state.pop()
    state, exit_method = self.load_attr(state, ctxmgr, "__exit__")
    state = state.push(exit_method)
    state, ctxmgr_obj = self._call(state, ctxmgr, "__enter__", ())
    return state.push(ctxmgr_obj)

def byte_RETURN_GENERATOR(self, state, op):
    # No stack or type effects for pytype.
    del op
    return state

def byte_ASYNC_GEN_WRAP(self, state, op):
    # No stack or type effects for pytype.
    del op
    return state

def byte_PREP_RERAISE_STAR(self, state, op):
    # No stack or type effects for pytype.
    del op
    return state
def byte_SWAP(self, state, op):
    """Swaps TOS with the stack item at position op.arg."""
    return state.swap(op.arg)

def byte_POP_JUMP_FORWARD_IF_FALSE(self, state, op):
    """3.11 renaming of POP_JUMP_IF_FALSE."""
    return self.byte_POP_JUMP_IF_FALSE(state, op)

def byte_POP_JUMP_FORWARD_IF_TRUE(self, state, op):
    """3.11 renaming of POP_JUMP_IF_TRUE."""
    return self.byte_POP_JUMP_IF_TRUE(state, op)

def byte_COPY(self, state, op):
    """Pushes a copy of the stack item at position op.arg."""
    return state.push(state.peek(op.arg))

def byte_BINARY_OP(self, state, op):
    """Implementation of BINARY_OP opcode."""
    # Python 3.11 unified a lot of BINARY_* and INPLACE_* opcodes into a single
    # BINARY_OP. The underlying operations remain unchanged, so we can just
    # dispatch to them. The list index matches CPython's NB_* constants.
    binops = [
        self.byte_BINARY_ADD,
        self.byte_BINARY_AND,
        self.byte_BINARY_FLOOR_DIVIDE,
        self.byte_BINARY_LSHIFT,
        self.byte_BINARY_MATRIX_MULTIPLY,
        self.byte_BINARY_MULTIPLY,
        self.byte_BINARY_MODULO,  # NB_REMAINDER in 3.11
        self.byte_BINARY_OR,
        self.byte_BINARY_POWER,
        self.byte_BINARY_RSHIFT,
        self.byte_BINARY_SUBTRACT,
        self.byte_BINARY_TRUE_DIVIDE,
        self.byte_BINARY_XOR,
        self.byte_INPLACE_ADD,
        self.byte_INPLACE_AND,
        self.byte_INPLACE_FLOOR_DIVIDE,
        self.byte_INPLACE_LSHIFT,
        self.byte_INPLACE_MATRIX_MULTIPLY,
        self.byte_INPLACE_MULTIPLY,
        self.byte_INPLACE_MODULO,  # NB_INPLACE_REMAINDER in 3.11
        self.byte_INPLACE_OR,
        self.byte_INPLACE_POWER,
        self.byte_INPLACE_RSHIFT,
        self.byte_INPLACE_SUBTRACT,
        self.byte_INPLACE_TRUE_DIVIDE,
        self.byte_INPLACE_XOR,
    ]
    binop = binops[op.arg]
    return binop(state, op)
def byte_SEND(self, state, op):
    """Implementation of SEND opcode."""
    # In Python 3.11, a SEND + YIELD_VALUE + JUMP_BACKWARD_NO_INTERRUPT sequence
    # is used to implement `yield from` (previously implemented by the
    # YIELD_FROM opcode). SEND gets a value from a generator, YIELD_VALUE yields
    # the value, and JUMP_BACKWARD_NO_INTERRUPT jumps back to SEND, repeatedly,
    # until the generator runs out of values. Then SEND pushes the generator's
    # return value onto the stack and jumps past JUMP_BACKWARD_NO_INTERRUPT. See
    # https://github.com/python/cpython/blob/c6d5628be950bdf2c31243b4cc0d9e0b658458dd/Python/ceval.c#L2577
    # for the CPython source.
    state, unused_send = state.pop()
    generator = state.top()
    yield_var = self._get_generator_yield(state.node, generator)
    ret_var = self._get_generator_return(state.node, generator)
    # At the jump target (loop done), the return value sits above (3.12+) or
    # replaces (3.11) the exhausted generator.
    if self.ctx.python_version >= (3, 12):
        self.store_jump(op.target, state.push(ret_var))
    else:
        self.store_jump(op.target, state.set_top(ret_var))
    return state.push(yield_var)
def byte_POP_JUMP_FORWARD_IF_NOT_NONE(self, state, op):
    """Pops TOS and jumps forward if it is not None."""
    # Check if this is a `case None` statement (3.11+ compiles it directly to a
    # conditional jump rather than a compare and then jump).
    self._branch_tracker.register_match_type(op)
    match_none = self._branch_tracker.add_none_branch(op, state.top())
    if match_none is True:  # pylint: disable=g-bool-id-comparison
        # This always fails due to earlier pattern matches, so replace the top of
        # the stack with a None to ensure we do not jump.
        state = state.pop_and_discard()
        value = self.ctx.convert.none.to_variable(state.node)
        state = state.push(value)
    return vm_utils.jump_if(
        state,
        op,
        self.ctx,
        jump_if_val=frame_state.NOT_NONE,
        pop=vm_utils.PopBehavior.ALWAYS,
    )

def byte_POP_JUMP_FORWARD_IF_NONE(self, state, op):
    """Pops TOS and jumps forward if it is None."""
    return vm_utils.jump_if(
        state, op, self.ctx, jump_if_val=None, pop=vm_utils.PopBehavior.ALWAYS
    )

def byte_JUMP_BACKWARD_NO_INTERRUPT(self, state, op):
    """Unconditional backward jump (uninterruptible variant)."""
    self.store_jump(op.target, state.forward_cfg_node("JumpBackward"))
    return state
def byte_MAKE_CELL(self, state, op):
    """MAKE_CELL (3.11+): no stack or type effects for pytype."""
    _ = op  # unused
    return state
def byte_JUMP_BACKWARD(self, state, op):
    """Unconditional backward jump (3.11+ loop back-edge)."""
    self.store_jump(op.target, state.forward_cfg_node("JumpBackward"))
    return state

def byte_COPY_FREE_VARS(self, state, op):
    """Copies op.arg free variables from the closure into the frame."""
    self.frame.copy_free_vars(op.arg)
    return state
def byte_RESUME(self, state, op):
# No stack or type effects
del op
return state
def byte_PRECALL(self, state, op):
# No stack or type effects
del op
return state
def byte_CALL(self, state, op):
return self.call_function_from_stack_311(state, op.arg)
def byte_KW_NAMES(self, state, op):
# Stores a list of kw names to be retrieved by CALL
self._kw_names = op.argval
return state
def byte_POP_JUMP_BACKWARD_IF_NOT_NONE(self, state, op):
return vm_utils.jump_if(
state,
op,
self.ctx,
jump_if_val=frame_state.NOT_NONE,
pop=vm_utils.PopBehavior.ALWAYS,
)
def byte_POP_JUMP_BACKWARD_IF_NONE(self, state, op):
return vm_utils.jump_if(
state, op, self.ctx, jump_if_val=None, pop=vm_utils.PopBehavior.ALWAYS
)
def byte_POP_JUMP_BACKWARD_IF_FALSE(self, state, op):
return self.byte_POP_JUMP_IF_FALSE(state, op)
def byte_POP_JUMP_BACKWARD_IF_TRUE(self, state, op):
return self.byte_POP_JUMP_IF_TRUE(state, op)
def byte_INTERPRETER_EXIT(self, state, op):
del op
return state
def byte_END_FOR(self, state, op):
# No-op in pytype. See comment in `byte_FOR_ITER` for details.
return state
def byte_END_SEND(self, state, op):
# Implements `del STACK[-2]`. Used to clean up when a generator exits.
state, top = state.pop()
return state.set_top(top)
def byte_RESERVED(self, state, op):
del op
return state
def byte_BINARY_SLICE(self, state, op):
state, (obj, start, end) = state.popn(3)
subscr = self.ctx.convert.build_slice(state.node, start, end)
state, ret = vm_utils.call_binary_operator(
state, "__getitem__", obj, subscr, report_errors=True, ctx=self.ctx
)
return state.push(ret)
def byte_STORE_SLICE(self, state, op):
state, (val, obj, start, end) = state.popn(4)
state = state.forward_cfg_node("StoreSlice")
subscr = self.ctx.convert.build_slice(state.node, start, end)
return self.store_subscr(state, obj, subscr, val)
def byte_CLEANUP_THROW(self, state, op):
# In 3.12 the only use of CLEANUP_THROW is for exception handling in
# generators. Pytype elides the opcode in opcodes::_make_opcode_list.
del op
return state
def byte_LOAD_LOCALS(self, state, op):
return state.push(self.frame.f_locals.to_variable(state.node))
def byte_LOAD_FROM_DICT_OR_GLOBALS(self, state, op):
# TODO: b/350910471 - The implementation here is wrong. It was not possible
# to make the python compiler generate this specific bytecode in order to
# test this properly, so doing what's mentioned in the python dis
# documentation : pop one element from the stack and push unsolvable and
# wait for anyone to report a bug so that we can fix this.
state, _ = state.pop()
self.ctx.errorlog.not_supported_yet(
self.frames,
"Please report a pytype bug : using LOAD_FROM_DICT_OR_GLOBALS is",
)
state = state.push(self.ctx.convert.unsolvable.to_variable(state.node))
return state
def byte_LOAD_FROM_DICT_OR_DEREF(self, state, op):
state, loaded_locals = state.pop()
# Current locals have been pushed to top of the stack by LOAD_LOCALS. The
# following calls `self.load_local`, which uses `self.frame.f_locals`
# internally.
assert loaded_locals.data == [self.frame.f_locals]
return self.byte_LOAD_CLASSDEREF(state, op)
def byte_POP_JUMP_IF_NOT_NONE(self, state, op):
return self.byte_POP_JUMP_FORWARD_IF_NOT_NONE(state, op)
def byte_POP_JUMP_IF_NONE(self, state, op):
return self.byte_POP_JUMP_FORWARD_IF_NONE(state, op)
def byte_LOAD_SUPER_ATTR(self, state, op):
"""Implementation of the LOAD_SUPER_ATTR opcode."""
name = op.argval
state, (super_fn, arg_cls, arg_self) = state.popn(3)
# The 2nd-low bit indicates a two-argument super call.
if op.arg & 2:
super_args = (arg_cls, arg_self)
else:
super_args = ()
state, obj = self.call_function_with_state(state, super_fn, super_args)
# The 1st-low bit indicates a method load (similar to LOAD_ATTR).
if op.arg & 1:
state, val = self._load_method(state, obj, name)
else:
with self._suppress_opcode_tracing():
# LOAD_ATTR for @property methods generates an extra opcode trace for
# the implicit function call, which we do not want.
state, val = self.load_attr(state, obj, name)
state = state.push(val)
self.trace_opcode(op, name, (obj, val))
return state
def byte_CALL_INTRINSIC_1(self, state, op):
intrinsic_fn = getattr(self, f"byte_{op.argval}", None)
if intrinsic_fn is None:
raise VirtualMachineError(f"Unknown intrinsic function: {op.argval}")
return intrinsic_fn(state)
def byte_CALL_INTRINSIC_2(self, state, op):
intrinsic_fn = getattr(self, f"byte_{op.argval}", None)
if intrinsic_fn is None:
raise VirtualMachineError(f"Unknown intrinsic function: {op.argval}")
return intrinsic_fn(state)
def byte_INTRINSIC_1_INVALID(self, state):
return state
def byte_INTRINSIC_PRINT(self, state):
# Only used in the interactive interpreter, not in modules.
return state
def byte_INTRINSIC_IMPORT_STAR(self, state):
state = self._import_star(state)
return self._push_null(state)
def byte_INTRINSIC_STOPITERATION_ERROR(self, state):
# Changes StopIteration or StopAsyncIteration to a RuntimeError.
return state
def byte_INTRINSIC_ASYNC_GEN_WRAP(self, state):
return state
def byte_INTRINSIC_UNARY_POSITIVE(self, state):
return self.unary_operator(state, "__pos__")
def byte_INTRINSIC_LIST_TO_TUPLE(self, state):
return self._list_to_tuple(state)
def byte_INTRINSIC_TYPEVAR(self, state):
"""This intrinsic is a synonym to typing.TypeVar."""
state, param = state.pop()
type_var_name = self.ctx.convert.constant_to_var(
param.data[0].pyval, node=state.node
)
args = function.Args(
posargs=(type_var_name,),
namedargs={},
starargs=None,
starstarargs=None,
)
_, ret = function.call_function(
self.ctx,
state.node,
self._typings_type_var,
args=args,
)
state = state.push(ret)
return state
def byte_INTRINSIC_PARAMSPEC(self, state):
"""This intrinsic is a synonym to typing.ParamSpec."""
state, param = state.pop()
type_var_name = self.ctx.convert.constant_to_var(
param.data[0].pyval, node=state.node
)
args = function.Args(
posargs=(type_var_name,),
namedargs={},
starargs=None,
starstarargs=None,
)
_, ret = function.call_function(
self.ctx,
state.node,
self._typings_paramspec,
args=args,
)
state = state.push(ret)
return state
def byte_INTRINSIC_TYPEVARTUPLE(self, state):
# We don't even support importing typing.TypeVarTuple so do the minimal to
# consume all the parameters, put any on the stack and move on.
self.ctx.errorlog.not_supported_yet(
self.frames, "Using TypeVarTuple in Generics is"
)
state, _ = state.pop()
state = state.push(self.ctx.convert.unsolvable.to_variable(state.node))
return state
def byte_INTRINSIC_SUBSCRIPT_GENERIC(self, state):
state, type_parameters = state.pop()
state = state.push(self._typings_generic)
# This will be a tuple of type parameters in order.
state = state.push(type_parameters)
# Returning Generic[S, T]
return self.binary_operator(state, "__getitem__")
def byte_INTRINSIC_TYPEALIAS(self, state):
"""This intrinsic creates a type alias and puts the result on the stack."""
# https://docs.python.org/3.12/library/dis.html states:
# The argument is a tuple of the type alias’s name, type parameters,
# and value. There's no need to use the name because there's a STORE_NAME
# opcode following the call to this intrinsic.
state, param = state.pop()
# TODO: b/412616662 - For pytd generation, it's better to generate type
# aliases as `type MyType = int`, because the type alias is not callable
# if we do Mytype(), thus there should be a diagnostic when you try to call
# a type alias. For now, we generate `type MyType = int` as `MyType = int`
# because the machinery to make a distinction is not in place yet.
# TODO: b/350910471 - support the use of typevar
_, _, funcv = param.data[0].pyval
args = function.Args(
posargs=(),
namedargs={},
starargs=None,
starstarargs=None,
)
_, ret = function.call_function(
self.ctx,
state.node,
funcv,
args=args,
)
state = state.push(ret)
return state
def byte_INTRINSIC_2_INVALID(self, state):
return state
def byte_INTRINSIC_PREP_RERAISE_STAR(self, state):
return state
def byte_INTRINSIC_TYPEVAR_WITH_BOUND(self, state):
"""This intrinsic is a synonym of typing.TypeVar('T', bound=...)."""
# First parameter(bound_func) is the function object that returns the type
# representation of the bound.
state, (type_var_name, bound_func) = state.popn(2)
type_var_name = self.ctx.convert.constant_to_var(
type_var_name.data[0].pyval, node=state.node
)
_, bound = function.call_function(
self.ctx,
state.node,
bound_func,
args=function.Args(
posargs=(),
namedargs={},
starargs=None,
starstarargs=None,
),
)
_, ret = function.call_function(
self.ctx,
state.node,
self._typings_type_var,
args=function.Args(
posargs=(type_var_name,),
namedargs={"bound": bound},
starargs=None,
starstarargs=None,
),
)
state = state.push(ret)
return state
def byte_INTRINSIC_TYPEVAR_WITH_CONSTRAINTS(self, state):
"""This intrinsic is a synonym of typing.TypeVar('T', *constraint)."""
# First parameter(bound_func) is the function object that returns the type
# representation of the bound.
state, (type_var_name, constraint_func) = state.popn(2)
type_var_name = self.ctx.convert.constant_to_var(
type_var_name.data[0].pyval, node=state.node
)
_, constraints = function.call_function(
self.ctx,
state.node,
constraint_func,
args=function.Args(
posargs=(),
namedargs={},
starargs=None,
starstarargs=None,
),
)
_, ret = function.call_function(
self.ctx,
state.node,
self._typings_type_var,
args=function.Args(
posargs=(type_var_name,),
namedargs={},
starargs=constraints,
starstarargs=None,
),
)
state = state.push(ret)
return state
def byte_INTRINSIC_SET_FUNCTION_TYPE_PARAMS(self, state):
# Second parameter here is a type parameter, it's stored to the
# __type_params__ function attribute at runtime, but this information is not
# consumed by us. This intrinsic consumes two parameters which is the
# function object and the type parameter list and returns the function
# object.
state, (func, _) = state.popn(2)
state = state.push(func)
return state
def _bytecode_to_string(bytecode) -> str:
"""Print bytecode in a textual form."""
lines = []
for block_idx, block in enumerate(bytecode.order):
lines.append(f"{block_idx}")
for instruction in block.code:
lines.append(f" {instruction.line} {instruction.name}")
return "\n".join(lines)
| VirtualMachine |
python | PrefectHQ__prefect | src/integrations/prefect-kubernetes/prefect_kubernetes/worker.py | {
"start": 22974,
"end": 25526
} | class ____(BaseVariables):
"""
Default variables for the Kubernetes worker.
The schema for this class is used to populate the `variables` section of the default
base job template.
"""
namespace: str = Field(
default="default", description="The Kubernetes namespace to create jobs within."
)
image: Optional[str] = Field(
default=None,
description="The image reference of a container image to use for created jobs. "
"If not set, the latest Prefect image will be used.",
examples=["docker.io/prefecthq/prefect:3-latest"],
)
service_account_name: Optional[str] = Field(
default=None,
description="The Kubernetes service account to use for job creation.",
)
image_pull_policy: Literal["IfNotPresent", "Always", "Never"] = Field(
default=KubernetesImagePullPolicy.IF_NOT_PRESENT,
description="The Kubernetes image pull policy to use for job containers.",
)
backoff_limit: int = Field(
default=0,
ge=0,
title="Backoff Limit",
description=(
"The number of times Kubernetes will retry a job after pod eviction. "
"If set to 0, Prefect will reschedule the flow run when the pod is evicted "
"unless PREFECT_FLOW_RUN_EXECUTE_SIGTERM_BEHAVIOR is set to value "
"different from 'reschedule'."
),
)
finished_job_ttl: Optional[int] = Field(
default=None,
title="Finished Job TTL",
description="The number of seconds to retain jobs after completion. If set, "
"finished jobs will be cleaned up by Kubernetes after the given delay. If not "
"set, jobs will be retained indefinitely.",
)
job_watch_timeout_seconds: Optional[int] = Field(
default=None,
description=(
"Number of seconds to wait for each event emitted by a job before "
"timing out. If not set, the worker will wait for each event indefinitely."
),
)
pod_watch_timeout_seconds: int = Field(
default=60,
description="Number of seconds to watch for pod creation before timing out.",
)
stream_output: bool = Field(
default=True,
description=(
"If set, output will be streamed from the job to local standard output."
),
)
cluster_config: Optional[KubernetesClusterConfig] = Field(
default=None,
description="The Kubernetes cluster config to use for job creation.",
)
| KubernetesWorkerVariables |
python | spack__spack | lib/spack/spack/compilers/adaptor.py | {
"start": 264,
"end": 346
} | class ____(enum.Enum):
C = "c"
CXX = "cxx"
FORTRAN = "fortran"
| Languages |
python | sqlalchemy__sqlalchemy | test/orm/test_dynamic.py | {
"start": 4851,
"end": 4919
} | class ____(_DynamicFixture):
lazy = "write_only"
| _WriteOnlyFixture |
python | google__jax | jax/experimental/mosaic/gpu/layout_inference.py | {
"start": 2615,
"end": 2833
} | class ____(enum.Enum):
"""The memory space of a variable."""
REG = enum.auto()
SMEM = enum.auto()
TMEM = enum.auto()
_op_name_regex = re.compile(r"^(%\d+ = )?\S+")
@dataclasses.dataclass(frozen=True)
| MemorySpace |
python | walkccc__LeetCode | solutions/2266. Count Number of Texts/2266.py | {
"start": 0,
"end": 821
} | class ____:
def countTexts(self, pressedKeys: str) -> int:
MOD = 1_000_000_007
n = len(pressedKeys)
# dp[i] := the number of possible text messages of pressedKeys[i..n)
dp = [0] * n + [1]
def isSame(s: str, i: int, k: int) -> bool:
"""Returns True if s[i..i + k) are the same digits."""
if i + k > len(s):
return False
for j in range(i + 1, i + k):
if s[j] != s[i]:
return False
return True
for i in reversed(range(n)):
dp[i] = dp[i + 1]
if isSame(pressedKeys, i, 2):
dp[i] += dp[i + 2]
if isSame(pressedKeys, i, 3):
dp[i] += dp[i + 3]
if ((pressedKeys[i] == '7' or pressedKeys[i] == '9') and
isSame(pressedKeys, i, 4)):
dp[i] += dp[i + 4]
dp[i] %= MOD
return dp[0]
| Solution |
python | scipy__scipy | scipy/io/matlab/_mio5_params.py | {
"start": 7781,
"end": 8201
} | class ____(np.ndarray):
"""Subclass for a MATLAB opaque matrix.
This is a simple subclass of :class:`numpy.ndarray` meant to be used
by :func:`scipy.io.loadmat` and should not be directly instantiated.
"""
def __new__(cls, input_array):
obj = np.asarray(input_array).view(cls)
return obj
OPAQUE_DTYPE = np.dtype(
[('s0', 'O'), ('s1', 'O'), ('s2', 'O'), ('arr', 'O')])
| MatlabOpaque |
python | apache__airflow | providers/yandex/src/airflow/providers/yandex/operators/dataproc.py | {
"start": 1434,
"end": 14001
} | class ____(BaseOperator):
"""
Creates Yandex.Cloud Data Proc cluster.
:param folder_id: ID of the folder in which cluster should be created.
:param cluster_name: Cluster name. Must be unique inside the folder.
:param cluster_description: Cluster description.
:param cluster_image_version: Cluster image version. Use default.
:param ssh_public_keys: List of SSH public keys that will be deployed to created compute instances.
:param subnet_id: ID of the subnetwork. All Data Proc cluster nodes will use one subnetwork.
:param services: List of services that will be installed to the cluster. Possible options:
HDFS, YARN, MAPREDUCE, HIVE, TEZ, ZOOKEEPER, HBASE, SQOOP, FLUME, SPARK, SPARK, ZEPPELIN, OOZIE
:param s3_bucket: Yandex.Cloud S3 bucket to store cluster logs.
Jobs will not work if the bucket is not specified.
:param zone: Availability zone to create cluster in.
Currently there are ru-central1-a, ru-central1-b and ru-central1-c.
:param service_account_id: Service account id for the cluster.
Service account can be created inside the folder.
:param environment: Environment for the cluster. Possible options: PRODUCTION, PRESTABLE.
:param masternode_resource_preset: Resources preset (CPU+RAM configuration)
for the primary node of the cluster.
:param masternode_disk_size: Masternode storage size in GiB.
:param masternode_disk_type: Masternode storage type. Possible options: network-ssd, network-hdd.
:param datanode_resource_preset: Resources preset (CPU+RAM configuration)
for the data nodes of the cluster.
:param datanode_disk_size: Datanodes storage size in GiB.
:param datanode_disk_type: Datanodes storage type. Possible options: network-ssd, network-hdd.
:param computenode_resource_preset: Resources preset (CPU+RAM configuration)
for the compute nodes of the cluster.
:param computenode_disk_size: Computenodes storage size in GiB.
:param computenode_disk_type: Computenodes storage type. Possible options: network-ssd, network-hdd.
:param connection_id: ID of the Yandex.Cloud Airflow connection.
:param computenode_max_count: Maximum number of nodes of compute autoscaling subcluster.
:param computenode_warmup_duration: The warmup time of the instance in seconds. During this time,
traffic is sent to the instance,
but instance metrics are not collected. In seconds.
:param computenode_stabilization_duration: Minimum amount of time in seconds for monitoring before
Instance Groups can reduce the number of instances in the group.
During this time, the group size doesn't decrease,
even if the new metric values indicate that it should. In seconds.
:param computenode_preemptible: Preemptible instances are stopped at least once every 24 hours,
and can be stopped at any time if their resources are needed by Compute.
:param computenode_cpu_utilization_target: Defines an autoscaling rule
based on the average CPU utilization of the instance group.
in percents. 10-100.
By default is not set and default autoscaling strategy is used.
:param computenode_decommission_timeout: Timeout to gracefully decommission nodes during downscaling.
In seconds
:param properties: Properties passed to main node software.
Docs: https://cloud.yandex.com/docs/data-proc/concepts/settings-list
:param enable_ui_proxy: Enable UI Proxy feature for forwarding Hadoop components web interfaces
Docs: https://cloud.yandex.com/docs/data-proc/concepts/ui-proxy
:param host_group_ids: Dedicated host groups to place VMs of cluster on.
Docs: https://cloud.yandex.com/docs/compute/concepts/dedicated-host
:param security_group_ids: User security groups.
Docs: https://cloud.yandex.com/docs/data-proc/concepts/network#security-groups
:param log_group_id: Id of log group to write logs. By default logs will be sent to default log group.
To disable cloud log sending set cluster property dataproc:disable_cloud_logging = true
Docs: https://cloud.yandex.com/docs/data-proc/concepts/logs
:param initialization_actions: Set of init-actions to run when cluster starts.
Docs: https://cloud.yandex.com/docs/data-proc/concepts/init-action
:param oslogin_enabled: Enable authorization via OS Login for cluster.
:param labels: Cluster labels as key:value pairs. No more than 64 per resource.
Docs: https://cloud.yandex.com/docs/resource-manager/concepts/labels
"""
def __init__(
self,
*,
folder_id: str | None = None,
cluster_name: str | None = None,
cluster_description: str | None = "",
cluster_image_version: str | None = None,
ssh_public_keys: str | Iterable[str] | None = None,
subnet_id: str | None = None,
services: Iterable[str] | None = ("HDFS", "YARN", "MAPREDUCE", "HIVE", "SPARK"),
s3_bucket: str | None = None,
zone: str = "ru-central1-b",
service_account_id: str | None = None,
environment: str | None = None,
masternode_resource_preset: str | None = None,
masternode_disk_size: int | None = None,
masternode_disk_type: str | None = None,
datanode_resource_preset: str | None = None,
datanode_disk_size: int | None = None,
datanode_disk_type: str | None = None,
datanode_count: int = 1,
computenode_resource_preset: str | None = None,
computenode_disk_size: int | None = None,
computenode_disk_type: str | None = None,
computenode_count: int = 0,
computenode_max_hosts_count: int | None = None,
computenode_measurement_duration: int | None = None,
computenode_warmup_duration: int | None = None,
computenode_stabilization_duration: int | None = None,
computenode_preemptible: bool = False,
computenode_cpu_utilization_target: int | None = None,
computenode_decommission_timeout: int | None = None,
connection_id: str | None = None,
properties: dict[str, str] | None = None,
enable_ui_proxy: bool = False,
host_group_ids: Iterable[str] | None = None,
security_group_ids: Iterable[str] | None = None,
log_group_id: str | None = None,
initialization_actions: Iterable[InitializationAction] | None = None,
oslogin_enabled: bool = False,
labels: dict[str, str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
if ssh_public_keys is None:
ssh_public_keys = []
self.folder_id = folder_id
self.yandex_conn_id = connection_id
self.cluster_name = cluster_name
self.cluster_description = cluster_description
self.cluster_image_version = cluster_image_version
self.ssh_public_keys = ssh_public_keys
self.subnet_id = subnet_id
self.services = services
self.s3_bucket = s3_bucket
self.zone = zone
self.service_account_id = service_account_id
self.environment = environment
self.masternode_resource_preset = masternode_resource_preset
self.masternode_disk_size = masternode_disk_size
self.masternode_disk_type = masternode_disk_type
self.datanode_resource_preset = datanode_resource_preset
self.datanode_disk_size = datanode_disk_size
self.datanode_disk_type = datanode_disk_type
self.datanode_count = datanode_count
self.computenode_resource_preset = computenode_resource_preset
self.computenode_disk_size = computenode_disk_size
self.computenode_disk_type = computenode_disk_type
self.computenode_count = computenode_count
self.computenode_max_hosts_count = computenode_max_hosts_count
self.computenode_measurement_duration = computenode_measurement_duration
self.computenode_warmup_duration = computenode_warmup_duration
self.computenode_stabilization_duration = computenode_stabilization_duration
self.computenode_preemptible = computenode_preemptible
self.computenode_cpu_utilization_target = computenode_cpu_utilization_target
self.computenode_decommission_timeout = computenode_decommission_timeout
self.properties = properties
self.enable_ui_proxy = enable_ui_proxy
self.host_group_ids = host_group_ids
self.security_group_ids = security_group_ids
self.log_group_id = log_group_id
self.initialization_actions = initialization_actions
self.oslogin_enabled = oslogin_enabled
self.labels = labels
self.hook: DataprocHook | None = None
def execute(self, context: Context) -> dict:
self.hook = DataprocHook(
yandex_conn_id=self.yandex_conn_id,
)
kwargs_depends_on_version = {}
if yandexcloud.__version__ >= "0.350.0":
kwargs_depends_on_version.update(
{"oslogin_enabled": self.oslogin_enabled, "environment": self.environment}
)
operation_result = self.hook.dataproc_client.create_cluster(
folder_id=self.folder_id,
cluster_name=self.cluster_name,
cluster_description=self.cluster_description,
cluster_image_version=self.cluster_image_version,
ssh_public_keys=self.ssh_public_keys,
subnet_id=self.subnet_id,
services=self.services,
s3_bucket=self.s3_bucket,
zone=self.zone,
service_account_id=self.service_account_id or self.hook.default_service_account_id,
masternode_resource_preset=self.masternode_resource_preset,
masternode_disk_size=self.masternode_disk_size,
masternode_disk_type=self.masternode_disk_type,
datanode_resource_preset=self.datanode_resource_preset,
datanode_disk_size=self.datanode_disk_size,
datanode_disk_type=self.datanode_disk_type,
datanode_count=self.datanode_count,
computenode_resource_preset=self.computenode_resource_preset,
computenode_disk_size=self.computenode_disk_size,
computenode_disk_type=self.computenode_disk_type,
computenode_count=self.computenode_count,
computenode_max_hosts_count=self.computenode_max_hosts_count,
computenode_measurement_duration=self.computenode_measurement_duration,
computenode_warmup_duration=self.computenode_warmup_duration,
computenode_stabilization_duration=self.computenode_stabilization_duration,
computenode_preemptible=self.computenode_preemptible,
computenode_cpu_utilization_target=self.computenode_cpu_utilization_target,
computenode_decommission_timeout=self.computenode_decommission_timeout,
properties=self.properties,
enable_ui_proxy=self.enable_ui_proxy,
host_group_ids=self.host_group_ids,
security_group_ids=self.security_group_ids,
log_group_id=self.log_group_id,
labels=self.labels,
initialization_actions=[
self.hook.sdk.wrappers.InitializationAction(
uri=init_action.uri,
args=init_action.args,
timeout=init_action.timeout,
)
for init_action in self.initialization_actions
]
if self.initialization_actions
else None,
**kwargs_depends_on_version,
)
cluster_id = operation_result.response.id
context["task_instance"].xcom_push(key="cluster_id", value=cluster_id)
# Deprecated
context["task_instance"].xcom_push(key="yandexcloud_connection_id", value=self.yandex_conn_id)
return cluster_id
@property
def cluster_id(self):
return self.output
| DataprocCreateClusterOperator |
python | pandas-dev__pandas | pandas/tests/series/methods/test_cov_corr.py | {
"start": 1589,
"end": 5770
} | class ____:
def test_corr(self, datetime_series, any_float_dtype):
stats = pytest.importorskip("scipy.stats")
datetime_series = datetime_series.astype(any_float_dtype)
# full overlap
tm.assert_almost_equal(datetime_series.corr(datetime_series), 1)
# partial overlap
tm.assert_almost_equal(datetime_series[:15].corr(datetime_series[5:]), 1)
assert isna(datetime_series[:15].corr(datetime_series[5:], min_periods=12))
ts1 = datetime_series[:15].reindex(datetime_series.index)
ts2 = datetime_series[5:].reindex(datetime_series.index)
assert isna(ts1.corr(ts2, min_periods=12))
# No overlap
assert np.isnan(datetime_series[::2].corr(datetime_series[1::2]))
# all NA
cp = datetime_series[:10].copy()
cp[:] = np.nan
assert isna(cp.corr(cp))
A = Series(
np.arange(10, dtype=np.float64),
index=date_range("2020-01-01", periods=10),
name="ts",
)
result = A.corr(A)
expected, _ = stats.pearsonr(A, A)
tm.assert_almost_equal(result, expected)
def test_corr_rank(self):
stats = pytest.importorskip("scipy.stats")
# kendall and spearman
B = Series(
np.arange(10, dtype=np.float64),
index=date_range("2020-01-01", periods=10),
name="ts",
)
A = Series(
np.concatenate([np.arange(5, dtype=np.float64)] * 2),
index=date_range("2020-01-01", periods=10),
name="ts",
)
result = A.corr(B, method="kendall")
expected = stats.kendalltau(A, B)[0]
tm.assert_almost_equal(result, expected)
result = A.corr(B, method="spearman")
expected = stats.spearmanr(A, B)[0]
tm.assert_almost_equal(result, expected)
# results from R
A = Series(
[
-0.89926396,
0.94209606,
-1.03289164,
-0.95445587,
0.76910310,
-0.06430576,
-2.09704447,
0.40660407,
-0.89926396,
0.94209606,
]
)
B = Series(
[
-1.01270225,
-0.62210117,
-1.56895827,
0.59592943,
-0.01680292,
1.17258718,
-1.06009347,
-0.10222060,
-0.89076239,
0.89372375,
]
)
kexp = 0.4319297
sexp = 0.5853767
tm.assert_almost_equal(A.corr(B, method="kendall"), kexp)
tm.assert_almost_equal(A.corr(B, method="spearman"), sexp)
def test_corr_invalid_method(self):
# GH PR #22298
s1 = Series(np.random.default_rng(2).standard_normal(10))
s2 = Series(np.random.default_rng(2).standard_normal(10))
msg = "method must be either 'pearson', 'spearman', 'kendall', or a callable, "
with pytest.raises(ValueError, match=msg):
s1.corr(s2, method="____")
def test_corr_callable_method(self, datetime_series):
# simple correlation example
# returns 1 if exact equality, 0 otherwise
my_corr = lambda a, b: 1.0 if (a == b).all() else 0.0
# simple example
s1 = Series([1, 2, 3, 4, 5])
s2 = Series([5, 4, 3, 2, 1])
expected = 0
tm.assert_almost_equal(s1.corr(s2, method=my_corr), expected)
# full overlap
tm.assert_almost_equal(
datetime_series.corr(datetime_series, method=my_corr), 1.0
)
# partial overlap
tm.assert_almost_equal(
datetime_series[:15].corr(datetime_series[5:], method=my_corr), 1.0
)
# No overlap
assert np.isnan(
datetime_series[::2].corr(datetime_series[1::2], method=my_corr)
)
# dataframe example
df = pd.DataFrame([s1, s2])
expected = pd.DataFrame([{0: 1.0, 1: 0}, {0: 0, 1: 1.0}])
tm.assert_almost_equal(df.transpose().corr(method=my_corr), expected)
| TestSeriesCorr |
python | Netflix__metaflow | metaflow/_vendor/click/_winconsole.py | {
"start": 5226,
"end": 6041
} | class ____(object):
def __init__(self, text_stream, byte_stream):
self._text_stream = text_stream
self.buffer = byte_stream
@property
def name(self):
return self.buffer.name
def write(self, x):
if isinstance(x, text_type):
return self._text_stream.write(x)
try:
self.flush()
except Exception:
pass
return self.buffer.write(x)
def writelines(self, lines):
for line in lines:
self.write(line)
def __getattr__(self, name):
return getattr(self._text_stream, name)
def isatty(self):
return self.buffer.isatty()
def __repr__(self):
return "<ConsoleStream name={!r} encoding={!r}>".format(
self.name, self.encoding
)
| ConsoleStream |
python | sqlalchemy__sqlalchemy | test/orm/test_relationships.py | {
"start": 103822,
"end": 106554
} | class ____(fixtures.MappedTest):
"""'viewonly' mappings with unique PK column names."""
@classmethod
def define_tables(cls, metadata):
Table(
"t1",
metadata,
Column(
"t1id",
Integer,
primary_key=True,
test_needs_autoincrement=True,
),
Column("data", String(40)),
)
Table(
"t2",
metadata,
Column(
"t2id",
Integer,
primary_key=True,
test_needs_autoincrement=True,
),
Column("data", String(40)),
Column("t1id_ref", Integer, ForeignKey("t1.t1id")),
)
Table(
"t3",
metadata,
Column(
"t3id",
Integer,
primary_key=True,
test_needs_autoincrement=True,
),
Column("data", String(40)),
Column("t2id_ref", Integer, ForeignKey("t2.t2id")),
)
def test_three_table_view(self):
"""A three table join with overlapping PK names.
A third table is pulled into the primary join condition using unique
PK column names and should not produce 'mapper has no columnX' error.
"""
t2, t3, t1 = (self.tables.t2, self.tables.t3, self.tables.t1)
class C1(BasicEntity):
pass
class C2(BasicEntity):
pass
class C3(BasicEntity):
pass
self.mapper_registry.map_imperatively(
C1,
t1,
properties={
"t2s": relationship(C2),
"t2_view": relationship(
C2,
viewonly=True,
primaryjoin=sa.and_(
t1.c.t1id == t2.c.t1id_ref,
t3.c.t2id_ref == t2.c.t2id,
t3.c.data == t1.c.data,
),
),
},
)
self.mapper_registry.map_imperatively(C2, t2)
self.mapper_registry.map_imperatively(
C3, t3, properties={"t2": relationship(C2)}
)
c1 = C1()
c1.data = "c1data"
c2a = C2()
c1.t2s.append(c2a)
c2b = C2()
c1.t2s.append(c2b)
c3 = C3()
c3.data = "c1data"
c3.t2 = c2b
sess = fixture_session()
sess.add_all((c1, c3))
sess.flush()
sess.expunge_all()
c1 = sess.get(C1, c1.t1id)
assert {x.t2id for x in c1.t2s} == {c2a.t2id, c2b.t2id}
assert {x.t2id for x in c1.t2_view} == {c2b.t2id}
| ViewOnlyUniqueNames |
python | weaviate__weaviate-python-client | weaviate/collections/classes/config.py | {
"start": 11915,
"end": 12225
} | class ____(_GenerativeProvider):
generative: Union[GenerativeSearches, _EnumLikeStr] = Field(
default=GenerativeSearches.FRIENDLIAI, frozen=True, exclude=True
)
temperature: Optional[float]
model: Optional[str]
maxTokens: Optional[int]
baseURL: Optional[str]
| _GenerativeFriendliai |
python | doocs__leetcode | solution/0200-0299/0239.Sliding Window Maximum/Solution.py | {
"start": 0,
"end": 377
} | class ____:
def maxSlidingWindow(self, nums: List[int], k: int) -> List[int]:
q = [(-v, i) for i, v in enumerate(nums[: k - 1])]
heapify(q)
ans = []
for i in range(k - 1, len(nums)):
heappush(q, (-nums[i], i))
while q[0][1] <= i - k:
heappop(q)
ans.append(-q[0][0])
return ans
| Solution |
python | nedbat__coveragepy | coverage/exceptions.py | {
"start": 993,
"end": 1092
} | class ____(CoverageException):
"""We couldn't find the source for a module."""
pass
| NoSource |
python | aio-libs__aiohttp | aiohttp/_websocket/models.py | {
"start": 2899,
"end": 3170
} | class ____(Exception):
"""WebSocket protocol parser error."""
def __init__(self, code: int, message: str) -> None:
self.code = code
super().__init__(code, message)
def __str__(self) -> str:
return cast(str, self.args[1])
| WebSocketError |
python | charliermarsh__ruff | crates/ruff_python_formatter/resources/test/fixtures/ruff/statement/class_definition.py | {
"start": 2289,
"end": 2419
} | class ____[Aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa, *Bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb, **Cccccccccccccccccccccc]:
pass
| TestTypeParams |
python | faif__python-patterns | patterns/structural/bridge.py | {
"start": 386,
"end": 552
} | class ____:
def draw_circle(self, x: int, y: int, radius: float) -> None:
print(f"API2.circle at {x}:{y} radius {radius}")
# Refined Abstraction
| DrawingAPI2 |
python | kamyu104__LeetCode-Solutions | Python/minimum-time-to-collect-all-apples-in-a-tree.py | {
"start": 50,
"end": 1101
} | class ____(object):
def minTime(self, n, edges, hasApple):
"""
:type n: int
:type edges: List[List[int]]
:type hasApple: List[bool]
:rtype: int
"""
graph = collections.defaultdict(list)
for u, v in edges:
graph[u].append(v)
graph[v].append(u)
result = [0, 0]
s = [(1, (-1, 0, result))]
while s:
step, params = s.pop()
if step == 1:
par, node, ret = params
ret[:] = [0, int(hasApple[node])]
for nei in reversed(graph[node]):
if nei == par:
continue
new_ret = [0, 0]
s.append((2, (new_ret, ret)))
s.append((1, (node, nei, new_ret)))
else:
new_ret, ret = params
ret[0] += new_ret[0]+new_ret[1]
ret[1] |= bool(new_ret[0]+new_ret[1])
return 2*result[0]
# Time: O(n)
# Space: O(n)
| Solution |
python | ipython__ipython | IPython/core/magic_arguments.py | {
"start": 9104,
"end": 9459
} | class ____(ArgMethodWrapper):
""" Store arguments and keywords to pass to add_argument_group().
Instances also serve to decorate command methods.
"""
def add_to_parser(self, parser, group):
""" Add this object's information to the parser.
"""
return parser.add_argument_group(*self.args, **self.kwds)
| argument_group |
python | Pylons__pyramid | tests/test_request.py | {
"start": 24448,
"end": 24646
} | class ____:
def __init__(self, result):
self.result = result
def generate(self, path, request, **kw):
self.args = path, request, kw
return self.result
| DummyStaticURLInfo |
python | dagster-io__dagster | python_modules/libraries/dagster-k8s/dagster_k8s/client.py | {
"start": 1086,
"end": 1876
} | class ____(Exception):
def __init__(self, *args, **kwargs):
k8s_api_exception = check.inst_param(
kwargs.pop("k8s_api_exception"), "k8s_api_exception", Exception
)
original_exc_info = check.tuple_param(kwargs.pop("original_exc_info"), "original_exc_info")
max_retries = check.int_param(kwargs.pop("max_retries"), "max_retries")
check.invariant(original_exc_info[0] is not None)
super().__init__(
f"Retry limit of {max_retries} exceeded: " + args[0],
*args[1:],
**kwargs,
)
self.k8s_api_exception = check.opt_inst_param(
k8s_api_exception, "k8s_api_exception", Exception
)
self.original_exc_info = original_exc_info
| DagsterK8sAPIRetryLimitExceeded |
python | dask__distributed | distributed/diagnostics/progressbar.py | {
"start": 9973,
"end": 15837
} | class ____(MultiProgressBar):
"""Multiple progress bar Widget suitable for the notebook
Displays multiple progress bars for a computation, split on computation
type.
See Also
--------
progress: User-level function <--- use this
MultiProgress: Non-visualization component that contains most logic
ProgressWidget: Single progress bar widget
"""
def __init__(
self,
keys,
scheduler=None,
minimum=0,
**kwargs,
):
super().__init__(keys, scheduler, **kwargs)
from ipywidgets import VBox
self.widget = VBox([])
def make_widget(self, all):
from ipywidgets import HTML, FloatProgress, HBox, VBox
def make_label(key):
if isinstance(key, tuple):
# tuple of (group_name, group_id)
key = key[0]
key = key.decode() if isinstance(key, bytes) else key
return html.escape(key)
self.elapsed_time = HTML("")
self.bars = {key: FloatProgress(min=0, max=1, description="") for key in all}
self.bar_texts = {key: HTML("") for key in all}
self.bar_labels = {
key: HTML(
'<div style="padding: 0px 10px 0px 10px;'
" text-align:left; word-wrap: "
'break-word;">' + make_label(key) + "</div>"
)
for key in all
}
def keyfunc(kv):
"""Order keys by most numerous, then by string name"""
return kv[::-1]
key_order = [k for k, v in sorted(all.items(), key=keyfunc, reverse=True)]
self.bar_widgets = VBox(
[
HBox([self.bar_texts[key], self.bars[key], self.bar_labels[key]])
for key in key_order
]
)
self.widget.children = (self.elapsed_time, self.bar_widgets)
def _ipython_display_(self, **kwargs):
IOLoop.current().add_callback(self.listen)
from IPython.display import display
display(self.widget, **kwargs)
def _draw_stop(self, remaining, status, exception=None, key=None, **kwargs):
for k, v in remaining.items():
if not v:
self.bars[k].bar_style = "success"
else:
self.bars[k].bar_style = "danger"
if status == "error":
_, exception, _ = clean_exception(exception)
# self.bars[self.func(key)].bar_style = 'danger' # TODO
self.elapsed_time.value = (
'<div style="padding: 0px 10px 5px 10px"><b>Exception</b> '
+ "<tt>"
+ repr(exception)
+ "</tt>:"
+ format_time(self.elapsed)
+ " "
+ "</div>"
)
else:
self.elapsed_time.value = (
'<div style="padding: 0px 10px 5px 10px">'
f"<b>Finished:</b> {format_time(self.elapsed) if self.keys else 'no tasks given'}</div>"
"</div>"
)
def _draw_bar(self, remaining, all, status, **kwargs):
if not self.widget.children:
self.make_widget(all)
for k, ntasks in all.items():
ndone = ntasks - remaining[k]
self.elapsed_time.value = (
'<div style="padding: 0px 10px 5px 10px"><b>Computing:</b> '
+ format_time(self.elapsed)
+ "</div>"
)
self.bars[k].value = ndone / ntasks if ntasks else 1.0
self.bar_texts[k].value = (
'<div style="padding: 0px 10px 0px 10px; text-align: right">%d / %d</div>'
% (ndone, ntasks)
)
def progress(
*futures, notebook=None, multi=True, complete=True, group_by="prefix", **kwargs
):
"""Track progress of futures
This operates differently in the notebook and the console
* Notebook: This returns immediately, leaving an IPython widget on screen
* Console: This blocks until the computation completes
Parameters
----------
futures : Futures
A list of futures or keys to track
notebook : bool (optional)
Running in the notebook or not (defaults to guess)
multi : bool (optional)
Track different functions independently (defaults to True)
complete : bool (optional)
Track all keys (True) or only keys that have not yet run (False)
(defaults to True)
group_by : Callable | Literal["spans"] | Literal["prefix"]
Use spans instead of task key names for grouping tasks
(defaults to "prefix")
Notes
-----
In the notebook, the output of `progress` must be the last statement
in the cell. Typically, this means calling `progress` at the end of a
cell.
Examples
--------
>>> progress(futures) # doctest: +SKIP
[########################################] | 100% Completed | 1.7s
"""
futures = futures_of(futures)
if not isinstance(futures, (set, list)):
futures = [futures]
if notebook is None:
notebook = is_kernel() # often but not always correct assumption
if kwargs.get("func", None) is not None:
warnings.warn(
"`func` is deprecated, use `group_by` instead", category=DeprecationWarning
)
group_by = kwargs.pop("func")
if group_by not in ("spans", "prefix") and not isinstance(group_by, Callable):
raise ValueError("`group_by` should be 'spans', 'prefix', or a Callable")
if notebook:
if multi:
bar = MultiProgressWidget(
futures, complete=complete, group_by=group_by, **kwargs
)
else:
bar = ProgressWidget(futures, complete=complete, **kwargs)
return bar
else:
TextProgressBar(futures, complete=complete, **kwargs)
| MultiProgressWidget |
python | scipy__scipy | scipy/spatial/tests/test_hausdorff.py | {
"start": 304,
"end": 8217
} | class ____:
# Test various properties of the directed Hausdorff code.
def setup_method(self):
np.random.seed(1234)
random_angles = np.random.random(100) * np.pi * 2
random_columns = np.column_stack(
(random_angles, random_angles, np.zeros(100)))
random_columns[..., 0] = np.cos(random_columns[..., 0])
random_columns[..., 1] = np.sin(random_columns[..., 1])
random_columns_2 = np.column_stack(
(random_angles, random_angles, np.zeros(100)))
random_columns_2[1:, 0] = np.cos(random_columns_2[1:, 0]) * 2.0
random_columns_2[1:, 1] = np.sin(random_columns_2[1:, 1]) * 2.0
# move one point farther out so we don't have two perfect circles
random_columns_2[0, 0] = np.cos(random_columns_2[0, 0]) * 3.3
random_columns_2[0, 1] = np.sin(random_columns_2[0, 1]) * 3.3
self.path_1 = random_columns
self.path_2 = random_columns_2
self.path_1_4d = np.insert(self.path_1, 3, 5, axis=1)
self.path_2_4d = np.insert(self.path_2, 3, 27, axis=1)
def test_symmetry(self):
# Ensure that the directed (asymmetric) Hausdorff distance is
# actually asymmetric
forward = directed_hausdorff(self.path_1, self.path_2)[0]
reverse = directed_hausdorff(self.path_2, self.path_1)[0]
assert forward != reverse
def test_brute_force_comparison_forward(self):
# Ensure that the algorithm for directed_hausdorff gives the
# same result as the simple / brute force approach in the
# forward direction.
actual = directed_hausdorff(self.path_1, self.path_2)[0]
# brute force over rows:
expected = max(np.amin(distance.cdist(self.path_1, self.path_2),
axis=1))
assert_allclose(actual, expected)
def test_brute_force_comparison_reverse(self):
# Ensure that the algorithm for directed_hausdorff gives the
# same result as the simple / brute force approach in the
# reverse direction.
actual = directed_hausdorff(self.path_2, self.path_1)[0]
# brute force over columns:
expected = max(np.amin(distance.cdist(self.path_1, self.path_2),
axis=0))
assert_allclose(actual, expected)
def test_degenerate_case(self):
# The directed Hausdorff distance must be zero if both input
# data arrays match.
actual = directed_hausdorff(self.path_1, self.path_1)[0]
assert_allclose(actual, 0.0)
def test_2d_data_forward(self):
# Ensure that 2D data is handled properly for a simple case
# relative to brute force approach.
actual = directed_hausdorff(self.path_1[..., :2],
self.path_2[..., :2])[0]
expected = max(np.amin(distance.cdist(self.path_1[..., :2],
self.path_2[..., :2]),
axis=1))
assert_allclose(actual, expected)
def test_4d_data_reverse(self):
# Ensure that 4D data is handled properly for a simple case
# relative to brute force approach.
actual = directed_hausdorff(self.path_2_4d, self.path_1_4d)[0]
# brute force over columns:
expected = max(np.amin(distance.cdist(self.path_1_4d, self.path_2_4d),
axis=0))
assert_allclose(actual, expected)
def test_indices(self):
# Ensure that correct point indices are returned -- they should
# correspond to the Hausdorff pair
path_simple_1 = np.array([[-1,-12],[0,0], [1,1], [3,7], [1,2]])
path_simple_2 = np.array([[0,0], [1,1], [4,100], [10,9]])
actual = directed_hausdorff(path_simple_2, path_simple_1)[1:]
expected = (2, 3)
assert_array_equal(actual, expected)
def test_random_state(self):
# ensure that the global random state is not modified because
# the directed Hausdorff algorithm uses randomization
rs = check_random_state(None)
old_global_state = rs.get_state()
directed_hausdorff(self.path_1, self.path_2)
rs2 = check_random_state(None)
new_global_state = rs2.get_state()
assert_equal(new_global_state, old_global_state)
@pytest.mark.parametrize("seed", [None, 27870671, np.random.default_rng(177)])
def test_random_state_None_int(self, seed):
# check that seed values of None or int do not alter global
# random state
rs = check_random_state(None)
old_global_state = rs.get_state()
directed_hausdorff(self.path_1, self.path_2, seed)
rs2 = check_random_state(None)
new_global_state = rs2.get_state()
assert_equal(new_global_state, old_global_state)
def test_invalid_dimensions(self):
# Ensure that a ValueError is raised when the number of columns
# is not the same
rng = np.random.default_rng(189048172503940875434364128139223470523)
A = rng.random((3, 2))
B = rng.random((3, 5))
msg = r"need to have the same number of columns"
with pytest.raises(ValueError, match=msg):
directed_hausdorff(A, B)
# preserve use of legacy keyword `seed` during SPEC 7 transition
@pytest.mark.parametrize("A, B, seed, expected", [
# the two cases from gh-11332
([(0,0)],
[(0,1), (0,0)],
np.int64(0),
(0.0, 0, 1)),
([(0,0)],
[(0,1), (0,0)],
1,
(0.0, 0, 1)),
# gh-11332 cases with a Generator
([(0,0)],
[(0,1), (0,0)],
np.random.default_rng(0),
(0.0, 0, 1)),
([(0,0)],
[(0,1), (0,0)],
np.random.default_rng(1),
(0.0, 0, 1)),
# slightly more complex case
([(-5, 3), (0,0)],
[(0,1), (0,0), (-5, 3)],
77098,
# the maximum minimum distance will
# be the last one found, but a unique
# solution is not guaranteed more broadly
(0.0, 1, 1)),
# repeated with Generator seeding
([(-5, 3), (0,0)],
[(0,1), (0,0), (-5, 3)],
np.random.default_rng(77098),
# NOTE: using a Generator changes the
# indices but not the distance (unique solution
# not guaranteed)
(0.0, 0, 2)),
])
def test_subsets(self, A, B, seed, expected, num_parallel_threads):
# verify fix for gh-11332
actual = directed_hausdorff(u=A, v=B, seed=seed)
# check distance
assert_allclose(actual[0], expected[0])
starting_seed = seed
if hasattr(seed, 'bit_generator'):
starting_seed = seed.bit_generator._seed_seq.entropy
# check indices
if num_parallel_threads == 1 or starting_seed != 77098:
assert actual[1:] == expected[1:]
if not isinstance(seed, np.random.RandomState):
# Check that new `rng` keyword is also accepted
actual = directed_hausdorff(u=A, v=B, rng=seed)
assert_allclose(actual[0], expected[0])
@pytest.mark.xslow
def test_massive_arr_overflow():
# on 64-bit systems we should be able to
# handle arrays that exceed the indexing
# size of a 32-bit signed integer
try:
import psutil
except ModuleNotFoundError:
pytest.skip("psutil required to check available memory")
if psutil.virtual_memory().available < 80*2**30:
# Don't run the test if there is less than 80 gig of RAM available.
pytest.skip('insufficient memory available to run this test')
size = int(3e9)
arr1 = np.zeros(shape=(size, 2))
arr2 = np.zeros(shape=(3, 2))
arr1[size - 1] = [5, 5]
actual = directed_hausdorff(u=arr1, v=arr2)
assert_allclose(actual[0], 7.0710678118654755)
assert_allclose(actual[1], size - 1)
| TestHausdorff |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/autoVariance4.py | {
"start": 539,
"end": 589
} | class ____(Generic[T_co]):
pass
| Parent_Covariant |
python | spack__spack | lib/spack/spack/fetch_strategy.py | {
"start": 64822,
"end": 65651
} | class ____:
def __init__(self, root):
self.root = os.path.abspath(root)
def store(self, fetcher, relative_dest):
# skip fetchers that aren't cachable
if not fetcher.cachable:
return
# Don't store things that are already cached.
if isinstance(fetcher, CacheURLFetchStrategy):
return
dst = os.path.join(self.root, relative_dest)
mkdirp(os.path.dirname(dst))
fetcher.archive(dst)
def fetcher(self, target_path: str, digest: Optional[str], **kwargs) -> CacheURLFetchStrategy:
path = os.path.join(self.root, target_path)
url = url_util.path_to_file_url(path)
return CacheURLFetchStrategy(url=url, checksum=digest, **kwargs)
def destroy(self):
shutil.rmtree(self.root, ignore_errors=True)
| FsCache |
python | ApeWorX__ape | src/ape/pytest/fixtures.py | {
"start": 17301,
"end": 17898
} | class ____:
"""
All the data necessary for accurately supporting isolation.
"""
scope: Scope
"""Corresponds to fixture scope."""
identifier: Optional["SnapshotID"] = None
"""Snapshot ID taken before the peer-fixtures in the same scope."""
fixtures: list = field(default_factory=list)
"""All peer fixtures, tracked so we know when new ones are added."""
def append_fixtures(self, fixtures: Iterable[str]):
for fixture in fixtures:
if fixture in self.fixtures:
continue
self.fixtures.append(fixture)
| Snapshot |
python | huggingface__transformers | src/transformers/models/helium/modular_helium.py | {
"start": 5177,
"end": 5415
} | class ____(GemmaForTokenClassification):
pass
__all__ = [
"HeliumPreTrainedModel",
"HeliumModel",
"HeliumForCausalLM",
"HeliumForSequenceClassification",
"HeliumForTokenClassification",
]
| HeliumForTokenClassification |
python | facelessuser__pymdown-extensions | pymdownx/blocks/tab.py | {
"start": 3815,
"end": 8923
} | class ____(Block):
"""
Tabbed container.
Arguments (1 required):
- A tab title.
Options:
- `new` (boolean): since consecutive tabs are automatically grouped, `new` can force a tab
to start a new tab container.
Content:
Detail body.
"""
NAME = 'tab'
ARGUMENT = True
OPTIONS = {
'new': (False, type_boolean),
'select': (False, type_boolean)
}
def on_init(self):
"""Handle initialization."""
self.alternate_style = self.config['alternate_style']
self.slugify = callable(self.config['slugify'])
# Track tab group count across the entire page.
if 'tab_group_count' not in self.tracker:
self.tracker['tab_group_count'] = 0
self.tab_content = None
def last_child(self, parent):
"""Return the last child of an `etree` element."""
if len(parent):
return parent[-1]
else:
return None
def on_add(self, block):
"""Adjust where the content is added."""
if self.tab_content is None:
if self.alternate_style:
for d in block.findall('div'):
c = d.attrib['class']
if c == 'tabbed-content' or c.startswith('tabbed-content '):
self.tab_content = list(d)[-1]
break
else:
self.tab_content = list(block)[-1]
return self.tab_content
def on_create(self, parent):
"""Create the element."""
new_group = self.options['new']
select = self.options['select']
title = self.argument
sibling = self.last_child(parent)
tabbed_set = 'tabbed-set' if not self.alternate_style else 'tabbed-set tabbed-alternate'
index = 0
labels = None
content = None
if (
sibling is not None and sibling.tag.lower() == 'div' and
sibling.attrib.get('class', '') == tabbed_set and
not new_group
):
first = False
tab_group = sibling
if self.alternate_style:
index = [index for index, _ in enumerate(tab_group.findall('input'), 1)][-1]
for d in tab_group.findall('div'):
if d.attrib['class'] == 'tabbed-labels':
labels = d
elif d.attrib['class'] == 'tabbed-content':
content = d
if labels is not None and content is not None:
break
else:
first = True
self.tracker['tab_group_count'] += 1
tab_group = etree.SubElement(
parent,
'div',
{'class': tabbed_set, 'data-tabs': '%d:0' % self.tracker['tab_group_count']}
)
if self.alternate_style:
labels = etree.SubElement(
tab_group,
'div',
{'class': 'tabbed-labels'}
)
content = etree.SubElement(
tab_group,
'div',
{'class': 'tabbed-content'}
)
data = tab_group.attrib['data-tabs'].split(':')
tab_set = int(data[0])
tab_count = int(data[1]) + 1
attributes = {
"name": "__tabbed_%d" % tab_set,
"type": "radio"
}
if not self.slugify:
attributes['id'] = "__tabbed_%d_%d" % (tab_set, tab_count)
attributes2 = {"for": "__tabbed_%d_%d" % (tab_set, tab_count)} if not self.slugify else {}
if first or select:
attributes['checked'] = 'checked'
# Remove any previously assigned "checked states" to siblings
for i in tab_group.findall('input'):
if i.attrib.get('name', '') == f'__tabbed_{tab_set}':
if 'checked' in i.attrib:
del i.attrib['checked']
if self.alternate_style:
input_el = etree.Element(
'input',
attributes
)
tab_group.insert(index, input_el)
lab = etree.SubElement(
labels,
"label",
attributes2
)
lab.text = title
attrib = {'class': 'tabbed-block'}
etree.SubElement(
content,
"div",
attrib
)
else:
etree.SubElement(
tab_group,
'input',
attributes
)
lab = etree.SubElement(
tab_group,
"label",
attributes2
)
lab.text = title
etree.SubElement(
tab_group,
"div",
{
"class": "tabbed-content"
}
)
tab_group.attrib['data-tabs'] = '%d:%d' % (tab_set, tab_count)
return tab_group
| Tab |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.