language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | pennersr__django-allauth | allauth/idp/oidc/internal/oauthlib/request_validator.py | {
"start": 620,
"end": 16302
} | class ____(RequestValidator):
def validate_client_id(self, client_id: str, request):
client = self._lookup_client(request, client_id)
if not client:
return False
self._use_client(request, client)
return True
def validate_redirect_uri(self, client_id, redirect_uri, request, *args, **kwargs):
return is_redirect_uri_allowed(
redirect_uri,
request.client.get_redirect_uris(),
request.client.allow_uri_wildcards,
)
def validate_response_type(
self, client_id, response_type, client, request, *args, **kwargs
):
return response_type in request.client.get_response_types()
def validate_scopes(self, client_id, scopes, client, request, *args, **kwargs):
return set(scopes).issubset(request.client.get_scopes())
def get_default_scopes(self, client_id, request, *args, **kwargs):
return request.client.get_default_scopes()
def save_authorization_code(self, client_id, code, request, *args, **kwargs):
# WORKAROUND: docstring says:
# > To support OIDC, you MUST associate the code with:
# > - nonce, if present (``code["nonce"]``)
# Yet, nonce is not there, it is in request.nonce.
nonce = getattr(request, "nonce", None)
if nonce:
code = dict(**code, nonce=nonce)
# (end WORKAROUND)
authorization_codes.create(request.client, code, request)
def authenticate_client_id(self, client_id, request, *args, **kwargs) -> bool:
"""Ensure client_id belong to a non-confidential client."""
client = self._lookup_client(request, client_id)
if not client or client.type != Client.Type.PUBLIC:
return False
self._use_client(request, client)
return True
def authenticate_client(self, request, *args, **kwargs) -> bool:
client_id = getattr(request, "client_id", None)
client_secret = getattr(request, "client_secret", None)
if not isinstance(client_id, str):
return False
if not client_secret and request.grant_type == Client.GrantType.DEVICE_CODE:
return self.authenticate_client_id(client_id, request)
if not client_secret or not isinstance(client_secret, str):
return False
client = self._lookup_client(request, client_id)
if not client:
return False
if not client.check_secret(client_secret):
return False
self._use_client(request, client)
return True
def validate_grant_type(
self, client_id, grant_type, client, request, *args, **kwargs
):
return grant_type in client.get_grant_types()
def validate_code(self, client_id, code, client, request, *args, **kwargs):
return authorization_codes.validate(client_id, code, request)
def confirm_redirect_uri(
self, client_id, code, redirect_uri, client, request, *args, **kwargs
) -> bool:
authorization_code = self._lookup_authorization_code(request, client_id, code)
if not authorization_code:
return False
return redirect_uri == authorization_code["redirect_uri"]
def save_bearer_token(self, token: dict, request, *args, **kwargs):
"""
https://datatracker.ietf.org/doc/html/rfc6749#section-6
> The authorization server MAY issue a new refresh token, in which case
> the client MUST discard the old refresh token and replace it with the
> new refresh token. The authorization server MAY revoke the old
> refresh token after issuing a new refresh token to the client. If a
> new refresh token is issued, the refresh token scope MUST be
> identical to that of the refresh token included by the client in the
> request.
https://datatracker.ietf.org/doc/html/rfc6749#section-1.5
> Refresh tokens are issued to the client by the authorization server and
> are used to obtain a new access token when the current access token becomes
> invalid or expires, or to obtain additional access tokens with identical or
> narrower scope
"""
adapter = get_adapter()
refresh_token = token.get("refresh_token")
email = getattr(request, "email", None)
tokens = []
if refresh_token:
refresh_token_hash = adapter.hash_token(refresh_token)
rt = getattr(request, "refresh_token_instance", None)
if rt and not email and "email" in request.scopes:
email = rt.get_scope_email()
if (
rt
and not app_settings.ROTATE_REFRESH_TOKEN
and refresh_token_hash == rt.hash
):
# We reuse our token.
pass
else:
if rt:
# If we have an existing refresh token, drop it, because of:
assert (
app_settings.ROTATE_REFRESH_TOKEN
or refresh_token_hash != rt.hash
) # nosec[assert_used]
rt.delete()
tokens.append(
Token(
client=request.client,
user=request.user,
type=Token.Type.REFRESH_TOKEN,
hash=refresh_token_hash,
)
)
tokens.append(
Token(
client=request.client,
user=request.user,
type=Token.Type.ACCESS_TOKEN,
hash=adapter.hash_token(token["access_token"]),
expires_at=timezone.now() + timedelta(seconds=token["expires_in"]),
)
)
for t in tokens:
t.set_scopes(request.scopes)
if email:
t.set_scope_email(email)
Token.objects.bulk_create(tokens)
def invalidate_authorization_code(self, client_id, code, request, *args, **kwargs):
authorization_codes.invalidate(client_id, code)
def validate_user_match(self, id_token_hint, scopes, claims, request) -> bool:
if not context.request.user:
return False
sub = None
if id_token_hint:
payload = decode_jwt_token(
id_token_hint,
client_id=request.client.id,
verify_exp=True,
verify_iss=True,
)
if payload is None:
return False
sub = payload.get("sub")
session_sub = get_adapter().get_user_sub(
request.client, context.request.user
)
if sub != session_sub:
return False
if claims:
sub = claims.get("sub")
session_sub = get_adapter().get_user_sub(
request.client, context.request.user
)
if sub != session_sub:
return False
return True
def get_authorization_code_scopes(
self, client_id, code, redirect_uri, request
) -> List[str]:
authorization_code = self._lookup_authorization_code(request, client_id, code)
if not authorization_code:
return []
return authorization_code["scopes"]
def get_authorization_code_nonce(self, client_id, code, redirect_uri, request):
authorization_code = self._lookup_authorization_code(request, client_id, code)
return authorization_code["code"].get("nonce")
def get_code_challenge(self, code, request):
ret = None
authorization_code = self._lookup_authorization_code(
request, request.client_id, code
)
if pkce := authorization_code.get("pkce"):
ret = pkce["code_challenge"]
return ret
def get_code_challenge_method(self, code, request):
ret = None
authorization_code = self._lookup_authorization_code(
request, request.client_id, code
)
if pkce := authorization_code.get("pkce"):
ret = pkce["code_challenge_method"]
return ret
def is_pkce_required(self, client_id, request) -> bool:
client = self._lookup_client(request, client_id)
return bool(client and client.type == Client.Type.PUBLIC)
def finalize_id_token(self, id_token: dict, token: dict, token_handler, request):
"""
https://openid.net/specs/openid-connect-core-1_0.html#StandardClaims
"""
adapter = get_adapter()
id_token["iss"] = adapter.get_issuer()
id_token["exp"] = id_token["iat"] + app_settings.ID_TOKEN_EXPIRES_IN
id_token["jti"] = uuid.uuid4().hex
email = getattr(request, "email", None)
id_token.update(
adapter.get_claims(
"id_token", request.user, request.client, request.scopes, email=email
)
)
adapter.populate_id_token(id_token, request.client, request.scopes)
jwk_dict, private_key = jwkkit.load_jwk_from_pem(app_settings.PRIVATE_KEY)
return jwt.encode(
id_token, private_key, algorithm="RS256", headers={"kid": jwk_dict["kid"]}
)
def validate_bearer_token(self, token, scopes, request) -> bool:
if not token:
return False
if context.request.GET.get("access_token") == token:
# Supporting tokens in query params is considered bad practice, yet,
# oauthlib supports this. E.g., if access tokens are sent via URI
# query parameters, such tokens may leak to log files and the HTTP
# 'referer'.
return False
instance = Token.objects.lookup(Token.Type.ACCESS_TOKEN, token)
if not instance:
return False
if not instance.user or not instance.user.is_active:
return False
granted_scopes = instance.get_scopes()
if not set(scopes).issubset(set(granted_scopes)):
return False
request.user = instance.user
self._use_client(request, instance.client)
request.scopes = granted_scopes
request.access_token = instance
return True
def revoke_token(self, token, token_type_hint, request, *args, **kwargs):
if token_type_hint == "access_token": # nosec
types = [Token.Type.ACCESS_TOKEN]
elif token_type_hint == "refresh_token": # nosec
types = [Token.Type.REFRESH_TOKEN]
else:
types = [Token.Type.ACCESS_TOKEN, Token.Type.REFRESH_TOKEN]
Token.objects.by_value(token).filter(type__in=types).delete()
def get_userinfo_claims(self, request):
email = request.access_token.get_scope_email()
return get_adapter().get_claims(
"userinfo", request.user, request.client, request.scopes, email=email
)
def get_default_redirect_uri(self, client_id, request, *args, **kwargs):
# https://openid.net/specs/openid-financial-api-part-1-1_0.html#section-5.2.2
# 9. shall require the redirect_uri in the authorization request;
# So, don't support a default.
return None
def validate_user(self, username, password, client, request, *args, **kwargs):
"""
Note that this bypasses MFA, which is why the password grant is not
recommended and hence disabled. This could work:
try:
user = get_account_adapter().authenticate(
context.request, username=username, password=password
)
except ValidationError:
return False
else:
if not user:
return False
request.user = user
return True
"""
return False
def validate_refresh_token(self, refresh_token, client, request, *args, **kwargs):
token = Token.objects.filter(client=client).lookup(
Token.Type.REFRESH_TOKEN, refresh_token
)
if not token:
return False
if not token.user or not token.user.is_active:
return False
request.user = token.user
request.refresh_token_instance = token
return True
def get_original_scopes(self, refresh_token, request, *args, **kwargs):
return request.refresh_token_instance.get_scopes()
def client_authentication_required(self, request, *args, **kwargs) -> bool:
if request.client_id and request.client_secret:
return True
client = self._lookup_client(request, request.client_id)
if client and client.type == Client.Type.PUBLIC:
return False
return super().client_authentication_required(request, *args, **kwargs)
def _lookup_client(self, request, client_id) -> Optional[Client]:
"""
In various places, oauthlib documents:
Note, while not strictly necessary it can often be very convenient
to set request.client to the client object associated with the
given client_id.
It's unclear though that if this is not explicitly stated, and, we still
were to set request.client, whether that could have adverse side
effects. So, don't assign request.client here.
"""
cache = request._client_cache = getattr(request, "_client_cache", {})
if client_id in cache:
client = cache[client_id]
else:
client = Client.objects.filter(id=client_id).first()
cache[client_id] = client
return client
def _use_client(self, request, client: Client) -> None:
request.client = client
request.client.client_id = client.id # type:ignore[attr-defined]
def _lookup_authorization_code(
self, request, client_id: str, code: str
) -> Optional[dict]:
cache = request._code_cache = getattr(request, "_code_cache", {})
key = (client_id, code)
if key in cache:
authorization_code = cache[key]
else:
authorization_code = authorization_codes.lookup(client_id, code)
cache[key] = authorization_code
return authorization_code
def is_origin_allowed(self, client_id, origin, request, *args, **kwargs) -> bool:
client = self._lookup_client(request, client_id)
return bool(
client
and is_origin_allowed(
origin, client.get_cors_origins(), client.allow_uri_wildcards
)
)
def rotate_refresh_token(self, request):
return app_settings.ROTATE_REFRESH_TOKEN
def validate_silent_login(self, request) -> bool:
if context.request.user.is_authenticated:
request.user = context.request.user
return True
return False
def validate_silent_authorization(self, request) -> bool:
granted_scopes = set()
tokens = Token.objects.valid().filter(
user=context.request.user,
type__in=[Token.Type.REFRESH_TOKEN, Token.Type.ACCESS_TOKEN],
)
for token in tokens.iterator():
granted_scopes.update(token.get_scopes())
return set(request.scopes).issubset(granted_scopes)
def validate_jwt_bearer_token(self, token, scopes, request):
if scopes:
# We don't have scope for the ID token
return False
payload = decode_jwt_token(token, verify_iss=True, verify_exp=True)
if payload is None:
return False
return self.validate_client_id(payload["aud"], request)
| OAuthLibRequestValidator |
python | django__django | tests/admin_filters/tests.py | {
"start": 6017,
"end": 6296
} | class ____(BookAdmin):
list_filter = (
"year",
"author",
"contributors",
("is_best_seller", BooleanFieldListFilter),
"date_registered",
"no",
("availability", BooleanFieldListFilter),
)
| BookAdminWithTupleBooleanFilter |
python | google__jax | tests/sparse_test.py | {
"start": 43926,
"end": 45161
} | class ____(sptu.SparseTestCase):
@jtu.sample_product(
[
dict(shape=shape, n_batch=layout.n_batch, n_dense=layout.n_dense)
for shape in [(5,), (5, 8), (8, 5), (3, 4, 5), (3, 4, 3, 2)]
for layout in sptu.iter_sparse_layouts(shape)
],
dtype=jtu.dtypes.floating,
indices_dtype=jtu.dtypes.integer,
)
def test_random_bcoo(self, shape, dtype, indices_dtype, n_batch, n_dense):
key = jax.random.PRNGKey(1701)
with jax.legacy_prng_key('allow'):
mat = sparse.random_bcoo(
key, shape=shape, dtype=dtype, indices_dtype=indices_dtype,
n_batch=n_batch, n_dense=n_dense)
mat_dense = mat.todense()
self.assertEqual(mat_dense.shape, shape)
self.assertEqual(mat_dense.dtype, dtype)
self.assertEqual(mat.indices.dtype, indices_dtype)
n_sparse = len(shape) - n_batch - n_dense
batch_shape, sparse_shape, dense_shape = split_list(shape, [n_batch, n_sparse])
approx_expected_num_nonzero = (
np.ceil(0.2 * math.prod(sparse_shape))
* math.prod(batch_shape) * math.prod(dense_shape))
num_nonzero = (mat_dense != 0).sum()
self.assertAlmostEqual(int(num_nonzero), approx_expected_num_nonzero, delta=2)
| SparseRandomTest |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/mro2.py | {
"start": 144,
"end": 243
} | class ____:
def foo(self, v1: str):
return None
def bar(self):
return None
| A |
python | django__django | django/db/models/functions/math.py | {
"start": 5785,
"end": 5857
} | class ____(Transform):
function = "SIGN"
lookup_name = "sign"
| Sign |
python | kamyu104__LeetCode-Solutions | Python/find-all-k-distant-indices-in-an-array.py | {
"start": 44,
"end": 532
} | class ____(object):
def findKDistantIndices(self, nums, key, k):
"""
:type nums: List[int]
:type key: int
:type k: int
:rtype: List[int]
"""
result = []
prev = -1
for i, x in enumerate(nums):
if x != key:
continue
for j in xrange(max(i-k, prev+1), min(i+k+1, len(nums))):
result.append(j)
prev = min(i+k, len(nums)-1)
return result
| Solution |
python | cherrypy__cherrypy | cherrypy/tutorial/tut08_generators_and_yield.py | {
"start": 381,
"end": 1478
} | class ____:
"""HTTP response streaming app."""
def header(self):
"""Render HTML layout header."""
return '<html><body><h2>Generators rule!</h2>'
def footer(self):
"""Render HTML layout footer."""
return '</body></html>'
@cherrypy.expose
def index(self):
"""Stream HTTP response body of generator app index URI."""
# Let's make up a list of users for presentation purposes
users = ['Remi', 'Carlos', 'Hendrik', 'Lorenzo Lamas']
# Every yield line adds one part to the total result body.
yield self.header()
yield '<h3>List of users:</h3>'
for user in users:
yield '%s<br/>' % user
yield self.footer()
tutconf = os.path.join(os.path.dirname(__file__), 'tutorial.conf')
if __name__ == '__main__':
# CherryPy always starts with app.root when trying to map request URIs
# to objects, so we need to mount a request handler root. A request
# to '/' will be mapped to HelloWorld().index().
cherrypy.quickstart(GeneratorDemo(), config=tutconf)
| GeneratorDemo |
python | ethereum__web3.py | web3/types.py | {
"start": 6063,
"end": 6183
} | class ____(TypedDict):
isSyncing: bool
startingBlock: int
currentBlock: int
highestBlock: int
| SyncProgress |
python | numba__numba | numba/tests/pdlike_usecase.py | {
"start": 406,
"end": 809
} | class ____(object):
"""
A minimal pandas.Index-like object.
"""
def __init__(self, data):
assert isinstance(data, np.ndarray)
assert data.ndim == 1
self._data = data
def __iter__(self):
return iter(self._data)
@property
def dtype(self):
return self._data.dtype
@property
def flags(self):
return self._data.flags
| Index |
python | apache__avro | lang/py/avro/schema.py | {
"start": 6539,
"end": 10288
} | class ____(abc.ABC, CanonicalPropertiesMixin):
"""Base class for all Schema classes."""
_reserved_properties = SCHEMA_RESERVED_PROPS
def __init__(self, type_: str, other_props: Optional[Mapping[str, object]] = None, validate_names: bool = True) -> None:
if not isinstance(type_, str):
raise avro.errors.SchemaParseException("Schema type must be a string.")
if type_ not in avro.constants.VALID_TYPES:
raise avro.errors.SchemaParseException(f"{type_} is not a valid type.")
self.set_prop("type", type_)
self.type = type_
self.props.update(other_props or {})
self.validate_names = validate_names
@abc.abstractmethod
def match(self, writer: "Schema") -> bool:
"""Return True if the current schema (as reader) matches the writer schema.
@arg writer: the writer schema to match against.
@return bool
"""
def __str__(self) -> str:
return json.dumps(self.to_json())
@abc.abstractmethod
def to_json(self, names: Optional[Names] = None) -> object:
"""
Converts the schema object into its AVRO specification representation.
Schema types that have names (records, enums, and fixed) must
be aware of not re-defining schemas that are already listed
in the parameter names.
"""
@abc.abstractmethod
def validate(self, datum: object) -> Optional["Schema"]:
"""Returns the appropriate schema object if datum is valid for that schema, else None.
To be implemented in subclasses.
Validation concerns only shape and type of data in the top level of the current schema.
In most cases, the returned schema object will be self. However, for UnionSchema objects,
the returned Schema will be the first branch schema for which validation passes.
@arg datum: The data to be checked for validity according to this schema
@return Optional[Schema]
"""
@abc.abstractmethod
def to_canonical_json(self, names: Optional[Names] = None) -> object:
"""
Converts the schema object into its Canonical Form
http://avro.apache.org/docs/current/spec.html#Parsing+Canonical+Form+for+Schemas
To be implemented in subclasses.
"""
@property
def canonical_form(self) -> str:
# The separators eliminate whitespace around commas and colons.
return json.dumps(self.to_canonical_json(), separators=(",", ":"))
@abc.abstractmethod
def __eq__(self, that: object) -> bool:
"""
Determines how two schema are compared.
Consider the mixins EqualByPropsMixin and EqualByJsonMixin
"""
def fingerprint(self, algorithm="CRC-64-AVRO") -> bytes:
"""
Generate fingerprint for supplied algorithm.
'CRC-64-AVRO' will be used as the algorithm by default, but any
algorithm supported by hashlib (as can be referenced with
`hashlib.algorithms_guaranteed`) can be specified.
`algorithm` param is used as an algorithm name, and NoSuchAlgorithmException
will be thrown if the algorithm is not among supported.
"""
schema = self.canonical_form.encode("utf-8")
if algorithm == "CRC-64-AVRO":
return _crc_64_fingerprint(schema)
if algorithm not in SUPPORTED_ALGORITHMS:
raise avro.errors.UnknownFingerprintAlgorithmException(f"Unknown Fingerprint Algorithm: {algorithm}")
# Generate digests with hashlib for all other algorithms
# Lowercase algorithm to support algorithm strings sent by other languages like Java
h = hashlib.new(algorithm.lower(), schema)
return h.digest()
| Schema |
python | apache__airflow | providers/standard/tests/unit/standard/operators/test_bash.py | {
"start": 1638,
"end": 12271
} | class ____:
def test_bash_operator_init(self):
"""Test the construction of the operator with its defaults and initially-derived attrs."""
op = BashOperator(task_id="bash_op", bash_command="echo")
assert op.bash_command == "echo"
assert op.env is None
assert op.append_env is False
assert op.output_encoding == "utf-8"
assert op.skip_on_exit_code == [99]
assert op.cwd is None
@pytest.mark.db_test
@pytest.mark.parametrize(
("append_env", "user_defined_env", "expected_airflow_home"),
[
(False, None, "MY_PATH_TO_AIRFLOW_HOME"),
(True, {"AIRFLOW_HOME": "OVERRIDDEN_AIRFLOW_HOME"}, "OVERRIDDEN_AIRFLOW_HOME"),
],
)
def test_echo_env_variables(
self, append_env, user_defined_env, expected_airflow_home, dag_maker, tmp_path
):
"""
Test that env variables are exported correctly to the task bash environment.
"""
utc_now = datetime.now(tz=timezone.utc)
expected = (
f"{expected_airflow_home}\n"
"AWESOME_PYTHONPATH\n"
"bash_op_test\n"
"echo_env_vars\n"
f"{utc_now.isoformat()}\n"
f"manual__{utc_now.isoformat()}\n"
)
date_env_name = "$AIRFLOW_CTX_LOGICAL_DATE" if AIRFLOW_V_3_0_PLUS else "$AIRFLOW_CTX_EXECUTION_DATE"
with dag_maker(
"bash_op_test",
default_args={"owner": "airflow", "retries": 100, "start_date": DEFAULT_DATE},
schedule="@daily",
dagrun_timeout=timedelta(minutes=60),
serialized=True,
):
tmp_file = tmp_path / "testfile"
BashOperator(
task_id="echo_env_vars",
bash_command=f"echo $AIRFLOW_HOME>> {tmp_file};"
f"echo $PYTHONPATH>> {tmp_file};"
f"echo $AIRFLOW_CTX_DAG_ID >> {tmp_file};"
f"echo $AIRFLOW_CTX_TASK_ID>> {tmp_file};"
f"echo {date_env_name}>> {tmp_file};"
f"echo $AIRFLOW_CTX_DAG_RUN_ID>> {tmp_file};",
append_env=append_env,
env=user_defined_env,
)
logical_date = utc_now
dr = dag_maker.create_dagrun(
run_type=DagRunType.MANUAL,
logical_date=logical_date,
start_date=utc_now,
state=State.RUNNING,
data_interval=(logical_date, logical_date),
)
with mock.patch.dict(
"os.environ", {"AIRFLOW_HOME": "MY_PATH_TO_AIRFLOW_HOME", "PYTHONPATH": "AWESOME_PYTHONPATH"}
):
dag_maker.run_ti("echo_env_vars", dr)
assert expected == tmp_file.read_text()
@pytest.mark.parametrize(
("val", "expected"),
[
("test-val", "test-val"),
("test-val\ntest-val\n", ""),
("test-val\ntest-val", "test-val"),
("", ""),
],
)
def test_return_value(self, val, expected, context):
op = BashOperator(task_id="abc", bash_command=f'set -e; echo "{val}";')
line = op.execute(context)
assert line == expected
def test_raise_exception_on_non_zero_exit_code(self, context):
bash_operator = BashOperator(bash_command="exit 42", task_id="test_return_value", dag=None)
with pytest.raises(
AirflowException, match="Bash command failed\\. The command returned a non-zero exit code 42\\."
):
bash_operator.execute(context)
def test_task_retries(self):
bash_operator = BashOperator(
bash_command='echo "stdout"', task_id="test_task_retries", retries=2, dag=None
)
assert bash_operator.retries == 2
def test_default_retries(self):
bash_operator = BashOperator(bash_command='echo "stdout"', task_id="test_default_retries", dag=None)
assert bash_operator.retries == 0
def test_command_not_found(self, context):
with pytest.raises(
AirflowException, match="Bash command failed\\. The command returned a non-zero exit code 127\\."
):
BashOperator(task_id="abc", bash_command="set -e; something-that-isnt-on-path").execute(context)
def test_unset_cwd(self, context):
val = "xxxx"
op = BashOperator(task_id="abc", bash_command=f'set -e; echo "{val}";')
line = op.execute(context)
assert line == val
def test_cwd_does_not_exist(self, context, tmp_path):
test_cmd = 'set -e; echo "xxxx" |tee outputs.txt'
test_cwd_folder = os.fspath(tmp_path / "test_command_with_cwd")
# There should be no exceptions when creating the operator even the `cwd` doesn't exist
bash_operator = BashOperator(task_id="abc", bash_command=test_cmd, cwd=os.fspath(test_cwd_folder))
with pytest.raises(AirflowException, match=f"Can not find the cwd: {test_cwd_folder}"):
bash_operator.execute(context)
def test_cwd_is_file(self, tmp_path):
test_cmd = 'set -e; echo "xxxx" |tee outputs.txt'
tmp_file = tmp_path / "testfile.var.env"
tmp_file.touch()
# Test if the cwd is a file_path
with pytest.raises(AirflowException, match=f"The cwd {tmp_file} must be a directory"):
BashOperator(task_id="abc", bash_command=test_cmd, cwd=os.fspath(tmp_file)).execute({})
def test_valid_cwd(self, context, tmp_path):
test_cmd = 'set -e; echo "xxxx" |tee outputs.txt'
test_cwd_path = tmp_path / "test_command_with_cwd"
test_cwd_path.mkdir()
# Test everything went alright
result = BashOperator(task_id="abc", bash_command=test_cmd, cwd=os.fspath(test_cwd_path)).execute(
context
)
assert result == "xxxx"
assert (test_cwd_path / "outputs.txt").read_text().splitlines()[0] == "xxxx"
@pytest.mark.parametrize(
("extra_kwargs", "actual_exit_code", "expected_exc"),
[
({}, 0, None),
({}, 100, AirflowException),
({}, 99, AirflowSkipException),
({"skip_on_exit_code": None}, 0, None),
({"skip_on_exit_code": None}, 100, AirflowException),
({"skip_on_exit_code": None}, 99, AirflowException),
({"skip_on_exit_code": 100}, 0, None),
({"skip_on_exit_code": 100}, 100, AirflowSkipException),
({"skip_on_exit_code": 100}, 99, AirflowException),
({"skip_on_exit_code": 0}, 0, AirflowSkipException),
({"skip_on_exit_code": [100]}, 0, None),
({"skip_on_exit_code": [100]}, 100, AirflowSkipException),
({"skip_on_exit_code": [100]}, 99, AirflowException),
({"skip_on_exit_code": [100, 102]}, 99, AirflowException),
({"skip_on_exit_code": (100,)}, 0, None),
({"skip_on_exit_code": (100,)}, 100, AirflowSkipException),
({"skip_on_exit_code": (100,)}, 99, AirflowException),
],
)
def test_skip(self, extra_kwargs, actual_exit_code, expected_exc, context):
kwargs = dict(task_id="abc", bash_command=f'set -e; echo "hello world"; exit {actual_exit_code};')
if extra_kwargs:
kwargs.update(**extra_kwargs)
if expected_exc is None:
BashOperator(**kwargs).execute(context)
else:
with pytest.raises(expected_exc):
BashOperator(**kwargs).execute(context)
def test_bash_operator_multi_byte_output(self, context):
op = BashOperator(
task_id="test_multi_byte_bash_operator",
bash_command="echo \u2600",
output_encoding="utf-8",
)
op.execute(context)
def test_bash_operator_output_processor(self, context):
json_string = '{"AAD_BASIC": "Azure Active Directory Basic"}'
op = BashOperator(
task_id="test_bash_operator_output_processor",
bash_command=f"echo '{json_string}'",
output_processor=lambda output: json.loads(output),
)
result = op.execute(context)
assert result == json.loads(json_string)
@pytest.mark.db_test
def test_bash_operator_kill(self, dag_maker):
import psutil
sleep_time = f"100{os.getpid()}"
with dag_maker(serialized=True):
BashOperator(
task_id="test_bash_operator_kill",
execution_timeout=timedelta(microseconds=25),
bash_command=f"/bin/bash -c 'sleep {sleep_time}'",
)
dr = dag_maker.create_dagrun()
with pytest.raises(AirflowTaskTimeout):
dag_maker.run_ti("test_bash_operator_kill", dr)
sleep(2)
for proc in psutil.process_iter():
if proc.cmdline() == ["sleep", sleep_time]:
os.kill(proc.pid, signal.SIGTERM)
pytest.fail("BashOperator's subprocess still running after stopping on timeout!")
@pytest.mark.db_test
def test_templated_fields(self, create_task_instance_of_operator):
ti = create_task_instance_of_operator(
BashOperator,
# Templated fields
bash_command='echo "{{ dag_run.dag_id }}"',
env={"FOO": "{{ ds }}"},
cwd="{{ task.dag.folder }}",
# Other parameters
dag_id="test_templated_fields_dag",
task_id="test_templated_fields_task",
)
ti.render_templates()
task: BashOperator = ti.task
assert task.bash_command == 'echo "test_templated_fields_dag"'
assert task.cwd == Path(__file__).absolute().parent.as_posix()
@pytest.mark.db_test
def test_templated_bash_script(self, dag_maker, tmp_path, session):
"""
Creates a .sh script with Jinja template.
Pass it to the BashOperator and ensure it gets correctly rendered and executed.
"""
bash_script: str = "sample.sh"
path: Path = tmp_path / bash_script
path.write_text('echo "{{ ti.task_id }}"')
with dag_maker(
dag_id="test_templated_bash_script", session=session, template_searchpath=os.fspath(path.parent)
):
BashOperator(task_id="test_templated_fields_task", bash_command=bash_script)
ti: TaskInstance = dag_maker.create_dagrun().task_instances[0]
session.add(ti)
session.commit()
context = ti.get_template_context(session=session)
ti.render_templates(context=context)
task: BashOperator = ti.task
result = task.execute(context=context)
assert result == "test_templated_fields_task"
| TestBashOperator |
python | getsentry__sentry | src/sentry/workflow_engine/types.py | {
"start": 1734,
"end": 2206
} | class ____(IntEnum):
OK = 0
LOW = PriorityLevel.LOW
MEDIUM = PriorityLevel.MEDIUM
HIGH = PriorityLevel.HIGH
# The unique key used to identify a group within a DataPacket result.
# For DataPackets that don't contain multiple values the key is just None.
# This is stored in 'DetectorState.detector_group_key'
DetectorGroupKey = str | None
DataConditionResult = DetectorPriorityLevel | int | float | bool | None
@dataclass(frozen=True)
| DetectorPriorityLevel |
python | PrefectHQ__prefect | src/prefect/_internal/testing.py | {
"start": 134,
"end": 2008
} | class ____:
"""Context manager for capturing exceptions during retry attempts."""
def __init__(self, attempt_number: int):
self.attempt_number = attempt_number
self.exception: Exception | None = None
def __enter__(self) -> Self:
return self
def __exit__(
self,
exc_type: type[BaseException] | None,
exc_val: BaseException | None,
exc_tb: Any,
) -> bool:
if exc_val is not None:
self.exception = exc_val # type: ignore
return exc_type is AssertionError
async def retry_asserts(
max_attempts: int = 3,
delay: float = 1.0,
) -> AsyncIterator[AssertionRetryAttempt]:
"""
Async generator that retries a block of assertions until it succeeds or max attempts is reached.
Useful for testing eventual consistency scenarios where changes may not
propagate immediately.
Args:
max_attempts: Maximum number of attempts before raising the exception.
delay: Time in seconds to wait between retry attempts.
Yields:
A context manager that captures exceptions during each attempt.
Raises:
The last exception raised within the block if all attempts fail.
Example:
```python
async for attempt in retry_asserts(max_attempts=3):
with attempt:
for deployment in deployments:
await session.refresh(deployment)
assert deployment.status == DeploymentStatus.READY
```
"""
for attempt_number in range(1, max_attempts + 1):
attempt = AssertionRetryAttempt(attempt_number)
yield attempt
if attempt.exception is None:
return # Success, exit early
if attempt_number == max_attempts:
raise attempt.exception
await asyncio.sleep(delay)
| AssertionRetryAttempt |
python | ipython__ipython | tests/test_oinspect.py | {
"start": 3855,
"end": 3943
} | class ____(object):
def __getattr__(self, name):
raise Exception(name)
| Awkward |
python | weaviate__weaviate-python-client | weaviate/collections/classes/config_vector_index.py | {
"start": 8671,
"end": 8885
} | class ____(_QuantizerConfigUpdate):
enabled: Optional[bool]
rescoreLimit: Optional[int]
trainingLimit: Optional[int]
@staticmethod
def quantizer_name() -> str:
return "sq"
| _SQConfigUpdate |
python | django__django | tests/aggregation/tests.py | {
"start": 1455,
"end": 92133
} | class ____(TestCase):
@classmethod
def setUpTestData(cls):
cls.a1 = Author.objects.create(name="Adrian Holovaty", age=34)
cls.a2 = Author.objects.create(name="Jacob Kaplan-Moss", age=35)
cls.a3 = Author.objects.create(name="Brad Dayley", age=45)
cls.a4 = Author.objects.create(name="James Bennett", age=29)
cls.a5 = Author.objects.create(name="Jeffrey Forcier", age=37)
cls.a6 = Author.objects.create(name="Paul Bissex", age=29)
cls.a7 = Author.objects.create(name="Wesley J. Chun", age=25)
cls.a8 = Author.objects.create(name="Peter Norvig", age=57)
cls.a9 = Author.objects.create(name="Stuart Russell", age=46)
cls.a1.friends.add(cls.a2, cls.a4)
cls.a2.friends.add(cls.a1, cls.a7)
cls.a4.friends.add(cls.a1)
cls.a5.friends.add(cls.a6, cls.a7)
cls.a6.friends.add(cls.a5, cls.a7)
cls.a7.friends.add(cls.a2, cls.a5, cls.a6)
cls.a8.friends.add(cls.a9)
cls.a9.friends.add(cls.a8)
cls.p1 = Publisher.objects.create(
name="Apress", num_awards=3, duration=datetime.timedelta(days=1)
)
cls.p2 = Publisher.objects.create(
name="Sams", num_awards=1, duration=datetime.timedelta(days=2)
)
cls.p3 = Publisher.objects.create(name="Prentice Hall", num_awards=7)
cls.p4 = Publisher.objects.create(name="Morgan Kaufmann", num_awards=9)
cls.p5 = Publisher.objects.create(name="Jonno's House of Books", num_awards=0)
cls.b1 = Book.objects.create(
isbn="159059725",
name="The Definitive Guide to Django: Web Development Done Right",
pages=447,
rating=4.5,
price=Decimal("30.00"),
contact=cls.a1,
publisher=cls.p1,
pubdate=datetime.date(2007, 12, 6),
)
cls.b2 = Book.objects.create(
isbn="067232959",
name="Sams Teach Yourself Django in 24 Hours",
pages=528,
rating=3.0,
price=Decimal("23.09"),
contact=cls.a3,
publisher=cls.p2,
pubdate=datetime.date(2008, 3, 3),
)
cls.b3 = Book.objects.create(
isbn="159059996",
name="Practical Django Projects",
pages=300,
rating=4.0,
price=Decimal("29.69"),
contact=cls.a4,
publisher=cls.p1,
pubdate=datetime.date(2008, 6, 23),
)
cls.b4 = Book.objects.create(
isbn="013235613",
name="Python Web Development with Django",
pages=350,
rating=4.0,
price=Decimal("29.69"),
contact=cls.a5,
publisher=cls.p3,
pubdate=datetime.date(2008, 11, 3),
)
cls.b5 = Book.objects.create(
isbn="013790395",
name="Artificial Intelligence: A Modern Approach",
pages=1132,
rating=4.0,
price=Decimal("82.80"),
contact=cls.a8,
publisher=cls.p3,
pubdate=datetime.date(1995, 1, 15),
)
cls.b6 = Book.objects.create(
isbn="155860191",
name=(
"Paradigms of Artificial Intelligence Programming: Case Studies in "
"Common Lisp"
),
pages=946,
rating=5.0,
price=Decimal("75.00"),
contact=cls.a8,
publisher=cls.p4,
pubdate=datetime.date(1991, 10, 15),
)
cls.b1.authors.add(cls.a1, cls.a2)
cls.b2.authors.add(cls.a3)
cls.b3.authors.add(cls.a4)
cls.b4.authors.add(cls.a5, cls.a6, cls.a7)
cls.b5.authors.add(cls.a8, cls.a9)
cls.b6.authors.add(cls.a8)
s1 = Store.objects.create(
name="Amazon.com",
original_opening=datetime.datetime(1994, 4, 23, 9, 17, 42),
friday_night_closing=datetime.time(23, 59, 59),
)
s2 = Store.objects.create(
name="Books.com",
original_opening=datetime.datetime(2001, 3, 15, 11, 23, 37),
friday_night_closing=datetime.time(23, 59, 59),
)
s3 = Store.objects.create(
name="Mamma and Pappa's Books",
original_opening=datetime.datetime(1945, 4, 25, 16, 24, 14),
friday_night_closing=datetime.time(21, 30),
)
s1.books.add(cls.b1, cls.b2, cls.b3, cls.b4, cls.b5, cls.b6)
s2.books.add(cls.b1, cls.b3, cls.b5, cls.b6)
s3.books.add(cls.b3, cls.b4, cls.b6)
def test_empty_aggregate(self):
self.assertEqual(Author.objects.aggregate(), {})
def test_aggregate_in_order_by(self):
msg = (
"Using an aggregate in order_by() without also including it in "
"annotate() is not allowed: Avg(F(book__rating)"
)
with self.assertRaisesMessage(FieldError, msg):
Author.objects.values("age").order_by(Avg("book__rating"))
def test_single_aggregate(self):
vals = Author.objects.aggregate(Avg("age"))
self.assertEqual(vals, {"age__avg": Approximate(37.4, places=1)})
def test_multiple_aggregates(self):
vals = Author.objects.aggregate(Sum("age"), Avg("age"))
self.assertEqual(
vals, {"age__sum": 337, "age__avg": Approximate(37.4, places=1)}
)
def test_filter_aggregate(self):
vals = Author.objects.filter(age__gt=29).aggregate(Sum("age"))
self.assertEqual(vals, {"age__sum": 254})
def test_related_aggregate(self):
vals = Author.objects.aggregate(Avg("friends__age"))
self.assertEqual(vals, {"friends__age__avg": Approximate(34.07, places=2)})
vals = Book.objects.filter(rating__lt=4.5).aggregate(Avg("authors__age"))
self.assertEqual(vals, {"authors__age__avg": Approximate(38.2857, places=2)})
vals = Author.objects.filter(name__contains="a").aggregate(Avg("book__rating"))
self.assertEqual(vals, {"book__rating__avg": 4.0})
vals = Book.objects.aggregate(Sum("publisher__num_awards"))
self.assertEqual(vals, {"publisher__num_awards__sum": 30})
vals = Publisher.objects.aggregate(Sum("book__price"))
self.assertEqual(vals, {"book__price__sum": Decimal("270.27")})
def test_aggregate_multi_join(self):
vals = Store.objects.aggregate(Max("books__authors__age"))
self.assertEqual(vals, {"books__authors__age__max": 57})
vals = Author.objects.aggregate(Min("book__publisher__num_awards"))
self.assertEqual(vals, {"book__publisher__num_awards__min": 1})
def test_aggregate_alias(self):
vals = Store.objects.filter(name="Amazon.com").aggregate(
amazon_mean=Avg("books__rating")
)
self.assertEqual(vals, {"amazon_mean": Approximate(4.08, places=2)})
def test_aggregate_transform(self):
vals = Store.objects.aggregate(min_month=Min("original_opening__month"))
self.assertEqual(vals, {"min_month": 3})
def test_aggregate_join_transform(self):
vals = Publisher.objects.aggregate(min_year=Min("book__pubdate__year"))
self.assertEqual(vals, {"min_year": 1991})
def test_annotate_basic(self):
self.assertQuerySetEqual(
Book.objects.annotate().order_by("pk"),
[
"The Definitive Guide to Django: Web Development Done Right",
"Sams Teach Yourself Django in 24 Hours",
"Practical Django Projects",
"Python Web Development with Django",
"Artificial Intelligence: A Modern Approach",
"Paradigms of Artificial Intelligence Programming: Case Studies in "
"Common Lisp",
],
lambda b: b.name,
)
books = Book.objects.annotate(mean_age=Avg("authors__age"))
b = books.get(pk=self.b1.pk)
self.assertEqual(
b.name, "The Definitive Guide to Django: Web Development Done Right"
)
self.assertEqual(b.mean_age, 34.5)
def test_annotate_defer(self):
qs = (
Book.objects.annotate(page_sum=Sum("pages"))
.defer("name")
.filter(pk=self.b1.pk)
)
rows = [
(
self.b1.id,
"159059725",
447,
"The Definitive Guide to Django: Web Development Done Right",
)
]
self.assertQuerySetEqual(
qs.order_by("pk"), rows, lambda r: (r.id, r.isbn, r.page_sum, r.name)
)
def test_annotate_defer_select_related(self):
qs = (
Book.objects.select_related("contact")
.annotate(page_sum=Sum("pages"))
.defer("name")
.filter(pk=self.b1.pk)
)
rows = [
(
self.b1.id,
"159059725",
447,
"Adrian Holovaty",
"The Definitive Guide to Django: Web Development Done Right",
)
]
self.assertQuerySetEqual(
qs.order_by("pk"),
rows,
lambda r: (r.id, r.isbn, r.page_sum, r.contact.name, r.name),
)
def test_annotate_m2m(self):
books = (
Book.objects.filter(rating__lt=4.5)
.annotate(Avg("authors__age"))
.order_by("name")
)
self.assertQuerySetEqual(
books,
[
("Artificial Intelligence: A Modern Approach", 51.5),
("Practical Django Projects", 29.0),
("Python Web Development with Django", Approximate(30.3, places=1)),
("Sams Teach Yourself Django in 24 Hours", 45.0),
],
lambda b: (b.name, b.authors__age__avg),
)
books = Book.objects.annotate(num_authors=Count("authors")).order_by("name")
self.assertQuerySetEqual(
books,
[
("Artificial Intelligence: A Modern Approach", 2),
(
"Paradigms of Artificial Intelligence Programming: Case Studies in "
"Common Lisp",
1,
),
("Practical Django Projects", 1),
("Python Web Development with Django", 3),
("Sams Teach Yourself Django in 24 Hours", 1),
("The Definitive Guide to Django: Web Development Done Right", 2),
],
lambda b: (b.name, b.num_authors),
)
def test_backwards_m2m_annotate(self):
authors = (
Author.objects.filter(name__contains="a")
.annotate(Avg("book__rating"))
.order_by("name")
)
self.assertQuerySetEqual(
authors,
[
("Adrian Holovaty", 4.5),
("Brad Dayley", 3.0),
("Jacob Kaplan-Moss", 4.5),
("James Bennett", 4.0),
("Paul Bissex", 4.0),
("Stuart Russell", 4.0),
],
lambda a: (a.name, a.book__rating__avg),
)
authors = Author.objects.annotate(num_books=Count("book")).order_by("name")
self.assertQuerySetEqual(
authors,
[
("Adrian Holovaty", 1),
("Brad Dayley", 1),
("Jacob Kaplan-Moss", 1),
("James Bennett", 1),
("Jeffrey Forcier", 1),
("Paul Bissex", 1),
("Peter Norvig", 2),
("Stuart Russell", 1),
("Wesley J. Chun", 1),
],
lambda a: (a.name, a.num_books),
)
def test_reverse_fkey_annotate(self):
books = Book.objects.annotate(Sum("publisher__num_awards")).order_by("name")
self.assertQuerySetEqual(
books,
[
("Artificial Intelligence: A Modern Approach", 7),
(
"Paradigms of Artificial Intelligence Programming: Case Studies in "
"Common Lisp",
9,
),
("Practical Django Projects", 3),
("Python Web Development with Django", 7),
("Sams Teach Yourself Django in 24 Hours", 1),
("The Definitive Guide to Django: Web Development Done Right", 3),
],
lambda b: (b.name, b.publisher__num_awards__sum),
)
publishers = Publisher.objects.annotate(Sum("book__price")).order_by("name")
self.assertQuerySetEqual(
publishers,
[
("Apress", Decimal("59.69")),
("Jonno's House of Books", None),
("Morgan Kaufmann", Decimal("75.00")),
("Prentice Hall", Decimal("112.49")),
("Sams", Decimal("23.09")),
],
lambda p: (p.name, p.book__price__sum),
)
def test_annotate_values(self):
books = list(
Book.objects.filter(pk=self.b1.pk)
.annotate(mean_age=Avg("authors__age"))
.values()
)
self.assertEqual(
books,
[
{
"contact_id": self.a1.id,
"id": self.b1.id,
"isbn": "159059725",
"mean_age": 34.5,
"name": (
"The Definitive Guide to Django: Web Development Done Right"
),
"pages": 447,
"price": Approximate(Decimal("30")),
"pubdate": datetime.date(2007, 12, 6),
"publisher_id": self.p1.id,
"rating": 4.5,
}
],
)
books = (
Book.objects.filter(pk=self.b1.pk)
.annotate(mean_age=Avg("authors__age"))
.values("pk", "isbn", "mean_age")
)
self.assertEqual(
list(books),
[
{
"pk": self.b1.pk,
"isbn": "159059725",
"mean_age": 34.5,
}
],
)
books = (
Book.objects.filter(pk=self.b1.pk)
.annotate(mean_age=Avg("authors__age"))
.values("name")
)
self.assertEqual(
list(books),
[{"name": "The Definitive Guide to Django: Web Development Done Right"}],
)
books = (
Book.objects.filter(pk=self.b1.pk)
.values()
.annotate(mean_age=Avg("authors__age"))
)
self.assertEqual(
list(books),
[
{
"contact_id": self.a1.id,
"id": self.b1.id,
"isbn": "159059725",
"mean_age": 34.5,
"name": (
"The Definitive Guide to Django: Web Development Done Right"
),
"pages": 447,
"price": Approximate(Decimal("30")),
"pubdate": datetime.date(2007, 12, 6),
"publisher_id": self.p1.id,
"rating": 4.5,
}
],
)
books = (
Book.objects.values("rating")
.annotate(n_authors=Count("authors__id"), mean_age=Avg("authors__age"))
.order_by("rating")
)
self.assertEqual(
list(books),
[
{
"rating": 3.0,
"n_authors": 1,
"mean_age": 45.0,
},
{
"rating": 4.0,
"n_authors": 6,
"mean_age": Approximate(37.16, places=1),
},
{
"rating": 4.5,
"n_authors": 2,
"mean_age": 34.5,
},
{
"rating": 5.0,
"n_authors": 1,
"mean_age": 57.0,
},
],
)
authors = Author.objects.annotate(Avg("friends__age")).order_by("name")
self.assertQuerySetEqual(
authors,
[
("Adrian Holovaty", 32.0),
("Brad Dayley", None),
("Jacob Kaplan-Moss", 29.5),
("James Bennett", 34.0),
("Jeffrey Forcier", 27.0),
("Paul Bissex", 31.0),
("Peter Norvig", 46.0),
("Stuart Russell", 57.0),
("Wesley J. Chun", Approximate(33.66, places=1)),
],
lambda a: (a.name, a.friends__age__avg),
)
def test_count(self):
vals = Book.objects.aggregate(Count("rating"))
self.assertEqual(vals, {"rating__count": 6})
def test_count_star(self):
with self.assertNumQueries(1) as ctx:
Book.objects.aggregate(n=Count("*"))
sql = ctx.captured_queries[0]["sql"]
self.assertIn("SELECT COUNT(*) ", sql)
def test_count_distinct_expression(self):
aggs = Book.objects.aggregate(
distinct_ratings=Count(
Case(When(pages__gt=300, then="rating")), distinct=True
),
)
self.assertEqual(aggs["distinct_ratings"], 4)
def test_distinct_on_aggregate(self):
for aggregate, expected_result in (
(Avg, 4.125),
(Count, 4),
(Sum, 16.5),
):
with self.subTest(aggregate=aggregate.__name__):
books = Book.objects.aggregate(
ratings=aggregate("rating", distinct=True)
)
self.assertEqual(books["ratings"], expected_result)
@skipUnlessDBFeature("supports_aggregate_distinct_multiple_argument")
def test_distinct_on_stringagg(self):
books = Book.objects.aggregate(
ratings=StringAgg(Cast(F("rating"), CharField()), Value(","), distinct=True)
)
self.assertCountEqual(books["ratings"].split(","), ["3", "4", "4.5", "5"])
@skipIfDBFeature("supports_aggregate_distinct_multiple_argument")
def test_raises_error_on_multiple_argument_distinct(self):
message = (
"StringAgg does not support distinct with multiple expressions on this "
"database backend."
)
with self.assertRaisesMessage(NotSupportedError, message):
Book.objects.aggregate(
ratings=StringAgg(
Cast(F("rating"), CharField()),
Value(","),
distinct=True,
)
)
def test_non_grouped_annotation_not_in_group_by(self):
"""
An annotation not included in values() before an aggregate should be
excluded from the group by clause.
"""
qs = (
Book.objects.annotate(xprice=F("price"))
.filter(rating=4.0)
.values("rating")
.annotate(count=Count("publisher_id", distinct=True))
.values("count", "rating")
.order_by("count")
)
self.assertEqual(list(qs), [{"rating": 4.0, "count": 2}])
def test_grouped_annotation_in_group_by(self):
"""
An annotation included in values() before an aggregate should be
included in the group by clause.
"""
qs = (
Book.objects.annotate(xprice=F("price"))
.filter(rating=4.0)
.values("rating", "xprice")
.annotate(count=Count("publisher_id", distinct=True))
.values("count", "rating")
.order_by("count")
)
self.assertEqual(
list(qs),
[
{"rating": 4.0, "count": 1},
{"rating": 4.0, "count": 2},
],
)
def test_fkey_aggregate(self):
explicit = list(Author.objects.annotate(Count("book__id")))
implicit = list(Author.objects.annotate(Count("book")))
self.assertCountEqual(explicit, implicit)
def test_annotate_ordering(self):
books = (
Book.objects.values("rating")
.annotate(oldest=Max("authors__age"))
.order_by("oldest", "rating")
)
self.assertEqual(
list(books),
[
{"rating": 4.5, "oldest": 35},
{"rating": 3.0, "oldest": 45},
{"rating": 4.0, "oldest": 57},
{"rating": 5.0, "oldest": 57},
],
)
books = (
Book.objects.values("rating")
.annotate(oldest=Max("authors__age"))
.order_by("-oldest", "-rating")
)
self.assertEqual(
list(books),
[
{"rating": 5.0, "oldest": 57},
{"rating": 4.0, "oldest": 57},
{"rating": 3.0, "oldest": 45},
{"rating": 4.5, "oldest": 35},
],
)
def test_aggregate_annotation(self):
vals = Book.objects.annotate(num_authors=Count("authors__id")).aggregate(
Avg("num_authors")
)
self.assertEqual(vals, {"num_authors__avg": Approximate(1.66, places=1)})
def test_avg_duration_field(self):
# Explicit `output_field`.
self.assertEqual(
Publisher.objects.aggregate(Avg("duration", output_field=DurationField())),
{"duration__avg": datetime.timedelta(days=1, hours=12)},
)
# Implicit `output_field`.
self.assertEqual(
Publisher.objects.aggregate(Avg("duration")),
{"duration__avg": datetime.timedelta(days=1, hours=12)},
)
def test_sum_duration_field(self):
self.assertEqual(
Publisher.objects.aggregate(Sum("duration", output_field=DurationField())),
{"duration__sum": datetime.timedelta(days=3)},
)
def test_sum_distinct_aggregate(self):
"""
Sum on a distinct() QuerySet should aggregate only the distinct items.
"""
authors = Author.objects.filter(book__in=[self.b5, self.b6])
self.assertEqual(authors.count(), 3)
distinct_authors = authors.distinct()
self.assertEqual(distinct_authors.count(), 2)
# Selected author ages are 57 and 46
age_sum = distinct_authors.aggregate(Sum("age"))
self.assertEqual(age_sum["age__sum"], 103)
def test_filtering(self):
p = Publisher.objects.create(name="Expensive Publisher", num_awards=0)
Book.objects.create(
name="ExpensiveBook1",
pages=1,
isbn="111",
rating=3.5,
price=Decimal("1000"),
publisher=p,
contact_id=self.a1.id,
pubdate=datetime.date(2008, 12, 1),
)
Book.objects.create(
name="ExpensiveBook2",
pages=1,
isbn="222",
rating=4.0,
price=Decimal("1000"),
publisher=p,
contact_id=self.a1.id,
pubdate=datetime.date(2008, 12, 2),
)
Book.objects.create(
name="ExpensiveBook3",
pages=1,
isbn="333",
rating=4.5,
price=Decimal("35"),
publisher=p,
contact_id=self.a1.id,
pubdate=datetime.date(2008, 12, 3),
)
publishers = (
Publisher.objects.annotate(num_books=Count("book__id"))
.filter(num_books__gt=1)
.order_by("pk")
)
self.assertQuerySetEqual(
publishers,
["Apress", "Prentice Hall", "Expensive Publisher"],
lambda p: p.name,
)
publishers = Publisher.objects.filter(book__price__lt=Decimal("40.0")).order_by(
"pk"
)
self.assertQuerySetEqual(
publishers,
[
"Apress",
"Apress",
"Sams",
"Prentice Hall",
"Expensive Publisher",
],
lambda p: p.name,
)
publishers = (
Publisher.objects.annotate(num_books=Count("book__id"))
.filter(num_books__gt=1, book__price__lt=Decimal("40.0"))
.order_by("pk")
)
self.assertQuerySetEqual(
publishers,
["Apress", "Prentice Hall", "Expensive Publisher"],
lambda p: p.name,
)
publishers = (
Publisher.objects.filter(book__price__lt=Decimal("40.0"))
.annotate(num_books=Count("book__id"))
.filter(num_books__gt=1)
.order_by("pk")
)
self.assertQuerySetEqual(publishers, ["Apress"], lambda p: p.name)
publishers = (
Publisher.objects.annotate(num_books=Count("book"))
.filter(num_books__range=[1, 3])
.order_by("pk")
)
self.assertQuerySetEqual(
publishers,
[
"Apress",
"Sams",
"Prentice Hall",
"Morgan Kaufmann",
"Expensive Publisher",
],
lambda p: p.name,
)
publishers = (
Publisher.objects.annotate(num_books=Count("book"))
.filter(num_books__range=[1, 2])
.order_by("pk")
)
self.assertQuerySetEqual(
publishers,
["Apress", "Sams", "Prentice Hall", "Morgan Kaufmann"],
lambda p: p.name,
)
publishers = (
Publisher.objects.annotate(num_books=Count("book"))
.filter(num_books__in=[1, 3])
.order_by("pk")
)
self.assertQuerySetEqual(
publishers,
["Sams", "Morgan Kaufmann", "Expensive Publisher"],
lambda p: p.name,
)
publishers = Publisher.objects.annotate(num_books=Count("book")).filter(
num_books__isnull=True
)
self.assertEqual(len(publishers), 0)
def test_annotation(self):
vals = Author.objects.filter(pk=self.a1.pk).aggregate(Count("friends__id"))
self.assertEqual(vals, {"friends__id__count": 2})
books = (
Book.objects.annotate(num_authors=Count("authors__name"))
.filter(num_authors__exact=2)
.order_by("pk")
)
self.assertQuerySetEqual(
books,
[
"The Definitive Guide to Django: Web Development Done Right",
"Artificial Intelligence: A Modern Approach",
],
lambda b: b.name,
)
authors = (
Author.objects.annotate(num_friends=Count("friends__id", distinct=True))
.filter(num_friends=0)
.order_by("pk")
)
self.assertQuerySetEqual(authors, ["Brad Dayley"], lambda a: a.name)
publishers = (
Publisher.objects.annotate(num_books=Count("book__id"))
.filter(num_books__gt=1)
.order_by("pk")
)
self.assertQuerySetEqual(
publishers, ["Apress", "Prentice Hall"], lambda p: p.name
)
publishers = (
Publisher.objects.filter(book__price__lt=Decimal("40.0"))
.annotate(num_books=Count("book__id"))
.filter(num_books__gt=1)
)
self.assertQuerySetEqual(publishers, ["Apress"], lambda p: p.name)
books = Book.objects.annotate(num_authors=Count("authors__id")).filter(
authors__name__contains="Norvig", num_authors__gt=1
)
self.assertQuerySetEqual(
books, ["Artificial Intelligence: A Modern Approach"], lambda b: b.name
)
def test_more_aggregation(self):
a = Author.objects.get(name__contains="Norvig")
b = Book.objects.get(name__contains="Done Right")
b.authors.add(a)
b.save()
vals = (
Book.objects.annotate(num_authors=Count("authors__id"))
.filter(authors__name__contains="Norvig", num_authors__gt=1)
.aggregate(Avg("rating"))
)
self.assertEqual(vals, {"rating__avg": 4.25})
def test_even_more_aggregate(self):
publishers = (
Publisher.objects.annotate(
earliest_book=Min("book__pubdate"),
)
.exclude(earliest_book=None)
.order_by("earliest_book")
.values(
"earliest_book",
"num_awards",
"id",
"name",
)
)
self.assertEqual(
list(publishers),
[
{
"earliest_book": datetime.date(1991, 10, 15),
"num_awards": 9,
"id": self.p4.id,
"name": "Morgan Kaufmann",
},
{
"earliest_book": datetime.date(1995, 1, 15),
"num_awards": 7,
"id": self.p3.id,
"name": "Prentice Hall",
},
{
"earliest_book": datetime.date(2007, 12, 6),
"num_awards": 3,
"id": self.p1.id,
"name": "Apress",
},
{
"earliest_book": datetime.date(2008, 3, 3),
"num_awards": 1,
"id": self.p2.id,
"name": "Sams",
},
],
)
vals = Store.objects.aggregate(
Max("friday_night_closing"), Min("original_opening")
)
self.assertEqual(
vals,
{
"friday_night_closing__max": datetime.time(23, 59, 59),
"original_opening__min": datetime.datetime(1945, 4, 25, 16, 24, 14),
},
)
def test_annotate_values_list(self):
books = (
Book.objects.filter(pk=self.b1.pk)
.annotate(mean_age=Avg("authors__age"))
.values_list("pk", "isbn", "mean_age")
)
self.assertEqual(list(books), [(self.b1.id, "159059725", 34.5)])
books = (
Book.objects.filter(pk=self.b1.pk)
.annotate(mean_age=Avg("authors__age"))
.values_list("isbn")
)
self.assertEqual(list(books), [("159059725",)])
books = (
Book.objects.filter(pk=self.b1.pk)
.annotate(mean_age=Avg("authors__age"))
.values_list("mean_age")
)
self.assertEqual(list(books), [(34.5,)])
books = (
Book.objects.filter(pk=self.b1.pk)
.annotate(mean_age=Avg("authors__age"))
.values_list("mean_age", flat=True)
)
self.assertEqual(list(books), [34.5])
books = (
Book.objects.values_list("price")
.annotate(count=Count("price"))
.order_by("-count", "price")
)
self.assertEqual(
list(books),
[
(Decimal("29.69"), 2),
(Decimal("23.09"), 1),
(Decimal("30"), 1),
(Decimal("75"), 1),
(Decimal("82.8"), 1),
],
)
def test_dates_with_aggregation(self):
"""
.dates() returns a distinct set of dates when applied to a
QuerySet with aggregation.
Refs #18056. Previously, .dates() would return distinct (date_kind,
aggregation) sets, in this case (year, num_authors), so 2008 would be
returned twice because there are books from 2008 with a different
number of authors.
"""
dates = Book.objects.annotate(num_authors=Count("authors")).dates(
"pubdate", "year"
)
self.assertSequenceEqual(
dates,
[
datetime.date(1991, 1, 1),
datetime.date(1995, 1, 1),
datetime.date(2007, 1, 1),
datetime.date(2008, 1, 1),
],
)
def test_values_aggregation(self):
# Refs #20782
max_rating = Book.objects.values("rating").aggregate(max_rating=Max("rating"))
self.assertEqual(max_rating["max_rating"], 5)
max_books_per_rating = (
Book.objects.values("rating")
.annotate(books_per_rating=Count("id"))
.aggregate(Max("books_per_rating"))
)
self.assertEqual(max_books_per_rating, {"books_per_rating__max": 3})
def test_ticket17424(self):
"""
Doing exclude() on a foreign model after annotate() doesn't crash.
"""
all_books = list(Book.objects.values_list("pk", flat=True).order_by("pk"))
annotated_books = Book.objects.order_by("pk").annotate(one=Count("id"))
# The value doesn't matter, we just need any negative
# constraint on a related model that's a noop.
excluded_books = annotated_books.exclude(publisher__name="__UNLIKELY_VALUE__")
# Try to generate query tree
str(excluded_books.query)
self.assertQuerySetEqual(excluded_books, all_books, lambda x: x.pk)
# Check internal state
self.assertIsNone(annotated_books.query.alias_map["aggregation_book"].join_type)
self.assertIsNone(excluded_books.query.alias_map["aggregation_book"].join_type)
def test_ticket12886(self):
"""
Aggregation over sliced queryset works correctly.
"""
qs = Book.objects.order_by("-rating")[0:3]
vals = qs.aggregate(average_top3_rating=Avg("rating"))["average_top3_rating"]
self.assertAlmostEqual(vals, 4.5, places=2)
def test_ticket11881(self):
"""
Subqueries do not needlessly contain ORDER BY, SELECT FOR UPDATE or
select_related() stuff.
"""
qs = (
Book.objects.select_for_update()
.order_by("pk")
.select_related("publisher")
.annotate(max_pk=Max("pk"))
)
with CaptureQueriesContext(connection) as captured_queries:
qs.aggregate(avg_pk=Avg("max_pk"))
self.assertEqual(len(captured_queries), 1)
qstr = captured_queries[0]["sql"].lower()
self.assertNotIn("for update", qstr)
forced_ordering = connection.ops.force_no_ordering()
if forced_ordering:
# If the backend needs to force an ordering we make sure it's
# the only "ORDER BY" clause present in the query.
self.assertEqual(
re.findall(r"order by (\w+)", qstr),
[", ".join(f[1][0] for f in forced_ordering).lower()],
)
else:
self.assertNotIn("order by", qstr)
self.assertEqual(qstr.count(" join "), 0)
def test_decimal_max_digits_has_no_effect(self):
Book.objects.all().delete()
a1 = Author.objects.first()
p1 = Publisher.objects.first()
thedate = timezone.now()
for i in range(10):
Book.objects.create(
isbn="abcde{}".format(i),
name="none",
pages=10,
rating=4.0,
price=9999.98,
contact=a1,
publisher=p1,
pubdate=thedate,
)
book = Book.objects.aggregate(price_sum=Sum("price"))
self.assertEqual(book["price_sum"], Decimal("99999.80"))
def test_nonaggregate_aggregation_throws(self):
with self.assertRaisesMessage(TypeError, "fail is not an aggregate expression"):
Book.objects.aggregate(fail=F("price"))
def test_nonfield_annotation(self):
book = Book.objects.annotate(val=Max(Value(2))).first()
self.assertEqual(book.val, 2)
book = Book.objects.annotate(
val=Max(Value(2), output_field=IntegerField())
).first()
self.assertEqual(book.val, 2)
book = Book.objects.annotate(val=Max(2, output_field=IntegerField())).first()
self.assertEqual(book.val, 2)
def test_annotation_expressions(self):
authors = Author.objects.annotate(
combined_ages=Sum(F("age") + F("friends__age"))
).order_by("name")
authors2 = Author.objects.annotate(
combined_ages=Sum("age") + Sum("friends__age")
).order_by("name")
for qs in (authors, authors2):
self.assertQuerySetEqual(
qs,
[
("Adrian Holovaty", 132),
("Brad Dayley", None),
("Jacob Kaplan-Moss", 129),
("James Bennett", 63),
("Jeffrey Forcier", 128),
("Paul Bissex", 120),
("Peter Norvig", 103),
("Stuart Russell", 103),
("Wesley J. Chun", 176),
],
lambda a: (a.name, a.combined_ages),
)
def test_aggregation_expressions(self):
a1 = Author.objects.aggregate(av_age=Sum("age") / Count("*"))
a2 = Author.objects.aggregate(av_age=Sum("age") / Count("age"))
a3 = Author.objects.aggregate(av_age=Avg("age"))
self.assertEqual(a1, {"av_age": 37})
self.assertEqual(a2, {"av_age": 37})
self.assertEqual(a3, {"av_age": Approximate(37.4, places=1)})
def test_avg_decimal_field(self):
v = Book.objects.filter(rating=4).aggregate(avg_price=(Avg("price")))[
"avg_price"
]
self.assertIsInstance(v, Decimal)
self.assertEqual(v, Approximate(Decimal("47.39"), places=2))
def test_order_of_precedence(self):
p1 = Book.objects.filter(rating=4).aggregate(avg_price=(Avg("price") + 2) * 3)
self.assertEqual(p1, {"avg_price": Approximate(Decimal("148.18"), places=2)})
p2 = Book.objects.filter(rating=4).aggregate(avg_price=Avg("price") + 2 * 3)
self.assertEqual(p2, {"avg_price": Approximate(Decimal("53.39"), places=2)})
def test_combine_different_types(self):
msg = (
"Cannot infer type of '+' expression involving these types: FloatField, "
"DecimalField. You must set output_field."
)
qs = Book.objects.annotate(sums=Sum("rating") + Sum("pages") + Sum("price"))
with self.assertRaisesMessage(FieldError, msg):
qs.first()
with self.assertRaisesMessage(FieldError, msg):
qs.first()
b1 = Book.objects.annotate(
sums=Sum(F("rating") + F("pages") + F("price"), output_field=IntegerField())
).get(pk=self.b4.pk)
self.assertEqual(b1.sums, 383)
b2 = Book.objects.annotate(
sums=Sum(F("rating") + F("pages") + F("price"), output_field=FloatField())
).get(pk=self.b4.pk)
self.assertEqual(b2.sums, 383.69)
b3 = Book.objects.annotate(
sums=Sum(F("rating") + F("pages") + F("price"), output_field=DecimalField())
).get(pk=self.b4.pk)
self.assertEqual(b3.sums, Approximate(Decimal("383.69"), places=2))
def test_complex_aggregations_require_kwarg(self):
with self.assertRaisesMessage(
TypeError, "Complex annotations require an alias"
):
Author.objects.annotate(Sum(F("age") + F("friends__age")))
with self.assertRaisesMessage(TypeError, "Complex aggregates require an alias"):
Author.objects.aggregate(Sum("age") / Count("age"))
with self.assertRaisesMessage(TypeError, "Complex aggregates require an alias"):
Author.objects.aggregate(Sum(1))
def test_aggregate_over_complex_annotation(self):
qs = Author.objects.annotate(combined_ages=Sum(F("age") + F("friends__age")))
age = qs.aggregate(max_combined_age=Max("combined_ages"))
self.assertEqual(age["max_combined_age"], 176)
age = qs.aggregate(max_combined_age_doubled=Max("combined_ages") * 2)
self.assertEqual(age["max_combined_age_doubled"], 176 * 2)
age = qs.aggregate(
max_combined_age_doubled=Max("combined_ages") + Max("combined_ages")
)
self.assertEqual(age["max_combined_age_doubled"], 176 * 2)
age = qs.aggregate(
max_combined_age_doubled=Max("combined_ages") + Max("combined_ages"),
sum_combined_age=Sum("combined_ages"),
)
self.assertEqual(age["max_combined_age_doubled"], 176 * 2)
self.assertEqual(age["sum_combined_age"], 954)
age = qs.aggregate(
max_combined_age_doubled=Max("combined_ages") + Max("combined_ages"),
sum_combined_age_doubled=Sum("combined_ages") + Sum("combined_ages"),
)
self.assertEqual(age["max_combined_age_doubled"], 176 * 2)
self.assertEqual(age["sum_combined_age_doubled"], 954 * 2)
def test_values_annotation_with_expression(self):
# ensure the F() is promoted to the group by clause
qs = Author.objects.values("name").annotate(another_age=Sum("age") + F("age"))
a = qs.get(name="Adrian Holovaty")
self.assertEqual(a["another_age"], 68)
qs = qs.annotate(friend_count=Count("friends"))
a = qs.get(name="Adrian Holovaty")
self.assertEqual(a["friend_count"], 2)
qs = (
qs.annotate(combined_age=Sum("age") + F("friends__age"))
.filter(name="Adrian Holovaty")
.order_by("-combined_age")
)
self.assertEqual(
list(qs),
[
{
"name": "Adrian Holovaty",
"another_age": 68,
"friend_count": 1,
"combined_age": 69,
},
{
"name": "Adrian Holovaty",
"another_age": 68,
"friend_count": 1,
"combined_age": 63,
},
],
)
vals = qs.values("name", "combined_age")
self.assertEqual(
list(vals),
[
{"name": "Adrian Holovaty", "combined_age": 69},
{"name": "Adrian Holovaty", "combined_age": 63},
],
)
def test_annotate_values_aggregate(self):
alias_age = (
Author.objects.annotate(age_alias=F("age"))
.values(
"age_alias",
)
.aggregate(sum_age=Sum("age_alias"))
)
age = Author.objects.values("age").aggregate(sum_age=Sum("age"))
self.assertEqual(alias_age["sum_age"], age["sum_age"])
def test_annotate_over_annotate(self):
author = (
Author.objects.annotate(age_alias=F("age"))
.annotate(sum_age=Sum("age_alias"))
.get(name="Adrian Holovaty")
)
other_author = Author.objects.annotate(sum_age=Sum("age")).get(
name="Adrian Holovaty"
)
self.assertEqual(author.sum_age, other_author.sum_age)
def test_aggregate_over_aggregate(self):
msg = "Cannot compute Avg('age_agg'): 'age_agg' is an aggregate"
with self.assertRaisesMessage(FieldError, msg):
Author.objects.aggregate(
age_agg=Sum(F("age")),
avg_age=Avg(F("age_agg")),
)
def test_annotated_aggregate_over_annotated_aggregate(self):
with self.assertRaisesMessage(
FieldError, "Cannot compute Sum('id__max'): 'id__max' is an aggregate"
):
Book.objects.annotate(Max("id")).annotate(Sum("id__max"))
class MyMax(Max):
arity = None
def as_sql(self, compiler, connection):
self.set_source_expressions(self.get_source_expressions()[0:1])
return super().as_sql(compiler, connection)
with self.assertRaisesMessage(
FieldError, "Cannot compute Max('id__max'): 'id__max' is an aggregate"
):
Book.objects.annotate(Max("id")).annotate(my_max=MyMax("id__max", "price"))
def test_multi_arg_aggregate(self):
class MultiArgAgg(Max):
output_field = DecimalField()
arity = None
def as_sql(self, compiler, connection, **extra_context):
copy = self.copy()
# Most database backends do not support compiling multiple
# arguments on the Max aggregate, and that isn't what is being
# tested here anyway. To avoid errors, the extra argument is
# just dropped.
copy.set_source_expressions(
copy.get_source_expressions()[0:1] + [None, None]
)
return super(MultiArgAgg, copy).as_sql(compiler, connection)
with self.assertRaisesMessage(TypeError, "Complex aggregates require an alias"):
Book.objects.aggregate(MultiArgAgg("pages", "price"))
with self.assertRaisesMessage(
TypeError, "Complex annotations require an alias"
):
Book.objects.annotate(MultiArgAgg("pages", "price"))
Book.objects.aggregate(max_field=MultiArgAgg("pages", "price"))
def test_add_implementation(self):
class MySum(Sum):
pass
# test completely changing how the output is rendered
def lower_case_function_override(self, compiler, connection):
sql, params = compiler.compile(self.source_expressions[0])
substitutions = {
"function": self.function.lower(),
"expressions": sql,
"distinct": "",
"filter": "",
"order_by": "",
}
substitutions.update(self.extra)
return self.template % substitutions, params
setattr(MySum, "as_" + connection.vendor, lower_case_function_override)
qs = Book.objects.annotate(
sums=MySum(
F("rating") + F("pages") + F("price"), output_field=IntegerField()
)
)
self.assertEqual(str(qs.query).count("sum("), 1)
b1 = qs.get(pk=self.b4.pk)
self.assertEqual(b1.sums, 383)
# test changing the dict and delegating
def lower_case_function_super(self, compiler, connection):
self.extra["function"] = self.function.lower()
return super(MySum, self).as_sql(compiler, connection)
setattr(MySum, "as_" + connection.vendor, lower_case_function_super)
qs = Book.objects.annotate(
sums=MySum(
F("rating") + F("pages") + F("price"), output_field=IntegerField()
)
)
self.assertEqual(str(qs.query).count("sum("), 1)
b1 = qs.get(pk=self.b4.pk)
self.assertEqual(b1.sums, 383)
# test overriding all parts of the template
def be_evil(self, compiler, connection):
substitutions = {
"function": "MAX",
"expressions": "2",
"distinct": "",
"filter": "",
"order_by": "",
}
substitutions.update(self.extra)
return self.template % substitutions, ()
setattr(MySum, "as_" + connection.vendor, be_evil)
qs = Book.objects.annotate(
sums=MySum(
F("rating") + F("pages") + F("price"), output_field=IntegerField()
)
)
self.assertEqual(str(qs.query).count("MAX("), 1)
b1 = qs.get(pk=self.b4.pk)
self.assertEqual(b1.sums, 2)
def test_complex_values_aggregation(self):
max_rating = Book.objects.values("rating").aggregate(
double_max_rating=Max("rating") + Max("rating")
)
self.assertEqual(max_rating["double_max_rating"], 5 * 2)
max_books_per_rating = (
Book.objects.values("rating")
.annotate(books_per_rating=Count("id") + 5)
.aggregate(Max("books_per_rating"))
)
self.assertEqual(max_books_per_rating, {"books_per_rating__max": 3 + 5})
def test_expression_on_aggregation(self):
qs = (
Publisher.objects.annotate(
price_or_median=Greatest(
Avg("book__rating", output_field=DecimalField()), Avg("book__price")
)
)
.filter(price_or_median__gte=F("num_awards"))
.order_by("num_awards")
)
self.assertQuerySetEqual(qs, [1, 3, 7, 9], lambda v: v.num_awards)
qs2 = (
Publisher.objects.annotate(
rating_or_num_awards=Greatest(
Avg("book__rating"), F("num_awards"), output_field=FloatField()
)
)
.filter(rating_or_num_awards__gt=F("num_awards"))
.order_by("num_awards")
)
self.assertQuerySetEqual(qs2, [1, 3], lambda v: v.num_awards)
def test_arguments_must_be_expressions(self):
msg = "QuerySet.aggregate() received non-expression(s): %s."
with self.assertRaisesMessage(TypeError, msg % FloatField()):
Book.objects.aggregate(FloatField())
with self.assertRaisesMessage(TypeError, msg % True):
Book.objects.aggregate(is_book=True)
with self.assertRaisesMessage(
TypeError, msg % ", ".join([str(FloatField()), "True"])
):
Book.objects.aggregate(FloatField(), Avg("price"), is_book=True)
def test_aggregation_subquery_annotation(self):
"""Subquery annotations are excluded from the GROUP BY if they are
not explicitly grouped against."""
latest_book_pubdate_qs = (
Book.objects.filter(publisher=OuterRef("pk"))
.order_by("-pubdate")
.values("pubdate")[:1]
)
publisher_qs = Publisher.objects.annotate(
latest_book_pubdate=Subquery(latest_book_pubdate_qs),
).annotate(count=Count("book"))
with self.assertNumQueries(1) as ctx:
list(publisher_qs)
self.assertEqual(ctx[0]["sql"].count("SELECT"), 2)
# The GROUP BY should not be by alias either.
self.assertEqual(ctx[0]["sql"].lower().count("latest_book_pubdate"), 1)
def test_aggregation_subquery_annotation_exists(self):
latest_book_pubdate_qs = (
Book.objects.filter(publisher=OuterRef("pk"))
.order_by("-pubdate")
.values("pubdate")[:1]
)
publisher_qs = Publisher.objects.annotate(
latest_book_pubdate=Subquery(latest_book_pubdate_qs),
count=Count("book"),
)
self.assertTrue(publisher_qs.exists())
def test_aggregation_filter_exists(self):
publishers_having_more_than_one_book_qs = (
Book.objects.values("publisher")
.annotate(cnt=Count("isbn"))
.filter(cnt__gt=1)
)
query = publishers_having_more_than_one_book_qs.query.exists()
_, _, group_by = query.get_compiler(connection=connection).pre_sql_setup()
self.assertEqual(len(group_by), 1)
def test_aggregation_exists_annotation(self):
published_books = Book.objects.filter(publisher=OuterRef("pk"))
publisher_qs = Publisher.objects.annotate(
published_book=Exists(published_books),
count=Count("book"),
).values_list("name", flat=True)
self.assertCountEqual(
list(publisher_qs),
[
"Apress",
"Morgan Kaufmann",
"Jonno's House of Books",
"Prentice Hall",
"Sams",
],
)
def test_aggregation_subquery_annotation_values(self):
"""
Subquery annotations and external aliases are excluded from the GROUP
BY if they are not selected.
"""
books_qs = (
Book.objects.annotate(
first_author_the_same_age=Subquery(
Author.objects.filter(
age=OuterRef("contact__friends__age"),
)
.order_by("age")
.values("id")[:1],
)
)
.filter(
publisher=self.p1,
first_author_the_same_age__isnull=False,
)
.annotate(
min_age=Min("contact__friends__age"),
)
.values("name", "min_age")
.order_by("name")
)
self.assertEqual(
list(books_qs),
[
{"name": "Practical Django Projects", "min_age": 34},
{
"name": (
"The Definitive Guide to Django: Web Development Done Right"
),
"min_age": 29,
},
],
)
@skipUnlessDBFeature("supports_subqueries_in_group_by")
def test_aggregation_subquery_annotation_values_collision(self):
books_rating_qs = Book.objects.filter(
pk=OuterRef("book"),
).values("rating")
publisher_qs = (
Publisher.objects.filter(
book__contact__age__gt=20,
)
.annotate(
rating=Subquery(books_rating_qs),
)
.values("rating")
.annotate(total_count=Count("*"))
.order_by("rating")
)
self.assertEqual(
list(publisher_qs),
[
{"rating": 3.0, "total_count": 1},
{"rating": 4.0, "total_count": 3},
{"rating": 4.5, "total_count": 1},
{"rating": 5.0, "total_count": 1},
],
)
@skipUnlessDBFeature("supports_subqueries_in_group_by")
def test_aggregation_subquery_annotation_multivalued(self):
"""
Subquery annotations must be included in the GROUP BY if they use
potentially multivalued relations (contain the LOOKUP_SEP).
"""
subquery_qs = Author.objects.filter(
pk=OuterRef("pk"),
book__name=OuterRef("book__name"),
).values("pk")
author_qs = Author.objects.annotate(
subquery_id=Subquery(subquery_qs),
).annotate(count=Count("book"))
self.assertEqual(author_qs.count(), Author.objects.count())
def test_aggregation_order_by_not_selected_annotation_values(self):
result_asc = [
self.b4.pk,
self.b3.pk,
self.b1.pk,
self.b2.pk,
self.b5.pk,
self.b6.pk,
]
result_desc = result_asc[::-1]
tests = [
("min_related_age", result_asc),
("-min_related_age", result_desc),
(F("min_related_age"), result_asc),
(F("min_related_age").asc(), result_asc),
(F("min_related_age").desc(), result_desc),
]
for ordering, expected_result in tests:
with self.subTest(ordering=ordering):
books_qs = (
Book.objects.annotate(
min_age=Min("authors__age"),
)
.annotate(
min_related_age=Coalesce("min_age", "contact__age"),
)
.order_by(ordering)
.values_list("pk", flat=True)
)
self.assertEqual(list(books_qs), expected_result)
@skipUnlessDBFeature("supports_subqueries_in_group_by")
def test_group_by_subquery_annotation(self):
"""
Subquery annotations are included in the GROUP BY if they are
grouped against.
"""
long_books_count_qs = (
Book.objects.filter(
publisher=OuterRef("pk"),
pages__gt=400,
)
.values("publisher")
.annotate(count=Count("pk"))
.values("count")
)
groups = [
Subquery(long_books_count_qs),
long_books_count_qs,
long_books_count_qs.query,
]
for group in groups:
with self.subTest(group=group.__class__.__name__):
long_books_count_breakdown = Publisher.objects.values_list(
group,
).annotate(total=Count("*"))
self.assertEqual(dict(long_books_count_breakdown), {None: 1, 1: 4})
@skipUnlessDBFeature("supports_subqueries_in_group_by")
def test_group_by_exists_annotation(self):
"""
Exists annotations are included in the GROUP BY if they are
grouped against.
"""
long_books_qs = Book.objects.filter(
publisher=OuterRef("pk"),
pages__gt=800,
)
has_long_books_breakdown = Publisher.objects.values_list(
Exists(long_books_qs),
).annotate(total=Count("*"))
self.assertEqual(dict(has_long_books_breakdown), {True: 2, False: 3})
def test_group_by_nested_expression_with_params(self):
greatest_pages_param = "greatest_pages"
if connection.vendor == "mysql" and connection.features.supports_any_value:
greatest_pages_param = AnyValue("greatest_pages")
books_qs = (
Book.objects.annotate(greatest_pages=Greatest("pages", Value(600)))
.values(
"greatest_pages",
)
.annotate(
min_pages=Min("pages"),
least=Least("min_pages", greatest_pages_param),
)
.values_list("least", flat=True)
)
self.assertCountEqual(books_qs, [300, 946, 1132])
@skipUnlessDBFeature("supports_any_value")
def test_any_value(self):
books_qs = (
Book.objects.values(greatest_pages=Greatest("pages", 600))
.annotate(
pubdate_year=AnyValue("pubdate__year"),
)
.values_list("pubdate_year", flat=True)
.order_by("pubdate_year")
)
self.assertCountEqual(books_qs[0:2], [1991, 1995])
self.assertIn(books_qs[2], [2007, 2008])
@skipUnlessDBFeature("supports_any_value")
def test_any_value_filter(self):
books_qs = (
Book.objects.values(greatest_pages=Greatest("pages", 600))
.annotate(
pubdate_year=AnyValue("pubdate__year", filter=Q(rating__lte=4.5)),
)
.values_list("pubdate_year", flat=True)
)
self.assertCountEqual(books_qs, [2007, 1995, None])
@skipUnlessDBFeature("supports_any_value")
def test_any_value_aggregate_clause(self):
books_qs = (
Book.objects.values(greatest_pages=Greatest("pages", 600))
.annotate(
num_authors=Count("authors"),
pages_per_author=(
AnyValue("greatest_pages") / (Cast("num_authors", FloatField()))
),
)
.values_list("pages_per_author", flat=True)
.order_by("pages_per_author")
)
self.assertAlmostEqual(books_qs[0], 600 / 7, places=4)
self.assertAlmostEqual(books_qs[1], 1132 / 2, places=4)
self.assertAlmostEqual(books_qs[2], 946 / 1, places=4)
aggregate_qs = books_qs.aggregate(Avg("pages_per_author"))
self.assertAlmostEqual(
aggregate_qs["pages_per_author__avg"],
((600 / 7) + (1132 / 2) + (946 / 1)) / 3,
places=4,
)
@skipIfDBFeature("supports_any_value")
def test_any_value_not_supported(self):
message = "ANY_VALUE is not supported on this database backend."
with self.assertRaisesMessage(NotSupportedError, message):
Book.objects.aggregate(AnyValue("rating"))
@skipUnlessDBFeature("supports_subqueries_in_group_by")
def test_aggregation_subquery_annotation_related_field(self):
publisher = Publisher.objects.create(name=self.a9.name, num_awards=2)
book = Book.objects.create(
isbn="159059999",
name="Test book.",
pages=819,
rating=2.5,
price=Decimal("14.44"),
contact=self.a9,
publisher=publisher,
pubdate=datetime.date(2019, 12, 6),
)
book.authors.add(self.a5, self.a6, self.a7)
books_qs = (
Book.objects.annotate(
contact_publisher=Subquery(
Publisher.objects.filter(
pk=OuterRef("publisher"),
name=OuterRef("contact__name"),
).values("name")[:1],
)
)
.filter(
contact_publisher__isnull=False,
)
.annotate(count=Count("authors"))
)
with self.assertNumQueries(1) as ctx:
self.assertSequenceEqual(books_qs, [book])
if connection.features.allows_group_by_select_index:
self.assertEqual(ctx[0]["sql"].count("SELECT"), 3)
@skipUnlessDBFeature("supports_subqueries_in_group_by")
def test_aggregation_nested_subquery_outerref(self):
publisher_with_same_name = Publisher.objects.filter(
id__in=Subquery(
Publisher.objects.filter(
name=OuterRef(OuterRef("publisher__name")),
).values("id"),
),
).values(publisher_count=Count("id"))[:1]
books_breakdown = Book.objects.annotate(
publisher_count=Subquery(publisher_with_same_name),
authors_count=Count("authors"),
).values_list("publisher_count", flat=True)
self.assertSequenceEqual(books_breakdown, [1] * 6)
def test_aggregation_exists_multivalued_outeref(self):
self.assertCountEqual(
Publisher.objects.annotate(
books_exists=Exists(
Book.objects.filter(publisher=OuterRef("book__publisher"))
),
books_count=Count("book"),
),
Publisher.objects.all(),
)
def test_filter_in_subquery_or_aggregation(self):
"""
Filtering against an aggregate requires the usage of the HAVING clause.
If such a filter is unionized to a non-aggregate one the latter will
also need to be moved to the HAVING clause and have its grouping
columns used in the GROUP BY.
When this is done with a subquery the specialized logic in charge of
using outer reference columns to group should be used instead of the
subquery itself as the latter might return multiple rows.
"""
authors = Author.objects.annotate(
Count("book"),
).filter(Q(book__count__gt=0) | Q(pk__in=Book.objects.values("authors")))
self.assertCountEqual(authors, Author.objects.all())
def test_aggregation_random_ordering(self):
"""Random() is not included in the GROUP BY when used for ordering."""
authors = Author.objects.annotate(contact_count=Count("book")).order_by("?")
self.assertQuerySetEqual(
authors,
[
("Adrian Holovaty", 1),
("Jacob Kaplan-Moss", 1),
("Brad Dayley", 1),
("James Bennett", 1),
("Jeffrey Forcier", 1),
("Paul Bissex", 1),
("Wesley J. Chun", 1),
("Stuart Russell", 1),
("Peter Norvig", 2),
],
lambda a: (a.name, a.contact_count),
ordered=False,
)
def test_order_by_aggregate_transform(self):
class Mod100(Mod, Transform):
def __init__(self, expr):
super().__init__(expr, 100)
sum_field = IntegerField()
sum_field.register_lookup(Mod100, "mod100")
publisher_pages = (
Book.objects.values("publisher")
.annotate(sum_pages=Sum("pages", output_field=sum_field))
.order_by("sum_pages__mod100")
)
self.assertQuerySetEqual(
publisher_pages,
[
{"publisher": self.p2.id, "sum_pages": 528},
{"publisher": self.p4.id, "sum_pages": 946},
{"publisher": self.p1.id, "sum_pages": 747},
{"publisher": self.p3.id, "sum_pages": 1482},
],
)
def test_order_by_aggregate_default_alias(self):
publisher_books = (
Publisher.objects.values("book")
.annotate(Count("book"))
.order_by("book__count", "book__id")
.values_list("book", flat=True)
)
self.assertQuerySetEqual(
publisher_books,
[
None,
self.b1.id,
self.b2.id,
self.b3.id,
self.b4.id,
self.b5.id,
self.b6.id,
],
)
def test_empty_result_optimization(self):
with self.assertNumQueries(0):
self.assertEqual(
Publisher.objects.none().aggregate(
sum_awards=Sum("num_awards"),
books_count=Count("book"),
all_names=StringAgg("name", Value(",")),
),
{
"sum_awards": None,
"books_count": 0,
"all_names": None,
},
)
# Expression without empty_result_set_value forces queries to be
# executed even if they would return an empty result set.
raw_books_count = Func("book", function="COUNT")
raw_books_count.contains_aggregate = True
with self.assertNumQueries(1):
self.assertEqual(
Publisher.objects.none().aggregate(
sum_awards=Sum("num_awards"),
books_count=raw_books_count,
),
{
"sum_awards": None,
"books_count": 0,
},
)
def test_coalesced_empty_result_set(self):
with self.assertNumQueries(0):
self.assertEqual(
Publisher.objects.none().aggregate(
sum_awards=Coalesce(Sum("num_awards"), 0),
)["sum_awards"],
0,
)
# Multiple expressions.
with self.assertNumQueries(0):
self.assertEqual(
Publisher.objects.none().aggregate(
sum_awards=Coalesce(Sum("num_awards"), None, 0),
)["sum_awards"],
0,
)
# Nested coalesce.
with self.assertNumQueries(0):
self.assertEqual(
Publisher.objects.none().aggregate(
sum_awards=Coalesce(Coalesce(Sum("num_awards"), None), 0),
)["sum_awards"],
0,
)
# Expression coalesce.
with self.assertNumQueries(1):
self.assertIsInstance(
Store.objects.none().aggregate(
latest_opening=Coalesce(
Max("original_opening"),
RawSQL("CURRENT_TIMESTAMP", []),
),
)["latest_opening"],
datetime.datetime,
)
def test_aggregation_default_unsupported_by_count(self):
msg = "Count does not allow default."
with self.assertRaisesMessage(TypeError, msg):
Count("age", default=0)
def test_aggregation_default_unset(self):
for Aggregate in [Avg, Max, Min, StdDev, Sum, Variance]:
with self.subTest(Aggregate):
result = Author.objects.filter(age__gt=100).aggregate(
value=Aggregate("age"),
)
self.assertIsNone(result["value"])
def test_aggregation_default_zero(self):
for Aggregate in [Avg, Max, Min, StdDev, Sum, Variance]:
with self.subTest(Aggregate):
result = Author.objects.filter(age__gt=100).aggregate(
value=Aggregate("age", default=0),
)
self.assertEqual(result["value"], 0)
def test_aggregation_default_integer(self):
for Aggregate in [Avg, Max, Min, StdDev, Sum, Variance]:
with self.subTest(Aggregate):
result = Author.objects.filter(age__gt=100).aggregate(
value=Aggregate("age", default=21),
)
self.assertEqual(result["value"], 21)
def test_aggregation_default_expression(self):
for Aggregate in [Avg, Max, Min, StdDev, Sum, Variance]:
with self.subTest(Aggregate):
result = Author.objects.filter(age__gt=100).aggregate(
value=Aggregate("age", default=Value(5) * Value(7)),
)
self.assertEqual(result["value"], 35)
def test_stringagg_default_value(self):
result = Author.objects.filter(age__gt=100).aggregate(
value=StringAgg("name", delimiter=Value(";"), default=Value("<empty>")),
)
self.assertEqual(result["value"], "<empty>")
def test_aggregation_default_group_by(self):
qs = (
Publisher.objects.values("name")
.annotate(
books=Count("book"),
pages=Sum("book__pages", default=0),
)
.filter(books=0)
)
self.assertSequenceEqual(
qs,
[{"name": "Jonno's House of Books", "books": 0, "pages": 0}],
)
def test_aggregation_default_compound_expression(self):
# Scale rating to a percentage; default to 50% if no books published.
formula = Avg("book__rating", default=2.5) * 20.0
queryset = Publisher.objects.annotate(rating=formula).order_by("name")
self.assertSequenceEqual(
queryset.values("name", "rating"),
[
{"name": "Apress", "rating": 85.0},
{"name": "Jonno's House of Books", "rating": 50.0},
{"name": "Morgan Kaufmann", "rating": 100.0},
{"name": "Prentice Hall", "rating": 80.0},
{"name": "Sams", "rating": 60.0},
],
)
def test_aggregation_default_using_time_from_python(self):
expr = Min(
"store__friday_night_closing",
filter=~Q(store__name="Amazon.com"),
default=datetime.time(17),
)
if connection.vendor == "mysql":
# Workaround for #30224 for MySQL & MariaDB.
expr.default = Cast(expr.default, TimeField())
queryset = Book.objects.annotate(oldest_store_opening=expr).order_by("isbn")
self.assertSequenceEqual(
queryset.values("isbn", "oldest_store_opening"),
[
{"isbn": "013235613", "oldest_store_opening": datetime.time(21, 30)},
{
"isbn": "013790395",
"oldest_store_opening": datetime.time(23, 59, 59),
},
{"isbn": "067232959", "oldest_store_opening": datetime.time(17)},
{"isbn": "155860191", "oldest_store_opening": datetime.time(21, 30)},
{
"isbn": "159059725",
"oldest_store_opening": datetime.time(23, 59, 59),
},
{"isbn": "159059996", "oldest_store_opening": datetime.time(21, 30)},
],
)
def test_aggregation_default_using_time_from_database(self):
now = timezone.now().astimezone(datetime.UTC)
expr = Min(
"store__friday_night_closing",
filter=~Q(store__name="Amazon.com"),
default=TruncHour(NowUTC(), output_field=TimeField()),
)
queryset = Book.objects.annotate(oldest_store_opening=expr).order_by("isbn")
self.assertSequenceEqual(
queryset.values("isbn", "oldest_store_opening"),
[
{"isbn": "013235613", "oldest_store_opening": datetime.time(21, 30)},
{
"isbn": "013790395",
"oldest_store_opening": datetime.time(23, 59, 59),
},
{"isbn": "067232959", "oldest_store_opening": datetime.time(now.hour)},
{"isbn": "155860191", "oldest_store_opening": datetime.time(21, 30)},
{
"isbn": "159059725",
"oldest_store_opening": datetime.time(23, 59, 59),
},
{"isbn": "159059996", "oldest_store_opening": datetime.time(21, 30)},
],
)
def test_aggregation_default_using_date_from_python(self):
expr = Min("book__pubdate", default=datetime.date(1970, 1, 1))
if connection.vendor == "mysql":
# Workaround for #30224 for MySQL & MariaDB.
expr.default = Cast(expr.default, DateField())
queryset = Publisher.objects.annotate(earliest_pubdate=expr).order_by("name")
self.assertSequenceEqual(
queryset.values("name", "earliest_pubdate"),
[
{"name": "Apress", "earliest_pubdate": datetime.date(2007, 12, 6)},
{
"name": "Jonno's House of Books",
"earliest_pubdate": datetime.date(1970, 1, 1),
},
{
"name": "Morgan Kaufmann",
"earliest_pubdate": datetime.date(1991, 10, 15),
},
{
"name": "Prentice Hall",
"earliest_pubdate": datetime.date(1995, 1, 15),
},
{"name": "Sams", "earliest_pubdate": datetime.date(2008, 3, 3)},
],
)
def test_aggregation_default_using_date_from_database(self):
now = timezone.now().astimezone(datetime.UTC)
expr = Min("book__pubdate", default=TruncDate(NowUTC()))
queryset = Publisher.objects.annotate(earliest_pubdate=expr).order_by("name")
self.assertSequenceEqual(
queryset.values("name", "earliest_pubdate"),
[
{"name": "Apress", "earliest_pubdate": datetime.date(2007, 12, 6)},
{"name": "Jonno's House of Books", "earliest_pubdate": now.date()},
{
"name": "Morgan Kaufmann",
"earliest_pubdate": datetime.date(1991, 10, 15),
},
{
"name": "Prentice Hall",
"earliest_pubdate": datetime.date(1995, 1, 15),
},
{"name": "Sams", "earliest_pubdate": datetime.date(2008, 3, 3)},
],
)
def test_aggregation_default_using_datetime_from_python(self):
expr = Min(
"store__original_opening",
filter=~Q(store__name="Amazon.com"),
default=datetime.datetime(1970, 1, 1),
)
if connection.vendor == "mysql":
# Workaround for #30224 for MySQL & MariaDB.
expr.default = Cast(expr.default, DateTimeField())
queryset = Book.objects.annotate(oldest_store_opening=expr).order_by("isbn")
self.assertSequenceEqual(
queryset.values("isbn", "oldest_store_opening"),
[
{
"isbn": "013235613",
"oldest_store_opening": datetime.datetime(1945, 4, 25, 16, 24, 14),
},
{
"isbn": "013790395",
"oldest_store_opening": datetime.datetime(2001, 3, 15, 11, 23, 37),
},
{
"isbn": "067232959",
"oldest_store_opening": datetime.datetime(1970, 1, 1),
},
{
"isbn": "155860191",
"oldest_store_opening": datetime.datetime(1945, 4, 25, 16, 24, 14),
},
{
"isbn": "159059725",
"oldest_store_opening": datetime.datetime(2001, 3, 15, 11, 23, 37),
},
{
"isbn": "159059996",
"oldest_store_opening": datetime.datetime(1945, 4, 25, 16, 24, 14),
},
],
)
def test_aggregation_default_using_datetime_from_database(self):
now = timezone.now().astimezone(datetime.UTC)
expr = Min(
"store__original_opening",
filter=~Q(store__name="Amazon.com"),
default=TruncHour(NowUTC(), output_field=DateTimeField()),
)
queryset = Book.objects.annotate(oldest_store_opening=expr).order_by("isbn")
self.assertSequenceEqual(
queryset.values("isbn", "oldest_store_opening"),
[
{
"isbn": "013235613",
"oldest_store_opening": datetime.datetime(1945, 4, 25, 16, 24, 14),
},
{
"isbn": "013790395",
"oldest_store_opening": datetime.datetime(2001, 3, 15, 11, 23, 37),
},
{
"isbn": "067232959",
"oldest_store_opening": now.replace(
minute=0, second=0, microsecond=0, tzinfo=None
),
},
{
"isbn": "155860191",
"oldest_store_opening": datetime.datetime(1945, 4, 25, 16, 24, 14),
},
{
"isbn": "159059725",
"oldest_store_opening": datetime.datetime(2001, 3, 15, 11, 23, 37),
},
{
"isbn": "159059996",
"oldest_store_opening": datetime.datetime(1945, 4, 25, 16, 24, 14),
},
],
)
def test_aggregation_default_using_duration_from_python(self):
result = Publisher.objects.filter(num_awards__gt=3).aggregate(
value=Sum("duration", default=datetime.timedelta(0)),
)
self.assertEqual(result["value"], datetime.timedelta(0))
def test_aggregation_default_using_duration_from_database(self):
result = Publisher.objects.filter(num_awards__gt=3).aggregate(
value=Sum("duration", default=Now() - Now()),
)
self.assertEqual(result["value"], datetime.timedelta(0))
def test_aggregation_default_using_decimal_from_python(self):
result = Book.objects.filter(rating__lt=3.0).aggregate(
value=Sum("price", default=Decimal("0.00")),
)
self.assertEqual(result["value"], Decimal("0.00"))
def test_aggregation_default_using_decimal_from_database(self):
result = Book.objects.filter(rating__lt=3.0).aggregate(
value=Sum("price", default=Pi()),
)
self.assertAlmostEqual(result["value"], Decimal.from_float(math.pi), places=6)
def test_aggregation_default_passed_another_aggregate(self):
result = Book.objects.aggregate(
value=Sum("price", filter=Q(rating__lt=3.0), default=Avg("pages") / 10.0),
)
self.assertAlmostEqual(result["value"], Decimal("61.72"), places=2)
def test_aggregation_default_after_annotation(self):
result = Publisher.objects.annotate(
double_num_awards=F("num_awards") * 2,
).aggregate(value=Sum("double_num_awards", default=0))
self.assertEqual(result["value"], 40)
def test_aggregation_default_not_in_aggregate(self):
result = Publisher.objects.annotate(
avg_rating=Avg("book__rating", default=2.5),
).aggregate(Sum("num_awards"))
self.assertEqual(result["num_awards__sum"], 20)
def test_exists_none_with_aggregate(self):
qs = Book.objects.annotate(
count=Count("id"),
exists=Exists(Author.objects.none()),
)
self.assertEqual(len(qs), 6)
def test_alias_sql_injection(self):
crafted_alias = """injected_name" from "aggregation_author"; --"""
msg = (
"Column aliases cannot contain whitespace characters, hashes, quotation "
"marks, semicolons, or SQL comments."
)
with self.assertRaisesMessage(ValueError, msg):
Author.objects.aggregate(**{crafted_alias: Avg("age")})
def test_exists_extra_where_with_aggregate(self):
qs = Book.objects.annotate(
count=Count("id"),
exists=Exists(Author.objects.extra(where=["1=0"])),
)
self.assertEqual(len(qs), 6)
def test_multiple_aggregate_references(self):
aggregates = Author.objects.aggregate(
total_books=Count("book"),
coalesced_total_books=Coalesce("total_books", 0),
)
self.assertEqual(
aggregates,
{
"total_books": 10,
"coalesced_total_books": 10,
},
)
def test_group_by_transform_column(self):
self.assertSequenceEqual(
Store.objects.values(
"original_opening__date",
"name",
)
.annotate(Count("books"))
.order_by("name"),
[
{
"original_opening__date": datetime.date(1994, 4, 23),
"name": "Amazon.com",
"books__count": 6,
},
{
"original_opening__date": datetime.date(2001, 3, 15),
"name": "Books.com",
"books__count": 4,
},
{
"original_opening__date": datetime.date(1945, 4, 25),
"name": "Mamma and Pappa's Books",
"books__count": 3,
},
],
)
def test_group_by_reference_subquery(self):
author_qs = (
Author.objects.annotate(publisher_id=F("book__publisher"))
.values("publisher_id")
.annotate(cnt=Count("*"))
.values("publisher_id")
)
qs = Publisher.objects.filter(pk__in=author_qs)
self.assertCountEqual(qs, [self.p1, self.p2, self.p3, self.p4])
def test_having_with_no_group_by(self):
author_qs = (
Author.objects.values(static_value=Value("static-value"))
.annotate(sum=Sum("age"))
.filter(sum__gte=0)
.values_list("sum", flat=True)
)
self.assertEqual(list(author_qs), [337])
def test_aggregate_arity(self):
funcs_with_inherited_constructors = [Avg, Max, Min, Sum]
msg = "takes exactly 1 argument (2 given)"
for function in funcs_with_inherited_constructors:
with (
self.subTest(function=function),
self.assertRaisesMessage(TypeError, msg),
):
function(Value(1), Value(2))
funcs_with_custom_constructors = [Count, StdDev, Variance]
for function in funcs_with_custom_constructors:
with self.subTest(function=function):
# Extra arguments are rejected via the constructor.
with self.assertRaises(TypeError):
function(Value(1), True, Value(2))
# If the constructor is skipped, the arity check runs.
func_instance = function(Value(1), True)
with self.assertRaisesMessage(TypeError, msg):
super(function, func_instance).__init__(Value(1), Value(2))
def test_string_agg_requires_delimiter(self):
with self.assertRaises(TypeError):
Book.objects.aggregate(stringagg=StringAgg("name"))
def test_string_agg_escapes_delimiter(self):
values = Publisher.objects.aggregate(
stringagg=StringAgg("name", delimiter=Value("'"))
)
self.assertEqual(
values,
{
"stringagg": "Apress'Sams'Prentice Hall'Morgan Kaufmann'Jonno's House "
"of Books",
},
)
@skipUnlessDBFeature("supports_aggregate_order_by_clause")
def test_string_agg_order_by(self):
order_by_test_cases = (
(
F("original_opening").desc(),
"Books.com;Amazon.com;Mamma and Pappa's Books",
),
(
F("original_opening").asc(),
"Mamma and Pappa's Books;Amazon.com;Books.com",
),
(F("original_opening"), "Mamma and Pappa's Books;Amazon.com;Books.com"),
("original_opening", "Mamma and Pappa's Books;Amazon.com;Books.com"),
("-original_opening", "Books.com;Amazon.com;Mamma and Pappa's Books"),
(
Concat("original_opening", Value("@")),
"Mamma and Pappa's Books;Amazon.com;Books.com",
),
(
Concat("original_opening", Value("@")).desc(),
"Books.com;Amazon.com;Mamma and Pappa's Books",
),
)
for order_by, expected_output in order_by_test_cases:
with self.subTest(order_by=order_by, expected_output=expected_output):
values = Store.objects.aggregate(
stringagg=StringAgg("name", delimiter=Value(";"), order_by=order_by)
)
self.assertEqual(values, {"stringagg": expected_output})
@skipIfDBFeature("supports_aggregate_order_by_clause")
def test_string_agg_order_by_is_not_supported(self):
message = (
"This database backend does not support specifying an order on aggregates."
)
with self.assertRaisesMessage(NotSupportedError, message):
Store.objects.aggregate(
stringagg=StringAgg(
"name",
delimiter=Value(";"),
order_by="original_opening",
)
)
def test_string_agg_filter(self):
values = Book.objects.aggregate(
stringagg=StringAgg(
"name",
delimiter=Value(";"),
filter=Q(name__startswith="P"),
)
)
expected_values = {
"stringagg": "Practical Django Projects;"
"Python Web Development with Django;Paradigms of Artificial "
"Intelligence Programming: Case Studies in Common Lisp",
}
self.assertEqual(values, expected_values)
@skipUnlessDBFeature("supports_aggregate_order_by_clause")
def test_string_agg_filter_outerref(self):
values = (
Publisher.objects.annotate(
stringagg=Subquery(
Book.objects.annotate(
stringagg=StringAgg(
"name",
delimiter=Value(";"),
order_by=OuterRef("num_awards"),
)
).values("stringagg")[:1]
)
)
.values("stringagg")
.order_by("id")
)
self.assertQuerySetEqual(
values,
[
{
"stringagg": "The Definitive Guide to Django: "
"Web Development Done Right"
}
]
* 5,
)
@skipUnlessDBFeature("supports_json_field", "supports_aggregate_order_by_clause")
def test_string_agg_jsonfield_order_by(self):
Employee.objects.bulk_create(
[
Employee(work_day_preferences={"Monday": "morning"}),
Employee(work_day_preferences={"Monday": "afternoon"}),
]
)
values = Employee.objects.aggregate(
stringagg=StringAgg(
KeyTextTransform("Monday", "work_day_preferences"),
delimiter=Value(","),
order_by=KeyTextTransform(
"Monday",
"work_day_preferences",
),
output_field=CharField(),
),
)
self.assertEqual(values, {"stringagg": "afternoon,morning"})
def test_string_agg_filter_in_subquery(self):
aggregate = StringAgg(
"authors__name",
delimiter=Value(";"),
filter=~Q(authors__name__startswith="J"),
)
subquery = (
Book.objects.filter(
pk=OuterRef("pk"),
)
.annotate(agg=aggregate)
.values("agg")
)
values = list(
Book.objects.annotate(
agg=Subquery(subquery),
).values_list("agg", flat=True)
)
expected_values = [
"Adrian Holovaty",
"Brad Dayley",
"Paul Bissex;Wesley J. Chun",
"Peter Norvig;Stuart Russell",
"Peter Norvig",
"" if connection.features.interprets_empty_strings_as_nulls else None,
]
self.assertQuerySetEqual(expected_values, values, ordered=False)
@skipUnlessDBFeature("supports_aggregate_order_by_clause")
def test_order_by_in_subquery(self):
aggregate = StringAgg(
"authors__name",
delimiter=Value(";"),
order_by="authors__name",
)
subquery = (
Book.objects.filter(
pk=OuterRef("pk"),
)
.annotate(agg=aggregate)
.values("agg")
)
values = list(
Book.objects.annotate(
agg=Subquery(subquery),
)
.order_by("agg")
.values_list("agg", flat=True)
)
expected_values = [
"Adrian Holovaty;Jacob Kaplan-Moss",
"Brad Dayley",
"James Bennett",
"Jeffrey Forcier;Paul Bissex;Wesley J. Chun",
"Peter Norvig",
"Peter Norvig;Stuart Russell",
]
self.assertEqual(expected_values, values)
| AggregateTestCase |
python | doocs__leetcode | solution/1500-1599/1546.Maximum Number of Non-Overlapping Subarrays With Sum Equals Target/Solution.py | {
"start": 0,
"end": 426
} | class ____:
def maxNonOverlapping(self, nums: List[int], target: int) -> int:
ans = 0
i, n = 0, len(nums)
while i < n:
s = 0
vis = {0}
while i < n:
s += nums[i]
if s - target in vis:
ans += 1
break
i += 1
vis.add(s)
i += 1
return ans
| Solution |
python | PrefectHQ__prefect | src/integrations/prefect-dbt/prefect_dbt/cloud/exceptions.py | {
"start": 523,
"end": 630
} | class ____(DbtCloudException):
"""Raised when a triggered job run is cancelled"""
| DbtCloudJobRunCancelled |
python | apache__airflow | providers/amazon/tests/unit/amazon/aws/operators/test_datasync.py | {
"start": 14805,
"end": 22678
} | class ____(DataSyncTestCaseBase):
def set_up_operator(
self,
task_id="test_datasync_get_tasks_operator",
task_arn=None,
source_location_uri=SOURCE_LOCATION_URI,
destination_location_uri=DESTINATION_LOCATION_URI,
allow_random_task_choice=False,
):
# Create operator
self.datasync = DataSyncOperator(
task_id=task_id,
dag=self.dag,
task_arn=task_arn,
source_location_uri=source_location_uri,
destination_location_uri=destination_location_uri,
create_source_location_kwargs=MOCK_DATA["create_source_location_kwargs"],
create_destination_location_kwargs=MOCK_DATA["create_destination_location_kwargs"],
create_task_kwargs=MOCK_DATA["create_task_kwargs"],
allow_random_task_choice=allow_random_task_choice,
wait_interval_seconds=0,
)
def test_init(self, mock_get_conn):
self.set_up_operator()
# Airflow built-ins
assert self.datasync.task_id == MOCK_DATA["get_task_id"]
# Defaults
assert self.datasync.aws_conn_id == "aws_default"
assert not self.datasync.allow_random_location_choice
# Assignments
assert self.datasync.source_location_uri == MOCK_DATA["source_location_uri"]
assert self.datasync.destination_location_uri == MOCK_DATA["destination_location_uri"]
assert not self.datasync.allow_random_task_choice
# ### Check mocks:
mock_get_conn.assert_not_called()
def test_init_fails(self, mock_get_conn):
# ### Set up mocks:
mock_get_conn.return_value = self.client
# ### Begin tests:
with pytest.raises(AirflowException):
self.set_up_operator(source_location_uri=None)
with pytest.raises(AirflowException):
self.set_up_operator(destination_location_uri=None)
with pytest.raises(AirflowException):
self.set_up_operator(source_location_uri=None, destination_location_uri=None)
# ### Check mocks:
mock_get_conn.assert_not_called()
def test_get_no_location(self, mock_get_conn):
# ### Set up mocks:
mock_get_conn.return_value = self.client
# ### Begin tests:
self.set_up_operator()
locations = self.client.list_locations()
for location in locations["Locations"]:
self.client.delete_location(LocationArn=location["LocationArn"])
locations = self.client.list_locations()
assert len(locations["Locations"]) == 0
# Execute the task
result = self.datasync.execute(None)
assert result is not None
locations = self.client.list_locations()
assert result is not None
assert len(locations) == 2
# ### Check mocks:
mock_get_conn.assert_called()
def test_get_no_tasks2(self, mock_get_conn):
# ### Set up mocks:
mock_get_conn.return_value = self.client
# ### Begin tests:
self.set_up_operator()
tasks = self.client.list_tasks()
for task in tasks["Tasks"]:
self.client.delete_task(TaskArn=task["TaskArn"])
tasks = self.client.list_tasks()
assert len(tasks["Tasks"]) == 0
# Execute the task
result = self.datasync.execute(None)
assert result is not None
# ### Check mocks:
mock_get_conn.assert_called()
def test_get_one_task(self, mock_get_conn):
# ### Set up mocks:
mock_get_conn.return_value = self.client
# ### Begin tests:
# Make sure we don't cheat
self.set_up_operator()
assert self.datasync.task_arn is None
# Check how many tasks and locations we have
tasks = self.client.list_tasks()
assert len(tasks["Tasks"]) == 1
locations = self.client.list_locations()
assert len(locations["Locations"]) == 2
# Execute the task
result = self.datasync.execute(None)
assert result is not None
task_arn = result["TaskArn"]
assert task_arn is not None
assert task_arn
assert task_arn == self.task_arn
# Assert 0 additional task and 0 additional locations
tasks = self.client.list_tasks()
assert len(tasks["Tasks"]) == 1
locations = self.client.list_locations()
assert len(locations["Locations"]) == 2
# ### Check mocks:
mock_get_conn.assert_called()
def test_get_many_tasks(self, mock_get_conn):
# ### Set up mocks:
mock_get_conn.return_value = self.client
# ### Begin tests:
self.set_up_operator(task_id="datasync_task1")
self.client.create_task(
SourceLocationArn=self.source_location_arn,
DestinationLocationArn=self.destination_location_arn,
)
# Check how many tasks and locations we have
tasks = self.client.list_tasks()
assert len(tasks["Tasks"]) == 2
locations = self.client.list_locations()
assert len(locations["Locations"]) == 2
# Execute the task
with pytest.raises(AirflowException):
self.datasync.execute(None)
# Assert 0 additional task and 0 additional locations
tasks = self.client.list_tasks()
assert len(tasks["Tasks"]) == 2
locations = self.client.list_locations()
assert len(locations["Locations"]) == 2
self.set_up_operator(task_id="datasync_task2", task_arn=self.task_arn, allow_random_task_choice=True)
self.datasync.execute(None)
# ### Check mocks:
mock_get_conn.assert_called()
def test_execute_specific_task(self, mock_get_conn):
# ### Set up mocks:
mock_get_conn.return_value = self.client
# ### Begin tests:
task_arn = self.client.create_task(
SourceLocationArn=self.source_location_arn,
DestinationLocationArn=self.destination_location_arn,
)["TaskArn"]
self.set_up_operator(task_arn=task_arn)
result = self.datasync.execute(None)
assert result["TaskArn"] == task_arn
assert self.datasync.task_arn == task_arn
# ### Check mocks:
mock_get_conn.assert_called()
@pytest.mark.db_test
def test_return_value(
self, mock_get_conn, session, clean_dags_dagruns_and_dagbundles, testing_dag_bundle
):
"""Test we return the right value -- that will get put in to XCom by the execution engine"""
# ### Set up mocks:
mock_get_conn.return_value = self.client
# ### Begin tests:
self.set_up_operator()
if AIRFLOW_V_3_0_PLUS:
from airflow.models.dag_version import DagVersion
sync_dag_to_db(self.dag)
dag_version = DagVersion.get_latest_version(self.dag.dag_id)
ti = TaskInstance(task=self.datasync, dag_version_id=dag_version.id)
dag_run = DagRun(
dag_id=self.dag.dag_id,
logical_date=timezone.utcnow(),
run_id="test",
run_type=DagRunType.MANUAL,
state=DagRunState.RUNNING,
)
else:
dag_run = DagRun(
dag_id=self.dag.dag_id,
execution_date=timezone.utcnow(),
run_id="test",
run_type=DagRunType.MANUAL,
state=DagRunState.RUNNING,
)
ti = TaskInstance(task=self.datasync)
ti.dag_run = dag_run
session.add(ti)
session.commit()
result = self.datasync.execute(ti.get_template_context())
assert result["TaskArn"] == self.task_arn
# ### Check mocks:
mock_get_conn.assert_called()
@mock_aws
@mock.patch.object(DataSyncHook, "get_conn")
| TestDataSyncOperatorGetTasks |
python | django__django | tests/admin_scripts/management/commands/app_command.py | {
"start": 53,
"end": 349
} | class ____(AppCommand):
help = "Test Application-based commands"
requires_system_checks = []
def handle_app_config(self, app_config, **options):
print(
"EXECUTE:AppCommand name=%s, options=%s"
% (app_config.name, sorted(options.items()))
)
| Command |
python | ray-project__ray | doc/source/serve/doc_code/object_detection.py | {
"start": 955,
"end": 1817
} | class ____:
def __init__(self):
self.model = torch.hub.load("ultralytics/yolov5", "yolov5s")
self.model.cuda()
self.model.to(torch.device(0))
def detect(self, image_url: str):
result_im = self.model(image_url)
return Image.fromarray(result_im.render()[0].astype(np.uint8))
entrypoint = APIIngress.bind(ObjectDetection.bind())
# __example_code_end__
if __name__ == "__main__":
import ray
import requests
import os
ray.init(runtime_env={"pip": ["seaborn", "ultralytics"]})
serve.run(entrypoint)
image_url = "https://ultralytics.com/images/zidane.jpg"
resp = requests.get(f"http://127.0.0.1:8000/detect?image_url={image_url}")
with open("output.jpeg", "wb") as f:
f.write(resp.content)
assert os.path.exists("output.jpeg")
os.remove("output.jpeg")
| ObjectDetection |
python | weaviate__weaviate-python-client | weaviate/aliases/sync.py | {
"start": 163,
"end": 218
} | class ____(_AliasExecutor[ConnectionSync]):
pass
| _Alias |
python | scipy__scipy | benchmarks/benchmarks/go_benchmark_functions/go_funcs_S.py | {
"start": 19275,
"end": 21379
} | class ____(Benchmark):
r"""
Shekel 5 objective function.
This class defines the Shekel 5 [1]_ global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\text{Shekel05}}(x) = \sum_{i=1}^{m} \frac{1}{c_{i}
+ \sum_{j=1}^{n} (x_{j} - a_{ij})^2 }`
Where, in this exercise:
.. math::
a =
\begin{bmatrix}
4.0 & 4.0 & 4.0 & 4.0 \\ 1.0 & 1.0 & 1.0 & 1.0 \\
8.0 & 8.0 & 8.0 & 8.0 \\ 6.0 & 6.0 & 6.0 & 6.0 \\
3.0 & 7.0 & 3.0 & 7.0
\end{bmatrix}
.. math::
c = \begin{bmatrix} 0.1 \\ 0.2 \\ 0.2 \\ 0.4 \\ 0.4 \end{bmatrix}
Here, :math:`n` represents the number of dimensions and
:math:`x_i \in [0, 10]` for :math:`i = 1, ..., 4`.
*Global optimum*: :math:`f(x) = -10.15319585` for :math:`x_i = 4` for
:math:`i = 1, ..., 4`
.. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
For Global Optimization Problems Int. Journal of Mathematical Modelling
and Numerical Optimisation, 2013, 4, 150-194.
TODO: this is a different global minimum compared to Jamil#130. The
minimum is found by doing lots of optimisations. The solution is supposed
to be at [4] * N, is there any numerical overflow?
"""
def __init__(self, dimensions=4):
Benchmark.__init__(self, dimensions)
self._bounds = list(zip([0.0] * self.N, [10.0] * self.N))
self.global_optimum = [[4.00003715092,
4.00013327435,
4.00003714871,
4.0001332742]]
self.fglob = -10.1531996791
self.A = asarray([[4.0, 4.0, 4.0, 4.0],
[1.0, 1.0, 1.0, 1.0],
[8.0, 8.0, 8.0, 8.0],
[6.0, 6.0, 6.0, 6.0],
[3.0, 7.0, 3.0, 7.0]])
self.C = asarray([0.1, 0.2, 0.2, 0.4, 0.4])
def fun(self, x, *args):
self.nfev += 1
return -sum(1 / (sum((x - self.A) ** 2, axis=1) + self.C))
| Shekel05 |
python | pyinstaller__pyinstaller | bootloader/waflib/Tools/perl.py | {
"start": 775,
"end": 3446
} | class ____(Task.Task):
run_str = '${PERL} ${XSUBPP} -noprototypes -typemap ${EXTUTILS_TYPEMAP} ${SRC} > ${TGT}'
color = 'BLUE'
ext_out = ['.h']
@conf
def check_perl_version(self, minver=None):
res = True
if minver:
cver = '.'.join(map(str, minver))
else:
cver = ''
self.start_msg('Checking for minimum perl version %s' % cver)
perl = self.find_program('perl', var='PERL', value=getattr(Options.options, 'perlbinary', None))
version = self.cmd_and_log(perl + ["-e", 'printf \"%vd\", $^V'])
if not version:
res = False
version = "Unknown"
elif not minver is None:
ver = tuple(map(int, version.split(".")))
if ver < minver:
res = False
self.end_msg(version, color=res and 'GREEN' or 'YELLOW')
return res
@conf
def check_perl_module(self, module):
cmd = self.env.PERL + ['-e', 'use %s' % module]
self.start_msg('perl module %s' % module)
try:
r = self.cmd_and_log(cmd)
except Errors.WafError:
self.end_msg(False)
return None
self.end_msg(r or True)
return r
@conf
def check_perl_ext_devel(self):
env = self.env
perl = env.PERL
if not perl:
self.fatal('find perl first')
def cmd_perl_config(s):
return perl + ['-MConfig', '-e', 'print \"%s\"' % s]
def cfg_str(cfg):
return self.cmd_and_log(cmd_perl_config(cfg))
def cfg_lst(cfg):
return Utils.to_list(cfg_str(cfg))
def find_xsubpp():
for var in ('privlib', 'vendorlib'):
xsubpp = cfg_lst('$Config{%s}/ExtUtils/xsubpp$Config{exe_ext}' % var)
if xsubpp and os.path.isfile(xsubpp[0]):
return xsubpp
return self.find_program('xsubpp')
env.LINKFLAGS_PERLEXT = cfg_lst('$Config{lddlflags}')
env.INCLUDES_PERLEXT = cfg_lst('$Config{archlib}/CORE')
env.CFLAGS_PERLEXT = cfg_lst('$Config{ccflags} $Config{cccdlflags}')
env.EXTUTILS_TYPEMAP = cfg_lst('$Config{privlib}/ExtUtils/typemap')
env.XSUBPP = find_xsubpp()
if not getattr(Options.options, 'perlarchdir', None):
env.ARCHDIR_PERL = cfg_str('$Config{sitearch}')
else:
env.ARCHDIR_PERL = getattr(Options.options, 'perlarchdir')
env.perlext_PATTERN = '%s.' + cfg_str('$Config{dlext}')
def options(opt):
opt.add_option(
'--with-perl-binary', type='string', dest='perlbinary', help='Specify alternate perl binary', default=None
)
opt.add_option(
'--with-perl-archdir',
type='string',
dest='perlarchdir',
help='Specify directory where to install arch specific files',
default=None
)
| xsubpp |
python | plotly__plotly.py | plotly/graph_objs/scattermap/marker/colorbar/title/_font.py | {
"start": 233,
"end": 9964
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "scattermap.marker.colorbar.title"
_path_str = "scattermap.marker.colorbar.title.font"
_valid_props = {
"color",
"family",
"lineposition",
"shadow",
"size",
"style",
"textcase",
"variant",
"weight",
}
@property
def color(self):
"""
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
@property
def family(self):
"""
HTML font family - the typeface that will be applied by the web
browser. The web browser can only apply a font if it is
available on the system where it runs. Provide multiple font
families, separated by commas, to indicate the order in which
to apply fonts if they aren't available.
The 'family' property is a string and must be specified as:
- A non-empty string
Returns
-------
str
"""
return self["family"]
@family.setter
def family(self, val):
self["family"] = val
@property
def lineposition(self):
"""
Sets the kind of decoration line(s) with text, such as an
"under", "over" or "through" as well as combinations e.g.
"under+over", etc.
The 'lineposition' property is a flaglist and may be specified
as a string containing:
- Any combination of ['under', 'over', 'through'] joined with '+' characters
(e.g. 'under+over')
OR exactly one of ['none'] (e.g. 'none')
Returns
-------
Any
"""
return self["lineposition"]
@lineposition.setter
def lineposition(self, val):
self["lineposition"] = val
@property
def shadow(self):
"""
Sets the shape and color of the shadow behind text. "auto"
places minimal shadow and applies contrast text font color. See
https://developer.mozilla.org/en-US/docs/Web/CSS/text-shadow
for additional options.
The 'shadow' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["shadow"]
@shadow.setter
def shadow(self, val):
self["shadow"] = val
@property
def size(self):
"""
The 'size' property is a number and may be specified as:
- An int or float in the interval [1, inf]
Returns
-------
int|float
"""
return self["size"]
@size.setter
def size(self, val):
self["size"] = val
@property
def style(self):
"""
Sets whether a font should be styled with a normal or italic
face from its family.
The 'style' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'italic']
Returns
-------
Any
"""
return self["style"]
@style.setter
def style(self, val):
self["style"] = val
@property
def textcase(self):
"""
Sets capitalization of text. It can be used to make text appear
in all-uppercase or all-lowercase, or with each word
capitalized.
The 'textcase' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'word caps', 'upper', 'lower']
Returns
-------
Any
"""
return self["textcase"]
@textcase.setter
def textcase(self, val):
self["textcase"] = val
@property
def variant(self):
"""
Sets the variant of the font.
The 'variant' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'small-caps', 'all-small-caps',
'all-petite-caps', 'petite-caps', 'unicase']
Returns
-------
Any
"""
return self["variant"]
@variant.setter
def variant(self, val):
self["variant"] = val
@property
def weight(self):
"""
Sets the weight (or boldness) of the font.
The 'weight' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [1, 1000]
OR exactly one of ['normal', 'bold'] (e.g. 'bold')
Returns
-------
int
"""
return self["weight"]
@weight.setter
def weight(self, val):
self["weight"] = val
@property
def _prop_descriptions(self):
return """\
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser can only apply a font
if it is available on the system where it runs. Provide
multiple font families, separated by commas, to
indicate the order in which to apply fonts if they
aren't available.
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
size
style
Sets whether a font should be styled with a normal or
italic face from its family.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
variant
Sets the variant of the font.
weight
Sets the weight (or boldness) of the font.
"""
def __init__(
self,
arg=None,
color=None,
family=None,
lineposition=None,
shadow=None,
size=None,
style=None,
textcase=None,
variant=None,
weight=None,
**kwargs,
):
"""
Construct a new Font object
Sets this color bar's title font.
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.scattermap.mar
ker.colorbar.title.Font`
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser can only apply a font
if it is available on the system where it runs. Provide
multiple font families, separated by commas, to
indicate the order in which to apply fonts if they
aren't available.
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
size
style
Sets whether a font should be styled with a normal or
italic face from its family.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
variant
Sets the variant of the font.
weight
Sets the weight (or boldness) of the font.
Returns
-------
Font
"""
super().__init__("font")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.scattermap.marker.colorbar.title.Font
constructor must be a dict or
an instance of :class:`plotly.graph_objs.scattermap.marker.colorbar.title.Font`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("color", arg, color)
self._set_property("family", arg, family)
self._set_property("lineposition", arg, lineposition)
self._set_property("shadow", arg, shadow)
self._set_property("size", arg, size)
self._set_property("style", arg, style)
self._set_property("textcase", arg, textcase)
self._set_property("variant", arg, variant)
self._set_property("weight", arg, weight)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Font |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 170443,
"end": 177097
} | class ____(sgqlc.types.Input):
"""Autogenerated input type of CreateBranchProtectionRule"""
__schema__ = github_schema
__field_names__ = (
"repository_id",
"pattern",
"requires_approving_reviews",
"required_approving_review_count",
"requires_commit_signatures",
"requires_linear_history",
"blocks_creations",
"allows_force_pushes",
"allows_deletions",
"is_admin_enforced",
"requires_status_checks",
"requires_strict_status_checks",
"requires_code_owner_reviews",
"dismisses_stale_reviews",
"restricts_review_dismissals",
"review_dismissal_actor_ids",
"bypass_pull_request_actor_ids",
"bypass_force_push_actor_ids",
"restricts_pushes",
"push_actor_ids",
"required_status_check_contexts",
"required_status_checks",
"requires_deployments",
"required_deployment_environments",
"requires_conversation_resolution",
"require_last_push_approval",
"lock_branch",
"lock_allows_fetch_and_merge",
"client_mutation_id",
)
repository_id = sgqlc.types.Field(sgqlc.types.non_null(ID), graphql_name="repositoryId")
"""The global relay id of the repository in which a new branch
protection rule should be created in.
"""
pattern = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="pattern")
"""The glob-like pattern used to determine matching branches."""
requires_approving_reviews = sgqlc.types.Field(Boolean, graphql_name="requiresApprovingReviews")
"""Are approving reviews required to update matching branches."""
required_approving_review_count = sgqlc.types.Field(Int, graphql_name="requiredApprovingReviewCount")
"""Number of approving reviews required to update matching branches."""
requires_commit_signatures = sgqlc.types.Field(Boolean, graphql_name="requiresCommitSignatures")
"""Are commits required to be signed."""
requires_linear_history = sgqlc.types.Field(Boolean, graphql_name="requiresLinearHistory")
"""Are merge commits prohibited from being pushed to this branch."""
blocks_creations = sgqlc.types.Field(Boolean, graphql_name="blocksCreations")
"""Is branch creation a protected operation."""
allows_force_pushes = sgqlc.types.Field(Boolean, graphql_name="allowsForcePushes")
"""Are force pushes allowed on this branch."""
allows_deletions = sgqlc.types.Field(Boolean, graphql_name="allowsDeletions")
"""Can this branch be deleted."""
is_admin_enforced = sgqlc.types.Field(Boolean, graphql_name="isAdminEnforced")
"""Can admins overwrite branch protection."""
requires_status_checks = sgqlc.types.Field(Boolean, graphql_name="requiresStatusChecks")
"""Are status checks required to update matching branches."""
requires_strict_status_checks = sgqlc.types.Field(Boolean, graphql_name="requiresStrictStatusChecks")
"""Are branches required to be up to date before merging."""
requires_code_owner_reviews = sgqlc.types.Field(Boolean, graphql_name="requiresCodeOwnerReviews")
"""Are reviews from code owners required to update matching branches."""
dismisses_stale_reviews = sgqlc.types.Field(Boolean, graphql_name="dismissesStaleReviews")
"""Will new commits pushed to matching branches dismiss pull request
review approvals.
"""
restricts_review_dismissals = sgqlc.types.Field(Boolean, graphql_name="restrictsReviewDismissals")
"""Is dismissal of pull request reviews restricted."""
review_dismissal_actor_ids = sgqlc.types.Field(sgqlc.types.list_of(sgqlc.types.non_null(ID)), graphql_name="reviewDismissalActorIds")
"""A list of User, Team, or App IDs allowed to dismiss reviews on
pull requests targeting matching branches.
"""
bypass_pull_request_actor_ids = sgqlc.types.Field(
sgqlc.types.list_of(sgqlc.types.non_null(ID)), graphql_name="bypassPullRequestActorIds"
)
"""A list of User, Team, or App IDs allowed to bypass pull requests
targeting matching branches.
"""
bypass_force_push_actor_ids = sgqlc.types.Field(sgqlc.types.list_of(sgqlc.types.non_null(ID)), graphql_name="bypassForcePushActorIds")
"""A list of User, Team, or App IDs allowed to bypass force push
targeting matching branches.
"""
restricts_pushes = sgqlc.types.Field(Boolean, graphql_name="restrictsPushes")
"""Is pushing to matching branches restricted."""
push_actor_ids = sgqlc.types.Field(sgqlc.types.list_of(sgqlc.types.non_null(ID)), graphql_name="pushActorIds")
"""A list of User, Team, or App IDs allowed to push to matching
branches.
"""
required_status_check_contexts = sgqlc.types.Field(
sgqlc.types.list_of(sgqlc.types.non_null(String)), graphql_name="requiredStatusCheckContexts"
)
"""List of required status check contexts that must pass for commits
to be accepted to matching branches.
"""
required_status_checks = sgqlc.types.Field(
sgqlc.types.list_of(sgqlc.types.non_null("RequiredStatusCheckInput")), graphql_name="requiredStatusChecks"
)
"""The list of required status checks"""
requires_deployments = sgqlc.types.Field(Boolean, graphql_name="requiresDeployments")
"""Are successful deployments required before merging."""
required_deployment_environments = sgqlc.types.Field(
sgqlc.types.list_of(sgqlc.types.non_null(String)), graphql_name="requiredDeploymentEnvironments"
)
"""The list of required deployment environments"""
requires_conversation_resolution = sgqlc.types.Field(Boolean, graphql_name="requiresConversationResolution")
"""Are conversations required to be resolved before merging."""
require_last_push_approval = sgqlc.types.Field(Boolean, graphql_name="requireLastPushApproval")
"""Whether the most recent push must be approved by someone other
than the person who pushed it
"""
lock_branch = sgqlc.types.Field(Boolean, graphql_name="lockBranch")
"""Whether to set the branch as read-only. If this is true, users
will not be able to push to the branch.
"""
lock_allows_fetch_and_merge = sgqlc.types.Field(Boolean, graphql_name="lockAllowsFetchAndMerge")
"""Whether users can pull changes from upstream when the branch is
locked. Set to `true` to allow fork syncing. Set to `false` to
prevent fork syncing.
"""
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
"""A unique identifier for the client performing the mutation."""
| CreateBranchProtectionRuleInput |
python | kubernetes-client__python | kubernetes/client/api/admissionregistration_v1_api.py | {
"start": 543,
"end": 403195
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def create_mutating_webhook_configuration(self, body, **kwargs): # noqa: E501
"""create_mutating_webhook_configuration # noqa: E501
create a MutatingWebhookConfiguration # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_mutating_webhook_configuration(body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param V1MutatingWebhookConfiguration body: (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1MutatingWebhookConfiguration
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.create_mutating_webhook_configuration_with_http_info(body, **kwargs) # noqa: E501
def create_mutating_webhook_configuration_with_http_info(self, body, **kwargs): # noqa: E501
"""create_mutating_webhook_configuration # noqa: E501
create a MutatingWebhookConfiguration # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_mutating_webhook_configuration_with_http_info(body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param V1MutatingWebhookConfiguration body: (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1MutatingWebhookConfiguration, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'body',
'pretty',
'dry_run',
'field_manager',
'field_validation'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method create_mutating_webhook_configuration" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'body' is set
if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
local_var_params['body'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `body` when calling `create_mutating_webhook_configuration`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/admissionregistration.k8s.io/v1/mutatingwebhookconfigurations', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1MutatingWebhookConfiguration', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def create_validating_admission_policy(self, body, **kwargs): # noqa: E501
"""create_validating_admission_policy # noqa: E501
create a ValidatingAdmissionPolicy # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_validating_admission_policy(body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param V1ValidatingAdmissionPolicy body: (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1ValidatingAdmissionPolicy
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.create_validating_admission_policy_with_http_info(body, **kwargs) # noqa: E501
def create_validating_admission_policy_with_http_info(self, body, **kwargs): # noqa: E501
"""create_validating_admission_policy # noqa: E501
create a ValidatingAdmissionPolicy # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_validating_admission_policy_with_http_info(body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param V1ValidatingAdmissionPolicy body: (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1ValidatingAdmissionPolicy, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'body',
'pretty',
'dry_run',
'field_manager',
'field_validation'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method create_validating_admission_policy" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'body' is set
if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
local_var_params['body'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `body` when calling `create_validating_admission_policy`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/admissionregistration.k8s.io/v1/validatingadmissionpolicies', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1ValidatingAdmissionPolicy', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def create_validating_admission_policy_binding(self, body, **kwargs): # noqa: E501
"""create_validating_admission_policy_binding # noqa: E501
create a ValidatingAdmissionPolicyBinding # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_validating_admission_policy_binding(body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param V1ValidatingAdmissionPolicyBinding body: (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1ValidatingAdmissionPolicyBinding
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.create_validating_admission_policy_binding_with_http_info(body, **kwargs) # noqa: E501
def create_validating_admission_policy_binding_with_http_info(self, body, **kwargs): # noqa: E501
"""create_validating_admission_policy_binding # noqa: E501
create a ValidatingAdmissionPolicyBinding # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_validating_admission_policy_binding_with_http_info(body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param V1ValidatingAdmissionPolicyBinding body: (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1ValidatingAdmissionPolicyBinding, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'body',
'pretty',
'dry_run',
'field_manager',
'field_validation'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method create_validating_admission_policy_binding" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'body' is set
if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
local_var_params['body'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `body` when calling `create_validating_admission_policy_binding`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/admissionregistration.k8s.io/v1/validatingadmissionpolicybindings', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1ValidatingAdmissionPolicyBinding', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def create_validating_webhook_configuration(self, body, **kwargs): # noqa: E501
"""create_validating_webhook_configuration # noqa: E501
create a ValidatingWebhookConfiguration # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_validating_webhook_configuration(body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param V1ValidatingWebhookConfiguration body: (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1ValidatingWebhookConfiguration
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.create_validating_webhook_configuration_with_http_info(body, **kwargs) # noqa: E501
def create_validating_webhook_configuration_with_http_info(self, body, **kwargs): # noqa: E501
"""create_validating_webhook_configuration # noqa: E501
create a ValidatingWebhookConfiguration # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_validating_webhook_configuration_with_http_info(body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param V1ValidatingWebhookConfiguration body: (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1ValidatingWebhookConfiguration, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'body',
'pretty',
'dry_run',
'field_manager',
'field_validation'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method create_validating_webhook_configuration" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'body' is set
if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
local_var_params['body'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `body` when calling `create_validating_webhook_configuration`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/admissionregistration.k8s.io/v1/validatingwebhookconfigurations', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1ValidatingWebhookConfiguration', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def delete_collection_mutating_webhook_configuration(self, **kwargs):  # noqa: E501
    """delete_collection_mutating_webhook_configuration  # noqa: E501

    delete collection of MutatingWebhookConfiguration  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.delete_collection_mutating_webhook_configuration(async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str pretty: If 'true', pretty-print the output.
    :param str _continue: Continue token from a previous paginated list call.
    :param str dry_run: When present, modifications are not persisted. Valid value: All.
    :param str field_selector: Restrict returned objects by field values.
    :param int grace_period_seconds: Seconds before the object is deleted; zero deletes immediately.
    :param bool ignore_store_read_error_with_cluster_breaking_potential: Opt-in unsafe deletion of corrupt objects; skips finalizers/preconditions. Default false.
    :param str label_selector: Restrict returned objects by label values.
    :param int limit: Maximum number of responses for a list call; pagination via `continue`.
    :param bool orphan_dependents: Deprecated; use PropagationPolicy instead.
    :param str propagation_policy: Garbage-collection policy: 'Orphan', 'Background' or 'Foreground'.
    :param str resource_version: Constrains which resource versions may serve the request.
    :param str resource_version_match: How resourceVersion is applied to list calls.
    :param bool send_initial_events: With watch=true, begin the stream with synthetic initial-state events.
    :param int timeout_seconds: Timeout for the list/watch call.
    :param V1DeleteOptions body:
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: V1Status
             If the method is called asynchronously,
             returns the request thread.
    """
    # Callers of this convenience variant only want the deserialized
    # object, so force the *_with_http_info implementation to strip the
    # (data, status, headers) tuple down to just the data.
    kwargs.update(_return_http_data_only=True)
    return self.delete_collection_mutating_webhook_configuration_with_http_info(**kwargs)  # noqa: E501
def delete_collection_mutating_webhook_configuration_with_http_info(self, **kwargs):  # noqa: E501
    """delete_collection_mutating_webhook_configuration  # noqa: E501

    delete collection of MutatingWebhookConfiguration  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.delete_collection_mutating_webhook_configuration_with_http_info(async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str pretty: If 'true', pretty-print the output.
    :param str _continue: Continue token from a previous paginated list call.
    :param str dry_run: When present, modifications are not persisted. Valid value: All.
    :param str field_selector: Restrict returned objects by field values.
    :param int grace_period_seconds: Seconds before the object is deleted; zero deletes immediately.
    :param bool ignore_store_read_error_with_cluster_breaking_potential: Opt-in unsafe deletion of corrupt objects; skips finalizers/preconditions. Default false.
    :param str label_selector: Restrict returned objects by label values.
    :param int limit: Maximum number of responses for a list call; pagination via `continue`.
    :param bool orphan_dependents: Deprecated; use PropagationPolicy instead.
    :param str propagation_policy: Garbage-collection policy: 'Orphan', 'Background' or 'Foreground'.
    :param str resource_version: Constrains which resource versions may serve the request.
    :param str resource_version_match: How resourceVersion is applied to list calls.
    :param bool send_initial_events: With watch=true, begin the stream with synthetic initial-state events.
    :param int timeout_seconds: Timeout for the list/watch call.
    :param V1DeleteOptions body:
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: tuple(V1Status, status_code(int), headers(HTTPHeaderDict))
             If the method is called asynchronously,
             returns the request thread.
    """
    # (python kwarg name, wire query-string name), in serialization order.
    query_param_map = [
        ('pretty', 'pretty'),
        ('_continue', 'continue'),
        ('dry_run', 'dryRun'),
        ('field_selector', 'fieldSelector'),
        ('grace_period_seconds', 'gracePeriodSeconds'),
        ('ignore_store_read_error_with_cluster_breaking_potential',
         'ignoreStoreReadErrorWithClusterBreakingPotential'),
        ('label_selector', 'labelSelector'),
        ('limit', 'limit'),
        ('orphan_dependents', 'orphanDependents'),
        ('propagation_policy', 'propagationPolicy'),
        ('resource_version', 'resourceVersion'),
        ('resource_version_match', 'resourceVersionMatch'),
        ('send_initial_events', 'sendInitialEvents'),
        ('timeout_seconds', 'timeoutSeconds'),
    ]
    # Every kwarg this endpoint accepts: the query params above, the
    # request body, and the generic client-control parameters.
    accepted_params = {name for name, _ in query_param_map}
    accepted_params.add('body')
    accepted_params.update(
        ('async_req', '_return_http_data_only',
         '_preload_content', '_request_timeout'))

    local_var_params = {}
    for key, val in six.iteritems(kwargs):
        if key not in accepted_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method delete_collection_mutating_webhook_configuration" % key
            )
        local_var_params[key] = val

    collection_formats = {}
    path_params = {}

    # Only emit query parameters the caller actually supplied.
    query_params = []
    for py_name, wire_name in query_param_map:
        value = local_var_params.get(py_name)
        if value is not None:
            query_params.append((wire_name, value))

    header_params = {}
    form_params = []
    local_var_files = {}

    body_params = local_var_params.get('body')

    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor'])  # noqa: E501

    # Authentication setting
    auth_settings = ['BearerToken']  # noqa: E501

    return self.api_client.call_api(
        '/apis/admissionregistration.k8s.io/v1/mutatingwebhookconfigurations', 'DELETE',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='V1Status',  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats)
def delete_collection_validating_admission_policy(self, **kwargs):  # noqa: E501
    """delete_collection_validating_admission_policy  # noqa: E501

    delete collection of ValidatingAdmissionPolicy  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.delete_collection_validating_admission_policy(async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str pretty: If 'true', pretty-print the output.
    :param str _continue: Continue token from a previous paginated list call.
    :param str dry_run: When present, modifications are not persisted. Valid value: All.
    :param str field_selector: Restrict returned objects by field values.
    :param int grace_period_seconds: Seconds before the object is deleted; zero deletes immediately.
    :param bool ignore_store_read_error_with_cluster_breaking_potential: Opt-in unsafe deletion of corrupt objects; skips finalizers/preconditions. Default false.
    :param str label_selector: Restrict returned objects by label values.
    :param int limit: Maximum number of responses for a list call; pagination via `continue`.
    :param bool orphan_dependents: Deprecated; use PropagationPolicy instead.
    :param str propagation_policy: Garbage-collection policy: 'Orphan', 'Background' or 'Foreground'.
    :param str resource_version: Constrains which resource versions may serve the request.
    :param str resource_version_match: How resourceVersion is applied to list calls.
    :param bool send_initial_events: With watch=true, begin the stream with synthetic initial-state events.
    :param int timeout_seconds: Timeout for the list/watch call.
    :param V1DeleteOptions body:
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: V1Status
             If the method is called asynchronously,
             returns the request thread.
    """
    # Callers of this convenience variant only want the deserialized
    # object, so force the *_with_http_info implementation to strip the
    # (data, status, headers) tuple down to just the data.
    kwargs.update(_return_http_data_only=True)
    return self.delete_collection_validating_admission_policy_with_http_info(**kwargs)  # noqa: E501
def delete_collection_validating_admission_policy_with_http_info(self, **kwargs):  # noqa: E501
    """delete_collection_validating_admission_policy  # noqa: E501

    Delete a collection of ValidatingAdmissionPolicy objects.  # noqa: E501
    This method makes a synchronous HTTP request by default; pass
    ``async_req=True`` to get the request thread back instead.

    >>> thread = api.delete_collection_validating_admission_policy_with_http_info(async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str pretty: pretty-print the output when 'true'.
    :param str _continue: continuation token from a previous paginated list.
    :param str dry_run: when present, modifications are not persisted ('All').
    :param str field_selector: restrict returned objects by their fields.
    :param int grace_period_seconds: seconds before the object is deleted.
    :param bool ignore_store_read_error_with_cluster_breaking_potential: opt in to unsafe deletion of corrupt objects.
    :param str label_selector: restrict returned objects by their labels.
    :param int limit: maximum number of responses for a list call.
    :param bool orphan_dependents: deprecated; use propagation_policy instead.
    :param str propagation_policy: 'Orphan', 'Background' or 'Foreground'.
    :param str resource_version: constraint on served resource versions.
    :param str resource_version_match: how resource_version applies to list calls.
    :param bool send_initial_events: request synthetic initial watch events.
    :param int timeout_seconds: timeout for the list/watch call.
    :param V1DeleteOptions body:
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
                             without reading/decoding. Default is True.
    :param _request_timeout: total timeout, or a (connection, read) tuple.
    :return: tuple(V1Status, status_code(int), headers(HTTPHeaderDict))
             If the method is called asynchronously,
             returns the request thread.
    """
    # NOTE: locals() must be captured before any other local is bound so it
    # holds exactly {'self', 'kwargs'}.
    local_var_params = locals()

    all_params = [
        'pretty',
        '_continue',
        'dry_run',
        'field_selector',
        'grace_period_seconds',
        'ignore_store_read_error_with_cluster_breaking_potential',
        'label_selector',
        'limit',
        'orphan_dependents',
        'propagation_policy',
        'resource_version',
        'resource_version_match',
        'send_initial_events',
        'timeout_seconds',
        'body',
        'async_req',
        '_return_http_data_only',
        '_preload_content',
        '_request_timeout',
    ]

    # Reject unknown keyword arguments, then fold the accepted ones into the
    # parameter dict.
    for key, val in six.iteritems(local_var_params['kwargs']):
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method delete_collection_validating_admission_policy" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']

    # Python argument name -> wire-format query key, in emission order.
    param_to_query_key = [
        ('pretty', 'pretty'),
        ('_continue', 'continue'),
        ('dry_run', 'dryRun'),
        ('field_selector', 'fieldSelector'),
        ('grace_period_seconds', 'gracePeriodSeconds'),
        ('ignore_store_read_error_with_cluster_breaking_potential',
         'ignoreStoreReadErrorWithClusterBreakingPotential'),
        ('label_selector', 'labelSelector'),
        ('limit', 'limit'),
        ('orphan_dependents', 'orphanDependents'),
        ('propagation_policy', 'propagationPolicy'),
        ('resource_version', 'resourceVersion'),
        ('resource_version_match', 'resourceVersionMatch'),
        ('send_initial_events', 'sendInitialEvents'),
        ('timeout_seconds', 'timeoutSeconds'),
    ]
    query_params = [
        (query_key, local_var_params[name])
        for name, query_key in param_to_query_key
        if local_var_params.get(name) is not None
    ]

    # HTTP header `Accept`
    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json', 'application/yaml',
             'application/vnd.kubernetes.protobuf', 'application/cbor'])  # noqa: E501
    }

    return self.api_client.call_api(
        '/apis/admissionregistration.k8s.io/v1/validatingadmissionpolicies', 'DELETE',
        {},  # no path parameters for a collection endpoint
        query_params,
        header_params,
        body=local_var_params.get('body'),
        post_params=[],
        files={},
        response_type='V1Status',  # noqa: E501
        auth_settings=['BearerToken'],  # noqa: E501
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats={})
def delete_collection_validating_admission_policy_binding(self, **kwargs):  # noqa: E501
    """delete_collection_validating_admission_policy_binding  # noqa: E501

    Delete a collection of ValidatingAdmissionPolicyBinding objects.  # noqa: E501
    This method makes a synchronous HTTP request by default; pass
    ``async_req=True`` to get the request thread back instead.

    >>> thread = api.delete_collection_validating_admission_policy_binding(async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str pretty: pretty-print the output when 'true'.
    :param str _continue: continuation token from a previous paginated list.
    :param str dry_run: when present, modifications are not persisted ('All').
    :param str field_selector: restrict returned objects by their fields.
    :param int grace_period_seconds: seconds before the object is deleted.
    :param bool ignore_store_read_error_with_cluster_breaking_potential: opt in to unsafe deletion of corrupt objects.
    :param str label_selector: restrict returned objects by their labels.
    :param int limit: maximum number of responses for a list call.
    :param bool orphan_dependents: deprecated; use propagation_policy instead.
    :param str propagation_policy: 'Orphan', 'Background' or 'Foreground'.
    :param str resource_version: constraint on served resource versions.
    :param str resource_version_match: how resource_version applies to list calls.
    :param bool send_initial_events: request synthetic initial watch events.
    :param int timeout_seconds: timeout for the list/watch call.
    :param V1DeleteOptions body:
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
                             without reading/decoding. Default is True.
    :param _request_timeout: total timeout, or a (connection, read) tuple.
    :return: V1Status
             If the method is called asynchronously,
             returns the request thread.
    """
    # Delegate to the *_with_http_info variant, keeping only the
    # deserialized response data.
    params = dict(kwargs, _return_http_data_only=True)
    return self.delete_collection_validating_admission_policy_binding_with_http_info(**params)  # noqa: E501
def delete_collection_validating_admission_policy_binding_with_http_info(self, **kwargs):  # noqa: E501
    """delete_collection_validating_admission_policy_binding  # noqa: E501

    Delete a collection of ValidatingAdmissionPolicyBinding objects.  # noqa: E501
    This method makes a synchronous HTTP request by default; pass
    ``async_req=True`` to get the request thread back instead.

    >>> thread = api.delete_collection_validating_admission_policy_binding_with_http_info(async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str pretty: pretty-print the output when 'true'.
    :param str _continue: continuation token from a previous paginated list.
    :param str dry_run: when present, modifications are not persisted ('All').
    :param str field_selector: restrict returned objects by their fields.
    :param int grace_period_seconds: seconds before the object is deleted.
    :param bool ignore_store_read_error_with_cluster_breaking_potential: opt in to unsafe deletion of corrupt objects.
    :param str label_selector: restrict returned objects by their labels.
    :param int limit: maximum number of responses for a list call.
    :param bool orphan_dependents: deprecated; use propagation_policy instead.
    :param str propagation_policy: 'Orphan', 'Background' or 'Foreground'.
    :param str resource_version: constraint on served resource versions.
    :param str resource_version_match: how resource_version applies to list calls.
    :param bool send_initial_events: request synthetic initial watch events.
    :param int timeout_seconds: timeout for the list/watch call.
    :param V1DeleteOptions body:
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
                             without reading/decoding. Default is True.
    :param _request_timeout: total timeout, or a (connection, read) tuple.
    :return: tuple(V1Status, status_code(int), headers(HTTPHeaderDict))
             If the method is called asynchronously,
             returns the request thread.
    """
    # NOTE: locals() must be captured before any other local is bound so it
    # holds exactly {'self', 'kwargs'}.
    local_var_params = locals()

    all_params = [
        'pretty',
        '_continue',
        'dry_run',
        'field_selector',
        'grace_period_seconds',
        'ignore_store_read_error_with_cluster_breaking_potential',
        'label_selector',
        'limit',
        'orphan_dependents',
        'propagation_policy',
        'resource_version',
        'resource_version_match',
        'send_initial_events',
        'timeout_seconds',
        'body',
        'async_req',
        '_return_http_data_only',
        '_preload_content',
        '_request_timeout',
    ]

    # Reject unknown keyword arguments, then fold the accepted ones into the
    # parameter dict.
    for key, val in six.iteritems(local_var_params['kwargs']):
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method delete_collection_validating_admission_policy_binding" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']

    # Python argument name -> wire-format query key, in emission order.
    param_to_query_key = [
        ('pretty', 'pretty'),
        ('_continue', 'continue'),
        ('dry_run', 'dryRun'),
        ('field_selector', 'fieldSelector'),
        ('grace_period_seconds', 'gracePeriodSeconds'),
        ('ignore_store_read_error_with_cluster_breaking_potential',
         'ignoreStoreReadErrorWithClusterBreakingPotential'),
        ('label_selector', 'labelSelector'),
        ('limit', 'limit'),
        ('orphan_dependents', 'orphanDependents'),
        ('propagation_policy', 'propagationPolicy'),
        ('resource_version', 'resourceVersion'),
        ('resource_version_match', 'resourceVersionMatch'),
        ('send_initial_events', 'sendInitialEvents'),
        ('timeout_seconds', 'timeoutSeconds'),
    ]
    query_params = [
        (query_key, local_var_params[name])
        for name, query_key in param_to_query_key
        if local_var_params.get(name) is not None
    ]

    # HTTP header `Accept`
    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json', 'application/yaml',
             'application/vnd.kubernetes.protobuf', 'application/cbor'])  # noqa: E501
    }

    return self.api_client.call_api(
        '/apis/admissionregistration.k8s.io/v1/validatingadmissionpolicybindings', 'DELETE',
        {},  # no path parameters for a collection endpoint
        query_params,
        header_params,
        body=local_var_params.get('body'),
        post_params=[],
        files={},
        response_type='V1Status',  # noqa: E501
        auth_settings=['BearerToken'],  # noqa: E501
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats={})
def delete_collection_validating_webhook_configuration(self, **kwargs):  # noqa: E501
    """delete_collection_validating_webhook_configuration  # noqa: E501

    Delete a collection of ValidatingWebhookConfiguration objects.
    Synchronous by default; pass ``async_req=True`` to perform the request
    asynchronously (the return value is then the request thread).

    Accepted optional keyword arguments mirror the Kubernetes
    delete-collection API: ``pretty``, ``_continue``, ``dry_run``,
    ``field_selector``, ``grace_period_seconds``,
    ``ignore_store_read_error_with_cluster_breaking_potential``,
    ``label_selector``, ``limit``, ``orphan_dependents``,
    ``propagation_policy``, ``resource_version``,
    ``resource_version_match``, ``send_initial_events``,
    ``timeout_seconds`` and a ``V1DeleteOptions`` ``body``, plus the
    client-level options ``async_req``, ``_preload_content`` (default
    True; set False to get the raw urllib3.HTTPResponse) and
    ``_request_timeout`` (single number, or a (connect, read) tuple).

    :return: V1Status
        If the method is called asynchronously,
        returns the request thread.
    """
    # The plain (non-_with_http_info) variant returns only the
    # deserialized body, so force that flag before delegating.
    opts = dict(kwargs, _return_http_data_only=True)
    return self.delete_collection_validating_webhook_configuration_with_http_info(**opts)  # noqa: E501
def delete_collection_validating_webhook_configuration_with_http_info(self, **kwargs):  # noqa: E501
    """delete_collection_validating_webhook_configuration  # noqa: E501

    Delete a collection of ValidatingWebhookConfiguration objects and
    return the full HTTP response information. Synchronous by default;
    pass ``async_req=True`` to perform the request asynchronously (the
    return value is then the request thread).

    Accepted optional keyword arguments mirror the Kubernetes
    delete-collection API: ``pretty``, ``_continue``, ``dry_run``,
    ``field_selector``, ``grace_period_seconds``,
    ``ignore_store_read_error_with_cluster_breaking_potential``,
    ``label_selector``, ``limit``, ``orphan_dependents``,
    ``propagation_policy``, ``resource_version``,
    ``resource_version_match``, ``send_initial_events``,
    ``timeout_seconds`` and a ``V1DeleteOptions`` ``body``, plus the
    client-level options ``async_req``, ``_return_http_data_only``,
    ``_preload_content`` (default True) and ``_request_timeout``
    (single number, or a (connect, read) tuple).

    :return: tuple(V1Status, status_code(int), headers(HTTPHeaderDict))
        If the method is called asynchronously,
        returns the request thread.
    """
    # Snapshot the call namespace first (contains 'self' and 'kwargs');
    # validated keyword arguments are merged into it below.
    local_var_params = locals()

    # Endpoint-level parameters, in the order the query string is built.
    api_params = [
        'pretty',
        '_continue',
        'dry_run',
        'field_selector',
        'grace_period_seconds',
        'ignore_store_read_error_with_cluster_breaking_potential',
        'label_selector',
        'limit',
        'orphan_dependents',
        'propagation_policy',
        'resource_version',
        'resource_version_match',
        'send_initial_events',
        'timeout_seconds',
        'body',
    ]
    # Transport-level options shared by every generated method.
    all_params = api_params + [
        'async_req',
        '_return_http_data_only',
        '_preload_content',
        '_request_timeout',
    ]

    for key, val in six.iteritems(local_var_params['kwargs']):
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method delete_collection_validating_webhook_configuration" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']

    collection_formats = {}
    path_params = {}

    # python_name -> camelCase wire name, preserving the original order.
    query_param_names = [
        ('pretty', 'pretty'),
        ('_continue', 'continue'),
        ('dry_run', 'dryRun'),
        ('field_selector', 'fieldSelector'),
        ('grace_period_seconds', 'gracePeriodSeconds'),
        ('ignore_store_read_error_with_cluster_breaking_potential',
         'ignoreStoreReadErrorWithClusterBreakingPotential'),
        ('label_selector', 'labelSelector'),
        ('limit', 'limit'),
        ('orphan_dependents', 'orphanDependents'),
        ('propagation_policy', 'propagationPolicy'),
        ('resource_version', 'resourceVersion'),
        ('resource_version_match', 'resourceVersionMatch'),
        ('send_initial_events', 'sendInitialEvents'),
        ('timeout_seconds', 'timeoutSeconds'),
    ]
    # Only parameters that were supplied with a non-None value are sent.
    query_params = [
        (wire_name, local_var_params[py_name])
        for py_name, wire_name in query_param_names
        if local_var_params.get(py_name) is not None
    ]

    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = local_var_params.get('body')

    # Negotiate one of the serializations this endpoint can produce.
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor'])  # noqa: E501

    # Authentication setting
    auth_settings = ['BearerToken']  # noqa: E501

    return self.api_client.call_api(
        '/apis/admissionregistration.k8s.io/v1/validatingwebhookconfigurations', 'DELETE',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='V1Status',  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats)
def delete_mutating_webhook_configuration(self, name, **kwargs):  # noqa: E501
    """delete_mutating_webhook_configuration  # noqa: E501

    Delete a single MutatingWebhookConfiguration by name. Synchronous by
    default; pass ``async_req=True`` to perform the request
    asynchronously (the return value is then the request thread).

    :param str name: name of the MutatingWebhookConfiguration (required)

    Accepted optional keyword arguments mirror the Kubernetes delete
    API: ``pretty``, ``dry_run``, ``grace_period_seconds``,
    ``ignore_store_read_error_with_cluster_breaking_potential``,
    ``orphan_dependents``, ``propagation_policy`` and a
    ``V1DeleteOptions`` ``body``, plus the client-level options
    ``async_req``, ``_preload_content`` (default True; set False to get
    the raw urllib3.HTTPResponse) and ``_request_timeout`` (single
    number, or a (connect, read) tuple).

    :return: V1Status
        If the method is called asynchronously,
        returns the request thread.
    """
    # The plain (non-_with_http_info) variant returns only the
    # deserialized body, so force that flag before delegating.
    opts = dict(kwargs, _return_http_data_only=True)
    return self.delete_mutating_webhook_configuration_with_http_info(name, **opts)  # noqa: E501
def delete_mutating_webhook_configuration_with_http_info(self, name, **kwargs):  # noqa: E501
    """delete_mutating_webhook_configuration  # noqa: E501

    Delete a single MutatingWebhookConfiguration by name and return the
    full HTTP response information. Synchronous by default; pass
    ``async_req=True`` to perform the request asynchronously (the return
    value is then the request thread).

    :param str name: name of the MutatingWebhookConfiguration (required)

    Accepted optional keyword arguments mirror the Kubernetes delete
    API: ``pretty``, ``dry_run``, ``grace_period_seconds``,
    ``ignore_store_read_error_with_cluster_breaking_potential``,
    ``orphan_dependents``, ``propagation_policy`` and a
    ``V1DeleteOptions`` ``body``, plus the client-level options
    ``async_req``, ``_return_http_data_only``, ``_preload_content``
    (default True) and ``_request_timeout`` (single number, or a
    (connect, read) tuple).

    :return: tuple(V1Status, status_code(int), headers(HTTPHeaderDict))
        If the method is called asynchronously,
        returns the request thread.
    """
    # Snapshot the call namespace first (contains 'self', 'name' and
    # 'kwargs'); validated keyword arguments are merged into it below.
    local_var_params = locals()

    # Endpoint-level parameters, in the order the query string is built.
    api_params = [
        'name',
        'pretty',
        'dry_run',
        'grace_period_seconds',
        'ignore_store_read_error_with_cluster_breaking_potential',
        'orphan_dependents',
        'propagation_policy',
        'body',
    ]
    # Transport-level options shared by every generated method.
    all_params = api_params + [
        'async_req',
        '_return_http_data_only',
        '_preload_content',
        '_request_timeout',
    ]

    for key, val in six.iteritems(local_var_params['kwargs']):
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method delete_mutating_webhook_configuration" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']

    # verify the required parameter 'name' is set
    if self.api_client.client_side_validation and local_var_params.get('name') is None:  # noqa: E501
        raise ApiValueError("Missing the required parameter `name` when calling `delete_mutating_webhook_configuration`")  # noqa: E501

    collection_formats = {}
    path_params = {}
    if 'name' in local_var_params:
        path_params['name'] = local_var_params['name']  # noqa: E501

    # python_name -> camelCase wire name, preserving the original order.
    query_param_names = [
        ('pretty', 'pretty'),
        ('dry_run', 'dryRun'),
        ('grace_period_seconds', 'gracePeriodSeconds'),
        ('ignore_store_read_error_with_cluster_breaking_potential',
         'ignoreStoreReadErrorWithClusterBreakingPotential'),
        ('orphan_dependents', 'orphanDependents'),
        ('propagation_policy', 'propagationPolicy'),
    ]
    # Only parameters that were supplied with a non-None value are sent.
    query_params = [
        (wire_name, local_var_params[py_name])
        for py_name, wire_name in query_param_names
        if local_var_params.get(py_name) is not None
    ]

    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = local_var_params.get('body')

    # Negotiate one of the serializations this endpoint can produce.
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor'])  # noqa: E501

    # Authentication setting
    auth_settings = ['BearerToken']  # noqa: E501

    return self.api_client.call_api(
        '/apis/admissionregistration.k8s.io/v1/mutatingwebhookconfigurations/{name}', 'DELETE',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='V1Status',  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats)
def delete_validating_admission_policy(self, name, **kwargs): # noqa: E501
"""delete_validating_admission_policy # noqa: E501
delete a ValidatingAdmissionPolicy # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_validating_admission_policy(name, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the ValidatingAdmissionPolicy (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
:param bool ignore_store_read_error_with_cluster_breaking_potential: if set to true, it will trigger an unsafe deletion of the resource in case the normal deletion flow fails with a corrupt object error. A resource is considered corrupt if it can not be retrieved from the underlying storage successfully because of a) its data can not be transformed e.g. decryption failure, or b) it fails to decode into an object. NOTE: unsafe deletion ignores finalizer constraints, skips precondition checks, and removes the object from the storage. WARNING: This may potentially break the cluster if the workload associated with the resource being unsafe-deleted relies on normal deletion flow. Use only if you REALLY know what you are doing. The default value is false, and the user must opt in to enable it
:param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
:param V1DeleteOptions body:
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1Status
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.delete_validating_admission_policy_with_http_info(name, **kwargs) # noqa: E501
def delete_validating_admission_policy_with_http_info(self, name, **kwargs): # noqa: E501
"""delete_validating_admission_policy # noqa: E501
delete a ValidatingAdmissionPolicy # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_validating_admission_policy_with_http_info(name, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the ValidatingAdmissionPolicy (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
:param bool ignore_store_read_error_with_cluster_breaking_potential: if set to true, it will trigger an unsafe deletion of the resource in case the normal deletion flow fails with a corrupt object error. A resource is considered corrupt if it can not be retrieved from the underlying storage successfully because of a) its data can not be transformed e.g. decryption failure, or b) it fails to decode into an object. NOTE: unsafe deletion ignores finalizer constraints, skips precondition checks, and removes the object from the storage. WARNING: This may potentially break the cluster if the workload associated with the resource being unsafe-deleted relies on normal deletion flow. Use only if you REALLY know what you are doing. The default value is false, and the user must opt in to enable it
:param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
:param V1DeleteOptions body:
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1Status, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'name',
'pretty',
'dry_run',
'grace_period_seconds',
'ignore_store_read_error_with_cluster_breaking_potential',
'orphan_dependents',
'propagation_policy',
'body'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_validating_admission_policy" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'name' is set
if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
local_var_params['name'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `name` when calling `delete_validating_admission_policy`") # noqa: E501
collection_formats = {}
path_params = {}
if 'name' in local_var_params:
path_params['name'] = local_var_params['name'] # noqa: E501
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
if 'grace_period_seconds' in local_var_params and local_var_params['grace_period_seconds'] is not None: # noqa: E501
query_params.append(('gracePeriodSeconds', local_var_params['grace_period_seconds'])) # noqa: E501
if 'ignore_store_read_error_with_cluster_breaking_potential' in local_var_params and local_var_params['ignore_store_read_error_with_cluster_breaking_potential'] is not None: # noqa: E501
query_params.append(('ignoreStoreReadErrorWithClusterBreakingPotential', local_var_params['ignore_store_read_error_with_cluster_breaking_potential'])) # noqa: E501
if 'orphan_dependents' in local_var_params and local_var_params['orphan_dependents'] is not None: # noqa: E501
query_params.append(('orphanDependents', local_var_params['orphan_dependents'])) # noqa: E501
if 'propagation_policy' in local_var_params and local_var_params['propagation_policy'] is not None: # noqa: E501
query_params.append(('propagationPolicy', local_var_params['propagation_policy'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/admissionregistration.k8s.io/v1/validatingadmissionpolicies/{name}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Status', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def delete_validating_admission_policy_binding(self, name, **kwargs): # noqa: E501
"""delete_validating_admission_policy_binding # noqa: E501
delete a ValidatingAdmissionPolicyBinding # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_validating_admission_policy_binding(name, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the ValidatingAdmissionPolicyBinding (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
:param bool ignore_store_read_error_with_cluster_breaking_potential: if set to true, it will trigger an unsafe deletion of the resource in case the normal deletion flow fails with a corrupt object error. A resource is considered corrupt if it can not be retrieved from the underlying storage successfully because of a) its data can not be transformed e.g. decryption failure, or b) it fails to decode into an object. NOTE: unsafe deletion ignores finalizer constraints, skips precondition checks, and removes the object from the storage. WARNING: This may potentially break the cluster if the workload associated with the resource being unsafe-deleted relies on normal deletion flow. Use only if you REALLY know what you are doing. The default value is false, and the user must opt in to enable it
:param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
:param V1DeleteOptions body:
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1Status
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.delete_validating_admission_policy_binding_with_http_info(name, **kwargs) # noqa: E501
def delete_validating_admission_policy_binding_with_http_info(self, name, **kwargs): # noqa: E501
"""delete_validating_admission_policy_binding # noqa: E501
delete a ValidatingAdmissionPolicyBinding # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_validating_admission_policy_binding_with_http_info(name, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the ValidatingAdmissionPolicyBinding (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
:param bool ignore_store_read_error_with_cluster_breaking_potential: if set to true, it will trigger an unsafe deletion of the resource in case the normal deletion flow fails with a corrupt object error. A resource is considered corrupt if it can not be retrieved from the underlying storage successfully because of a) its data can not be transformed e.g. decryption failure, or b) it fails to decode into an object. NOTE: unsafe deletion ignores finalizer constraints, skips precondition checks, and removes the object from the storage. WARNING: This may potentially break the cluster if the workload associated with the resource being unsafe-deleted relies on normal deletion flow. Use only if you REALLY know what you are doing. The default value is false, and the user must opt in to enable it
:param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
:param V1DeleteOptions body:
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1Status, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'name',
'pretty',
'dry_run',
'grace_period_seconds',
'ignore_store_read_error_with_cluster_breaking_potential',
'orphan_dependents',
'propagation_policy',
'body'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_validating_admission_policy_binding" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'name' is set
if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
local_var_params['name'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `name` when calling `delete_validating_admission_policy_binding`") # noqa: E501
collection_formats = {}
path_params = {}
if 'name' in local_var_params:
path_params['name'] = local_var_params['name'] # noqa: E501
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
if 'grace_period_seconds' in local_var_params and local_var_params['grace_period_seconds'] is not None: # noqa: E501
query_params.append(('gracePeriodSeconds', local_var_params['grace_period_seconds'])) # noqa: E501
if 'ignore_store_read_error_with_cluster_breaking_potential' in local_var_params and local_var_params['ignore_store_read_error_with_cluster_breaking_potential'] is not None: # noqa: E501
query_params.append(('ignoreStoreReadErrorWithClusterBreakingPotential', local_var_params['ignore_store_read_error_with_cluster_breaking_potential'])) # noqa: E501
if 'orphan_dependents' in local_var_params and local_var_params['orphan_dependents'] is not None: # noqa: E501
query_params.append(('orphanDependents', local_var_params['orphan_dependents'])) # noqa: E501
if 'propagation_policy' in local_var_params and local_var_params['propagation_policy'] is not None: # noqa: E501
query_params.append(('propagationPolicy', local_var_params['propagation_policy'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/admissionregistration.k8s.io/v1/validatingadmissionpolicybindings/{name}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Status', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def delete_validating_webhook_configuration(self, name, **kwargs): # noqa: E501
"""delete_validating_webhook_configuration # noqa: E501
delete a ValidatingWebhookConfiguration # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_validating_webhook_configuration(name, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the ValidatingWebhookConfiguration (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
:param bool ignore_store_read_error_with_cluster_breaking_potential: if set to true, it will trigger an unsafe deletion of the resource in case the normal deletion flow fails with a corrupt object error. A resource is considered corrupt if it can not be retrieved from the underlying storage successfully because of a) its data can not be transformed e.g. decryption failure, or b) it fails to decode into an object. NOTE: unsafe deletion ignores finalizer constraints, skips precondition checks, and removes the object from the storage. WARNING: This may potentially break the cluster if the workload associated with the resource being unsafe-deleted relies on normal deletion flow. Use only if you REALLY know what you are doing. The default value is false, and the user must opt in to enable it
:param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
:param V1DeleteOptions body:
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1Status
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.delete_validating_webhook_configuration_with_http_info(name, **kwargs) # noqa: E501
def delete_validating_webhook_configuration_with_http_info(self, name, **kwargs): # noqa: E501
"""delete_validating_webhook_configuration # noqa: E501
delete a ValidatingWebhookConfiguration # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_validating_webhook_configuration_with_http_info(name, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the ValidatingWebhookConfiguration (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
:param bool ignore_store_read_error_with_cluster_breaking_potential: if set to true, it will trigger an unsafe deletion of the resource in case the normal deletion flow fails with a corrupt object error. A resource is considered corrupt if it can not be retrieved from the underlying storage successfully because of a) its data can not be transformed e.g. decryption failure, or b) it fails to decode into an object. NOTE: unsafe deletion ignores finalizer constraints, skips precondition checks, and removes the object from the storage. WARNING: This may potentially break the cluster if the workload associated with the resource being unsafe-deleted relies on normal deletion flow. Use only if you REALLY know what you are doing. The default value is false, and the user must opt in to enable it
:param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
:param V1DeleteOptions body:
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1Status, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'name',
'pretty',
'dry_run',
'grace_period_seconds',
'ignore_store_read_error_with_cluster_breaking_potential',
'orphan_dependents',
'propagation_policy',
'body'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_validating_webhook_configuration" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'name' is set
if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
local_var_params['name'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `name` when calling `delete_validating_webhook_configuration`") # noqa: E501
collection_formats = {}
path_params = {}
if 'name' in local_var_params:
path_params['name'] = local_var_params['name'] # noqa: E501
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
if 'grace_period_seconds' in local_var_params and local_var_params['grace_period_seconds'] is not None: # noqa: E501
query_params.append(('gracePeriodSeconds', local_var_params['grace_period_seconds'])) # noqa: E501
if 'ignore_store_read_error_with_cluster_breaking_potential' in local_var_params and local_var_params['ignore_store_read_error_with_cluster_breaking_potential'] is not None: # noqa: E501
query_params.append(('ignoreStoreReadErrorWithClusterBreakingPotential', local_var_params['ignore_store_read_error_with_cluster_breaking_potential'])) # noqa: E501
if 'orphan_dependents' in local_var_params and local_var_params['orphan_dependents'] is not None: # noqa: E501
query_params.append(('orphanDependents', local_var_params['orphan_dependents'])) # noqa: E501
if 'propagation_policy' in local_var_params and local_var_params['propagation_policy'] is not None: # noqa: E501
query_params.append(('propagationPolicy', local_var_params['propagation_policy'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/admissionregistration.k8s.io/v1/validatingwebhookconfigurations/{name}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Status', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def get_api_resources(self, **kwargs): # noqa: E501
"""get_api_resources # noqa: E501
get available resources # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_api_resources(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1APIResourceList
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.get_api_resources_with_http_info(**kwargs) # noqa: E501
def get_api_resources_with_http_info(self, **kwargs):  # noqa: E501
    """get_api_resources  # noqa: E501

    Low-level variant of ``get_api_resources``: same request, but the
    return value can include the HTTP status code and headers.

    :param async_req bool: execute request asynchronously
    :param _return_http_data_only: response data without status code/headers
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
                             object without reading/decoding. Default True.
    :param _request_timeout: total timeout in seconds, or a
                             (connection, read) tuple.
    :return: tuple(V1APIResourceList, status_code(int), headers(HTTPHeaderDict))
             or the request thread when async.
    """
    # Reject anything that is not a recognized keyword argument; fail on
    # the first unknown name encountered.
    recognized = (
        'async_req',
        '_return_http_data_only',
        '_preload_content',
        '_request_timeout',
    )
    for name in kwargs:
        if name not in recognized:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_api_resources" % name
            )

    # This endpoint takes no path/query/form parameters and no body;
    # only the Accept header is negotiated.
    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json', 'application/yaml',
             'application/vnd.kubernetes.protobuf',
             'application/cbor'])
    }

    return self.api_client.call_api(
        '/apis/admissionregistration.k8s.io/v1/', 'GET',
        {},        # path params
        [],        # query params
        header_params,
        body=None,
        post_params=[],
        files={},
        response_type='V1APIResourceList',  # noqa: E501
        auth_settings=['BearerToken'],
        async_req=kwargs.get('async_req'),
        _return_http_data_only=kwargs.get('_return_http_data_only'),
        _preload_content=kwargs.get('_preload_content', True),
        _request_timeout=kwargs.get('_request_timeout'),
        collection_formats={})
def list_mutating_webhook_configuration(self, **kwargs):  # noqa: E501
    """list_mutating_webhook_configuration  # noqa: E501

    List or watch objects of kind MutatingWebhookConfiguration.

    Synchronous by default; pass ``async_req=True`` to get back the
    request thread instead of the decoded result.

    :param async_req bool: execute request asynchronously
    :param str pretty: pretty-print the output when 'true'
    :param bool allow_watch_bookmarks: request BOOKMARK watch events
    :param str _continue: continuation token from a previous list call
    :param str field_selector: restrict results by field values
    :param str label_selector: restrict results by labels
    :param int limit: maximum number of responses for a list call
    :param str resource_version: resourceVersion constraint for the read
    :param str resource_version_match: how resourceVersion is applied
    :param bool send_initial_events: emit synthetic initial watch events
    :param int timeout_seconds: timeout for the list/watch call
    :param bool watch: stream change notifications instead of listing
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
                             object without reading/decoding. Default True.
    :param _request_timeout: total timeout in seconds, or a
                             (connection, read) tuple.
    :return: V1MutatingWebhookConfigurationList (or the request thread
             when async).
    """
    # Delegate to the *_with_http_info variant, forcing data-only return.
    params = dict(kwargs, _return_http_data_only=True)
    return self.list_mutating_webhook_configuration_with_http_info(**params)  # noqa: E501
def list_mutating_webhook_configuration_with_http_info(self, **kwargs):  # noqa: E501
    """list_mutating_webhook_configuration  # noqa: E501

    Low-level variant of ``list_mutating_webhook_configuration``: same
    request, but the return value can include the HTTP status code and
    headers.

    :param async_req bool: execute request asynchronously
    :param str pretty: pretty-print the output when 'true'
    :param bool allow_watch_bookmarks: request BOOKMARK watch events
    :param str _continue: continuation token from a previous list call
    :param str field_selector: restrict results by field values
    :param str label_selector: restrict results by labels
    :param int limit: maximum number of responses for a list call
    :param str resource_version: resourceVersion constraint for the read
    :param str resource_version_match: how resourceVersion is applied
    :param bool send_initial_events: emit synthetic initial watch events
    :param int timeout_seconds: timeout for the list/watch call
    :param bool watch: stream change notifications instead of listing
    :param _return_http_data_only: response data without status code/headers
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
                             object without reading/decoding. Default True.
    :param _request_timeout: total timeout in seconds, or a
                             (connection, read) tuple.
    :return: tuple(V1MutatingWebhookConfigurationList, status_code(int),
             headers(HTTPHeaderDict)) or the request thread when async.
    """
    # (python_name, wire_name) pairs, in the order the query string is
    # assembled.  'continue' is a Python keyword, hence the underscore.
    query_arg_map = [
        ('pretty', 'pretty'),
        ('allow_watch_bookmarks', 'allowWatchBookmarks'),
        ('_continue', 'continue'),
        ('field_selector', 'fieldSelector'),
        ('label_selector', 'labelSelector'),
        ('limit', 'limit'),
        ('resource_version', 'resourceVersion'),
        ('resource_version_match', 'resourceVersionMatch'),
        ('send_initial_events', 'sendInitialEvents'),
        ('timeout_seconds', 'timeoutSeconds'),
        ('watch', 'watch'),
    ]

    recognized = {py_name for py_name, _ in query_arg_map}
    recognized.update((
        'async_req',
        '_return_http_data_only',
        '_preload_content',
        '_request_timeout',
    ))
    # Fail on the first unknown keyword argument encountered.
    for name in kwargs:
        if name not in recognized:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method list_mutating_webhook_configuration" % name
            )

    # Only parameters that were supplied with a non-None value end up in
    # the query string.
    query_params = [
        (wire_name, kwargs[py_name])
        for py_name, wire_name in query_arg_map
        if kwargs.get(py_name) is not None
    ]

    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json', 'application/yaml',
             'application/vnd.kubernetes.protobuf', 'application/cbor',
             'application/json;stream=watch',
             'application/vnd.kubernetes.protobuf;stream=watch',
             'application/cbor-seq'])
    }

    return self.api_client.call_api(
        '/apis/admissionregistration.k8s.io/v1/mutatingwebhookconfigurations', 'GET',
        {},        # path params
        query_params,
        header_params,
        body=None,
        post_params=[],
        files={},
        response_type='V1MutatingWebhookConfigurationList',  # noqa: E501
        auth_settings=['BearerToken'],
        async_req=kwargs.get('async_req'),
        _return_http_data_only=kwargs.get('_return_http_data_only'),
        _preload_content=kwargs.get('_preload_content', True),
        _request_timeout=kwargs.get('_request_timeout'),
        collection_formats={})
def list_validating_admission_policy(self, **kwargs):  # noqa: E501
    """list_validating_admission_policy  # noqa: E501

    List or watch objects of kind ValidatingAdmissionPolicy.

    Synchronous by default; pass ``async_req=True`` to get back the
    request thread instead of the decoded result.

    :param async_req bool: execute request asynchronously
    :param str pretty: pretty-print the output when 'true'
    :param bool allow_watch_bookmarks: request BOOKMARK watch events
    :param str _continue: continuation token from a previous list call
    :param str field_selector: restrict results by field values
    :param str label_selector: restrict results by labels
    :param int limit: maximum number of responses for a list call
    :param str resource_version: resourceVersion constraint for the read
    :param str resource_version_match: how resourceVersion is applied
    :param bool send_initial_events: emit synthetic initial watch events
    :param int timeout_seconds: timeout for the list/watch call
    :param bool watch: stream change notifications instead of listing
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
                             object without reading/decoding. Default True.
    :param _request_timeout: total timeout in seconds, or a
                             (connection, read) tuple.
    :return: V1ValidatingAdmissionPolicyList (or the request thread
             when async).
    """
    # Delegate to the *_with_http_info variant, forcing data-only return.
    params = dict(kwargs, _return_http_data_only=True)
    return self.list_validating_admission_policy_with_http_info(**params)  # noqa: E501
def list_validating_admission_policy_with_http_info(self, **kwargs):  # noqa: E501
    """List or watch objects of kind ValidatingAdmissionPolicy.

    Synchronous by default; pass ``async_req=True`` to get a request
    thread whose ``get()`` yields the result.

    Supported keyword arguments (all optional) mirror the Kubernetes
    list API: ``pretty``, ``allow_watch_bookmarks``, ``_continue``,
    ``field_selector``, ``label_selector``, ``limit``,
    ``resource_version``, ``resource_version_match``,
    ``send_initial_events``, ``timeout_seconds``, ``watch``, plus the
    transport controls ``_return_http_data_only``, ``_preload_content``
    and ``_request_timeout``.

    :return: tuple(V1ValidatingAdmissionPolicyList, status_code(int),
        headers(HTTPHeaderDict)), or the request thread when
        ``async_req`` is set.
    """
    # Python argument name -> wire-format query parameter name, in the
    # order the query string is assembled.
    _query_names = [
        ('pretty', 'pretty'),
        ('allow_watch_bookmarks', 'allowWatchBookmarks'),
        ('_continue', 'continue'),
        ('field_selector', 'fieldSelector'),
        ('label_selector', 'labelSelector'),
        ('limit', 'limit'),
        ('resource_version', 'resourceVersion'),
        ('resource_version_match', 'resourceVersionMatch'),
        ('send_initial_events', 'sendInitialEvents'),
        ('timeout_seconds', 'timeoutSeconds'),
        ('watch', 'watch'),
    ]
    # Transport-level options forwarded to the generic API client.
    _transport_names = (
        'async_req',
        '_return_http_data_only',
        '_preload_content',
        '_request_timeout',
    )

    _accepted = {py_name for py_name, _ in _query_names}
    _accepted.update(_transport_names)
    for key in kwargs:
        if key not in _accepted:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method list_validating_admission_policy" % key
            )
    params = dict(kwargs)

    # Only explicitly supplied, non-None values make it onto the wire.
    query_params = [
        (wire_name, params[py_name])
        for py_name, wire_name in _query_names
        if params.get(py_name) is not None
    ]

    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch', 'application/cbor-seq'])  # noqa: E501
    }

    return self.api_client.call_api(
        '/apis/admissionregistration.k8s.io/v1/validatingadmissionpolicies', 'GET',
        {},                         # no path parameters for this endpoint
        query_params,
        header_params,
        body=None,
        post_params=[],
        files={},
        response_type='V1ValidatingAdmissionPolicyList',  # noqa: E501
        auth_settings=['BearerToken'],
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={})
def list_validating_admission_policy_binding(self, **kwargs):  # noqa: E501
    """List or watch objects of kind ValidatingAdmissionPolicyBinding.

    Convenience wrapper around
    :meth:`list_validating_admission_policy_binding_with_http_info`
    that returns only the deserialized response body.

    Synchronous by default; pass ``async_req=True`` for a request
    thread whose ``get()`` yields the result. Accepts the standard
    Kubernetes list/watch keyword arguments (``pretty``,
    ``allow_watch_bookmarks``, ``_continue``, ``field_selector``,
    ``label_selector``, ``limit``, ``resource_version``,
    ``resource_version_match``, ``send_initial_events``,
    ``timeout_seconds``, ``watch``) plus ``_preload_content`` and
    ``_request_timeout``.

    :return: V1ValidatingAdmissionPolicyBindingList, or the request
        thread when ``async_req`` is set.
    """
    # Force body-only mode; status code and headers are dropped.
    kwargs['_return_http_data_only'] = True
    return self.list_validating_admission_policy_binding_with_http_info(**kwargs)  # noqa: E501
def list_validating_admission_policy_binding_with_http_info(self, **kwargs):  # noqa: E501
    """List or watch objects of kind ValidatingAdmissionPolicyBinding.

    Synchronous by default; pass ``async_req=True`` to get a request
    thread whose ``get()`` yields the result.

    Supported keyword arguments (all optional) mirror the Kubernetes
    list API: ``pretty``, ``allow_watch_bookmarks``, ``_continue``,
    ``field_selector``, ``label_selector``, ``limit``,
    ``resource_version``, ``resource_version_match``,
    ``send_initial_events``, ``timeout_seconds``, ``watch``, plus the
    transport controls ``_return_http_data_only``, ``_preload_content``
    and ``_request_timeout``.

    :return: tuple(V1ValidatingAdmissionPolicyBindingList,
        status_code(int), headers(HTTPHeaderDict)), or the request
        thread when ``async_req`` is set.
    """
    # Python argument name -> wire-format query parameter name, in the
    # order the query string is assembled.
    _query_names = [
        ('pretty', 'pretty'),
        ('allow_watch_bookmarks', 'allowWatchBookmarks'),
        ('_continue', 'continue'),
        ('field_selector', 'fieldSelector'),
        ('label_selector', 'labelSelector'),
        ('limit', 'limit'),
        ('resource_version', 'resourceVersion'),
        ('resource_version_match', 'resourceVersionMatch'),
        ('send_initial_events', 'sendInitialEvents'),
        ('timeout_seconds', 'timeoutSeconds'),
        ('watch', 'watch'),
    ]
    # Transport-level options forwarded to the generic API client.
    _transport_names = (
        'async_req',
        '_return_http_data_only',
        '_preload_content',
        '_request_timeout',
    )

    _accepted = {py_name for py_name, _ in _query_names}
    _accepted.update(_transport_names)
    for key in kwargs:
        if key not in _accepted:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method list_validating_admission_policy_binding" % key
            )
    params = dict(kwargs)

    # Only explicitly supplied, non-None values make it onto the wire.
    query_params = [
        (wire_name, params[py_name])
        for py_name, wire_name in _query_names
        if params.get(py_name) is not None
    ]

    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch', 'application/cbor-seq'])  # noqa: E501
    }

    return self.api_client.call_api(
        '/apis/admissionregistration.k8s.io/v1/validatingadmissionpolicybindings', 'GET',
        {},                         # no path parameters for this endpoint
        query_params,
        header_params,
        body=None,
        post_params=[],
        files={},
        response_type='V1ValidatingAdmissionPolicyBindingList',  # noqa: E501
        auth_settings=['BearerToken'],
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={})
def list_validating_webhook_configuration(self, **kwargs): # noqa: E501
"""list_validating_webhook_configuration # noqa: E501
list or watch objects of kind ValidatingWebhookConfiguration # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_validating_webhook_configuration(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
:param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
:param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1ValidatingWebhookConfigurationList
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.list_validating_webhook_configuration_with_http_info(**kwargs) # noqa: E501
def list_validating_webhook_configuration_with_http_info(self, **kwargs): # noqa: E501
"""list_validating_webhook_configuration # noqa: E501
list or watch objects of kind ValidatingWebhookConfiguration # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_validating_webhook_configuration_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
:param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
:param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1ValidatingWebhookConfigurationList, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'pretty',
'allow_watch_bookmarks',
'_continue',
'field_selector',
'label_selector',
'limit',
'resource_version',
'resource_version_match',
'send_initial_events',
'timeout_seconds',
'watch'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method list_validating_webhook_configuration" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if 'allow_watch_bookmarks' in local_var_params and local_var_params['allow_watch_bookmarks'] is not None: # noqa: E501
query_params.append(('allowWatchBookmarks', local_var_params['allow_watch_bookmarks'])) # noqa: E501
if '_continue' in local_var_params and local_var_params['_continue'] is not None: # noqa: E501
query_params.append(('continue', local_var_params['_continue'])) # noqa: E501
if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None: # noqa: E501
query_params.append(('fieldSelector', local_var_params['field_selector'])) # noqa: E501
if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None: # noqa: E501
query_params.append(('labelSelector', local_var_params['label_selector'])) # noqa: E501
if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501
query_params.append(('limit', local_var_params['limit'])) # noqa: E501
if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None: # noqa: E501
query_params.append(('resourceVersion', local_var_params['resource_version'])) # noqa: E501
if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None: # noqa: E501
query_params.append(('resourceVersionMatch', local_var_params['resource_version_match'])) # noqa: E501
if 'send_initial_events' in local_var_params and local_var_params['send_initial_events'] is not None: # noqa: E501
query_params.append(('sendInitialEvents', local_var_params['send_initial_events'])) # noqa: E501
if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None: # noqa: E501
query_params.append(('timeoutSeconds', local_var_params['timeout_seconds'])) # noqa: E501
if 'watch' in local_var_params and local_var_params['watch'] is not None: # noqa: E501
query_params.append(('watch', local_var_params['watch'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch', 'application/cbor-seq']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/admissionregistration.k8s.io/v1/validatingwebhookconfigurations', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1ValidatingWebhookConfigurationList', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def patch_mutating_webhook_configuration(self, name, body, **kwargs): # noqa: E501
"""patch_mutating_webhook_configuration # noqa: E501
partially update the specified MutatingWebhookConfiguration # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_mutating_webhook_configuration(name, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the MutatingWebhookConfiguration (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
:param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
:param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1MutatingWebhookConfiguration
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.patch_mutating_webhook_configuration_with_http_info(name, body, **kwargs) # noqa: E501
def patch_mutating_webhook_configuration_with_http_info(self, name, body, **kwargs): # noqa: E501
"""patch_mutating_webhook_configuration # noqa: E501
partially update the specified MutatingWebhookConfiguration # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_mutating_webhook_configuration_with_http_info(name, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the MutatingWebhookConfiguration (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
:param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
:param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1MutatingWebhookConfiguration, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'name',
'body',
'pretty',
'dry_run',
'field_manager',
'field_validation',
'force'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method patch_mutating_webhook_configuration" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'name' is set
if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
local_var_params['name'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `name` when calling `patch_mutating_webhook_configuration`") # noqa: E501
# verify the required parameter 'body' is set
if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
local_var_params['body'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `body` when calling `patch_mutating_webhook_configuration`") # noqa: E501
collection_formats = {}
path_params = {}
if 'name' in local_var_params:
path_params['name'] = local_var_params['name'] # noqa: E501
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
if 'force' in local_var_params and local_var_params['force'] is not None: # noqa: E501
query_params.append(('force', local_var_params['force'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json', 'application/apply-patch+yaml', 'application/apply-patch+cbor']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/admissionregistration.k8s.io/v1/mutatingwebhookconfigurations/{name}', 'PATCH',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1MutatingWebhookConfiguration', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def patch_validating_admission_policy(self, name, body, **kwargs): # noqa: E501
"""patch_validating_admission_policy # noqa: E501
partially update the specified ValidatingAdmissionPolicy # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_validating_admission_policy(name, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the ValidatingAdmissionPolicy (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
:param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
:param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1ValidatingAdmissionPolicy
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.patch_validating_admission_policy_with_http_info(name, body, **kwargs) # noqa: E501
def patch_validating_admission_policy_with_http_info(self, name, body, **kwargs): # noqa: E501
"""patch_validating_admission_policy # noqa: E501
partially update the specified ValidatingAdmissionPolicy # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_validating_admission_policy_with_http_info(name, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the ValidatingAdmissionPolicy (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
:param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
:param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1ValidatingAdmissionPolicy, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'name',
'body',
'pretty',
'dry_run',
'field_manager',
'field_validation',
'force'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method patch_validating_admission_policy" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'name' is set
if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
local_var_params['name'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `name` when calling `patch_validating_admission_policy`") # noqa: E501
# verify the required parameter 'body' is set
if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
local_var_params['body'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `body` when calling `patch_validating_admission_policy`") # noqa: E501
collection_formats = {}
path_params = {}
if 'name' in local_var_params:
path_params['name'] = local_var_params['name'] # noqa: E501
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
if 'force' in local_var_params and local_var_params['force'] is not None: # noqa: E501
query_params.append(('force', local_var_params['force'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json', 'application/apply-patch+yaml', 'application/apply-patch+cbor']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/admissionregistration.k8s.io/v1/validatingadmissionpolicies/{name}', 'PATCH',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1ValidatingAdmissionPolicy', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def patch_validating_admission_policy_binding(self, name, body, **kwargs): # noqa: E501
"""patch_validating_admission_policy_binding # noqa: E501
partially update the specified ValidatingAdmissionPolicyBinding # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_validating_admission_policy_binding(name, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the ValidatingAdmissionPolicyBinding (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
:param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
:param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1ValidatingAdmissionPolicyBinding
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.patch_validating_admission_policy_binding_with_http_info(name, body, **kwargs) # noqa: E501
def patch_validating_admission_policy_binding_with_http_info(self, name, body, **kwargs): # noqa: E501
"""patch_validating_admission_policy_binding # noqa: E501
partially update the specified ValidatingAdmissionPolicyBinding # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_validating_admission_policy_binding_with_http_info(name, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the ValidatingAdmissionPolicyBinding (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
:param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
:param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1ValidatingAdmissionPolicyBinding, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'name',
'body',
'pretty',
'dry_run',
'field_manager',
'field_validation',
'force'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method patch_validating_admission_policy_binding" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'name' is set
if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
local_var_params['name'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `name` when calling `patch_validating_admission_policy_binding`") # noqa: E501
# verify the required parameter 'body' is set
if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
local_var_params['body'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `body` when calling `patch_validating_admission_policy_binding`") # noqa: E501
collection_formats = {}
path_params = {}
if 'name' in local_var_params:
path_params['name'] = local_var_params['name'] # noqa: E501
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
if 'force' in local_var_params and local_var_params['force'] is not None: # noqa: E501
query_params.append(('force', local_var_params['force'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json', 'application/apply-patch+yaml', 'application/apply-patch+cbor']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/admissionregistration.k8s.io/v1/validatingadmissionpolicybindings/{name}', 'PATCH',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1ValidatingAdmissionPolicyBinding', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def patch_validating_admission_policy_status(self, name, body, **kwargs): # noqa: E501
"""patch_validating_admission_policy_status # noqa: E501
partially update status of the specified ValidatingAdmissionPolicy # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_validating_admission_policy_status(name, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the ValidatingAdmissionPolicy (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
:param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
:param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1ValidatingAdmissionPolicy
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.patch_validating_admission_policy_status_with_http_info(name, body, **kwargs) # noqa: E501
def patch_validating_admission_policy_status_with_http_info(self, name, body, **kwargs): # noqa: E501
"""patch_validating_admission_policy_status # noqa: E501
partially update status of the specified ValidatingAdmissionPolicy # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_validating_admission_policy_status_with_http_info(name, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the ValidatingAdmissionPolicy (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
:param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
:param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1ValidatingAdmissionPolicy, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'name',
'body',
'pretty',
'dry_run',
'field_manager',
'field_validation',
'force'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method patch_validating_admission_policy_status" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'name' is set
if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
local_var_params['name'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `name` when calling `patch_validating_admission_policy_status`") # noqa: E501
# verify the required parameter 'body' is set
if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
local_var_params['body'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `body` when calling `patch_validating_admission_policy_status`") # noqa: E501
collection_formats = {}
path_params = {}
if 'name' in local_var_params:
path_params['name'] = local_var_params['name'] # noqa: E501
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
if 'force' in local_var_params and local_var_params['force'] is not None: # noqa: E501
query_params.append(('force', local_var_params['force'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json', 'application/apply-patch+yaml', 'application/apply-patch+cbor']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/admissionregistration.k8s.io/v1/validatingadmissionpolicies/{name}/status', 'PATCH',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1ValidatingAdmissionPolicy', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def patch_validating_webhook_configuration(self, name, body, **kwargs): # noqa: E501
"""patch_validating_webhook_configuration # noqa: E501
partially update the specified ValidatingWebhookConfiguration # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_validating_webhook_configuration(name, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the ValidatingWebhookConfiguration (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
:param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
:param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1ValidatingWebhookConfiguration
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.patch_validating_webhook_configuration_with_http_info(name, body, **kwargs) # noqa: E501
def patch_validating_webhook_configuration_with_http_info(self, name, body, **kwargs):  # noqa: E501
    """patch_validating_webhook_configuration  # noqa: E501

    Partially update the specified ValidatingWebhookConfiguration and
    return the full ``(data, status_code, headers)`` response triple.

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.patch_validating_webhook_configuration_with_http_info(name, body, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the ValidatingWebhookConfiguration (required)
    :param object body: the patch document (required)
    :param str pretty: if 'true', the output is pretty printed
    :param str dry_run: when present, modifications are not persisted; 'All' processes every dry run stage
    :param str field_manager: name of the actor making these changes; required for apply patches, optional otherwise
    :param str field_validation: how the server treats unknown/duplicate fields: 'Ignore', 'Warn' or 'Strict'
    :param bool force: re-acquire fields owned by other managers; apply patches only
    :param _return_http_data_only: response data without head status code and headers
    :param _preload_content: if False, return the raw urllib3.HTTPResponse without reading/decoding (default True)
    :param _request_timeout: total timeout, or a (connection, read) tuple
    :return: tuple(V1ValidatingWebhookConfiguration, status_code(int), headers(HTTPHeaderDict))
        If the method is called asynchronously, returns the request thread.
    """
    # Keyword arguments this endpoint understands; anything else is a
    # caller bug and is rejected loudly.
    accepted = {
        'name', 'body', 'pretty', 'dry_run', 'field_manager',
        'field_validation', 'force',
        'async_req', '_return_http_data_only', '_preload_content',
        '_request_timeout',
    }
    params = {'name': name, 'body': body}
    for key, val in kwargs.items():
        if key not in accepted:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method patch_validating_webhook_configuration" % key
            )
        params[key] = val

    # Client-side validation of required parameters.
    if self.api_client.client_side_validation and params.get('name') is None:  # noqa: E501
        raise ApiValueError("Missing the required parameter `name` when calling `patch_validating_webhook_configuration`")  # noqa: E501
    if self.api_client.client_side_validation and params.get('body') is None:  # noqa: E501
        raise ApiValueError("Missing the required parameter `body` when calling `patch_validating_webhook_configuration`")  # noqa: E501

    path_params = {'name': params['name']}

    # Map python_name -> wireName, preserving the generator's ordering.
    query_params = []
    for wire, local in (('pretty', 'pretty'),
                        ('dryRun', 'dry_run'),
                        ('fieldManager', 'field_manager'),
                        ('fieldValidation', 'field_validation'),
                        ('force', 'force')):
        if params.get(local) is not None:
            query_params.append((wire, params[local]))

    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor']),  # noqa: E501
        'Content-Type': self.api_client.select_header_content_type(
            ['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json', 'application/apply-patch+yaml', 'application/apply-patch+cbor']),  # noqa: E501
    }

    return self.api_client.call_api(
        '/apis/admissionregistration.k8s.io/v1/validatingwebhookconfigurations/{name}', 'PATCH',
        path_params,
        query_params,
        header_params,
        body=params['body'],
        post_params=[],
        files={},
        response_type='V1ValidatingWebhookConfiguration',  # noqa: E501
        auth_settings=['BearerToken'],
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={})
def read_mutating_webhook_configuration(self, name, **kwargs):  # noqa: E501
    """read_mutating_webhook_configuration  # noqa: E501

    Read the specified MutatingWebhookConfiguration.  Thin wrapper around
    ``read_mutating_webhook_configuration_with_http_info`` that returns
    only the deserialized body (status code and headers are dropped).

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.read_mutating_webhook_configuration(name, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the MutatingWebhookConfiguration (required)
    :param str pretty: if 'true', the output is pretty printed
    :param _preload_content: if False, return the raw urllib3.HTTPResponse without reading/decoding (default True)
    :param _request_timeout: total timeout, or a (connection, read) tuple
    :return: V1MutatingWebhookConfiguration
        If the method is called asynchronously, returns the request thread.
    """
    forwarded = dict(kwargs, _return_http_data_only=True)
    return self.read_mutating_webhook_configuration_with_http_info(name, **forwarded)  # noqa: E501
def read_mutating_webhook_configuration_with_http_info(self, name, **kwargs):  # noqa: E501
    """read_mutating_webhook_configuration  # noqa: E501

    Read the specified MutatingWebhookConfiguration and return the full
    ``(data, status_code, headers)`` response triple.

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.read_mutating_webhook_configuration_with_http_info(name, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the MutatingWebhookConfiguration (required)
    :param str pretty: if 'true', the output is pretty printed
    :param _return_http_data_only: response data without head status code and headers
    :param _preload_content: if False, return the raw urllib3.HTTPResponse without reading/decoding (default True)
    :param _request_timeout: total timeout, or a (connection, read) tuple
    :return: tuple(V1MutatingWebhookConfiguration, status_code(int), headers(HTTPHeaderDict))
        If the method is called asynchronously, returns the request thread.
    """
    # Keyword arguments this endpoint understands.
    accepted = {
        'name', 'pretty',
        'async_req', '_return_http_data_only', '_preload_content',
        '_request_timeout',
    }
    params = {'name': name}
    for key, val in kwargs.items():
        if key not in accepted:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method read_mutating_webhook_configuration" % key
            )
        params[key] = val

    # Client-side validation of the required 'name' parameter.
    if self.api_client.client_side_validation and params.get('name') is None:  # noqa: E501
        raise ApiValueError("Missing the required parameter `name` when calling `read_mutating_webhook_configuration`")  # noqa: E501

    path_params = {'name': params['name']}

    query_params = []
    if params.get('pretty') is not None:
        query_params.append(('pretty', params['pretty']))

    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor']),  # noqa: E501
    }

    return self.api_client.call_api(
        '/apis/admissionregistration.k8s.io/v1/mutatingwebhookconfigurations/{name}', 'GET',
        path_params,
        query_params,
        header_params,
        body=None,
        post_params=[],
        files={},
        response_type='V1MutatingWebhookConfiguration',  # noqa: E501
        auth_settings=['BearerToken'],
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={})
def read_validating_admission_policy(self, name, **kwargs):  # noqa: E501
    """read_validating_admission_policy  # noqa: E501

    Read the specified ValidatingAdmissionPolicy.  Thin wrapper around
    ``read_validating_admission_policy_with_http_info`` that returns only
    the deserialized body (status code and headers are dropped).

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.read_validating_admission_policy(name, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the ValidatingAdmissionPolicy (required)
    :param str pretty: if 'true', the output is pretty printed
    :param _preload_content: if False, return the raw urllib3.HTTPResponse without reading/decoding (default True)
    :param _request_timeout: total timeout, or a (connection, read) tuple
    :return: V1ValidatingAdmissionPolicy
        If the method is called asynchronously, returns the request thread.
    """
    forwarded = dict(kwargs, _return_http_data_only=True)
    return self.read_validating_admission_policy_with_http_info(name, **forwarded)  # noqa: E501
def read_validating_admission_policy_with_http_info(self, name, **kwargs):  # noqa: E501
    """read_validating_admission_policy  # noqa: E501

    Read the specified ValidatingAdmissionPolicy and return the full
    ``(data, status_code, headers)`` response triple.

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.read_validating_admission_policy_with_http_info(name, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the ValidatingAdmissionPolicy (required)
    :param str pretty: if 'true', the output is pretty printed
    :param _return_http_data_only: response data without head status code and headers
    :param _preload_content: if False, return the raw urllib3.HTTPResponse without reading/decoding (default True)
    :param _request_timeout: total timeout, or a (connection, read) tuple
    :return: tuple(V1ValidatingAdmissionPolicy, status_code(int), headers(HTTPHeaderDict))
        If the method is called asynchronously, returns the request thread.
    """
    # Keyword arguments this endpoint understands.
    accepted = {
        'name', 'pretty',
        'async_req', '_return_http_data_only', '_preload_content',
        '_request_timeout',
    }
    params = {'name': name}
    for key, val in kwargs.items():
        if key not in accepted:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method read_validating_admission_policy" % key
            )
        params[key] = val

    # Client-side validation of the required 'name' parameter.
    if self.api_client.client_side_validation and params.get('name') is None:  # noqa: E501
        raise ApiValueError("Missing the required parameter `name` when calling `read_validating_admission_policy`")  # noqa: E501

    path_params = {'name': params['name']}

    query_params = []
    if params.get('pretty') is not None:
        query_params.append(('pretty', params['pretty']))

    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor']),  # noqa: E501
    }

    return self.api_client.call_api(
        '/apis/admissionregistration.k8s.io/v1/validatingadmissionpolicies/{name}', 'GET',
        path_params,
        query_params,
        header_params,
        body=None,
        post_params=[],
        files={},
        response_type='V1ValidatingAdmissionPolicy',  # noqa: E501
        auth_settings=['BearerToken'],
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={})
def read_validating_admission_policy_binding(self, name, **kwargs):  # noqa: E501
    """read_validating_admission_policy_binding  # noqa: E501

    Read the specified ValidatingAdmissionPolicyBinding.  Thin wrapper
    around ``read_validating_admission_policy_binding_with_http_info``
    that returns only the deserialized body (status code and headers are
    dropped).

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.read_validating_admission_policy_binding(name, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the ValidatingAdmissionPolicyBinding (required)
    :param str pretty: if 'true', the output is pretty printed
    :param _preload_content: if False, return the raw urllib3.HTTPResponse without reading/decoding (default True)
    :param _request_timeout: total timeout, or a (connection, read) tuple
    :return: V1ValidatingAdmissionPolicyBinding
        If the method is called asynchronously, returns the request thread.
    """
    forwarded = dict(kwargs, _return_http_data_only=True)
    return self.read_validating_admission_policy_binding_with_http_info(name, **forwarded)  # noqa: E501
def read_validating_admission_policy_binding_with_http_info(self, name, **kwargs):  # noqa: E501
    """read_validating_admission_policy_binding  # noqa: E501

    Read the specified ValidatingAdmissionPolicyBinding and return the
    full ``(data, status_code, headers)`` response triple.

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.read_validating_admission_policy_binding_with_http_info(name, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the ValidatingAdmissionPolicyBinding (required)
    :param str pretty: if 'true', the output is pretty printed
    :param _return_http_data_only: response data without head status code and headers
    :param _preload_content: if False, return the raw urllib3.HTTPResponse without reading/decoding (default True)
    :param _request_timeout: total timeout, or a (connection, read) tuple
    :return: tuple(V1ValidatingAdmissionPolicyBinding, status_code(int), headers(HTTPHeaderDict))
        If the method is called asynchronously, returns the request thread.
    """
    # Keyword arguments this endpoint understands.
    accepted = {
        'name', 'pretty',
        'async_req', '_return_http_data_only', '_preload_content',
        '_request_timeout',
    }
    params = {'name': name}
    for key, val in kwargs.items():
        if key not in accepted:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method read_validating_admission_policy_binding" % key
            )
        params[key] = val

    # Client-side validation of the required 'name' parameter.
    if self.api_client.client_side_validation and params.get('name') is None:  # noqa: E501
        raise ApiValueError("Missing the required parameter `name` when calling `read_validating_admission_policy_binding`")  # noqa: E501

    path_params = {'name': params['name']}

    query_params = []
    if params.get('pretty') is not None:
        query_params.append(('pretty', params['pretty']))

    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor']),  # noqa: E501
    }

    return self.api_client.call_api(
        '/apis/admissionregistration.k8s.io/v1/validatingadmissionpolicybindings/{name}', 'GET',
        path_params,
        query_params,
        header_params,
        body=None,
        post_params=[],
        files={},
        response_type='V1ValidatingAdmissionPolicyBinding',  # noqa: E501
        auth_settings=['BearerToken'],
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={})
def read_validating_admission_policy_status(self, name, **kwargs):  # noqa: E501
    """read_validating_admission_policy_status  # noqa: E501

    Read status of the specified ValidatingAdmissionPolicy.  Thin wrapper
    around ``read_validating_admission_policy_status_with_http_info``
    that returns only the deserialized body (status code and headers are
    dropped).

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.read_validating_admission_policy_status(name, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the ValidatingAdmissionPolicy (required)
    :param str pretty: if 'true', the output is pretty printed
    :param _preload_content: if False, return the raw urllib3.HTTPResponse without reading/decoding (default True)
    :param _request_timeout: total timeout, or a (connection, read) tuple
    :return: V1ValidatingAdmissionPolicy
        If the method is called asynchronously, returns the request thread.
    """
    forwarded = dict(kwargs, _return_http_data_only=True)
    return self.read_validating_admission_policy_status_with_http_info(name, **forwarded)  # noqa: E501
def read_validating_admission_policy_status_with_http_info(self, name, **kwargs):  # noqa: E501
    """read_validating_admission_policy_status  # noqa: E501

    Read status of the specified ValidatingAdmissionPolicy and return the
    full ``(data, status_code, headers)`` response triple.

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.read_validating_admission_policy_status_with_http_info(name, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the ValidatingAdmissionPolicy (required)
    :param str pretty: if 'true', the output is pretty printed
    :param _return_http_data_only: response data without head status code and headers
    :param _preload_content: if False, return the raw urllib3.HTTPResponse without reading/decoding (default True)
    :param _request_timeout: total timeout, or a (connection, read) tuple
    :return: tuple(V1ValidatingAdmissionPolicy, status_code(int), headers(HTTPHeaderDict))
        If the method is called asynchronously, returns the request thread.
    """
    # Keyword arguments this endpoint understands.
    accepted = {
        'name', 'pretty',
        'async_req', '_return_http_data_only', '_preload_content',
        '_request_timeout',
    }
    params = {'name': name}
    for key, val in kwargs.items():
        if key not in accepted:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method read_validating_admission_policy_status" % key
            )
        params[key] = val

    # Client-side validation of the required 'name' parameter.
    if self.api_client.client_side_validation and params.get('name') is None:  # noqa: E501
        raise ApiValueError("Missing the required parameter `name` when calling `read_validating_admission_policy_status`")  # noqa: E501

    path_params = {'name': params['name']}

    query_params = []
    if params.get('pretty') is not None:
        query_params.append(('pretty', params['pretty']))

    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor']),  # noqa: E501
    }

    return self.api_client.call_api(
        '/apis/admissionregistration.k8s.io/v1/validatingadmissionpolicies/{name}/status', 'GET',
        path_params,
        query_params,
        header_params,
        body=None,
        post_params=[],
        files={},
        response_type='V1ValidatingAdmissionPolicy',  # noqa: E501
        auth_settings=['BearerToken'],
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={})
def read_validating_webhook_configuration(self, name, **kwargs):  # noqa: E501
    """read_validating_webhook_configuration  # noqa: E501

    Read the specified ValidatingWebhookConfiguration.  Thin wrapper
    around ``read_validating_webhook_configuration_with_http_info`` that
    returns only the deserialized body (status code and headers are
    dropped).

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.read_validating_webhook_configuration(name, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the ValidatingWebhookConfiguration (required)
    :param str pretty: if 'true', the output is pretty printed
    :param _preload_content: if False, return the raw urllib3.HTTPResponse without reading/decoding (default True)
    :param _request_timeout: total timeout, or a (connection, read) tuple
    :return: V1ValidatingWebhookConfiguration
        If the method is called asynchronously, returns the request thread.
    """
    forwarded = dict(kwargs, _return_http_data_only=True)
    return self.read_validating_webhook_configuration_with_http_info(name, **forwarded)  # noqa: E501
def read_validating_webhook_configuration_with_http_info(self, name, **kwargs):  # noqa: E501
    """read_validating_webhook_configuration  # noqa: E501

    Read the specified ValidatingWebhookConfiguration and return the full
    ``(data, status_code, headers)`` response triple.

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.read_validating_webhook_configuration_with_http_info(name, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the ValidatingWebhookConfiguration (required)
    :param str pretty: if 'true', the output is pretty printed
    :param _return_http_data_only: response data without head status code and headers
    :param _preload_content: if False, return the raw urllib3.HTTPResponse without reading/decoding (default True)
    :param _request_timeout: total timeout, or a (connection, read) tuple
    :return: tuple(V1ValidatingWebhookConfiguration, status_code(int), headers(HTTPHeaderDict))
        If the method is called asynchronously, returns the request thread.
    """
    # Keyword arguments this endpoint understands.
    accepted = {
        'name', 'pretty',
        'async_req', '_return_http_data_only', '_preload_content',
        '_request_timeout',
    }
    params = {'name': name}
    for key, val in kwargs.items():
        if key not in accepted:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method read_validating_webhook_configuration" % key
            )
        params[key] = val

    # Client-side validation of the required 'name' parameter.
    if self.api_client.client_side_validation and params.get('name') is None:  # noqa: E501
        raise ApiValueError("Missing the required parameter `name` when calling `read_validating_webhook_configuration`")  # noqa: E501

    path_params = {'name': params['name']}

    query_params = []
    if params.get('pretty') is not None:
        query_params.append(('pretty', params['pretty']))

    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor']),  # noqa: E501
    }

    return self.api_client.call_api(
        '/apis/admissionregistration.k8s.io/v1/validatingwebhookconfigurations/{name}', 'GET',
        path_params,
        query_params,
        header_params,
        body=None,
        post_params=[],
        files={},
        response_type='V1ValidatingWebhookConfiguration',  # noqa: E501
        auth_settings=['BearerToken'],
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={})
def replace_mutating_webhook_configuration(self, name, body, **kwargs):  # noqa: E501
    """replace_mutating_webhook_configuration  # noqa: E501

    Replace the specified MutatingWebhookConfiguration.  Thin wrapper
    around ``replace_mutating_webhook_configuration_with_http_info`` that
    returns only the deserialized body (status code and headers are
    dropped).

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.replace_mutating_webhook_configuration(name, body, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the MutatingWebhookConfiguration (required)
    :param V1MutatingWebhookConfiguration body: the replacement object (required)
    :param str pretty: if 'true', the output is pretty printed
    :param str dry_run: when present, modifications are not persisted; 'All' processes every dry run stage
    :param str field_manager: name of the actor making these changes
    :param str field_validation: how the server treats unknown/duplicate fields: 'Ignore', 'Warn' or 'Strict'
    :param _preload_content: if False, return the raw urllib3.HTTPResponse without reading/decoding (default True)
    :param _request_timeout: total timeout, or a (connection, read) tuple
    :return: V1MutatingWebhookConfiguration
        If the method is called asynchronously, returns the request thread.
    """
    forwarded = dict(kwargs, _return_http_data_only=True)
    return self.replace_mutating_webhook_configuration_with_http_info(name, body, **forwarded)  # noqa: E501
def replace_mutating_webhook_configuration_with_http_info(self, name, body, **kwargs): # noqa: E501
"""replace_mutating_webhook_configuration # noqa: E501
replace the specified MutatingWebhookConfiguration # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_mutating_webhook_configuration_with_http_info(name, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the MutatingWebhookConfiguration (required)
:param V1MutatingWebhookConfiguration body: (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1MutatingWebhookConfiguration, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'name',
'body',
'pretty',
'dry_run',
'field_manager',
'field_validation'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method replace_mutating_webhook_configuration" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'name' is set
if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
local_var_params['name'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `name` when calling `replace_mutating_webhook_configuration`") # noqa: E501
# verify the required parameter 'body' is set
if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
local_var_params['body'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `body` when calling `replace_mutating_webhook_configuration`") # noqa: E501
collection_formats = {}
path_params = {}
if 'name' in local_var_params:
path_params['name'] = local_var_params['name'] # noqa: E501
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/admissionregistration.k8s.io/v1/mutatingwebhookconfigurations/{name}', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1MutatingWebhookConfiguration', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def replace_validating_admission_policy(self, name, body, **kwargs): # noqa: E501
"""replace_validating_admission_policy # noqa: E501
replace the specified ValidatingAdmissionPolicy # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_validating_admission_policy(name, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the ValidatingAdmissionPolicy (required)
:param V1ValidatingAdmissionPolicy body: (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1ValidatingAdmissionPolicy
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.replace_validating_admission_policy_with_http_info(name, body, **kwargs) # noqa: E501
def replace_validating_admission_policy_with_http_info(self, name, body, **kwargs): # noqa: E501
"""replace_validating_admission_policy # noqa: E501
replace the specified ValidatingAdmissionPolicy # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_validating_admission_policy_with_http_info(name, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the ValidatingAdmissionPolicy (required)
:param V1ValidatingAdmissionPolicy body: (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1ValidatingAdmissionPolicy, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'name',
'body',
'pretty',
'dry_run',
'field_manager',
'field_validation'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method replace_validating_admission_policy" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'name' is set
if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
local_var_params['name'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `name` when calling `replace_validating_admission_policy`") # noqa: E501
# verify the required parameter 'body' is set
if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
local_var_params['body'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `body` when calling `replace_validating_admission_policy`") # noqa: E501
collection_formats = {}
path_params = {}
if 'name' in local_var_params:
path_params['name'] = local_var_params['name'] # noqa: E501
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/admissionregistration.k8s.io/v1/validatingadmissionpolicies/{name}', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1ValidatingAdmissionPolicy', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def replace_validating_admission_policy_binding(self, name, body, **kwargs): # noqa: E501
"""replace_validating_admission_policy_binding # noqa: E501
replace the specified ValidatingAdmissionPolicyBinding # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_validating_admission_policy_binding(name, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the ValidatingAdmissionPolicyBinding (required)
:param V1ValidatingAdmissionPolicyBinding body: (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1ValidatingAdmissionPolicyBinding
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.replace_validating_admission_policy_binding_with_http_info(name, body, **kwargs) # noqa: E501
def replace_validating_admission_policy_binding_with_http_info(self, name, body, **kwargs): # noqa: E501
"""replace_validating_admission_policy_binding # noqa: E501
replace the specified ValidatingAdmissionPolicyBinding # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_validating_admission_policy_binding_with_http_info(name, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the ValidatingAdmissionPolicyBinding (required)
:param V1ValidatingAdmissionPolicyBinding body: (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1ValidatingAdmissionPolicyBinding, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'name',
'body',
'pretty',
'dry_run',
'field_manager',
'field_validation'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method replace_validating_admission_policy_binding" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'name' is set
if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
local_var_params['name'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `name` when calling `replace_validating_admission_policy_binding`") # noqa: E501
# verify the required parameter 'body' is set
if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
local_var_params['body'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `body` when calling `replace_validating_admission_policy_binding`") # noqa: E501
collection_formats = {}
path_params = {}
if 'name' in local_var_params:
path_params['name'] = local_var_params['name'] # noqa: E501
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/admissionregistration.k8s.io/v1/validatingadmissionpolicybindings/{name}', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1ValidatingAdmissionPolicyBinding', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def replace_validating_admission_policy_status(self, name, body, **kwargs): # noqa: E501
"""replace_validating_admission_policy_status # noqa: E501
replace status of the specified ValidatingAdmissionPolicy # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_validating_admission_policy_status(name, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the ValidatingAdmissionPolicy (required)
:param V1ValidatingAdmissionPolicy body: (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1ValidatingAdmissionPolicy
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.replace_validating_admission_policy_status_with_http_info(name, body, **kwargs) # noqa: E501
def replace_validating_admission_policy_status_with_http_info(self, name, body, **kwargs): # noqa: E501
"""replace_validating_admission_policy_status # noqa: E501
replace status of the specified ValidatingAdmissionPolicy # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_validating_admission_policy_status_with_http_info(name, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the ValidatingAdmissionPolicy (required)
:param V1ValidatingAdmissionPolicy body: (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1ValidatingAdmissionPolicy, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'name',
'body',
'pretty',
'dry_run',
'field_manager',
'field_validation'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method replace_validating_admission_policy_status" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'name' is set
if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
local_var_params['name'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `name` when calling `replace_validating_admission_policy_status`") # noqa: E501
# verify the required parameter 'body' is set
if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
local_var_params['body'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `body` when calling `replace_validating_admission_policy_status`") # noqa: E501
collection_formats = {}
path_params = {}
if 'name' in local_var_params:
path_params['name'] = local_var_params['name'] # noqa: E501
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/admissionregistration.k8s.io/v1/validatingadmissionpolicies/{name}/status', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1ValidatingAdmissionPolicy', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def replace_validating_webhook_configuration(self, name, body, **kwargs): # noqa: E501
"""replace_validating_webhook_configuration # noqa: E501
replace the specified ValidatingWebhookConfiguration # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_validating_webhook_configuration(name, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the ValidatingWebhookConfiguration (required)
:param V1ValidatingWebhookConfiguration body: (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1ValidatingWebhookConfiguration
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.replace_validating_webhook_configuration_with_http_info(name, body, **kwargs) # noqa: E501
    def replace_validating_webhook_configuration_with_http_info(self, name, body, **kwargs):  # noqa: E501
        """replace_validating_webhook_configuration  # noqa: E501
        replace the specified ValidatingWebhookConfiguration  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.replace_validating_webhook_configuration_with_http_info(name, body, async_req=True)
        >>> result = thread.get()
        :param async_req bool: execute request asynchronously
        :param str name: name of the ValidatingWebhookConfiguration (required)
        :param V1ValidatingWebhookConfiguration body: (required)
        :param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
        :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(V1ValidatingWebhookConfiguration, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # NOTE: locals() is called as the very first statement on purpose, so
        # the snapshot contains exactly the declared parameters
        # (self, name, body, kwargs) and no later temporaries. Do not insert
        # code before this line.
        local_var_params = locals()
        # Names the generated API accepts for this operation.
        all_params = [
            'name',
            'body',
            'pretty',
            'dry_run',
            'field_manager',
            'field_validation'
        ]
        # Framework-level options accepted by every generated method.
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )
        # Reject unknown keyword arguments, then flatten the accepted ones
        # into local_var_params so all parameters are looked up uniformly.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method replace_validating_webhook_configuration" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'name' is set
        if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
                                                        local_var_params['name'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `name` when calling `replace_validating_webhook_configuration`")  # noqa: E501
        # verify the required parameter 'body' is set
        if self.api_client.client_side_validation and ('body' not in local_var_params or  # noqa: E501
                                                        local_var_params['body'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `body` when calling `replace_validating_webhook_configuration`")  # noqa: E501
        collection_formats = {}
        # Path parameters: substituted into the {name} placeholder of the URL.
        path_params = {}
        if 'name' in local_var_params:
            path_params['name'] = local_var_params['name']  # noqa: E501
        # Query-string parameters: only emitted when explicitly provided
        # (snake_case locals map to the server's camelCase names).
        query_params = []
        if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
            query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
        if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None:  # noqa: E501
            query_params.append(('dryRun', local_var_params['dry_run']))  # noqa: E501
        if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None:  # noqa: E501
            query_params.append(('fieldManager', local_var_params['field_manager']))  # noqa: E501
        if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None:  # noqa: E501
            query_params.append(('fieldValidation', local_var_params['field_validation']))  # noqa: E501
        header_params = {}
        form_params = []
        local_var_files = {}
        # Request body: the V1ValidatingWebhookConfiguration object to PUT.
        body_params = None
        if 'body' in local_var_params:
            body_params = local_var_params['body']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor'])  # noqa: E501
        # Authentication setting
        auth_settings = ['BearerToken']  # noqa: E501
        # Delegate the PUT request to the shared ApiClient; it handles
        # serialization, auth headers, and (optionally) async dispatch.
        return self.api_client.call_api(
            '/apis/admissionregistration.k8s.io/v1/validatingwebhookconfigurations/{name}', 'PUT',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='V1ValidatingWebhookConfiguration',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
| AdmissionregistrationV1Api |
python | weaviate__weaviate-python-client | weaviate/collections/classes/config_vector_index.py | {
"start": 490,
"end": 793
} | class ____(str, Enum):
"""Set the strategy when doing a filtered HNSW search.
Attributes:
SWEEPING: Do normal ANN search and skip nodes.
ACORN: Multi-hop search to find new candidates matching the filter.
"""
SWEEPING = "sweeping"
ACORN = "acorn"
| VectorFilterStrategy |
python | spack__spack | lib/spack/spack/vendor/macholib/mach_o.py | {
"start": 14743,
"end": 16404
} | class ____(Structure):
_fields_ = (
("segname", p_str16),
("vmaddr", p_uint32),
("vmsize", p_uint32),
("fileoff", p_uint32),
("filesize", p_uint32),
("maxprot", vm_prot_t),
("initprot", vm_prot_t),
("nsects", p_uint32), # read the section structures ?
("flags", p_uint32),
)
def describe(self):
s = {}
s["segname"] = self.segname.rstrip("\x00")
s["vmaddr"] = int(self.vmaddr)
s["vmsize"] = int(self.vmsize)
s["fileoff"] = int(self.fileoff)
s["filesize"] = int(self.filesize)
s["initprot"] = self.get_initial_virtual_memory_protections()
s["initprot_raw"] = int(self.initprot)
s["maxprot"] = self.get_max_virtual_memory_protections()
s["maxprot_raw"] = int(self.maxprot)
s["nsects"] = int(self.nsects)
s["flags"] = self.flags
return s
def get_initial_virtual_memory_protections(self):
vm = []
if self.initprot == 0:
vm.append("VM_PROT_NONE")
if self.initprot & 1:
vm.append("VM_PROT_READ")
if self.initprot & 2:
vm.append("VM_PROT_WRITE")
if self.initprot & 4:
vm.append("VM_PROT_EXECUTE")
return vm
def get_max_virtual_memory_protections(self):
vm = []
if self.maxprot == 0:
vm.append("VM_PROT_NONE")
if self.maxprot & 1:
vm.append("VM_PROT_READ")
if self.maxprot & 2:
vm.append("VM_PROT_WRITE")
if self.maxprot & 4:
vm.append("VM_PROT_EXECUTE")
return vm
| segment_command |
python | huggingface__transformers | src/transformers/models/mbart/modeling_mbart.py | {
"start": 65433,
"end": 66013
} | class ____(MBartPreTrainedModel):
"""
This wrapper class is a helper class to correctly load pretrained checkpoints when the causal language model is
used in combination with the [`EncoderDecoderModel`] framework.
"""
def __init__(self, config):
super().__init__(config)
self.decoder = MBartDecoder(config)
def forward(self, *args, **kwargs):
return self.decoder(*args, **kwargs)
# Copied from transformers.models.bart.modeling_bart.BartForCausalLM with Bart->MBart, facebook/bart-base->facebook/mbart-large-cc25
| MBartDecoderWrapper |
python | TheAlgorithms__Python | data_structures/heap/heap.py | {
"start": 445,
"end": 7390
} | class ____[T: Comparable]:
"""A Max Heap Implementation
>>> unsorted = [103, 9, 1, 7, 11, 15, 25, 201, 209, 107, 5]
>>> h = Heap()
>>> h.build_max_heap(unsorted)
>>> h
[209, 201, 25, 103, 107, 15, 1, 9, 7, 11, 5]
>>>
>>> h.extract_max()
209
>>> h
[201, 107, 25, 103, 11, 15, 1, 9, 7, 5]
>>>
>>> h.insert(100)
>>> h
[201, 107, 25, 103, 100, 15, 1, 9, 7, 5, 11]
>>>
>>> h.heap_sort()
>>> h
[1, 5, 7, 9, 11, 15, 25, 100, 103, 107, 201]
"""
def __init__(self) -> None:
self.h: list[T] = []
self.heap_size: int = 0
def __repr__(self) -> str:
return str(self.h)
def parent_index(self, child_idx: int) -> int | None:
"""
returns the parent index based on the given child index
>>> h = Heap()
>>> h.build_max_heap([103, 9, 1, 7, 11, 15, 25, 201, 209, 107, 5])
>>> h
[209, 201, 25, 103, 107, 15, 1, 9, 7, 11, 5]
>>> h.parent_index(-1) # returns none if index is <=0
>>> h.parent_index(0) # returns none if index is <=0
>>> h.parent_index(1)
0
>>> h.parent_index(2)
0
>>> h.parent_index(3)
1
>>> h.parent_index(4)
1
>>> h.parent_index(5)
2
>>> h.parent_index(10.5)
4.0
>>> h.parent_index(209.0)
104.0
>>> h.parent_index("Test")
Traceback (most recent call last):
...
TypeError: '>' not supported between instances of 'str' and 'int'
"""
if child_idx > 0:
return (child_idx - 1) // 2
return None
def left_child_idx(self, parent_idx: int) -> int | None:
"""
return the left child index if the left child exists.
if not, return None.
"""
left_child_index = 2 * parent_idx + 1
if left_child_index < self.heap_size:
return left_child_index
return None
def right_child_idx(self, parent_idx: int) -> int | None:
"""
return the right child index if the right child exists.
if not, return None.
"""
right_child_index = 2 * parent_idx + 2
if right_child_index < self.heap_size:
return right_child_index
return None
def max_heapify(self, index: int) -> None:
"""
correct a single violation of the heap property in a subtree's root.
It is the function that is responsible for restoring the property
of Max heap i.e the maximum element is always at top.
"""
if index < self.heap_size:
violation: int = index
left_child = self.left_child_idx(index)
right_child = self.right_child_idx(index)
# check which child is larger than its parent
if left_child is not None and self.h[left_child] > self.h[violation]:
violation = left_child
if right_child is not None and self.h[right_child] > self.h[violation]:
violation = right_child
# if violation indeed exists
if violation != index:
# swap to fix the violation
self.h[violation], self.h[index] = self.h[index], self.h[violation]
# fix the subsequent violation recursively if any
self.max_heapify(violation)
def build_max_heap(self, collection: Iterable[T]) -> None:
"""
build max heap from an unsorted array
>>> h = Heap()
>>> h.build_max_heap([20,40,50,20,10])
>>> h
[50, 40, 20, 20, 10]
>>> h = Heap()
>>> h.build_max_heap([1,2,3,4,5,6,7,8,9,0])
>>> h
[9, 8, 7, 4, 5, 6, 3, 2, 1, 0]
>>> h = Heap()
>>> h.build_max_heap([514,5,61,57,8,99,105])
>>> h
[514, 57, 105, 5, 8, 99, 61]
>>> h = Heap()
>>> h.build_max_heap([514,5,61.6,57,8,9.9,105])
>>> h
[514, 57, 105, 5, 8, 9.9, 61.6]
"""
self.h = list(collection)
self.heap_size = len(self.h)
if self.heap_size > 1:
# max_heapify from right to left but exclude leaves (last level)
for i in range(self.heap_size // 2 - 1, -1, -1):
self.max_heapify(i)
def extract_max(self) -> T:
"""
get and remove max from heap
>>> h = Heap()
>>> h.build_max_heap([20,40,50,20,10])
>>> h.extract_max()
50
>>> h = Heap()
>>> h.build_max_heap([514,5,61,57,8,99,105])
>>> h.extract_max()
514
>>> h = Heap()
>>> h.build_max_heap([1,2,3,4,5,6,7,8,9,0])
>>> h.extract_max()
9
"""
if self.heap_size >= 2:
me = self.h[0]
self.h[0] = self.h.pop(-1)
self.heap_size -= 1
self.max_heapify(0)
return me
elif self.heap_size == 1:
self.heap_size -= 1
return self.h.pop(-1)
else:
raise Exception("Empty heap")
def insert(self, value: T) -> None:
"""
insert a new value into the max heap
>>> h = Heap()
>>> h.insert(10)
>>> h
[10]
>>> h = Heap()
>>> h.insert(10)
>>> h.insert(10)
>>> h
[10, 10]
>>> h = Heap()
>>> h.insert(10)
>>> h.insert(10.1)
>>> h
[10.1, 10]
>>> h = Heap()
>>> h.insert(0.1)
>>> h.insert(0)
>>> h.insert(9)
>>> h.insert(5)
>>> h
[9, 5, 0.1, 0]
"""
self.h.append(value)
idx = (self.heap_size - 1) // 2
self.heap_size += 1
while idx >= 0:
self.max_heapify(idx)
idx = (idx - 1) // 2
def heap_sort(self) -> None:
size = self.heap_size
for j in range(size - 1, 0, -1):
self.h[0], self.h[j] = self.h[j], self.h[0]
self.heap_size -= 1
self.max_heapify(0)
self.heap_size = size
if __name__ == "__main__":
import doctest
# run doc test
doctest.testmod()
# demo
for unsorted in [
[0],
[2],
[3, 5],
[5, 3],
[5, 5],
[0, 0, 0, 0],
[1, 1, 1, 1],
[2, 2, 3, 5],
[0, 2, 2, 3, 5],
[2, 5, 3, 0, 2, 3, 0, 3],
[6, 1, 2, 7, 9, 3, 4, 5, 10, 8],
[103, 9, 1, 7, 11, 15, 25, 201, 209, 107, 5],
[-45, -2, -5],
]:
print(f"unsorted array: {unsorted}")
heap: Heap[int] = Heap()
heap.build_max_heap(unsorted)
print(f"after build heap: {heap}")
print(f"max value: {heap.extract_max()}")
print(f"after max value removed: {heap}")
heap.insert(100)
print(f"after new value 100 inserted: {heap}")
heap.heap_sort()
print(f"heap-sorted array: {heap}\n")
| Heap |
python | huggingface__transformers | src/transformers/models/detr/modeling_detr.py | {
"start": 46548,
"end": 54318
} | class ____(DetrPreTrainedModel):
def __init__(self, config: DetrConfig):
super().__init__(config)
# Create backbone + positional encoding
backbone = DetrConvEncoder(config)
object_queries = build_position_encoding(config)
self.backbone = DetrConvModel(backbone, object_queries)
# Create projection layer
self.input_projection = nn.Conv2d(backbone.intermediate_channel_sizes[-1], config.d_model, kernel_size=1)
self.query_position_embeddings = nn.Embedding(config.num_queries, config.d_model)
self.encoder = DetrEncoder(config)
self.decoder = DetrDecoder(config)
# Initialize weights and apply final processing
self.post_init()
def freeze_backbone(self):
for name, param in self.backbone.conv_encoder.model.named_parameters():
param.requires_grad_(False)
def unfreeze_backbone(self):
for name, param in self.backbone.conv_encoder.model.named_parameters():
param.requires_grad_(True)
@auto_docstring
def forward(
self,
pixel_values: torch.FloatTensor,
pixel_mask: Optional[torch.LongTensor] = None,
decoder_attention_mask: Optional[torch.FloatTensor] = None,
encoder_outputs: Optional[torch.FloatTensor] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
decoder_inputs_embeds: Optional[torch.FloatTensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[tuple[torch.FloatTensor], DetrModelOutput]:
r"""
decoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, num_queries)`, *optional*):
Not used by default. Can be used to mask object queries.
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
Optionally, instead of passing the flattened feature map (output of the backbone + projection layer), you
can choose to directly pass a flattened representation of an image.
decoder_inputs_embeds (`torch.FloatTensor` of shape `(batch_size, num_queries, hidden_size)`, *optional*):
Optionally, instead of initializing the queries with a tensor of zeros, you can choose to directly pass an
embedded representation.
Examples:
```python
>>> from transformers import AutoImageProcessor, DetrModel
>>> from PIL import Image
>>> import requests
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> image_processor = AutoImageProcessor.from_pretrained("facebook/detr-resnet-50")
>>> model = DetrModel.from_pretrained("facebook/detr-resnet-50")
>>> # prepare image for the model
>>> inputs = image_processor(images=image, return_tensors="pt")
>>> # forward pass
>>> outputs = model(**inputs)
>>> # the last hidden states are the final query embeddings of the Transformer decoder
>>> # these are of shape (batch_size, num_queries, hidden_size)
>>> last_hidden_states = outputs.last_hidden_state
>>> list(last_hidden_states.shape)
[1, 100, 256]
```"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
batch_size, num_channels, height, width = pixel_values.shape
device = pixel_values.device
if pixel_mask is None:
pixel_mask = torch.ones(((batch_size, height, width)), device=device)
# First, sent pixel_values + pixel_mask through Backbone to obtain the features
# pixel_values should be of shape (batch_size, num_channels, height, width)
# pixel_mask should be of shape (batch_size, height, width)
features, object_queries_list = self.backbone(pixel_values, pixel_mask)
# get final feature map and downsampled mask
feature_map, mask = features[-1]
if mask is None:
raise ValueError("Backbone does not return downsampled pixel mask")
# Second, apply 1x1 convolution to reduce the channel dimension to d_model (256 by default)
projected_feature_map = self.input_projection(feature_map)
# Third, flatten the feature map + position embeddings of shape NxCxHxW to NxCxHW, and permute it to NxHWxC
# In other words, turn their shape into (batch_size, sequence_length, hidden_size)
flattened_features = projected_feature_map.flatten(2).permute(0, 2, 1)
object_queries = object_queries_list[-1].flatten(2).permute(0, 2, 1)
flattened_mask = mask.flatten(1)
# Fourth, sent flattened_features + flattened_mask + position embeddings through encoder
# flattened_features is a Tensor of shape (batch_size, height*width, hidden_size)
# flattened_mask is a Tensor of shape (batch_size, height*width)
if encoder_outputs is None:
encoder_outputs = self.encoder(
inputs_embeds=flattened_features,
attention_mask=flattened_mask,
object_queries=object_queries,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
# If the user passed a tuple for encoder_outputs, we wrap it in a BaseModelOutput when return_dict=True
elif return_dict and not isinstance(encoder_outputs, BaseModelOutput):
encoder_outputs = BaseModelOutput(
last_hidden_state=encoder_outputs[0],
hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None,
attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None,
)
# Fifth, sent query embeddings + object_queries through the decoder (which is conditioned on the encoder output)
query_position_embeddings = self.query_position_embeddings.weight.unsqueeze(0).repeat(batch_size, 1, 1)
queries = torch.zeros_like(query_position_embeddings)
# decoder outputs consists of (dec_features, dec_hidden, dec_attn)
decoder_outputs = self.decoder(
inputs_embeds=queries,
attention_mask=None,
object_queries=object_queries,
query_position_embeddings=query_position_embeddings,
encoder_hidden_states=encoder_outputs[0],
encoder_attention_mask=flattened_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
if not return_dict:
return decoder_outputs + encoder_outputs
return DetrModelOutput(
last_hidden_state=decoder_outputs.last_hidden_state,
decoder_hidden_states=decoder_outputs.hidden_states,
decoder_attentions=decoder_outputs.attentions,
cross_attentions=decoder_outputs.cross_attentions,
encoder_last_hidden_state=encoder_outputs.last_hidden_state,
encoder_hidden_states=encoder_outputs.hidden_states,
encoder_attentions=encoder_outputs.attentions,
intermediate_hidden_states=decoder_outputs.intermediate_hidden_states,
)
# taken from https://github.com/facebookresearch/detr/blob/master/models/detr.py
| DetrModel |
python | cython__cython | tests/run/pep526_variable_annotations.py | {
"start": 1593,
"end": 2136
} | class ____(object):
"""
>>> bs = BasicStarship(5)
>>> bs.damage
5
>>> bs.captain
'Picard'
>>> bs.stats
{}
>>> BasicStarship.stats
{}
"""
captain: str = 'Picard' # instance variable with default
damage: cython.int # instance variable without default
stats: ClassVar[Dict[str, cython.int]] = {} # class variable
descr_only: "descriptions are allowed but ignored"
def __init__(self, damage):
self.damage = damage
@cython.cclass
| BasicStarship |
python | getsentry__sentry | tests/sentry/runner/commands/test_backup.py | {
"start": 5508,
"end": 9796
} | class ____(TestCase):
"""
Test success cases of the `sentry compare` CLI command on encrypted inputs.
"""
def test_compare_decrypt_with(self) -> None:
with TemporaryDirectory() as tmp_dir:
(tmp_priv_key_path, _, tmp_encrypted_path) = create_encryption_test_files(tmp_dir)
tmp_findings = Path(tmp_dir).joinpath(f"{self._testMethodName}.findings.json")
cases = [
# Only left-side encrypted.
[
"compare",
str(tmp_encrypted_path),
GOOD_FILE_PATH,
"--findings-file",
str(tmp_findings),
"--decrypt-left-with",
str(tmp_priv_key_path),
],
# Only right-side encrypted.
[
"compare",
GOOD_FILE_PATH,
str(tmp_encrypted_path),
"--findings-file",
str(tmp_findings),
"--decrypt-right-with",
str(tmp_priv_key_path),
],
# Both sides encrypted.
[
"compare",
str(tmp_encrypted_path),
str(tmp_encrypted_path),
"--findings-file",
str(tmp_findings),
"--decrypt-left-with",
str(tmp_priv_key_path),
"--decrypt-right-with",
str(tmp_priv_key_path),
],
]
for args in cases:
rv = CliRunner().invoke(backup, args)
assert rv.exit_code == 0, rv.output
with open(tmp_findings) as findings_file:
findings = json.load(findings_file)
assert len(findings) == 0
@patch("sentry.backup.crypto.KeyManagementServiceClient")
def test_compare_decrypt_with_gcp_kms(self, fake_kms_client: mock.Mock) -> None:
with TemporaryDirectory() as tmp_dir:
(tmp_priv_key_path, _, tmp_encrypted_path) = create_encryption_test_files(tmp_dir)
gcp_kms_config_path = mock_gcp_kms_asymmetric_decrypt(
tmp_dir, tmp_priv_key_path, tmp_encrypted_path, fake_kms_client
)
tmp_findings = Path(tmp_dir).joinpath(f"{self._testMethodName}.findings.json")
cases = [
# Only left-side encrypted.
[
"compare",
str(tmp_encrypted_path),
GOOD_FILE_PATH,
"--findings-file",
str(tmp_findings),
"--decrypt-left-with-gcp-kms",
str(gcp_kms_config_path),
],
# Only right-side encrypted.
[
"compare",
GOOD_FILE_PATH,
str(tmp_encrypted_path),
"--findings-file",
str(tmp_findings),
"--decrypt-right-with-gcp-kms",
str(gcp_kms_config_path),
],
# Both sides encrypted.
[
"compare",
str(tmp_encrypted_path),
str(tmp_encrypted_path),
"--findings-file",
str(tmp_findings),
"--decrypt-left-with-gcp-kms",
str(gcp_kms_config_path),
"--decrypt-right-with-gcp-kms",
str(gcp_kms_config_path),
],
]
for args in cases:
fake_kms_client.reset_mock()
rv = CliRunner().invoke(backup, args, catch_exceptions=False)
assert rv.exit_code == 0, rv.output
with open(tmp_findings) as findings_file:
findings = json.load(findings_file)
assert len(findings) == 0
assert fake_kms_client.return_value.asymmetric_decrypt.call_count == len(
[arg for arg in args if arg == str(gcp_kms_config_path)]
)
| GoodCompareCommandEncryptionTests |
python | pytorch__pytorch | torch/utils/checkpoint.py | {
"start": 7900,
"end": 33058
} | class ____(torch.autograd.Function):
@staticmethod
# pyrefly: ignore [bad-override]
def forward(ctx, run_function, preserve_rng_state, *args):
check_backward_validity(args)
ctx.run_function = run_function
ctx.preserve_rng_state = preserve_rng_state
# Accommodates the (remote) possibility that autocast is enabled for cpu AND gpu.
ctx.device_type = _infer_device_type(*args)
ctx.device_autocast_kwargs, ctx.cpu_autocast_kwargs = _get_autocast_kwargs(
ctx.device_type
)
if preserve_rng_state:
ctx.fwd_cpu_state = torch.get_rng_state()
# Don't eagerly initialize the cuda context by accident.
# (If the user intends that the context is initialized later, within their
# run_function, we SHOULD actually stash the cuda state here. Unfortunately,
# we have no way to anticipate this will happen before we run the function.)
ctx.had_device_in_fwd = False
device_module = _get_device_module(ctx.device_type)
if getattr(device_module, "_initialized", False):
ctx.had_device_in_fwd = True
ctx.fwd_devices, ctx.fwd_device_states = get_device_states(*args)
# Save non-tensor inputs in ctx, keep a placeholder None for tensors
# to be filled out during the backward.
ctx.inputs = []
ctx.tensor_indices = []
tensor_inputs = []
for i, arg in enumerate(args):
if torch.is_tensor(arg):
tensor_inputs.append(arg)
ctx.tensor_indices.append(i)
ctx.inputs.append(None)
else:
ctx.inputs.append(arg)
ctx.save_for_backward(*tensor_inputs)
with torch.no_grad():
outputs = run_function(*args)
return outputs
@staticmethod
def backward(ctx, *args):
if not torch.autograd._is_checkpoint_valid():
raise RuntimeError(
"When use_reentrant=True, torch.utils.checkpoint is incompatible"
" with .grad() or passing an `inputs` parameter to .backward()."
" To resolve this error, you can either set use_reentrant=False,"
" or call .backward() without passing the `inputs` argument."
)
# Copy the list to avoid modifying original list.
inputs = list(ctx.inputs)
tensor_indices = ctx.tensor_indices
tensors = ctx.saved_tensors
# Fill in inputs with appropriate saved tensors.
for i, idx in enumerate(tensor_indices):
inputs[idx] = tensors[i]
# Stash the surrounding rng state, and mimic the state that was
# present at this time during forward. Restore the surrounding state
# when we're done.
rng_devices = []
if ctx.preserve_rng_state and ctx.had_device_in_fwd:
rng_devices = ctx.fwd_devices
with torch.random.fork_rng(
devices=rng_devices, enabled=ctx.preserve_rng_state, device_type=ctx.device_type
):
if ctx.preserve_rng_state:
torch.set_rng_state(ctx.fwd_cpu_state)
if ctx.had_device_in_fwd:
set_device_states(ctx.fwd_devices, ctx.fwd_device_states, device_type=ctx.device_type)
detached_inputs = detach_variable(tuple(inputs))
device_autocast_ctx = torch.amp.autocast(
device_type=ctx.device_type, **ctx.device_autocast_kwargs
) if torch.amp.is_autocast_available(ctx.device_type) else contextlib.nullcontext()
with torch.enable_grad(), device_autocast_ctx, torch.amp.autocast("cpu", **ctx.cpu_autocast_kwargs): # type: ignore[attr-defined]
outputs = ctx.run_function(*detached_inputs)
if isinstance(outputs, torch.Tensor):
outputs = (outputs,)
# run backward() with only tensor that requires grad
outputs_with_grad = []
args_with_grad = []
for i in range(len(outputs)):
if torch.is_tensor(outputs[i]) and outputs[i].requires_grad:
outputs_with_grad.append(outputs[i])
args_with_grad.append(args[i])
if len(outputs_with_grad) == 0:
raise RuntimeError(
"none of output has requires_grad=True,"
" this checkpoint() is not necessary"
)
torch.autograd.backward(outputs_with_grad, args_with_grad)
grads = tuple(
inp.grad if isinstance(inp, torch.Tensor) else None
for inp in detached_inputs
)
return (None, None) + grads
def noop_context_fn():
return contextlib.nullcontext(), contextlib.nullcontext()
# Note: [torch.compile and checkpoint]
# TorchDynamo does not step inside utils.checkpoint function. The flow
# looks likes this
# 1) TorchDynamo tries to wrap utils.checkpoint in a HigherOrderOp by
# speculatively checking if the forward function is safe to trace.
# 2) If yes, then Dynamo-generated Fx graph has the wrapped higher
# order op. As a result, TorchDynamo does not look inside utils.checkpoint.
# 3) If not, then TorchDynamo falls back to eager by performing a graph
# break. And here, the following disable wrapper ensures that
# TorchDynamo does not trigger again on the frames created by
# utils.checkpoint innards.
@torch._disable_dynamo
def checkpoint(
function,
*args,
use_reentrant: Optional[bool] = None,
context_fn: Callable[[], Tuple[ContextManager, ContextManager]] = noop_context_fn,
determinism_check: str = _DEFAULT_DETERMINISM_MODE,
debug: bool = False,
early_stop: bool = True,
**kwargs
):
r"""Checkpoint a model or part of the model.
Activation checkpointing is a technique that trades compute for memory.
Instead of keeping tensors needed for backward alive until they are used in
gradient computation during backward, forward computation in checkpointed
regions omits saving tensors for backward and recomputes them during the
backward pass. Activation checkpointing can be applied to any part of a
model.
There are currently two checkpointing implementations available, determined
by the :attr:`use_reentrant` parameter. It is recommended that you use
``use_reentrant=False``. Please refer the note below for a discussion of
their differences.
.. warning::
If the :attr:`function` invocation during the backward pass differs
from the forward pass, e.g., due to a global variable, the checkpointed
version may not be equivalent, potentially causing an
error being raised or leading to silently incorrect gradients.
.. warning::
The ``use_reentrant`` parameter should be passed explicitly. In version
2.9 we will raise an exception if ``use_reentrant`` is not passed.
If you are using the ``use_reentrant=True`` variant, please refer to the
note below for important considerations and potential limitations.
.. note::
The reentrant variant of checkpoint (``use_reentrant=True``) and
the non-reentrant variant of checkpoint (``use_reentrant=False``)
differ in the following ways:
* Non-reentrant checkpoint stops recomputation as soon as all needed
intermediate activations have been recomputed. This feature is enabled
by default, but can be disabled with :func:`set_checkpoint_early_stop`.
Reentrant checkpoint always recomputes :attr:`function` in its
entirety during the backward pass.
* The reentrant variant does not record the autograd graph during the
forward pass, as it runs with the forward pass under
:func:`torch.no_grad`. The non-reentrant version does record the
autograd graph, allowing one to perform backward on the graph within
checkpointed regions.
* The reentrant checkpoint only supports the
:func:`torch.autograd.backward` API for the backward pass without its
`inputs` argument, while the non-reentrant version supports all ways
of performing the backward pass.
* At least one input and output must have ``requires_grad=True`` for the
reentrant variant. If this condition is unmet, the checkpointed part
of the model will not have gradients. The non-reentrant version does
not have this requirement.
* The reentrant version does not consider tensors in nested structures
(e.g., custom objects, lists, dicts, etc) as participating in
autograd, while the non-reentrant version does.
* The reentrant checkpoint does not support checkpointed regions with
detached tensors from the computational graph, whereas the
non-reentrant version does. For the reentrant variant, if the
checkpointed segment contains tensors detached using ``detach()`` or
with :func:`torch.no_grad`, the backward pass will raise an error.
This is because ``checkpoint`` makes all the outputs require gradients
and this causes issues when a tensor is defined to have no gradient in
the model. To avoid this, detach the tensors outside of the
``checkpoint`` function.
Args:
function: describes what to run in the forward pass of the model or
part of the model. It should also know how to handle the inputs
passed as the tuple. For example, in LSTM, if user passes
``(activation, hidden)``, :attr:`function` should correctly use the
first input as ``activation`` and the second input as ``hidden``
args: tuple containing inputs to the :attr:`function`
Keyword args:
preserve_rng_state(bool, optional): Omit stashing and restoring
the RNG state during each checkpoint. Note that under torch.compile,
this flag doesn't take effect and we always preserve RNG state.
Default: ``True``
use_reentrant(bool):
specify whether to use the activation checkpoint variant that
requires reentrant autograd. This parameter should be passed
explicitly. In version 2.9 we will raise an exception if
``use_reentrant`` is not passed. If ``use_reentrant=False``,
``checkpoint`` will use an implementation that does not require
reentrant autograd. This allows ``checkpoint`` to support additional
functionality, such as working as expected with
``torch.autograd.grad`` and support for keyword arguments input into
the checkpointed function.
context_fn(Callable, optional): A callable returning a tuple of two
context managers. The function and its recomputation will be run
under the first and second context managers respectively.
This argument is only supported if ``use_reentrant=False``.
determinism_check(str, optional): A string specifying the determinism
check to perform. By default it is set to ``"default"`` which
compares the shapes, dtypes, and devices of the recomputed tensors
against those the saved tensors. To turn off this check, specify
``"none"``. Currently these are the only two supported values.
Please open an issue if you would like to see more determinism
checks. This argument is only supported if ``use_reentrant=False``,
if ``use_reentrant=True``, the determinism check is always disabled.
debug(bool, optional): If ``True``, error messages will also include
a trace of the operators ran during the original forward computation
as well as the recomputation. This argument is only supported if
``use_reentrant=False``.
early_stop(bool, optional): If ``True``, non-reentrant checkpoint stops
recomputation as soon as it has computed all needed Tensors. This
argument is ignored if ``use_reentrant=True``. Can be overridden
globally using :func:`set_checkpoint_early_stop` context manager.
Default: ``True``.
Returns:
Output of running :attr:`function` on :attr:`*args`
"""
if use_reentrant is None:
warnings.warn(
"torch.utils.checkpoint: the use_reentrant parameter should be "
"passed explicitly. Starting in PyTorch 2.9, calling checkpoint "
"without use_reentrant will raise an exception. use_reentrant=False is "
"recommended, but if you need to preserve the current default "
"behavior, you can pass use_reentrant=True. Refer to docs for more "
"details on the differences between the two variants.",
stacklevel=2
)
use_reentrant = True
# Hack to mix *args with **kwargs in a python 2.7-compliant way
preserve = kwargs.pop("preserve_rng_state", True)
if kwargs and use_reentrant:
raise ValueError(
"Unexpected keyword arguments: " + ",".join(arg for arg in kwargs)
)
if use_reentrant:
if context_fn is not noop_context_fn or debug is not False:
raise ValueError(
"Passing `context_fn` or `debug` is only supported when "
"use_reentrant=False."
)
return CheckpointFunction.apply(function, preserve, *args)
else:
gen = _checkpoint_without_reentrant_generator(
function, preserve, context_fn, determinism_check, debug, early_stop, *args, **kwargs
)
# Runs pre-forward logic
next(gen)
ret = function(*args, **kwargs)
# Runs post-forward logic
try:
next(gen)
except StopIteration:
return ret
def checkpoint_sequential(functions, segments, input, use_reentrant=None, **kwargs):
r"""Checkpoint a sequential model to save memory.
Sequential models execute a list of modules/functions in order
(sequentially). Therefore, we can divide such a model in various segments
and checkpoint each segment. All segments except the last will not store
the intermediate activations. The inputs of each checkpointed segment will
be saved for re-running the segment in the backward pass.
.. warning::
The ``use_reentrant`` parameter should be passed explicitly. In version
2.9 we will raise an exception if ``use_reentrant`` is not passed.
If you are using the ``use_reentrant=True` variant, please see
:func:`~torch.utils.checkpoint.checkpoint` for
the important considerations and limitations of this variant. It is
recommended that you use ``use_reentrant=False``.
.. warning:
Since PyTorch 1.4, it allows only one Tensor as the input and
intermediate outputs, just like :class:`torch.nn.Sequential`.
Args:
functions: A :class:`torch.nn.Sequential` or the list of modules or
functions (comprising the model) to run sequentially.
segments: Number of chunks to create in the model
input: A Tensor that is input to :attr:`functions`
preserve_rng_state(bool, optional): Omit stashing and restoring
the RNG state during each checkpoint.
Default: ``True``
use_reentrant(bool):
specify whether to use the activation checkpoint variant that
requires reentrant autograd. This parameter should be passed
explicitly. In version 2.5 we will raise an exception if
``use_reentrant`` is not passed. If ``use_reentrant=False``,
``checkpoint`` will use an implementation that does not require
reentrant autograd. This allows ``checkpoint`` to support additional
functionality, such as working as expected with
``torch.autograd.grad`` and support for keyword arguments input into
the checkpointed function.
Returns:
Output of running :attr:`functions` sequentially on :attr:`*inputs`
Example:
>>> # xdoctest: +SKIP("stub")
>>> model = nn.Sequential(...)
>>> input_var = checkpoint_sequential(model, chunks, input_var)
"""
if use_reentrant is None:
warnings.warn(
"torch.utils.checkpoint.checkpoint_sequential: the use_reentrant "
"parameter should be passed explicitly. "
"In version 2.9 we will raise an exception if use_reentrant "
"is not passed. use_reentrant=False is "
"recommended, but if you need to preserve the current default "
"behavior, you can pass use_reentrant=True. Refer to docs for more "
"details on the differences between the two variants.", stacklevel=2
)
use_reentrant = True
# Hack for keyword-only parameter in a python 2.7-compliant way
preserve = kwargs.pop("preserve_rng_state", True)
if kwargs:
raise ValueError(
"Unexpected keyword arguments: " + ",".join(arg for arg in kwargs)
)
def run_function(start, end, functions):
def forward(input):
for j in range(start, end + 1):
input = functions[j](input)
return input
return forward
if isinstance(functions, torch.nn.Sequential):
functions = list(functions.children())
segment_size = len(functions) // segments
# the last chunk has to be non-volatile
end = -1
for start in range(0, segment_size * (segments - 1), segment_size):
end = start + segment_size - 1
input = checkpoint(
run_function(start, end, functions),
input,
use_reentrant=use_reentrant,
preserve_rng_state=preserve,
)
return run_function(end + 1, len(functions) - 1, functions)(input)
def _internal_assert(cond) -> None:
if not cond:
raise AssertionError(
"Something went unexpectedly wrong in activation checkpoint. "
"Please report this bug by filing an issue to PyTorch."
)
# NOTE [ Nestable Checkpoint ]
#
# The semantics of nested checkpoint can be defined by two basic rules.
# Following the two rules leads to an important implication that is central
# to motivating the design.
#
# Rule 1. Saved tensors are managed by inner-most checkpoint only and hidden
# from any outer layers of checkpoint.
#
# Rule 2. The inputs of inner checkpoints are treated as tensors saved to its
# parent checkpoint.
#
# Implication: To recompute any given saved tensor, we need to recompute all of
# the checkpoints wrapping it.
#
# Why is this implied? To unpack a saved tensor X during backward we need to
# recompute the inner-most checkpoint (#1), and in order to recompute that
# checkpoint I need to have its inputs, which are managed by that checkpoint's
# parent (#2), which thus also needs to be recomputed first. Continue this line
# of reasoning and we realize that in order to unpack X, all checkpoints that
# were active at the time X was saved need to be recomputed. (unless we have
# already done so in that backward for some other saved tensor).
#
# In practice, we use a noop autograd Function to save inputs as saved tensors.
# During unpack calling ctx.saved_tensor triggers the parent checkpoint to
# recompute.
#
# Rule 3. We should start recomputation as if there are no checkpoints currently
# active. Checkpoints encountered during recomputation are still
# respected.
#
# When we start recomputation, we push the saved variable hook meant for
# recomputation on the stack. See examples in Rule 6 for more context.
#
# * * * *
#
# Beyond the basic semantics specific to nested checkpoint, we impose several
# more constraints that may apply to checkpointing in general.
#
# Rule 4. Lifetime of recomputed tensors
#
# Recomputed tensors are considered specific to particular invocations
# of backward and are always cleared immediately as they are unpacked
# Particularly, we require this to happen even if retain_graph=True.
#
# [ Implementation details of Rule 4 ]
#
# If we were okay with recomputed tensors staying alive after backward is run
# with retain_graph=True, we would store recomputed variables as the values of a
# WeakKeyDictionary and pack strong references to the keys, so that as we
# backward, those packed keys would be cleared as long as retain_graph=False.
# Clearing the packed key clears the corresponding entry in the WKD.
#
# If we wish recomputed variables to be immediately cleared as we unpack them in
# the retain_graph=True case, we cannot rely on the packed keys to be cleared by
# backward automatically. Instead of packing the strong reference to the key
# directly, we pack a container object, which we manually clear as we unpack.
#
# An important detail is that if a second backward happens, the second
# recomputation needs to reset the container with a newly created key.
#
# Rule 5. Stop recomputation as soon as we've recomputed the saved tensors we
# know we need.
#
# [ Implementation details of Rule 5 ]
#
# During recomputation, raise an exception if the number of recomputed tensors
# matches the number of tensors that we expected to recompute. We wrap the
# recomputation call with a try-catch to catch this specific exception. See
# Rule #6 below for some examples.
#
# Rule 6. We support doing backward inside checkpoint context
#
# [ retain_graph is True]
#
# def fn(x):
# y = x.sin()
# z = y.cos()
# gx, = torch.autograd.grad(z, x, retains_grad=True)
# return gx, z
#
# out = checkpoint(fn)(inp)
# out.backward()
#
# Because z is saved by cos while checkpoint is enabled, it would not be
# actually saved, and so the .grad() call inside must trigger a recomputation.
#
# During recomputation the "inner pack hook" has two responsibilities:
#
# 1) As usual, populating the WeakKeyDictionary storing recomputed tensors
# 2) Pack the actual tensor (detached) so that one may perform backward on the
# recomputed graph. The tensors saved to this graph will live until the end
# of recomputation, or die earlier if someone performs backward with
# retain_graph=False.
#
# More generally performing backward on the recomputed graph occurs in the
# following cases:
# - If backward is performed inside forward,
# - During the original forward IF early-stop is disabled
# - During the original backward
# - If there are multiple .grad()/.backward() calls, we would perform backward
# on the recomputed graph even if early-stop is enabled (see the example below)
#
# [ retain_graph is False ]
#
# The example below shows what happens if during recomputation we find that some
# of the tensors we are trying to recompute have already been cleared.
#
# Spoiler: we don't do anything special, we just skip over them!
#
# def fn(x):
# y = x.sin() # (1)
# z = y.cos() # (2)
# gx, = torch.autograd.grad(z, x) # (3)
# return x.cos() * gx # (4)
#
# out = checkpoint(fn)(inp)
# out.backward() # (5)
#
# 1, 2. Don't save x and y since we are inside a checkpoint.
# 3. Trigger a recompute of fn since x and y weren't saved.
# And depending on whether early stop is enabled, either stop at (2) or
# continue running the function.
# Because we are running backward with retain_graph=False, we clear x and y's
# holders.
# 4. Don't save x since we are inside a checkpoint.
# 5. Calling backward triggers another recompute of fn. During recompute, we see
# that x and y have already been cleared in the original graph as indicated
# by holder=None. We skip over them. We still save x at (4) (since its holder
# is still alive.)
_enable_checkpoint_early_stop: Optional[bool] = None
@contextlib.contextmanager
def set_checkpoint_early_stop(enable: bool):
"""Context manager that sets whether checkpoint should stop recomputation early.
By default, non-reentrant checkpoint stops recomputation as soon as it
has computed all needed Tensors. This context manager can be used to disable
that feature if it is problematic for your specific application.
This context manager only needs to be active when forward is run. It does
not need to be active during backward.
Example::
>>> # xdoctest: +SKIP(failing)
>>> message = "saved tensors default hooks are disabled"
>>> with set_checkpoint_early_stop(False):
... # Any checkpoint under this context manager will respect this
... # context manager, even if its backward is performed outside.
... out = checkpoint(fn, inputs)
...
>>> out.backward()
"""
global _enable_checkpoint_early_stop
try:
prev = _enable_checkpoint_early_stop
_enable_checkpoint_early_stop = enable
yield
finally:
_enable_checkpoint_early_stop = prev
| CheckpointFunction |
python | pypa__pip | src/pip/_internal/cli/parser.py | {
"start": 5239,
"end": 10899
} | class ____(CustomOptionParser):
"""Custom option parser which updates its defaults by checking the
configuration files and environmental variables"""
def __init__(
self,
*args: Any,
name: str,
isolated: bool = False,
**kwargs: Any,
) -> None:
self.name = name
self.config = Configuration(isolated)
assert self.name
super().__init__(*args, **kwargs)
def check_default(self, option: optparse.Option, key: str, val: Any) -> Any:
try:
return option.check_value(key, val)
except optparse.OptionValueError as exc:
print(f"An error occurred during configuration: {exc}")
sys.exit(3)
def _get_ordered_configuration_items(
self,
) -> Generator[tuple[str, Any], None, None]:
# Configuration gives keys in an unordered manner. Order them.
override_order = ["global", self.name, ":env:"]
# Pool the options into different groups
section_items: dict[str, list[tuple[str, Any]]] = {
name: [] for name in override_order
}
for _, value in self.config.items():
for section_key, val in value.items():
# ignore empty values
if not val:
logger.debug(
"Ignoring configuration key '%s' as its value is empty.",
section_key,
)
continue
section, key = section_key.split(".", 1)
if section in override_order:
section_items[section].append((key, val))
# Yield each group in their override order
for section in override_order:
yield from section_items[section]
def _update_defaults(self, defaults: dict[str, Any]) -> dict[str, Any]:
"""Updates the given defaults with values from the config files and
the environ. Does a little special handling for certain types of
options (lists)."""
# Accumulate complex default state.
self.values = optparse.Values(self.defaults)
late_eval = set()
# Then set the options with those values
for key, val in self._get_ordered_configuration_items():
# '--' because configuration supports only long names
option = self.get_option("--" + key)
# Ignore options not present in this parser. E.g. non-globals put
# in [global] by users that want them to apply to all applicable
# commands.
if option is None:
continue
assert option.dest is not None
if option.action in ("store_true", "store_false"):
try:
val = strtobool(val)
except ValueError:
self.error(
f"{val} is not a valid value for {key} option, "
"please specify a boolean value like yes/no, "
"true/false or 1/0 instead."
)
elif option.action == "count":
with suppress(ValueError):
val = strtobool(val)
with suppress(ValueError):
val = int(val)
if not isinstance(val, int) or val < 0:
self.error(
f"{val} is not a valid value for {key} option, "
"please instead specify either a non-negative integer "
"or a boolean value like yes/no or false/true "
"which is equivalent to 1/0."
)
elif option.action == "append":
val = val.split()
val = [self.check_default(option, key, v) for v in val]
elif option.action == "callback":
assert option.callback is not None
late_eval.add(option.dest)
opt_str = option.get_opt_string()
val = option.convert_value(opt_str, val)
# From take_action
args = option.callback_args or ()
kwargs = option.callback_kwargs or {}
option.callback(option, opt_str, val, self, *args, **kwargs)
else:
val = self.check_default(option, key, val)
defaults[option.dest] = val
for key in late_eval:
defaults[key] = getattr(self.values, key)
self.values = None
return defaults
def get_default_values(self) -> optparse.Values:
"""Overriding to make updating the defaults after instantiation of
the option parser possible, _update_defaults() does the dirty work."""
if not self.process_default_values:
# Old, pre-Optik 1.5 behaviour.
return optparse.Values(self.defaults)
# Load the configuration, or error out in case of an error
try:
self.config.load()
except ConfigurationError as err:
self.exit(UNKNOWN_ERROR, str(err))
defaults = self._update_defaults(self.defaults.copy()) # ours
for option in self._get_all_options():
assert option.dest is not None
default = defaults.get(option.dest)
if isinstance(default, str):
opt_str = option.get_opt_string()
defaults[option.dest] = option.check_value(opt_str, default)
return optparse.Values(defaults)
def error(self, msg: str) -> NoReturn:
self.print_usage(sys.stderr)
self.exit(UNKNOWN_ERROR, f"{msg}\n")
| ConfigOptionParser |
python | kamyu104__LeetCode-Solutions | Python/moving-average-from-data-stream.py | {
"start": 60,
"end": 577
} | class ____(object):
def __init__(self, size):
"""
Initialize your data structure here.
:type size: int
"""
self.__size = size
self.__sum = 0
self.__q = deque()
def next(self, val):
"""
:type val: int
:rtype: float
"""
if len(self.__q) == self.__size:
self.__sum -= self.__q.popleft()
self.__sum += val
self.__q.append(val)
return 1.0 * self.__sum / len(self.__q)
| MovingAverage |
python | coleifer__peewee | tests/reflection.py | {
"start": 15988,
"end": 16267
} | class ____(TestModel):
data = CharField(constraints=[SQL('DEFAULT \'\'')])
timestamp = DateTimeField(constraints=[SQL('DEFAULT current_timestamp')])
flags = IntegerField(constraints=[SQL('DEFAULT 0')])
misc = TextField(constraints=[SQL('DEFAULT \'foo\'')])
| EventLog |
python | kamyu104__LeetCode-Solutions | Python/k-th-smallest-prime-fraction.py | {
"start": 33,
"end": 1097
} | class ____(object):
def kthSmallestPrimeFraction(self, A, K):
"""
:type A: List[int]
:type K: int
:rtype: List[int]
"""
def check(mid, A, K, result):
tmp = [0]*2
count = 0
j = 0
for i in xrange(len(A)):
while j < len(A):
if i < j and A[i] < A[j]*mid:
if tmp[0] == 0 or \
tmp[0]*A[j] < tmp[1]*A[i]:
tmp[0] = A[i]
tmp[1] = A[j]
break
j += 1
count += len(A)-j
if count == K:
result[:] = tmp
return count >= K
result = []
left, right = 0.0, 1.0
while right-left > 1e-8:
mid = left + (right-left) / 2.0
if check(mid, A, K, result):
right = mid
else:
left = mid
if result:
break
return result
| Solution |
python | dagster-io__dagster | python_modules/dagster/dagster_tests/core_tests/resource_tests/test_resource_definition.py | {
"start": 27836,
"end": 33810
} | class ____(PythonEnum):
VALUE_ONE = 0
OTHER = 1
DagsterEnumType = dg.Enum(
"ResourceTestEnum",
[
dg.EnumValue("VALUE_ONE", TestPythonEnum.VALUE_ONE),
dg.EnumValue("OTHER", TestPythonEnum.OTHER),
],
)
def test_resource_with_enum_in_schema():
@dg.resource(config_schema={"enum": DagsterEnumType})
def enum_resource(context):
return context.resource_config["enum"]
assert_job_runs_with_resource(enum_resource, {"enum": "VALUE_ONE"}, TestPythonEnum.VALUE_ONE)
def test_resource_with_enum_in_schema_configured():
@dg.resource(config_schema={"enum": DagsterEnumType})
def enum_resource(context):
return context.resource_config["enum"]
@dg.configured(enum_resource, {"enum": DagsterEnumType})
def passthrough_to_enum_resource(config):
return {"enum": "VALUE_ONE" if config["enum"] == TestPythonEnum.VALUE_ONE else "OTHER"}
assert_job_runs_with_resource(
passthrough_to_enum_resource, {"enum": "VALUE_ONE"}, TestPythonEnum.VALUE_ONE
)
def test_resource_run_info_exists_during_execution():
@dg.resource
def resource_checks_run_info(init_context):
assert init_context.dagster_run.run_id == init_context.run_id
return 1
assert_job_runs_with_resource(resource_checks_run_info, {}, 1)
def test_resource_needs_resource():
@dg.resource(required_resource_keys={"bar_resource"})
def foo_resource(init_context):
return init_context.resources.bar_resource + "foo"
@dg.op(required_resource_keys={"foo_resource"})
def op_requires_foo():
pass
with pytest.raises(
dg.DagsterInvariantViolationError,
match=(
"Resource with key 'bar_resource' required by resource with key 'foo_resource', but not"
" provided."
),
):
@dg.job(
resource_defs={"foo_resource": foo_resource},
)
def _fail():
op_requires_foo()
def test_resource_op_subset():
@dg.resource(required_resource_keys={"bar"})
def foo_resource(_):
return "FOO"
@dg.resource()
def bar_resource(_):
return "BAR"
@dg.resource()
def baz_resource(_):
return "BAZ"
@dg.op(required_resource_keys={"baz"})
def baz_op(_):
pass
@dg.op(required_resource_keys={"foo"})
def foo_op(_):
pass
@dg.op(required_resource_keys={"bar"})
def bar_op(_):
pass
@dg.job(
resource_defs={
"foo": foo_resource,
"baz": baz_resource,
"bar": bar_resource,
}
)
def nested():
foo_op()
bar_op()
baz_op()
assert set(nested.get_required_resource_defs().keys()) == {
"foo",
"bar",
"baz",
"io_manager",
}
assert nested.get_subset(op_selection=["foo_op"]).get_required_resource_defs().keys() == {
"foo",
"bar",
"io_manager",
}
assert nested.get_subset(op_selection=["bar_op"]).get_required_resource_defs().keys() == {
"bar",
"io_manager",
}
assert nested.get_subset(op_selection=["baz_op"]).get_required_resource_defs().keys() == {
"baz",
"io_manager",
}
def test_config_with_no_schema():
@dg.resource
def my_resource(init_context):
return init_context.resource_config
@dg.op(required_resource_keys={"resource"})
def my_op(context):
assert context.resources.resource == 5
@dg.job(resource_defs={"resource": my_resource})
def my_job():
my_op()
assert my_job.execute_in_process(run_config={"resources": {"resource": {"config": 5}}}).success
def test_configured_resource_unused():
# Ensure that if we do not use a resource on a mode definition, then we do not apply the config
# schema.
entered = []
@dg.resource
def basic_resource(_):
pass
@dg.configured(basic_resource)
def configured_resource(_):
entered.append("True")
@dg.op(required_resource_keys={"bar"})
def basic_op(_):
pass
@dg.job(resource_defs={"foo": configured_resource, "bar": basic_resource})
def basic_job():
basic_op()
basic_job.execute_in_process()
assert not entered
def test_context_manager_resource():
event_list = []
@dg.resource
@contextmanager
def cm_resource():
try:
event_list.append("foo")
yield "foo"
finally:
event_list.append("finally")
@dg.op(required_resource_keys={"cm"})
def basic(context):
event_list.append("compute")
assert context.resources.cm == "foo"
with dg.build_op_context(resources={"cm": cm_resource}) as context:
basic(context)
assert event_list == [
"foo",
"compute",
"finally",
] # Ensures that we teardown after compute
with pytest.raises(
dg.DagsterInvariantViolationError,
match=(
"At least one provided resource is a generator, but attempting to access resources "
"outside of context manager scope."
),
):
basic(dg.build_op_context(resources={"cm": cm_resource}))
@dg.graph
def call_basic():
basic()
event_list = []
assert call_basic.execute_in_process(resources={"cm": cm_resource}).success
assert event_list == ["foo", "compute", "finally"]
def test_telemetry_custom_resource():
class MyResource:
def foo(self) -> str:
return "bar"
@dg.resource
def my_resource():
return MyResource()
assert not my_resource._is_dagster_maintained() # noqa: SLF001
def test_telemetry_dagster_io_manager():
class MyResource:
def foo(self) -> str:
return "bar"
@dagster_maintained_resource
@dg.resource
def my_resource():
return MyResource()
assert my_resource._is_dagster_maintained() # noqa: SLF001
| TestPythonEnum |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/linalg/linalg_ops_test.py | {
"start": 19270,
"end": 19390
} | class ____(test.TestCase, _LUSolve):
use_static_shape = False
@test_util.run_all_in_graph_and_eager_modes
| LUSolveDynamic |
python | walkccc__LeetCode | solutions/1118. Number of Days in a Month/1118.py | {
"start": 0,
"end": 307
} | class ____:
def numberOfDays(self, year: int, month: int) -> int:
days = [0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
def isLeapYear(year: int) -> bool:
return year % 4 == 0 and year % 100 != 0 or year % 400 == 0
return 29 if month == 2 and isLeapYear(year) else days[month]
| Solution |
python | google__jax | jax/experimental/transfer.py | {
"start": 792,
"end": 1534
} | class ____:
"""Represents a connection to exactly one peer."""
@use_cpp_method()
def _pull_flat(self, uuid, backend, xs_flat):
raise NotImplementedError()
def pull(self, uuid: int, xs: Any) -> Any:
"""Fetches a pytree of arrays from a remote device.
Args:
uuid: identifier for the request
xs: A pytree of ShapeDtypeStruct.
Returns:
A pytree of arrays.
"""
xs_flat, tree = jax.tree.flatten(xs)
if not xs_flat:
return xs
backend = next(iter(xs_flat[0].sharding.device_set)).client
return tree.unflatten(self._pull_flat(uuid, backend, xs_flat))
if not TYPE_CHECKING:
TransferConnection = use_cpp_class(_xc._xla.TransferConnection)(TransferConnection)
| TransferConnection |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/ruff/RUF053.py | {
"start": 2734,
"end": 2972
} | class ____[T: [a]](Generic[_A]): ...
# Existing bounds should not be deparenthesized.
# class C[T: (_Y := int)](Generic[_A]): ... # TODO: Uncomment this
# class C[T: (*a,)](Generic[_A]): ... # TODO: Uncomment this
### No errors
| C |
python | lazyprogrammer__machine_learning_examples | hmm_class/hmmc_tf.py | {
"start": 677,
"end": 7842
} | class ____:
def __init__(self, M, K, D):
self.M = M # number of hidden states
self.K = K # number of Gaussians
self.D = D # Gaussian dimensionality
def set_session(self, session):
self.session = session
def init_random(self, X):
pi0 = np.ones(self.M).astype(np.float32) # initial state distribution
A0 = np.random.randn(self.M, self.M).astype(np.float32) # state transition matrix
R0 = np.ones((self.M, self.K)).astype(np.float32) # mixture proportions
# mu0 = np.random.randn(self.M, self.K, self.D).astype(np.float32)
mu0 = np.zeros((self.M, self.K, self.D))
for j in range(self.M):
for k in range(self.K):
n = np.random.randint(X.shape[0])
t = np.random.randint(X.shape[1])
mu0[j,k] = X[n,t]
mu0 = mu0.astype(np.float32)
sigma0 = np.random.randn(self.M, self.K, self.D).astype(np.float32)
self.build(pi0, A0, R0, mu0, sigma0)
def build(self, preSoftmaxPi, preSoftmaxA, preSoftmaxR, mu, logSigma):
self.preSoftmaxPi = tf.Variable(preSoftmaxPi)
self.preSoftmaxA = tf.Variable(preSoftmaxA)
self.preSoftmaxR = tf.Variable(preSoftmaxR)
self.mu = tf.Variable(mu)
self.logSigma = tf.Variable(logSigma)
pi = tf.nn.softmax(self.preSoftmaxPi)
A = tf.nn.softmax(self.preSoftmaxA)
R = tf.nn.softmax(self.preSoftmaxR)
sigma = tf.exp(self.logSigma)
# X will be TxD
self.tfx = tf.placeholder(tf.float32, shape=(None, self.D), name='X')
# first we need to calculate B
# B[j,t] = probability of X being in state j at time t
# = Gaussian mixture P( x(t) | mu(j), sigma(j) )
# idea: first calculate components and sum
# note: we can use a for loop because M and K are not TF variables
self.mvns = []
for j in range(self.M):
self.mvns.append([])
for k in range(self.K):
self.mvns[j].append(
MVN(self.mu[j,k], sigma[j,k])
)
# note: we can use a for loop because M and K are not TF variables
B = []
for j in range(self.M):
components = []
for k in range(self.K):
components.append(
self.mvns[j][k].prob(self.tfx)
)
# why?
# because we can stack a list of tensors
# but not a list of lists of tensors
# components[j] will be K x T
# we now want to multiply by the mixture probability (R)
# result is M x T
# which gives us P( X(t) | state j )
components = tf.stack(components)
R_j = tf.reshape(R[j], [1, self.K])
p_x_t_j = tf.matmul(R_j, components)
# now components[j] is just 1 x T --> T
components = tf.reshape(p_x_t_j, [-1])
# now append it to B
B.append(components)
# should now be M x T
B = tf.stack(B)
# we should make it T x M since scan will loop through first index
B = tf.transpose(B, [1, 0])
# now perform the forward algorithm
def recurrence(old_a_old_s, B_t):
old_a = tf.reshape(old_a_old_s[0], (1, self.M))
a = tf.matmul(old_a, A) * B_t
a = tf.reshape(a, (self.M,))
s = tf.reduce_sum(a)
return (a / s), s
alpha, scale = tf.scan(
fn=recurrence,
elems=B[1:],
initializer=(pi*B[0], np.float32(1.0)),
)
# note: tensorflow is very strict about what types you pass in to initializer!
# - cannot be list, must be tuple
# - cannot be 1 (int), must be float32
self.cost_op = -tf.reduce_sum(tf.log(scale))
self.train_op = tf.train.AdamOptimizer(1e-2).minimize(self.cost_op)
def set(self, preSoftmaxPi, preSoftmaxA, preSoftmaxR, mu, logSigma):
# assume build has already been called
# we just assign these new variables
op1 = self.preSoftmaxPi.assign(preSoftmaxPi)
op2 = self.preSoftmaxA.assign(preSoftmaxA)
op3 = self.preSoftmaxR.assign(preSoftmaxR)
op4 = self.mu.assign(mu)
op5 = self.logSigma.assign(logSigma)
self.session.run([op1, op2, op3, op4, op5])
def fit(self, X, max_iter=10):
# train the HMM model using stochastic gradient descent
N = len(X)
print("number of train samples:", N)
costs = []
for it in range(max_iter):
if it % 1 == 0:
print("it:", it)
for n in range(N):
# this would of course be much faster if we didn't do this on
# every iteration of the loop
c = self.get_cost_multi(X).sum()
costs.append(c)
self.session.run(self.train_op, feed_dict={self.tfx: X[n]})
plt.plot(costs)
plt.show()
def get_cost(self, x):
return self.session.run(self.cost_op, feed_dict={self.tfx: x})
def get_cost_multi(self, X):
return np.array([self.get_cost(x) for x in X])
def real_signal():
  """Fit a 3-state, 3-component HMM to the 'helloworld.wav' audio signal.

  Side effects: opens the wav file from the working directory, creates a
  TF interactive session, trains the model (which plots the cost curve).
  """
  spf = wave.open('helloworld.wav', 'r')
  # Extract raw audio from the wav file.
  # If you right-click on the file and go to "Get Info", you can see:
  #   sampling rate = 16000 Hz (quantization in time)
  #   bits per sample = 16     (quantization in amplitude)
  # We also do this for images!
  # 2^16 = 65536 is how many different sound levels we have.
  frames = spf.readframes(-1)
  # BUG FIX: np.fromstring on binary data was deprecated in NumPy 1.14 and
  # has since been removed; np.frombuffer is the supported replacement.
  signal = np.frombuffer(frames, dtype=np.int16)
  T = len(signal)
  # Standardize to zero mean / unit variance (promotes int16 -> float64,
  # which also avoids writing into the read-only frombuffer view).
  signal = (signal - signal.mean()) / signal.std()
  # signal needs to be of shape N x T(n) x D
  signals = signal.reshape(1, T, 1)
  hmm = HMM(3, 3, 1)
  hmm.init_random(signals)
  init = tf.global_variables_initializer()
  session = tf.InteractiveSession()
  session.run(init)
  hmm.set_session(session)
  hmm.fit(signals, max_iter=30)
def fake_signal():
  """Fit an HMM to synthetic signals, then compare the fitted log-likelihood
  against the log-likelihood under the true generating parameters.
  """
  signals = get_signals()
  # assume all signals are same length
  signals = np.array(signals)
  hmm = HMM(5, 3, signals[0].shape[1])  # M, K, D
  hmm.init_random(signals)
  init = tf.global_variables_initializer()
  session = tf.InteractiveSession()
  session.run(init)
  hmm.set_session(session)
  hmm.fit(signals, max_iter=30)
  L = hmm.get_cost_multi(signals).sum()
  print("LL for fitted params:", L)

  # test on the actual generating params
  _, _, _, pi, A, R, mu, sigma = big_init()
  # turn these into their "pre-softmax" forms (softmax(log(p)) recovers p)
  pi = np.log(pi)
  A = np.log(A)
  R = np.log(R)
  M, K, D, _ = sigma.shape  # need to convert full cov into diag cov
  logSigma = np.zeros((M, K, D))
  for j in range(M):
    # BUG FIX: iterate over the K mixture components (was `range(D)`),
    # which skipped or mis-indexed components whenever K != D.
    for k in range(K):
      logSigma[j, k] = np.log(np.diag(sigma[j, k]))
  hmm.set(pi, A, R, mu, logSigma)
  L = hmm.get_cost_multi(signals).sum()
  print("LL for actual params:", L)
if __name__ == '__main__':
  # real_signal()  # disabled: requires helloworld.wav in the working directory
  fake_signal()
| HMM |
python | scrapy__scrapy | tests/test_scheduler.py | {
"start": 10121,
"end": 10897
} | class ____:
def setup_method(self):
self.crawler = get_crawler(
spidercls=StartUrlsSpider,
settings_dict={
"SCHEDULER_PRIORITY_QUEUE": "scrapy.pqueues.DownloaderAwarePriorityQueue",
"DUPEFILTER_CLASS": "scrapy.dupefilters.BaseDupeFilter",
},
)
@inlineCallbacks
def test_integration_downloader_aware_priority_queue(self):
with MockServer() as mockserver:
url = mockserver.url("/status?n=200", is_secure=False)
start_urls = [url] * 6
yield self.crawler.crawl(start_urls)
assert self.crawler.stats.get_value("downloader/response_count") == len(
start_urls
)
| TestIntegrationWithDownloaderAwareInMemory |
python | getsentry__sentry | src/sentry/api/serializers/models/commit.py | {
"start": 1099,
"end": 1664
} | class ____(CommitSerializerResponse):
releases: list[CommitReleaseSerializerResponse]
def get_users_for_commits(item_list, user=None) -> Mapping[str, Author]:
authors = list(
CommitAuthor.objects.get_many_from_cache([i.author_id for i in item_list if i.author_id])
)
if authors:
org_ids = {item.organization_id for item in item_list}
if len(org_ids) == 1:
return get_users_for_authors(organization_id=org_ids.pop(), authors=authors, user=user)
return {}
@register(Commit)
| CommitSerializerResponseWithReleases |
python | weaviate__weaviate-python-client | weaviate/collections/classes/internal.py | {
"start": 1364,
"end": 2224
} | class ____:
"""Metadata of an object returned by a query."""
creation_time: Optional[datetime.datetime] = None
last_update_time: Optional[datetime.datetime] = None
distance: Optional[float] = None
certainty: Optional[float] = None
score: Optional[float] = None
explain_score: Optional[str] = None
is_consistent: Optional[bool] = None
rerank_score: Optional[float] = None
def _is_empty(self) -> bool:
return all(
[
self.creation_time is None,
self.last_update_time is None,
self.distance is None,
self.certainty is None,
self.score is None,
self.explain_score is None,
self.is_consistent is None,
self.rerank_score is None,
]
)
@dataclass
| MetadataReturn |
python | bokeh__bokeh | tests/unit/bokeh/model/test_util_model.py | {
"start": 1307,
"end": 1533
} | class ____(bmu.HasDocumentRef):
pass
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
| ClassWithDocRef |
python | tensorflow__tensorflow | tensorflow/python/data/kernel_tests/concatenate_test.py | {
"start": 1450,
"end": 7022
} | class ____(test_base.DatasetTestBase, parameterized.TestCase):
@combinations.generate(test_base.default_test_combinations())
def testBasic(self):
input_components = (
np.tile(np.array([[1], [2], [3], [4]]), 20),
np.tile(np.array([[12], [13], [14], [15]]), 15),
np.array([37.0, 38.0, 39.0, 40.0]))
to_concatenate_components = (
np.tile(np.array([[1], [2], [3], [4], [5]]), 20),
np.tile(np.array([[12], [13], [14], [15], [16]]), 15),
np.array([37.0, 38.0, 39.0, 40.0, 41.0]))
input_dataset = dataset_ops.Dataset.from_tensor_slices(input_components)
dataset_to_concatenate = dataset_ops.Dataset.from_tensor_slices(
to_concatenate_components)
concatenated = input_dataset.concatenate(dataset_to_concatenate)
self.assertEqual(
dataset_ops.get_legacy_output_shapes(concatenated),
(tensor_shape.TensorShape([20]), tensor_shape.TensorShape([15]),
tensor_shape.TensorShape([])))
get_next = self.getNext(concatenated)
for i in range(9):
result = self.evaluate(get_next())
if i < 4:
for component, result_component in zip(input_components, result):
self.assertAllEqual(component[i], result_component)
else:
for component, result_component in zip(to_concatenate_components,
result):
self.assertAllEqual(component[i - 4], result_component)
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
@combinations.generate(test_base.default_test_combinations())
def testDifferentShape(self):
input_components = (
np.tile(np.array([[1], [2], [3], [4]]), 20),
np.tile(np.array([[12], [13], [14], [15]]), 4))
to_concatenate_components = (
np.tile(np.array([[1], [2], [3], [4], [5]]), 20),
np.tile(np.array([[12], [13], [14], [15], [16]]), 15))
input_dataset = dataset_ops.Dataset.from_tensor_slices(input_components)
dataset_to_concatenate = dataset_ops.Dataset.from_tensor_slices(
to_concatenate_components)
concatenated = input_dataset.concatenate(dataset_to_concatenate)
self.assertEqual(
[ts.as_list()
for ts in nest.flatten(
dataset_ops.get_legacy_output_shapes(concatenated))],
[[20], [None]])
get_next = self.getNext(concatenated)
for i in range(9):
result = self.evaluate(get_next())
if i < 4:
for component, result_component in zip(input_components, result):
self.assertAllEqual(component[i], result_component)
else:
for component, result_component in zip(to_concatenate_components,
result):
self.assertAllEqual(component[i - 4], result_component)
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
@combinations.generate(test_base.default_test_combinations())
def testDifferentStructure(self):
input_components = (
np.tile(np.array([[1], [2], [3], [4]]), 5),
np.tile(np.array([[12], [13], [14], [15]]), 4))
to_concatenate_components = (
np.tile(np.array([[1], [2], [3], [4], [5]]), 20),
np.tile(np.array([[12], [13], [14], [15], [16]]), 15),
np.array([37.0, 38.0, 39.0, 40.0, 41.0]))
input_dataset = dataset_ops.Dataset.from_tensor_slices(input_components)
dataset_to_concatenate = dataset_ops.Dataset.from_tensor_slices(
to_concatenate_components)
with self.assertRaisesRegex(TypeError, "Incompatible dataset elements"):
input_dataset.concatenate(dataset_to_concatenate)
@combinations.generate(test_base.default_test_combinations())
def testDifferentKeys(self):
input_components = {
"foo": np.array([[1], [2], [3], [4]]),
"bar": np.array([[12], [13], [14], [15]])
}
to_concatenate_components = {
"foo": np.array([[1], [2], [3], [4]]),
"baz": np.array([[5], [6], [7], [8]])
}
input_dataset = dataset_ops.Dataset.from_tensor_slices(input_components)
dataset_to_concatenate = dataset_ops.Dataset.from_tensor_slices(
to_concatenate_components)
with self.assertRaisesRegex(TypeError, "Incompatible dataset elements"):
input_dataset.concatenate(dataset_to_concatenate)
@combinations.generate(test_base.default_test_combinations())
def testDifferentType(self):
input_components = (
np.tile(np.array([[1], [2], [3], [4]]), 5),
np.tile(np.array([[12], [13], [14], [15]]), 4))
to_concatenate_components = (
np.tile(np.array([[1.0], [2.0], [3.0], [4.0]]), 5),
np.tile(np.array([[12], [13], [14], [15]]), 15))
input_dataset = dataset_ops.Dataset.from_tensor_slices(input_components)
dataset_to_concatenate = dataset_ops.Dataset.from_tensor_slices(
to_concatenate_components)
with self.assertRaisesRegex(TypeError, "Incompatible dataset elements"):
input_dataset.concatenate(dataset_to_concatenate)
@combinations.generate(test_base.default_test_combinations())
def testWindows(self):
a = dataset_ops.Dataset.range(5).window(1)
b = dataset_ops.Dataset.range(5, 10).window(1)
c = a.concatenate(b).flat_map(lambda x: x)
self.assertDatasetProduces(c, list(range(10)))
@combinations.generate(test_base.default_test_combinations())
def testName(self):
a = dataset_ops.Dataset.range(5)
b = dataset_ops.Dataset.range(5, 10)
c = a.concatenate(b, name="concatenate")
self.assertDatasetProduces(c, list(range(10)))
| ConcatenateTest |
python | joke2k__faker | tests/providers/test_credit_card.py | {
"start": 5519,
"end": 6373
} | class ____:
"""Test pt_PT credit card provider methods"""
visa_pattern: Pattern = re.compile(r"4[0-9]{15}")
mastercard_pattern: Pattern = re.compile(r"5[1-5][0-9]{14}")
maestro_pattern: Pattern = re.compile(r"(50|67)[0-9]{14}")
def test_visa(self, faker, num_samples):
for _ in range(num_samples):
number = faker.credit_card_number("visa")
assert self.visa_pattern.fullmatch(number)
def test_mastercard(self, faker, num_samples):
for _ in range(num_samples):
number = faker.credit_card_number("mastercard")
assert self.mastercard_pattern.fullmatch(number)
def test_maestro(self, faker, num_samples):
for _ in range(num_samples):
number = faker.credit_card_number("maestro")
assert self.maestro_pattern.fullmatch(number)
| TestPtPt |
python | allegroai__clearml | clearml/backend_api/services/v2_20/queues.py | {
"start": 77277,
"end": 78651
} | class ____(Request):
"""
:param queue: Queue id
:type queue: str
:param task: Task id
:type task: str
"""
_service = "queues"
_action = "move_task_to_back"
_version = "2.20"
_schema = {
"definitions": {},
"properties": {
"queue": {"description": "Queue id", "type": "string"},
"task": {"description": "Task id", "type": "string"},
},
"required": ["queue", "task"],
"type": "object",
}
def __init__(self, queue: str, task: str, **kwargs: Any) -> None:
super(MoveTaskToBackRequest, self).__init__(**kwargs)
self.queue = queue
self.task = task
@schema_property("queue")
def queue(self) -> str:
return self._property_queue
@queue.setter
def queue(self, value: str) -> None:
if value is None:
self._property_queue = None
return
self.assert_isinstance(value, "queue", six.string_types)
self._property_queue = value
@schema_property("task")
def task(self) -> str:
return self._property_task
@task.setter
def task(self, value: str) -> None:
if value is None:
self._property_task = None
return
self.assert_isinstance(value, "task", six.string_types)
self._property_task = value
| MoveTaskToBackRequest |
python | simonw__datasette | datasette/app.py | {
"start": 7662,
"end": 77147
} | class ____:
# Message constants:
INFO = 1
WARNING = 2
ERROR = 3
def __init__(
self,
files=None,
immutables=None,
cache_headers=True,
cors=False,
inspect_data=None,
config=None,
metadata=None,
sqlite_extensions=None,
template_dir=None,
plugins_dir=None,
static_mounts=None,
memory=False,
settings=None,
secret=None,
version_note=None,
config_dir=None,
pdb=False,
crossdb=False,
nolock=False,
internal=None,
default_deny=False,
):
self._startup_invoked = False
assert config_dir is None or isinstance(
config_dir, Path
), "config_dir= should be a pathlib.Path"
self.config_dir = config_dir
self.pdb = pdb
self._secret = secret or secrets.token_hex(32)
if files is not None and isinstance(files, str):
raise ValueError("files= must be a list of paths, not a string")
self.files = tuple(files or []) + tuple(immutables or [])
if config_dir:
db_files = []
for ext in ("db", "sqlite", "sqlite3"):
db_files.extend(config_dir.glob("*.{}".format(ext)))
self.files += tuple(str(f) for f in db_files)
if (
config_dir
and (config_dir / "inspect-data.json").exists()
and not inspect_data
):
inspect_data = json.loads((config_dir / "inspect-data.json").read_text())
if not immutables:
immutable_filenames = [i["file"] for i in inspect_data.values()]
immutables = [
f for f in self.files if Path(f).name in immutable_filenames
]
self.inspect_data = inspect_data
self.immutables = set(immutables or [])
self.databases = collections.OrderedDict()
self.actions = {} # .invoke_startup() will populate this
try:
self._refresh_schemas_lock = asyncio.Lock()
except RuntimeError as rex:
# Workaround for intermittent test failure, see:
# https://github.com/simonw/datasette/issues/1802
if "There is no current event loop in thread" in str(rex):
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
self._refresh_schemas_lock = asyncio.Lock()
else:
raise
self.crossdb = crossdb
self.nolock = nolock
if memory or crossdb or not self.files:
self.add_database(
Database(self, is_mutable=False, is_memory=True), name="_memory"
)
for file in self.files:
self.add_database(
Database(self, file, is_mutable=file not in self.immutables)
)
self.internal_db_created = False
if internal is None:
self._internal_database = Database(self, memory_name=secrets.token_hex())
else:
self._internal_database = Database(self, path=internal, mode="rwc")
self._internal_database.name = INTERNAL_DB_NAME
self.cache_headers = cache_headers
self.cors = cors
config_files = []
metadata_files = []
if config_dir:
metadata_files = [
config_dir / filename
for filename in ("metadata.json", "metadata.yaml", "metadata.yml")
if (config_dir / filename).exists()
]
config_files = [
config_dir / filename
for filename in ("datasette.json", "datasette.yaml", "datasette.yml")
if (config_dir / filename).exists()
]
if config_dir and metadata_files and not metadata:
with metadata_files[0].open() as fp:
metadata = parse_metadata(fp.read())
if config_dir and config_files and not config:
with config_files[0].open() as fp:
config = parse_metadata(fp.read())
# Move any "plugins" and "allow" settings from metadata to config - updates them in place
metadata = metadata or {}
config = config or {}
metadata, config = move_plugins_and_allow(metadata, config)
# Now migrate any known table configuration settings over as well
metadata, config = move_table_config(metadata, config)
self._metadata_local = metadata or {}
self.sqlite_extensions = []
for extension in sqlite_extensions or []:
# Resolve spatialite, if requested
if extension == "spatialite":
# Could raise SpatialiteNotFound
self.sqlite_extensions.append(find_spatialite())
else:
self.sqlite_extensions.append(extension)
if config_dir and (config_dir / "templates").is_dir() and not template_dir:
template_dir = str((config_dir / "templates").resolve())
self.template_dir = template_dir
if config_dir and (config_dir / "plugins").is_dir() and not plugins_dir:
plugins_dir = str((config_dir / "plugins").resolve())
self.plugins_dir = plugins_dir
if config_dir and (config_dir / "static").is_dir() and not static_mounts:
static_mounts = [("static", str((config_dir / "static").resolve()))]
self.static_mounts = static_mounts or []
if config_dir and (config_dir / "datasette.json").exists() and not config:
config = json.loads((config_dir / "datasette.json").read_text())
config = config or {}
config_settings = config.get("settings") or {}
# Validate settings from config file
for key, value in config_settings.items():
if key not in DEFAULT_SETTINGS:
raise StartupError(f"Invalid setting '{key}' in config file")
# Validate type matches expected type from DEFAULT_SETTINGS
if value is not None: # Allow None/null values
expected_type = type(DEFAULT_SETTINGS[key])
actual_type = type(value)
if actual_type != expected_type:
raise StartupError(
f"Setting '{key}' in config file has incorrect type. "
f"Expected {expected_type.__name__}, got {actual_type.__name__}. "
f"Value: {value!r}. "
f"Hint: In YAML/JSON config files, remove quotes from boolean and integer values."
)
# Validate settings from constructor parameter
if settings:
for key, value in settings.items():
if key not in DEFAULT_SETTINGS:
raise StartupError(f"Invalid setting '{key}' in settings parameter")
if value is not None:
expected_type = type(DEFAULT_SETTINGS[key])
actual_type = type(value)
if actual_type != expected_type:
raise StartupError(
f"Setting '{key}' in settings parameter has incorrect type. "
f"Expected {expected_type.__name__}, got {actual_type.__name__}. "
f"Value: {value!r}"
)
self.config = config
# CLI settings should overwrite datasette.json settings
self._settings = dict(DEFAULT_SETTINGS, **(config_settings), **(settings or {}))
self.renderers = {} # File extension -> (renderer, can_render) functions
self.version_note = version_note
if self.setting("num_sql_threads") == 0:
self.executor = None
else:
self.executor = futures.ThreadPoolExecutor(
max_workers=self.setting("num_sql_threads")
)
self.max_returned_rows = self.setting("max_returned_rows")
self.sql_time_limit_ms = self.setting("sql_time_limit_ms")
self.page_size = self.setting("default_page_size")
# Execute plugins in constructor, to ensure they are available
# when the rest of `datasette inspect` executes
if self.plugins_dir:
for filepath in glob.glob(os.path.join(self.plugins_dir, "*.py")):
if not os.path.isfile(filepath):
continue
mod = module_from_path(filepath, name=os.path.basename(filepath))
try:
pm.register(mod)
except ValueError:
# Plugin already registered
pass
# Configure Jinja
default_templates = str(app_root / "datasette" / "templates")
template_paths = []
if self.template_dir:
template_paths.append(self.template_dir)
plugin_template_paths = [
plugin["templates_path"]
for plugin in get_plugins()
if plugin["templates_path"]
]
template_paths.extend(plugin_template_paths)
template_paths.append(default_templates)
template_loader = ChoiceLoader(
[
FileSystemLoader(template_paths),
# Support {% extends "default:table.html" %}:
PrefixLoader(
{"default": FileSystemLoader(default_templates)}, delimiter=":"
),
]
)
environment = Environment(
loader=template_loader,
autoescape=True,
enable_async=True,
# undefined=StrictUndefined,
)
environment.filters["escape_css_string"] = escape_css_string
environment.filters["quote_plus"] = urllib.parse.quote_plus
self._jinja_env = environment
environment.filters["escape_sqlite"] = escape_sqlite
environment.filters["to_css_class"] = to_css_class
self._register_renderers()
self._permission_checks = collections.deque(maxlen=200)
self._root_token = secrets.token_hex(32)
self.root_enabled = False
self.default_deny = default_deny
self.client = DatasetteClient(self)
async def apply_metadata_json(self):
# Apply any metadata entries from metadata.json to the internal tables
# step 1: top-level metadata
for key in self._metadata_local or {}:
if key == "databases":
continue
value = self._metadata_local[key]
await self.set_instance_metadata(key, _to_string(value))
# step 2: database-level metadata
for dbname, db in self._metadata_local.get("databases", {}).items():
for key, value in db.items():
if key in ("tables", "queries"):
continue
await self.set_database_metadata(dbname, key, _to_string(value))
# step 3: table-level metadata
for tablename, table in db.get("tables", {}).items():
for key, value in table.items():
if key == "columns":
continue
await self.set_resource_metadata(
dbname, tablename, key, _to_string(value)
)
# step 4: column-level metadata (only descriptions in metadata.json)
for columnname, column_description in table.get("columns", {}).items():
await self.set_column_metadata(
dbname, tablename, columnname, "description", column_description
)
# TODO(alex) is metadata.json was loaded in, and --internal is not memory, then log
# a warning to user that they should delete their metadata.json file
def get_jinja_environment(self, request: Request = None) -> Environment:
environment = self._jinja_env
if request:
for environment in pm.hook.jinja2_environment_from_request(
datasette=self, request=request, env=environment
):
pass
return environment
def get_action(self, name_or_abbr: str):
"""
Returns an Action object for the given name or abbreviation. Returns None if not found.
"""
if name_or_abbr in self.actions:
return self.actions[name_or_abbr]
# Try abbreviation
for action in self.actions.values():
if action.abbr == name_or_abbr:
return action
return None
async def refresh_schemas(self):
if self._refresh_schemas_lock.locked():
return
async with self._refresh_schemas_lock:
await self._refresh_schemas()
async def _refresh_schemas(self):
internal_db = self.get_internal_database()
if not self.internal_db_created:
await init_internal_db(internal_db)
await self.apply_metadata_json()
self.internal_db_created = True
current_schema_versions = {
row["database_name"]: row["schema_version"]
for row in await internal_db.execute(
"select database_name, schema_version from catalog_databases"
)
}
for database_name, db in self.databases.items():
schema_version = (await db.execute("PRAGMA schema_version")).first()[0]
# Compare schema versions to see if we should skip it
if schema_version == current_schema_versions.get(database_name):
continue
placeholders = "(?, ?, ?, ?)"
values = [database_name, str(db.path), db.is_memory, schema_version]
if db.path is None:
placeholders = "(?, null, ?, ?)"
values = [database_name, db.is_memory, schema_version]
await internal_db.execute_write(
"""
INSERT OR REPLACE INTO catalog_databases (database_name, path, is_memory, schema_version)
VALUES {}
""".format(
placeholders
),
values,
)
await populate_schema_tables(internal_db, db)
@property
def urls(self):
return Urls(self)
@property
def pm(self):
"""
Return the global plugin manager instance.
This provides access to the pluggy PluginManager that manages all
Datasette plugins and hooks. Use datasette.pm.hook.hook_name() to
call plugin hooks.
"""
return pm
async def invoke_startup(self):
# This must be called for Datasette to be in a usable state
if self._startup_invoked:
return
# Register event classes
event_classes = []
for hook in pm.hook.register_events(datasette=self):
extra_classes = await await_me_maybe(hook)
if extra_classes:
event_classes.extend(extra_classes)
self.event_classes = tuple(event_classes)
# Register actions, but watch out for duplicate name/abbr
action_names = {}
action_abbrs = {}
for hook in pm.hook.register_actions(datasette=self):
if hook:
for action in hook:
if (
action.name in action_names
and action != action_names[action.name]
):
raise StartupError(
"Duplicate action name: {}".format(action.name)
)
if (
action.abbr
and action.abbr in action_abbrs
and action != action_abbrs[action.abbr]
):
raise StartupError(
"Duplicate action abbr: {}".format(action.abbr)
)
action_names[action.name] = action
if action.abbr:
action_abbrs[action.abbr] = action
self.actions[action.name] = action
for hook in pm.hook.prepare_jinja2_environment(
env=self._jinja_env, datasette=self
):
await await_me_maybe(hook)
for hook in pm.hook.startup(datasette=self):
await await_me_maybe(hook)
self._startup_invoked = True
def sign(self, value, namespace="default"):
return URLSafeSerializer(self._secret, namespace).dumps(value)
def unsign(self, signed, namespace="default"):
return URLSafeSerializer(self._secret, namespace).loads(signed)
def in_client(self) -> bool:
"""Check if the current code is executing within a datasette.client request.
Returns:
bool: True if currently executing within a datasette.client request, False otherwise.
"""
return _in_datasette_client.get()
def create_token(
self,
actor_id: str,
*,
expires_after: int | None = None,
restrict_all: Iterable[str] | None = None,
restrict_database: Dict[str, Iterable[str]] | None = None,
restrict_resource: Dict[str, Dict[str, Iterable[str]]] | None = None,
):
token = {"a": actor_id, "t": int(time.time())}
if expires_after:
token["d"] = expires_after
def abbreviate_action(action):
# rename to abbr if possible
action_obj = self.actions.get(action)
if not action_obj:
return action
return action_obj.abbr or action
if expires_after:
token["d"] = expires_after
if restrict_all or restrict_database or restrict_resource:
token["_r"] = {}
if restrict_all:
token["_r"]["a"] = [abbreviate_action(a) for a in restrict_all]
if restrict_database:
token["_r"]["d"] = {}
for database, actions in restrict_database.items():
token["_r"]["d"][database] = [abbreviate_action(a) for a in actions]
if restrict_resource:
token["_r"]["r"] = {}
for database, resources in restrict_resource.items():
for resource, actions in resources.items():
token["_r"]["r"].setdefault(database, {})[resource] = [
abbreviate_action(a) for a in actions
]
return "dstok_{}".format(self.sign(token, namespace="token"))
def get_database(self, name=None, route=None):
if route is not None:
matches = [db for db in self.databases.values() if db.route == route]
if not matches:
raise KeyError
return matches[0]
if name is None:
name = [key for key in self.databases.keys()][0]
return self.databases[name]
def add_database(self, db, name=None, route=None):
new_databases = self.databases.copy()
if name is None:
# Pick a unique name for this database
suggestion = db.suggest_name()
name = suggestion
else:
suggestion = name
i = 2
while name in self.databases:
name = "{}_{}".format(suggestion, i)
i += 1
db.name = name
db.route = route or name
new_databases[name] = db
# don't mutate! that causes race conditions with live import
self.databases = new_databases
return db
def add_memory_database(self, memory_name, name=None, route=None):
return self.add_database(
Database(self, memory_name=memory_name), name=name, route=route
)
def remove_database(self, name):
self.get_database(name).close()
new_databases = self.databases.copy()
new_databases.pop(name)
self.databases = new_databases
def setting(self, key):
return self._settings.get(key, None)
def settings_dict(self):
# Returns a fully resolved settings dictionary, useful for templates
return {option.name: self.setting(option.name) for option in SETTINGS}
def _metadata_recursive_update(self, orig, updated):
if not isinstance(orig, dict) or not isinstance(updated, dict):
return orig
for key, upd_value in updated.items():
if isinstance(upd_value, dict) and isinstance(orig.get(key), dict):
orig[key] = self._metadata_recursive_update(orig[key], upd_value)
else:
orig[key] = upd_value
return orig
async def get_instance_metadata(self):
rows = await self.get_internal_database().execute(
"""
SELECT
key,
value
FROM metadata_instance
"""
)
return dict(rows)
async def get_database_metadata(self, database_name: str):
rows = await self.get_internal_database().execute(
"""
SELECT
key,
value
FROM metadata_databases
WHERE database_name = ?
""",
[database_name],
)
return dict(rows)
async def get_resource_metadata(self, database_name: str, resource_name: str):
rows = await self.get_internal_database().execute(
"""
SELECT
key,
value
FROM metadata_resources
WHERE database_name = ?
AND resource_name = ?
""",
[database_name, resource_name],
)
return dict(rows)
async def get_column_metadata(
self, database_name: str, resource_name: str, column_name: str
):
rows = await self.get_internal_database().execute(
"""
SELECT
key,
value
FROM metadata_columns
WHERE database_name = ?
AND resource_name = ?
AND column_name = ?
""",
[database_name, resource_name, column_name],
)
return dict(rows)
async def set_instance_metadata(self, key: str, value: str):
# TODO upsert only supported on SQLite 3.24.0 (2018-06-04)
await self.get_internal_database().execute_write(
"""
INSERT INTO metadata_instance(key, value)
VALUES(?, ?)
ON CONFLICT(key) DO UPDATE SET value = excluded.value;
""",
[key, value],
)
async def set_database_metadata(self, database_name: str, key: str, value: str):
# TODO upsert only supported on SQLite 3.24.0 (2018-06-04)
await self.get_internal_database().execute_write(
"""
INSERT INTO metadata_databases(database_name, key, value)
VALUES(?, ?, ?)
ON CONFLICT(database_name, key) DO UPDATE SET value = excluded.value;
""",
[database_name, key, value],
)
async def set_resource_metadata(
self, database_name: str, resource_name: str, key: str, value: str
):
# TODO upsert only supported on SQLite 3.24.0 (2018-06-04)
await self.get_internal_database().execute_write(
"""
INSERT INTO metadata_resources(database_name, resource_name, key, value)
VALUES(?, ?, ?, ?)
ON CONFLICT(database_name, resource_name, key) DO UPDATE SET value = excluded.value;
""",
[database_name, resource_name, key, value],
)
async def set_column_metadata(
self,
database_name: str,
resource_name: str,
column_name: str,
key: str,
value: str,
):
# TODO upsert only supported on SQLite 3.24.0 (2018-06-04)
await self.get_internal_database().execute_write(
"""
INSERT INTO metadata_columns(database_name, resource_name, column_name, key, value)
VALUES(?, ?, ?, ?, ?)
ON CONFLICT(database_name, resource_name, column_name, key) DO UPDATE SET value = excluded.value;
""",
[database_name, resource_name, column_name, key, value],
)
def get_internal_database(self):
return self._internal_database
def plugin_config(self, plugin_name, database=None, table=None, fallback=True):
"""Return config for plugin, falling back from specified database/table"""
if database is None and table is None:
config = self._plugin_config_top(plugin_name)
else:
config = self._plugin_config_nested(plugin_name, database, table, fallback)
return resolve_env_secrets(config, os.environ)
def _plugin_config_top(self, plugin_name):
"""Returns any top-level plugin configuration for the specified plugin."""
return ((self.config or {}).get("plugins") or {}).get(plugin_name)
def _plugin_config_nested(self, plugin_name, database, table=None, fallback=True):
"""Returns any database or table-level plugin configuration for the specified plugin."""
db_config = ((self.config or {}).get("databases") or {}).get(database)
# if there's no db-level configuration, then return early, falling back to top-level if needed
if not db_config:
return self._plugin_config_top(plugin_name) if fallback else None
db_plugin_config = (db_config.get("plugins") or {}).get(plugin_name)
if table:
table_plugin_config = (
((db_config.get("tables") or {}).get(table) or {}).get("plugins") or {}
).get(plugin_name)
# fallback to db_config or top-level config, in that order, if needed
if table_plugin_config is None and fallback:
return db_plugin_config or self._plugin_config_top(plugin_name)
return table_plugin_config
# fallback to top-level if needed
if db_plugin_config is None and fallback:
self._plugin_config_top(plugin_name)
return db_plugin_config
def app_css_hash(self):
if not hasattr(self, "_app_css_hash"):
with open(os.path.join(str(app_root), "datasette/static/app.css")) as fp:
self._app_css_hash = hashlib.sha1(fp.read().encode("utf8")).hexdigest()[
:6
]
return self._app_css_hash
async def get_canned_queries(self, database_name, actor):
    """Gather canned queries for *database_name* from all canned_queries plugin hooks.

    Returns a dict mapping query name to a query dictionary; bare SQL
    string values are normalized to {"sql": ..., "name": ...} dicts.
    """
    queries = {}
    for more_queries in pm.hook.canned_queries(
        datasette=self,
        database=database_name,
        actor=actor,
    ):
        # Hooks may return awaitables
        more_queries = await await_me_maybe(more_queries)
        queries.update(more_queries or {})
    # Fix any {"name": "select ..."} queries to be {"name": {"sql": "select ..."}}
    for key in queries:
        if not isinstance(queries[key], dict):
            queries[key] = {"sql": queries[key]}
        # Also make sure "name" is available:
        queries[key]["name"] = key
    return queries
async def get_canned_query(self, database_name, query_name, actor):
    """Return the named canned query visible to *actor*, or None when absent."""
    available = await self.get_canned_queries(database_name, actor)
    match = available.get(query_name)
    return match if match else None
def _prepare_connection(self, conn, database):
    """Configure a freshly-opened SQLite connection before it serves queries.

    Sets row/text factories, loads configured SQLite extensions, applies
    the cache_size_kb setting, runs the prepare_connection plugin hook
    and, in crossdb mode, attaches other databases to the _memory
    connection.
    """
    conn.row_factory = sqlite3.Row
    # Replace undecodable bytes rather than raising UnicodeDecodeError
    conn.text_factory = lambda x: str(x, "utf-8", "replace")
    if self.sqlite_extensions and database != INTERNAL_DB_NAME:
        conn.enable_load_extension(True)
        for extension in self.sqlite_extensions:
            # "extension" is either a string path to the extension
            # or a 2-item tuple that specifies which entrypoint to load.
            if isinstance(extension, tuple):
                path, entrypoint = extension
                conn.execute("SELECT load_extension(?, ?)", [path, entrypoint])
            else:
                conn.execute("SELECT load_extension(?)", [extension])
    if self.setting("cache_size_kb"):
        # A negative cache_size tells SQLite to interpret the value in KiB
        conn.execute(f"PRAGMA cache_size=-{self.setting('cache_size_kb')}")
    # pylint: disable=no-member
    if database != INTERNAL_DB_NAME:
        pm.hook.prepare_connection(conn=conn, database=database, datasette=self)
    # If self.crossdb and this is _memory, connect the first SQLITE_LIMIT_ATTACHED databases
    if self.crossdb and database == "_memory":
        count = 0
        for db_name, db in self.databases.items():
            if count >= SQLITE_LIMIT_ATTACHED or db.is_memory:
                continue
            sql = 'ATTACH DATABASE "file:{path}?{qs}" AS [{name}];'.format(
                path=db.path,
                qs="mode=ro" if db.is_mutable else "immutable=1",
                name=db_name,
            )
            conn.execute(sql)
            count += 1
def add_message(self, request, message, type=INFO):
    """Queue a flash message on *request*; it is written to a signed
    cookie later by _write_messages_to_response()."""
    if not hasattr(request, "_messages"):
        request._messages = []
        request._messages_should_clear = False
    request._messages.append((message, type))
def _write_messages_to_response(self, request, response):
    """Persist queued flash messages into the signed ds_messages cookie,
    or clear the cookie once the messages have been displayed."""
    if getattr(request, "_messages", None):
        # Set those messages
        response.set_cookie("ds_messages", self.sign(request._messages, "messages"))
    elif getattr(request, "_messages_should_clear", False):
        response.set_cookie("ds_messages", "", expires=0, max_age=0)
def _show_messages(self, request):
if getattr(request, "_messages", None):
request._messages_should_clear = True
messages = request._messages
request._messages = []
return messages
else:
return []
async def _crumb_items(self, request, table=None, database=None):
    """Build breadcrumb navigation entries (home / database / table),
    including only the links the current actor is allowed to view."""
    crumbs = []
    actor = None
    if request:
        actor = request.actor
    # Top-level link
    if await self.allowed(action="view-instance", actor=actor):
        crumbs.append({"href": self.urls.instance(), "label": "home"})
    # Database link
    if database:
        if await self.allowed(
            action="view-database",
            resource=DatabaseResource(database=database),
            actor=actor,
        ):
            crumbs.append(
                {
                    "href": self.urls.database(database),
                    "label": database,
                }
            )
    # Table link
    if table:
        assert database, "table= requires database="
        if await self.allowed(
            action="view-table",
            resource=TableResource(database=database, table=table),
            actor=actor,
        ):
            crumbs.append(
                {
                    "href": self.urls.table(database, table),
                    "label": table,
                }
            )
    return crumbs
async def actors_from_ids(
    self, actor_ids: Iterable[str | int]
) -> Dict[int | str, Dict]:
    """Resolve actor IDs to actor dictionaries via the actors_from_ids
    plugin hook, defaulting to {"id": actor_id} stubs when no plugin
    implements the hook."""
    result = pm.hook.actors_from_ids(datasette=self, actor_ids=actor_ids)
    if result is None:
        # Do the default thing
        return {actor_id: {"id": actor_id} for actor_id in actor_ids}
    # The hook may return an awaitable
    result = await await_me_maybe(result)
    return result
async def track_event(self, event: Event):
    """Dispatch *event* to every registered track_event plugin hook.

    Raises AssertionError if *event* is not one of the registered
    event classes.
    """
    assert isinstance(event, self.event_classes), "Invalid event type: {}".format(
        type(event)
    )
    for hook in pm.hook.track_event(datasette=self, event=event):
        await await_me_maybe(hook)
def resource_for_action(self, action: str, parent: str | None, child: str | None):
    """
    Create a Resource instance for the given action with parent/child values.

    Looks up the action's resource_class and instantiates it with the
    provided parent and child identifiers.

    Args:
        action: The action name (e.g., "view-table", "view-query")
        parent: The parent resource identifier (e.g., database name)
        child: The child resource identifier (e.g., table/query name)

    Returns:
        A Resource instance of the appropriate subclass

    Raises:
        ValueError: If the action is unknown
    """
    from datasette.permissions import Resource

    action_obj = self.actions.get(action)
    if not action_obj:
        raise ValueError(f"Unknown action: {action}")
    resource_class = action_obj.resource_class
    # Deliberately bypass the subclass __init__ (which may require
    # different arguments) and initialize the base Resource state directly.
    instance = object.__new__(resource_class)
    Resource.__init__(instance, parent=parent, child=child)
    return instance
async def check_visibility(
    self,
    actor: dict,
    action: str,
    resource: "Resource" | None = None,
):
    """
    Check if actor can see a resource and if it's private.

    Returns (visible, private) tuple:

    - visible: bool - can the actor see it?
    - private: bool - if visible, can anonymous users NOT see it?

    Raises TypeError when *resource* is neither None nor a Resource.
    """
    from datasette.permissions import Resource

    # Validate that resource is a Resource object or None
    if resource is not None and not isinstance(resource, Resource):
        # Fixed: this was an f-string with no placeholders (lint F541)
        raise TypeError("resource must be a Resource subclass instance or None.")
    # Check if actor can see it
    if not await self.allowed(action=action, resource=resource, actor=actor):
        return False, False
    # Check if anonymous user can see it (for "private" flag)
    if not await self.allowed(action=action, resource=resource, actor=None):
        # Actor can see it but anonymous cannot - it's private
        return True, True
    # Both actor and anonymous can see it - it's public
    return True, False
async def allowed_resources_sql(
    self,
    *,
    action: str,
    actor: dict | None = None,
    parent: str | None = None,
    include_is_private: bool = False,
) -> ResourcesSQL:
    """
    Build SQL query to get all resources the actor can access for the given action.

    Args:
        action: The action name (e.g., "view-table")
        actor: The actor dict (or None for unauthenticated)
        parent: Optional parent filter (e.g., database name) to limit results
        include_is_private: If True, include is_private column showing if anonymous cannot access

    Returns a namedtuple of (query: str, params: dict) that can be executed against the internal database.
    The query returns rows with (parent, child, reason) columns, plus is_private if requested.

    Raises ValueError for an unknown action.

    Example:

        query, params = await datasette.allowed_resources_sql(
            action="view-table",
            actor=actor,
            parent="mydb",
            include_is_private=True
        )
        result = await datasette.get_internal_database().execute(query, params)
    """
    from datasette.utils.actions_sql import build_allowed_resources_sql

    action_obj = self.actions.get(action)
    if not action_obj:
        raise ValueError(f"Unknown action: {action}")
    sql, params = await build_allowed_resources_sql(
        self, actor, action, parent=parent, include_is_private=include_is_private
    )
    return ResourcesSQL(sql, params)
async def allowed_resources(
    self,
    action: str,
    actor: dict | None = None,
    *,
    parent: str | None = None,
    include_is_private: bool = False,
    include_reasons: bool = False,
    limit: int = 100,
    next: str | None = None,
) -> PaginatedResources:
    """
    Return paginated resources the actor can access for the given action.

    Uses SQL with keyset pagination to efficiently filter resources.
    Returns PaginatedResources with list of Resource instances and pagination metadata.

    Args:
        action: The action name (e.g., "view-table")
        actor: The actor dict (or None for unauthenticated)
        parent: Optional parent filter (e.g., database name) to limit results
        include_is_private: If True, adds a .private attribute to each Resource
        include_reasons: If True, adds a .reasons attribute with List[str] of permission reasons
        limit: Maximum number of results to return (1-1000, default 100)
        next: Keyset token from previous page for pagination

    Returns:
        PaginatedResources with:
        - resources: List of Resource objects for this page
        - next: Token for next page (None if no more results)

    Example:

        # Get first page of tables
        page = await datasette.allowed_resources("view-table", actor, limit=50)
        for table in page.resources:
            print(f"{table.parent}/{table.child}")

        # Get next page
        if page.next:
            next_page = await datasette.allowed_resources(
                "view-table", actor, limit=50, next=page.next
            )

        # With reasons for debugging
        page = await datasette.allowed_resources(
            "view-table", actor, include_reasons=True
        )
        for table in page.resources:
            print(f"{table.child}: {table.reasons}")

        # Iterate through all results with async generator
        page = await datasette.allowed_resources("view-table", actor)
        async for table in page.all():
            print(table.child)
    """
    action_obj = self.actions.get(action)
    if not action_obj:
        raise ValueError(f"Unknown action: {action}")
    # Validate and cap limit
    limit = min(max(1, limit), 1000)
    # Get base SQL query
    query, params = await self.allowed_resources_sql(
        action=action,
        actor=actor,
        parent=parent,
        include_is_private=include_is_private,
    )
    # Add keyset pagination WHERE clause if next token provided
    if next:
        try:
            components = urlsafe_components(next)
            if len(components) >= 2:
                last_parent, last_child = components[0], components[1]
                # Keyset condition: (parent > last) OR (parent = last AND child > last)
                keyset_where = """
                    (parent > :keyset_parent OR
                     (parent = :keyset_parent AND child > :keyset_child))
                """
                # Wrap original query and add keyset filter
                query = f"SELECT * FROM ({query}) WHERE {keyset_where}"
                params["keyset_parent"] = last_parent
                params["keyset_child"] = last_child
        except (ValueError, KeyError):
            # Invalid token - ignore and start from beginning
            pass
    # Add LIMIT (fetch limit+1 to detect if there are more results)
    # Note: query from allowed_resources_sql() already includes ORDER BY parent, child
    query = f"{query} LIMIT :limit"
    params["limit"] = limit + 1
    # Execute query
    result = await self.get_internal_database().execute(query, params)
    rows = list(result.rows)
    # Check if truncated (got more than limit rows)
    truncated = len(rows) > limit
    if truncated:
        rows = rows[:limit]  # Remove the extra row
    # Build Resource objects with optional attributes
    resources = []
    for row in rows:
        # row[0]=parent, row[1]=child, row[2]=reason, row[3]=is_private (if requested)
        resource = self.resource_for_action(action, parent=row[0], child=row[1])
        # Add reasons if requested
        if include_reasons:
            reason_json = row[2]
            try:
                reasons_array = (
                    json.loads(reason_json) if isinstance(reason_json, str) else []
                )
                resource.reasons = [r for r in reasons_array if r is not None]
            except (json.JSONDecodeError, TypeError):
                resource.reasons = [reason_json] if reason_json else []
        # Add private flag if requested
        if include_is_private:
            resource.private = bool(row[3])
        resources.append(resource)
    # Generate next token if there are more results
    next_token = None
    if truncated and resources:
        last_resource = resources[-1]
        # Use tilde-encoding like table pagination
        next_token = "{},{}".format(
            tilde_encode(str(last_resource.parent)),
            tilde_encode(str(last_resource.child)),
        )
    return PaginatedResources(
        resources=resources,
        next=next_token,
        _datasette=self,
        _action=action,
        _actor=actor,
        _parent=parent,
        _include_is_private=include_is_private,
        _include_reasons=include_reasons,
        _limit=limit,
    )
async def allowed(
    self,
    *,
    action: str,
    resource: "Resource" = None,
    actor: dict | None = None,
) -> bool:
    """
    Check if actor can perform action on specific resource.

    Uses SQL to check permission for a single resource without fetching all resources.
    This is efficient - it does NOT call allowed_resources() and check membership.

    For global actions, resource should be None (or omitted).

    Every check is also recorded in self._permission_checks for the
    permissions debug view.

    Example:

        from datasette.resources import TableResource

        can_view = await datasette.allowed(
            action="view-table",
            resource=TableResource(database="analytics", table="users"),
            actor=actor
        )

        # For global actions, resource can be omitted:
        can_debug = await datasette.allowed(action="permissions-debug", actor=actor)
    """
    from datasette.utils.actions_sql import check_permission_for_resource

    # For global actions, resource remains None
    # Check if this action has also_requires - if so, check that action first
    action_obj = self.actions.get(action)
    if action_obj and action_obj.also_requires:
        # Must have the required action first
        if not await self.allowed(
            action=action_obj.also_requires,
            resource=resource,
            actor=actor,
        ):
            return False
    # For global actions, resource is None
    parent = resource.parent if resource else None
    child = resource.child if resource else None
    result = await check_permission_for_resource(
        datasette=self,
        actor=actor,
        action=action,
        parent=parent,
        child=child,
    )
    # Log the permission check for debugging
    self._permission_checks.append(
        PermissionCheck(
            when=datetime.datetime.now(datetime.timezone.utc).isoformat(),
            actor=actor,
            action=action,
            parent=parent,
            child=child,
            result=result,
        )
    )
    return result
async def ensure_permission(
self,
*,
action: str,
resource: "Resource" = None,
actor: dict | None = None,
):
"""
Check if actor can perform action on resource, raising Forbidden if not.
This is a convenience wrapper around allowed() that raises Forbidden
instead of returning False. Use this when you want to enforce a permission
check and halt execution if it fails.
Example:
from datasette.resources import TableResource
# Will raise Forbidden if actor cannot view the table
await datasette.ensure_permission(
action="view-table",
resource=TableResource(database="analytics", table="users"),
actor=request.actor
)
# For instance-level actions, resource can be omitted:
await datasette.ensure_permission(
action="permissions-debug",
actor=request.actor
)
"""
if not await self.allowed(action=action, resource=resource, actor=actor):
raise Forbidden(action)
async def execute(
    self,
    db_name,
    sql,
    params=None,
    truncate=False,
    custom_time_limit=None,
    page_size=None,
    log_sql_errors=True,
):
    """Run *sql* against the named database by delegating to its
    Database.execute() method, forwarding all keyword options."""
    target = self.databases[db_name]
    return await target.execute(
        sql,
        params=params,
        truncate=truncate,
        custom_time_limit=custom_time_limit,
        page_size=page_size,
        log_sql_errors=log_sql_errors,
    )
async def expand_foreign_keys(self, actor, database, table, column, values):
    """Returns dict mapping (column, value) -> label.

    Looks up the foreign key for *column*, verifies *actor* may view the
    referenced table, then resolves each value to its label column.
    Returns {} when there is no foreign key, the referenced table is not
    visible, or the lookup query is interrupted.
    """
    labeled_fks = {}
    db = self.databases[database]
    foreign_keys = await db.foreign_keys_for_table(table)
    # Find the foreign_key for this column
    try:
        fk = [
            foreign_key
            for foreign_key in foreign_keys
            if foreign_key["column"] == column
        ][0]
    except IndexError:
        return {}
    # Ensure user has permission to view the referenced table
    from datasette.resources import TableResource

    other_table = fk["other_table"]
    other_column = fk["other_column"]
    visible, _ = await self.check_visibility(
        actor,
        action="view-table",
        resource=TableResource(database=database, table=other_table),
    )
    if not visible:
        return {}
    label_column = await db.label_column_for_table(other_table)
    if not label_column:
        # No label column: fall back to the stringified value itself
        return {(fk["column"], value): str(value) for value in values}
    labeled_fks = {}
    sql = """
        select {other_column}, {label_column}
        from {other_table}
        where {other_column} in ({placeholders})
    """.format(
        other_column=escape_sqlite(other_column),
        label_column=escape_sqlite(label_column),
        other_table=escape_sqlite(other_table),
        placeholders=", ".join(["?"] * len(set(values))),
    )
    try:
        results = await self.execute(database, sql, list(set(values)))
    except QueryInterrupted:
        # Best effort: leave unmatched values unlabeled
        pass
    else:
        for id, value in results:
            labeled_fks[(fk["column"], id)] = value
    return labeled_fks
def absolute_url(self, request, path):
    """Resolve *path* against the request URL; upgrades the scheme to
    https when the force_https_urls setting is enabled."""
    resolved = urllib.parse.urljoin(request.url, path)
    if resolved.startswith("http://") and self.setting("force_https_urls"):
        resolved = "https://" + resolved.removeprefix("http://")
    return resolved
def _connected_databases(self):
return [
{
"name": d.name,
"route": d.route,
"path": d.path,
"size": d.size,
"is_mutable": d.is_mutable,
"is_memory": d.is_memory,
"hash": d.hash,
}
for name, d in self.databases.items()
]
def _versions(self):
    """Collect version info for Python, Datasette, SQLite and extensions.

    Probes a throwaway in-memory connection for the SQLite version,
    loaded extensions (json1, SpatiaLite), supported FTS modules and
    compile options. Feeds the /-/versions endpoint.
    """
    conn = sqlite3.connect(":memory:")
    self._prepare_connection(conn, "_memory")
    sqlite_version = conn.execute("select sqlite_version()").fetchone()[0]
    sqlite_extensions = {"json1": detect_json1(conn)}
    for extension, testsql, hasversion in (
        ("spatialite", "SELECT spatialite_version()", True),
    ):
        try:
            result = conn.execute(testsql)
            if hasversion:
                sqlite_extensions[extension] = result.fetchone()[0]
            else:
                sqlite_extensions[extension] = None
        except Exception:
            # Extension not loaded - simply omit it
            pass
    # More details on SpatiaLite
    if "spatialite" in sqlite_extensions:
        spatialite_details = {}
        for fn in SPATIALITE_FUNCTIONS:
            try:
                result = conn.execute("select {}()".format(fn))
                spatialite_details[fn] = result.fetchone()[0]
            except Exception as e:
                spatialite_details[fn] = {"error": str(e)}
        sqlite_extensions["spatialite"] = spatialite_details
    # Figure out supported FTS versions
    fts_versions = []
    for fts in ("FTS5", "FTS4", "FTS3"):
        try:
            conn.execute(
                "CREATE VIRTUAL TABLE v{fts} USING {fts} (data)".format(fts=fts)
            )
            fts_versions.append(fts)
        except sqlite3.OperationalError:
            continue
    datasette_version = {"version": __version__}
    if self.version_note:
        datasette_version["note"] = self.version_note
    try:
        # Optional import to avoid breaking Pyodide
        # https://github.com/simonw/datasette/issues/1733#issuecomment-1115268245
        import uvicorn

        uvicorn_version = uvicorn.__version__
    except ImportError:
        uvicorn_version = None
    info = {
        "python": {
            "version": ".".join(map(str, sys.version_info[:3])),
            "full": sys.version,
        },
        "datasette": datasette_version,
        "asgi": "3.0",
        "uvicorn": uvicorn_version,
        "sqlite": {
            "version": sqlite_version,
            "fts_versions": fts_versions,
            "extensions": sqlite_extensions,
            "compile_options": [
                r[0] for r in conn.execute("pragma compile_options;").fetchall()
            ],
        },
    }
    if using_pysqlite3:
        # Record which pysqlite3 distribution is providing the sqlite3 module
        for package in ("pysqlite3", "pysqlite3-binary"):
            try:
                info["pysqlite3"] = importlib.metadata.version(package)
                break
            except importlib.metadata.PackageNotFoundError:
                pass
    return info
def _plugins(self, request=None, all=False):
    """Describe installed plugins for /-/plugins.

    Default (bundled) plugins are hidden unless ?all is passed on the
    request, or all=True when called directly.
    """
    ps = list(get_plugins())
    should_show_all = False
    if request is not None:
        should_show_all = request.args.get("all")
    else:
        should_show_all = all
    if not should_show_all:
        ps = [p for p in ps if p["name"] not in DEFAULT_PLUGINS]
    ps.sort(key=lambda p: p["name"])
    return [
        {
            "name": p["name"],
            "static": p["static_path"] is not None,
            "templates": p["templates_path"] is not None,
            "version": p.get("version"),
            "hooks": list(sorted(set(p["hooks"]))),
        }
        for p in ps
    ]
def _threads(self):
    """Describe live threads and asyncio tasks for the /-/threads endpoint.

    Returns an empty summary when num_sql_threads is 0 (thread
    introspection disabled).
    """
    if self.setting("num_sql_threads") == 0:
        return {"num_threads": 0, "threads": []}
    threads = list(threading.enumerate())
    d = {
        "num_threads": len(threads),
        "threads": [
            {"name": t.name, "ident": t.ident, "daemon": t.daemon} for t in threads
        ],
    }
    tasks = asyncio.all_tasks()
    d.update(
        {
            "num_tasks": len(tasks),
            "tasks": [_cleaner_task_str(t) for t in tasks],
        }
    )
    return d
def _actor(self, request):
    """Payload for the /-/actor endpoint: the current request's actor."""
    return {"actor": request.actor}
def _actions(self):
return [
{
"name": action.name,
"abbr": action.abbr,
"description": action.description,
"takes_parent": action.takes_parent,
"takes_child": action.takes_child,
"resource_class": (
action.resource_class.__name__ if action.resource_class else None
),
"also_requires": action.also_requires,
}
for action in sorted(self.actions.values(), key=lambda a: a.name)
]
async def table_config(self, database: str, table: str) -> dict:
    """Return dictionary of configuration for specified table ({} if absent)."""
    databases = (self.config or {}).get("databases", {})
    tables = databases.get(database, {}).get("tables", {})
    return tables.get(table, {})
def _register_renderers(self):
    """Register output renderers which output data in custom formats.

    Populates self.renderers mapping extension -> (render_fn, can_render_fn),
    starting with the built-in JSON renderer and then whatever the
    register_output_renderer plugin hook provides.
    """
    # Built-in renderers
    self.renderers["json"] = (json_renderer, lambda: True)

    # Hooks
    hook_renderers = []
    # pylint: disable=no-member
    for hook in pm.hook.register_output_renderer(datasette=self):
        # A hook may return a single renderer dict or a list of them
        if type(hook) is list:
            hook_renderers += hook
        else:
            hook_renderers.append(hook)
    for renderer in hook_renderers:
        self.renderers[renderer["extension"]] = (
            # It used to be called "callback" - remove this in Datasette 1.0
            renderer.get("render") or renderer["callback"],
            renderer.get("can_render") or (lambda: True),
        )
async def render_template(
    self,
    templates: List[str] | str | Template,
    context: Dict[str, Any] | Context | None = None,
    request: Request | None = None,
    view_name: str | None = None,
):
    """Render a Jinja template with the standard Datasette context.

    Accepts a Template, a template name or a list of candidate names.
    Merges the supplied context with request/actor helpers, plugin
    extra_template_vars, body scripts and asset URLs. With
    ?_context=1 and the template_debug setting enabled, returns the
    JSON-dumped context instead of the rendered page.

    Raises if invoke_startup() has not been awaited first.
    """
    if not self._startup_invoked:
        raise Exception("render_template() called before await ds.invoke_startup()")
    context = context or {}
    if isinstance(templates, Template):
        template = templates
    else:
        if isinstance(templates, str):
            templates = [templates]
        # select_template picks the first template that exists
        template = self.get_jinja_environment(request).select_template(templates)
    if dataclasses.is_dataclass(context):
        context = dataclasses.asdict(context)
    body_scripts = []
    # pylint: disable=no-member
    for extra_script in pm.hook.extra_body_script(
        template=template.name,
        database=context.get("database"),
        table=context.get("table"),
        columns=context.get("columns"),
        view_name=view_name,
        request=request,
        datasette=self,
    ):
        extra_script = await await_me_maybe(extra_script)
        # Hooks may return either a plain script string or {"script": ..., "module": ...}
        if isinstance(extra_script, dict):
            script = extra_script["script"]
            module = bool(extra_script.get("module"))
        else:
            script = extra_script
            module = False
        body_scripts.append({"script": Markup(script), "module": module})
    extra_template_vars = {}
    # pylint: disable=no-member
    for extra_vars in pm.hook.extra_template_vars(
        template=template.name,
        database=context.get("database"),
        table=context.get("table"),
        columns=context.get("columns"),
        view_name=view_name,
        request=request,
        datasette=self,
    ):
        extra_vars = await await_me_maybe(extra_vars)
        assert isinstance(extra_vars, dict), "extra_vars is of type {}".format(
            type(extra_vars)
        )
        extra_template_vars.update(extra_vars)

    # Lazy callable so templates only gather menu links when they use them
    async def menu_links():
        links = []
        for hook in pm.hook.menu_links(
            datasette=self,
            actor=request.actor if request else None,
            request=request or None,
        ):
            extra_links = await await_me_maybe(hook)
            if extra_links:
                links.extend(extra_links)
        return links

    template_context = {
        **context,
        **{
            "request": request,
            "crumb_items": self._crumb_items,
            "urls": self.urls,
            "actor": request.actor if request else None,
            "menu_links": menu_links,
            "display_actor": display_actor,
            "show_logout": request is not None
            and "ds_actor" in request.cookies
            and request.actor,
            "app_css_hash": self.app_css_hash(),
            "zip": zip,
            "body_scripts": body_scripts,
            "format_bytes": format_bytes,
            "show_messages": lambda: self._show_messages(request),
            "extra_css_urls": await self._asset_urls(
                "extra_css_urls", template, context, request, view_name
            ),
            "extra_js_urls": await self._asset_urls(
                "extra_js_urls", template, context, request, view_name
            ),
            "base_url": self.setting("base_url"),
            "csrftoken": request.scope["csrftoken"] if request else lambda: "",
            "datasette_version": __version__,
        },
        **extra_template_vars,
    }
    if request and request.args.get("_context") and self.setting("template_debug"):
        return "<pre>{}</pre>".format(
            escape(json.dumps(template_context, default=repr, indent=4))
        )
    return await template.render_async(template_context)
def set_actor_cookie(
self, response: Response, actor: dict, expire_after: int | None = None
):
data = {"a": actor}
if expire_after:
expires_at = int(time.time()) + (24 * 60 * 60)
data["e"] = baseconv.base62.encode(expires_at)
response.set_cookie("ds_actor", self.sign(data, "actor"))
def delete_actor_cookie(self, response: Response):
    """Expire the ds_actor cookie immediately, logging the actor out."""
    response.set_cookie("ds_actor", "", expires=0, max_age=0)
async def _asset_urls(self, key, template, context, request, view_name):
    """Collect CSS/JS asset URLs from the named plugin hook plus config.

    *key* is "extra_css_urls" or "extra_js_urls". Entries may be plain
    URL strings or {"url": ..., "sri": ..., "module": ...} dicts;
    duplicates are dropped and root-relative URLs are prefixed with
    base_url.
    """
    # Flatten list-of-lists from plugins:
    seen_urls = set()
    collected = []
    for hook in getattr(pm.hook, key)(
        template=template.name,
        database=context.get("database"),
        table=context.get("table"),
        columns=context.get("columns"),
        view_name=view_name,
        request=request,
        datasette=self,
    ):
        hook = await await_me_maybe(hook)
        collected.extend(hook)
    collected.extend((self.config or {}).get(key) or [])
    output = []
    for url_or_dict in collected:
        if isinstance(url_or_dict, dict):
            url = url_or_dict["url"]
            sri = url_or_dict.get("sri")
            module = bool(url_or_dict.get("module"))
        else:
            url = url_or_dict
            sri = None
            module = False
        if url in seen_urls:
            continue
        seen_urls.add(url)
        if url.startswith("/"):
            # Take base_url into account:
            url = self.urls.path(url)
        script = {"url": url}
        if sri:
            script["sri"] = sri
        if module:
            script["module"] = True
        output.append(script)
    return output
def _config(self):
    """Return self.config with secret-looking keys redacted, for /-/config."""
    return redact_keys(
        self.config, ("secret", "key", "password", "token", "hash", "dsn")
    )
def _routes(self):
    """Assemble the full URL routing table.

    Plugin-registered routes come first, followed by the built-in
    index/debug/static routes, then database- and table-scoped routes.
    Returns a list of (compiled_regex, view) pairs.
    """
    routes = []

    # Plugin routes take priority over the built-ins below
    for routes_to_add in pm.hook.register_routes(datasette=self):
        for regex, view_fn in routes_to_add:
            routes.append((regex, wrap_view(view_fn, self)))

    def add_route(view, regex):
        routes.append((regex, view))

    add_route(IndexView.as_view(self), r"/(\.(?P<format>jsono?))?$")
    add_route(IndexView.as_view(self), r"/-/(\.(?P<format>jsono?))?$")
    add_route(permanent_redirect("/-/"), r"/-$")
    # TODO: /favicon.ico and /-/static/ deserve far-future cache expires
    add_route(favicon, "/favicon.ico")
    add_route(
        asgi_static(app_root / "datasette" / "static"), r"/-/static/(?P<path>.*)$"
    )
    for path, dirname in self.static_mounts:
        add_route(asgi_static(dirname), r"/" + path + "/(?P<path>.*)$")
    # Mount any plugin static/ directories
    for plugin in get_plugins():
        if plugin["static_path"]:
            add_route(
                asgi_static(plugin["static_path"]),
                f"/-/static-plugins/{plugin['name']}/(?P<path>.*)$",
            )
            # Support underscores in name in addition to hyphens, see https://github.com/simonw/datasette/issues/611
            add_route(
                asgi_static(plugin["static_path"]),
                "/-/static-plugins/{}/(?P<path>.*)$".format(
                    plugin["name"].replace("-", "_")
                ),
            )
    add_route(
        permanent_redirect(
            "/_memory", forward_query_string=True, forward_rest=True
        ),
        r"/:memory:(?P<rest>.*)$",
    )
    # Introspection / debug endpoints under /-/
    add_route(
        JsonDataView.as_view(self, "versions.json", self._versions),
        r"/-/versions(\.(?P<format>json))?$",
    )
    add_route(
        JsonDataView.as_view(
            self, "plugins.json", self._plugins, needs_request=True
        ),
        r"/-/plugins(\.(?P<format>json))?$",
    )
    add_route(
        JsonDataView.as_view(self, "settings.json", lambda: self._settings),
        r"/-/settings(\.(?P<format>json))?$",
    )
    add_route(
        JsonDataView.as_view(self, "config.json", lambda: self._config()),
        r"/-/config(\.(?P<format>json))?$",
    )
    add_route(
        JsonDataView.as_view(self, "threads.json", self._threads),
        r"/-/threads(\.(?P<format>json))?$",
    )
    add_route(
        JsonDataView.as_view(self, "databases.json", self._connected_databases),
        r"/-/databases(\.(?P<format>json))?$",
    )
    add_route(
        JsonDataView.as_view(
            self, "actor.json", self._actor, needs_request=True, permission=None
        ),
        r"/-/actor(\.(?P<format>json))?$",
    )
    add_route(
        JsonDataView.as_view(
            self,
            "actions.json",
            self._actions,
            template="debug_actions.html",
            permission="permissions-debug",
        ),
        r"/-/actions(\.(?P<format>json))?$",
    )
    add_route(
        AuthTokenView.as_view(self),
        r"/-/auth-token$",
    )
    add_route(
        CreateTokenView.as_view(self),
        r"/-/create-token$",
    )
    add_route(
        ApiExplorerView.as_view(self),
        r"/-/api$",
    )
    add_route(
        TablesView.as_view(self),
        r"/-/tables(\.(?P<format>json))?$",
    )
    add_route(
        InstanceSchemaView.as_view(self),
        r"/-/schema(\.(?P<format>json|md))?$",
    )
    add_route(
        LogoutView.as_view(self),
        r"/-/logout$",
    )
    add_route(
        PermissionsDebugView.as_view(self),
        r"/-/permissions$",
    )
    add_route(
        AllowedResourcesView.as_view(self),
        r"/-/allowed(\.(?P<format>json))?$",
    )
    add_route(
        PermissionRulesView.as_view(self),
        r"/-/rules(\.(?P<format>json))?$",
    )
    add_route(
        PermissionCheckView.as_view(self),
        r"/-/check(\.(?P<format>json))?$",
    )
    add_route(
        MessagesDebugView.as_view(self),
        r"/-/messages$",
    )
    add_route(
        AllowDebugView.as_view(self),
        r"/-/allow-debug$",
    )
    add_route(
        wrap_view(PatternPortfolioView, self),
        r"/-/patterns$",
    )
    # Database and table scoped routes
    add_route(
        wrap_view(database_download, self),
        r"/(?P<database>[^\/\.]+)\.db$",
    )
    add_route(
        wrap_view(DatabaseView, self),
        r"/(?P<database>[^\/\.]+)(\.(?P<format>\w+))?$",
    )
    add_route(TableCreateView.as_view(self), r"/(?P<database>[^\/\.]+)/-/create$")
    add_route(
        DatabaseSchemaView.as_view(self),
        r"/(?P<database>[^\/\.]+)/-/schema(\.(?P<format>json|md))?$",
    )
    add_route(
        wrap_view(QueryView, self),
        r"/(?P<database>[^\/\.]+)/-/query(\.(?P<format>\w+))?$",
    )
    add_route(
        wrap_view(table_view, self),
        r"/(?P<database>[^\/\.]+)/(?P<table>[^\/\.]+)(\.(?P<format>\w+))?$",
    )
    add_route(
        RowView.as_view(self),
        r"/(?P<database>[^\/\.]+)/(?P<table>[^/]+?)/(?P<pks>[^/]+?)(\.(?P<format>\w+))?$",
    )
    add_route(
        TableInsertView.as_view(self),
        r"/(?P<database>[^\/\.]+)/(?P<table>[^\/\.]+)/-/insert$",
    )
    add_route(
        TableUpsertView.as_view(self),
        r"/(?P<database>[^\/\.]+)/(?P<table>[^\/\.]+)/-/upsert$",
    )
    add_route(
        TableDropView.as_view(self),
        r"/(?P<database>[^\/\.]+)/(?P<table>[^\/\.]+)/-/drop$",
    )
    add_route(
        TableSchemaView.as_view(self),
        r"/(?P<database>[^\/\.]+)/(?P<table>[^\/\.]+)/-/schema(\.(?P<format>json|md))?$",
    )
    add_route(
        RowDeleteView.as_view(self),
        r"/(?P<database>[^\/\.]+)/(?P<table>[^/]+?)/(?P<pks>[^/]+?)/-/delete$",
    )
    add_route(
        RowUpdateView.as_view(self),
        r"/(?P<database>[^\/\.]+)/(?P<table>[^/]+?)/(?P<pks>[^/]+?)/-/update$",
    )
    return [
        # Compile any strings to regular expressions
        ((re.compile(pattern) if isinstance(pattern, str) else pattern), view)
        for pattern, view in routes
    ]
async def resolve_database(self, request):
    """Return the Database addressed by the request's route component.

    Raises DatabaseNotFound for an unknown route.
    """
    database_route = tilde_decode(request.url_vars["database"])
    try:
        return self.get_database(route=database_route)
    except KeyError:
        raise DatabaseNotFound(database_route)
async def resolve_table(self, request):
    """Resolve the request to a ResolvedTable(db, table_name, is_view).

    Accepts SQL views as well as tables; raises TableNotFound when
    neither exists.
    """
    db = await self.resolve_database(request)
    table_name = tilde_decode(request.url_vars["table"])
    # Table must exist
    is_view = False
    table_exists = await db.table_exists(table_name)
    if not table_exists:
        is_view = await db.view_exists(table_name)
    if not (table_exists or is_view):
        raise TableNotFound(db.name, table_name)
    return ResolvedTable(db, table_name, is_view)
async def resolve_row(self, request):
    """Resolve the request to a ResolvedRow for a single primary-key row.

    Raises RowNotFound when no row matches the decoded primary key values.
    """
    db, table_name, _ = await self.resolve_table(request)
    pk_values = urlsafe_components(request.url_vars["pks"])
    sql, params, pks = await row_sql_params_pks(db, table_name, pk_values)
    results = await db.execute(sql, params, truncate=True)
    row = results.first()
    if row is None:
        raise RowNotFound(db.name, table_name, pk_values)
    # Reuse the row already fetched instead of calling results.first() a
    # second time (redundant work, and fragile if first() ever consumes)
    return ResolvedRow(db, table_name, sql, params, pks, pk_values, row)
def app(self):
    """Returns an ASGI app function that serves the whole of Datasette"""
    routes = self._routes()

    async def setup_db():
        # First time server starts up, calculate table counts for immutable databases
        for database in self.databases.values():
            if not database.is_mutable:
                await database.table_counts(limit=60 * 60 * 1000)

    async def custom_csrf_error(scope, send, message_id):
        # Render a friendly HTML page instead of the default CSRF failure
        await asgi_send(
            send,
            content=await self.render_template(
                "csrf_error.html",
                {"message_id": message_id, "message_name": Errors(message_id).name},
            ),
            status=403,
            content_type="text/html; charset=utf-8",
        )

    asgi = asgi_csrf.asgi_csrf(
        DatasetteRouter(self, routes),
        signing_secret=self._secret,
        cookie_name="ds_csrftoken",
        skip_if_scope=lambda scope: any(
            pm.hook.skip_csrf(datasette=self, scope=scope)
        ),
        send_csrf_failed=custom_csrf_error,
    )
    if self.setting("trace_debug"):
        asgi = AsgiTracer(asgi)
    asgi = AsgiLifespan(asgi)
    # Defer database setup and startup hooks until the first request
    asgi = AsgiRunOnFirstRequest(asgi, on_startup=[setup_db, self.invoke_startup])
    # Plugins get the final say, wrapping the whole stack
    for wrapper in pm.hook.asgi_wrapper(datasette=self):
        asgi = wrapper(asgi)
    return asgi
| Datasette |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_image35.py | {
"start": 315,
"end": 846
class ____(ExcelComparisonTest):
    """
    Test file created by XlsxWriter against a file created by Excel.
    """

    def setUp(self):
        # Reference and output file paths are derived from this name
        self.set_filename("image35.xlsx")

    def test_create_file(self):
        """Test the creation of a simple XlsxWriter file with image(s)."""
        workbook = Workbook(self.got_filename)

        worksheet = workbook.add_worksheet()

        # NOTE(review): the fixture name suggests this exercises handling
        # of images whose metadata reports zero DPI - confirm
        worksheet.insert_image("E9", self.image_dir + "zero_dpi.jpg")

        workbook.close()

        self.assertExcelEqual()
| TestCompareXLSXFiles |
python | doocs__leetcode | solution/2100-2199/2128.Remove All Ones With Row and Column Flips/Solution.py | {
"start": 0,
"end": 246
} | class ____:
def removeOnes(self, grid: List[List[int]]) -> bool:
s = set()
for row in grid:
t = tuple(row) if row[0] == grid[0][0] else tuple(x ^ 1 for x in row)
s.add(t)
return len(s) == 1
| Solution |
python | huggingface__transformers | tests/models/clvp/test_modeling_clvp.py | {
"start": 7091,
"end": 10428
} | class ____:
def __init__(
self,
parent,
batch_size=2,
seq_length=3,
is_training=False,
vocab_size=300,
max_position_embeddings=256,
max_text_tokens=256,
use_input_mask=True,
hidden_size=128,
num_hidden_layers=2,
num_attention_heads=2,
bos_token_id=97,
eos_token_id=98,
relative_attention_num_buckets=4,
relative_attention_max_distance=16,
):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.vocab_size = vocab_size
self.max_position_embeddings = max_position_embeddings
self.max_text_tokens = max_text_tokens
self.use_input_mask = use_input_mask
self.hidden_size = hidden_size
self.num_attention_heads = num_attention_heads
self.num_hidden_layers = num_hidden_layers
self.bos_token_id = bos_token_id
self.eos_token_id = eos_token_id
self.relative_attention_num_buckets = relative_attention_num_buckets
self.relative_attention_max_distance = relative_attention_max_distance
def get_config(self):
decoder_config = ClvpDecoderConfig(
vocab_size=self.vocab_size,
max_position_embeddings=self.max_position_embeddings,
max_text_tokens=self.max_text_tokens,
hidden_size=self.hidden_size,
num_hidden_layers=self.num_hidden_layers,
num_attention_heads=self.num_attention_heads,
bos_token_id=self.bos_token_id,
eos_token_id=self.eos_token_id,
relative_attention_num_buckets=self.relative_attention_num_buckets,
relative_attention_max_distance=self.relative_attention_max_distance,
)
return decoder_config
def prepare_config_and_inputs(self):
input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
input_mask = None
if self.use_input_mask:
input_mask = random_attention_mask([self.batch_size, self.seq_length])
if input_mask is not None:
batch_size, seq_length = input_mask.shape
rnd_start_indices = np.random.randint(1, seq_length - 1, size=(batch_size,))
for batch_idx, start_index in enumerate(rnd_start_indices):
input_mask[batch_idx, :start_index] = 1
input_mask[batch_idx, start_index:] = 0
decoder_config = self.get_config()
return decoder_config, input_ids, input_mask
def create_and_check_model(self, config, input_ids, attention_mask):
model = ClvpForCausalLM(config).to(torch_device).eval()
with torch.no_grad():
result = model(input_ids=input_ids, attention_mask=attention_mask)
self.parent.assertEqual(result[0].shape, (self.batch_size, self.seq_length, self.vocab_size))
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
config, input_ids, attention_mask = config_and_inputs
inputs_dict = {
"input_ids": input_ids.to(torch_device),
"attention_mask": attention_mask.to(torch_device),
}
return config, inputs_dict
@require_torch
| ClvpDecoderTester |
python | ray-project__ray | python/ray/train/tensorflow/config.py | {
"start": 1067,
"end": 1845
} | class ____(Backend):
def on_start(self, worker_group: BaseWorkerGroup, backend_config: TensorflowConfig):
# Compute URL for initializing distributed setup.
def get_url():
address, port = get_address_and_port()
return build_address(address, port)
urls = worker_group.execute(get_url)
# Get setup tasks in order to throw errors on failure.
setup_futures = []
for i in range(len(worker_group)):
setup_futures.append(
worker_group.execute_single_async(
i,
_setup_tensorflow_environment,
worker_addresses=urls,
index=i,
)
)
ray.get(setup_futures)
| _TensorflowBackend |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/dialects/postgresql/ext.py | {
"start": 11980,
"end": 12784
} | class ____(_regconfig_fn):
"""The PostgreSQL ``to_tsquery`` SQL function.
This function applies automatic casting of the REGCONFIG argument
to use the :class:`_postgresql.REGCONFIG` datatype automatically,
and applies a return type of :class:`_postgresql.TSQUERY`.
Assuming the PostgreSQL dialect has been imported, either by invoking
``from sqlalchemy.dialects import postgresql``, or by creating a PostgreSQL
engine using ``create_engine("postgresql...")``,
:class:`_postgresql.to_tsquery` will be used automatically when invoking
``sqlalchemy.func.to_tsquery()``, ensuring the correct argument and return
type handlers are used at compile and execution time.
.. versionadded:: 2.0.0rc1
"""
inherit_cache = True
type = types.TSQUERY
| to_tsquery |
python | keras-team__keras | keras/src/backend/__init__.py | {
"start": 2933,
"end": 3088
} | class ____(backend_name_scope):
pass
@keras_export("keras.device")
def device(device_name):
return device_scope(device_name) # noqa: F405
| name_scope |
python | aio-libs__aiohttp | tests/test_pytest_plugin.py | {
"start": 7267,
"end": 7311
} | class ____(TestClient):
pass
| RESTfulClient |
python | pytorch__pytorch | test/test_jit_fuser_te.py | {
"start": 3167,
"end": 99184
} | class ____(JitTestCase):
def setUp(self):
super().setUp()
self.tensorexpr_options = TensorExprTestOptions()
# note: `self.dynamic_shapes` instantiated in specialization of class
# defined below
fusion_strategy = [("DYNAMIC", 20)] if self.dynamic_shapes else [("STATIC", 20)]
self.old_fusion_strategy = torch._C._jit_set_fusion_strategy(fusion_strategy)
self.devices = ["cpu"] if not torch.cuda.is_available() else ["cpu", "cuda"]
self.int_dtypes = [
torch.int8,
torch.int16,
torch.int32,
torch.int64,
torch.bool,
]
self.fp_dtypes = [
torch.float16,
torch.float32,
torch.float64,
torch.bfloat16,
]
self.dtypes = self.int_dtypes + self.fp_dtypes
def tearDown(self):
self.tensorexpr_options.restore()
torch._C._jit_set_fusion_strategy(self.old_fusion_strategy)
super().tearDown()
def assertAllFused(self, graph, except_for=None):
except_for = except_for if except_for is not None else set()
# TODO - upstream
guards = (
"prim::TypeCheck",
"prim::RequiresGradCheck",
"prim::TensorExprDynamicGuard",
)
guard_found = False
def autodiff_guard(node):
if node.kind() != "aten::all":
return False
inps = list(node.inputs())
if len(inps) != 1 or inps[0].node().kind() != "prim::ListConstruct":
return False
li_inps = list(inps[0].node().inputs())
for li_inp in li_inps:
if li_inp.node().kind() in (
"prim::AutogradAllNonZero",
"prim::AutogradAllZero",
):
return True
return False
def is_guard(node):
return node.kind() in guards or autodiff_guard(node)
for node in graph.block().nodes():
if node.kind() == "prim::Constant":
continue
if is_guard(node):
self.assertFalse(guard_found)
guard_found = True
continue
if node.kind() in except_for:
continue
if node.kind() == "prim::If":
self.assertTrue(is_guard(node.prev()))
continue
self.assertTrue(False, "Found unexpected node:" + node.kind())
self.assertTrue(guard_found)
def assertLastGraphAllFused(self):
self.assertAllFused(torch.jit.last_executed_optimized_graph())
def findFusionGroups(self, graph):
result = []
for n in graph.nodes():
if n.kind() == FUSION_GROUP:
result.append(n.g("Subgraph"))
continue
for block in n.blocks():
result += self.findFusionGroups(block)
return result
def test_typecheck(self):
a = torch.ones(1)
def fused_kernel(a, b):
return (a + b) * 2.0
scripted = self.checkScript(fused_kernel, (a, a))
graph = scripted.graph_for(a, a)
# double check we fused
fusion_groups = self.findFusionGroups(graph)
self.assertEqual(len(fusion_groups), 1)
# we use a bigger tensor now (size 2)
# if we won't trigger a recompilation
# we will still create a tensor up to (size 1)
# if the type check fails
a = torch.ones(2)
# shape changed if we don't trigger recompilation
# we would compute the wrong result silently
self.assertEqual(scripted(a, a), fused_kernel(a, a))
def test_sum_simple(self):
def func(x):
x2 = x * x
return x2.sum()
with texpr_reductions_enabled():
a = torch.tensor(list(range(15)), dtype=torch.float, device="cpu")
a = a.reshape(5, 3)
scripted = self.checkScript(func, (a,))
self.assertLastGraphAllFused()
def test_nop(self):
pass
def test_sum_dim(self):
def func(x):
return x.sum((0,)) * 2
def func_neg(x):
return x.sum((-2,)) * 2
with texpr_reductions_enabled():
a = torch.tensor(list(range(15)), dtype=torch.float, device="cpu")
a = a.reshape(5, 3)
scripted = self.checkScript(func, (a,))
self.assertLastGraphAllFused()
scripted = self.checkScript(func_neg, (a,))
self.assertLastGraphAllFused()
def test_sum_keepdim_cast(self):
def func(x):
return x.sum((0,), keepdim=True, dtype=torch.double) * 2
with texpr_reductions_enabled():
a = torch.tensor(list(range(15)), dtype=torch.float, device="cpu")
a = a.reshape(5, 3)
self.checkScript(func, (a,))
self.assertLastGraphAllFused()
def test_abs(self):
for device in self.devices:
def func(x):
return x.abs() * 2
a = torch.randn(5, device=device)
scripted = self.checkScript(func, (a,))
self.assertLastGraphAllFused()
def test_unsqueeze_size_calculation(self):
for device in self.devices:
def foo(b, d):
x = d.unsqueeze(1)
y = x * 42.0
z = b + y
r = z / 42.0
return r
inputs = (
torch.rand(20, 28, device=device, requires_grad=True),
torch.rand(20, device=device),
)
scripted = self.checkScript(foo, inputs)
self.assertAllFused(scripted.graph_for(*inputs))
def test_zero_element_tensors(self):
for device in self.devices:
def decode(sin_t, cos_t):
theta = torch.atan2(sin_t.float(), cos_t.float())
return theta
sin = torch.zeros(0, device=device)
cos = torch.zeros(0, device=device)
inputs = [sin, cos]
ge = self.checkScript(decode, inputs)
def test_arg_configurations_smoke(self):
if self.dynamic_shapes:
self.skipTest("TODO: chunk dynamic shapes")
# A smoke test to make sure we won't use the same kernel for contiguous
# and non-contiguous arguments.
# TODO: add optionally enabled debug counters to the fuser to verify
# that we really can tell the difference between configurations
for device in self.devices:
def f(x, y):
z1, z2 = (x + y).chunk(2, dim=1)
return z1 * z2
x = torch.randn(4, 4, dtype=torch.float, device=device)
y = torch.randn(4, 4, dtype=torch.float, device=device)
traced_f = torch.jit.trace(f, (x, y))
self.assertEqual(traced_f(x.t().contiguous(), y), traced_f(x.t(), y))
def test_broadcast(self):
for device in self.devices:
def scaleshift(x, scale, shift):
return x * scale + shift
inputs = [
torch.randn(4, 4, dtype=torch.float, device=device),
torch.randn(4, dtype=torch.float, device=device),
torch.randn(4, dtype=torch.float, device=device),
]
self.checkScript(scaleshift, inputs)
@unittest.skipIf(not RUN_CUDA, "fuser requires CUDA")
@unittest.skipIf(not RUN_CUDA_HALF, "no half support")
@unittest.skipIf(
GRAPH_EXECUTOR != ProfilingMode.LEGACY, "no half support with profiling on"
)
def test_cuda_half(self):
x = torch.randn(4, 4, dtype=torch.half, device="cuda")
y = torch.randn(4, 4, dtype=torch.half, device="cuda")
funcs = [self.fn_test_comparison_gt_lt, self.fn_test_relu, self.fn_test_exp]
# Note: Non fused inputs must be float to prevent loss of precision
inputs = (x.float(), y.float())
fusion_inputs = (x, y)
for fn in funcs:
local_inputs = [t.clone().requires_grad_() for t in inputs]
local_fusion_inputs = [t.clone().requires_grad_() for t in fusion_inputs]
# Verifies outputs
fusion = torch.jit.trace(fn, local_fusion_inputs, check_trace=False)
outputs = fn(*local_inputs)
fusion_outputs = fusion(*local_fusion_inputs)
outputs_half = [t.half() for t in outputs]
self.assertEqual(outputs_half, fusion_outputs)
# Verifies gradients
for output, fusion_output in zip(outputs_half, fusion_outputs):
grads = torch.autograd.grad(
output.float().sum(),
local_inputs,
allow_unused=True,
retain_graph=True,
)
fusion_grads = torch.autograd.grad(
fusion_output.sum(),
local_fusion_inputs,
allow_unused=True,
retain_graph=True,
)
grads_half = [t.half() for t in grads]
self.assertEqual(grads_half, fusion_grads)
def test_checks_cat_inputs(self):
# single fusion node causes error
with set_fusion_group_inlining(True):
for device in self.devices:
# We shouldn't treat cat nodes as broadcasting. All their inputs
# need to be checked for having the same map size, before we can
# run the kernel.
def f(x, y):
return torch.cat([x + 2 * x + x**2, y + 4 * y + y**3], dim=0)
# NOTE: y is broadcastable to x, but output of f(x, y) should have
# shape 3x4, and not 4x4.
x = torch.randn(2, 4, dtype=torch.float, device=device)
y = torch.randn(1, 4, dtype=torch.float, device=device)
scripted = self.checkScript(f, (x, y))
self.assertEqual(scripted(x, y).shape, (3, 4))
self.assertAllFused(scripted.graph_for(x, y))
def test_chunk(self):
if self.dynamic_shapes:
self.skipTest("TODO: chunk dynamic shapes")
for device in self.devices:
def fn(x):
a, b, c = x.chunk(3, 1)
return a * b + c
inputs = [torch.randn(10, 6, dtype=torch.float, device=device)]
self.checkScript(fn, inputs)
self.assertLastGraphAllFused()
def test_chunk_correctness(self):
if self.dynamic_shapes:
self.skipTest("TODO: chunk dynamic shapes")
for device in self.devices:
def chunk_4_0(x):
x0, x1, x2, x3 = x.chunk(4, 0)
return x0 + x1 + x2 + x3
def chunk_4_1(x):
x0, x1, x2, x3 = x.chunk(4, 1)
return x0 + x1 + x2 + x3
def chunk_4_last(x):
x0, x1, x2, x3 = x.chunk(4, 2)
return x0 + x1 + x2 + x3
fns = [chunk_4_0, chunk_4_1, chunk_4_last]
tensors = [
# splitSize = 1
torch.randn(4, 4, 4, dtype=torch.float, device=device),
# contiguous case
torch.randn(12, 8, 16, dtype=torch.float, device=device),
# non-contiguous case
torch.randn(12, 8, 16, dtype=torch.float, device=device).transpose(
1, 2
),
]
for tensor in tensors:
for fn in fns:
self.checkScript(fn, [tensor])
self.assertLastGraphAllFused()
def test_chunk_distributes(self):
if self.dynamic_shapes:
self.skipTest("TODO: chunk dynamic shapes")
if self.dynamic_shapes:
self.skipTest("TODO: chunk dynamic shapes")
for device in self.devices:
def f(x, y):
z1, z2 = (x + y).chunk(2, dim=1)
return z1 * z2
x = torch.randn(4, 4, dtype=torch.float, device=device)
y = torch.randn(4, 4, dtype=torch.float, device=device)
ge = self.checkTrace(f, (x, y))
graph = ge.graph_for(x, y)
# XXX: The old fuser does broadcast_tensors but the new fuser doesn't.
# FileCheck().check("broadcast_tensors").check('with ' + FUSION_GROUP + '_') \
# .check_count('ConstantChunk', 2, exactly=True).run(str(graph))
FileCheck().check("with " + FUSION_GROUP + "_").check_count(
"ConstantChunk", 1, exactly=True
).run(str(graph))
def test_chunk_motion_deduplicates_inputs(self):
if self.dynamic_shapes:
self.skipTest("TODO: chunk dynamic shapes")
for device in self.devices:
def func1(x):
z = x * x
z0, z1 = z.chunk(2)
return z0 * z1
def func2(x):
z = x * x * x
z0, z1 = z.chunk(2)
return z0 * z1
inputs = [torch.tensor([1.1, 1.2], device=device, dtype=torch.float)]
for func in [func1, func2]:
self.checkScript(func, inputs)
self.assertLastGraphAllFused()
def test_chunk_multiple(self):
if self.dynamic_shapes:
self.skipTest("TODO: chunk dynamic shapes")
for device in self.devices:
# The arguments are intentionally used out of order as a test to see
# if the fusion compiler adds extra args in the correct order
def fn(s, x, y, z):
z1, z2 = z.chunk(2, 2)
x1, x2, x3 = x.chunk(3, 1)
y1, y2 = y.chunk(2, 0)
return s + x1 + x2 + x3 + y1 + y2 + z1 + z2
inputs = [
torch.randn(5, 2, 3, dtype=torch.float, device=device),
torch.randn(5, 6, 3, dtype=torch.float, device=device),
torch.randn(10, 2, 3, dtype=torch.float, device=device),
torch.randn(5, 2, 6, dtype=torch.float, device=device),
]
ge = self.checkScript(fn, inputs)
self.assertAllFused(ge.graph_for(*inputs))
def test_minmax(self):
for device in self.devices:
def tmax(a, b):
return torch.max(2 * a, b)
def tmin(a, b):
return torch.min(2 * a, b)
a = torch.randn(4, 4, dtype=torch.float)
b = torch.randn(4, 4, dtype=torch.float)
nan = torch.tensor(float("nan"), dtype=torch.float)
for f, inputs, device in product(
(tmax, tmin), ([a, b], [a, nan], [b, nan]), self.devices
):
inputs = [t.to(device) for t in inputs]
s = self.checkScript(f, inputs)
self.assertAllFused(s.graph_for(*inputs))
def test_clamp(self):
for device in self.devices:
def func2(a, b):
return torch.clamp(a + b, min=0, max=2)
def funcInf(a, b):
return torch.clamp(a + b, min=0, max=float("inf"))
def funcNegInf(a, b):
return torch.clamp(a + b, min=float("-inf"), max=0)
def funcOptMin(a, b):
return torch.clamp(a + b, max=2)
def funcOptMax(a, b):
return torch.clamp(a + b, min=0)
a = torch.randn(4, 4, dtype=torch.float, device=device, requires_grad=True)
b = torch.randn(4, 4, dtype=torch.float, device=device)
nan = torch.tensor(float("nan"), dtype=torch.float, device=device)
funcs = (func2, funcInf, funcNegInf, funcOptMin, funcOptMax)
for f, inputs in product(funcs, [[a, b], [a, nan]]):
inp1, inp2 = inputs
s = self.checkScript(f, (inp1, inp2), profiling=ProfilingMode.PROFILING)
self.assertAllFused(
s.graph_for(inp1, inp2),
except_for={"aten::size", "aten::_size_if_not_equal"},
)
c = s(inp1, inp2)
with enable_profiling_mode_for_profiling_tests():
warmup_backward(c.sum())
graph = backward_graph(s)
self.assertAllFused(
graph,
except_for={"aten::Float", "aten::_grad_sum_to_size"}.union(
autograd_check_set
),
)
def test_clamp_double(self):
for device in self.devices:
def clamp_double(x, eta: float):
return 1 - x.clamp(eta, 1 - eta)
x = torch.tensor([1.0, 1.0], dtype=torch.double, device=device)
eta = 1e-9
s = self.checkScript(
clamp_double,
(x, eta),
profiling=ProfilingMode.PROFILING,
atol=1e-10,
rtol=1e-5,
)
self.assertAllFused(s.graph_for(x, eta), except_for={"aten::sub"})
def test_clamp_int(self):
for device in self.devices:
def clamp_int(x, eta: int):
return x.clamp(0, eta)
x = torch.tensor([1, 1], device=device)
eta = 1 << 32
s = self.checkScript(clamp_int, (x, eta), profiling=ProfilingMode.PROFILING)
self.assertAllFused(s.graph_for(x, eta))
def test_add_bool(self):
sizes = [(1,), (2,), (4, 4)]
for device, size in product(self.devices, sizes):
def f(x, y, z):
return x + y + z
x = torch.randint(0, 2, size, dtype=torch.bool, device=device)
y = torch.randint(0, 2, size, dtype=torch.bool, device=device)
z = torch.randint(0, 2, size, dtype=torch.bool, device=device)
ge = self.checkTrace(f, (x, y, z), inputs_require_grads=False)
self.assertAllFused(ge.graph_for(x, y, z))
def test_mul_bool(self):
for device in self.devices:
def f(x, y, z):
return x * y * z
x = torch.randint(0, 2, (4, 4), dtype=torch.bool, device=device)
y = torch.randint(0, 2, (4, 4), dtype=torch.bool, device=device)
z = torch.randint(0, 2, (4, 4), dtype=torch.bool, device=device)
ge = self.checkTrace(f, (x, y, z), inputs_require_grads=False)
self.assertAllFused(ge.graph_for(x, y, z))
def test_div_bool(self):
for device in self.devices:
def f(x, y, z):
return (x + y) / z
x = torch.randint(0, 2, (4, 4), dtype=torch.bool, device=device)
y = torch.randint(0, 2, (4, 4), dtype=torch.bool, device=device)
z = torch.ones_like(x, dtype=torch.bool, device=device)
ge = self.checkTrace(f, (x, y, z), inputs_require_grads=False)
self.assertAllFused(ge.graph_for(x, y, z))
def test_bitwise_ops(self):
def apply(fn):
return lambda x, y, z: fn(fn(x, y), z)
binary_ops = [
operator.__and__,
operator.__or__,
operator.__xor__,
operator.__lshift__,
operator.__rshift__,
]
devices = self.devices
for dtype, op, device in product(self.int_dtypes, binary_ops, devices):
try:
x = self.data_for(dtype, device)
y = self.data_for(dtype, device)
z = self.data_for(dtype, device)
fn = apply(op)
ref = fn(x, y, z)
except Exception:
# If eager mode doesn't support a dtype/op/device combo,
# neither does the fuser. Catch everything to avoid needing to
# guess what errors might be thrown by eager.
continue
try:
t = torch.jit.trace(fn, (x, y, z))
self.assertEqual(ref, t(x, y, z))
self.assertAllFused(t.graph_for(x, y, z))
except Exception as e:
raise RuntimeError(
" ".join(["Failed:", str(dtype), op.__name__, device])
) from e
def test_minmax_int_ops(self):
def apply(fn):
return lambda x, y, z: fn(fn(x, y), z)
binary_ops = [torch.min, torch.max]
devices = self.devices
for dtype, op, device in product(self.int_dtypes, binary_ops, devices):
try:
x = self.data_for(dtype, device)
y = self.data_for(dtype, device)
z = self.data_for(dtype, device)
fn = apply(op)
ref = fn(x, y, z)
except Exception:
# If eager mode doesn't support a dtype/op/device combo,
# neither does the fuser. Catch everything to avoid needing to
# guess what errors might be thrown by eager.
continue
try:
t = torch.jit.trace(fn, (x, y, z))
self.assertEqual(ref, t(x, y, z))
self.assertAllFused(t.graph_for(x, y, z))
except Exception as e:
raise RuntimeError(
" ".join(["Failed:", str(dtype), op.__name__, device])
) from e
def test_comparison_eq_ne(self):
for device in self.devices:
def f(x, y):
mask = (x == 0).type_as(x)
z = x * mask + y
mask = (x != 0).type_as(x)
z = z * mask + y
return z
x = torch.randn(4, 4, dtype=torch.float, device=device)
y = torch.randn(4, 4, dtype=torch.float, device=device)
ge = self.checkTrace(f, (x, y))
self.assertAllFused(ge.graph_for(x, y))
@staticmethod
def fn_test_comparison_gt_lt(x, y):
mask = (x > 0).type_as(x)
z = x * mask + y
mask = (x < 0).type_as(x)
z = z * mask + y
return z
def test_comparison_gt_lt(self):
for device in self.devices:
x = torch.randn(4, 4, dtype=torch.float, device=device)
y = torch.randn(4, 4, dtype=torch.float, device=device)
ge = self.checkTrace(self.fn_test_comparison_gt_lt, (x, y))
self.assertAllFused(ge.graph_for(x, y))
def test_comparison_ge_le(self):
for device in self.devices:
def f(x, y):
mask = (x >= 0).type_as(x)
z = x * mask + y
mask = (x <= 0).type_as(x)
z = z * mask + y
return z
x = torch.randn(4, 4, dtype=torch.float, device=device)
y = torch.randn(4, 4, dtype=torch.float, device=device)
ge = self.checkTrace(f, (x, y))
self.assertAllFused(ge.graph_for(x, y))
x.requires_grad_(True)
y.requires_grad_(True)
self.assertAllFused(
ge.graph_for(x, y),
except_for=(
"aten::size",
"prim::BroadcastSizes",
"aten::_size_if_not_equal",
),
)
def test_addcmul(self):
for device in self.devices:
t = torch.randn(1, 4, dtype=torch.float, device=device)
t1 = torch.randn(4, 1, dtype=torch.float, device=device)
t2 = torch.randn(1, 4, dtype=torch.float, device=device)
def foo(t, t1, t2):
return t.addcmul(t + 1, t2, value=0.1)
ge = self.checkTrace(foo, (t, t1, t2), allow_unused=True)
graph = ge.graph_for(t, t1, t2)
fusion_groups = self.findFusionGroups(graph)
self.assertEqual(len(fusion_groups), 1)
FileCheck().check("aten::add(").check("aten::addcmul(").run(
str(fusion_groups[0])
)
# TODO: We leak CUDA memory here because the traced graph holds onto a
# constant-ified tensor. Since the Python-global CompilationUnit is alive
# until the end of the process, the memory is effectively leaked.
# Removed `_cuda` suffix from this test which disables leak-checking.
# If this is a real problem, we'll need to revisit Torchscript Function
# lifetimes in Python.
def test_lerp(self):
for device in self.devices:
start = torch.randn(4, 1, dtype=torch.float, device=device)
end = torch.randn(1, 4, dtype=torch.float, device=device)
weight = torch.tensor(0.5, dtype=torch.float, device=device)
# scalar weight overload
def foo_weight_scalar(start, end):
return torch.lerp(start + 1, end, 0.5)
# tensor weight overload
def foo_weight_tensor(start, end):
return torch.lerp(start + 1, end, weight)
ge_weight_scalar = self.checkTrace(foo_weight_scalar, (start, end))
graph = ge_weight_scalar.graph_for(start, end)
self.assertAllFused(graph)
# TODO: uncomment when TE enables support for scalar tensors
# ge_weight_tensor = self.checkTrace(foo_weight_tensor, (start, end))
# graph = ge_weight_tensor.graph_for(start, end)
# self.assertAllFused(graph)
def test_concat(self):
# disabling concat causes error with single concat node
with set_fusion_group_inlining(True):
for device in self.devices:
hx = torch.randn(3, 20, dtype=torch.float, device=device)
cx = torch.randn(3, 20, dtype=torch.float, device=device)
def foo(hx, cx):
return torch.cat((hx + cx, hx * cx))
ge = self.checkTrace(foo, (hx, cx))
graph = ge.graph_for(hx, cx)
self.assertAllFused(graph)
# XXX: TE fuser can handle concats in a fusion group.
# FileCheck().check("FusedConcat").check_next("return").run(str(graph))
def test_remove_output_used_only_in_size(self):
for device in self.devices:
def test_fuse(a, b):
c = a + b
d = c + b
return d
scripted_f = torch.jit.script(test_fuse)
x = torch.ones(1, requires_grad=True, device=device)
y = torch.ones(1, requires_grad=True, device=device)
warmup_forward(scripted_f, x, y, profiling_count=3)
g = scripted_f.graph_for(x, y)
diff_nodes = g.findAllNodes("prim::DifferentiableGraph")
self.assertEqual(len(diff_nodes), 1)
g = diff_nodes[0].g("Subgraph")
if_nodes = [n for n in g.nodes() if n.kind() == "prim::If"]
self.assertEqual(len(if_nodes), 1)
# the if node and the fusion group inside it should only have one output
self.assertEqual(len(list(if_nodes[0].outputs())), 1)
def test_concat_invariant(self):
for device in self.devices:
# Invariant: the output of prim::FusedConcat may
# not be an input to any node inside the FusionGroup.
def fn(x, y, z):
x1 = x + y
y1 = x - y
w = torch.cat([x1, y1])
return w + z
x = torch.randn(2, 2, dtype=torch.float, device=device)
y = torch.randn(2, 2, dtype=torch.float, device=device)
z = torch.randn(4, 2, dtype=torch.float, device=device)
ge = self.checkTrace(fn, (x, y, z))
graph = ge.graph_for(x, y, z)
self.assertAllFused(graph, except_for={"aten::add"})
# XXX: TE fuser can handle concats inside a fusion group.
# FileCheck().check("FusedConcat").check_next("return").run(str(graph))
@staticmethod
def fn_test_exp(x, y):
return (x + 0.5 * y).exp()
def test_exp(self):
for device in self.devices:
x = torch.randn(4, 4, dtype=torch.float, device=device)
y = torch.randn(4, 4, dtype=torch.float, device=device)
ge = self.checkTrace(self.fn_test_exp, (x, y))
self.assertAllFused(ge.graph_for(x, y))
def test_threshold(self):
for device in self.devices:
def f(x):
return torch.threshold(x, 0, -10) + x + x + x
x = torch.tensor([-1, -0.5, 0, 1, 2, 3], device=device)
scripted = self.checkScript(f, (x,))
self.assertAllFused(scripted.graph_for(x))
def test_scalar_arg(self):
for device in self.devices:
def fn_test_scalar_arg(x: torch.Tensor, p: float) -> torch.Tensor:
return p * (x * x + x)
x = torch.randn(4, 4, dtype=torch.float, device=device)
p = 3
scripted = self.checkScript(fn_test_scalar_arg, (x, p))
self.assertAllFused(scripted.graph_for(x, p))
x.requires_grad_(True)
# use another function otherwise we will bailout
# and won't be able to do fused checks
def fn_test_scalar_arg_requires_grad(
x: torch.Tensor, p: float
) -> torch.Tensor:
return p * (x * x + x)
scripted = torch.jit.script(fn_test_scalar_arg_requires_grad)
out = scripted(x, p)
out = scripted(x, p)
out = scripted(x, p)
self.assertAllFused(
scripted.graph_for(x, p),
except_for=(
"aten::size",
"prim::BroadcastSizes",
"aten::_size_if_not_equal",
),
)
@unittest.skipIf(not RUN_CUDA, "fuser requires CUDA")
@unittest.skipIf(not RUN_CUDA_MULTI_GPU, "needs non-zero device")
def test_fusion_reuse_multi_gpu(self):
def fn(x, y):
return x * y * x * y
inputs_cpu = [
torch.randn(4, 4, dtype=torch.float),
torch.randn(4, 4, dtype=torch.float),
]
inputs_cuda0 = [x.cuda(0) for x in inputs_cpu]
inputs_cuda1 = [y.cuda(1) for y in inputs_cpu]
# Should not crash; these should compile different kernels.
ge = self.checkScript(fn, inputs_cpu)
self.assertAllFused(ge.graph_for(*inputs_cpu))
ge(*inputs_cuda0)
ge(*inputs_cuda1)
# TODO: we're currently not checking 'device' in the type info when pulling
# nodes into a fusion group. We should fix that and re-enable this test.
@unittest.skipIf(not RUN_CUDA, "fuser requires CUDA")
@unittest.skipIf(not RUN_CUDA_MULTI_GPU, "needs non-zero device")
def test_kernel_cache_multi_gpu(self):
def not_fusible(x):
return x
def fn(x, y, z):
x_out = x * x * x * x * x # fusion: lambda x. x * x * x * x * x
y_out = y * y * y * y * y
z_out = z * z * z * z * z
return not_fusible(x_out), not_fusible(y_out), not_fusible(z_out)
inputs = [
torch.randn(4, 4, dtype=torch.float),
torch.randn(4, 4, dtype=torch.float, device="cuda:0"),
torch.randn(4, 4, dtype=torch.float, device="cuda:1"),
]
prev_cache_size = torch._C._jit_debug_fuser_num_cached_kernel_specs()
# There are 3 FusionGroups. Because they have the same graph, they
# should reuse the same KernelSpec in the KernelSpec cache.
ge = self.checkScript(fn, inputs)
self.assertGraphContainsExactly(ge.graph_for(*inputs), FUSION_GROUP, 3, True)
new_cache_size = torch._C._jit_debug_fuser_num_cached_kernel_specs()
# XXX: This assumes that the same kernel isn't already used by another test
# FIXME: Use the TE fuser's way of querying the cache.
# self.assertEqual(new_cache_size - prev_cache_size, 1)
@unittest.skipIf(not RUN_CUDA_MULTI_GPU, "needs non-zero device")
def test_nonzero_device_cuda(self):
device = "cuda:" + str(1)
x = torch.tensor([0.4], dtype=torch.float, device=device)
y = torch.tensor([0.7], dtype=torch.float, device=device)
def doit(x, y):
return torch.sigmoid(torch.tanh(x * (x + y) + x))
ge = self.checkTrace(doit, (x, y))
self.assertAllFused(ge.graph_for(x, y))
def test_lstm(self):
for device in self.devices:
inputs = get_lstm_inputs(device, training=True)
module = self.checkScript(LSTMCellS, inputs)
self.assertAllFused(
module.graph_for(inputs), except_for={"prim::TupleConstruct"}
)
def test_lstm_concat(self):
# single fusion node causes error
with set_fusion_group_inlining(True):
for device in self.devices:
inputs = get_lstm_inputs(device)
ge = self.checkTrace(LSTMCellC, inputs)
graph = ge.graph_for(*inputs)
except_nodes = {"prim::TupleConstruct", "aten::linear"}
# TODO... Chunk
if self.dynamic_shapes:
except_nodes = except_nodes.union(
{"aten::add", "prim::ConstantChunk"}
)
self.assertAllFused(ge.graph_for(*inputs), except_for=except_nodes)
# XXX: TE fuser can handle concats inside a fusion group.
# FileCheck().check("FusedConcat").check_next("return").run(str(graph))
def test_lstm_gates_permutations(self):
for device in self.devices:
# lstm has gates = x.mm(w_ih.t()) + hx.mm(w_hh.t()) + b_ih + b_hh.
# Test that any permutation of this will still result in one FusionGroup.
choices = ["x.mm(w_ih.t())", "hx.mm(w_hh.t())", "b_ih", "b_hh"]
template = dedent(
"""
def cell(x, hx, cx, w_ih, w_hh, b_ih, b_hh):
gates = {} + {} + {} + {}
ingate, forgetgate, cellgate, outgate = gates.chunk(4, 1)
return ingate * forgetgate * cellgate * outgate
"""
)
for permutation in permutations(choices, len(choices)):
code = template.format(*permutation)
scope = {}
exec(code, globals(), scope)
cu = torch.jit.CompilationUnit(code)
fusion_group_len = 2 if self.dynamic_shapes else 1
inputs = get_lstm_inputs(device, training=False)
self.assertEqual(cu.cell(*inputs), scope["cell"](*inputs))
forward_graph = cu.cell.graph_for(*inputs)
self.assertGraphContainsExactly(
forward_graph, FUSION_GROUP, fusion_group_len
)
# TODO: Fuser doesn't work at all when inputs require grad. Fix that
def test_lstm_traced(self):
for device in self.devices:
inputs = get_lstm_inputs(device)
ge = self.checkTrace(LSTMCellF, inputs)
graph = ge.graph_for(*inputs)
fusion_groups = self.findFusionGroups(graph)
# TODO: chunk
fusion_group_len = 2 if self.dynamic_shapes else 1
self.assertEqual(len(fusion_groups), fusion_group_len)
f = FileCheck()
if not self.dynamic_shapes:
f.check("Chunk")
f.check("aten::sigmoid").check("aten::tanh").run(
str(fusion_groups[0 if not self.dynamic_shapes else 1])
)
def test_milstm(self):
if self.dynamic_shapes:
self.skipTest("don't run conv with dynamic shapes")
for device in self.devices:
inputs = get_milstm_inputs(device, training=True)
module = self.checkScript(MiLSTMCell, inputs)
forward_graph = module.graph_for(*inputs)
# TODO: chunk
fusion_group_len = 2 if self.dynamic_shapes else 1
self.assertGraphContainsExactly(
forward_graph, FUSION_GROUP, fusion_group_len, consider_subgraphs=True
)
FileCheck().check("DifferentiableGraph").check("TupleConstruct").check_next(
"return"
).check(FUSION_GROUP).run(str(forward_graph))
hy, cy = module(*inputs)
warmup_backward((hy + cy).sum())
@unittest.skipIf(not RUN_CUDA, "fuser requires CUDA")
@unittest.skip("rand_like is not supported yet")
def test_rand_cuda(self):
class M(torch.jit.ScriptModule):
__constants__ = ["d"]
def __init__(self) -> None:
super().__init__()
self.d = torch.device("cuda")
@torch.jit.script_method
def create(self, x):
return x * x + x + torch.rand_like(x)
x = torch.zeros([3, 4, 5], dtype=torch.float, device="cuda")
m = M()
out1 = m.create(x)
out2 = m.create(x)
self.assertNotEqual(out1, out2)
self.assertTrue(torch.all(out1 >= 0))
self.assertTrue(torch.all(out1 < 1))
self.assertTrue(torch.all(out2 >= 0))
self.assertTrue(torch.all(out2 < 1))
self.assertAllFused(m.create.graph_for(x))
@staticmethod
def fn_test_relu(x, y):
return F.relu(x + 0.5 * y)
def test_relu(self):
    """The traced relu helper should fuse completely on every device."""
    for device in self.devices:
        args = tuple(
            torch.randn(4, 4, dtype=torch.float, device=device) for _ in range(2)
        )
        traced = self.checkTrace(self.fn_test_relu, args)
        self.assertAllFused(traced.graph_for(*args))
def test_erf(self):
    """erf/erfc fuse on GPU, both with and without requires_grad (the
    autodiff pass adds size-computation nodes, which are exempted)."""
    for device in self.devices:
        # only enabled on gpu
        if device == "cpu":
            continue

        def fn_test_erf(x):
            return F.relu(torch.erf(x) - torch.erfc(x))

        x = torch.randn(4, 4, dtype=torch.float, device=device)
        ge = self.checkScript(fn_test_erf, (x,), profiling=ProfilingMode.PROFILING)
        self.assertAllFused(ge.graph_for(x))
        x.requires_grad_(True)
        ge = self.checkScript(fn_test_erf, (x,), profiling=ProfilingMode.PROFILING)
        self.assertAllFused(
            ge.graph_for(x),
            except_for=(
                "aten::size",
                "prim::BroadcastSizes",
                "aten::_size_if_not_equal",
            ),
        )
@unittest.skipIf(not RUN_CUDA, "fuser requires CUDA")
@unittest.skip("rand_like is not supported yet")
def test_rand_broadcast_cuda(self):
    """rand_like with broadcasting inputs (currently skipped)."""
    def fn_test_rand(x, y):
        r = torch.rand_like(y)
        return r * x + x

    # If using profiling, a different function is needed to test different
    # shapes, or we'll use a cached script.
    def fn_test_rand2(x, y):
        r = torch.rand_like(y)
        return r * x * x

    x = torch.randn(4, 4, dtype=torch.float, device="cuda")
    y = torch.randn(4, 4, dtype=torch.float, device="cuda")
    script_f = torch.jit.script(fn_test_rand)
    warmup_forward(script_f, x, y)
    out = script_f(x, y)
    self.assertAllFused(script_f.graph_for(x, y))
    x.requires_grad_(True)
    out = script_f(x, y)
    # With autograd enabled, size-computation nodes are exempted.
    self.assertAllFused(
        script_f.graph_for(x, y),
        except_for=(
            "aten::size",
            "prim::BroadcastSizes",
            "aten::_size_if_not_equal",
        ),
    )
    # test that broadcasting random produces correct results
    x = torch.ones(4, 4, dtype=torch.float, device="cuda")
    y = torch.ones(4, dtype=torch.float, device="cuda")
    script_f = torch.jit.script(fn_test_rand2)
    warmup_forward(script_f, x, y)
    out = script_f(x, y)
    # The broadcast random row must be identical across all output rows.
    self.assertEqual(out[0, :] + torch.zeros(4, 4, device="cuda"), out)
@unittest.skipIf(not RUN_CUDA, "fuser requires CUDA")
@unittest.skip("rand_like is not supported yet")
def test_rand_diamond(self):
    """The same rand_like value must be reused across a diamond-shaped
    dataflow, so r cancels and out == x + y (currently skipped)."""
    def fn_test_diamond(x, y):
        r = torch.rand_like(y)
        a = x + r
        b = y - r
        return a + b

    x = torch.randn(4, 4, dtype=torch.float, device="cuda")
    y = torch.randn(4, 4, dtype=torch.float, device="cuda")
    script_f = torch.jit.script(fn_test_diamond)
    warmup_forward(script_f, x, y)
    out = script_f(x, y)
    self.assertEqual(out, x + y)
def test_scalar(self):
    """Zero-dim (scalar) tensor inputs should still fuse."""
    def fn(x, y):
        return 2 * x + y

    a = torch.tensor(0.1, dtype=torch.float, device="cpu")
    b = torch.tensor(1, dtype=torch.float, device="cpu")
    compiled = self.checkScript(fn, (a, b))
    self.assertAllFused(compiled.graph_for(a, b))
def test_inlined_optimized_graph(self):
    """Running a scripted fn with three distinct input shapes should
    create per-shape TensorExpr specializations visible after inlining."""
    @torch.jit.script
    def foo(x):
        return torch.relu(x + x)

    for _ in range(3):
        foo(torch.rand([4, 4]))

    for _ in range(3):
        foo(torch.rand([10]))

    for _ in range(3):
        foo(torch.rand([2, 2, 2]))

    g = torch.jit.last_executed_optimized_graph()

    # Before inlining there is a single guard If around one TensorExpr.
    FileCheck().check_count("prim::If", 1, exactly=True).check(
        "prim::TensorExpr"
    ).run(g)
    torch._C._jit_pass_inline(g)
    # After inlining, each of the three specializations appears.
    f = FileCheck()
    for _ in range(3):
        f.check("prim::If").check("prim::TensorExpr")
    f.run(g)
def test_small_constant(self):
    """Very small float constants must not lose precision when fused."""
    for device in self.devices:

        def fn_test_small_constant(x, y):
            return (1e-8 * x + 5e-9 * y) * 1e8

        x = torch.randn(4, 4, dtype=torch.float, device=device)
        y = torch.randn(4, 4, dtype=torch.float, device=device)
        ge = self.checkTrace(fn_test_small_constant, (x, y))
        self.assertAllFused(ge.graph_for(x, y))
# Currently we don't pull constants into fusion groups, because in some
# cases it could remove the constant from the original graph and now our
# fusion group needs to return that constant for its other users.
# Instead of never pulling constants into the fusion group, we should just
# be more careful at how we rewrite its users.
# TODO: fix that and reenable the test.
def test_tensor_scalar_ops(self):
    """Fusion with scalar operands: a constant scalar fuses into one
    group; a non-constant scalar tensor still fuses and produces correct
    results when its value changes."""
    for device in self.devices:

        def should_fuse(x):
            z = 3.0
            y = x + z
            return x * y

        def should_fuse_scalar(x, z):
            y = x + int(z)
            return x * y

        inputs = [torch.randn(2, 2, dtype=torch.float, device=device)]
        ge = self.checkScript(should_fuse, inputs)
        graph = ge.graph_for(*inputs)
        fusion_groups = self.findFusionGroups(graph)
        self.assertEqual(len(fusion_groups), 1)
        FileCheck().check("aten::add").check("aten::mul").run(str(fusion_groups[0]))

        inputs = [
            torch.randn(2, 2, dtype=torch.float, device=device),
            torch.tensor(3.0, dtype=torch.float, device=device),
        ]
        ge = self.checkScript(should_fuse_scalar, inputs)
        # Check that the fused graph computes correct results when the scalar
        # input changes.
        inputs = [
            torch.randn(2, 2, dtype=torch.float, device=device),
            torch.tensor(7.0, dtype=torch.float, device=device),
        ]
        self.assertEqual(ge(*inputs), should_fuse_scalar(*inputs))
        # The TE fuser supports fusion of non-constant scalars
        self.assertGraphContainsExactly(
            ge.graph_for(*inputs), FUSION_GROUP, 1, consider_subgraphs=True
        )
def test_where_and_typing(self):
    """where() on a bool mask with double inputs: everything fuses except
    the tuple construction for the multi-value return."""
    for device in self.devices:

        def f(x, y):
            mask = x > y
            res = torch.where(mask, x, y)
            return mask, res

        x = torch.randn(4, 4, dtype=torch.double, device=device)
        y = torch.randn(4, 4, dtype=torch.double, device=device)
        script_f = self.checkScript(f, (x, y))
        self.assertAllFused(
            script_f.graph_for(x, y), except_for={"prim::TupleConstruct"}
        )
def test_disabled(self):
old_cpu_fuser_state = torch._C._jit_can_fuse_on_cpu()
torch._C._jit_override_can_fuse_on_cpu(False)
def fn(a):
return a**2 + a
x = torch.randn(4, dtype=torch.float, device="cpu")
s = self.checkScript(fn, (x,))
g = s.graph_for(x)
self.assertEqual(len(self.findFusionGroups(g)), 0)
torch._C._jit_override_can_fuse_on_cpu(old_cpu_fuser_state)
def data_for(self, dtype, device="cuda", size=None):
    """Create a small test tensor of ``dtype`` on ``device``.

    Without ``size`` the base data is ``arange(1, 3)``; otherwise it is
    uniform random of the given shape.  Bool and quantized dtypes need
    special construction since ``.to`` cannot produce them directly.
    """
    if size is None:
        base = torch.arange(1, 3, dtype=torch.float, device=device)
    else:
        base = torch.rand(*size, device=device)
    if dtype == torch.bool:
        return base > 2
    if dtype in [torch.qint8, torch.quint8, torch.qint32]:
        return torch.quantize_per_tensor(base, 0.1, 1, dtype=dtype)
    return base.to(dtype)
def test_torch_to(self):
    """``Tensor.to`` fusion behavior.

    No-op casts, non-constant dtype arguments, pin_memory, and
    cross-device moves must NOT be handled by the TE fuser; constant
    dtype casts (made constant via freezing) must fuse and match eager
    for all tested dtype pairs.
    """
    # test no op
    @torch.jit.script
    def foo(x):
        return x.to(torch.float)

    foo(torch.tensor([3.0], dtype=torch.float))
    foo(torch.tensor([3.0], dtype=torch.float))
    FileCheck().check_not("TensorExpr").run(
        torch.jit.last_executed_optimized_graph()
    )

    # test not fusing non-const inputs
    @torch.jit.script
    def foo(x, dtype: int):
        return x.to(dtype)

    foo(torch.tensor([3.0], dtype=torch.float), torch.int)
    foo(torch.tensor([3.0], dtype=torch.float), torch.int)
    FileCheck().check_not("TensorExpr").run(
        torch.jit.last_executed_optimized_graph()
    )

    # test not fusing to_pinned inputs
    @torch.jit.script
    def foo(x, dtype: int):
        return x.to(pin_memory=True)

    foo(torch.tensor([3.0], dtype=torch.float), torch.int)
    foo(torch.tensor([3.0], dtype=torch.float), torch.int)
    FileCheck().check_not("TensorExpr").run(
        torch.jit.last_executed_optimized_graph()
    )

    # test across-device not supported
    if torch.cuda.is_available():

        @torch.jit.script
        def foo(x):
            return x.to(device="cuda")

        foo(torch.tensor([3.0], dtype=torch.float))
        foo(torch.tensor([3.0], dtype=torch.float))
        FileCheck().check_not("TensorExpr").run(
            torch.jit.last_executed_optimized_graph()
        )

    sizes = [(1, 4), (4, 4)]
    # reuses cast impl, smaller dtype set for faster test
    dtypes = [
        torch.bool,
        torch.int,
        torch.float16,
        torch.float32,
        torch.float64,
    ]

    class MyMod(torch.nn.Module):
        def __init__(self, dtype):
            super().__init__()
            self.dtype = dtype  # target dtype for the cast in forward()

        def forward(self, x):
            return x.to(self.dtype)

    bad_dtypes = []
    for dtype, output_dtype, device, size in product(
        dtypes, dtypes, self.devices, sizes
    ):
        # TODO: Add back when https://github.com/pytorch/pytorch/issues/55905 is closed
        if dtype in [torch.float16, torch.bfloat16] and device == "cpu":
            continue
        if dtype == output_dtype:
            continue

        x = self.data_for(dtype, device, size=size)
        mod = MyMod(output_dtype)
        ref = mod.forward(x)
        # use freezing to make non-Tensor args to `to` constant
        mod = torch.jit.freeze(torch.jit.script(mod.eval()))
        warmup_forward(mod.forward, x)
        self.assertEqual(ref, mod.forward(x))
        self.assertLastGraphAllFused()
@unittest.skip("Temporarily disabled")
def test_masked_fill(self):
    """masked_fill over dtype/device/scalar/size combinations: the traced
    graph must match eager and fully fuse.

    Fix: the failure message previously referenced an undefined name
    ``op`` (it carried a ``# noqa: F821``), so raising the diagnostic
    would itself crash with a NameError; the op name is now a literal.
    """
    dtypes = [
        torch.int8,
        torch.int16,
        torch.int32,
        torch.int64,
        # TODO: Add back when https://github.com/pytorch/pytorch/issues/55905 is closed
        # torch.float16,
        torch.float32,
        torch.float64,
        torch.bool,
    ]
    sizes = [(2,), (4, 4)]
    for self_dtype, device, scalar_val, size in product(
        dtypes, self.devices, [0.4, 3], sizes
    ):
        input_v = self.data_for(self_dtype, device, size=size)
        mask = self.data_for(torch.bool, device, size=size)

        def fn(input_v, mask):
            return torch.masked_fill(input_v, mask, scalar_val)

        ref = fn(input_v, mask)
        try:
            t = torch.jit.trace(fn, (input_v, mask))
            torch.testing.assert_close(ref, t(input_v, mask))
            self.assertLastGraphAllFused()
        except Exception as e:
            raise RuntimeError(
                " ".join(
                    [
                        "Failed:",
                        str(self_dtype),
                        "masked_fill",
                        device,
                        str(size),
                    ]
                )
            ) from e
def test_isnan(self):
    """``Tensor.isnan`` traces and fuses across dtypes/devices; the NaN
    in the input only survives the cast for floating-point dtypes."""
    x = torch.rand([4])
    x[0] = float("nan")
    inputs = [x, torch.tensor([float("nan"), 0.5])]
    dtypes = [
        torch.int8,
        torch.int16,
        torch.int32,
        torch.int64,
        torch.float16,
        torch.float32,
        torch.float64,
        torch.bool,
    ]

    for inp, device, dtype in product(inputs, self.devices, dtypes):
        # TODO: Add back when https://github.com/pytorch/pytorch/issues/55905 is closed
        if dtype in [torch.float16, torch.bfloat16] and device == "cpu":
            continue
        inp = inp.to(device=device, dtype=dtype)
        try:
            f = torch.jit.trace(lambda x: x.isnan(), (inp,))
            warmup_forward(f, inp)
            self.assertEqual(f(inp), inp.isnan())
            self.assertLastGraphAllFused()
        except Exception as e:
            raise RuntimeError(
                " ".join(["Failed:", str(dtype), "isnan", device])
            ) from e
def test_gelu(self):
    """F.gelu across dtypes/devices/sizes must match eager and fuse.

    NOTE(review): ``cond`` (a bool tensor) is passed as the
    ``approximate`` argument; combinations that eager rejects are skipped
    by the first try/except — confirm this is intentional.
    """
    def apply(fn):
        return lambda x, approximate: fn(x, approximate)

    unary_ops = [
        F.gelu,
    ]
    sizes = [(1,), (2,), (4, 4)]
    for dtype, op, device, size in product(
        self.dtypes, unary_ops, self.devices, sizes
    ):
        # TODO: Add back when https://github.com/pytorch/pytorch/issues/55905 is closed
        if dtype in [torch.float16, torch.bfloat16] and device == "cpu":
            continue
        try:
            x = self.data_for(dtype, device, size=size)
            cond = self.data_for(torch.bool, device)
            fn = apply(op)
            ref = fn(x, cond)
        except Exception:
            # If eager mode doesn't support a dtype/op/device combo,
            # neither does the fuser. Catch everything to avoid needing to
            # guess what errors might be thrown by eager.
            continue
        try:
            t = torch.jit.trace(fn, (x, cond))
            torch.testing.assert_close(ref, t(x, cond))
            self.assertAllFused(t.graph_for(x, cond))
        except Exception as e:
            raise RuntimeError(
                " ".join(["Failed:", str(dtype), op.__name__, device, str(size)])
            ) from e
def test_unary_ops(self):
    """Sweep elementwise unary ops over dtype/device/size combinations:
    the traced graph must match eager and fully fuse.

    Combos rejected by eager are skipped; erf/erfc are GPU-only.
    """
    with torch._jit_internal._disable_emit_hooks():

        def apply(fn):
            return lambda x: fn(x)

        unary_ops = [
            torch.lgamma,
            torch.sigmoid,
            torch.reciprocal,
            torch.neg,
            torch.relu,
            F.relu6,
            torch.log,
            torch.log10,
            torch.log1p,
            torch.log2,
            torch.exp,
            torch.expm1,
            torch.erf,
            torch.erfc,
            torch.cos,
            torch.sin,
            torch.tan,
            torch.acos,
            torch.asin,
            torch.cosh,
            torch.sinh,
            torch.atan,
            torch.tanh,
            F.hardtanh,
            F.hardsigmoid,
            F.hardswish,
            F.softplus,
            F.silu,
            F.mish,
            F.elu,
            torch.sqrt,
            torch.rsqrt,
            torch.abs,
            # TODO broken on int8 since
            # https://github.com/pytorch/pytorch/pull/85144
            # RuntimeError: Invalid integral op_type: 23
            # torch.ceil,
            # torch.floor,
            # torch.round,
            # torch.trunc,
            torch.frac,
            # TODO: broken on ROCm?
            # F.hardshrink,
            F.leaky_relu,
            lambda x: torch.threshold(x, 0, -10),
            # TODO: broken since type promotion was added
            # lambda x: torch.clamp(x, -10, 10),
        ]
        gpu_only = {torch.erf, torch.erfc}
        sizes = [(1,), (2,), (4, 4)]
        for dtype, op, device, size in product(
            self.dtypes, unary_ops, self.devices, sizes
        ):
            # TODO: Add back when https://github.com/pytorch/pytorch/issues/55905 is closed
            if dtype in [torch.float16, torch.bfloat16] and device == "cpu":
                continue
            # todo - re-enable. fails with .500
            if dtype == torch.bfloat16 and op == torch.round:
                continue
            if op in gpu_only and device == "cpu":
                continue
            try:
                x = self.data_for(dtype, device, size=size)
                fn = apply(op)
                ref = fn(x)
            except Exception:
                # If eager mode doesn't support a dtype/op/device combo,
                # neither does the fuser. Catch everything to avoid needing to
                # guess what errors might be thrown by eager.
                continue
            try:
                t = torch.jit.trace(fn, (x,))
                torch.testing.assert_close(ref, t(x))
                self.assertAllFused(t.graph_for(x))
            except Exception as e:
                raise RuntimeError(
                    " ".join(
                        ["Failed:", str(dtype), op.__name__, device, str(size)]
                    )
                ) from e
def test_binary_ops(self):
    """Sweep elementwise binary ops over dtype/device combinations: the
    traced graph must match eager; fusion is only asserted for ops whose
    dtype is supported (fmod/remainder are float-only for fusion)."""
    def apply(fn):
        return lambda x, y: fn(x, y)

    binary_ops = [
        operator.__and__,
        operator.__or__,
        operator.__xor__,
        torch.add,
        torch.sub,
        torch.mul,
        torch.min,
        torch.max,
        lambda x, y: torch.lerp(x, y, 0.5),
        torch.atan2,
        torch.div,
        torch.eq,
        torch.ne,
        torch.ge,
        torch.gt,
        torch.lt,
        torch.fmod,
        torch.remainder,
        lambda x, y: y.type_as(x),
    ]
    fp_only = [
        torch.fmod,
        torch.remainder,
    ]
    devices = self.devices
    for dtype, op, device in product(self.dtypes, binary_ops, devices):
        if dtype in [torch.float16, torch.bfloat16] and device == "cpu":
            continue
        try:
            x = self.data_for(dtype, device)
            y = self.data_for(dtype, device)
            fn = apply(op)
            ref = fn(x, y)
        except Exception:
            # If eager mode doesn't support a dtype/op/device combo,
            # neither does the fuser. Catch everything to avoid needing to
            # guess what errors might be thrown by eager.
            continue
        try:
            t = torch.jit.trace(fn, (x, y))
            self.assertEqual(ref, t(x, y))
            if op not in fp_only or dtype.is_floating_point:
                self.assertAllFused(t.graph_for(x, y))
        except Exception as e:
            raise RuntimeError(
                " ".join(["Failed:", str(dtype), op.__name__, device])
            ) from e
def test_binary_scalar_ops(self):
    """Scalar-scalar binary ops lowered directly through TensorExprKernel
    must agree with the IR interpreter for every value pair.

    The IR template is instantiated per (dtype_x, dtype_y, op); combos the
    interpreter rejects are skipped entirely.
    """
    def apply(fn):
        return lambda x, y: fn(x, y)

    ir_template = """
    graph(%x : {dtype_x}, %y : {dtype_y}):
      %z = {op}(%x, %y)
      return (%z)"""

    binary_ops = [
        "aten::mul",
        "aten::add",
        "aten::sub",
        "aten::div",
        "aten::lt",
        "aten::le",
        "aten::eq",
        "aten::ne",
        "aten::gt",
        "aten::ge",
        "aten::__or__",
        "aten::__xor__",
        "aten::__and__",
        "aten::__lshift__",
        "aten::__rshift__",
    ]
    dtypes = ["int", "float", "bool"]
    values = {"int": [10, 3], "float": [12.34, 2.78], "bool": [True, False]}
    for dtype_x, dtype_y, op in product(dtypes, dtypes, binary_ops):
        code = ir_template.format(dtype_x=dtype_x, dtype_y=dtype_y, op=op)

        # Interpret the graph
        try:
            graph = torch._C.parse_ir(code)
            for x, y in product(values[dtype_x], values[dtype_y]):
                ref = torch._C._jit_interpret_graph(graph, (x, y))
        except Exception:
            # If we can't interpret this IR, don't bother checking NNC.
            continue

        # Compile the graph
        try:
            k = torch._C._te.TensorExprKernel(graph)
        except Exception as e:
            raise RuntimeError(" ".join(["Compilation failed:", str(code)])) from e

        # Run the graph
        for x, y in product(values[dtype_x], values[dtype_y]):
            ref = torch._C._jit_interpret_graph(graph, (x, y))
            try:
                res = k.run((x, y))
                self.assertEqual(ref, res)
            except Exception as e:
                raise RuntimeError(
                    " ".join(["Failed at runtime:", str(x), str(y), str(code)])
                ) from e
def test_matmul(self):
    """matmul across many shape combinations: traced results must match
    eager; only 2D x 2D is expected to fuse (via an external call).

    Fix: the first ``except`` bound an exception variable it never used.
    """
    if self.dynamic_shapes:
        self.skipTest("don't run conv with dynamic shapes")

    def fn(x, y):
        return torch.matmul(x, y)

    devices = ["cpu"]  # No cuda support for ext calls yet
    sizes = [
        [[128, 128], [128, 128]],
        [[10, 10], [10, 10]],
        [[1, 16], [16, 128]],
        [[128], [128]],
        [[128], [128, 128]],
        [[3], [3]],
        [[3, 4], [4]],
        [[10, 3, 4], [4]],
        [[10, 3, 4], [10, 4, 5]],
        [[10, 3, 4], [4, 5]],
    ]

    # Only 2D x 2D matrix multiply is supported. For non-supported sizes we
    # still want to run results verification to test that we didn't
    # accidentally fuse it, but we skip the 'is-fused' check.
    # TODO: add support for other shape combinations and make this set empty:
    skip_is_fused_check_sizes = [
        "[[128], [128]]",
        "[[128], [128, 128]]",
        "[[3], [3]]",
        "[[3, 4], [4]]",
        "[[10, 3, 4], [4]]",
        "[[10, 3, 4], [10, 4, 5]]",
        "[[10, 3, 4], [4, 5]]",
    ]
    for dtype, size, device in product(self.dtypes, sizes, devices):
        if dtype in [torch.float16, torch.bfloat16] and device == "cpu":
            continue
        try:
            size_x, size_y = size
            x = self.data_for(dtype, device, size=size_x)
            y = self.data_for(dtype, device, size=size_y)
            ref = fn(x, y)
        except Exception:
            # If eager mode doesn't support a dtype/op/device combo,
            # neither does the fuser. Catch everything to avoid needing to
            # guess what errors might be thrown by eager.
            continue
        try:
            t = torch.jit.trace(fn, (x, y))
            t(x, y)  # warm up the profiling executor
            self.assertEqual(ref, t(x, y))
            if str(size) not in skip_is_fused_check_sizes:
                self.assertAllFused(t.graph_for(x, y))
        except Exception as e:
            raise RuntimeError(" ".join(["Failed:", str(dtype), device])) from e
def test_binary_tensor_scalar_ops(self):
    """Binary ops with a constant Python scalar second operand: the
    traced result must match eager and the graph must fully fuse.

    Fix: ``torch.jit.trace(fn, (x))`` passed a bare tensor (``(x)`` is not
    a tuple); example inputs are now the proper 1-tuple ``(x,)``.
    """
    with torch._jit_internal._disable_emit_hooks():

        def apply_with_scalar(fn, scalar):
            return lambda x: fn(x, scalar)

        # FIXME: Fails in IR Eval: torch.int64 and_ cpu
        binary_ops = [
            operator.__and__,
            operator.__or__,
            operator.__xor__,
            torch.add,
            torch.sub,
            torch.mul,
            torch.eq,
            torch.ne,
            torch.ge,
            torch.lt,
            torch.gt,
        ]
        devices = self.devices
        # Maybe we should split this into separate tests to speed it up by
        # only using scalar values relevant to particular ops
        scalars = [1.5, 3, 0, -2.0, -1]
        for dtype, op, device, scalar in product(
            self.dtypes, binary_ops, devices, scalars
        ):
            if dtype in [torch.float16, torch.bfloat16] and device == "cpu":
                continue
            try:
                x = self.data_for(dtype, device)
                fn = apply_with_scalar(op, scalar)
                ref = fn(x)
            except Exception:
                # If eager mode doesn't support a dtype/op/device combo,
                # neither does the fuser. Catch everything to avoid needing to
                # guess what errors might be thrown by eager.
                continue
            try:
                t = torch.jit.trace(fn, (x,))
                self.assertEqual(ref, t(x))
                self.assertAllFused(t.graph_for(x))
            except Exception as e:
                raise RuntimeError(
                    " ".join(["Failed:", str(dtype), op.__name__, device])
                ) from e
def test_binary_div_ops(self):
    """Division-family ops with a non-zero constant scalar operand: the
    traced result must match eager (no fusion assertion here).

    Fix: ``torch.jit.trace(fn, (x))`` passed a bare tensor (``(x)`` is not
    a tuple); example inputs are now the proper 1-tuple ``(x,)``.
    """
    def apply_with_scalar(fn, scalar):
        return lambda x: fn(x, scalar)

    binary_ops = [
        torch.div,
        torch.remainder,
        torch.fmod,
    ]
    devices = self.devices
    # Maybe we should split this into separate tests to speed it up by
    # only using scalar values relevant to particular ops
    scalars = [1.5, 3, -2.0, -1]  # skip 0
    for dtype, op, device, scalar in product(
        self.dtypes, binary_ops, devices, scalars
    ):
        if dtype in [torch.float16, torch.bfloat16] and device == "cpu":
            continue
        try:
            x = self.data_for(dtype, device)
            fn = apply_with_scalar(op, scalar)
            ref = fn(x)
        except Exception:
            # If eager mode doesn't support a dtype/op/device combo,
            # neither does the fuser. Catch everything to avoid needing to
            # guess what errors might be thrown by eager.
            continue
        try:
            t = torch.jit.trace(fn, (x,))
            self.assertEqual(ref, t(x))
        except Exception as e:
            raise RuntimeError(
                f"Failed: {dtype} {op.__name__} {device} {scalar}"
            ) from e
def test_binary_pow(self):
    """pow with a constant scalar exponent over float dtypes: the traced
    result must match eager and the graph must fully fuse.

    Fix: ``torch.jit.trace(fn, (x))`` passed a bare tensor (``(x)`` is not
    a tuple); example inputs are now the proper 1-tuple ``(x,)``.
    """
    def apply_with_scalar(fn, scalar):
        return lambda x: fn(x, scalar)

    dtypes = [
        # FIXME: 'pow' fails with dtype=torch.float16/device=cuda/scalar=0
        # torch.float16,
        torch.float32,
        torch.float64,
        # torch.bool intentionally not included
    ]
    binary_ops = [
        torch.pow,
    ]
    # Maybe we should split this into separate tests to speed it up by
    # only using scalar values relevant to particular ops
    scalars = [1.5, 3, 0, -2.0, -1]
    for dtype, op, device, scalar in product(
        dtypes, binary_ops, self.devices, scalars
    ):
        if dtype in [torch.float16, torch.bfloat16] and device == "cpu":
            continue
        try:
            x = self.data_for(dtype, device)
            fn = apply_with_scalar(op, scalar)
            ref = fn(x)
        except Exception:
            # If eager mode doesn't support a dtype/op/device combo,
            # neither does the fuser. Catch everything to avoid needing to
            # guess what errors might be thrown by eager.
            continue
        try:
            t = torch.jit.trace(fn, (x,))
            self.assertEqual(ref, t(x))
            self.assertAllFused(t.graph_for(x))
        except Exception as e:
            raise RuntimeError(
                " ".join(["Failed:", str(dtype), op.__name__, device])
            ) from e
def test_ternary_ops(self):
    """Ternary elementwise ops (lerp, addcmul) over dtype/device combos:
    traced results must match eager and fully fuse."""
    def apply(fn):
        return lambda x, y, z: fn(x, y, z)

    ternary_ops = [
        torch.lerp,
        torch.addcmul,
    ]
    devices = self.devices
    for dtype, op, device in product(self.dtypes, ternary_ops, devices):
        if dtype in [torch.float16, torch.bfloat16] and device == "cpu":
            continue
        try:
            x = self.data_for(dtype, device)
            y = self.data_for(dtype, device)
            z = self.data_for(dtype, device)
            fn = apply(op)
            ref = fn(x, y, z)
        except Exception:
            # If eager mode doesn't support a dtype/op/device combo,
            # neither does the fuser. Catch everything to avoid needing to
            # guess what errors might be thrown by eager.
            continue
        try:
            t = torch.jit.trace(fn, (x, y, z))
            self.assertEqual(ref, t(x, y, z))
            self.assertAllFused(t.graph_for(x, y, z))
        except Exception as e:
            raise RuntimeError(
                " ".join(["Failed:", str(dtype), op.__name__, device])
            ) from e
def test_ternary_norm_ops(self):
    """batch_norm with (input, weight, bias) tensors: traced results must
    match eager and fully fuse."""
    def apply(fn):
        return lambda x, y, z: fn(x, y, z)

    ternary_ops = [
        F.batch_norm,
    ]
    devices = self.devices
    for dtype, op, device in product(self.dtypes, ternary_ops, devices):
        if dtype in [torch.float16, torch.bfloat16] and device == "cpu":
            continue
        try:
            x = self.data_for(dtype, device, size=[5, 3, 128, 128])
            y = self.data_for(dtype, device, size=[3])
            z = self.data_for(dtype, device, size=[3])
            fn = apply(op)
            ref = fn(x, y, z)
        except Exception:
            # If eager mode doesn't support a dtype/op/device combo,
            # neither does the fuser. Catch everything to avoid needing to
            # guess what errors might be thrown by eager.
            continue
        try:
            t = torch.jit.trace(fn, (x, y, z))
            self.assertEqual(ref, t(x, y, z))
            self.assertAllFused(t.graph_for(x, y, z))
        except Exception as e:
            raise RuntimeError(
                " ".join(["Failed:", str(dtype), op.__name__, device])
            ) from e
@unittest.skip(
    "FIXME: fuser doesn't include ListConstruct nodes to the group causing a failure"
)
def test_list_ops(self):
    """cat over a list of elementwise-squared tensors (currently skipped:
    ListConstruct is not pulled into the fusion group)."""
    def apply(fn):
        return lambda x, y, z: fn([x * x, y * y, z * z])

    devices = self.devices
    list_ops = [
        torch.cat,
    ]
    for dtype, op, device in product(self.dtypes, list_ops, devices):
        if dtype in [torch.float16, torch.bfloat16] and device == "cpu":
            continue
        try:
            x = self.data_for(dtype, device, size=[5, 4, 1, 7])
            y = self.data_for(dtype, device, size=[5, 4, 1, 7])
            z = self.data_for(dtype, device, size=[5, 4, 1, 7])
            fn = apply(op)
            ref = fn(x, y, z)
        except Exception:
            # If eager mode doesn't support a dtype/op/device combo,
            # neither does the fuser. Catch everything to avoid needing to
            # guess what errors might be thrown by eager.
            continue
        try:
            t = torch.jit.trace(fn, (x, y, z))
            self.assertEqual(ref, t(x, y, z))
            self.assertAllFused(t.graph_for(x, y, z))
        except Exception as e:
            raise RuntimeError(
                " ".join(["Failed:", str(dtype), op.__name__, device])
            ) from e
def test_where_ops(self):
    """where() with tensor and scalar branches over dtype/device combos:
    traced results must match eager and fully fuse."""
    def apply(fn):
        return lambda cond, x, y: fn(cond, x, y)

    ops = [
        torch.where,
        lambda cond, x, y: torch.where(cond, x, 3.1415),
        lambda cond, x, y: torch.where(cond, 42, y),
    ]
    devices = self.devices
    for dtype, op, device in product(self.dtypes, ops, devices):
        if dtype in [torch.float16, torch.bfloat16] and device == "cpu":
            continue
        try:
            cond = self.data_for(torch.bool, device)
            x = self.data_for(dtype, device)
            y = self.data_for(dtype, device)
            fn = apply(op)
            ref = fn(cond, x, y)
        except Exception:
            # If eager mode doesn't support a dtype/op/device combo,
            # neither does the fuser. Catch everything to avoid needing to
            # guess what errors might be thrown by eager.
            continue
        try:
            t = torch.jit.trace(fn, (cond, x, y))
            self.assertEqual(ref, t(cond, x, y))
            self.assertAllFused(t.graph_for(cond, x, y))
        except Exception as e:
            raise RuntimeError(
                " ".join(["Failed:", str(dtype), op.__name__, device])
            ) from e
def test_unsupported_dtypes(self):
    """Dtypes the TE fuser does not support (uint8, complex, quantized)
    must still trace correctly but produce zero fusion groups."""
    for device in self.devices:

        def fn(x):
            return x * x + x

        unsupported_dtypes = [
            torch.uint8,
            torch.complex32,
            torch.complex64,
            torch.complex128,
            torch.qint8,
            torch.quint8,
            torch.qint32,
        ]
        for dtype in unsupported_dtypes:
            try:
                x = self.data_for(dtype, device)
                ref = fn(x)
            except Exception:
                # If eager mode doesn't support a dtype/op/device combo,
                # neither does the fuser. Catch everything to avoid needing to
                # guess what errors might be thrown by eager.
                continue

            t = torch.jit.trace(fn, (x,))
            self.assertEqual(ref, t(x))
            self.assertEqual(len(self.findFusionGroups(t.graph_for(x))), 0)
def test_superslomo(self):
    """Regression test extracted from Super-SloMo with strided inputs of
    mixed sizes and outputs of mixed shapes; previously exposed a memory
    corruption from unguarded outputs."""
    devices = self.devices.copy()
    if not LLVM_ENABLED:
        devices.remove("cpu")
    for device in devices:
        # Test extracted from Super-SloMo: https://github.com/avinashpaliwal/Super-SloMo
        # A few interesting things happen here: strided inputs of mixed size,
        # plus outputs of mixed shapes. The latter characteristic happened to
        # expose a memory corruption bug due to not properly guarding the
        # outputs.
        def eager(t0, t1, t2, t3, t4):
            t5 = torch.mul(t0, t4)
            t6 = torch.mul(t2, t3)
            t7 = torch.mul(t6, t1)
            t9 = torch.add(t5, t7)
            t11 = torch.add(t0, t6)
            ft_p = torch.div(t9, t11)
            return (ft_p, t11, t9, t6)

        # Deliberately non-contiguous / permuted inputs.
        t0 = torch.rand(1, 6, 352, 352, device=device).transpose(0, 1)
        t1 = torch.rand(6, 3, 352, 352, device=device)
        t2 = torch.rand(6, device=device)[None, None, None, :].permute(3, 0, 1, 2)
        t3 = torch.rand(6, 1, 352, 352, device=device)
        t4 = torch.rand(6, 3, 352, 352, device=device)
        inputs = [t0, t1, t2, t3, t4]

        script = torch.jit.script(eager)
        for _ in range(4):
            for pair in zip(script(*inputs), eager(*inputs)):
                test, ref = pair
                torch.testing.assert_close(test, ref)
                self.assertAllFused(
                    script.graph_for(*inputs), except_for={"prim::TupleConstruct"}
                )
def test_sub_gt_and(self):
    """Regression test: a value used only in a never-executed branch keeps
    its profiled type as plain Tensor; the fuser must not pull such a
    value into a fusion group.

    Fix: the result of ``checkScript`` was bound to an unused local.
    """
    for device in self.devices:

        def eager(t1, t2, t3, t4, t: float):
            w = t1 - t2
            h = t3 - t4
            k = (w > t) & (h > t)
            assert k.dtype == torch.bool
            if t > 0.5:
                # Putting a use of k in a never-executed conditional prevents
                # profiling its type, which leaves it as "Tensor". If we
                # propagate Tensor back to the definition of k, we have to be
                # careful not to create a fusion group containing it.
                return k + 1
            return w

        t = torch.rand(8, dtype=torch.float, device=device)
        # checkScript already asserts eager/scripted parity.
        self.checkScript(eager, (t, t, t, t, 0.1))
@skipIfTorchDynamo("too slow")
def test_chunk_mul_one(self):
    """chunk into three pieces, then multiply only one by a constant."""
    if self.dynamic_shapes:
        self.skipTest("TODO: chunk dynamic shapes")
    for device in self.devices:

        def eager(x):
            z, y, w = torch.chunk(x, 3, -1)
            return z * 3, y, w

        x = torch.rand(64, 1, 3072, dtype=torch.float, device=device)
        z, y, w = eager(x)
        script = self.checkScript(eager, (x,))
def test_eq_unsqueeze_type_as(self):
    """Comparison producing a bool mask, unsqueeze, then type_as back to
    the float input; checkScript verifies eager/scripted parity."""
    for device in self.devices:

        def eager(a, b):
            mask = b == 1
            mask = torch.unsqueeze(mask, -1)
            x = mask.type_as(a)
            return x, mask

        a = torch.rand(1, 64, 1024, device=device, dtype=torch.float)
        b = torch.randint(-2, 2, (1, 64), device=device, dtype=torch.long)
        script = self.checkScript(eager, (a, b))
def test_neg_pow(self):
    """neg(pow(...)) with tensor/tensor, tensor/scalar, and scalar/tensor
    argument combinations; only correctness is checked for now."""
    def eager_tt(a: torch.Tensor, b: torch.Tensor):
        return torch.neg(torch.pow(a, b))

    def eager_ts(a: torch.Tensor, b: float):
        return torch.neg(torch.pow(a, b))

    def eager_st(a: float, b: torch.Tensor):
        return torch.neg(torch.pow(a, b))

    a = torch.rand(1, dtype=torch.float)
    b = torch.rand(1, dtype=torch.float)
    s = b.item()
    script = self.checkScript(eager_tt, (a, b))
    # TODO: re-enable fusion, which doesn't work right now. just test correctness for now
    # self.assertAllFused(script.graph_for(a, b))
    script = self.checkScript(eager_ts, (a, s))
    # self.assertAllFused(script.graph_for(a, s))
    script = self.checkScript(eager_st, (s, b))
    # self.assertAllFused(script.graph_for(s, b))
@unittest.skipIf(not LLVM_ENABLED, "Too slow to run with the TE interpreter")
def test_conv2d_depthwise(self):
    """Depthwise conv2d (groups == channels) should be handled by the
    fuser and match eager."""
    if self.dynamic_shapes:
        self.skipTest("don't run conv with dynamic shapes")

    def eager(input, weight, bias):
        return torch.conv2d(input, weight, bias, stride=1, padding=1, groups=72)

    input = torch.rand((1, 72, 56, 56), dtype=torch.float)
    weight = torch.rand((72, 1, 3, 3), dtype=torch.float)
    bias = torch.rand((72), dtype=torch.float)
    script = self.checkScript(eager, (input, weight, bias))
    self.assertAllFused(script.graph_for(input, weight, bias))
def test_conv2d(self):
    """Regular (non-depthwise) conv2d must NOT be taken by the TE fuser:
    the optimized graph contains no TensorExpr node."""
    if self.dynamic_shapes:
        self.skipTest("don't run conv with dynamic shapes")

    def eager(input, weight, bias):
        return torch.conv2d(input, weight, bias, stride=1, padding=1, groups=1)

    input = torch.rand((1, 64, 56, 56), dtype=torch.float)
    weight = torch.rand((64, 64, 3, 3), dtype=torch.float)
    bias = torch.rand((64), dtype=torch.float)
    script = self.checkScript(eager, (input, weight, bias))
    FileCheck().check_not("TensorExpr").run(
        torch.jit.last_executed_optimized_graph()
    )
def test_type_as_cat(self):
    """cat of an input with a type_as-converted second input, across all
    dtype pairs; traced graph must match eager and fuse (even when the
    second input's runtime value changes)."""
    with inline_fusion_groups():

        def eager(x, y):
            return torch.cat((x, y.type_as(x)), dim=1)

        dtypes = self.dtypes.copy()
        # CPU fuser doesn't support float16.
        dtypes.remove(torch.float16)
        dtypes.remove(torch.bfloat16)
        for dtype1, dtype2 in product(dtypes, dtypes):
            x = torch.randint(2, (1, 13)).to(dtype1)
            zero = torch.tensor([[0]]).to(dtype2)
            one = torch.tensor([[1]]).to(dtype2)
            script = torch.jit.trace(eager, (x, zero))
            for _ in range(3):
                torch.testing.assert_close(script(x, zero), eager(x, zero))
                torch.testing.assert_close(script(x, one), eager(x, one))
            self.assertAllFused(script.graph_for(x, one))
def test_to_device(self):
    """``to(device='cpu')`` followed by an elementwise op should fuse."""
    def eager(x):
        return x.to(device="cpu").relu()

    inp = torch.rand(8)
    scripted = self.checkScript(eager, (inp,))
    self.assertAllFused(scripted.graph_for(inp))
def test_dims(self):
    """Division of a strided view by a broadcastable tensor should fuse."""
    def eager(x, y):
        return x / (y + 0.0001)

    lhs = torch.linspace(-1, 1, 768, dtype=torch.float32).as_strided(
        (1, 1, 768), (768, 1, 1)
    )
    rhs = torch.tensor([[[2.0]]], dtype=torch.float32)
    scripted = self.checkScript(eager, (lhs, rhs))
    self.assertAllFused(scripted.graph_for(lhs, rhs))
@unittest.skipIf(not RUN_CUDA, "fuser requires CUDA")
def test_channels_last_dims_dynamic(self):
    """Channels-last inputs with every combination of singleton dims must
    keep channels-last outputs under the dynamic fusion strategy.

    Fix: the loop variable shadowed the builtin ``set``.
    """
    def eager(x, y):
        return x + (y + 0.0001)

    indices = [0, 1, 2, 3]
    sets = []
    for i in range(len(indices) + 1):
        for subset in combinations(indices, i):
            sets.append(subset)  # noqa: PERF402
    for subset in sets:
        size = [2, 3, 4, 5]
        # Collapse the chosen dims to 1 to exercise broadcasting.
        for index in subset:
            size[index] = 1
        inp = torch.rand(size).to(memory_format=torch.channels_last).cuda()
        with texpr_enable_strategy([("DYNAMIC", 20)]):
            foo_s = torch.jit.trace(eager, (inp, inp))
            for _ in range(3):
                out = foo_s(inp, inp)
            out_eager = eager(inp, inp)
            self.assertEqual(out_eager, out)
            self.assertTrue(out.is_contiguous(memory_format=torch.channels_last))
            g = torch.jit.last_executed_optimized_graph()
            FileCheck().check("TensorExpr").run(g)
def test_exhaust_specializations(self):
    """With only one STATIC specialization allowed, a second input shape
    still yields a second TensorExpr after inlining."""
    with texpr_enable_strategy([("STATIC", 1)]):

        @torch.jit.script
        def foo(x):
            return x + x + x

        for _ in range(3):
            foo(torch.rand([2, 2]))

        for _ in range(3):
            foo(torch.rand([4, 4, 4]))

        g = torch.jit.last_executed_optimized_graph()
        torch._C._jit_pass_inline(g)

        FileCheck().check_count("TensorExpr", 2, exactly=True).run(g)
def test_unsqueeze_var_dim(self):
    """unsqueeze with a non-constant (scripted int) dim argument."""
    def eager(x, y, z: int):
        return x * torch.unsqueeze(y, dim=z)

    lhs = torch.rand(4, 4, 64).permute(1, 0, 2)
    rhs = torch.rand(4, 4)
    self.checkScript(eager, (lhs, rhs, 2))
def _test_fwd_bwd(self, fn):
    """Run 11 steps of plain gradient descent with both eager ``fn`` and
    its scripted version, sharing the same upstream grad each step, and
    check the forward outputs stay equal."""
    x = torch.arange(-10, 10, dtype=torch.float32, requires_grad=True)
    xs = torch.arange(-10, 10, dtype=torch.float32, requires_grad=True)
    script = torch.jit.script(fn)
    for _ in range(11):
        y = fn(x)
        g0 = torch.rand_like(y)
        y.backward(g0)

        ys = script(xs)
        ys.backward(g0)

        with torch.no_grad():
            # Identical SGD update on both copies, then clear grads.
            x -= 0.1 * x.grad
            xs -= 0.1 * xs.grad
            x.grad = None
            xs.grad = None
        torch.testing.assert_close(y, ys)
def test_relu_fwd_bwd(self):
    """Forward/backward parity for a scaled relu."""
    def fn(x):
        return torch.relu(x * 1.01)

    self._test_fwd_bwd(fn)
def test_hardswish_fwd_bwd(self):
    """Forward/backward parity for hardswish times a constant."""
    def fn(x):
        return F.hardswish(x) * 1.01

    self._test_fwd_bwd(fn)
def test_hardsigmoid_fwd_bwd(self):
    """Forward/backward parity for hardsigmoid times a constant."""
    def fn(x):
        return F.hardsigmoid(x) * 1.01

    self._test_fwd_bwd(fn)
def test_cat_graph_opt(self):
    """log(cat(...)) over differently-sized inputs should fully fuse."""
    def foo(x, y, z):
        return torch.log(torch.cat([x, y, z]))

    self.checkScript(
        foo, (torch.rand([5, 5]), torch.rand([2, 5]), torch.rand([1, 5]))
    )
    # TODO: not sure why not updated graph isn't reflected in last_optimized_graph
    self.assertLastGraphAllFused()
def test_dynamic_cat(self):
    """Nested cat over lists whose element sizes vary between elements
    (but whose pairwise concatenations line up) must run repeatedly
    without shape-specialization errors."""
    with inline_fusion_groups():

        @torch.jit.script
        def repro(
            xs: list[torch.Tensor], ys: list[torch.Tensor], zs: list[torch.Tensor]
        ):
            return [
                torch.cat([x, torch.cat([y, z], dim=-1)], dim=-1)
                for x, y, z in zip(xs, ys, zs)
            ]

        for _ in range(3):
            N = 3
            xs = [torch.ones(21) for _ in range(N)]
            # Note: concat of ys and zs will have the same size for each
            # pair, even though the individual ys and zs do not.
            ys = [torch.ones(N - i) for i in range(N)]
            zs = [torch.ones(i) for i in range(N)]
            repro(xs, ys, zs)
def test_scalar_only_inputs(self):
def eager(b: float):
a = torch.ones(1)
return a * b
script = self.checkScript(eager, (1.0,))
def test_cat_2k_args(self):
with inline_fusion_groups():
def eager(x):
return torch.relu(torch.cat([x for _ in range(2000)]))
x = torch.randn(1)
trace = self.checkTrace(eager, (x,))
fusion_groups = self.findFusionGroups(trace.graph_for(x))
self.assertEqual(len(fusion_groups), 0)
def test_adaptive_avg_pool2d(self):
# TODO: once the adaptive_avg_pool2d is available in OpInfo DB, this
# test should be moved there
with inline_fusion_groups():
def foo1(x):
return torch.nn.functional.adaptive_avg_pool2d(x, (2, 2))
def foo2(x):
return torch.nn.functional.adaptive_avg_pool2d(x, (2))
x = torch.randn(4, 4, 4)
for foo in [foo1, foo2]:
f = torch.jit.trace(foo, (x,))
kernel = torch._C._te.TensorExprKernel(f.graph)
correct_val = f(x)
self.assertEqual(kernel.run((x,)), correct_val)
def test_unrolled_cat(self):
with inline_fusion_groups():
def eager(x):
ret = torch.empty(0)
for i in range(x.shape[0]):
ret = torch.cat([ret, x[i].relu()])
return ret
script = torch.jit.script(eager)
# Warm up with size=1 tensor; since the loop iterates once the
# profile data will be "burned in" assuming size=1, and then
# unrolled.
x = torch.ones(1, 1)
for _ in range(3):
script(x)
torch.testing.assert_close(eager(x), script(x))
# Now when an input hits the unrolled path, it will produce an
# incorrectly-sized tensor, since size=1 has been burned in.
x = torch.ones((8, 1))
torch.testing.assert_close(eager(x), script(x))
@skipIfTorchDynamo("too slow")
@unittest.skipIf(TEST_WITH_ASAN, "takes 10+ minutes on asan")
@unittest.skipIf(TEST_WITH_ROCM, "Tensor-likes are not close for nans")
def test_batch_norm(self):
def test(fn, args):
trace = torch.jit.trace(fn, args)
self.assertAllFused(trace.graph_for(*args))
# TODO: Are `NaN`'s actually ok here or did this pass silently before, because `equal_nan=True` was the
# default?
torch.testing.assert_close(fn(*args), trace(*args), equal_nan=True)
def bn(i, x):
return torch.batch_norm(i, x, x, x, x, False, 0.1, 1e-4, False).relu()
def bn_no_weight(i, x):
return torch.batch_norm(i, None, x, x, x, False, 0.1, 1e-4, False).relu()
def bn_no_bias(i, x):
return torch.batch_norm(i, x, None, x, x, False, 0.1, 1e-4, False).relu()
def bn_neither(i, x):
return torch.batch_norm(i, None, None, x, x, False, 0.1, 1e-4, False).relu()
for device in self.devices:
i = torch.randn(4, 16, 32, 40, device=device)
x = torch.randn(16, device=device)
for fn in [bn, bn_no_weight, bn_no_bias, bn_neither]:
test(fn, (i, x))
def test_profiler(self):
@torch.jit.script
def test(x, y, z):
return x * y + z
args = [torch.randn(4) for _ in range(3)]
with torch.autograd.profiler.profile() as prof:
for _ in range(3):
test(*args)
self.assertIn("fused_mul_add", prof.table())
def test_skip_grad_in_check(self):
@torch.jit.script
def foo(x):
return (x + 2) / 2
inp = torch.rand([4, 4])
for _ in range(3):
foo(inp)
inp.requires_grad_(True)
with torch.inference_mode():
for _ in range(3):
foo(inp)
g = torch.jit.last_executed_optimized_graph()
torch._C._jit_pass_inline(g)
torch._C._jit_pass_inline(g)
FileCheck().check_count("prim::If", 1, exactly=True).run(g)
def test_dynamic_shapes(self):
from functools import partial
n = 10
gen_tensor = (
lambda n: R(1, n),
lambda n: R(n, n),
lambda n: R(n, n).transpose(0, 1),
lambda n: R(n + 1, n + 1, 2)[:n, n, 0],
lambda n: R(n, n, 2)[:, :, 0],
lambda n: R(n, n + 1, n + 2, n + 3).to(memory_format=torch.channels_last),
)
with texpr_enable_strategy([("DYNAMIC", 20)]):
def foo(x, y, z):
return torch.sigmoid(torch.tanh(x))
foo.__disable_jit_function_caching__ = True
def fi(x, y, z):
return torch.tanh(x + y)
fi.__disable_jit_function_caching__ = True
def fum(x, y, z):
return torch.tanh(x + y) + z
fum.__disable_jit_function_caching__ = True
funcs = [foo, fi, fum]
with inline_fusion_groups():
for device in self.devices:
I = partial(torch.randint, 0, 100, device=device)
R = partial(torch.randn, device=device)
for i, func in enumerate(funcs):
num_args = i + 1
for gen in gen_tensor:
inps = (gen(n), gen(n), gen(n))
func_s = torch.jit.trace(func, inps, check_trace=False)
torch._C._jit_pass_erase_shape_information(func_s.graph)
for _ in range(2):
x, y, z = gen(n), gen(n), gen(n)
func_s(x, y, z)
for _incr in range(3):
func_s(*[gen(n + 1) for _ in range(3)])
g = torch.jit.last_executed_optimized_graph()
torch._C._jit_pass_inline(g)
torch._C._jit_pass_dce(g)
# We should see only one optimized kernel
FileCheck().check_count(
"TensorExprDynamicGuard", 1, exactly=True
).run(g)
self.assertEqual(func(*inps), func_s(*inps))
gen = gen_tensor[0]
inps = (gen(n), gen(n), gen(n))
foo_s = torch.jit.trace(foo, inps)
torch._C._jit_pass_erase_shape_information(foo_s.graph)
g_prev = None
for gen in gen_tensor:
for i in range(3):
foo_s(*[gen(n + i) for _ in range(3)])
inps = (gen(n), gen(n), gen(n))
self.assertEqual(foo_s(*inps), foo(*inps))
g = torch.jit.last_executed_optimized_graph()
torch._C._jit_pass_inline(g)
torch._C._jit_pass_dce(g)
FileCheck().check_count(
"TensorExprDynamicGuard", len(gen_tensor), exactly=True
).run(g)
@unittest.skipIf(not RUN_CUDA, "half-precision NNC fusion requires CUDA")
def test_autocast_up(self):
def f(x):
y = x._autocast_to_full_precision(True, True)
z = torch.exp(y)
return z
x = torch.rand((2, 2), dtype=torch.half, device="cuda")
scr = torch.jit.script(f)
scr(x)
scr(x)
self.assertLastGraphAllFused()
@unittest.skipIf(not RUN_CUDA, "half-precision NNC fusion requires CUDA")
def test_autocast_down(self):
def f(x):
y = torch.sigmoid(x)
z = y._autocast_to_reduced_precision(True, True, torch.half, torch.half)
return z
x = torch.rand((2, 2), dtype=torch.float, device="cuda")
scr = torch.jit.script(f)
scr(x)
scr(x)
self.assertLastGraphAllFused()
@unittest.skipIf(not LLVM_ENABLED, "Compiles with TensorExprKernel")
def test_to_dtype(self):
def f(x):
y = torch.sigmoid(x)
z = y._autocast_to_reduced_precision(True, True, torch.half, torch.bfloat16)
h = z._autocast_to_full_precision(True, True)
i = h.to(dtype=torch.bfloat16)
j = i.to(dtype=torch.float32)
return j
x = torch.rand((2, 2), dtype=torch.float32)
scr = torch.jit.trace(f, x)
scr(x)
scr(x)
self.assertLastGraphAllFused()
self.assertEqual(f(x), scr(x), atol=4e-3, rtol=4e-3)
bf_x = torch.rand((2, 2), dtype=torch.bfloat16)
bf_scr = torch.jit.trace(f, bf_x)
bf_scr(bf_x)
bf_scr(bf_x)
graph = bf_scr.graph_for(bf_x)
fusion_groups = self.findFusionGroups(graph)
self.assertEqual(len(fusion_groups), 2)
self.assertEqual(f(bf_x), bf_scr(bf_x), atol=4e-3, rtol=4e-3)
def test_with_strict_fusion(self):
def success(x):
with torch.jit.strict_fusion():
return x + x + x
scripted = self.checkScript(success, (torch.rand([4]),))
g = torch.jit.last_executed_optimized_graph()
FileCheck().check_not("aten::add").check("prim::TensorExprGroup").run(g)
def foo(x):
with torch.jit.strict_fusion():
return x + x + torch.rand([4]) + 3
with self.assertRaises(Exception) as error_out:
foo_s = torch.jit.script(foo)
foo_s(torch.rand([4]))
foo_s(torch.rand([4]))
print(torch.jit.last_executed_optimized_graph())
fc = FileCheck().check("Found unfused operators")
fc.check("aten::rand(SymInt[] size")
fc.check("torch.rand([4]").run(str(error_out.exception))
with warnings.catch_warnings(record=True) as warns:
foo(torch.rand([4]))
FileCheck().check("Only works in script mode").run(str(warns[0]))
def test_autodiff(x):
with torch.jit.strict_fusion():
return torch.rand([4]) + x + x + x
foo_s = torch.jit.script(test_autodiff)
inp = torch.rand([4], requires_grad=True)
with self.assertRaises(Exception) as error_out:
for _ in range(3):
foo_s(inp)
f = FileCheck().check("unfused operators").check("aten::rand")
f.run(str(error_out.exception))
def test_separate_fusions(x, y):
with torch.jit.strict_fusion():
return x + x + x, y + y + y
inp = torch.rand([4], requires_grad=True)
with self.assertRaises(Exception) as error_out:
for _ in range(3):
foo_s = torch.jit.script(test_separate_fusions)
foo_s(inp, inp)
f = FileCheck().check("Found multiple fusions")
f.run(str(error_out.exception))
def test_constant_chunk_shapes(self):
# We had an issue where buildShapeExpressions would fail as show below:
#
# %1 : Tensor = Constant[..] # not supported, we don't build this shape
# %2 : Tensor = Constant[..] # not supported
# %3 : Tensor = aten::add(%1, %2) # inputs not supported, we don't build shape
# ... = prim::ConstantChunk[..](%3) # it forgets to check whether input shapes exist, and fails
if self.dynamic_shapes:
self.skipTest("TODO: chunk dynamic shapes")
for device in self.devices:
def f(x, y):
r = torch.tensor(4)
z1, z2 = (x + y + r).chunk(2, dim=1)
return z1 * z2
x = torch.randn(4, 4, dtype=torch.float, device=device)
y = torch.randn(4, 4, dtype=torch.float, device=device)
ge = self.checkTrace(f, (x, y))
graph = ge.graph_for(x, y)
# make sure that we are actually testing the right scenario
FileCheck().check("with " + FUSION_GROUP + "_").check_count(
"ConstantChunk", 1, exactly=True
).run(str(graph))
f_traced = torch.jit.trace(f, (x, y))
for _ in range(4):
# make sure this doesn't error out
res = f_traced(x, y)
self.assertEqual(res, f(x, y))
@unittest.skipIf(not RUN_CUDA_HALF, "half-precision NNC fusion requires CUDA")
def test_pow_multiple_dtype(self):
# https://github.com/pytorch/pytorch/issues/75476
def fn(p: torch.Tensor, gamma: float = 2.0) -> torch.Tensor:
p = torch.sigmoid(p)
result = p**gamma
return result
x = torch.rand((2, 2), dtype=torch.half, device="cuda")
ref = fn(x)
script_fn = torch.jit.script(fn)
for _ in range(4):
res = script_fn(x)
self.assertEqual(ref, res)
| TestTEFuser |
python | cherrypy__cherrypy | cherrypy/tutorial/tut05_derived_objects.py | {
"start": 343,
"end": 1158
} | class ____:
"""Web page base class."""
# Store the page title in a class attribute
title = 'Untitled Page'
def header(self):
"""Render HTML layout header."""
return """
<html>
<head>
<title>%s</title>
<head>
<body>
<h2>%s</h2>
""" % (self.title, self.title)
def footer(self):
"""Render HTML layout footer."""
return """
</body>
</html>
"""
# Note that header and footer don't get their exposed attributes
# set to True. This isn't necessary since the user isn't supposed
# to call header or footer directly; instead, we'll call them from
# within the actually exposed handler methods defined in this
# class' subclasses.
| Page |
python | python-attrs__attrs | typing-examples/mypy.py | {
"start": 5916,
"end": 6156
} | class ____:
a: int = attr.ib(repr=True)
b: str = attr.ib(repr=False)
c: str = attr.ib(repr=lambda value: "c is for cookie")
d: bool = attr.ib(repr=str)
# Check some of our own types
@attr.s(eq=True, order=False)
| WithCustomRepr |
python | sqlalchemy__sqlalchemy | test/sql/test_compiler.py | {
"start": 132891,
"end": 193105
} | class ____(AssertsCompiledSQL, fixtures.TestBase):
__dialect__ = "default"
def test_binds(self):
for (
stmt,
expected_named_stmt,
expected_positional_stmt,
expected_default_params_dict,
expected_default_params_list,
test_param_dict,
expected_test_params_dict,
expected_test_params_list,
) in [
(
select(table1, table2).where(
and_(
table1.c.myid == table2.c.otherid,
table1.c.name == bindparam("mytablename"),
),
),
"SELECT mytable.myid, mytable.name, mytable.description, "
"myothertable.otherid, myothertable.othername FROM mytable, "
"myothertable WHERE mytable.myid = myothertable.otherid "
"AND mytable.name = :mytablename",
"SELECT mytable.myid, mytable.name, mytable.description, "
"myothertable.otherid, myothertable.othername FROM mytable, "
"myothertable WHERE mytable.myid = myothertable.otherid AND "
"mytable.name = ?",
{"mytablename": None},
[None],
{"mytablename": 5},
{"mytablename": 5},
[5],
),
(
select(table1).where(
or_(
table1.c.myid == bindparam("myid"),
table2.c.otherid == bindparam("myid"),
),
),
"SELECT mytable.myid, mytable.name, mytable.description "
"FROM mytable, myothertable WHERE mytable.myid = :myid "
"OR myothertable.otherid = :myid",
"SELECT mytable.myid, mytable.name, mytable.description "
"FROM mytable, myothertable WHERE mytable.myid = ? "
"OR myothertable.otherid = ?",
{"myid": None},
[None, None],
{"myid": 5},
{"myid": 5},
[5, 5],
),
(
text(
"SELECT mytable.myid, mytable.name, "
"mytable.description FROM "
"mytable, myothertable WHERE mytable.myid = :myid OR "
"myothertable.otherid = :myid"
),
"SELECT mytable.myid, mytable.name, mytable.description FROM "
"mytable, myothertable WHERE mytable.myid = :myid OR "
"myothertable.otherid = :myid",
"SELECT mytable.myid, mytable.name, mytable.description FROM "
"mytable, myothertable WHERE mytable.myid = ? OR "
"myothertable.otherid = ?",
{"myid": None},
[None, None],
{"myid": 5},
{"myid": 5},
[5, 5],
),
(
select(table1).where(
or_(
table1.c.myid == bindparam("myid", unique=True),
table2.c.otherid == bindparam("myid", unique=True),
),
),
"SELECT mytable.myid, mytable.name, mytable.description FROM "
"mytable, myothertable WHERE mytable.myid = "
":myid_1 OR myothertable.otherid = :myid_2",
"SELECT mytable.myid, mytable.name, mytable.description FROM "
"mytable, myothertable WHERE mytable.myid = ? "
"OR myothertable.otherid = ?",
{"myid_1": None, "myid_2": None},
[None, None],
{"myid_1": 5, "myid_2": 6},
{"myid_1": 5, "myid_2": 6},
[5, 6],
),
(
bindparam("test", type_=String, required=False) + text("'hi'"),
":test || 'hi'",
"? || 'hi'",
{"test": None},
[None],
{},
{"test": None},
[None],
),
(
# testing select.params() here - bindparam() objects
# must get required flag set to False
select(table1)
.where(
or_(
table1.c.myid == bindparam("myid"),
table2.c.otherid == bindparam("myotherid"),
),
)
.params({"myid": 8, "myotherid": 7}),
"SELECT mytable.myid, mytable.name, mytable.description FROM "
"mytable, myothertable WHERE mytable.myid = "
":myid OR myothertable.otherid = :myotherid",
"SELECT mytable.myid, mytable.name, mytable.description FROM "
"mytable, myothertable WHERE mytable.myid = "
"? OR myothertable.otherid = ?",
{"myid": 8, "myotherid": 7},
[8, 7],
{"myid": 5},
{"myid": 5, "myotherid": 7},
[5, 7],
),
(
select(table1).where(
or_(
table1.c.myid
== bindparam("myid", value=7, unique=True),
table2.c.otherid
== bindparam("myid", value=8, unique=True),
),
),
"SELECT mytable.myid, mytable.name, mytable.description FROM "
"mytable, myothertable WHERE mytable.myid = "
":myid_1 OR myothertable.otherid = :myid_2",
"SELECT mytable.myid, mytable.name, mytable.description FROM "
"mytable, myothertable WHERE mytable.myid = "
"? OR myothertable.otherid = ?",
{"myid_1": 7, "myid_2": 8},
[7, 8],
{"myid_1": 5, "myid_2": 6},
{"myid_1": 5, "myid_2": 6},
[5, 6],
),
]:
self.assert_compile(
stmt, expected_named_stmt, params=expected_default_params_dict
)
self.assert_compile(
stmt, expected_positional_stmt, dialect=sqlite.dialect()
)
nonpositional = stmt.compile()
positional = stmt.compile(dialect=sqlite.dialect())
pp = positional.params
eq_(
[pp[k] for k in positional.positiontup],
expected_default_params_list,
)
eq_(
nonpositional.construct_params(test_param_dict),
expected_test_params_dict,
)
pp = positional.construct_params(test_param_dict)
eq_(
[pp[k] for k in positional.positiontup],
expected_test_params_list,
)
# check that params() doesn't modify original statement
s = select(table1).where(
or_(
table1.c.myid == bindparam("myid"),
table2.c.otherid == bindparam("myotherid"),
),
)
s2 = s.params({"myid": 8, "myotherid": 7})
s3 = s2.params({"myid": 9})
assert s.compile().params == {"myid": None, "myotherid": None}
assert s2.compile().params == {"myid": 8, "myotherid": 7}
assert s3.compile().params == {"myid": 9, "myotherid": 7}
# test using same 'unique' param object twice in one compile
s = select(table1.c.myid).where(table1.c.myid == 12).scalar_subquery()
s2 = select(table1, s).where(table1.c.myid == s)
self.assert_compile(
s2,
"SELECT mytable.myid, mytable.name, mytable.description, "
"(SELECT mytable.myid FROM mytable WHERE mytable.myid = "
":myid_1) AS anon_1 FROM mytable WHERE mytable.myid = "
"(SELECT mytable.myid FROM mytable WHERE mytable.myid = :myid_1)",
)
positional = s2.compile(dialect=sqlite.dialect())
pp = positional.params
assert [pp[k] for k in positional.positiontup] == [12, 12]
# check that conflicts with "unique" params are caught
s = select(table1).where(
or_(table1.c.myid == 7, table1.c.myid == bindparam("myid_1")),
)
assert_raises_message(
exc.CompileError,
"conflicts with unique bind parameter of the same name",
str,
s,
)
s = select(table1).where(
or_(
table1.c.myid == 7,
table1.c.myid == 8,
table1.c.myid == bindparam("myid_1"),
),
)
assert_raises_message(
exc.CompileError,
"conflicts with unique bind parameter of the same name",
str,
s,
)
def test_bind_param_escaping(self):
"""general bind param escape unit tests added as a result of
#8053.
The final application of an escaped param name
was moved out of compiler and into DefaultExecutionContext in
related issue #8056.
However in #8113 we made this conditional to suit usage recipes
posted in the FAQ.
"""
SomeEnum = pep435_enum("SomeEnum")
one = SomeEnum("one", 1)
SomeEnum("two", 2)
t = Table(
"t",
MetaData(),
Column("_id", Integer, primary_key=True),
Column("_data", Enum(SomeEnum)),
)
class MyCompiler(compiler.SQLCompiler):
def bindparam_string(self, name, **kw):
kw["escaped_from"] = name
return super().bindparam_string('"%s"' % name, **kw)
dialect = default.DefaultDialect()
dialect.statement_compiler = MyCompiler
self.assert_compile(
t.insert(),
'INSERT INTO t (_id, _data) VALUES (:"_id", :"_data")',
dialect=dialect,
)
compiled = t.insert().compile(
dialect=dialect, compile_kwargs=dict(compile_keys=("_id", "_data"))
)
# not escaped
params = compiled.construct_params(
{"_id": 1, "_data": one}, escape_names=False
)
eq_(params, {"_id": 1, "_data": one})
# escaped by default
params = compiled.construct_params({"_id": 1, "_data": one})
eq_(params, {'"_id"': 1, '"_data"': one})
# escaped here as well
eq_(compiled.params, {'"_data"': None, '"_id"': None})
# bind processors aren't part of this
eq_(compiled._bind_processors, {"_data": mock.ANY})
dialect.paramstyle = "pyformat"
compiled = t.insert().compile(
dialect=dialect, compile_kwargs=dict(compile_keys=("_id", "_data"))
)
# FAQ recipe works
eq_(
compiled.string % compiled.params,
"INSERT INTO t (_id, _data) VALUES (None, None)",
)
def test_expanding_non_expanding_conflict(self):
"""test #8018"""
s = select(
literal("x").in_(bindparam("q")),
bindparam("q"),
)
with expect_raises_message(
exc.CompileError,
r"Can't reuse bound parameter name 'q' in both 'expanding' "
r"\(e.g. within an IN expression\) and non-expanding contexts. "
"If this parameter is to "
"receive a list/array value, set 'expanding=True' on "
"it for expressions that aren't IN, otherwise use "
"a different parameter name.",
):
str(s)
def test_unique_binds_no_clone_collision(self):
"""test #6824"""
bp = bindparam("foo", unique=True)
bpc1 = bp._clone(maintain_key=True)
bpc2 = bp._clone(maintain_key=True)
stmt1 = select(bp, bpc1, bpc2)
# OK, still strange that the double-dedupe logic is still *duping*
# the label name, but that's a different issue
self.assert_compile(
stmt1,
"SELECT :foo_1 AS anon_1, :foo_1 AS anon__1, :foo_1 AS anon__2",
)
def _test_binds_no_hash_collision(self):
"""test that construct_params doesn't corrupt dict
due to hash collisions"""
total_params = 100000
in_clause = [":in%d" % i for i in range(total_params)]
params = {"in%d" % i: i for i in range(total_params)}
t = text("text clause %s" % ", ".join(in_clause))
eq_(len(t.bindparams), total_params)
c = t.compile()
pp = c.construct_params(params)
eq_(len(set(pp)), total_params, "%s %s" % (len(set(pp)), len(pp)))
eq_(len(set(pp.values())), total_params)
def test_bind_anon_name_no_special_chars(self):
for paramstyle in "named", "pyformat":
dialect = default.DefaultDialect()
dialect.paramstyle = paramstyle
for name, named, pyformat in [
("%(my name)s", ":my_name_s_1", "%(my_name_s_1)s"),
("myname(foo)", ":myname_foo_1", "%(myname_foo_1)s"),
(
"this is a name",
":this_is_a_name_1",
"%(this_is_a_name_1)s",
),
("_leading_one", ":leading_one_1", "%(leading_one_1)s"),
("3leading_two", ":3leading_two_1", "%(3leading_two_1)s"),
("$leading_three", ":leading_three_1", "%(leading_three_1)s"),
("%(tricky", ":tricky_1", "%(tricky_1)s"),
("5(tricky", ":5_tricky_1", "%(5_tricky_1)s"),
]:
t = table("t", column(name, String))
expr = t.c[name] == "foo"
self.assert_compile(
expr,
"t.%s = %s"
% (
dialect.identifier_preparer.quote(name),
named if paramstyle == "named" else pyformat,
),
dialect=dialect,
checkparams={named[1:]: "foo"},
)
def test_bind_anon_name_special_chars_uniqueify_one(self):
# test that the chars are escaped before doing the counter,
# otherwise these become the same name and bind params will conflict
t = table("t", column("_3foo"), column("4%foo"))
self.assert_compile(
(t.c["_3foo"] == "foo") & (t.c["4%foo"] == "bar"),
't._3foo = :3foo_1 AND t."4%foo" = :4_foo_1',
checkparams={"3foo_1": "foo", "4_foo_1": "bar"},
)
def test_bind_anon_name_special_chars_uniqueify_two(self):
t = table("t", column("_3foo"), column("4(foo"))
self.assert_compile(
(t.c["_3foo"] == "foo") & (t.c["4(foo"] == "bar"),
't._3foo = :3foo_1 AND t."4(foo" = :4_foo_1',
checkparams={"3foo_1": "foo", "4_foo_1": "bar"},
)
def test_bind_given_anon_name_dont_double(self):
c = column("id")
l = c.label(None)
# new case as of Id810f485c5f7ed971529489b84694e02a3356d6d
subq = select(l).subquery()
# this creates a ColumnClause as a proxy to the Label() that has
# an anonymous name, so the column has one too.
anon_col = subq.c[0]
assert isinstance(anon_col.name, elements._anonymous_label)
# then when BindParameter is created, it checks the label
# and doesn't double up on the anonymous name which is uncachable
expr = anon_col > 5
self.assert_compile(
expr, "anon_1.id_1 > :param_1", checkparams={"param_1": 5}
)
# see also test_compare.py -> _statements_w_anonymous_col_names
# fixture for cache key
def test_bind_as_col(self):
t = table("foo", column("id"))
s = select(t, literal("lala").label("hoho"))
self.assert_compile(s, "SELECT foo.id, :param_1 AS hoho FROM foo")
assert [str(c) for c in s.subquery().c] == ["anon_1.id", "anon_1.hoho"]
def test_bind_callable(self):
expr = column("x") == bindparam("key", callable_=lambda: 12)
self.assert_compile(expr, "x = :key", {"x": 12})
def test_bind_params_missing(self):
assert_raises_message(
exc.InvalidRequestError,
r"A value is required for bind parameter 'x'",
select(table1)
.where(
and_(
table1.c.myid == bindparam("x", required=True),
table1.c.name == bindparam("y", required=True),
)
)
.compile()
.construct_params,
params=dict(y=5),
)
assert_raises_message(
exc.InvalidRequestError,
r"A value is required for bind parameter 'x'",
select(table1)
.where(table1.c.myid == bindparam("x", required=True))
.compile()
.construct_params,
)
assert_raises_message(
exc.InvalidRequestError,
r"A value is required for bind parameter 'x', "
"in parameter group 2",
select(table1)
.where(
and_(
table1.c.myid == bindparam("x", required=True),
table1.c.name == bindparam("y", required=True),
)
)
.compile()
.construct_params,
params=dict(y=5),
_group_number=2,
)
assert_raises_message(
exc.InvalidRequestError,
r"A value is required for bind parameter 'x', "
"in parameter group 2",
select(table1)
.where(table1.c.myid == bindparam("x", required=True))
.compile()
.construct_params,
_group_number=2,
)
@testing.combinations(
(
select(table1).where(table1.c.myid == 5),
select(table1).where(table1.c.myid == 10),
{"myid_1": 5},
{"myid_1": 10},
None,
None,
),
(
select(table1).where(
table1.c.myid
== bindparam(None, unique=True, callable_=lambda: 5)
),
select(table1).where(
table1.c.myid
== bindparam(None, unique=True, callable_=lambda: 10)
),
{"param_1": 5},
{"param_1": 10},
None,
None,
),
(
table1.update()
.where(table1.c.myid == 5)
.values(name="n1", description="d1"),
table1.update()
.where(table1.c.myid == 10)
.values(name="n2", description="d2"),
{"description": "d1", "myid_1": 5, "name": "n1"},
{"description": "d2", "myid_1": 10, "name": "n2"},
None,
None,
),
(
table1.update().where(table1.c.myid == 5),
table1.update().where(table1.c.myid == 10),
{"description": "d1", "myid_1": 5, "name": "n1"},
{"description": "d2", "myid_1": 10, "name": "n2"},
{"description": "d1", "name": "n1"},
{"description": "d2", "name": "n2"},
),
(
table1.update().where(
table1.c.myid
== bindparam(None, unique=True, callable_=lambda: 5)
),
table1.update().where(
table1.c.myid
== bindparam(None, unique=True, callable_=lambda: 10)
),
{"description": "d1", "param_1": 5, "name": "n1"},
{"description": "d2", "param_1": 10, "name": "n2"},
{"description": "d1", "name": "n1"},
{"description": "d2", "name": "n2"},
),
(
union(
select(table1).where(table1.c.myid == 5),
select(table1).where(table1.c.myid == 12),
),
union(
select(table1).where(table1.c.myid == 5),
select(table1).where(table1.c.myid == 15),
),
{"myid_1": 5, "myid_2": 12},
{"myid_1": 5, "myid_2": 15},
None,
None,
),
)
def test_construct_params_combine_extracted(
self, stmt1, stmt2, param1, param2, extparam1, extparam2
):
if extparam1:
keys = list(extparam1)
else:
keys = []
s1_cache_key = stmt1._generate_cache_key()
s1_compiled = stmt1.compile(cache_key=s1_cache_key, column_keys=keys)
s2_cache_key = stmt2._generate_cache_key()
eq_(s1_compiled.construct_params(params=extparam1), param1)
eq_(
s1_compiled.construct_params(
params=extparam1, extracted_parameters=s1_cache_key[1]
),
param1,
)
eq_(
s1_compiled.construct_params(
params=extparam2, extracted_parameters=s2_cache_key[1]
),
param2,
)
s1_compiled_no_cache_key = stmt1.compile()
assert_raises_message(
exc.CompileError,
"This compiled object has no original cache key; can't pass "
"extracted_parameters to construct_params",
s1_compiled_no_cache_key.construct_params,
extracted_parameters=s1_cache_key[1],
)
@testing.combinations(True, False, argnames="adapt_before_key")
def test_construct_params_w_bind_clones_post(self, adapt_before_key):
"""test that a BindParameter that has been cloned after the cache
key was generated still matches up when construct_params()
is called with an extracted parameter collection.
This case occurs now with the ORM as the ORM construction will
frequently run clause adaptation on elements of the statement within
compilation, after the cache key has been generated. this adaptation
hits BindParameter objects which will change their key as they
will usually have unqique=True. So the construct_params() process
when it links its internal bind_names to the cache key binds,
must do this badsed on bindparam._identifying_key, which does not
change across clones, rather than .key which usually will.
"""
stmt = select(table1.c.myid).where(table1.c.myid == 5)
# get the original bindparam.
original_bind = stmt._where_criteria[0].right
# it's anonymous so unique=True
is_true(original_bind.unique)
# test #7903 - adapt the statement *before* we make the cache
# key also
if adapt_before_key:
stmt = sql_util.ClauseAdapter(table1).traverse(stmt)
# cache key against the original param
cache_key = stmt._generate_cache_key()
# now adapt the statement
stmt_adapted = sql_util.ClauseAdapter(table1).traverse(stmt)
# new bind parameter has a different key but same
# identifying key
new_bind = stmt_adapted._where_criteria[0].right
eq_(original_bind._identifying_key, new_bind._identifying_key)
ne_(original_bind.key, new_bind.key)
# compile the adapted statement but set the cache key to the one
# generated from the unadapted statement. this will look like
# when the ORM runs clause adaption inside of visit_select, after
# the cache key is generated but before the compiler is given the
# core select statement to actually render.
compiled = stmt_adapted.compile(cache_key=cache_key)
# params set up as 5
eq_(
compiled.construct_params(
params={},
),
{"myid_1": 5},
)
# also works w the original cache key
eq_(
compiled.construct_params(
params={}, extracted_parameters=cache_key[1]
),
{"myid_1": 5},
)
# now make a totally new statement with the same cache key
new_stmt = select(table1.c.myid).where(table1.c.myid == 10)
new_cache_key = new_stmt._generate_cache_key()
# cache keys match
eq_(cache_key.key, new_cache_key.key)
# ensure we get "10" from construct params. if it matched
# based on .key and not ._identifying_key, it would not see that
# the bind parameter is part of the cache key.
eq_(
compiled.construct_params(
params={}, extracted_parameters=new_cache_key[1]
),
{"myid_1": 10},
)
@testing.combinations(True, False, argnames="adapt_before_key")
def test_construct_duped_params_w_bind_clones_post(self, adapt_before_key):
"""same as previous test_construct_params_w_bind_clones_post but
where the binds have been used
repeatedly, and the adaption occurs on a per-subquery basis.
test for #6391
"""
inner_stmt = select(table1.c.myid).where(table1.c.myid == 5)
stmt = union(inner_stmt, inner_stmt, inner_stmt)
# get the original bindparam.
original_bind = inner_stmt._where_criteria[0].right
# same bind three times
is_(stmt.selects[0]._where_criteria[0].right, original_bind)
is_(stmt.selects[1]._where_criteria[0].right, original_bind)
is_(stmt.selects[2]._where_criteria[0].right, original_bind)
# it's anonymous so unique=True
is_true(original_bind.unique)
# variant that exercises #7903
if adapt_before_key:
stmt = sql_util.ClauseAdapter(table1).traverse(stmt)
# cache key against the original param
cache_key = stmt._generate_cache_key()
# now adapt the statement and separately adapt the inner
# SELECTs, since if these subqueries are also ORM then they get adapted
# separately.
stmt_adapted = sql_util.ClauseAdapter(table1).traverse(stmt)
stmt_adapted.selects[0] = sql_util.ClauseAdapter(table1).traverse(
stmt_adapted.selects[0]
)
stmt_adapted.selects[1] = sql_util.ClauseAdapter(table1).traverse(
stmt_adapted.selects[1]
)
stmt_adapted.selects[2] = sql_util.ClauseAdapter(table1).traverse(
stmt_adapted.selects[2]
)
# new bind parameter has a different key but same
# identifying key
new_bind_one = stmt_adapted.selects[0]._where_criteria[0].right
new_bind_two = stmt_adapted.selects[1]._where_criteria[0].right
new_bind_three = stmt_adapted.selects[2]._where_criteria[0].right
for new_bind in (new_bind_one, new_bind_two, new_bind_three):
eq_(original_bind._identifying_key, new_bind._identifying_key)
ne_(original_bind.key, new_bind.key)
# compile the adapted statement but set the cache key to the one
# generated from the unadapted statement. this will look like
# when the ORM runs clause adaption inside of visit_select, after
# the cache key is generated but before the compiler is given the
# core select statement to actually render.
compiled = stmt_adapted.compile(cache_key=cache_key)
# the same parameter was split into three distinct ones, due to
# the separate adaption on a per-subquery basis. but they still
# refer to the original in their _cloned_set and this is what
# has to match up to what's in the cache key.
# params set up as 5
eq_(
compiled.construct_params(
params={},
),
{"myid_1": 5, "myid_2": 5, "myid_3": 5},
)
# also works w the original cache key
eq_(
compiled.construct_params(
params={}, extracted_parameters=cache_key[1]
),
{"myid_1": 5, "myid_2": 5, "myid_3": 5},
)
# now make a totally new statement with the same cache key
new_inner_stmt = select(table1.c.myid).where(table1.c.myid == 10)
new_stmt = union(new_inner_stmt, new_inner_stmt, new_inner_stmt)
new_cache_key = new_stmt._generate_cache_key()
# cache keys match
eq_(cache_key.key, new_cache_key.key)
# ensure we get "10" from construct params. if it matched
# based on .key and not ._identifying_key, it would not see that
# the bind parameter is part of the cache key.
# before #6391 was fixed you would see 5, 5, 10
eq_(
compiled.construct_params(
params={}, extracted_parameters=new_cache_key[1]
),
{"myid_1": 10, "myid_2": 10, "myid_3": 10},
)
def test_construct_params_w_bind_clones_pre(self):
"""test that a BindParameter that has been cloned before the cache
key was generated, and was doubled up just to make sure it has to
be unique, still matches up when construct_params()
is called with an extracted parameter collection.
other ORM features like optimized_compare() end up doing something
like this, such as if there are multiple "has()" or "any()" which would
have cloned the join condition and changed the values of bound
parameters.
"""
stmt = select(table1.c.myid).where(table1.c.myid == 5)
original_bind = stmt._where_criteria[0].right
# it's anonymous so unique=True
is_true(original_bind.unique)
b1 = original_bind._clone()
b1.value = 10
b2 = original_bind._clone()
b2.value = 12
# make a new statement that uses the clones as distinct
# parameters
modified_stmt = select(table1.c.myid).where(
or_(table1.c.myid == b1, table1.c.myid == b2)
)
cache_key = modified_stmt._generate_cache_key()
compiled = modified_stmt.compile(cache_key=cache_key)
eq_(
compiled.construct_params(params={}),
{"myid_1": 10, "myid_2": 12},
)
# make a new statement doing the same thing and make sure
# the binds match up correctly
new_stmt = select(table1.c.myid).where(table1.c.myid == 8)
new_original_bind = new_stmt._where_criteria[0].right
new_b1 = new_original_bind._clone()
new_b1.value = 20
new_b2 = new_original_bind._clone()
new_b2.value = 18
modified_new_stmt = select(table1.c.myid).where(
or_(table1.c.myid == new_b1, table1.c.myid == new_b2)
)
new_cache_key = modified_new_stmt._generate_cache_key()
# cache keys match
eq_(cache_key.key, new_cache_key.key)
# ensure we get both values
eq_(
compiled.construct_params(
params={}, extracted_parameters=new_cache_key[1]
),
{"myid_1": 20, "myid_2": 18},
)
@testing.combinations("default", "default_qmark", argnames="dialect")
def test_literal_execute_combinations(self, dialect):
"""test #10142"""
a = bindparam("a", value="abc", literal_execute=True)
b = bindparam("b", value="def", literal_execute=True)
c = bindparam("c", value="ghi", literal_execute=True)
self.assert_compile(
select(a, b, a, c),
"SELECT 'abc' AS anon_1, 'def' AS anon_2, 'abc' AS anon__1, "
"'ghi' AS anon_3",
render_postcompile=True,
dialect=dialect,
)
def test_tuple_expanding_in_no_values(self):
expr = tuple_(table1.c.myid, table1.c.name).in_(
[(1, "foo"), (5, "bar")]
)
self.assert_compile(
expr,
"(mytable.myid, mytable.name) IN (__[POSTCOMPILE_param_1])",
checkparams={"param_1": [(1, "foo"), (5, "bar")]},
check_post_param={"param_1": [(1, "foo"), (5, "bar")]},
check_literal_execute={},
)
compiled = expr.compile()
(
to_update,
replacement_expr,
) = compiled._literal_execute_expanding_parameter(
"param_1", expr.right, [(1, "foo"), (5, "bar")]
)
eq_(
to_update,
[
("param_1_1_1", 1),
("param_1_1_2", "foo"),
("param_1_2_1", 5),
("param_1_2_2", "bar"),
],
)
eq_(
replacement_expr,
"(:param_1_1_1, :param_1_1_2), (:param_1_2_1, :param_1_2_2)",
)
def test_tuple_expanding_in_values(self):
expr = tuple_(table1.c.myid, table1.c.name).in_(
[(1, "foo"), (5, "bar")]
)
dialect = default.DefaultDialect()
dialect.tuple_in_values = True
self.assert_compile(
tuple_(table1.c.myid, table1.c.name).in_([(1, "foo"), (5, "bar")]),
"(mytable.myid, mytable.name) IN (__[POSTCOMPILE_param_1])",
dialect=dialect,
checkparams={"param_1": [(1, "foo"), (5, "bar")]},
check_post_param={"param_1": [(1, "foo"), (5, "bar")]},
check_literal_execute={},
)
compiled = expr.compile(dialect=dialect)
(
to_update,
replacement_expr,
) = compiled._literal_execute_expanding_parameter(
"param_1", expr.right, [(1, "foo"), (5, "bar")]
)
eq_(
to_update,
[
("param_1_1_1", 1),
("param_1_1_2", "foo"),
("param_1_2_1", 5),
("param_1_2_2", "bar"),
],
)
eq_(
replacement_expr,
"VALUES (:param_1_1_1, :param_1_1_2), "
"(:param_1_2_1, :param_1_2_2)",
)
def test_construct_params_repeated_postcompile_params_one(self):
"""test for :ticket:`6202` one - name repeated in positiontup
(e.g. SQL Server using TOP)
"""
t = table("t", column("x"))
stmt = (
select(1)
.where(t.c.x == bindparam(None, value="10", literal_execute=True))
.scalar_subquery()
)
u = union(select(stmt), select(stmt)).subquery().select()
compiled = u.compile(
dialect=default.DefaultDialect(paramstyle="format"),
compile_kwargs={"render_postcompile": True},
)
eq_ignore_whitespace(
compiled.string,
"SELECT anon_2.anon_1 FROM (SELECT (SELECT 1 FROM t "
"WHERE t.x = '10') AS anon_1 UNION SELECT "
"(SELECT 1 FROM t WHERE t.x = '10') AS anon_1) AS anon_2",
)
eq_(compiled.construct_params(_no_postcompile=True), {"param_1": "10"})
def test_construct_params_repeated_postcompile_params_two(self):
"""test for :ticket:`6202` two - same param name used twice
(e.g. Oracle LIMIT)
"""
t = table("t", column("x"))
bp = bindparam(None, value="10")
stmt = (
select(1)
.where(t.c.x == bp.render_literal_execute())
.scalar_subquery()
)
stmt2 = (
select(1)
.where(t.c.x == bp.render_literal_execute())
.scalar_subquery()
)
u = union(select(stmt), select(stmt2)).subquery().select()
compiled = u.compile(
dialect=default.DefaultDialect(paramstyle="named"),
compile_kwargs={"render_postcompile": True},
)
eq_ignore_whitespace(
compiled.string,
"SELECT anon_2.anon_1 FROM (SELECT (SELECT 1 "
"FROM t WHERE t.x = '10') AS anon_1 UNION SELECT "
"(SELECT 1 FROM t WHERE t.x = '10') AS anon_3) AS anon_2",
)
eq_(compiled.construct_params(_no_postcompile=True), {"param_1": "10"})
def test_construct_params_positional_plain_repeated(self):
t = table("t", column("x"))
stmt = (
select(1)
.where(t.c.x == bindparam(None, value="10"))
.where(t.c.x == bindparam(None, value="12", literal_execute=True))
.scalar_subquery()
)
u = union(select(stmt), select(stmt)).subquery().select()
compiled = u.compile(
dialect=default.DefaultDialect(paramstyle="format"),
compile_kwargs={"render_postcompile": True},
)
eq_ignore_whitespace(
compiled.string,
"SELECT anon_2.anon_1 FROM (SELECT (SELECT 1 FROM t "
"WHERE t.x = %s AND t.x = '12') AS anon_1 "
"UNION SELECT (SELECT 1 FROM t WHERE t.x = %s AND t.x = '12') "
"AS anon_1) AS anon_2",
)
eq_(
compiled.construct_params(_no_postcompile=True),
{"param_1": "10", "param_2": "12"},
)
eq_(compiled.positiontup, ["param_1", "param_1"])
def test_tuple_clauselist_in(self):
self.assert_compile(
tuple_(table1.c.myid, table1.c.name).in_(
[tuple_(table2.c.otherid, table2.c.othername)]
),
"(mytable.myid, mytable.name) IN "
"((myothertable.otherid, myothertable.othername))",
)
@testing.variation("scalar_subquery", [True, False])
def test_select_in(self, scalar_subquery):
stmt = select(table2.c.otherid, table2.c.othername)
if scalar_subquery:
stmt = stmt.scalar_subquery()
self.assert_compile(
tuple_(table1.c.myid, table1.c.name).in_(stmt),
"(mytable.myid, mytable.name) IN (SELECT "
"myothertable.otherid, myothertable.othername FROM myothertable)",
)
def test_expanding_parameter(self):
self.assert_compile(
tuple_(table1.c.myid, table1.c.name).in_(
bindparam("foo", expanding=True)
),
"(mytable.myid, mytable.name) IN (__[POSTCOMPILE_foo])",
)
dialect = default.DefaultDialect()
dialect.tuple_in_values = True
self.assert_compile(
tuple_(table1.c.myid, table1.c.name).in_(
bindparam("foo", expanding=True)
),
"(mytable.myid, mytable.name) IN (__[POSTCOMPILE_foo])",
dialect=dialect,
)
self.assert_compile(
table1.c.myid.in_(bindparam("foo", expanding=True)),
"mytable.myid IN (__[POSTCOMPILE_foo])",
)
def test_limit_offset_select_literal_binds(self):
stmt = select(1).limit(5).offset(6)
self.assert_compile(
stmt, "SELECT 1 LIMIT 5 OFFSET 6", literal_binds=True
)
def test_limit_offset_compound_select_literal_binds(self):
stmt = select(1).union(select(2)).limit(5).offset(6)
self.assert_compile(
stmt,
"SELECT 1 UNION SELECT 2 LIMIT 5 OFFSET 6",
literal_binds=True,
)
def test_fetch_offset_select_literal_binds(self):
stmt = select(1).fetch(5).offset(6)
self.assert_compile(
stmt,
"SELECT 1 OFFSET 6 ROWS FETCH FIRST 5 ROWS ONLY",
literal_binds=True,
)
def test_fetch_offset_compound_select_literal_binds(self):
stmt = select(1).union(select(2)).fetch(5).offset(6)
self.assert_compile(
stmt,
"SELECT 1 UNION SELECT 2 OFFSET 6 ROWS FETCH FIRST 5 ROWS ONLY",
literal_binds=True,
)
def test_multiple_col_binds(self):
self.assert_compile(
select(literal_column("*")).where(
or_(
table1.c.myid == 12,
table1.c.myid == "asdf",
table1.c.myid == "foo",
),
),
"SELECT * FROM mytable WHERE mytable.myid = :myid_1 "
"OR mytable.myid = :myid_2 OR mytable.myid = :myid_3",
)
@testing.combinations("plain", "expanding", argnames="exprtype")
def test_literal_bind_typeerror(self, exprtype):
"""test #8800"""
if exprtype == "expanding":
stmt = select(table1).where(
table1.c.myid.in_([("tuple",), ("tuple",)])
)
elif exprtype == "plain":
stmt = select(table1).where(table1.c.myid == ("tuple",))
else:
assert False
with expect_raises_message(
exc.CompileError,
r"Could not render literal value \"\(\'tuple\',\)\" "
r"with datatype INTEGER; see parent "
r"stack trace for more detail.",
):
stmt.compile(compile_kwargs={"literal_binds": True})
    @testing.combinations("plain", "expanding", argnames="exprtype")
    def test_literal_bind_dont_know_how_to_quote(self, exprtype):
        """test #8800"""

        # a user-defined type that provides no literal rendering; asking
        # for literal_binds against it must raise CompileError rather than
        # emit broken SQL
        class MyType(UserDefinedType):
            def get_col_spec(self, **kw):
                return "MYTYPE"

        col = column("x", MyType())
        if exprtype == "expanding":
            stmt = select(table1).where(col.in_([("tuple",), ("tuple",)]))
        elif exprtype == "plain":
            stmt = select(table1).where(col == ("tuple",))
        else:
            assert False
        with expect_raises_message(
            exc.CompileError,
            r"No literal value renderer is available for literal "
            r"value \"\('tuple',\)\" with datatype MYTYPE",
        ):
            stmt.compile(compile_kwargs={"literal_binds": True})
@testing.fixture
def ansi_compiler_fixture(self):
dialect = default.DefaultDialect()
class Compiler(compiler.StrSQLCompiler):
ansi_bind_rules = True
dialect.statement_compiler = Compiler
return dialect
    # each combination: (id, statement, expected SQL with POSTCOMPILE
    # placeholders, extra assert_compile kwargs checking the
    # literal-execute / post-compile parameter sets)
    @testing.combinations(
        (
            "one",
            select(literal("someliteral")),
            "SELECT __[POSTCOMPILE_param_1] AS anon_1",
            dict(
                check_literal_execute={"param_1": "someliteral"},
                check_post_param={},
            ),
        ),
        (
            "two",
            select(table1.c.myid + 3),
            "SELECT mytable.myid + __[POSTCOMPILE_myid_1] "
            "AS anon_1 FROM mytable",
            dict(check_literal_execute={"myid_1": 3}, check_post_param={}),
        ),
        (
            "three",
            select(table1.c.myid.in_([4, 5, 6])),
            "SELECT mytable.myid IN (__[POSTCOMPILE_myid_1]) "
            "AS anon_1 FROM mytable",
            dict(
                check_literal_execute={"myid_1": [4, 5, 6]},
                check_post_param={},
            ),
        ),
        (
            "four",
            select(func.mod(table1.c.myid, 5)),
            "SELECT mod(mytable.myid, __[POSTCOMPILE_mod_2]) "
            "AS mod_1 FROM mytable",
            dict(check_literal_execute={"mod_2": 5}, check_post_param={}),
        ),
        (
            # empty IN: both sides become literal-execute parameters
            "five",
            select(literal("foo").in_([])),
            "SELECT __[POSTCOMPILE_param_1] IN (__[POSTCOMPILE_param_2]) "
            "AS anon_1",
            dict(
                check_literal_execute={"param_1": "foo", "param_2": []},
                check_post_param={},
            ),
        ),
        (
            # bytes literal
            "six",
            select(literal(util.b("foo"))),
            "SELECT __[POSTCOMPILE_param_1] AS anon_1",
            dict(
                check_literal_execute={"param_1": util.b("foo")},
                check_post_param={},
            ),
        ),
        (
            # callable_ bind value is resolved for the literal check
            "seven",
            select(table1.c.myid == bindparam("foo", callable_=lambda: 5)),
            "SELECT mytable.myid = __[POSTCOMPILE_foo] AS anon_1 FROM mytable",
            dict(check_literal_execute={"foo": 5}, check_post_param={}),
        ),
        argnames="stmt, expected, kw",
        id_="iaaa",
    )
    def test_render_binds_as_literal(
        self, ansi_compiler_fixture, stmt, expected, kw
    ):
        """test a compiler that renders binds inline into
        SQL in the columns clause."""
        self.assert_compile(
            stmt, expected, dialect=ansi_compiler_fixture, **kw
        )
def test_render_literal_execute_parameter(self):
self.assert_compile(
select(table1.c.myid).where(
table1.c.myid == bindparam("foo", 5, literal_execute=True)
),
"SELECT mytable.myid FROM mytable "
"WHERE mytable.myid = __[POSTCOMPILE_foo]",
)
def test_render_literal_execute_parameter_literal_binds(self):
self.assert_compile(
select(table1.c.myid).where(
table1.c.myid == bindparam("foo", 5, literal_execute=True)
),
"SELECT mytable.myid FROM mytable WHERE mytable.myid = 5",
literal_binds=True,
)
def test_render_literal_execute_sent_parameter_literal_binds(self):
"""test #6863"""
stmt = select(table1.c.myid).where(
table1.c.myid == bindparam("foo", 5, literal_execute=True)
)
eq_ignore_whitespace(
str(
stmt.compile(
compile_kwargs={
"literal_binds": True,
"literal_execute": True,
}
)
),
"SELECT mytable.myid FROM mytable WHERE mytable.myid = 5",
)
def test_render_literal_execute_parameter_render_postcompile(self):
self.assert_compile(
select(table1.c.myid).where(
table1.c.myid == bindparam("foo", 5, literal_execute=True)
),
"SELECT mytable.myid FROM mytable WHERE mytable.myid = 5",
render_postcompile=True,
)
def test_render_expanding_parameter(self):
self.assert_compile(
select(table1.c.myid).where(
table1.c.myid.in_(bindparam("foo", expanding=True))
),
"SELECT mytable.myid FROM mytable "
"WHERE mytable.myid IN (__[POSTCOMPILE_foo])",
)
def test_render_expanding_parameter_literal_binds(self):
self.assert_compile(
select(table1.c.myid).where(
table1.c.myid.in_(bindparam("foo", [1, 2, 3], expanding=True))
),
"SELECT mytable.myid FROM mytable "
"WHERE mytable.myid IN (1, 2, 3)",
literal_binds=True,
)
def test_render_expanding_parameter_render_postcompile(self):
# renders the IN the old way, essentially, but creates the bound
# parameters on the fly.
self.assert_compile(
select(table1.c.myid).where(
table1.c.myid.in_(bindparam("foo", [1, 2, 3], expanding=True))
),
"SELECT mytable.myid FROM mytable "
"WHERE mytable.myid IN (:foo_1, :foo_2, :foo_3)",
render_postcompile=True,
checkparams={"foo_1": 1, "foo_2": 2, "foo_3": 3},
)
    # each combination: (statement, expected SQL, expect_warning, params);
    # a bound parameter carrying value=None renders literal NULL with a
    # warning, while None coerced up front to null() renders silently
    @testing.combinations(
        (
            select(table1.c.myid).where(
                table1.c.myid == bindparam("x", value=None)
            ),
            "SELECT mytable.myid FROM mytable WHERE mytable.myid = NULL",
            True,
            None,
        ),
        (
            select(table1.c.myid).where(table1.c.myid == None),
            "SELECT mytable.myid FROM mytable WHERE mytable.myid IS NULL",
            False,
            None,
        ),
        (
            select(table1.c.myid, None),
            "SELECT mytable.myid, NULL AS anon_1 FROM mytable",
            False,
            None,
        ),
        (
            # is_() renders IS NULL without warning even for a bind
            select(table1.c.myid).where(
                table1.c.myid.is_(bindparam("x", value=None))
            ),
            "SELECT mytable.myid FROM mytable WHERE mytable.myid IS NULL",
            False,
            None,
        ),
        (
            # as of SQLAlchemy 1.4, values like these are considered to be
            # SQL expressions up front, so it is coerced to null()
            # immediately and no bindparam() is created
            table1.insert().values({"myid": None}),
            "INSERT INTO mytable (myid) VALUES (NULL)",
            False,
            None,
        ),
        (table1.insert(), "INSERT INTO mytable DEFAULT VALUES", False, {}),
        (
            table1.update().values({"myid": None}),
            "UPDATE mytable SET myid=NULL",
            False,
            None,
        ),
        (
            # the un-valued bindparam in the WHERE clause renders NULL
            # and warns
            table1.update()
            .where(table1.c.myid == bindparam("x"))
            .values({"myid": None}),
            "UPDATE mytable SET myid=NULL WHERE mytable.myid = NULL",
            True,
            None,
        ),
    )
    def test_render_nulls_literal_binds(self, stmt, expected, warns, params):
        """render statements under literal_binds where NULLs are involved,
        asserting the warning for binds that silently render NULL."""
        if warns:
            with testing.expect_warnings(
                r"Bound parameter '.*?' rendering literal "
                "NULL in a SQL expression"
            ):
                self.assert_compile(
                    stmt, expected, literal_binds=True, params=params
                )
        else:
            self.assert_compile(
                stmt, expected, literal_binds=True, params=params
            )
standalone_escape = testing.combinations(
("normalname", "normalname"),
("_name", "_name"),
("[BracketsAndCase]", "_BracketsAndCase_"),
("has spaces", "has_spaces"),
argnames="paramname, expected",
)
@standalone_escape
@testing.variation("use_positional", [True, False])
def test_standalone_bindparam_escape(
self, paramname, expected, use_positional
):
stmt = select(table1.c.myid).where(
table1.c.name == bindparam(paramname, value="x")
)
if use_positional:
self.assert_compile(
stmt,
"SELECT mytable.myid FROM mytable WHERE mytable.name = ?",
params={paramname: "y"},
checkpositional=("y",),
dialect="sqlite",
)
else:
self.assert_compile(
stmt,
"SELECT mytable.myid FROM mytable WHERE mytable.name = :%s"
% (expected,),
params={paramname: "y"},
checkparams={expected: "y"},
dialect="default",
)
    @testing.variation("use_positional", [True, False])
    def test_standalone_bindparam_escape_collision(self, use_positional):
        """this case is currently not supported

        it's kinda bad since positional takes the unescaped param
        while non positional takes the escaped one.

        """
        # "[brackets]" escapes to "_brackets_", colliding with the second
        # parameter's literal name
        stmt = select(table1.c.myid).where(
            table1.c.name == bindparam("[brackets]", value="x"),
            table1.c.description == bindparam("_brackets_", value="y"),
        )
        if use_positional:
            # positional: both slots resolve to the unescaped
            # "[brackets]" value, so "a" is emitted twice and "b" is lost
            self.assert_compile(
                stmt,
                "SELECT mytable.myid FROM mytable WHERE mytable.name = ? "
                "AND mytable.description = ?",
                params={"[brackets]": "a", "_brackets_": "b"},
                checkpositional=("a", "a"),
                dialect="sqlite",
            )
        else:
            # named: both names collapse to the escaped "_brackets_", so
            # only the "b" value survives in the parameter dictionary
            self.assert_compile(
                stmt,
                "SELECT mytable.myid FROM mytable WHERE mytable.name = "
                ":_brackets_ AND mytable.description = :_brackets_",
                params={"[brackets]": "a", "_brackets_": "b"},
                checkparams={"_brackets_": "b"},
                dialect="default",
            )
paramstyle = testing.variation("paramstyle", ["named", "qmark", "numeric"])
    @standalone_escape
    @paramstyle
    def test_standalone_bindparam_escape_expanding_compile(
        self, paramname, expected, paramstyle
    ):
        """escaped expanding bindparams rendered via assert_compile with
        render_postcompile, under each paramstyle."""
        stmt = select(table1.c.myid).where(
            table1.c.name.in_(bindparam(paramname, value=["a", "b"]))
        )
        if paramstyle.qmark:
            # positional: one ? per expanded member
            self.assert_compile(
                stmt,
                "SELECT mytable.myid FROM mytable "
                "WHERE mytable.name IN (?, ?)",
                params={paramname: ["y", "z", "q"]},
                checkpositional=("y", "z", "q"),
                dialect="sqlite",
                render_postcompile=True,
            )
        elif paramstyle.numeric:
            self.assert_compile(
                stmt,
                "SELECT mytable.myid FROM mytable "
                "WHERE mytable.name IN (:1, :2)",
                params={paramname: ["y", "z", "q"]},
                checkpositional=("y", "z", "q"),
                dialect=sqlite.dialect(paramstyle="numeric"),
                render_postcompile=True,
            )
        elif paramstyle.named:
            # named: expanded members use the escaped name with suffixes
            self.assert_compile(
                stmt,
                "SELECT mytable.myid FROM mytable WHERE mytable.name IN "
                "(:%s_1, :%s_2)" % (expected, expected),
                params={paramname: ["y", "z"]},
                checkparams={
                    "%s_1" % expected: "y",
                    "%s_2" % expected: "z",
                },
                dialect="default",
                render_postcompile=True,
            )
        else:
            paramstyle.fail()
    @standalone_escape
    @paramstyle
    def test_standalone_bindparam_escape_expanding(
        self, paramname, expected, paramstyle
    ):
        """escaped expanding bindparams processed manually, step by step,
        mirroring what DefaultDialect._init_compiled() does."""
        stmt = select(table1.c.myid).where(
            table1.c.name.in_(bindparam(paramname, value=["a", "b"]))
        )
        # this is what DefaultDialect actually does.
        # this should be matched to DefaultDialect._init_compiled()
        if paramstyle.qmark:
            dialect = default.DefaultDialect(paramstyle="qmark")
        elif paramstyle.numeric:
            dialect = default.DefaultDialect(paramstyle="numeric")
        else:
            dialect = default.DefaultDialect()
        compiled = stmt.compile(dialect=dialect)
        compiled.construct_params(
            {paramname: ["y", "z"]}, escape_names=False
        ) if False else None
        checkparams = compiled.construct_params(
            {paramname: ["y", "z"]}, escape_names=False
        )
        # nothing actually happened. if the compiler had
        # render_postcompile set, the
        # above weird param thing happens
        eq_(checkparams, {paramname: ["y", "z"]})
        expanded_state = compiled._process_parameters_for_postcompile(
            checkparams
        )
        # after postcompile processing, the expanded members carry the
        # escaped name with numeric suffixes
        eq_(
            expanded_state.additional_parameters,
            {f"{expected}_1": "y", f"{expected}_2": "z"},
        )
        if paramstyle.qmark or paramstyle.numeric:
            eq_(
                expanded_state.positiontup,
                [f"{expected}_1", f"{expected}_2"],
            )
    @paramstyle
    def test_expanding_in_repeated(self, paramstyle):
        """the same expanding parameters repeated on both sides of a
        UNION must render and position correctly under each paramstyle."""
        stmt = (
            select(table1)
            .where(
                table1.c.name.in_(
                    bindparam("uname", value=["h", "e"], expanding=True)
                )
                | table1.c.name.in_(
                    bindparam("uname2", value=["y"], expanding=True)
                )
            )
            .where(table1.c.myid == 8)
        )
        stmt = stmt.union(
            select(table1)
            .where(
                table1.c.name.in_(
                    bindparam("uname", value=["h", "e"], expanding=True)
                )
                | table1.c.name.in_(
                    bindparam("uname2", value=["y"], expanding=True)
                )
            )
            .where(table1.c.myid == 9)
        ).order_by("myid")
        if paramstyle.qmark:
            self.assert_compile(
                stmt,
                "SELECT mytable.myid, mytable.name, mytable.description "
                "FROM mytable "
                "WHERE (mytable.name IN (?, ?) OR "
                "mytable.name IN (?)) "
                "AND mytable.myid = ? "
                "UNION SELECT mytable.myid, mytable.name, mytable.description "
                "FROM mytable "
                "WHERE (mytable.name IN (?, ?) OR "
                "mytable.name IN (?)) "
                "AND mytable.myid = ? ORDER BY myid",
                params={"uname": ["y", "z"], "uname2": ["a"]},
                checkpositional=("y", "z", "a", 8, "y", "z", "a", 9),
                dialect="sqlite",
                render_postcompile=True,
            )
        elif paramstyle.numeric:
            # numeric paramstyle numbers the non-expanding params first
            # (:1, :2) with the expanded members following (:3-:5), and
            # checkpositional follows that numbering: (8, 9, "y", "z", "a")
            self.assert_compile(
                stmt,
                "SELECT mytable.myid, mytable.name, mytable.description "
                "FROM mytable "
                "WHERE (mytable.name IN (:3, :4) OR "
                "mytable.name IN (:5)) "
                "AND mytable.myid = :1 "
                "UNION SELECT mytable.myid, mytable.name, mytable.description "
                "FROM mytable "
                "WHERE (mytable.name IN (:3, :4) OR "
                "mytable.name IN (:5)) "
                "AND mytable.myid = :2 ORDER BY myid",
                params={"uname": ["y", "z"], "uname2": ["a"]},
                checkpositional=(8, 9, "y", "z", "a"),
                dialect=sqlite.dialect(paramstyle="numeric"),
                render_postcompile=True,
            )
        elif paramstyle.named:
            self.assert_compile(
                stmt,
                "SELECT mytable.myid, mytable.name, mytable.description "
                "FROM mytable "
                "WHERE (mytable.name IN (:uname_1, :uname_2) OR "
                "mytable.name IN (:uname2_1)) "
                "AND mytable.myid = :myid_1 "
                "UNION SELECT mytable.myid, mytable.name, mytable.description "
                "FROM mytable "
                "WHERE (mytable.name IN (:uname_1, :uname_2) OR "
                "mytable.name IN (:uname2_1)) "
                "AND mytable.myid = :myid_2 ORDER BY myid",
                params={"uname": ["y", "z"], "uname2": ["a"]},
                checkparams={
                    "myid_1": 8,
                    "myid_2": 9,
                    "uname_1": "y",
                    "uname_2": "z",
                    "uname2_1": "a",
                },
                dialect="default",
                render_postcompile=True,
            )
        else:
            paramstyle.fail()
def test_numeric_dollar_bindparam(self):
stmt = table1.select().where(
table1.c.name == "a", table1.c.myid.in_([1, 2])
)
self.assert_compile(
stmt,
"SELECT mytable.myid, mytable.name, mytable.description "
"FROM mytable "
"WHERE mytable.name = $1 "
"AND mytable.myid IN ($2, $3)",
checkpositional=("a", 1, 2),
dialect=default.DefaultDialect(paramstyle="numeric_dollar"),
render_postcompile=True,
)
    def test_bind_escape_extensibility(self):
        """test #8994, extensibility of the bind escape character lookup.

        The main test for actual known characters passing through for bound
        params is in
        sqlalchemy.testing.suite.test_dialect.DifficultParametersTest.

        """
        dialect = default.DefaultDialect()

        # subclass overriding the escape table; each key is replaced by
        # its single-character value in rendered bind names
        class Compiler(compiler.StrSQLCompiler):
            bindname_escape_characters = {
                "%": "P",
                # chars that need regex escaping
                "(": "A",
                ")": "Z",
                "*": "S",
                "+": "L",
                # completely random "normie" character
                "8": "E",
                ":": "C",
                # left bracket is not escaped, right bracket is
                "]": "_",
                " ": "_",
            }

        dialect.statement_compiler = Compiler
        # per the table above: '8'->'E', '+'->'L', '('->'A', ')'->'Z',
        # ']'->'_' and ' '->'_', while '[' passes through unescaped
        self.assert_compile(
            select(
                bindparam("number8ight"),
                bindparam("plus+sign"),
                bindparam("par(en)s and [brackets]"),
            ),
            "SELECT :numberEight AS anon_1, :plusLsign AS anon_2, "
            ":parAenZs_and_[brackets_ AS anon_3",
            dialect=dialect,
        )
| BindParameterTest |
python | run-llama__llama_index | llama-index-integrations/storage/docstore/llama-index-storage-docstore-elasticsearch/llama_index/storage/docstore/elasticsearch/base.py | {
"start": 255,
"end": 1730
} | class ____(KVDocumentStore):
"""
Elasticsearch Document (Node) store.
An Elasticsearch store for Document and Node objects.
Args:
elasticsearch_kvstore (ElasticsearchKVStore): Elasticsearch key-value store
namespace (str): namespace for the docstore
"""
def __init__(
self,
elasticsearch_kvstore: ElasticsearchKVStore,
namespace: Optional[str] = None,
node_collection_index: str = None,
ref_doc_collection_index: str = None,
metadata_collection_index: str = None,
batch_size: int = DEFAULT_BATCH_SIZE,
) -> None:
"""Init a ElasticsearchDocumentStore."""
super().__init__(
elasticsearch_kvstore, namespace=namespace, batch_size=batch_size
)
if node_collection_index:
self._node_collection = node_collection_index
else:
self._node_collection = f"llama_index-docstore.data-{self._namespace}"
if ref_doc_collection_index:
self._ref_doc_collection = ref_doc_collection_index
else:
self._ref_doc_collection = (
f"llama_index-docstore.ref_doc_info-{self._namespace}"
)
if metadata_collection_index:
self._metadata_collection = metadata_collection_index
else:
self._metadata_collection = (
f"llama_index-docstore.metadata-{self._namespace}"
)
| ElasticsearchDocumentStore |
python | openai__openai-python | src/openai/types/beta/chatkit/chatkit_thread.py | {
"start": 459,
"end": 700
} | class ____(BaseModel):
reason: Optional[str] = None
"""Reason that the thread was locked. Defaults to null when no reason is recorded."""
type: Literal["locked"]
"""Status discriminator that is always `locked`."""
| StatusLocked |
python | django__django | django/contrib/auth/forms.py | {
"start": 20462,
"end": 20798
} | class ____(SetUnusablePasswordMixin, UserCreationForm):
usable_password = SetUnusablePasswordMixin.create_usable_password_field()
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fields["password1"].required = False
self.fields["password2"].required = False
| AdminUserCreationForm |
python | getsentry__sentry | src/sentry/ratelimits/config.py | {
"start": 2397,
"end": 3596
} | class ____:
group: str = field(default="default")
limit_overrides: RateLimitOverrideDict | _sentinel = field(default=_sentinel())
def has_custom_limit(self) -> bool:
return not isinstance(self.limit_overrides, _sentinel)
def get_rate_limit(self, http_method: str, category: RateLimitCategory) -> RateLimit:
if isinstance(self.limit_overrides, _sentinel):
return get_default_rate_limits_for_group(self.group, category)
override_rate_limit = self.limit_overrides.get(http_method, {}).get(category, None)
if isinstance(override_rate_limit, RateLimit):
return override_rate_limit
return get_default_rate_limits_for_group(self.group, category)
@classmethod
def from_rate_limit_override_dict(
cls, rate_limit_override_dict: RateLimitConfig | RateLimitOverrideDict
) -> RateLimitConfig:
if isinstance(rate_limit_override_dict, cls):
return rate_limit_override_dict
elif isinstance(rate_limit_override_dict, dict):
return cls(limit_overrides=rate_limit_override_dict)
raise InvalidRateLimitConfig
DEFAULT_RATE_LIMIT_CONFIG = RateLimitConfig()
| RateLimitConfig |
python | tensorflow__tensorflow | tensorflow/python/saved_model/registration/registration.py | {
"start": 1131,
"end": 15606
} | class ____(object):
"""Registry with predicate-based lookup.
See the documentation for `register_checkpoint_saver` and
`register_serializable` for reasons why predicates are required over a
class-based registry.
Since this class is used for global registries, each object must be registered
to unique names (an error is raised if there are naming conflicts). The lookup
searches the predicates in reverse order, so that later-registered predicates
are executed first.
"""
__slots__ = ("_registry_name", "_registered_map", "_registered_predicates",
"_registered_names")
def __init__(self, name):
self._registry_name = name
# Maps registered name -> object
self._registered_map = {}
# Maps registered name -> predicate
self._registered_predicates = {}
# Stores names in the order of registration
self._registered_names = []
@property
def name(self):
return self._registry_name
def register(self, package, name, predicate, candidate):
"""Registers a candidate object under the package, name and predicate."""
if not isinstance(package, str) or not isinstance(name, str):
raise TypeError(
f"The package and name registered to a {self.name} must be strings, "
f"got: package={type(package)}, name={type(name)}")
if not callable(predicate):
raise TypeError(
f"The predicate registered to a {self.name} must be callable, "
f"got: {type(predicate)}")
registered_name = package + "." + name
if not _VALID_REGISTERED_NAME.match(registered_name):
raise ValueError(
f"Invalid registered {self.name}. Please check that the package and "
f"name follow the regex '{_VALID_REGISTERED_NAME.pattern}': "
f"(package='{package}', name='{name}')")
if registered_name in self._registered_map:
raise ValueError(
f"The name '{registered_name}' has already been registered to a "
f"{self.name}. Found: {self._registered_map[registered_name]}")
self._registered_map[registered_name] = candidate
self._registered_predicates[registered_name] = predicate
self._registered_names.append(registered_name)
def lookup(self, obj):
"""Looks up the registered object using the predicate.
Args:
obj: Object to pass to each of the registered predicates to look up the
registered object.
Returns:
The object registered with the first passing predicate.
Raises:
LookupError if the object does not match any of the predicate functions.
"""
return self._registered_map[self.get_registered_name(obj)]
def name_lookup(self, registered_name):
"""Looks up the registered object using the registered name."""
try:
return self._registered_map[registered_name]
except KeyError:
raise LookupError(f"The {self.name} registry does not have name "
f"'{registered_name}' registered.")
def get_registered_name(self, obj):
for registered_name in reversed(self._registered_names):
predicate = self._registered_predicates[registered_name]
if predicate(obj):
return registered_name
raise LookupError(f"Could not find matching {self.name} for {type(obj)}.")
def get_predicate(self, registered_name):
try:
return self._registered_predicates[registered_name]
except KeyError:
raise LookupError(f"The {self.name} registry does not have name "
f"'{registered_name}' registered.")
def get_registrations(self):
return self._registered_predicates
_class_registry = _PredicateRegistry("serializable class")
_saver_registry = _PredicateRegistry("checkpoint saver")
def get_registered_class_name(obj):
try:
return _class_registry.get_registered_name(obj)
except LookupError:
return None
def get_registered_class(registered_name):
try:
return _class_registry.name_lookup(registered_name)
except LookupError:
return None
def register_serializable(package="Custom", name=None, predicate=None): # pylint: disable=unused-argument
"""Decorator for registering a serializable class.
THIS METHOD IS STILL EXPERIMENTAL AND MAY CHANGE AT ANY TIME.
Registered classes will be saved with a name generated by combining the
`package` and `name` arguments. When loading a SavedModel, modules saved with
this registered name will be created using the `_deserialize_from_proto`
method.
By default, only direct instances of the registered class will be saved/
restored with the `serialize_from_proto`/`deserialize_from_proto` methods. To
extend the registration to subclasses, use the `predicate argument`:
```python
class A(tf.Module):
pass
register_serializable(
package="Example", predicate=lambda obj: isinstance(obj, A))(A)
```
Args:
package: The package that this class belongs to.
name: The name to serialize this class under in this package. If None, the
class's name will be used.
predicate: An optional function that takes a single Trackable argument, and
determines whether that object should be serialized with this `package`
and `name`. The default predicate checks whether the object's type exactly
matches the registered class. Predicates are executed in the reverse order
that they are added (later registrations are checked first).
Returns:
A decorator that registers the decorated class with the passed names and
predicate.
"""
def decorator(arg):
"""Registers a class with the serialization framework."""
nonlocal predicate
if not tf_inspect.isclass(arg):
raise TypeError("Registered serializable must be a class: {}".format(arg))
class_name = name if name is not None else arg.__name__
if predicate is None:
predicate = lambda x: isinstance(x, arg)
_class_registry.register(package, class_name, predicate, arg)
return arg
return decorator
RegisteredSaver = collections.namedtuple(
"RegisteredSaver", ["name", "predicate", "save_fn", "restore_fn"])
_REGISTERED_SAVERS = {}
_REGISTERED_SAVER_NAMES = [] # Stores names in the order of registration
def register_checkpoint_saver(package="Custom",
                              name=None,
                              predicate=None,
                              save_fn=None,
                              restore_fn=None,
                              strict_predicate_restore=True):
  """Registers functions which checkpoints & restores objects with custom steps.
  If you have a class that requires complicated coordination between multiple
  objects when checkpointing, then you will need to register a custom saver
  and restore function. An example of this is a custom Variable class that
  splits the variable across different objects and devices, and needs to write
  checkpoints that are compatible with different configurations of devices.
  The registered save and restore functions are used in checkpoints and
  SavedModel.
  Please make sure you are familiar with the concepts in the [Checkpointing
  guide](https://www.tensorflow.org/guide/checkpoint), and ops used to save the
  V2 checkpoint format:
  * io_ops.SaveV2
  * io_ops.MergeV2Checkpoints
  * io_ops.RestoreV2
  **Predicate**
  The predicate is a filter that will run on every `Trackable` object connected
  to the root object. This function determines whether a `Trackable` should use
  the registered functions.
  Example: `lambda x: isinstance(x, CustomClass)`
  **Custom save function**
  This is how checkpoint saving works normally:
  1. Gather all of the Trackables with saveable values.
  2. For each Trackable, gather all of the saveable tensors.
  3. Save checkpoint shards (grouping tensors by device) with SaveV2
  4. Merge the shards with MergeCheckpointV2. This combines all of the shard's
     metadata, and renames them to follow the standard shard pattern.
  When a saver is registered, Trackables that pass the registered `predicate`
  are automatically marked as having saveable values. Next, the custom save
  function replaces steps 2 and 3 of the saving process. Finally, the shards
  returned by the custom save function are merged with the other shards.
  The save function takes in a dictionary of `Trackables` and a `file_prefix`
  string. The function should save checkpoint shards using the SaveV2 op, and
  return the list of shard prefixes. SaveV2 is currently required to work
  correctly, because the code merges all of the returned shards, and the
  `restore_fn` will only be given the prefix of the merged checkpoint. If you
  need to be able to save and restore from unmerged shards, please file a
  feature request.
  Specification and example of the save function:
  ```
  def save_fn(trackables, file_prefix):
    # trackables: A dictionary mapping unique string identifiers to trackables
    # file_prefix: A unique file prefix generated using the registered name.
    ...
    # Gather the tensors to save.
    ...
    io_ops.SaveV2(file_prefix, tensor_names, shapes_and_slices, tensors)
    return file_prefix  # Returns a tensor or a list of string tensors
  ```
  The save function is executed before the unregistered save ops.
  **Custom restore function**
  Normal checkpoint restore behavior:
  1. Gather all of the Trackables that have saveable values.
  2. For each Trackable, get the names of the desired tensors to extract from
     the checkpoint.
  3. Use RestoreV2 to read the saved values, and pass the restored tensors to
     the corresponding Trackables.
  The custom restore function replaces steps 2 and 3.
  The restore function also takes a dictionary of `Trackables` and a
  `merged_prefix` string. The `merged_prefix` is different from the
  `file_prefix`, since it contains the renamed shard paths. To read from the
  merged checkpoint, you must use `RestoreV2(merged_prefix, ...)`.
  Specification:
  ```
  def restore_fn(trackables, merged_prefix):
    # trackables: A dictionary mapping unique string identifiers to Trackables
    # merged_prefix: File prefix of the merged shard names.
    restored_tensors = io_ops.restore_v2(
        merged_prefix, tensor_names, shapes_and_slices, dtypes)
    ...
    # Restore the checkpoint values for the given Trackables.
  ```
  The restore function is executed after the non-registered restore ops.
  Args:
    package: Optional, the package that this class belongs to.
    name: (Required) The name of this saver, which is saved to the checkpoint.
      When a checkpoint is restored, the name and package are used to find
      the matching restore function. The name and package are also used to
      generate a unique file prefix that is passed to the save_fn.
    predicate: (Required) A function that returns a boolean indicating whether a
      `Trackable` object should be checkpointed with this function. Predicates
      are executed in the reverse order that they are added (later registrations
      are checked first).
    save_fn: (Required) A function that takes a dictionary of trackables and a
      file prefix as the arguments, writes the checkpoint shards for the given
      Trackables, and returns the list of shard prefixes.
    restore_fn: (Required) A function that takes a dictionary of trackables and
      a file prefix as the arguments and restores the trackable values.
    strict_predicate_restore: If this is `True` (default), then an error will be
      raised if the predicate fails during checkpoint restoration. If this is
      `False`, checkpoint restoration will skip running the restore function.
      This value is generally set to `False` when the predicate does not pass on
      the Trackables after being saved/loaded from SavedModel.
  Raises:
    ValueError: if the package and name are already registered.
  """
  # Only the two function arguments are validated here; the registry is
  # expected to validate name/package uniqueness (see Raises above).
  if not callable(save_fn):
    raise TypeError(f"The save_fn must be callable, got: {type(save_fn)}")
  if not callable(restore_fn):
    raise TypeError(f"The restore_fn must be callable, got: {type(restore_fn)}")
  _saver_registry.register(package, name, predicate, (save_fn, restore_fn,
                                                      strict_predicate_restore))
def get_registered_saver_name(trackable):
  """Returns the name of the registered saver to use with Trackable."""
  try:
    registered_name = _saver_registry.get_registered_name(trackable)
  except LookupError:
    # No registered saver matches this trackable.
    return None
  return registered_name
def get_save_function(registered_name):
  """Returns save function registered to name."""
  # The registry stores a (save_fn, restore_fn, strict_predicate_restore)
  # tuple; unpack instead of indexing for clarity.
  save_fn, _, _ = _saver_registry.name_lookup(registered_name)
  return save_fn
def get_restore_function(registered_name):
  """Returns restore function registered to name."""
  # Registered value is (save_fn, restore_fn, strict_predicate_restore).
  _, restore_fn, _ = _saver_registry.name_lookup(registered_name)
  return restore_fn
def get_strict_predicate_restore(registered_name):
  """Returns if the registered restore can be ignored if the predicate fails."""
  try:
    _, _, strict_predicate_restore = _saver_registry.name_lookup(registered_name)
  except LookupError:
    logging.warning(
        "Registered saver %s was not found when restoring checkpoints.",
        registered_name,
    )
    return False  # Return false as the default if the name isn't registered.
  return strict_predicate_restore
def validate_restore_function(trackable, registered_name):
  """Validates whether the trackable can be restored with the saver.
  When using a checkpoint saved with a registered saver, that same saver must
  also be registered when loading. The name of that saver is saved to the
  checkpoint and set in the `registered_name` arg.
  Args:
    trackable: A `Trackable` object.
    registered_name: String name of the expected registered saver. This argument
      should be set using the name saved in a checkpoint.
  Raises:
    ValueError: if the saver could not be found, or if the predicate associated
      with the saver does not pass.
  """
  try:
    _saver_registry.name_lookup(registered_name)
  except LookupError:
    # The internal LookupError adds nothing for users; suppress it so only
    # the ValueError below appears in the traceback.
    raise ValueError(
        f"Error when restoring object {trackable} from checkpoint. This "
        "object was saved using a registered saver named "
        f"'{registered_name}', but this saver cannot be found in the "
        "current context.") from None
  if not _saver_registry.get_predicate(registered_name)(trackable):
    raise ValueError(
        f"Object {trackable} was saved with the registered saver named "
        f"'{registered_name}'. However, this saver cannot be used to restore the "
        "object because the predicate does not pass.")
| _PredicateRegistry |
python | dask__dask | dask/dataframe/dask_expr/_reductions.py | {
"start": 31438,
"end": 31473
} | class ____(Cov):
corr = True
| Corr |
python | getsentry__sentry | tests/sentry/integrations/repository/metric_alert/test_metric_alert_notification_message_repository.py | {
"start": 307,
"end": 2328
} | class ____(TestCase):
    def setUp(self) -> None:
        # Create one incident/trigger-action pair and a stored parent
        # notification message that the lookup tests query against.
        self.incident = self.create_incident()
        self.trigger_action = self.create_alert_rule_trigger_action()
        self.parent_notification_message = NotificationMessage.objects.create(
            incident=self.incident,
            trigger_action=self.trigger_action,
            message_identifier="123abc",
        )
        self.repository = MetricAlertNotificationMessageRepository.default()
    def test_returns_parent_notification_message(self) -> None:
        # An exact (alert_rule, incident, trigger_action) match returns the
        # stored parent message, converted to the repository's domain model.
        instance = self.repository.get_parent_notification_message(
            alert_rule_id=self.incident.alert_rule.id,
            incident_id=self.incident.id,
            trigger_action_id=self.trigger_action.id,
        )
        assert instance is not None
        assert instance == MetricAlertNotificationMessage.from_model(
            self.parent_notification_message
        )
    def test_returns_none_when_filter_does_not_exist(self) -> None:
        # A non-existent alert_rule_id yields None rather than raising.
        instance = self.repository.get_parent_notification_message(
            alert_rule_id=9999,
            incident_id=self.incident.id,
            trigger_action_id=self.trigger_action.id,
        )
        assert instance is None
    def test_when_parent_has_child(self) -> None:
        # Even when a child message exists, the lookup must return the parent
        # (top-level) notification message, not the child.
        child = NotificationMessage.objects.create(
            incident=self.incident,
            trigger_action=self.trigger_action,
            message_identifier="456abc",
            parent_notification_message=self.parent_notification_message,
        )
        assert child.id != self.parent_notification_message.id
        instance = self.repository.get_parent_notification_message(
            alert_rule_id=self.incident.alert_rule.id,
            incident_id=self.incident.id,
            trigger_action_id=self.trigger_action.id,
        )
        assert instance is not None
        assert instance == MetricAlertNotificationMessage.from_model(
            self.parent_notification_message
        )
| TestGetParentNotificationMessage |
python | python-openxml__python-docx | src/docx/shared.py | {
"start": 408,
"end": 1777
} | class ____(int):
"""Base class for length constructor classes Inches, Cm, Mm, Px, and Emu.
Behaves as an int count of English Metric Units, 914,400 to the inch, 36,000 to the
mm. Provides convenience unit conversion methods in the form of read-only
properties. Immutable.
"""
_EMUS_PER_INCH = 914400
_EMUS_PER_CM = 360000
_EMUS_PER_MM = 36000
_EMUS_PER_PT = 12700
_EMUS_PER_TWIP = 635
    def __new__(cls, emu: int):
        # Immutable int subclass: construction must happen in __new__, not
        # __init__; the instance *is* the EMU count.
        return int.__new__(cls, emu)
@property
def cm(self):
"""The equivalent length expressed in centimeters (float)."""
return self / float(self._EMUS_PER_CM)
    @property
    def emu(self):
        """The equivalent length expressed in English Metric Units (int)."""
        # Identity: this int subclass already stores the EMU count.
        return self
@property
def inches(self):
"""The equivalent length expressed in inches (float)."""
return self / float(self._EMUS_PER_INCH)
@property
def mm(self):
"""The equivalent length expressed in millimeters (float)."""
return self / float(self._EMUS_PER_MM)
@property
def pt(self):
"""Floating point length in points."""
return self / float(self._EMUS_PER_PT)
@property
def twips(self):
"""The equivalent length expressed in twips (int)."""
return int(round(self / float(self._EMUS_PER_TWIP)))
| Length |
python | ZoranPandovski__al-go-rithms | games/Python/Pong Game/ball.py | {
"start": 29,
"end": 659
} | class ____(Turtle):
    def __init__(self):
        super().__init__()
        self.penup()  # don't draw a trail while the ball moves
        self.color("blue violet")
        self.shape("circle")
        # Frame delay — presumably used as the sleep interval in the game
        # loop (smaller = faster ball); TODO confirm against the caller.
        self.move_speed=0.1
        # Per-frame displacement along each axis.
        self.x_move = 10
        self.y_move = 10
def ball_move(self):
x_pos=self.xcor() + self.x_move
y_pos=self.ycor() + self.y_move
self.goto(x=x_pos,y=y_pos)
    def bounce_y(self):
        # Reverse vertical direction (top/bottom wall hit).
        self.y_move *= -1
    def bounce_x(self):
        # Reverse horizontal direction (paddle hit) and shrink the frame
        # delay so the ball speeds up after each paddle bounce.
        self.x_move *= -1
        self.move_speed *= 0.9
    def reset_position(self):
        # After a miss: re-center, restore the initial speed, and serve
        # toward the player who just scored (direction flip).
        self.goto(0,0)
        self.move_speed=0.1
        self.bounce_x()
| Ball |
python | pyparsing__pyparsing | examples/tiny/tiny_ast.py | {
"start": 16720,
"end": 16944
} | class ____(Exception):
"""Using exception mechanism to propagate return value from within
nested statements within a function.
"""
    def __init__(self, value):
        # The function's return value, carried up through nested statements
        # via the exception mechanism (see class docstring).
        self.value = value
@dataclass
| ReturnPropagate |
python | django-compressor__django-compressor | compressor/tests/test_offline.py | {
"start": 27835,
"end": 28380
} | class ____(OfflineTestCaseMixin, TestCase):
templates_dir = "test_inline_non_ascii"
additional_test_settings = {
"COMPRESS_OFFLINE_CONTEXT": {
"test_non_ascii_value": "\u2014",
}
}
    def _test_offline(self, engine, verbosity=0):
        # Run offline compression for the engine, then assert the live render
        # equals the offline-generated output (plus trailing newline).
        _, result = CompressCommand().handle_inner(
            engines=[engine], verbosity=verbosity
        )
        rendered_template = self._render_template(engine)
        self.assertEqual(rendered_template, "".join(result) + "\n")
| OfflineCompressInlineNonAsciiTestCase |
python | getsentry__sentry | src/sentry/tasks/llm_issue_detection/detection.py | {
"start": 1676,
"end": 9514
} | class ____(SeerApiError):
    def __init__(
        self,
        message: str,
        status: int,
        project_id: int | None = None,
        trace_id: str | None = None,
        response_data: str | None = None,
        error_message: str | None = None,
    ):
        super().__init__(message, status)
        # Extra context retained for logging / Sentry error reporting.
        self.project_id = project_id
        self.trace_id = trace_id
        self.response_data = response_data
        self.error_message = error_message
def create_issue_occurrence_from_detection(
    detected_issue: DetectedIssue,
    trace: EvidenceTraceData,
    project_id: int,
    transaction_name: str,
) -> None:
    """
    Create and produce an IssueOccurrence from an LLM-detected issue.

    Builds the occurrence payload (fingerprint, evidence, display rows) and a
    minimal synthetic event envelope, then publishes both to Kafka.
    """
    event_id = uuid4().hex
    occurrence_id = uuid4().hex
    detection_time = datetime.now(UTC)
    project = Project.objects.get_from_cache(id=project_id)
    # Slugified title + transaction name, so repeated detections of the same
    # issue on the same transaction group into one issue.
    title = detected_issue.title.lower().replace(" ", "-")
    fingerprint = [f"llm-detected-{title}-{transaction_name}"]
    evidence_data = {
        "trace_id": trace.trace_id,
        "transaction": transaction_name,
        "explanation": detected_issue.explanation,
        "impact": detected_issue.impact,
        "evidence": detected_issue.evidence,
        "missing_telemetry": detected_issue.missing_telemetry,
    }
    evidence_display = [
        IssueEvidence(name="Explanation", value=detected_issue.explanation, important=True),
        IssueEvidence(name="Impact", value=detected_issue.impact, important=False),
        IssueEvidence(name="Evidence", value=detected_issue.evidence, important=False),
    ]
    occurrence = IssueOccurrence(
        id=occurrence_id,
        event_id=event_id,
        project_id=project_id,
        fingerprint=fingerprint,
        issue_title=detected_issue.title,
        subtitle=detected_issue.explanation[:200],  # Truncate for subtitle
        resource_id=None,
        evidence_data=evidence_data,
        evidence_display=evidence_display,
        type=LLMDetectedExperimentalGroupType,
        detection_time=detection_time,
        culprit=transaction_name,
        level="warning",
    )
    # Minimal synthetic event: there is no real ingested event behind an
    # LLM detection, only the trace context it was derived from.
    event_data = {
        "event_id": event_id,
        "project_id": project_id,
        "platform": project.platform or "other",
        "received": detection_time.isoformat(),
        "timestamp": detection_time.isoformat(),
        "transaction": transaction_name,
        "contexts": {
            "trace": {
                "trace_id": trace.trace_id,
                "type": "trace",
            }
        },
    }
    produce_occurrence_to_kafka(
        payload_type=PayloadType.OCCURRENCE,
        occurrence=occurrence,
        event_data=event_data,
    )
def get_enabled_project_ids() -> list[int]:
    """
    Get the list of project IDs that are explicitly enabled for LLM detection.
    Returns the allowlist from system options.
    """
    # Detection is opt-in: only projects on this allowlist are processed.
    return options.get("issue-detection.llm-detection.projects-allowlist")
@instrumented_task(
    name="sentry.tasks.llm_issue_detection.run_llm_issue_detection",
    namespace=issues_tasks,
    processing_deadline_duration=120,
)
def run_llm_issue_detection() -> None:
    """
    Main scheduled task for LLM issue detection.

    Fans out one detect_llm_issues_for_project sub-task per allowlisted
    project; no-ops when the feature option is disabled or no projects are
    allowlisted.
    """
    # Global kill switch.
    if not options.get("issue-detection.llm-detection.enabled"):
        return
    enabled_project_ids = get_enabled_project_ids()
    if not enabled_project_ids:
        return
    # Spawn a sub-task for each project
    for project_id in enabled_project_ids:
        detect_llm_issues_for_project.delay(project_id)
@instrumented_task(
    name="sentry.tasks.llm_issue_detection.detect_llm_issues_for_project",
    namespace=issues_tasks,
    processing_deadline_duration=300,
)
def detect_llm_issues_for_project(project_id: int) -> None:
    """
    Process a single project for LLM issue detection.

    Samples recent transactions, fetches one evidence trace per transaction,
    sends each trace to Seer for analysis, and produces an issue occurrence
    for every issue Seer reports.
    """
    project = Project.objects.get_from_cache(id=project_id)
    organization = project.organization
    organization_id = organization.id
    # Respect the org-level gen-AI feature flag and the explicit opt-out.
    has_access = features.has("organizations:gen-ai-features", organization) and not bool(
        organization.get_option("sentry:hide_ai_features")
    )
    if not has_access:
        return
    transactions = get_transactions_for_project(
        project_id, limit=50, start_time_delta={"minutes": 30}
    )
    if not transactions:
        return
    # Shuffle transactions to randomize order
    random.shuffle(transactions)
    processed_count = 0
    for transaction in transactions:
        # Only traces that pass the span-count filter count toward the cap.
        if processed_count >= NUM_TRANSACTIONS_TO_PROCESS:
            break
        try:
            trace = get_evidence_trace_for_llm_detection(transaction.name, transaction.project_id)
            # Skip traces too small to be interesting or too large to send
            # to the model.
            if (
                not trace
                or trace.total_spans < LOWER_SPAN_LIMIT
                or trace.total_spans > UPPER_SPAN_LIMIT
            ):
                continue
            processed_count += 1
            logger.info(
                "Found trace for LLM issue detection",
                extra={
                    "trace_id": trace.trace_id,
                    "project_id": project_id,
                    "total_spans": trace.total_spans,
                    "transaction_name": trace.transaction_name,
                },
            )
            seer_request = {
                "telemetry": [{**trace.dict(), "kind": "trace"}],
                "organization_id": organization_id,
                "project_id": project_id,
            }
            response = make_signed_seer_api_request(
                connection_pool=seer_issue_detection_connection_pool,
                path=SEER_ANALYZE_ISSUE_ENDPOINT_PATH,
                body=json.dumps(seer_request).encode("utf-8"),
            )
            if response.status < 200 or response.status >= 300:
                raise LLMIssueDetectionError(
                    message="Seer HTTP error",
                    status=response.status,
                    project_id=project_id,
                    trace_id=trace.trace_id,
                    response_data=response.data.decode("utf-8"),
                )
            try:
                raw_response_data = response.json()
                response_data = IssueDetectionResponse.parse_obj(raw_response_data)
            except (ValueError, TypeError) as e:
                raise LLMIssueDetectionError(
                    message="Seer response parsing error",
                    status=response.status,
                    project_id=project_id,
                    trace_id=trace.trace_id,
                    response_data=response.data.decode("utf-8"),
                    error_message=str(e),
                )
            n_found_issues = len(response_data.issues)
            logger.info(
                "Seer issue detection success",
                extra={
                    "num_issues": n_found_issues,
                    "trace_id": trace.trace_id,
                    "project_id": project_id,
                    "titles": (
                        [issue.title for issue in response_data.issues]
                        if n_found_issues > 0
                        else None
                    ),
                },
            )
            for detected_issue in response_data.issues:
                # A failure producing one occurrence must not drop the rest.
                try:
                    create_issue_occurrence_from_detection(
                        detected_issue=detected_issue,
                        trace=trace,
                        project_id=project_id,
                        transaction_name=transaction.name,
                    )
                except Exception as e:
                    sentry_sdk.capture_exception(e)
        except LLMIssueDetectionError as e:
            sentry_sdk.capture_exception(e)
            continue  # if one transaction encounters an error, don't block processing of the others
| LLMIssueDetectionError |
python | keras-team__keras | keras/src/quantizers/gptq_core_test.py | {
"start": 321,
"end": 566
} | class ____:
"""A mock tokenizer that mimics the real API for testing."""
def tokenize(self, text):
return [ord(c) % VOCAB_SIZE for c in "".join(text)]
    def __call__(self, text):
        # Calling the tokenizer is an alias for tokenize(), mirroring the
        # real tokenizer API.
        return self.tokenize(text)
| MockTokenizer |
python | huggingface__transformers | tests/models/textnet/test_image_processing_textnet.py | {
"start": 1013,
"end": 3305
} | class ____:
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        size_divisor=32,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.48145466, 0.4578275, 0.40821073],
        image_std=[0.26862954, 0.26130258, 0.27577711],
        do_convert_rgb=True,
    ):
        # Fill in the defaults the image processor itself would use.
        size = size if size is not None else {"shortest_edge": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.size_divisor = size_divisor
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_convert_rgb = do_convert_rgb
def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"size_divisor": self.size_divisor,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_convert_rgb": self.do_convert_rgb,
}
    def expected_output_image_shape(self, images):
        # Output is channels-first at the center-crop size, independent of
        # the input resolution.
        return self.num_channels, self.crop_size["height"], self.crop_size["width"]
    def prepare_image_inputs(self, equal_resolution=False, numpify=False, torchify=False):
        # Delegate to the shared test helper; resolutions are drawn between
        # min_resolution and max_resolution unless equal_resolution is set.
        return prepare_image_inputs(
            batch_size=self.batch_size,
            num_channels=self.num_channels,
            min_resolution=self.min_resolution,
            max_resolution=self.max_resolution,
            equal_resolution=equal_resolution,
            numpify=numpify,
            torchify=torchify,
        )
@require_torch
@require_vision
| TextNetImageProcessingTester |
python | huggingface__transformers | src/transformers/models/conditional_detr/modeling_conditional_detr.py | {
"start": 51190,
"end": 59896
} | class ____(ConditionalDetrPreTrainedModel):
"""
Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a [`ConditionalDetrDecoderLayer`].
The decoder updates the query embeddings through multiple self-attention and cross-attention layers.
Some small tweaks for Conditional DETR:
- object_queries and query_position_embeddings are added to the forward pass.
- if self.config.auxiliary_loss is set to True, also returns a stack of activations from all decoding layers.
Args:
config: ConditionalDetrConfig
"""
    def __init__(self, config: ConditionalDetrConfig):
        super().__init__(config)
        self.dropout = config.dropout
        self.layerdrop = config.decoder_layerdrop
        self.layers = nn.ModuleList([ConditionalDetrDecoderLayer(config) for _ in range(config.decoder_layers)])
        # in Conditional DETR, the decoder uses layernorm after the last decoder layer output
        self.layernorm = nn.LayerNorm(config.d_model)
        d_model = config.d_model
        self.gradient_checkpointing = False
        # query_scale is the FFN applied on f to generate transformation T
        self.query_scale = MLP(d_model, d_model, d_model, 2)
        # Maps each query position embedding to a 2-D reference point.
        self.ref_point_head = MLP(d_model, d_model, 2, 2)
        # Only the first decoder layer projects the query position embedding
        # in cross-attention; drop that projection for all later layers.
        for layer_id in range(config.decoder_layers - 1):
            self.layers[layer_id + 1].ca_qpos_proj = None
        # Initialize weights and apply final processing
        self.post_init()
    def forward(
        self,
        inputs_embeds=None,
        attention_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        object_queries=None,
        query_position_embeddings=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        r"""
        Args:
            inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
                The query embeddings that are passed into the decoder.
            attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
                Mask to avoid performing attention on certain queries. Mask values selected in `[0, 1]`:
                - 1 for queries that are **not masked**,
                - 0 for queries that are **masked**.
                [What are attention masks?](../glossary#attention-mask)
            encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*):
                Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
                of the decoder.
            encoder_attention_mask (`torch.LongTensor` of shape `(batch_size, encoder_sequence_length)`, *optional*):
                Mask to avoid performing cross-attention on padding pixel_values of the encoder. Mask values selected
                in `[0, 1]`:
                - 1 for pixels that are real (i.e. **not masked**),
                - 0 for pixels that are padding (i.e. **masked**).
            object_queries (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
                Position embeddings that are added to the queries and keys in each cross-attention layer.
            query_position_embeddings (`torch.FloatTensor` of shape `(batch_size, num_queries, hidden_size)`, *optional*):
                Position embeddings that are added to the queries and keys in each self-attention layer.
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
            output_hidden_states (`bool`, *optional*):
                Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
                for more detail.
            return_dict (`bool`, *optional*):
                Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
        """
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        # NOTE(review): inputs_embeds is effectively required — when it is
        # None, hidden_states is never bound and the loop below would fail.
        if inputs_embeds is not None:
            hidden_states = inputs_embeds
            input_shape = inputs_embeds.size()[:-1]
        # expand encoder attention mask
        if encoder_hidden_states is not None and encoder_attention_mask is not None:
            # [batch_size, seq_len] -> [batch_size, 1, target_seq_len, source_seq_len]
            encoder_attention_mask = _prepare_4d_attention_mask(
                encoder_attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]
            )
        # optional intermediate hidden states
        intermediate = () if self.config.auxiliary_loss else None
        # decoder layers
        all_hidden_states = () if output_hidden_states else None
        all_self_attns = () if output_attentions else None
        all_cross_attentions = () if (output_attentions and encoder_hidden_states is not None) else None
        # Derive per-query 2-D reference points from the query position
        # embeddings; these stay fixed across decoder layers.
        reference_points_before_sigmoid = self.ref_point_head(
            query_position_embeddings
        )  # [num_queries, batch_size, 2]
        reference_points = reference_points_before_sigmoid.sigmoid().transpose(0, 1)
        obj_center = reference_points[..., :2].transpose(0, 1)
        # get sine embedding for the query vector
        query_sine_embed_before_transformation = gen_sine_position_embeddings(obj_center, self.config.d_model)
        for idx, decoder_layer in enumerate(self.layers):
            # add LayerDrop (see https://huggingface.co/papers/1909.11556 for description)
            if output_hidden_states:
                all_hidden_states += (hidden_states,)
            if self.training:
                dropout_probability = torch.rand([])
                if dropout_probability < self.layerdrop:
                    continue
            # The first layer uses the raw sine embedding; later layers scale
            # it by a transformation predicted from the current hidden states.
            if idx == 0:
                pos_transformation = 1
            else:
                pos_transformation = self.query_scale(hidden_states)
            # apply transformation
            query_sine_embed = query_sine_embed_before_transformation * pos_transformation
            layer_outputs = decoder_layer(
                hidden_states,
                None,  # attention_mask
                object_queries,
                query_position_embeddings,
                query_sine_embed,
                encoder_hidden_states,  # as a positional argument for gradient checkpointing
                encoder_attention_mask=encoder_attention_mask,
                output_attentions=output_attentions,
                is_first=(idx == 0),
            )
            hidden_states = layer_outputs[0]
            if self.config.auxiliary_loss:
                hidden_states = self.layernorm(hidden_states)
                intermediate += (hidden_states,)
            if output_attentions:
                all_self_attns += (layer_outputs[1],)
                if encoder_hidden_states is not None:
                    all_cross_attentions += (layer_outputs[2],)
        # finally, apply layernorm
        hidden_states = self.layernorm(hidden_states)
        # add hidden states from the last decoder layer
        if output_hidden_states:
            all_hidden_states += (hidden_states,)
        # stack intermediate decoder activations
        if self.config.auxiliary_loss:
            intermediate = torch.stack(intermediate)
        if not return_dict:
            return tuple(
                v
                for v in [
                    hidden_states,
                    all_hidden_states,
                    all_self_attns,
                    all_cross_attentions,
                    intermediate,
                    reference_points,
                ]
                if v is not None
            )
        return ConditionalDetrDecoderOutput(
            last_hidden_state=hidden_states,
            hidden_states=all_hidden_states,
            attentions=all_self_attns,
            cross_attentions=all_cross_attentions,
            intermediate_hidden_states=intermediate,
            reference_points=reference_points,
        )
@auto_docstring(
custom_intro="""
The bare Conditional DETR Model (consisting of a backbone and encoder-decoder Transformer) outputting raw
hidden-states without any specific head on top.
"""
)
| ConditionalDetrDecoder |
python | allegroai__clearml | clearml/backend_api/services/v2_23/dataviews.py | {
"start": 138923,
"end": 139991
} | class ____(Response):
"""
Response of dataviews.publish endpoint.
:param published: Number of dataviews published (0 or 1)
:type published: float
"""
_service = "dataviews"
_action = "publish"
_version = "2.23"
_schema = {
"definitions": {},
"properties": {
"published": {
"description": "Number of dataviews published (0 or 1)",
"enum": [0, 1],
"type": ["number", "null"],
}
},
"type": "object",
}
    def __init__(self, published=None, **kwargs):
        super(PublishResponse, self).__init__(**kwargs)
        # Assignment routes through the schema-validating property setter.
        self.published = published
    @schema_property("published")
    def published(self):
        # Number of dataviews published (0 or 1), or None when unset.
        return self._property_published
    @published.setter
    def published(self, value):
        if value is None:
            self._property_published = None
            return
        # Schema type is number|null: accept ints and floats alike.
        self.assert_isinstance(value, "published", six.integer_types + (float,))
        self._property_published = value
| PublishResponse |
python | Pylons__pyramid | tests/test_url.py | {
"start": 51260,
"end": 53171
} | class ____(unittest.TestCase):
    def setUp(self):
        # Fresh pyramid Configurator per test.
        self.config = testing.setUp()
    def tearDown(self):
        # Clear pyramid's thread-local registry set up in setUp().
        testing.tearDown()
    def _makeRequest(self, route):
        # Build a blank pyramid Request targeting the given path.
        from pyramid.request import Request
        return Request.blank(route)
    def test_old_route_is_preserved(self):
        # Inside the context manager the new prefix is appended; afterwards
        # the original prefix is restored.
        self.config.route_prefix = 'old_prefix'
        with self.config.route_prefix_context('new_addon'):
            assert 'new_addon' in self.config.route_prefix
        assert 'old_prefix' == self.config.route_prefix
    def test_route_prefix_none(self):
        # A None context prefix leaves the existing prefix untouched.
        self.config.route_prefix = 'old_prefix'
        with self.config.route_prefix_context(None):
            assert 'old_prefix' == self.config.route_prefix
        assert 'old_prefix' == self.config.route_prefix
    def test_route_prefix_empty(self):
        # An empty-string context prefix leaves the existing prefix untouched.
        self.config.route_prefix = 'old_prefix'
        with self.config.route_prefix_context(''):
            assert 'old_prefix' == self.config.route_prefix
        assert 'old_prefix' == self.config.route_prefix
    def test_route_has_prefix(self):
        # Routes added inside the context get the prefix in generated URLs.
        with self.config.route_prefix_context('bar'):
            self.config.add_route('acme', '/foo')
        request = self._makeRequest('/')
        self.assertEqual(request.route_url('acme'), 'http://localhost/bar/foo')
    def test_route_does_not_have_prefix(self):
        # Routes added after the context has exited are not prefixed.
        with self.config.route_prefix_context('bar'):
            pass
        self.config.add_route('acme', '/foo')
        request = self._makeRequest('/')
        self.assertEqual(request.route_url('acme'), 'http://localhost/foo')
    def test_error_reset_prefix(self):
        # The prefix is restored even when the body raises.
        self.config.route_prefix = 'old_prefix'
        try:
            with self.config.route_prefix_context('new_prefix'):
                raise RuntimeError
        except RuntimeError:
            pass
        assert self.config.route_prefix == 'old_prefix'
| Test_with_route_prefix |
python | mahmoud__boltons | misc/table_html_app.py | {
"start": 639,
"end": 2337
} | class ____:
_html_doctype = '<!doctype html>'
_html_wrapper, _html_wrapper_close = '<html>', '</html>'
_html_table_tag = '<table class="clastic-atr-table">'
_html_style_content = _STYLE
    def __init__(self, max_depth=4, orientation='auto'):
        # max_depth bounds how deeply nested data is rendered; orientation is
        # passed straight through to Table.to_html().
        self.max_depth = max_depth
        self.orientation = orientation
def _html_format_ep(self, route):
# TODO: callable object endpoints?
module_name = route.endpoint.__module__
try:
func_name = route.endpoint.func_name
except:
func_name = repr(route.endpoint)
args, _, _, _ = getargspec(route.endpoint)
argstr = ', '.join(args)
title = ('<h2><small><sub>%s</sub></small><br/>%s(%s)</h2>'
% (module_name, func_name, argstr))
return title
    def __call__(self, context, _route):
        # Assemble a full HTML document: optional inline CSS, the endpoint
        # heading, then the context rendered as a table.
        content_parts = [self._html_wrapper]
        if self._html_style_content:
            content_parts.extend(['<head><style type="text/css">',
                                  self._html_style_content,
                                  '</style></head>'])
        content_parts.append('<body>')
        title = self._html_format_ep(_route)
        content_parts.append(title)
        table = Table.from_data(context, max_depth=self.max_depth)
        table._html_table_tag = self._html_table_tag
        content = table.to_html(max_depth=self.max_depth,
                                orientation=self.orientation)
        content_parts.append(content)
        content_parts.append('</body>')
        content_parts.append(self._html_wrapper_close)
        return Response('\n'.join(content_parts), mimetype='text/html')
| AutoTableRenderer |
python | pytorch__pytorch | test/test_openmp.py | {
"start": 301,
"end": 487
} | class ____(torch.nn.Module):
maxp1 = torch.nn.MaxPool2d(1, 1)
    def forward(self, x):
        # 1x1 max-pool with stride 1: a shape-preserving pass-through used to
        # exercise the OpenMP threading path in the surrounding test.
        return self.maxp1(x)
@unittest.skipIf(not HAS_PSUTIL, "Requires psutil to run")
| Network |
python | spack__spack | lib/spack/spack/vendor/jinja2/exceptions.py | {
"start": 1291,
"end": 2424
} | class ____(TemplateNotFound):
"""Like :class:`TemplateNotFound` but raised if multiple templates
are selected. This is a subclass of :class:`TemplateNotFound`
exception, so just catching the base exception will catch both.
.. versionchanged:: 2.11
If a name in the list of names is :class:`Undefined`, a message
about it being undefined is shown rather than the empty string.
.. versionadded:: 2.2
"""
def __init__(
self,
names: t.Sequence[t.Union[str, "Undefined"]] = (),
message: t.Optional[str] = None,
) -> None:
if message is None:
from .runtime import Undefined
parts = []
for name in names:
if isinstance(name, Undefined):
parts.append(name._undefined_message)
else:
parts.append(name)
parts_str = ", ".join(map(str, parts))
message = f"none of the templates given were found: {parts_str}"
super().__init__(names[-1] if names else None, message)
self.templates = list(names)
| TemplatesNotFound |
python | django__django | tests/admin_views/tests.py | {
"start": 161216,
"end": 162011
} | class ____(TestCase):
@classmethod
def setUpTestData(cls):
cls.superuser = User.objects.create_superuser(
username="super", password="secret", email="super@example.com"
)
cls.v1 = Villain.objects.create(name="Adam")
cls.pl3 = Plot.objects.create(
name="Corn Conspiracy", team_leader=cls.v1, contact=cls.v1
)
def setUp(self):
self.client.force_login(self.superuser)
def test_generic_content_object_in_list_display(self):
FunkyTag.objects.create(content_object=self.pl3, name="hott")
response = self.client.get(reverse("admin:admin_views_funkytag_changelist"))
self.assertContains(response, "%s</td>" % self.pl3)
@override_settings(ROOT_URLCONF="admin_views.urls")
| TestGenericRelations |
python | ray-project__ray | python/ray/serve/_private/benchmarks/common.py | {
"start": 5770,
"end": 6311
} | class ____:
def __init__(self, tokens_per_request: int, inter_token_delay_ms: int = 10):
logging.getLogger("ray.serve").setLevel(logging.WARNING)
self._tokens_per_request = tokens_per_request
self._inter_token_delay_s = inter_token_delay_ms / 1000
async def stream(self):
for _ in range(self._tokens_per_request):
await asyncio.sleep(self._inter_token_delay_s)
yield b"hi"
async def __call__(self):
return StreamingResponse(self.stream())
@serve.deployment
| Streamer |
python | weaviate__weaviate-python-client | weaviate/embedded.py | {
"start": 1386,
"end": 11209
} | class ____:
def __init__(self, options: EmbeddedOptions) -> None:
self.options = options
self.grpc_port: int = options.grpc_port
self.process: Optional[subprocess.Popen[bytes]] = None
self.ensure_paths_exist()
self.check_supported_platform()
self._parsed_weaviate_version = ""
# regular expression to detect a version number: v[one digit].[1-2 digits].[1-2 digits]
# optionally there can be a "-rc/alpha/beta.[1-2 digits]"
# nothing in front or back
version_pattern = re.compile(
r"^\d\.\d{1,2}\.\d{1,2}?(-rc\.\d{1,2}|-beta\.\d{1,2}|-alpha\.\d{1,2}|$)$"
)
valid_url = validators.url(self.options.version)
if isinstance(valid_url, validators.ValidationError):
valid_url = validators.url(self.options.version, simple_host=True) # for localhost
if valid_url:
if not self.options.version.endswith(".tar.gz") and not self.options.version.endswith(
".zip"
):
raise exceptions.WeaviateEmbeddedInvalidVersionError(self.options.version)
# for GitHub urls we can parse the version from the url
if self.options.version.startswith(GITHUB_RELEASE_DOWNLOAD_URL):
# replace with str.removeprefix() after 3.8 has been deprecated
self._parsed_weaviate_version = self.options.version[
len(GITHUB_RELEASE_DOWNLOAD_URL) :
].split("/")[0]
self._download_url = self.options.version
elif version_pattern.match(self.options.version):
version_tag = "v" + self.options.version
self._parsed_weaviate_version = version_tag
self._set_download_url_from_version_tag(version_tag)
elif self.options.version == "latest":
response = httpx.get("https://api.github.com/repos/weaviate/weaviate/releases/latest")
latest = _decode_json_response_dict(response, "get tag of latest weaviate release")
assert latest is not None
version_tag = latest["tag_name"]
self._parsed_weaviate_version = version_tag
self._set_download_url_from_version_tag(version_tag)
else:
raise exceptions.WeaviateEmbeddedInvalidVersionError(self.options.version)
def _set_download_url_from_version_tag(self, version: str) -> None:
if platform.system() == "Darwin":
machine_type = "all"
package_format = "zip"
else:
machine_type = platform.machine()
if machine_type == "x86_64":
machine_type = "amd64"
elif machine_type == "aarch64":
machine_type = "arm64"
package_format = "tar.gz"
self._download_url = (
GITHUB_RELEASE_DOWNLOAD_URL
+ version
+ "/weaviate-"
+ version
+ "-"
+ platform.system()
+ "-"
+ machine_type
+ "."
+ package_format
)
def __del__(self) -> None:
self.stop()
def ensure_paths_exist(self) -> None:
Path(self.options.binary_path).mkdir(parents=True, exist_ok=True)
Path(self.options.persistence_data_path).mkdir(parents=True, exist_ok=True)
def ensure_weaviate_binary_exists(self) -> None:
self._weaviate_binary_path = Path(
self.options.binary_path,
"weaviate-"
+ self._parsed_weaviate_version
+ "-"
+ str(hashlib.sha256(self._parsed_weaviate_version.encode("utf-8")).hexdigest()),
)
if not self._weaviate_binary_path.exists():
logger.info(
f"Binary {self.options.binary_path} did not exist. Downloading binary from {self._download_url}"
)
if self._download_url.endswith(".tar.gz"):
tar_filename = Path(self.options.binary_path, "tmp_weaviate.tgz")
urllib.request.urlretrieve(self._download_url, tar_filename)
with tarfile.open(tar_filename) as binary_tar:
binary_tar.extract("weaviate", path=Path(self.options.binary_path))
tar_filename.unlink()
else:
assert self._download_url.endswith(".zip")
zip_filename = Path(self.options.binary_path, "tmp_weaviate.zip")
urllib.request.urlretrieve(self._download_url, zip_filename)
with zipfile.ZipFile(zip_filename, "r") as zip_ref:
zip_ref.extract("weaviate", path=Path(self.options.binary_path))
(Path(self.options.binary_path) / "weaviate").rename(self._weaviate_binary_path)
# Ensuring weaviate binary is executable
self._weaviate_binary_path.chmod(
self._weaviate_binary_path.stat().st_mode | stat.S_IEXEC
)
def wait_till_listening(self) -> None:
seconds = 30
sleep_interval = 0.1
retries = int(seconds / sleep_interval)
while self.is_listening() is False and retries > 0:
time.sleep(sleep_interval)
retries -= 1
if retries == 0:
raise WeaviateStartUpError(
f"Embedded DB did not start listening on port {self.options.port} within {seconds} seconds"
)
@staticmethod
def check_supported_platform() -> None:
if platform.system() in ["Windows"]:
raise WeaviateStartUpError(
f"""{platform.system()} is not supported with EmbeddedDB. Please upvote this feature request if you want
this: https://github.com/weaviate/weaviate/issues/3315""" # noqa: E231
)
def stop(self) -> None:
if self.process is not None:
try:
self.process.terminate()
self.process.wait()
except ProcessLookupError:
logger.info(
f"""Tried to stop embedded weaviate process {self.process.pid}. Process was not found. So not doing
anything"""
)
self.process = None
def ensure_running(self) -> None:
if self.is_listening() is False:
logger.info(
f"Embedded weaviate wasn't listening on ports http:{self.options.port} & grpc:{self.options.grpc_port}, so starting embedded weaviate again"
)
self.start()
def start(self) -> None:
self.ensure_weaviate_binary_exists()
my_env = os.environ.copy()
my_env.setdefault("AUTHENTICATION_ANONYMOUS_ACCESS_ENABLED", "true")
my_env.setdefault("QUERY_DEFAULTS_LIMIT", "20")
my_env.setdefault("PERSISTENCE_DATA_PATH", self.options.persistence_data_path)
my_env.setdefault("PROFILING_PORT", str(get_random_port()))
# Limitation with weaviate server requires setting
# data_bind_port to gossip_bind_port + 1
gossip_bind_port = get_random_port()
data_bind_port = gossip_bind_port + 1
my_env.setdefault("CLUSTER_GOSSIP_BIND_PORT", str(gossip_bind_port))
my_env.setdefault("CLUSTER_DATA_BIND_PORT", str(data_bind_port))
my_env.setdefault("GRPC_PORT", str(self.grpc_port))
my_env.setdefault("RAFT_BOOTSTRAP_EXPECT", str(1))
my_env.setdefault("CLUSTER_IN_LOCALHOST", str(True))
# Each call to `get_random_port()` will likely result in
# a port 1 higher than the last time it was called. With
# this, we end up with raft_port == gossip_bind_port + 1,
# which is the same as data_bind_port. This kind of
# configuration leads to failed cross cluster communication.
# Although the current version of embedded does not support
# multi-node instances, the backup process communication
# passes through the internal cluster server, and will fail.
#
# So we here we ensure that raft_port never collides with
# data_bind_port.
raft_port = data_bind_port + 1
raft_internal_rpc_port = raft_port + 1
my_env.setdefault("RAFT_PORT", str(raft_port))
my_env.setdefault("RAFT_INTERNAL_RPC_PORT", str(raft_internal_rpc_port))
my_env.setdefault(
"ENABLE_MODULES",
"text2vec-openai,text2vec-cohere,text2vec-huggingface,ref2vec-centroid,generative-openai,qna-openai,"
"reranker-cohere",
)
# have a deterministic hostname in case of changes in the network name.
# This allows to run multiple parallel instances
cluster_hostname = f"Embedded_at_{self.options.port}"
my_env.setdefault("CLUSTER_HOSTNAME", cluster_hostname)
my_env.setdefault("RAFT_JOIN", f"{cluster_hostname}:{raft_port}")
if self.options.additional_env_vars is not None:
my_env.update(self.options.additional_env_vars)
# filter warning about running processes.
with warnings.catch_warnings():
warnings.simplefilter("ignore", ResourceWarning)
process = subprocess.Popen(
[
f"{self._weaviate_binary_path}",
"--host",
self.options.hostname,
"--port",
str(self.options.port),
"--scheme",
"http",
"--read-timeout=600s",
"--write-timeout=600s",
],
env=my_env,
)
self.process = process
logger.info(f"Started {self.options.binary_path}: process ID {self.process.pid}")
self.wait_till_listening()
@abstractmethod
def is_listening(self) -> bool:
raise NotImplementedError()
| _EmbeddedBase |
python | h5py__h5py | h5py/tests/test_dataset.py | {
"start": 41183,
"end": 42602
} | class ____(BaseDataset):
def test_no_chunks(self):
x = make_name("x")
y = make_name("y")
self.f[x] = np.arange(25).reshape(5, 5)
self.f.create_dataset_like(y, self.f[x])
dslike = self.f[y]
self.assertEqual(dslike.shape, (5, 5))
self.assertIs(dslike.chunks, None)
def test_track_times(self):
x = make_name("x")
y = make_name("y")
z = make_name("z")
w = make_name("w")
orig = self.f.create_dataset(x, data=np.arange(12),
track_times=True)
self.assertNotEqual(0, h5py.h5g.get_objinfo(orig._id).mtime)
similar = self.f.create_dataset_like(y, orig)
self.assertNotEqual(0, h5py.h5g.get_objinfo(similar._id).mtime)
orig = self.f.create_dataset(z, data=np.arange(12),
track_times=False)
self.assertEqual(0, h5py.h5g.get_objinfo(orig._id).mtime)
similar = self.f.create_dataset_like(w, orig)
self.assertEqual(0, h5py.h5g.get_objinfo(similar._id).mtime)
def test_maxshape(self):
""" Test when other.maxshape != other.shape """
other = self.f.create_dataset(make_name("x"), (10,), maxshape=20)
similar = self.f.create_dataset_like(make_name("y"), other)
self.assertEqual(similar.shape, (10,))
self.assertEqual(similar.maxshape, (20,))
| TestCreateLike |
python | jazzband__django-simple-history | simple_history/models.py | {
"start": 37365,
"end": 38689
} | class ____(models.OneToOneField):
"""
Allows one to one fields to work properly from a historic instance.
If you use as_of queries to extract historical instances from
a model, and you have other models that are related by one to
one fields and also historic, changing them to a
HistoricOneToOneField field type will allow you to naturally
cross the relationship boundary at the same point in time as
the origin instance.
A historic instance maintains an attribute ("_historic") when
it is historic, holding the historic record instance and the
timepoint used to query it ("_as_of"). HistoricOneToOneField
looks for this and uses an as_of query against the related
object so the relationship is assessed at the same timepoint.
"""
forward_related_accessor_class = HistoricForwardOneToOneDescriptor
related_accessor_class = HistoricReverseOneToOneDescriptor
def is_historic(instance):
"""
Returns True if the instance was acquired with an as_of timepoint.
"""
return to_historic(instance) is not None
def to_historic(instance):
"""
Returns a historic model instance if the instance was acquired with
an as_of timepoint, or None.
"""
return getattr(instance, SIMPLE_HISTORY_REVERSE_ATTR_NAME, None)
| HistoricOneToOneField |
python | spyder-ide__spyder | spyder/widgets/config.py | {
"start": 1455,
"end": 1542
} | class ____(QWidget):
"""Stub class to declare a config tab."""
pass
| BaseConfigTab |
python | django__django | tests/select_related_regress/models.py | {
"start": 1239,
"end": 1323
} | class ____(models.Model):
org = models.ForeignKey(Organizer, models.CASCADE)
| Class |
python | fluentpython__example-code-2e | 02-array-seq/lispy/py3.9/lis.py | {
"start": 5266,
"end": 6217
} | class ____:
"A user-defined Scheme procedure."
def __init__(
self, parms: list[Symbol], body: list[Expression], env: Environment
):
self.parms = parms
self.body = body
self.env = env
def __call__(self, *args: Expression) -> Any:
local_env = dict(zip(self.parms, args))
env = Environment(local_env, self.env)
for exp in self.body:
result = evaluate(exp, env)
return result
################ command-line interface
def run(source: str) -> Any:
global_env = Environment({}, standard_env())
tokens = tokenize(source)
while tokens:
exp = read_from_tokens(tokens)
result = evaluate(exp, global_env)
return result
def main(args: list[str]) -> None:
if len(args) == 1:
with open(args[0]) as fp:
run(fp.read())
else:
repl()
if __name__ == '__main__':
import sys
main(sys.argv[1:])
| Procedure |
python | great-expectations__great_expectations | great_expectations/core/id_dict.py | {
"start": 2385,
"end": 2423
} | class ____(IDDict):
pass
| BatchKwargs |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/typeVarDefault2.py | {
"start": 2083,
"end": 2201
} | class ____[**P = [int, str, None, int | None]]: ...
# This should generate an error because T1 isn't legal here.
| ClassP4 |
python | PyCQA__pylint | pylint/pyreverse/diadefslib.py | {
"start": 7345,
"end": 9396
} | class ____(LocalsVisitor, DiaDefGenerator):
"""Generate minimum diagram definition for the project :
* a package diagram including project's modules
* a class diagram including project's classes
"""
def __init__(self, linker: Linker, handler: DiadefsHandler) -> None:
DiaDefGenerator.__init__(self, linker, handler)
LocalsVisitor.__init__(self)
def visit_project(self, node: Project) -> None:
"""Visit a pyreverse.utils.Project node.
create a diagram definition for packages
"""
mode = self.config.mode
if len(node.modules) > 1:
self.pkgdiagram: PackageDiagram | None = PackageDiagram(
f"packages {node.name}", mode
)
else:
self.pkgdiagram = None
self.classdiagram = ClassDiagram(f"classes {node.name}", mode)
def leave_project(self, _: Project) -> Any:
"""Leave the pyreverse.utils.Project node.
return the generated diagram definition
"""
if self.pkgdiagram:
return self.pkgdiagram, self.classdiagram
return (self.classdiagram,)
def visit_module(self, node: nodes.Module) -> None:
"""Visit an nodes.Module node.
add this class to the package diagram definition
"""
if self.pkgdiagram and self._should_include_by_depth(node):
self.linker.visit(node)
self.pkgdiagram.add_object(node.name, node)
def visit_classdef(self, node: nodes.ClassDef) -> None:
"""Visit an nodes.Class node.
add this class to the class diagram definition
"""
anc_level, association_level = self._get_levels()
self.extract_classes(node, anc_level, association_level)
def visit_importfrom(self, node: nodes.ImportFrom) -> None:
"""Visit nodes.ImportFrom and catch modules for package diagram."""
if self.pkgdiagram and self._should_include_by_depth(node):
self.pkgdiagram.add_from_depend(node, node.modname)
| DefaultDiadefGenerator |
python | ansible__ansible | test/lib/ansible_test/_internal/docker_util.py | {
"start": 28855,
"end": 33061
} | class ____:
"""The results of `docker inspect` for a single container."""
def __init__(self, args: CommonConfig, inspection: dict[str, t.Any]) -> None:
self.args = args
self.inspection = inspection
# primary properties
@property
def id(self) -> str:
"""Return the ID of the container."""
return self.inspection['Id']
@property
def network_settings(self) -> dict[str, t.Any]:
"""Return a dictionary of the container network settings."""
return self.inspection['NetworkSettings']
@property
def state(self) -> dict[str, t.Any]:
"""Return a dictionary of the container state."""
return self.inspection['State']
@property
def config(self) -> dict[str, t.Any]:
"""Return a dictionary of the container configuration."""
return self.inspection['Config']
# nested properties
@property
def ports(self) -> dict[str, list[dict[str, str]]]:
"""Return a dictionary of ports the container has published."""
return self.network_settings['Ports']
@property
def networks(self) -> t.Optional[dict[str, dict[str, t.Any]]]:
"""Return a dictionary of the networks the container is attached to, or None if running under podman, which does not support networks."""
return self.network_settings.get('Networks')
@property
def running(self) -> bool:
"""Return True if the container is running, otherwise False."""
return self.state['Running']
@property
def pid(self) -> int:
"""Return the PID of the init process."""
if self.args.explain:
return 0
return self.state['Pid']
@property
def env(self) -> list[str]:
"""Return a list of the environment variables used to create the container."""
return self.config['Env']
@property
def image(self) -> str:
"""Return the image used to create the container."""
return self.config['Image']
# functions
def env_dict(self) -> dict[str, str]:
"""Return a dictionary of the environment variables used to create the container."""
return dict((item[0], item[1]) for item in [e.split('=', 1) for e in self.env])
def get_tcp_port(self, port: int) -> t.Optional[list[dict[str, str]]]:
"""Return a list of the endpoints published by the container for the specified TCP port, or None if it is not published."""
return self.ports.get('%d/tcp' % port)
def get_network_names(self) -> t.Optional[list[str]]:
"""Return a list of the network names the container is attached to."""
if self.networks is None:
return None
return sorted(self.networks)
def get_network_name(self) -> str:
"""Return the network name the container is attached to. Raises an exception if no network, or more than one, is attached."""
networks = self.get_network_names()
if not networks:
raise ApplicationError('No network found for Docker container: %s.' % self.id)
if len(networks) > 1:
raise ApplicationError('Found multiple networks for Docker container %s instead of only one: %s' % (self.id, ', '.join(networks)))
return networks[0]
def docker_inspect(args: CommonConfig, identifier: str, always: bool = False) -> DockerInspect:
"""
Return the results of `docker container inspect` for the specified container.
Raises a ContainerNotFoundError if the container was not found.
"""
try:
stdout = docker_command(args, ['container', 'inspect', identifier], capture=True, always=always)[0]
except SubprocessError as ex:
stdout = ex.stdout
if args.explain and not always:
items = []
else:
items = json.loads(stdout)
if len(items) == 1:
return DockerInspect(args, items[0])
raise ContainerNotFoundError(identifier)
def docker_network_disconnect(args: CommonConfig, container_id: str, network: str) -> None:
"""Disconnect the specified docker container from the given network."""
docker_command(args, ['network', 'disconnect', network, container_id], capture=True)
| DockerInspect |
python | astropy__astropy | astropy/modeling/functional_models.py | {
"start": 87918,
"end": 90380
} | class ____(Fittable1DModel):
"""
One dimensional Ricker Wavelet model (sometimes known as a "Mexican Hat"
model).
.. note::
See https://github.com/astropy/astropy/pull/9445 for discussions
related to renaming of this model.
Parameters
----------
amplitude : float
Amplitude
x_0 : float
Position of the peak
sigma : float
Width of the Ricker wavelet
See Also
--------
RickerWavelet2D, Box1D, Gaussian1D, Trapezoid1D
Notes
-----
Model formula:
.. math::
f(x) = {A \\left(1 - \\frac{\\left(x - x_{0}\\right)^{2}}{\\sigma^{2}}\\right)
e^{- \\frac{\\left(x - x_{0}\\right)^{2}}{2 \\sigma^{2}}}}
Examples
--------
.. plot::
:include-source:
import numpy as np
import matplotlib.pyplot as plt
from astropy.modeling.models import RickerWavelet1D
plt.figure()
s1 = RickerWavelet1D()
r = np.arange(-5, 5, .01)
for factor in range(1, 4):
s1.amplitude = factor
s1.width = factor
plt.plot(r, s1(r), color=str(0.25 * factor), lw=2)
plt.axis([-5, 5, -2, 4])
plt.show()
"""
amplitude = Parameter(default=1, description="Amplitude (peak) value")
x_0 = Parameter(default=0, description="Position of the peak")
sigma = Parameter(default=1, description="Width of the Ricker wavelet")
@staticmethod
def evaluate(x, amplitude, x_0, sigma):
"""One dimensional Ricker Wavelet model function."""
xx_ww = (x - x_0) ** 2 / (2 * sigma**2)
return amplitude * (1 - 2 * xx_ww) * np.exp(-xx_ww)
def bounding_box(self, factor=10.0):
"""Tuple defining the default ``bounding_box`` limits,
``(x_low, x_high)``.
Parameters
----------
factor : float
The multiple of sigma used to define the limits.
"""
x0 = self.x_0
dx = factor * self.sigma
return (x0 - dx, x0 + dx)
@property
def input_units(self):
if self.x_0.input_unit is None:
return None
return {self.inputs[0]: self.x_0.input_unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {
"x_0": inputs_unit[self.inputs[0]],
"sigma": inputs_unit[self.inputs[0]],
"amplitude": outputs_unit[self.outputs[0]],
}
| RickerWavelet1D |
python | Textualize__textual | src/textual/design.py | {
"start": 486,
"end": 14996
} | class ____:
"""Defines a standard set of colors and variations for building a UI.
Primary is the main theme color
Secondary is a second theme color
"""
COLOR_NAMES = [
"primary",
"secondary",
"background",
"primary-background",
"secondary-background",
"surface",
"panel",
"boost",
"warning",
"error",
"success",
"accent",
]
def __init__(
self,
primary: str,
secondary: str | None = None,
warning: str | None = None,
error: str | None = None,
success: str | None = None,
accent: str | None = None,
foreground: str | None = None,
background: str | None = None,
surface: str | None = None,
panel: str | None = None,
boost: str | None = None,
dark: bool = False,
luminosity_spread: float = 0.15,
text_alpha: float = 0.95,
variables: dict[str, str] | None = None,
):
def parse(color: str | None) -> Color | None:
if color is None:
return None
return Color.parse(color)
self.primary = Color.parse(primary)
self.secondary = parse(secondary)
self.warning = parse(warning)
self.error = parse(error)
self.success = parse(success)
self.accent = parse(accent)
self.foreground = parse(foreground)
self.background = parse(background)
self.surface = parse(surface)
self.panel = parse(panel)
self.boost = parse(boost)
self.dark = dark
self.luminosity_spread = luminosity_spread
self.text_alpha = text_alpha
self.variables = variables or {}
"""Overrides for specific variables."""
@property
def shades(self) -> Iterable[str]:
"""The names of the colors and derived shades."""
for color in self.COLOR_NAMES:
for shade_number in range(-NUMBER_OF_SHADES, NUMBER_OF_SHADES + 1):
if shade_number < 0:
yield f"{color}-darken-{abs(shade_number)}"
elif shade_number > 0:
yield f"{color}-lighten-{shade_number}"
else:
yield color
def get_or_default(self, name: str, default: str) -> str:
"""Get the value of a color variable, or the default value if not set."""
return self.variables.get(name, default)
def generate(self) -> dict[str, str]:
"""Generate a mapping of color name on to a CSS color.
Returns:
A mapping of color name on to a CSS-style encoded color
"""
primary = self.primary
secondary = self.secondary or primary
warning = self.warning or primary
error = self.error or secondary
success = self.success or secondary
accent = self.accent or primary
dark = self.dark
luminosity_spread = self.luminosity_spread
colors: dict[str, str] = {}
if dark:
background = self.background or Color.parse(DEFAULT_DARK_BACKGROUND)
surface = self.surface or Color.parse(DEFAULT_DARK_SURFACE)
else:
background = self.background or Color.parse(DEFAULT_LIGHT_BACKGROUND)
surface = self.surface or Color.parse(DEFAULT_LIGHT_SURFACE)
foreground = self.foreground or (background.inverse)
contrast_text = background.get_contrast_text(1.0)
boost = self.boost or contrast_text.with_alpha(0.04)
# Colored text
colors["text-primary"] = contrast_text.tint(primary.with_alpha(0.66)).hex
colors["text-secondary"] = contrast_text.tint(secondary.with_alpha(0.66)).hex
colors["text-warning"] = contrast_text.tint(warning.with_alpha(0.66)).hex
colors["text-error"] = contrast_text.tint(error.with_alpha(0.66)).hex
colors["text-success"] = contrast_text.tint(success.with_alpha(0.66)).hex
colors["text-accent"] = contrast_text.tint(accent.with_alpha(0.66)).hex
if self.panel is None:
panel = surface.blend(primary, 0.1, alpha=1)
if dark:
panel += boost
else:
panel = self.panel
def luminosity_range(spread: float) -> Iterable[tuple[str, float]]:
"""Get the range of shades from darken2 to lighten2.
Returns:
Iterable of tuples (<SHADE SUFFIX, LUMINOSITY DELTA>)
"""
luminosity_step = spread / 2
for n in range(-NUMBER_OF_SHADES, +NUMBER_OF_SHADES + 1):
if n < 0:
label = "-darken"
elif n > 0:
label = "-lighten"
else:
label = ""
yield (f"{label}{'-' + str(abs(n)) if n else ''}"), n * luminosity_step
# Color names and color
COLORS: list[tuple[str, Color]] = [
("primary", primary),
("secondary", secondary),
("primary-background", primary),
("secondary-background", secondary),
("background", background),
("foreground", foreground),
("panel", panel),
("boost", boost),
("surface", surface),
("warning", warning),
("error", error),
("success", success),
("accent", accent),
]
# Colors names that have a dark variant
DARK_SHADES = {"primary-background", "secondary-background"}
get = self.get_or_default
for name, color in COLORS:
is_dark_shade = dark and name in DARK_SHADES
spread = luminosity_spread
for shade_name, luminosity_delta in luminosity_range(spread):
key = f"{name}{shade_name}"
if color.ansi is not None:
colors[key] = color.hex
elif is_dark_shade:
dark_background = background.blend(color, 0.15, alpha=1.0)
if key not in self.variables:
shade_color = dark_background.blend(
WHITE, spread + luminosity_delta, alpha=1.0
).clamped
colors[key] = shade_color.hex
else:
colors[key] = self.variables[key]
else:
colors[key] = get(key, color.lighten(luminosity_delta).hex)
if foreground.ansi is None:
colors["text"] = get("text", "auto 87%")
colors["text-muted"] = get("text-muted", "auto 60%")
colors["text-disabled"] = get("text-disabled", "auto 38%")
else:
colors["text"] = "ansi_default"
colors["text-muted"] = "ansi_default"
colors["text-disabled"] = "ansi_default"
# Muted variants of base colors
colors["primary-muted"] = get(
"primary-muted", primary.blend(background, 0.7).hex
)
colors["secondary-muted"] = get(
"secondary-muted", secondary.blend(background, 0.7).hex
)
colors["accent-muted"] = get("accent-muted", accent.blend(background, 0.7).hex)
colors["warning-muted"] = get(
"warning-muted", warning.blend(background, 0.7).hex
)
colors["error-muted"] = get("error-muted", error.blend(background, 0.7).hex)
colors["success-muted"] = get(
"success-muted", success.blend(background, 0.7).hex
)
# Foreground colors
colors["foreground-muted"] = get(
"foreground-muted", foreground.with_alpha(0.6).hex
)
colors["foreground-disabled"] = get(
"foreground-disabled", foreground.with_alpha(0.38).hex
)
# The cursor color for widgets such as OptionList, DataTable, etc.
colors["block-cursor-foreground"] = get(
"block-cursor-foreground", colors["text"]
)
colors["block-cursor-background"] = get("block-cursor-background", primary.hex)
colors["block-cursor-text-style"] = get("block-cursor-text-style", "bold")
colors["block-cursor-blurred-foreground"] = get(
"block-cursor-blurred-foreground", foreground.hex
)
colors["block-cursor-blurred-background"] = get(
"block-cursor-blurred-background", primary.with_alpha(0.3).hex
)
colors["block-cursor-blurred-text-style"] = get(
"block-cursor-blurred-text-style", "none"
)
colors["block-hover-background"] = get(
"block-hover-background", boost.with_alpha(0.1).hex
)
# The border color for focused widgets which have a border.
colors["border"] = get("border", primary.hex)
colors["border-blurred"] = get("border-blurred", surface.darken(0.025).hex)
# The surface color for builtin focused widgets
colors["surface-active"] = get(
"surface-active", surface.lighten(self.luminosity_spread / 2.5).hex
)
# The scrollbar colors
colors["scrollbar"] = get(
"scrollbar",
(Color.parse(colors["background-darken-1"]) + primary.with_alpha(0.4)).hex,
)
colors["scrollbar-hover"] = get(
"scrollbar-hover",
(Color.parse(colors["background-darken-1"]) + primary.with_alpha(0.5)).hex,
)
# colors["scrollbar-active"] = get("scrollbar-active", colors["panel-lighten-2"])
colors["scrollbar-active"] = get("scrollbar-active", primary.hex)
colors["scrollbar-background"] = get(
"scrollbar-background", colors["background-darken-1"]
)
colors["scrollbar-corner-color"] = get(
"scrollbar-corner-color", colors["scrollbar-background"]
)
colors["scrollbar-background-hover"] = get(
"scrollbar-background-hover", colors["scrollbar-background"]
)
colors["scrollbar-background-active"] = get(
"scrollbar-background-active", colors["scrollbar-background"]
)
# Links
colors["link-background"] = get("link-background", "initial")
colors["link-background-hover"] = get("link-background-hover", primary.hex)
colors["link-color"] = get("link-color", colors["text"])
colors["link-style"] = get("link-style", "underline")
colors["link-color-hover"] = get("link-color-hover", colors["text"])
colors["link-style-hover"] = get("link-style-hover", "bold not underline")
colors["footer-foreground"] = get("footer-foreground", foreground.hex)
colors["footer-background"] = get("footer-background", panel.hex)
colors["footer-key-foreground"] = get("footer-key-foreground", accent.hex)
colors["footer-key-background"] = get("footer-key-background", "transparent")
colors["footer-description-foreground"] = get(
"footer-description-foreground", foreground.hex
)
colors["footer-description-background"] = get(
"footer-description-background", "transparent"
)
colors["footer-item-background"] = get("footer-item-background", "transparent")
colors["input-cursor-background"] = get(
"input-cursor-background", foreground.hex
)
colors["input-cursor-foreground"] = get(
"input-cursor-foreground", background.hex
)
colors["input-cursor-text-style"] = get("input-cursor-text-style", "none")
colors["input-selection-background"] = get(
"input-selection-background",
Color.parse(colors["primary-lighten-1"]).with_alpha(0.4).hex,
)
# Markdown header styles
colors["markdown-h1-color"] = get("markdown-h1-color", primary.hex)
colors["markdown-h1-background"] = get("markdown-h1-background", "transparent")
colors["markdown-h1-text-style"] = get("markdown-h1-text-style", "bold")
colors["markdown-h2-color"] = get("markdown-h2-color", primary.hex)
colors["markdown-h2-background"] = get("markdown-h2-background", "transparent")
colors["markdown-h2-text-style"] = get("markdown-h2-text-style", "underline")
colors["markdown-h3-color"] = get("markdown-h3-color", primary.hex)
colors["markdown-h3-background"] = get("markdown-h3-background", "transparent")
colors["markdown-h3-text-style"] = get("markdown-h3-text-style", "bold")
colors["markdown-h4-color"] = get("markdown-h4-color", foreground.hex)
colors["markdown-h4-background"] = get("markdown-h4-background", "transparent")
colors["markdown-h4-text-style"] = get(
"markdown-h4-text-style", "bold underline"
)
colors["markdown-h5-color"] = get("markdown-h5-color", foreground.hex)
colors["markdown-h5-background"] = get("markdown-h5-background", "transparent")
colors["markdown-h5-text-style"] = get("markdown-h5-text-style", "bold")
colors["markdown-h6-color"] = get(
"markdown-h6-color", colors["foreground-muted"]
)
colors["markdown-h6-background"] = get("markdown-h6-background", "transparent")
colors["markdown-h6-text-style"] = get("markdown-h6-text-style", "bold")
colors["button-foreground"] = get("button-foreground", foreground.hex)
colors["button-color-foreground"] = get(
"button-color-foreground", colors["text"]
)
colors["button-focus-text-style"] = get("button-focus-text-style", "b reverse")
return colors
def show_design(light: ColorSystem, dark: ColorSystem) -> Table:
"""Generate a renderable to show color systems.
Args:
light: Light ColorSystem.
dark: Dark ColorSystem
Returns:
Table showing all colors.
"""
@group()
def make_shades(system: ColorSystem):
colors = system.generate()
for name in system.shades:
background = Color.parse(colors[name]).with_alpha(1.0)
foreground = background + background.get_contrast_text(0.9)
text = Text(f"${name}")
yield Padding(text, 1, style=f"{foreground.hex6} on {background.hex6}")
table = Table(box=None, expand=True)
table.add_column("Light", justify="center")
table.add_column("Dark", justify="center")
table.add_row(make_shades(light), make_shades(dark))
return table
| ColorSystem |
python | huggingface__transformers | src/transformers/models/mvp/modeling_mvp.py | {
"start": 28342,
"end": 39430
} | class ____(MvpPreTrainedModel):
"""
Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a [`MvpDecoderLayer`]
Args:
config: MvpConfig
embed_tokens (nn.Embedding): output embedding
use_prompt (bool): whether to use prompt
"""
def __init__(self, config: MvpConfig, use_prompt: Optional[bool] = False):
super().__init__(config)
self.dropout = config.dropout
self.layerdrop = config.decoder_layerdrop
self.padding_idx = config.pad_token_id
self.max_target_positions = config.max_position_embeddings
self.embed_scale = math.sqrt(config.d_model) if config.scale_embedding else 1.0
self.embed_tokens = nn.Embedding(config.vocab_size, config.d_model, self.padding_idx)
self.embed_positions = MvpLearnedPositionalEmbedding(
config.max_position_embeddings,
config.d_model,
)
self.layers = nn.ModuleList([MvpDecoderLayer(config, layer_idx=i) for i in range(config.decoder_layers)])
self.layernorm_embedding = nn.LayerNorm(config.d_model)
self.use_prompt = use_prompt
if use_prompt:
self.prompt_length = config.prompt_length
self.self_attn_prompt = MvpPrompt(
config,
config.decoder_layers,
config.decoder_attention_heads,
)
self.cross_attn_prompt = MvpPrompt(
config,
config.decoder_layers,
config.decoder_attention_heads,
)
self.gradient_checkpointing = False
# Initialize weights and apply final processing
self.post_init()
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
encoder_hidden_states: Optional[torch.FloatTensor] = None,
encoder_attention_mask: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
cache_position: Optional[torch.Tensor] = None,
) -> Union[tuple, BaseModelOutputWithPastAndCrossAttentions]:
r"""
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
provide it.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*):
Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
of the decoder.
encoder_attention_mask (`torch.LongTensor` of shape `(batch_size, encoder_sequence_length)`, *optional*):
Mask to avoid performing cross-attention on padding tokens indices of encoder input_ids. Mask values
selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache).
Contains pre-computed hidden-states (key and values in the self-attention blocks and in the
cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those
that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of
all `decoder_input_ids` of shape `(batch_size, sequence_length)`.
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert `input_ids` indices into associated vectors
than the model's internal embedding lookup matrix.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
for more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
use_cache = use_cache if use_cache is not None else self.config.use_cache
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
# retrieve input_ids and inputs_embeds
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time")
elif input_ids is not None:
input = input_ids
input_shape = input_ids.shape
input_ids = input_ids.view(-1, input_shape[-1])
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
input = inputs_embeds[:, :, -1]
else:
raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds")
if inputs_embeds is None:
inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale
if self.gradient_checkpointing and self.training:
if use_cache:
logger.warning_once(
"`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
)
use_cache = False
if use_cache and past_key_values is None:
past_key_values = (
EncoderDecoderCache(DynamicCache(config=self.config), DynamicCache(config=self.config))
if encoder_hidden_states is not None or self.config.is_encoder_decoder
else DynamicCache(config=self.config)
)
past_key_values_length = past_key_values.get_seq_length() if past_key_values is not None else 0
attention_mask = _prepare_4d_causal_attention_mask(
attention_mask, input_shape, inputs_embeds, past_key_values_length
)
# expand encoder attention mask
if encoder_hidden_states is not None and encoder_attention_mask is not None:
# [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
encoder_attention_mask = _prepare_4d_attention_mask(
encoder_attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]
)
# embed positions
positions = self.embed_positions(input, past_key_values_length)
hidden_states = inputs_embeds + positions
hidden_states = self.layernorm_embedding(hidden_states)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
# layer-wise prompt
if self.use_prompt:
prompt_ids = torch.arange(self.prompt_length).to(self.device)
self_attn_prompt = self.self_attn_prompt(prompt_ids)
cross_attn_prompt = self.cross_attn_prompt(prompt_ids)
# decoder layers
all_hidden_states = () if output_hidden_states else None
all_self_attns = () if output_attentions else None
all_cross_attentions = () if (output_attentions and encoder_hidden_states is not None) else None
for idx, decoder_layer in enumerate(self.layers):
# add LayerDrop (see https://huggingface.co/papers/1909.11556 for description)
if output_hidden_states:
all_hidden_states += (hidden_states,)
if self.training:
dropout_probability = torch.rand([])
if dropout_probability < self.layerdrop:
continue
layer_outputs = decoder_layer(
hidden_states,
attention_mask,
encoder_hidden_states, # as positional argument for gradient checkpointing
encoder_attention_mask=encoder_attention_mask,
self_attn_prompt=(self_attn_prompt[idx] if self.use_prompt else None),
cross_attn_prompt=(cross_attn_prompt[idx] if self.use_prompt else None),
past_key_values=past_key_values,
output_attentions=output_attentions,
use_cache=use_cache,
cache_position=cache_position,
)
hidden_states = layer_outputs[0]
if output_attentions:
all_self_attns += (layer_outputs[1],)
if encoder_hidden_states is not None:
all_cross_attentions += (layer_outputs[2],)
# add hidden states from the last decoder layer
if output_hidden_states:
all_hidden_states += (hidden_states,)
if not return_dict:
return tuple(
v
for v in [hidden_states, past_key_values, all_hidden_states, all_self_attns, all_cross_attentions]
if v is not None
)
return BaseModelOutputWithPastAndCrossAttentions(
last_hidden_state=hidden_states,
past_key_values=past_key_values,
hidden_states=all_hidden_states,
attentions=all_self_attns,
cross_attentions=all_cross_attentions,
)
@auto_docstring
| MvpDecoder |
python | nedbat__coveragepy | coverage/debug.py | {
"start": 13410,
"end": 13888
} | class ____:
"""Track the current pytest test name to add to debug messages."""
def __init__(self) -> None:
self.test_name: str | None = None
def filter(self, text: str) -> str:
"""Add a message when the pytest test changes."""
test_name = os.getenv("PYTEST_CURRENT_TEST")
if test_name != self.test_name:
text = f"Pytest context: {test_name}\n{text}"
self.test_name = test_name
return text
| PytestTracker |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.