| language (string, 1 value) | repo (string, 346 values) | path (string, len 6–201) | class_span (dict) | source (string, len 21–2.38M) | target (string, len 1–96) |
|---|---|---|---|---|---|
python | spyder-ide__spyder | spyder/widgets/browser.py | {
"start": 1460,
"end": 1601
} | class ____:
Move = 'move_section'
Select = 'select_section'
Zoom = 'zoom_section'
Extras = 'extras_section'
| WebViewMenuSections |
python | pytorch__pytorch | test/distributed/test_debug.py | {
"start": 537,
"end": 1743
} | class ____(TestCase):
def test_basics(self) -> None:
store = dist.TCPStore("localhost", 0, 1, is_master=True, wait_for_workers=False)
os.environ["MASTER_ADDR"] = "localhost"
os.environ["MASTER_PORT"] = str(store.port)
os.environ["RANK"] = "0"
os.environ["WORLD_SIZE"] = "1"
port = 25999
def fetch(path: str) -> str:
resp = session.get(f"http://localhost:{port}{path}")
resp.raise_for_status()
return resp.text
start_debug_server(port=port)
self.assertIn("torch profiler", fetch("/"))
self.assertIn("View 0", fetch("/profile?duration=0.01"))
self.assertIn("test_basics", fetch("/stacks"))
self.assertIn("pg_status", fetch("/fr_trace"))
self.assertIn("Rank 0", fetch("/wait_counters"))
if torch.cuda.is_available():
self.assertIn("pg_status", fetch("/fr_trace_nccl"))
# test errors
resp = session.get(f"http://localhost:{port}/blah")
self.assertEqual(resp.status_code, 404)
self.assertIn("Handler not found: /blah", resp.text)
stop_debug_server()
if __name__ == "__main__":
run_tests()
| TestDebug |
python | sqlalchemy__sqlalchemy | test/orm/test_dynamic.py | {
"start": 51129,
"end": 60397
} | class ____(
_WriteOnlyFixture,
_UOWTests,
_fixtures.FixtureTest,
testing.AssertsExecutionResults,
):
run_inserts = None
__sparse_driver_backend__ = True
@testing.requires.insert_executemany_returning
@testing.combinations(True, False, argnames="flush_user_first")
def test_bulk_insert(self, user_address_fixture, flush_user_first):
User, Address = user_address_fixture(
addresses_args={"backref": "user"}
)
sess = fixture_session()
u1 = User(name="x")
sess.add(u1)
# ha ha! u1 is not persistent yet. autoflush won't happen
# until sess.scalars() actually runs. statement has to be
# created with a pending parameter, not an actual parameter
assert inspect(u1).pending
if flush_user_first:
sess.flush()
with self.sql_execution_asserter() as asserter:
addresses = sess.scalars(
u1.addresses.insert().returning(Address),
[
{"email_address": "e1"},
{"email_address": "e2"},
{"email_address": "e3"},
],
).all()
eq_(
addresses,
[
Address(user=User(name="x"), email_address="e1"),
Address(user=User(name="x"), email_address="e2"),
Address(user=User(name="x"), email_address="e3"),
],
)
uid = u1.id
asserter.assert_(
Conditional(
not flush_user_first,
[
CompiledSQL(
"INSERT INTO users (name) VALUES (:name)",
[{"name": "x"}],
)
],
[],
),
CompiledSQL(
"INSERT INTO addresses (user_id, email_address) "
"VALUES (:param_1, :email_address) "
"RETURNING addresses.id, addresses.user_id, "
"addresses.email_address",
[
{"param_1": uid, "email_address": "e1"},
{"param_1": uid, "email_address": "e2"},
{"param_1": uid, "email_address": "e3"},
],
),
)
@testing.requires.update_returning
@testing.combinations(True, False, argnames="flush_user_first")
def test_bulk_update(self, user_address_fixture, flush_user_first):
User, Address = user_address_fixture(
addresses_args={"backref": "user"}
)
sess = fixture_session()
u1 = User(
name="x",
addresses=[
Address(email_address="e1"),
Address(email_address="e2"),
Address(email_address="e3"),
],
)
sess.add(u1)
# ha ha! u1 is not persistent yet. autoflush won't happen
# until sess.scalars() actually runs. statement has to be
# created with a pending parameter, not an actual parameter
assert inspect(u1).pending
if flush_user_first:
sess.flush()
with self.sql_execution_asserter() as asserter:
addresses = sess.scalars(
u1.addresses.update()
.values(email_address=Address.email_address + "@foo.com")
.returning(Address),
).all()
eq_(
addresses,
[
Address(user=User(name="x"), email_address="e1@foo.com"),
Address(user=User(name="x"), email_address="e2@foo.com"),
Address(user=User(name="x"), email_address="e3@foo.com"),
],
)
uid = u1.id
asserter.assert_(
Conditional(
not flush_user_first,
[
CompiledSQL(
"INSERT INTO users (name) VALUES (:name)",
[{"name": "x"}],
),
Conditional(
testing.requires.insert_executemany_returning.enabled,
[
CompiledSQL(
"INSERT INTO addresses "
"(user_id, email_address) "
"VALUES (:user_id, :email_address) "
"RETURNING addresses.id",
[
{"user_id": uid, "email_address": "e1"},
{"user_id": uid, "email_address": "e2"},
{"user_id": uid, "email_address": "e3"},
],
)
],
[
CompiledSQL(
"INSERT INTO addresses "
"(user_id, email_address) "
"VALUES (:user_id, :email_address)",
param,
)
for param in [
{"user_id": uid, "email_address": "e1"},
{"user_id": uid, "email_address": "e2"},
{"user_id": uid, "email_address": "e3"},
]
],
),
],
[],
),
CompiledSQL(
"UPDATE addresses SET email_address=(addresses.email_address "
"|| :email_address_1) WHERE :param_1 = addresses.user_id "
"RETURNING addresses.id, addresses.user_id, "
"addresses.email_address",
[{"email_address_1": "@foo.com", "param_1": uid}],
),
)
@testing.requires.delete_returning
@testing.combinations(True, False, argnames="flush_user_first")
def test_bulk_delete(self, user_address_fixture, flush_user_first):
User, Address = user_address_fixture(
addresses_args={"backref": "user"}
)
sess = fixture_session()
u1 = User(
name="x",
addresses=[
Address(email_address="e1"),
Address(email_address="e2"),
Address(email_address="e3"),
],
)
sess.add(u1)
# ha ha! u1 is not persistent yet. autoflush won't happen
# until sess.scalars() actually runs. statement has to be
# created with a pending parameter, not an actual parameter
assert inspect(u1).pending
if flush_user_first:
sess.flush()
with self.sql_execution_asserter() as asserter:
addresses = sess.scalars(
u1.addresses.delete()
.where(Address.email_address == "e2")
.returning(Address),
).all()
eq_(
addresses,
[
Address(email_address="e2"),
],
)
uid = u1.id
asserter.assert_(
Conditional(
not flush_user_first,
[
CompiledSQL(
"INSERT INTO users (name) VALUES (:name)",
[{"name": "x"}],
),
Conditional(
testing.requires.insert_executemany_returning.enabled,
[
CompiledSQL(
"INSERT INTO addresses "
"(user_id, email_address) "
"VALUES (:user_id, :email_address) "
"RETURNING addresses.id",
[
{"user_id": uid, "email_address": "e1"},
{"user_id": uid, "email_address": "e2"},
{"user_id": uid, "email_address": "e3"},
],
)
],
[
CompiledSQL(
"INSERT INTO addresses "
"(user_id, email_address) "
"VALUES (:user_id, :email_address)",
param,
)
for param in [
{"user_id": uid, "email_address": "e1"},
{"user_id": uid, "email_address": "e2"},
{"user_id": uid, "email_address": "e3"},
]
],
),
],
[],
),
CompiledSQL(
"DELETE FROM addresses WHERE :param_1 = addresses.user_id "
"AND addresses.email_address = :email_address_1 "
"RETURNING addresses.id, addresses.user_id, "
"addresses.email_address",
[{"param_1": uid, "email_address_1": "e2"}],
),
)
| WriteOnlyBulkTest |
python | pytorch__pytorch | test/dynamo/cpython/3_13/test_iter.py | {
"start": 699,
"end": 2326
} | class ____(importlib.abc.MetaPathFinder):
def find_spec(self, fullname, path, target=None):
# Check if the import is the problematic one
if fullname in redirect_imports:
try:
# Attempt to import the standalone module
name = fullname.removeprefix("test.")
r = importlib.import_module(name)
# Redirect the module in sys.modules
sys.modules[fullname] = r
# Return a module spec from the found module
return importlib.util.find_spec(name)
except ImportError:
return None
return None
# Add the custom finder to sys.meta_path
sys.meta_path.insert(0, RedirectImportFinder())
# ======= END DYNAMO PATCH =======
# Test iterators.
import sys
import unittest
from test.support import cpython_only
from test.support.os_helper import TESTFN, unlink
from test.support import check_free_after_iterating, ALWAYS_EQ, NEVER_EQ
from test.support import BrokenIter
import pickle
import collections.abc
import functools
import contextlib
import builtins
import traceback
# Test result of triple loop (too big to inline)
TRIPLETS = [(0, 0, 0), (0, 0, 1), (0, 0, 2),
(0, 1, 0), (0, 1, 1), (0, 1, 2),
(0, 2, 0), (0, 2, 1), (0, 2, 2),
(1, 0, 0), (1, 0, 1), (1, 0, 2),
(1, 1, 0), (1, 1, 1), (1, 1, 2),
(1, 2, 0), (1, 2, 1), (1, 2, 2),
(2, 0, 0), (2, 0, 1), (2, 0, 2),
(2, 1, 0), (2, 1, 1), (2, 1, 2),
(2, 2, 0), (2, 2, 1), (2, 2, 2)]
# Helper classes
| RedirectImportFinder |
python | Pylons__pyramid | tests/test_config/test_settings.py | {
"start": 2918,
"end": 31997
} | class ____(unittest.TestCase):
def _getTargetClass(self):
from pyramid.config.settings import Settings
return Settings
def _makeOne(self, d=None, environ=None):
if environ is None:
environ = {}
klass = self._getTargetClass()
return klass(d, _environ_=environ)
def test_noargs(self):
settings = self._makeOne()
self.assertEqual(settings['debug_authorization'], False)
self.assertEqual(settings['debug_notfound'], False)
self.assertEqual(settings['debug_routematch'], False)
self.assertEqual(settings['reload_templates'], False)
self.assertEqual(settings['reload_resources'], False)
self.assertEqual(settings['pyramid.debug_authorization'], False)
self.assertEqual(settings['pyramid.debug_notfound'], False)
self.assertEqual(settings['pyramid.debug_routematch'], False)
self.assertEqual(settings['pyramid.reload_templates'], False)
self.assertEqual(settings['pyramid.reload_resources'], False)
def test_prevent_http_cache(self):
settings = self._makeOne({})
self.assertEqual(settings['prevent_http_cache'], False)
self.assertEqual(settings['pyramid.prevent_http_cache'], False)
result = self._makeOne({'prevent_http_cache': 'false'})
self.assertEqual(result['prevent_http_cache'], False)
self.assertEqual(result['pyramid.prevent_http_cache'], False)
result = self._makeOne({'prevent_http_cache': 't'})
self.assertEqual(result['prevent_http_cache'], True)
self.assertEqual(result['pyramid.prevent_http_cache'], True)
result = self._makeOne({'prevent_http_cache': '1'})
self.assertEqual(result['prevent_http_cache'], True)
self.assertEqual(result['pyramid.prevent_http_cache'], True)
result = self._makeOne({'pyramid.prevent_http_cache': 't'})
self.assertEqual(result['prevent_http_cache'], True)
self.assertEqual(result['pyramid.prevent_http_cache'], True)
result = self._makeOne({}, {'PYRAMID_PREVENT_HTTP_CACHE': '1'})
self.assertEqual(result['prevent_http_cache'], True)
self.assertEqual(result['pyramid.prevent_http_cache'], True)
result = self._makeOne(
{'prevent_http_cache': 'false', 'pyramid.prevent_http_cache': '1'}
)
self.assertEqual(result['prevent_http_cache'], True)
self.assertEqual(result['pyramid.prevent_http_cache'], True)
result = self._makeOne(
{'prevent_http_cache': 'false', 'pyramid.prevent_http_cache': 'f'},
{'PYRAMID_PREVENT_HTTP_CACHE': '1'},
)
self.assertEqual(result['prevent_http_cache'], True)
self.assertEqual(result['pyramid.prevent_http_cache'], True)
def test_prevent_cachebust(self):
settings = self._makeOne({})
self.assertEqual(settings['prevent_cachebust'], False)
self.assertEqual(settings['pyramid.prevent_cachebust'], False)
result = self._makeOne({'prevent_cachebust': 'false'})
self.assertEqual(result['prevent_cachebust'], False)
self.assertEqual(result['pyramid.prevent_cachebust'], False)
result = self._makeOne({'prevent_cachebust': 't'})
self.assertEqual(result['prevent_cachebust'], True)
self.assertEqual(result['pyramid.prevent_cachebust'], True)
result = self._makeOne({'prevent_cachebust': '1'})
self.assertEqual(result['prevent_cachebust'], True)
self.assertEqual(result['pyramid.prevent_cachebust'], True)
result = self._makeOne({'pyramid.prevent_cachebust': 't'})
self.assertEqual(result['prevent_cachebust'], True)
self.assertEqual(result['pyramid.prevent_cachebust'], True)
result = self._makeOne({}, {'PYRAMID_PREVENT_CACHEBUST': '1'})
self.assertEqual(result['prevent_cachebust'], True)
self.assertEqual(result['pyramid.prevent_cachebust'], True)
result = self._makeOne(
{'prevent_cachebust': 'false', 'pyramid.prevent_cachebust': '1'}
)
self.assertEqual(result['prevent_cachebust'], True)
self.assertEqual(result['pyramid.prevent_cachebust'], True)
result = self._makeOne(
{'prevent_cachebust': 'false', 'pyramid.prevent_cachebust': 'f'},
{'PYRAMID_PREVENT_CACHEBUST': '1'},
)
self.assertEqual(result['prevent_cachebust'], True)
self.assertEqual(result['pyramid.prevent_cachebust'], True)
def test_reload_templates(self):
settings = self._makeOne({})
self.assertEqual(settings['reload_templates'], False)
self.assertEqual(settings['pyramid.reload_templates'], False)
result = self._makeOne({'reload_templates': 'false'})
self.assertEqual(result['reload_templates'], False)
self.assertEqual(result['pyramid.reload_templates'], False)
result = self._makeOne({'reload_templates': 't'})
self.assertEqual(result['reload_templates'], True)
self.assertEqual(result['pyramid.reload_templates'], True)
result = self._makeOne({'reload_templates': '1'})
self.assertEqual(result['reload_templates'], True)
self.assertEqual(result['pyramid.reload_templates'], True)
result = self._makeOne({'pyramid.reload_templates': '1'})
self.assertEqual(result['reload_templates'], True)
self.assertEqual(result['pyramid.reload_templates'], True)
result = self._makeOne({}, {'PYRAMID_RELOAD_TEMPLATES': '1'})
self.assertEqual(result['reload_templates'], True)
self.assertEqual(result['pyramid.reload_templates'], True)
result = self._makeOne(
{'reload_templates': 'false', 'pyramid.reload_templates': '1'}
)
self.assertEqual(result['reload_templates'], True)
self.assertEqual(result['pyramid.reload_templates'], True)
result = self._makeOne(
{'reload_templates': 'false'}, {'PYRAMID_RELOAD_TEMPLATES': '1'}
)
self.assertEqual(result['reload_templates'], True)
self.assertEqual(result['pyramid.reload_templates'], True)
def test_reload_resources(self):
# alias for reload_assets
result = self._makeOne({})
self.assertEqual(result['reload_resources'], False)
self.assertEqual(result['reload_assets'], False)
self.assertEqual(result['pyramid.reload_resources'], False)
self.assertEqual(result['pyramid.reload_assets'], False)
result = self._makeOne({'reload_resources': 'false'})
self.assertEqual(result['reload_resources'], False)
self.assertEqual(result['reload_assets'], False)
self.assertEqual(result['pyramid.reload_resources'], False)
self.assertEqual(result['pyramid.reload_assets'], False)
result = self._makeOne({'reload_resources': 't'})
self.assertEqual(result['reload_resources'], True)
self.assertEqual(result['reload_assets'], True)
self.assertEqual(result['pyramid.reload_resources'], True)
self.assertEqual(result['pyramid.reload_assets'], True)
result = self._makeOne({'reload_resources': '1'})
self.assertEqual(result['reload_resources'], True)
self.assertEqual(result['reload_assets'], True)
self.assertEqual(result['pyramid.reload_resources'], True)
self.assertEqual(result['pyramid.reload_assets'], True)
result = self._makeOne({'pyramid.reload_resources': '1'})
self.assertEqual(result['reload_resources'], True)
self.assertEqual(result['reload_assets'], True)
self.assertEqual(result['pyramid.reload_resources'], True)
self.assertEqual(result['pyramid.reload_assets'], True)
result = self._makeOne({}, {'PYRAMID_RELOAD_RESOURCES': '1'})
self.assertEqual(result['reload_resources'], True)
self.assertEqual(result['reload_assets'], True)
self.assertEqual(result['pyramid.reload_resources'], True)
self.assertEqual(result['pyramid.reload_assets'], True)
result = self._makeOne(
{'reload_resources': 'false', 'pyramid.reload_resources': '1'}
)
self.assertEqual(result['reload_resources'], True)
self.assertEqual(result['reload_assets'], True)
self.assertEqual(result['pyramid.reload_resources'], True)
self.assertEqual(result['pyramid.reload_assets'], True)
result = self._makeOne(
{'reload_resources': 'false', 'pyramid.reload_resources': 'false'},
{'PYRAMID_RELOAD_RESOURCES': '1'},
)
self.assertEqual(result['reload_resources'], True)
self.assertEqual(result['reload_assets'], True)
self.assertEqual(result['pyramid.reload_resources'], True)
self.assertEqual(result['pyramid.reload_assets'], True)
def test_reload_assets(self):
# alias for reload_resources
result = self._makeOne({})
self.assertEqual(result['reload_assets'], False)
self.assertEqual(result['reload_resources'], False)
self.assertEqual(result['pyramid.reload_assets'], False)
self.assertEqual(result['pyramid.reload_resources'], False)
result = self._makeOne({'reload_assets': 'false'})
self.assertEqual(result['reload_resources'], False)
self.assertEqual(result['reload_assets'], False)
self.assertEqual(result['pyramid.reload_assets'], False)
self.assertEqual(result['pyramid.reload_resources'], False)
result = self._makeOne({'reload_assets': 't'})
self.assertEqual(result['reload_assets'], True)
self.assertEqual(result['reload_resources'], True)
self.assertEqual(result['pyramid.reload_assets'], True)
self.assertEqual(result['pyramid.reload_resources'], True)
result = self._makeOne({'reload_assets': '1'})
self.assertEqual(result['reload_assets'], True)
self.assertEqual(result['reload_resources'], True)
self.assertEqual(result['pyramid.reload_assets'], True)
self.assertEqual(result['pyramid.reload_resources'], True)
result = self._makeOne({'pyramid.reload_assets': '1'})
self.assertEqual(result['reload_assets'], True)
self.assertEqual(result['reload_resources'], True)
self.assertEqual(result['pyramid.reload_assets'], True)
self.assertEqual(result['pyramid.reload_resources'], True)
result = self._makeOne({}, {'PYRAMID_RELOAD_ASSETS': '1'})
self.assertEqual(result['reload_assets'], True)
self.assertEqual(result['reload_resources'], True)
self.assertEqual(result['pyramid.reload_assets'], True)
self.assertEqual(result['pyramid.reload_resources'], True)
result = self._makeOne(
{'reload_assets': 'false', 'pyramid.reload_assets': '1'}
)
self.assertEqual(result['reload_assets'], True)
self.assertEqual(result['reload_resources'], True)
self.assertEqual(result['pyramid.reload_assets'], True)
self.assertEqual(result['pyramid.reload_resources'], True)
result = self._makeOne(
{'reload_assets': 'false', 'pyramid.reload_assets': 'false'},
{'PYRAMID_RELOAD_ASSETS': '1'},
)
self.assertEqual(result['reload_assets'], True)
self.assertEqual(result['reload_resources'], True)
self.assertEqual(result['pyramid.reload_assets'], True)
self.assertEqual(result['pyramid.reload_resources'], True)
def test_reload_all(self):
result = self._makeOne({})
self.assertEqual(result['reload_templates'], False)
self.assertEqual(result['reload_resources'], False)
self.assertEqual(result['reload_assets'], False)
self.assertEqual(result['pyramid.reload_templates'], False)
self.assertEqual(result['pyramid.reload_resources'], False)
self.assertEqual(result['pyramid.reload_assets'], False)
result = self._makeOne({'reload_all': 'false'})
self.assertEqual(result['reload_templates'], False)
self.assertEqual(result['reload_resources'], False)
self.assertEqual(result['reload_assets'], False)
self.assertEqual(result['pyramid.reload_templates'], False)
self.assertEqual(result['pyramid.reload_resources'], False)
self.assertEqual(result['pyramid.reload_assets'], False)
result = self._makeOne({'reload_all': 't'})
self.assertEqual(result['reload_templates'], True)
self.assertEqual(result['reload_resources'], True)
self.assertEqual(result['reload_assets'], True)
self.assertEqual(result['pyramid.reload_templates'], True)
self.assertEqual(result['pyramid.reload_resources'], True)
self.assertEqual(result['pyramid.reload_assets'], True)
result = self._makeOne({'reload_all': '1'})
self.assertEqual(result['reload_templates'], True)
self.assertEqual(result['reload_resources'], True)
self.assertEqual(result['reload_assets'], True)
self.assertEqual(result['pyramid.reload_templates'], True)
self.assertEqual(result['pyramid.reload_resources'], True)
self.assertEqual(result['pyramid.reload_assets'], True)
result = self._makeOne({'pyramid.reload_all': '1'})
self.assertEqual(result['reload_templates'], True)
self.assertEqual(result['reload_resources'], True)
self.assertEqual(result['reload_assets'], True)
self.assertEqual(result['pyramid.reload_templates'], True)
self.assertEqual(result['pyramid.reload_resources'], True)
self.assertEqual(result['pyramid.reload_assets'], True)
result = self._makeOne({}, {'PYRAMID_RELOAD_ALL': '1'})
self.assertEqual(result['reload_templates'], True)
self.assertEqual(result['reload_resources'], True)
self.assertEqual(result['reload_assets'], True)
self.assertEqual(result['pyramid.reload_templates'], True)
self.assertEqual(result['pyramid.reload_resources'], True)
self.assertEqual(result['pyramid.reload_assets'], True)
result = self._makeOne(
{'reload_all': 'false', 'pyramid.reload_all': '1'}
)
self.assertEqual(result['reload_templates'], True)
self.assertEqual(result['reload_resources'], True)
self.assertEqual(result['reload_assets'], True)
self.assertEqual(result['pyramid.reload_templates'], True)
self.assertEqual(result['pyramid.reload_resources'], True)
self.assertEqual(result['pyramid.reload_assets'], True)
result = self._makeOne(
{'reload_all': 'false', 'pyramid.reload_all': 'false'},
{'PYRAMID_RELOAD_ALL': '1'},
)
self.assertEqual(result['reload_templates'], True)
self.assertEqual(result['reload_resources'], True)
self.assertEqual(result['reload_assets'], True)
self.assertEqual(result['pyramid.reload_templates'], True)
self.assertEqual(result['pyramid.reload_resources'], True)
self.assertEqual(result['pyramid.reload_assets'], True)
def test_debug_authorization(self):
result = self._makeOne({})
self.assertEqual(result['debug_authorization'], False)
self.assertEqual(result['pyramid.debug_authorization'], False)
result = self._makeOne({'debug_authorization': 'false'})
self.assertEqual(result['debug_authorization'], False)
self.assertEqual(result['pyramid.debug_authorization'], False)
result = self._makeOne({'debug_authorization': 't'})
self.assertEqual(result['debug_authorization'], True)
self.assertEqual(result['pyramid.debug_authorization'], True)
result = self._makeOne({'debug_authorization': '1'})
self.assertEqual(result['debug_authorization'], True)
self.assertEqual(result['pyramid.debug_authorization'], True)
result = self._makeOne({'pyramid.debug_authorization': '1'})
self.assertEqual(result['debug_authorization'], True)
self.assertEqual(result['pyramid.debug_authorization'], True)
result = self._makeOne({}, {'PYRAMID_DEBUG_AUTHORIZATION': '1'})
self.assertEqual(result['debug_authorization'], True)
self.assertEqual(result['pyramid.debug_authorization'], True)
result = self._makeOne(
{
'debug_authorization': 'false',
'pyramid.debug_authorization': '1',
}
)
self.assertEqual(result['debug_authorization'], True)
self.assertEqual(result['pyramid.debug_authorization'], True)
result = self._makeOne(
{
'debug_authorization': 'false',
'pyramid.debug_authorization': 'false',
},
{'PYRAMID_DEBUG_AUTHORIZATION': '1'},
)
self.assertEqual(result['debug_authorization'], True)
self.assertEqual(result['pyramid.debug_authorization'], True)
def test_debug_notfound(self):
result = self._makeOne({})
self.assertEqual(result['debug_notfound'], False)
self.assertEqual(result['pyramid.debug_notfound'], False)
result = self._makeOne({'debug_notfound': 'false'})
self.assertEqual(result['debug_notfound'], False)
self.assertEqual(result['pyramid.debug_notfound'], False)
result = self._makeOne({'debug_notfound': 't'})
self.assertEqual(result['debug_notfound'], True)
self.assertEqual(result['pyramid.debug_notfound'], True)
result = self._makeOne({'debug_notfound': '1'})
self.assertEqual(result['debug_notfound'], True)
self.assertEqual(result['pyramid.debug_notfound'], True)
result = self._makeOne({'pyramid.debug_notfound': '1'})
self.assertEqual(result['debug_notfound'], True)
self.assertEqual(result['pyramid.debug_notfound'], True)
result = self._makeOne({}, {'PYRAMID_DEBUG_NOTFOUND': '1'})
self.assertEqual(result['debug_notfound'], True)
self.assertEqual(result['pyramid.debug_notfound'], True)
result = self._makeOne(
{'debug_notfound': 'false', 'pyramid.debug_notfound': '1'}
)
self.assertEqual(result['debug_notfound'], True)
self.assertEqual(result['pyramid.debug_notfound'], True)
result = self._makeOne(
{'debug_notfound': 'false', 'pyramid.debug_notfound': 'false'},
{'PYRAMID_DEBUG_NOTFOUND': '1'},
)
self.assertEqual(result['debug_notfound'], True)
self.assertEqual(result['pyramid.debug_notfound'], True)
def test_debug_routematch(self):
result = self._makeOne({})
self.assertEqual(result['debug_routematch'], False)
self.assertEqual(result['pyramid.debug_routematch'], False)
result = self._makeOne({'debug_routematch': 'false'})
self.assertEqual(result['debug_routematch'], False)
self.assertEqual(result['pyramid.debug_routematch'], False)
result = self._makeOne({'debug_routematch': 't'})
self.assertEqual(result['debug_routematch'], True)
self.assertEqual(result['pyramid.debug_routematch'], True)
result = self._makeOne({'debug_routematch': '1'})
self.assertEqual(result['debug_routematch'], True)
self.assertEqual(result['pyramid.debug_routematch'], True)
result = self._makeOne({'pyramid.debug_routematch': '1'})
self.assertEqual(result['debug_routematch'], True)
self.assertEqual(result['pyramid.debug_routematch'], True)
result = self._makeOne({}, {'PYRAMID_DEBUG_ROUTEMATCH': '1'})
self.assertEqual(result['debug_routematch'], True)
self.assertEqual(result['pyramid.debug_routematch'], True)
result = self._makeOne(
{'debug_routematch': 'false', 'pyramid.debug_routematch': '1'}
)
self.assertEqual(result['debug_routematch'], True)
self.assertEqual(result['pyramid.debug_routematch'], True)
result = self._makeOne(
{'debug_routematch': 'false', 'pyramid.debug_routematch': 'false'},
{'PYRAMID_DEBUG_ROUTEMATCH': '1'},
)
self.assertEqual(result['debug_routematch'], True)
self.assertEqual(result['pyramid.debug_routematch'], True)
def test_debug_templates(self):
result = self._makeOne({})
self.assertEqual(result['debug_templates'], False)
self.assertEqual(result['pyramid.debug_templates'], False)
result = self._makeOne({'debug_templates': 'false'})
self.assertEqual(result['debug_templates'], False)
self.assertEqual(result['pyramid.debug_templates'], False)
result = self._makeOne({'debug_templates': 't'})
self.assertEqual(result['debug_templates'], True)
self.assertEqual(result['pyramid.debug_templates'], True)
result = self._makeOne({'debug_templates': '1'})
self.assertEqual(result['debug_templates'], True)
self.assertEqual(result['pyramid.debug_templates'], True)
result = self._makeOne({'pyramid.debug_templates': '1'})
self.assertEqual(result['debug_templates'], True)
self.assertEqual(result['pyramid.debug_templates'], True)
result = self._makeOne({}, {'PYRAMID_DEBUG_TEMPLATES': '1'})
self.assertEqual(result['debug_templates'], True)
self.assertEqual(result['pyramid.debug_templates'], True)
result = self._makeOne(
{'debug_templates': 'false', 'pyramid.debug_templates': '1'}
)
self.assertEqual(result['debug_templates'], True)
self.assertEqual(result['pyramid.debug_templates'], True)
result = self._makeOne(
{'debug_templates': 'false', 'pyramid.debug_templates': 'false'},
{'PYRAMID_DEBUG_TEMPLATES': '1'},
)
self.assertEqual(result['debug_templates'], True)
self.assertEqual(result['pyramid.debug_templates'], True)
def test_debug_all(self):
result = self._makeOne({})
self.assertEqual(result['debug_notfound'], False)
self.assertEqual(result['debug_routematch'], False)
self.assertEqual(result['debug_authorization'], False)
self.assertEqual(result['debug_templates'], False)
self.assertEqual(result['pyramid.debug_notfound'], False)
self.assertEqual(result['pyramid.debug_routematch'], False)
self.assertEqual(result['pyramid.debug_authorization'], False)
self.assertEqual(result['pyramid.debug_templates'], False)
result = self._makeOne({'debug_all': 'false'})
self.assertEqual(result['debug_notfound'], False)
self.assertEqual(result['debug_routematch'], False)
self.assertEqual(result['debug_authorization'], False)
self.assertEqual(result['debug_templates'], False)
self.assertEqual(result['pyramid.debug_notfound'], False)
self.assertEqual(result['pyramid.debug_routematch'], False)
self.assertEqual(result['pyramid.debug_authorization'], False)
self.assertEqual(result['pyramid.debug_templates'], False)
result = self._makeOne({'debug_all': 't'})
self.assertEqual(result['debug_notfound'], True)
self.assertEqual(result['debug_routematch'], True)
self.assertEqual(result['debug_authorization'], True)
self.assertEqual(result['debug_templates'], True)
self.assertEqual(result['pyramid.debug_notfound'], True)
self.assertEqual(result['pyramid.debug_routematch'], True)
self.assertEqual(result['pyramid.debug_authorization'], True)
self.assertEqual(result['pyramid.debug_templates'], True)
result = self._makeOne({'debug_all': '1'})
self.assertEqual(result['debug_notfound'], True)
self.assertEqual(result['debug_routematch'], True)
self.assertEqual(result['debug_authorization'], True)
self.assertEqual(result['debug_templates'], True)
self.assertEqual(result['pyramid.debug_notfound'], True)
self.assertEqual(result['pyramid.debug_routematch'], True)
self.assertEqual(result['pyramid.debug_authorization'], True)
self.assertEqual(result['pyramid.debug_templates'], True)
result = self._makeOne({'pyramid.debug_all': '1'})
self.assertEqual(result['debug_notfound'], True)
self.assertEqual(result['debug_routematch'], True)
self.assertEqual(result['debug_authorization'], True)
self.assertEqual(result['debug_templates'], True)
self.assertEqual(result['pyramid.debug_notfound'], True)
self.assertEqual(result['pyramid.debug_routematch'], True)
self.assertEqual(result['pyramid.debug_authorization'], True)
self.assertEqual(result['pyramid.debug_templates'], True)
result = self._makeOne({}, {'PYRAMID_DEBUG_ALL': '1'})
self.assertEqual(result['debug_notfound'], True)
self.assertEqual(result['debug_routematch'], True)
self.assertEqual(result['debug_authorization'], True)
self.assertEqual(result['debug_templates'], True)
self.assertEqual(result['pyramid.debug_notfound'], True)
self.assertEqual(result['pyramid.debug_routematch'], True)
self.assertEqual(result['pyramid.debug_authorization'], True)
self.assertEqual(result['pyramid.debug_templates'], True)
result = self._makeOne(
{'debug_all': 'false', 'pyramid.debug_all': '1'}
)
self.assertEqual(result['debug_notfound'], True)
self.assertEqual(result['debug_routematch'], True)
self.assertEqual(result['debug_authorization'], True)
self.assertEqual(result['debug_templates'], True)
self.assertEqual(result['pyramid.debug_notfound'], True)
self.assertEqual(result['pyramid.debug_routematch'], True)
self.assertEqual(result['pyramid.debug_authorization'], True)
self.assertEqual(result['pyramid.debug_templates'], True)
result = self._makeOne(
{'debug_all': 'false', 'pyramid.debug_all': 'false'},
{'PYRAMID_DEBUG_ALL': '1'},
)
self.assertEqual(result['debug_notfound'], True)
self.assertEqual(result['debug_routematch'], True)
self.assertEqual(result['debug_authorization'], True)
self.assertEqual(result['debug_templates'], True)
self.assertEqual(result['pyramid.debug_notfound'], True)
self.assertEqual(result['pyramid.debug_routematch'], True)
self.assertEqual(result['pyramid.debug_authorization'], True)
self.assertEqual(result['pyramid.debug_templates'], True)
def test_default_locale_name(self):
result = self._makeOne({})
self.assertEqual(result['default_locale_name'], 'en')
self.assertEqual(result['pyramid.default_locale_name'], 'en')
result = self._makeOne({'default_locale_name': 'abc'})
self.assertEqual(result['default_locale_name'], 'abc')
self.assertEqual(result['pyramid.default_locale_name'], 'abc')
result = self._makeOne({'pyramid.default_locale_name': 'abc'})
self.assertEqual(result['default_locale_name'], 'abc')
self.assertEqual(result['pyramid.default_locale_name'], 'abc')
result = self._makeOne({}, {'PYRAMID_DEFAULT_LOCALE_NAME': 'abc'})
self.assertEqual(result['default_locale_name'], 'abc')
self.assertEqual(result['pyramid.default_locale_name'], 'abc')
result = self._makeOne(
{
'default_locale_name': 'def',
'pyramid.default_locale_name': 'abc',
}
)
self.assertEqual(result['default_locale_name'], 'abc')
self.assertEqual(result['pyramid.default_locale_name'], 'abc')
result = self._makeOne(
{
'default_locale_name': 'def',
'pyramid.default_locale_name': 'ghi',
},
{'PYRAMID_DEFAULT_LOCALE_NAME': 'abc'},
)
self.assertEqual(result['default_locale_name'], 'abc')
self.assertEqual(result['pyramid.default_locale_name'], 'abc')
def test_csrf_trusted_origins(self):
result = self._makeOne({})
self.assertEqual(result['pyramid.csrf_trusted_origins'], [])
result = self._makeOne({'pyramid.csrf_trusted_origins': 'example.com'})
self.assertEqual(
result['pyramid.csrf_trusted_origins'], ['example.com']
)
result = self._makeOne(
{'pyramid.csrf_trusted_origins': ['example.com']}
)
self.assertEqual(
result['pyramid.csrf_trusted_origins'], ['example.com']
)
result = self._makeOne(
{
'pyramid.csrf_trusted_origins': (
'example.com foo.example.com\nasdf.example.com'
)
}
)
self.assertEqual(
result['pyramid.csrf_trusted_origins'],
['example.com', 'foo.example.com', 'asdf.example.com'],
)
def test_originals_kept(self):
result = self._makeOne({'a': 'i am so a'})
self.assertEqual(result['a'], 'i am so a')
| TestSettings |
python | spack__spack | lib/spack/spack/error.py | {
"start": 5567,
"end": 5826
} | class ____(SpackError):
"""Pickle-able exception to control stopped builds."""
def __reduce__(self):
return _make_stop_phase, (self.message, self.long_message)
def _make_stop_phase(msg, long_msg):
return StopPhase(msg, long_msg)
| StopPhase |
python | getsentry__sentry | src/sentry/api/endpoints/organization_events_spans_performance.py | {
"start": 4516,
"end": 5961
} | class ____(serializers.Serializer[Never]):
field = serializers.ListField(child=serializers.CharField(), required=False, allow_null=True)
query = serializers.CharField(required=False, allow_null=True)
spanOp = serializers.ListField(
child=serializers.CharField(), required=False, allow_null=True, max_length=5
)
excludeSpanOp = serializers.ListField(
child=serializers.CharField(), required=False, allow_null=True, max_length=5
)
spanGroup = serializers.ListField(
child=serializers.CharField(), required=False, allow_null=True, max_length=4
)
min_exclusive_time = serializers.FloatField(required=False)
max_exclusive_time = serializers.FloatField(required=False)
def validate(self, data: dict[str, Any]) -> dict[str, Any]:
if (
"min_exclusive_time" in data
and "max_exclusive_time" in data
and data["min_exclusive_time"] > data["max_exclusive_time"]
):
raise serializers.ValidationError(
"min_exclusive_time cannot be greater than max_exclusive_time."
)
return data
def validate_spanGroup(self, span_groups: list[object]) -> list[object]:
for group in span_groups:
if not is_span_id(group):
raise serializers.ValidationError(INVALID_SPAN_ID.format("spanGroup"))
return span_groups
@region_silo_endpoint
| SpansPerformanceSerializer |
python | tensorflow__tensorflow | tensorflow/python/framework/function_test.py | {
"start": 48126,
"end": 52925
} | class ____(test.TestCase):
BATCH_SIZE = 16
LSTM_DIMS = 32
NUM_UNROLL = 20
def _Weights(self):
dims = self.LSTM_DIMS
return random_ops.random_uniform([2 * dims, 4 * dims], -1, 1, seed=123456)
def _Input(self):
return random_ops.random_uniform(
[self.NUM_UNROLL, self.BATCH_SIZE, self.LSTM_DIMS], seed=654321)
# Helper to construct an LSTM cell graph.
@classmethod
def LSTMCell(cls, x, mprev, cprev, weights):
xm = array_ops.concat([x, mprev], 1)
i_i, i_g, f_g, o_g = array_ops.split(
value=math_ops.matmul(xm, weights), num_or_size_splits=4, axis=1)
new_c = math_ops.sigmoid(f_g) * cprev + math_ops.sigmoid(
i_g) * math_ops.tanh(i_i)
new_c = math_ops.maximum(math_ops.minimum(new_c, 50.0), -50.0)
new_m = math_ops.sigmoid(o_g) * math_ops.tanh(new_c)
return new_m, new_c
def _BuildForward(self, weights, inp, mode="cell"):
def Loop(cell, w, i):
x = array_ops_stack.unstack(i, self.NUM_UNROLL)
m = array_ops.zeros_like(x[0])
c = array_ops.zeros_like(x[0])
for i in range(self.NUM_UNROLL):
m, c = cell(x[i], m, c, w)
return m
cell = UnrollLSTMTest.LSTMCell
if mode == "complete":
# Constructs the complete graph in python.
return Loop(cell, weights, inp)
cell = function.Defun(dtypes.float32, dtypes.float32, dtypes.float32,
dtypes.float32)(
cell)
if mode == "cell":
# Just represent the LSTM as a function.
return Loop(cell, weights, inp)
if mode == "loop":
# Wraps the whole loop as a function.
@function.Defun(dtypes.float32, dtypes.float32)
def LSTMLoop(w, i):
return Loop(cell, w, i)
return LSTMLoop(weights, inp)
if mode == "loop10":
# Wraps 10 LSTM steps into one function, and the whole loop
# into another function that calls the former.
# Groups 10 steps at a time.
@function.Defun(dtypes.float32, dtypes.float32, dtypes.float32,
*([dtypes.float32] * 10))
def Loop10(w, m, c, *args):
for x in args:
m, c = cell(x, m, c, w)
return m, c
@function.Defun(dtypes.float32, dtypes.float32)
def LSTMLoop10(weights, inp):
x = array_ops_stack.unstack(inp, self.NUM_UNROLL)
m = array_ops.zeros_like(x[0])
c = array_ops.zeros_like(x[0])
assert self.NUM_UNROLL % 10 == 0
for i in range(0, self.NUM_UNROLL, 10):
m, c = Loop10(weights, m, c, *x[i:i + 10])
return m
return LSTMLoop10(weights, inp)
def testUnrollLSTM(self):
# Run one step of the unrolled lstm graph.
def RunForward(mode, cfg=None):
tf_logging.info("mode = %s", mode)
g = ops.Graph()
start = time.time()
with g.as_default():
weights = self._Weights()
inp = self._Input()
m = self._BuildForward(weights, inp, mode)
gdef = g.as_graph_def()
finish = time.time()
tf_logging.info("time: %f txt size: %d gdef bin size: %d", finish - start,
len(str(gdef)), len(gdef.SerializeToString()))
with g.as_default(), session.Session(config=cfg) as sess:
return self.evaluate(m)
mv0 = RunForward("complete")
for cfg in _OptimizerOptions():
tf_logging.info("cfg = %s", cfg)
mv1 = RunForward("cell", cfg)
mv2 = RunForward("loop", cfg)
mv3 = RunForward("loop10", cfg)
self.assertAllClose(mv0, mv1, rtol=1e-4)
self.assertAllClose(mv0, mv2, rtol=1e-4)
self.assertAllClose(mv0, mv3, rtol=1e-4)
def testUnrollLSTMGrad(self):
# Run one step of the unrolled lstm graph.
def RunForwardBackward(mode, cfg=None):
tf_logging.info("mode = %s", mode)
g = ops.Graph()
start = time.time()
with g.as_default():
weights = self._Weights()
inp = self._Input()
m = self._BuildForward(weights, inp, mode)
loss = math_ops.reduce_sum(math_ops.square(m))
dw = gradients_impl.gradients([loss], [weights])
gdef = g.as_graph_def()
finish = time.time()
tf_logging.info("time: %f txt size: %d gdef bin size: %d", finish - start,
len(str(gdef)), len(gdef.SerializeToString()))
with g.as_default(), session.Session(config=cfg) as sess:
return self.evaluate(dw)
d0 = RunForwardBackward("complete")
for cfg in _OptimizerOptions():
tf_logging.info("cfg = %s", cfg)
d1 = RunForwardBackward("cell", cfg)
d2 = RunForwardBackward("loop", cfg)
d3 = RunForwardBackward("loop10", cfg)
self.assertAllClose(d0, d1, rtol=1e-4, atol=1e-4)
self.assertAllClose(d0, d2, rtol=1e-4, atol=1e-4)
self.assertAllClose(d0, d3, rtol=1e-4, atol=1e-4)
| UnrollLSTMTest |
python | ray-project__ray | python/ray/serve/_private/long_poll.py | {
"start": 1725,
"end": 2172
} | class ____:
object_snapshot: Any
# The identifier for the object's version. There is no sequential relation
# among different objects' snapshot_ids.
snapshot_id: int
# Type signature for the update state callbacks. E.g.
# async def update_state(updated_object: Any):
# do_something(updated_object)
UpdateStateCallable = Callable[[Any], None]
KeyType = Union[str, LongPollNamespace, Tuple[LongPollNamespace, str]]
| UpdatedObject |
python | pallets__flask | src/flask/wrappers.py | {
"start": 430,
"end": 8110
} | class ____(RequestBase):
"""The request object used by default in Flask. Remembers the
matched endpoint and view arguments.
It is what ends up as :class:`~flask.request`. If you want to replace
the request object used you can subclass this and set
:attr:`~flask.Flask.request_class` to your subclass.
The request object is a :class:`~werkzeug.wrappers.Request` subclass and
provides all of the attributes Werkzeug defines plus a few Flask
specific ones.
"""
json_module: t.Any = json
#: The internal URL rule that matched the request. This can be
#: useful to inspect which methods are allowed for the URL from
#: a before/after handler (``request.url_rule.methods``) etc.
#: Though if the request's method was invalid for the URL rule,
#: the valid list is available in ``routing_exception.valid_methods``
#: instead (an attribute of the Werkzeug exception
#: :exc:`~werkzeug.exceptions.MethodNotAllowed`)
#: because the request was never internally bound.
#:
#: .. versionadded:: 0.6
url_rule: Rule | None = None
#: A dict of view arguments that matched the request. If an exception
#: happened when matching, this will be ``None``.
view_args: dict[str, t.Any] | None = None
#: If matching the URL failed, this is the exception that will be
#: raised / was raised as part of the request handling. This is
#: usually a :exc:`~werkzeug.exceptions.NotFound` exception or
#: something similar.
routing_exception: HTTPException | None = None
_max_content_length: int | None = None
_max_form_memory_size: int | None = None
_max_form_parts: int | None = None
@property
def max_content_length(self) -> int | None:
"""The maximum number of bytes that will be read during this request. If
this limit is exceeded, a 413 :exc:`~werkzeug.exceptions.RequestEntityTooLarge`
error is raised. If it is set to ``None``, no limit is enforced at the
Flask application level. However, if it is ``None`` and the request has
no ``Content-Length`` header and the WSGI server does not indicate that
it terminates the stream, then no data is read to avoid an infinite
stream.
Each request defaults to the :data:`MAX_CONTENT_LENGTH` config, which
defaults to ``None``. It can be set on a specific ``request`` to apply
the limit to that specific view. This should be set appropriately based
on an application's or view's specific needs.
.. versionchanged:: 3.1
This can be set per-request.
.. versionchanged:: 0.6
This is configurable through Flask config.
"""
if self._max_content_length is not None:
return self._max_content_length
if not current_app:
return super().max_content_length
return current_app.config["MAX_CONTENT_LENGTH"] # type: ignore[no-any-return]
@max_content_length.setter
def max_content_length(self, value: int | None) -> None:
self._max_content_length = value
@property
def max_form_memory_size(self) -> int | None:
"""The maximum size in bytes any non-file form field may be in a
``multipart/form-data`` body. If this limit is exceeded, a 413
:exc:`~werkzeug.exceptions.RequestEntityTooLarge` error is raised. If it
is set to ``None``, no limit is enforced at the Flask application level.
Each request defaults to the :data:`MAX_FORM_MEMORY_SIZE` config, which
defaults to ``500_000``. It can be set on a specific ``request`` to
apply the limit to that specific view. This should be set appropriately
based on an application's or view's specific needs.
.. versionchanged:: 3.1
This is configurable through Flask config.
"""
if self._max_form_memory_size is not None:
return self._max_form_memory_size
if not current_app:
return super().max_form_memory_size
return current_app.config["MAX_FORM_MEMORY_SIZE"] # type: ignore[no-any-return]
@max_form_memory_size.setter
def max_form_memory_size(self, value: int | None) -> None:
self._max_form_memory_size = value
@property # type: ignore[override]
def max_form_parts(self) -> int | None:
"""The maximum number of fields that may be present in a
``multipart/form-data`` body. If this limit is exceeded, a 413
:exc:`~werkzeug.exceptions.RequestEntityTooLarge` error is raised. If it
is set to ``None``, no limit is enforced at the Flask application level.
Each request defaults to the :data:`MAX_FORM_PARTS` config, which
defaults to ``1_000``. It can be set on a specific ``request`` to apply
the limit to that specific view. This should be set appropriately based
on an application's or view's specific needs.
.. versionchanged:: 3.1
This is configurable through Flask config.
"""
if self._max_form_parts is not None:
return self._max_form_parts
if not current_app:
return super().max_form_parts
return current_app.config["MAX_FORM_PARTS"] # type: ignore[no-any-return]
@max_form_parts.setter
def max_form_parts(self, value: int | None) -> None:
self._max_form_parts = value
@property
def endpoint(self) -> str | None:
"""The endpoint that matched the request URL.
This will be ``None`` if matching failed or has not been
performed yet.
This in combination with :attr:`view_args` can be used to
reconstruct the same URL or a modified URL.
"""
if self.url_rule is not None:
return self.url_rule.endpoint # type: ignore[no-any-return]
return None
@property
def blueprint(self) -> str | None:
"""The registered name of the current blueprint.
This will be ``None`` if the endpoint is not part of a
blueprint, or if URL matching failed or has not been performed
yet.
This does not necessarily match the name the blueprint was
created with. It may have been nested, or registered with a
different name.
"""
endpoint = self.endpoint
if endpoint is not None and "." in endpoint:
return endpoint.rpartition(".")[0]
return None
@property
def blueprints(self) -> list[str]:
"""The registered names of the current blueprint upwards through
parent blueprints.
This will be an empty list if there is no current blueprint, or
if URL matching failed.
.. versionadded:: 2.0.1
"""
name = self.blueprint
if name is None:
return []
return _split_blueprint_path(name)
def _load_form_data(self) -> None:
super()._load_form_data()
# In debug mode we're replacing the files multidict with an ad-hoc
# subclass that raises a different error for key errors.
if (
current_app
and current_app.debug
and self.mimetype != "multipart/form-data"
and not self.files
):
from .debughelpers import attach_enctype_error_multidict
attach_enctype_error_multidict(self)
def on_json_loading_failed(self, e: ValueError | None) -> t.Any:
try:
return super().on_json_loading_failed(e)
except BadRequest as ebr:
if current_app and current_app.debug:
raise
raise BadRequest() from ebr
| Request |
python | dagster-io__dagster | python_modules/libraries/dagster-azure/dagster_azure_tests/pipes_tests/mock_blob_storage.py | {
"start": 2909,
"end": 2994
} | class ____(BytesIO):
def readall(self):
return self.read()
| BytesIOWithReadAll |
python | celery__celery | celery/worker/components.py | {
"start": 6229,
"end": 6618
} | class ____(bootsteps.Step):
"""Bootstep that sets up between-restart state database file."""
def __init__(self, w, **kwargs):
self.enabled = w.statedb
w._persistence = None
super().__init__(w, **kwargs)
def create(self, w):
w._persistence = w.state.Persistent(w.state, w.statedb, w.app.clock)
atexit.register(w._persistence.save)
| StateDB |
python | kubernetes-client__python | kubernetes/e2e_test/test_watch.py | {
"start": 1129,
"end": 3220
} | class ____(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.config = base.get_e2e_configuration()
def test_watch_configmaps(self):
client = api_client.ApiClient(configuration=self.config)
api = core_v1_api.CoreV1Api(client)
# create a configmap
name_a = 'configmap-a-' + short_uuid()
configmap_a = config_map_with_value(name_a, "a")
api.create_namespaced_config_map(
body=configmap_a, namespace='default')
# list all configmaps and extract the resource version
resp = api.list_namespaced_config_map('default', label_selector="e2e-tests=true")
rv = resp.metadata.resource_version
# create another configmap
name_b = 'configmap-b-' + short_uuid()
configmap_b = config_map_with_value(name_b, "b")
api.create_namespaced_config_map(
body=configmap_b, namespace='default')
# patch configmap b
configmap_b['data']['config'] = "{}"
api.patch_namespaced_config_map(
name=name_b, namespace='default', body=configmap_b)
# delete all configmaps
api.delete_collection_namespaced_config_map(
namespace='default', label_selector="e2e-tests=true")
w = watch.Watch()
# expect to observe all events that happened after the initial LIST
expect = ['ADDED', 'MODIFIED', 'DELETED', 'DELETED']
i = 0
# start watching with the resource version we got from the LIST
for event in w.stream(api.list_namespaced_config_map,
namespace='default',
resource_version=rv,
timeout_seconds=5,
label_selector="e2e-tests=true"):
self.assertEqual(event['type'], expect[i])
# Kubernetes doesn't guarantee the order of the two objects
# being deleted
if i < 2:
self.assertEqual(event['object'].metadata.name, name_b)
i = i + 1
self.assertEqual(i, 4)
| TestClient |
python | zarr-developers__zarr-python | src/zarr/_cli/cli.py | {
"start": 517,
"end": 576
} | class ____(str, Enum):
v2 = "v2"
v3 = "v3"
| ZarrFormat |
python | numba__numba | numba/parfors/parfor.py | {
"start": 3765,
"end": 17419
} | class ____(object):
def __new__(cls, *args):
return range(*args)
def min_parallel_impl(return_type, arg):
# XXX: use prange for 1D arrays since pndindex returns a 1-tuple instead of
# an integer. This causes type and fusion issues.
if arg.ndim == 0:
def min_1(in_arr):
return in_arr[()]
elif arg.ndim == 1:
if isinstance(arg.dtype, (types.NPDatetime, types.NPTimedelta)):
# NaT is always returned if it is in the array
def min_1(in_arr):
numba.parfors.parfor.init_prange()
min_checker(len(in_arr))
val = numba.cpython.builtins.get_type_max_value(in_arr.dtype)
for i in numba.parfors.parfor.internal_prange(len(in_arr)):
val = datetime_minimum(val, in_arr[i])
return val
else:
def min_1(in_arr):
numba.parfors.parfor.init_prange()
min_checker(len(in_arr))
val = numba.cpython.builtins.get_type_max_value(in_arr.dtype)
for i in numba.parfors.parfor.internal_prange(len(in_arr)):
val = min(val, in_arr[i])
return val
else:
def min_1(in_arr):
numba.parfors.parfor.init_prange()
min_checker(len(in_arr))
val = numba.cpython.builtins.get_type_max_value(in_arr.dtype)
for i in numba.pndindex(in_arr.shape):
val = min(val, in_arr[i])
return val
return min_1
def max_parallel_impl(return_type, arg):
if arg.ndim == 0:
def max_1(in_arr):
return in_arr[()]
elif arg.ndim == 1:
if isinstance(arg.dtype, (types.NPDatetime, types.NPTimedelta)):
# NaT is always returned if it is in the array
def max_1(in_arr):
numba.parfors.parfor.init_prange()
max_checker(len(in_arr))
val = numba.cpython.builtins.get_type_min_value(in_arr.dtype)
for i in numba.parfors.parfor.internal_prange(len(in_arr)):
val = datetime_maximum(val, in_arr[i])
return val
else:
def max_1(in_arr):
numba.parfors.parfor.init_prange()
max_checker(len(in_arr))
val = numba.cpython.builtins.get_type_min_value(in_arr.dtype)
for i in numba.parfors.parfor.internal_prange(len(in_arr)):
val = max(val, in_arr[i])
return val
else:
def max_1(in_arr):
numba.parfors.parfor.init_prange()
max_checker(len(in_arr))
val = numba.cpython.builtins.get_type_min_value(in_arr.dtype)
for i in numba.pndindex(in_arr.shape):
val = max(val, in_arr[i])
return val
return max_1
def argmin_parallel_impl(in_arr):
numba.parfors.parfor.init_prange()
argmin_checker(len(in_arr))
A = in_arr.ravel()
init_val = numba.cpython.builtins.get_type_max_value(A.dtype)
ival = typing.builtins.IndexValue(0, init_val)
for i in numba.parfors.parfor.internal_prange(len(A)):
curr_ival = typing.builtins.IndexValue(i, A[i])
ival = min(ival, curr_ival)
return ival.index
def argmax_parallel_impl(in_arr):
numba.parfors.parfor.init_prange()
argmax_checker(len(in_arr))
A = in_arr.ravel()
init_val = numba.cpython.builtins.get_type_min_value(A.dtype)
ival = typing.builtins.IndexValue(0, init_val)
for i in numba.parfors.parfor.internal_prange(len(A)):
curr_ival = typing.builtins.IndexValue(i, A[i])
ival = max(ival, curr_ival)
return ival.index
def dotvv_parallel_impl(a, b):
numba.parfors.parfor.init_prange()
l = a.shape[0]
m = b.shape[0]
# TODO: investigate assert_equiv
#assert_equiv("sizes of l, m do not match", l, m)
s = 0
for i in numba.parfors.parfor.internal_prange(l):
s += a[i] * b[i]
return s
def dotvm_parallel_impl(a, b):
numba.parfors.parfor.init_prange()
l = a.shape
m, n = b.shape
# TODO: investigate assert_equiv
#assert_equiv("Sizes of l, m do not match", l, m)
c = np.zeros(n, a.dtype)
# TODO: evaluate dotvm implementation options
#for i in prange(n):
# s = 0
# for j in range(m):
# s += a[j] * b[j, i]
# c[i] = s
for i in numba.parfors.parfor.internal_prange(m):
c += a[i] * b[i, :]
return c
def dotmv_parallel_impl(a, b):
numba.parfors.parfor.init_prange()
m, n = a.shape
l = b.shape
# TODO: investigate assert_equiv
#assert_equiv("sizes of n, l do not match", n, l)
c = np.empty(m, a.dtype)
for i in numba.parfors.parfor.internal_prange(m):
s = 0
for j in range(n):
s += a[i, j] * b[j]
c[i] = s
return c
def dot_parallel_impl(return_type, atyp, btyp):
# Note that matrix matrix multiply is not translated.
if (isinstance(atyp, types.npytypes.Array) and
isinstance(btyp, types.npytypes.Array)):
if atyp.ndim == btyp.ndim == 1:
return dotvv_parallel_impl
# TODO: evaluate support for dotvm and enable
#elif atyp.ndim == 1 and btyp.ndim == 2:
# return dotvm_parallel_impl
elif atyp.ndim == 2 and btyp.ndim == 1:
return dotmv_parallel_impl
def sum_parallel_impl(return_type, arg):
zero = return_type(0)
if arg.ndim == 0:
def sum_1(in_arr):
return in_arr[()]
elif arg.ndim == 1:
def sum_1(in_arr):
numba.parfors.parfor.init_prange()
val = zero
for i in numba.parfors.parfor.internal_prange(len(in_arr)):
val += in_arr[i]
return val
else:
def sum_1(in_arr):
numba.parfors.parfor.init_prange()
val = zero
for i in numba.pndindex(in_arr.shape):
val += in_arr[i]
return val
return sum_1
def prod_parallel_impl(return_type, arg):
one = return_type(1)
if arg.ndim == 0:
def prod_1(in_arr):
return in_arr[()]
elif arg.ndim == 1:
def prod_1(in_arr):
numba.parfors.parfor.init_prange()
val = one
for i in numba.parfors.parfor.internal_prange(len(in_arr)):
val *= in_arr[i]
return val
else:
def prod_1(in_arr):
numba.parfors.parfor.init_prange()
val = one
for i in numba.pndindex(in_arr.shape):
val *= in_arr[i]
return val
return prod_1
def mean_parallel_impl(return_type, arg):
# can't reuse sum since output type is different
zero = return_type(0)
if arg.ndim == 0:
def mean_1(in_arr):
return in_arr[()]
elif arg.ndim == 1:
def mean_1(in_arr):
numba.parfors.parfor.init_prange()
val = zero
for i in numba.parfors.parfor.internal_prange(len(in_arr)):
val += in_arr[i]
return val/len(in_arr)
else:
def mean_1(in_arr):
numba.parfors.parfor.init_prange()
val = zero
for i in numba.pndindex(in_arr.shape):
val += in_arr[i]
return val/in_arr.size
return mean_1
def var_parallel_impl(return_type, arg):
if arg.ndim == 0:
def var_1(in_arr):
return 0
elif arg.ndim == 1:
def var_1(in_arr):
# Compute the mean
m = in_arr.mean()
# Compute the sum of square diffs
numba.parfors.parfor.init_prange()
ssd = 0
for i in numba.parfors.parfor.internal_prange(len(in_arr)):
val = in_arr[i] - m
ssd += np.real(val * np.conj(val))
return ssd / len(in_arr)
else:
def var_1(in_arr):
# Compute the mean
m = in_arr.mean()
# Compute the sum of square diffs
numba.parfors.parfor.init_prange()
ssd = 0
for i in numba.pndindex(in_arr.shape):
val = in_arr[i] - m
ssd += np.real(val * np.conj(val))
return ssd / in_arr.size
return var_1
def std_parallel_impl(return_type, arg):
def std_1(in_arr):
return in_arr.var() ** 0.5
return std_1
def arange_parallel_impl(return_type, *args, dtype=None):
inferred_dtype = as_dtype(return_type.dtype)
def arange_1(stop):
return np.arange(0, stop, 1, inferred_dtype)
def arange_1_dtype(stop, dtype):
return np.arange(0, stop, 1, dtype)
def arange_2(start, stop):
return np.arange(start, stop, 1, inferred_dtype)
def arange_2_dtype(start, stop, dtype):
return np.arange(start, stop, 1, dtype)
def arange_3(start, stop, step):
return np.arange(start, stop, step, inferred_dtype)
def arange_3_dtype(start, stop, step, dtype):
return np.arange(start, stop, step, dtype)
if any(isinstance(a, types.Complex) for a in args):
def arange_4(start, stop, step, dtype):
numba.parfors.parfor.init_prange()
nitems_c = (stop - start) / step
nitems_r = math.ceil(nitems_c.real)
nitems_i = math.ceil(nitems_c.imag)
nitems = int(max(min(nitems_i, nitems_r), 0))
arr = np.empty(nitems, dtype)
for i in numba.parfors.parfor.internal_prange(nitems):
arr[i] = start + i * step
return arr
else:
def arange_4(start, stop, step, dtype):
numba.parfors.parfor.init_prange()
nitems_r = math.ceil((stop - start) / step)
nitems = int(max(nitems_r, 0))
arr = np.empty(nitems, dtype)
val = start
for i in numba.parfors.parfor.internal_prange(nitems):
arr[i] = start + i * step
return arr
if len(args) == 1:
return arange_1 if dtype is None else arange_1_dtype
elif len(args) == 2:
return arange_2 if dtype is None else arange_2_dtype
elif len(args) == 3:
return arange_3 if dtype is None else arange_3_dtype
elif len(args) == 4:
return arange_4
else:
raise ValueError("parallel arange with types {}".format(args))
def linspace_parallel_impl(return_type, *args):
dtype = as_dtype(return_type.dtype)
def linspace_2(start, stop):
return np.linspace(start, stop, 50)
def linspace_3(start, stop, num):
numba.parfors.parfor.init_prange()
arr = np.empty(num, dtype)
div = num - 1
delta = stop - start
arr[0] = start
for i in numba.parfors.parfor.internal_prange(num):
arr[i] = start + delta * (i / div)
return arr
if len(args) == 2:
return linspace_2
elif len(args) == 3:
return linspace_3
else:
raise ValueError("parallel linspace with types {}".format(args))
swap_functions_map = {
('argmin', 'numpy'): lambda r,a: argmin_parallel_impl,
('argmax', 'numpy'): lambda r,a: argmax_parallel_impl,
('min', 'numpy'): min_parallel_impl,
('max', 'numpy'): max_parallel_impl,
('amin', 'numpy'): min_parallel_impl,
('amax', 'numpy'): max_parallel_impl,
('sum', 'numpy'): sum_parallel_impl,
('prod', 'numpy'): prod_parallel_impl,
('mean', 'numpy'): mean_parallel_impl,
('var', 'numpy'): var_parallel_impl,
('std', 'numpy'): std_parallel_impl,
('dot', 'numpy'): dot_parallel_impl,
('arange', 'numpy'): arange_parallel_impl,
('linspace', 'numpy'): linspace_parallel_impl,
}
def fill_parallel_impl(return_type, arr, val):
"""Parallel implementation of ndarray.fill. The array on
which to operate is retrieved from get_call_name and
is passed along with the value to fill.
"""
if arr.ndim == 1:
def fill_1(in_arr, val):
numba.parfors.parfor.init_prange()
for i in numba.parfors.parfor.internal_prange(len(in_arr)):
in_arr[i] = val
return None
else:
def fill_1(in_arr, val):
numba.parfors.parfor.init_prange()
for i in numba.pndindex(in_arr.shape):
in_arr[i] = val
return None
return fill_1
replace_functions_ndarray = {
'fill': fill_parallel_impl,
}
@register_jitable
def max_checker(arr_size):
if arr_size == 0:
raise ValueError(("zero-size array to reduction operation "
"maximum which has no identity"))
@register_jitable
def min_checker(arr_size):
if arr_size == 0:
raise ValueError(("zero-size array to reduction operation "
"minimum which has no identity"))
@register_jitable
def argmin_checker(arr_size):
if arr_size == 0:
raise ValueError("attempt to get argmin of an empty sequence")
@register_jitable
def argmax_checker(arr_size):
if arr_size == 0:
raise ValueError("attempt to get argmax of an empty sequence")
checker_impl = namedtuple('checker_impl', ['name', 'func'])
replace_functions_checkers_map = {
('argmin', 'numpy') : checker_impl('argmin_checker', argmin_checker),
('argmax', 'numpy') : checker_impl('argmax_checker', argmax_checker),
('min', 'numpy') : checker_impl('min_checker', min_checker),
('max', 'numpy') : checker_impl('max_checker', max_checker),
('amin', 'numpy') : checker_impl('min_checker', min_checker),
('amax', 'numpy') : checker_impl('max_checker', max_checker),
}
| internal_prange |
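A serial sketch of the reduction pattern these parallel implementations generate (my own NumPy stand-in, not numba's compiled output): the two-pass variance below mirrors var_parallel_impl, with np.ndindex standing in for numba.pndindex.

import numpy as np

def var_twopass(in_arr):
    # Pass 1: mean; pass 2: accumulate squared deviations, as in var_1.
    m = in_arr.mean()
    ssd = 0.0
    for i in np.ndindex(in_arr.shape):  # serial stand-in for numba.pndindex
        val = in_arr[i] - m
        ssd += np.real(val * np.conj(val))
    return ssd / in_arr.size

a = np.arange(12.0).reshape(3, 4)
assert np.isclose(var_twopass(a), a.var())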
python | Pylons__pyramid | tests/test_config/test_views.py | {
"start": 105693,
"end": 107842
} | class ____(unittest.TestCase):
def _makeOne(self, view1, view2):
from pyramid.config.views import runtime_exc_view
return runtime_exc_view(view1, view2)
def test_call(self):
def view1(context, request):
return 'OK'
def view2(context, request): # pragma: no cover
raise AssertionError
result_view = self._makeOne(view1, view2)
request = DummyRequest()
result = result_view(None, request)
self.assertEqual(result, 'OK')
def test_call_dispatches_on_exception(self):
def view1(context, request): # pragma: no cover
raise AssertionError
def view2(context, request):
return 'OK'
result_view = self._makeOne(view1, view2)
request = DummyRequest()
request.exception = Exception()
result = result_view(None, request)
self.assertEqual(result, 'OK')
def test_permitted(self):
def errfn(context, request): # pragma: no cover
raise AssertionError
def view1(context, request): # pragma: no cover
raise AssertionError
view1.__permitted__ = lambda c, r: 'OK'
def view2(context, request): # pragma: no cover
raise AssertionError
view2.__permitted__ = errfn
result_view = self._makeOne(view1, view2)
request = DummyRequest()
result = result_view.__permitted__(None, request)
self.assertEqual(result, 'OK')
def test_permitted_dispatches_on_exception(self):
def errfn(context, request): # pragma: no cover
raise AssertionError
def view1(context, request): # pragma: no cover
raise AssertionError
view1.__permitted__ = errfn
def view2(context, request): # pragma: no cover
raise AssertionError
view2.__permitted__ = lambda c, r: 'OK'
result_view = self._makeOne(view1, view2)
request = DummyRequest()
request.exception = Exception()
result = result_view.__permitted__(None, request)
self.assertEqual(result, 'OK')
| Test_runtime_exc_view |
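A minimal standalone sketch of the dispatch rule these tests exercise (my own re-implementation, not Pyramid's runtime_exc_view): the second view is used only when the request carries an exception.

def runtime_exc_view_sketch(view1, view2):
    def wrapper(context, request):
        # Dispatch to the exception view when request.exception is set.
        if getattr(request, 'exception', None) is not None:
            return view2(context, request)
        return view1(context, request)
    return wrapper

class Req:  # stand-in for DummyRequest
    exception = None

view = runtime_exc_view_sketch(lambda c, r: 'OK', lambda c, r: 'ERR')
assert view(None, Req()) == 'OK'
req = Req()
req.exception = Exception()
assert view(None, req) == 'ERR'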
python | matplotlib__matplotlib | lib/matplotlib/hatch.py | {
"start": 3507,
"end": 5035
} | class ____(HatchPatternBase):
filled = False
def __init__(self, hatch, density):
if self.num_rows == 0:
self.num_shapes = 0
self.num_vertices = 0
else:
self.num_shapes = ((self.num_rows // 2 + 1) * (self.num_rows + 1) +
(self.num_rows // 2) * self.num_rows)
self.num_vertices = (self.num_shapes *
len(self.shape_vertices) *
(1 if self.filled else 2))
def set_vertices_and_codes(self, vertices, codes):
offset = 1.0 / self.num_rows
shape_vertices = self.shape_vertices * offset * self.size
shape_codes = self.shape_codes
if not self.filled:
shape_vertices = np.concatenate( # Forward, then backward.
[shape_vertices, shape_vertices[::-1] * 0.9])
shape_codes = np.concatenate([shape_codes, shape_codes])
vertices_parts = []
codes_parts = []
for row in range(self.num_rows + 1):
if row % 2 == 0:
cols = np.linspace(0, 1, self.num_rows + 1)
else:
cols = np.linspace(offset / 2, 1 - offset / 2, self.num_rows)
row_pos = row * offset
for col_pos in cols:
vertices_parts.append(shape_vertices + [col_pos, row_pos])
codes_parts.append(shape_codes)
np.concatenate(vertices_parts, out=vertices)
np.concatenate(codes_parts, out=codes)
| Shapes |
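A quick check of the staggered-grid count behind num_shapes (pure Python, names my own): full rows of num_rows + 1 markers alternate with half-offset rows of num_rows markers.

def count_shapes(num_rows):
    if num_rows == 0:
        return 0
    full_rows = num_rows // 2 + 1   # rows 0, 2, 4, ...
    offset_rows = num_rows // 2     # half-offset rows in between
    return full_rows * (num_rows + 1) + offset_rows * num_rows

assert count_shapes(2) == 8   # 2 full rows of 3 plus 1 offset row of 2
assert count_shapes(3) == 11  # 2 full rows of 4 plus 1 offset row of 3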
python | huggingface__transformers | src/transformers/models/evolla/modeling_evolla.py | {
"start": 23235,
"end": 25331
} | class ____(nn.Module):
def __init__(self, dim, dim_head=64, heads=8):
super().__init__()
self.scale = dim_head**-0.5
self.heads = heads
inner_dim = dim_head * heads
self.norm_media = nn.LayerNorm(dim)
self.norm_latents = nn.LayerNorm(dim)
self.to_q = nn.Linear(dim, inner_dim, bias=False)
self.to_kv = nn.Linear(dim, inner_dim * 2, bias=False)
self.to_out = nn.Linear(inner_dim, dim, bias=False)
def forward(self, x, latents, mask):
"""
Args:
x (torch.Tensor): image features
shape (b, n1, D)
latent (torch.Tensor): latent features
shape (b, n2, D); n2: num of latent tokens
"""
x = self.norm_media(x)
latents = self.norm_latents(latents)
h = self.heads
q = self.to_q(latents)
kv_input = torch.cat((x, latents), dim=-2)
k, v = self.to_kv(kv_input).chunk(
2, dim=-1
) # each: batch_size, max_protein_length+num_latents, dim_head*num_heads
q = q.view(q.size(0), q.size(1), h, -1).permute(0, 2, 1, 3)
k = k.view(k.size(0), k.size(1), h, -1).permute(0, 2, 1, 3)
v = v.view(v.size(0), v.size(1), h, -1).permute(0, 2, 1, 3)
q = q * self.scale # batch_size, num_heads, num_latents, dim_head
# attention
sim = torch.matmul(q, k.transpose(-1, -2))
sim = sim - sim.amax(dim=-1, keepdim=True).detach()
bs, nh, skd, okd = sim.shape
ones = torch.ones(nh, skd).to(mask.device) # Create a tensor of ones with shape (nh, skd)
mask_exp = mask[:, None, None, :]
ones_exp = ones[None, :, :, None]
mask = mask_exp * ones_exp
sim = sim.masked_fill((1 - mask).bool(), -1e4)
attn = sim.softmax(dim=-1)
out = torch.matmul(attn, v)
out = out.permute(0, 2, 1, 3)
# [batch, seq, head, features] -> [batch, seq, head*features]
out = out.reshape(out.size(0), out.size(1), -1)
return self.to_out(out)
| EvollaSequenceCompressorAttention |
python | django__django | django/core/mail/backends/base.py | {
"start": 34,
"end": 1683
} | class ____:
"""
Base class for email backend implementations.
Subclasses must at least overwrite send_messages().
open() and close() can be called indirectly by using a backend object as a
context manager:
with backend as connection:
# do something with connection
pass
"""
def __init__(self, fail_silently=False, **kwargs):
self.fail_silently = fail_silently
def open(self):
"""
Open a network connection.
This method can be overwritten by backend implementations to
open a network connection.
It's up to the backend implementation to track the status of
a network connection if it's needed by the backend.
This method can be called by applications to force a single
network connection to be used when sending mails. See the
send_messages() method of the SMTP backend for a reference
implementation.
The default implementation does nothing.
"""
pass
def close(self):
"""Close a network connection."""
pass
def __enter__(self):
try:
self.open()
except Exception:
self.close()
raise
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
def send_messages(self, email_messages):
"""
Send one or more EmailMessage objects and return the number of email
messages sent.
"""
raise NotImplementedError(
"subclasses of BaseEmailBackend must override send_messages() method"
)
| BaseEmailBackend |
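A minimal subclass sketch (assumes Django is installed; CollectingBackend and the string stand-ins for EmailMessage objects are mine) showing the contract: implement send_messages, return the count, and use the backend as a context manager.

from django.core.mail.backends.base import BaseEmailBackend

class CollectingBackend(BaseEmailBackend):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.outbox = []  # messages "sent" so far

    def send_messages(self, email_messages):
        self.outbox.extend(email_messages)
        return len(email_messages)

with CollectingBackend() as connection:
    sent = connection.send_messages(['msg1', 'msg2'])
assert sent == 2 and len(connection.outbox) == 2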
python | bokeh__bokeh | src/bokeh/models/annotations/dimensional.py | {
"start": 5093,
"end": 5705
} | class ____(CustomDimensional):
""" Imperial units of length measurement.
"""
# explicit __init__ to support Init signatures
def __init__(self, **kwargs: Any) -> None:
super().__init__(**kwargs)
basis = Override(default={
"in": ( 1/12, "in", "inch" ),
"ft": ( 1, "ft", "foot" ),
"yd": ( 3, "yd", "yard" ),
"ch": ( 66, "ch", "chain" ),
"fur": ( 660, "fur", "furlong"),
"mi": ( 5280, "mi", "mile" ),
"lea": (15840, "lea", "league" ),
})
ticks = Override(default=[1, 3, 6, 12, 60])
| ImperialLength |
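A pure-Python sketch of how the basis table is used: every factor is expressed in feet, so converting between units is one multiply and one divide (basis_ft and convert are my names, not Bokeh API).

basis_ft = {"in": 1/12, "ft": 1, "yd": 3, "ch": 66,
            "fur": 660, "mi": 5280, "lea": 15840}

def convert(value, src, dst):
    # value[src] -> feet -> value[dst]
    return value * basis_ft[src] / basis_ft[dst]

assert convert(1, "mi", "yd") == 1760
assert convert(1, "lea", "mi") == 3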
python | nryoung__algorithms | tests/test_data_structures.py | {
"start": 22727,
"end": 24282
} | class ____(unittest.TestCase):
def setUp(self):
super(TestLCPSuffixArrays, self).setUp()
self.case_1 = "aaaaaa"
self.s_array_1 = [5, 4, 3, 2, 1, 0]
self.rank_1 = [5, 4, 3, 2, 1, 0]
self.lcp_1 = [1, 2, 3, 4, 5, 0]
self.case_2 = "abcabcdd"
self.s_array_2 = [0, 2, 4, 1, 3, 5, 7, 6]
self.rank_2 = [0, 3, 1, 4, 2, 5, 7, 6]
self.lcp_2 = [3, 0, 2, 0, 1, 0, 1, 0]
self.case_3 = "kmckirrrmppp"
self.s_array_3 = [3, 4, 0, 2, 1, 11, 10, 9, 5, 8, 7, 6]
self.rank_3 = [2, 4, 3, 0, 1, 8, 11, 10, 9, 7, 6, 5]
self.lcp_3 = [0, 0, 1, 0, 1, 0, 1, 2, 0, 1, 2, 0]
def test_lcp_array(self):
lcp = lcp_array.lcp_array(self.case_1, self.s_array_1, self.rank_1)
self.assertEqual(lcp, self.lcp_1)
lcp = lcp_array.lcp_array(self.case_2, self.s_array_2, self.rank_2)
self.assertEqual(lcp, self.lcp_2)
lcp = lcp_array.lcp_array(self.case_3, self.s_array_3, self.rank_3)
self.assertEqual(lcp, self.lcp_3)
def test_suffix_array(self):
s_array, rank = lcp_array.suffix_array(self.case_1)
self.assertEqual(s_array, self.s_array_1)
self.assertEqual(rank, self.rank_1)
s_array, rank = lcp_array.suffix_array(self.case_2)
self.assertEqual(s_array, self.s_array_2)
self.assertEqual(rank, self.rank_2)
s_array, rank = lcp_array.suffix_array(self.case_3)
self.assertEqual(s_array, self.s_array_3)
self.assertEqual(rank, self.rank_3)
| TestLCPSuffixArrays |
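A brute-force sketch matching the conventions these tests assume (my own helper, not the module under test): s_array lists suffix start indices in sorted order, rank is its inverse, and lcp[j] is the common-prefix length of the suffixes at sorted positions j and j + 1, with 0 in the last slot.

def suffix_array_sketch(s):
    s_array = sorted(range(len(s)), key=lambda i: s[i:])
    rank = [0] * len(s)
    for pos, i in enumerate(s_array):
        rank[i] = pos  # inverse permutation of s_array
    return s_array, rank

def lcp_array_sketch(s, s_array):
    def common(a, b):
        n = 0
        while n < len(a) and n < len(b) and a[n] == b[n]:
            n += 1
        return n
    return [common(s[s_array[j]:], s[s_array[j + 1]:])
            for j in range(len(s) - 1)] + [0]

sa, rank = suffix_array_sketch("aaaaaa")
assert sa == [5, 4, 3, 2, 1, 0] and rank == [5, 4, 3, 2, 1, 0]
assert lcp_array_sketch("aaaaaa", sa) == [1, 2, 3, 4, 5, 0]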
python | dateutil__dateutil | src/dateutil/tz/tz.py | {
"start": 38395,
"end": 38807
} | class ____(object):
def __init__(self, tzoffsetfrom, tzoffsetto, isdst,
tzname=None, rrule=None):
self.tzoffsetfrom = datetime.timedelta(seconds=tzoffsetfrom)
self.tzoffsetto = datetime.timedelta(seconds=tzoffsetto)
self.tzoffsetdiff = self.tzoffsetto - self.tzoffsetfrom
self.isdst = isdst
self.tzname = tzname
self.rrule = rrule
| _tzicalvtzcomp |
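A worked example of the stored offsets, assuming a US-Eastern-style DST rule (the concrete numbers are mine): standard time is UTC-5 and daylight time UTC-4, so tzoffsetdiff comes out to one hour.

import datetime

tzoffsetfrom = datetime.timedelta(seconds=-18000)  # -05:00 standard
tzoffsetto = datetime.timedelta(seconds=-14400)    # -04:00 daylight
assert tzoffsetto - tzoffsetfrom == datetime.timedelta(hours=1)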
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/array_ops/identity_n_op_py_test.py | {
"start": 881,
"end": 2514
} | class ____(test.TestCase):
def testInt32String_6(self):
value0, value1 = self.evaluate(
array_ops.identity_n([[1, 2, 3, 4, 5, 6],
[b"a", b"b", b"C", b"d", b"E", b"f", b"g"]]))
self.assertAllEqual(np.array([1, 2, 3, 4, 5, 6]), value0)
self.assertAllEqual(
np.array([b"a", b"b", b"C", b"d", b"E", b"f", b"g"]), value1)
def testInt32_shapes(self):
inp0 = constant_op.constant([10, 20, 30, 40, 50, 60], shape=[2, 3])
inp1 = constant_op.constant([11, 21, 31, 41, 51, 61], shape=[3, 2])
inp2 = constant_op.constant(
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15], shape=[5, 3])
value0, value1, value2 = self.evaluate(
array_ops.identity_n([inp0, inp1, inp2]))
self.assertAllEqual(np.array([[10, 20, 30], [40, 50, 60]]), value0)
self.assertAllEqual(np.array([[11, 21], [31, 41], [51, 61]]), value1)
self.assertAllEqual(
np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12], [13, 14, 15]]),
value2)
def testString(self):
source = [b"A", b"b", b"C", b"d", b"E", b"f"]
[value] = self.evaluate(array_ops.identity_n([source]))
self.assertAllEqual(source, value)
def testIdentityShape(self):
with self.cached_session():
shape = [2, 3]
array_2x3 = [[1, 2, 3], [6, 5, 4]]
tensor = constant_op.constant(array_2x3)
self.assertEqual(shape, tensor.get_shape())
self.assertEqual(shape, array_ops.identity_n([tensor])[0].get_shape())
self.assertEqual(shape, array_ops.identity_n([array_2x3])[0].get_shape())
if __name__ == "__main__":
test.main()
| IdentityNOpTest |
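A quick eager-mode check of the op under test (assumes TensorFlow is installed): tf.identity_n returns one identity tensor per input, preserving each input's dtype and shape, which is what the mixed int/string case above relies on.

import tensorflow as tf

a, b = tf.identity_n([tf.constant([1, 2, 3]),
                      tf.constant([b"x", b"y"])])
assert a.shape == (3,) and b.dtype == tf.string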
python | readthedocs__readthedocs.org | readthedocs/builds/forms.py | {
"start": 875,
"end": 4885
} | class ____(forms.ModelForm):
project = forms.CharField(widget=forms.HiddenInput(), required=False)
class Meta:
model = Version
states_fields = ["active", "hidden"]
privacy_fields = ["privacy_level"]
fields = (
"project",
"slug",
*states_fields,
*privacy_fields,
)
def __init__(self, *args, **kwargs):
self.project = kwargs.pop("project")
super().__init__(*args, **kwargs)
field_sets = [
Fieldset(
_("States"),
HTML(render_to_string("projects/project_version_states_help_text.html")),
*self.Meta.states_fields,
),
]
if settings.ALLOW_PRIVATE_REPOS:
field_sets.append(
Fieldset(
_("Privacy"),
*self.Meta.privacy_fields,
)
)
else:
self.fields.pop("privacy_level")
field_sets.append(
HTML(
render_to_string(
"projects/project_version_submit.html",
context={"version": self.instance},
)
)
)
# Don't allow changing the slug of machine created versions
# (stable/latest), as we rely on the slug to identify them.
if self.instance and self.instance.machine:
self.fields["slug"].disabled = True
self.helper = FormHelper()
self.helper.layout = Layout(*field_sets)
# We need to know if the version was active before the update.
# We use this value in the save method.
self._was_active = self.instance.active if self.instance else False
self._previous_slug = self.instance.slug if self.instance else None
def clean_active(self):
active = self.cleaned_data["active"]
if self._is_default_version() and not active:
msg = _(
"{version} is the default version of the project, it should be active.",
)
raise forms.ValidationError(
msg.format(version=self.instance.verbose_name),
)
return active
def _is_default_version(self):
project = self.instance.project
return project.default_version == self.instance.slug
def clean_slug(self):
slug = self.cleaned_data["slug"]
validated_slug = generate_version_slug(slug)
if slug != validated_slug:
msg = _(
"The slug can contain lowercase letters, numbers, dots, dashes or underscores, "
f"and it must start with a lowercase letter or a number. Consider using '{validated_slug}'."
)
raise forms.ValidationError(msg)
# NOTE: Django already checks for unique slugs and raises a ValidationError,
        # but that message is attached to the whole form instead of the slug field.
# So we do the check here to provide a better error message.
if self.project.versions.filter(slug=slug).exclude(pk=self.instance.pk).exists():
raise forms.ValidationError(_("A version with that slug already exists."))
return slug
def clean_project(self):
return self.project
def save(self, commit=True):
# If the slug was changed, and the version was active,
# we need to delete all the resources, since the old slug is used in several places.
# NOTE: we call clean_resources with the previous slug,
# as all resources are associated with that slug.
if "slug" in self.changed_data and self._was_active:
self.instance.clean_resources(version_slug=self._previous_slug)
# We need to set the flag to False,
# so the post_save method triggers a new build.
self._was_active = False
obj = super().save(commit=commit)
obj.post_save(was_active=self._was_active)
return obj
| VersionForm |
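A standalone sketch of the slug rule clean_slug describes (the regex is my approximation, not Read the Docs' generate_version_slug): lowercase letters, numbers, dots, dashes, or underscores, starting with a lowercase letter or a number.

import re

SLUG_RE = re.compile(r"^[a-z0-9][a-z0-9._-]*$")

def is_valid_version_slug(slug):
    return bool(SLUG_RE.match(slug))

assert is_valid_version_slug("v1.0_rc-2")
assert not is_valid_version_slug("_hidden")  # bad leading character
assert not is_valid_version_slug("Latest")   # uppercase rejected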
python | scipy__scipy | scipy/stats/_continuous_distns.py | {
"start": 237650,
"end": 241702
} | class ____(rv_continuous):
r"""A Nakagami continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `nakagami` is:
.. math::
f(x, \nu) = \frac{2 \nu^\nu}{\Gamma(\nu)} x^{2\nu-1} \exp(-\nu x^2)
for :math:`x >= 0`, :math:`\nu > 0`. The distribution was introduced in
[2]_, see also [1]_ for further information.
`nakagami` takes ``nu`` as a shape parameter for :math:`\nu`.
%(after_notes)s
References
----------
.. [1] "Nakagami distribution", Wikipedia
https://en.wikipedia.org/wiki/Nakagami_distribution
.. [2] M. Nakagami, "The m-distribution - A general formula of intensity
distribution of rapid fading", Statistical methods in radio wave
propagation, Pergamon Press, 1960, 3-36.
:doi:`10.1016/B978-0-08-009306-2.50005-4`
%(example)s
"""
def _argcheck(self, nu):
return nu > 0
def _shape_info(self):
return [_ShapeInfo("nu", False, (0, np.inf), (False, False))]
def _pdf(self, x, nu):
return np.exp(self._logpdf(x, nu))
def _logpdf(self, x, nu):
# nakagami.pdf(x, nu) = 2 * nu**nu / gamma(nu) *
# x**(2*nu-1) * exp(-nu*x**2)
return (np.log(2) + sc.xlogy(nu, nu) - sc.gammaln(nu) +
sc.xlogy(2*nu - 1, x) - nu*x**2)
def _cdf(self, x, nu):
return sc.gammainc(nu, nu*x*x)
def _ppf(self, q, nu):
return np.sqrt(1.0/nu*sc.gammaincinv(nu, q))
def _sf(self, x, nu):
return sc.gammaincc(nu, nu*x*x)
def _isf(self, p, nu):
return np.sqrt(1/nu * sc.gammainccinv(nu, p))
def _stats(self, nu):
mu = sc.poch(nu, 0.5)/np.sqrt(nu)
mu2 = 1.0-mu*mu
g1 = mu * (1 - 4*nu*mu2) / 2.0 / nu / np.power(mu2, 1.5)
g2 = -6*mu**4*nu + (8*nu-2)*mu**2-2*nu + 1
g2 /= nu*mu2**2.0
return mu, mu2, g1, g2
def _entropy(self, nu):
shape = np.shape(nu)
# because somehow this isn't taken care of by the infrastructure...
nu = np.atleast_1d(nu)
A = sc.gammaln(nu)
B = nu - (nu - 0.5) * sc.digamma(nu)
C = -0.5 * np.log(nu) - np.log(2)
h = A + B + C
# This is the asymptotic sum of A and B (see gh-17868)
norm_entropy = stats.norm._entropy()
# Above, this is lost to rounding error for large nu, so use the
# asymptotic sum when the approximation becomes accurate
i = nu > 5e4 # roundoff error ~ approximation error
# -1 / (12 * nu) is the O(1/nu) term; see gh-17929
h[i] = C[i] + norm_entropy - 1/(12*nu[i])
return h.reshape(shape)[()]
def _rvs(self, nu, size=None, random_state=None):
# this relationship can be found in [1] or by a direct calculation
return np.sqrt(random_state.standard_gamma(nu, size=size) / nu)
def _fitstart(self, data, args=None):
if isinstance(data, CensoredData):
data = data._uncensor()
if args is None:
args = (1.0,) * self.numargs
# Analytical justified estimates
# see: https://docs.scipy.org/doc/scipy/reference/tutorial/stats/continuous_nakagami.html
loc = np.min(data)
scale = np.sqrt(np.sum((data - loc)**2) / len(data))
return args + (loc, scale)
nakagami = nakagami_gen(a=0.0, name="nakagami")
# The function name ncx2 is an abbreviation for noncentral chi squared.
def _ncx2_log_pdf(x, df, nc):
# We use (xs**2 + ns**2)/2 = (xs - ns)**2/2 + xs*ns, and include the
# factor of exp(-xs*ns) into the ive function to improve numerical
# stability at large values of xs. See also `rice.pdf`.
df2 = df/2.0 - 1.0
xs, ns = np.sqrt(x), np.sqrt(nc)
res = sc.xlogy(df2/2.0, x/nc) - 0.5*(xs - ns)**2
corr = sc.ive(df2, xs*ns) / 2.0
# Return res + np.log(corr) avoiding np.log(0)
return xpx.apply_where(
corr > 0,
(res, corr),
lambda r, c: r + np.log(c),
fill_value=-np.inf)
| nakagami_gen |
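A numerical check of the sampling relation used in _rvs (assumes NumPy and SciPy): if G ~ Gamma(nu), then sqrt(G / nu) is Nakagami(nu), so the sample mean should approach the closed-form mean.

import numpy as np
from scipy import stats

rng = np.random.default_rng(0)
nu = 2.5
samples = np.sqrt(rng.standard_gamma(nu, size=200_000) / nu)
assert abs(samples.mean() - stats.nakagami.mean(nu)) < 1e-2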
python | huggingface__transformers | src/transformers/modeling_outputs.py | {
"start": 77588,
"end": 79608
} | class ____(ModelOutput):
"""
Base class for outputs of semantic segmentation models.
Args:
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
Classification (or regression if config.num_labels==1) loss.
logits (`torch.FloatTensor` of shape `(batch_size, config.num_labels, logits_height, logits_width)`):
Classification scores for each pixel.
<Tip warning={true}>
The logits returned do not necessarily have the same size as the `pixel_values` passed as inputs. This is
to avoid doing two interpolations and losing some quality when a user needs to resize the logits to the
original image size as post-processing. You should always check your logits shape and resize as needed.
</Tip>
hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
one for the output of each layer) of shape `(batch_size, patch_size, hidden_size)`.
Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, patch_size,
sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
"""
loss: Optional[torch.FloatTensor] = None
logits: Optional[torch.FloatTensor] = None
hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None
attentions: Optional[tuple[torch.FloatTensor, ...]] = None
@dataclass
| SemanticSegmenterOutput |
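A sketch of the post-processing step the logits docstring recommends (assumes torch; the sizes are arbitrary): upsample the coarse logits back to the input resolution before taking the per-pixel argmax.

import torch
import torch.nn.functional as F

logits = torch.randn(1, 19, 128, 128)  # (batch, num_labels, h, w)
upsampled = F.interpolate(logits, size=(512, 512),
                          mode="bilinear", align_corners=False)
preds = upsampled.argmax(dim=1)        # per-pixel class ids
assert preds.shape == (1, 512, 512)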
python | matplotlib__matplotlib | lib/matplotlib/tests/test_axes.py | {
"start": 277247,
"end": 340516
} | class ____(mtransforms.Transform):
input_dims = 1
output_dims = 1
def __init__(self, dx):
self.dx = dx
def transform(self, values):
return values + self.dx
def inverted(self):
return _Translation(-self.dx)
@image_comparison(['secondary_xy.png'], style='mpl20',
tol=0 if platform.machine() == 'x86_64' else 0.027)
def test_secondary_xy():
fig, axs = plt.subplots(1, 2, figsize=(10, 5), constrained_layout=True)
def invert(x):
with np.errstate(divide='ignore'):
return 1 / x
for nn, ax in enumerate(axs):
ax.plot(np.arange(2, 11), np.arange(2, 11))
if nn == 0:
secax = ax.secondary_xaxis
else:
secax = ax.secondary_yaxis
secax(0.2, functions=(invert, invert))
secax(0.4, functions=(lambda x: 2 * x, lambda x: x / 2))
secax(0.6, functions=(lambda x: x**2, lambda x: x**(1/2)))
secax(0.8)
secax("top" if nn == 0 else "right", functions=_Translation(2))
secax(6.25, transform=ax.transData)
def test_secondary_fail():
fig, ax = plt.subplots()
ax.plot(np.arange(2, 11), np.arange(2, 11))
with pytest.raises(ValueError):
ax.secondary_xaxis(0.2, functions=(lambda x: 1 / x))
with pytest.raises(ValueError):
ax.secondary_xaxis('right')
with pytest.raises(ValueError):
ax.secondary_yaxis('bottom')
with pytest.raises(TypeError):
ax.secondary_xaxis(0.2, transform='error')
def test_secondary_resize():
fig, ax = plt.subplots(figsize=(10, 5))
ax.plot(np.arange(2, 11), np.arange(2, 11))
def invert(x):
with np.errstate(divide='ignore'):
return 1 / x
ax.secondary_xaxis('top', functions=(invert, invert))
fig.canvas.draw()
fig.set_size_inches((7, 4))
assert_allclose(ax.get_position().extents, [0.125, 0.1, 0.9, 0.9])
def test_secondary_minorloc():
fig, ax = plt.subplots(figsize=(10, 5))
ax.plot(np.arange(2, 11), np.arange(2, 11))
def invert(x):
with np.errstate(divide='ignore'):
return 1 / x
secax = ax.secondary_xaxis('top', functions=(invert, invert))
assert isinstance(secax._axis.get_minor_locator(),
mticker.NullLocator)
secax.minorticks_on()
assert isinstance(secax._axis.get_minor_locator(),
mticker.AutoMinorLocator)
ax.set_xscale('log')
plt.draw()
assert isinstance(secax._axis.get_minor_locator(),
mticker.LogLocator)
ax.set_xscale('linear')
plt.draw()
assert isinstance(secax._axis.get_minor_locator(),
mticker.NullLocator)
def test_secondary_formatter():
fig, ax = plt.subplots()
ax.set_xscale("log")
secax = ax.secondary_xaxis("top")
secax.xaxis.set_major_formatter(mticker.ScalarFormatter())
fig.canvas.draw()
assert isinstance(
secax.xaxis.get_major_formatter(), mticker.ScalarFormatter)
def test_secondary_init_xticks():
fig, ax = plt.subplots()
secax = ax.secondary_xaxis(1, xticks=[0, 1])
assert isinstance(secax.xaxis.get_major_locator(), mticker.FixedLocator)
with pytest.raises(TypeError):
secax.set_yticks([0, 1])
secax = ax.secondary_yaxis(1, yticks=[0, 1])
assert isinstance(secax.yaxis.get_major_locator(), mticker.FixedLocator)
with pytest.raises(TypeError):
secax.set_xticks([0, 1])
def test_secondary_repr():
fig, ax = plt.subplots()
secax = ax.secondary_xaxis("top")
assert repr(secax) == '<SecondaryAxis: >'
@image_comparison(['axis_options.png'], remove_text=True, style='mpl20')
def test_axis_options():
fig, axes = plt.subplots(2, 3)
for i, option in enumerate(('scaled', 'tight', 'image')):
# Draw a line and a circle fitting within the boundaries of the line
# The circle should look like a circle for 'scaled' and 'image'
# High/narrow aspect ratio
axes[0, i].plot((1, 2), (1, 3.2))
axes[0, i].axis(option)
axes[0, i].add_artist(mpatches.Circle((1.5, 1.5), radius=0.5,
facecolor='none', edgecolor='k'))
# Low/wide aspect ratio
axes[1, i].plot((1, 2.25), (1, 1.75))
axes[1, i].axis(option)
axes[1, i].add_artist(mpatches.Circle((1.5, 1.25), radius=0.25,
facecolor='none', edgecolor='k'))
def color_boxes(fig, ax):
"""
Helper for the tests below that test the extents of various Axes elements
"""
fig.canvas.draw()
bbaxis = []
for nn, axx in enumerate([ax.xaxis, ax.yaxis]):
bb = axx.get_tightbbox()
if bb:
axisr = mpatches.Rectangle(
(bb.x0, bb.y0), width=bb.width, height=bb.height,
linewidth=0.7, edgecolor='y', facecolor="none", transform=None,
zorder=3)
fig.add_artist(axisr)
bbaxis += [bb]
bbspines = []
for nn, a in enumerate(['bottom', 'top', 'left', 'right']):
bb = ax.spines[a].get_window_extent()
spiner = mpatches.Rectangle(
(bb.x0, bb.y0), width=bb.width, height=bb.height,
linewidth=0.7, edgecolor="green", facecolor="none", transform=None,
zorder=3)
fig.add_artist(spiner)
bbspines += [bb]
bb = ax.get_window_extent()
rect2 = mpatches.Rectangle(
(bb.x0, bb.y0), width=bb.width, height=bb.height,
linewidth=1.5, edgecolor="magenta", facecolor="none", transform=None,
zorder=2)
fig.add_artist(rect2)
bbax = bb
bb2 = ax.get_tightbbox()
rect2 = mpatches.Rectangle(
(bb2.x0, bb2.y0), width=bb2.width, height=bb2.height,
linewidth=3, edgecolor="red", facecolor="none", transform=None,
zorder=1)
fig.add_artist(rect2)
bbtb = bb2
return bbaxis, bbspines, bbax, bbtb
def test_normal_axes():
with rc_context({'_internal.classic_mode': False}):
fig, ax = plt.subplots(dpi=200, figsize=(6, 6))
fig.canvas.draw()
plt.close(fig)
bbaxis, bbspines, bbax, bbtb = color_boxes(fig, ax)
# test the axis bboxes
target = [
[123.375, 75.88888888888886, 983.25, 33.0],
[85.51388888888889, 99.99999999999997, 53.375, 993.0]
]
for nn, b in enumerate(bbaxis):
targetbb = mtransforms.Bbox.from_bounds(*target[nn])
assert_array_almost_equal(b.bounds, targetbb.bounds, decimal=2)
target = [
[150.0, 119.999, 930.0, 11.111],
[150.0, 1080.0, 930.0, 0.0],
[150.0, 119.9999, 11.111, 960.0],
[1068.8888, 119.9999, 11.111, 960.0]
]
for nn, b in enumerate(bbspines):
targetbb = mtransforms.Bbox.from_bounds(*target[nn])
assert_array_almost_equal(b.bounds, targetbb.bounds, decimal=2)
target = [150.0, 119.99999999999997, 930.0, 960.0]
targetbb = mtransforms.Bbox.from_bounds(*target)
assert_array_almost_equal(bbax.bounds, targetbb.bounds, decimal=2)
target = [85.5138, 75.88888, 1021.11, 1017.11]
targetbb = mtransforms.Bbox.from_bounds(*target)
assert_array_almost_equal(bbtb.bounds, targetbb.bounds, decimal=2)
# test that get_position roundtrips to get_window_extent
axbb = ax.get_position().transformed(fig.transFigure).bounds
assert_array_almost_equal(axbb, ax.get_window_extent().bounds, decimal=2)
def test_nodecorator():
with rc_context({'_internal.classic_mode': False}):
fig, ax = plt.subplots(dpi=200, figsize=(6, 6))
fig.canvas.draw()
ax.set(xticklabels=[], yticklabels=[])
bbaxis, bbspines, bbax, bbtb = color_boxes(fig, ax)
# test the axis bboxes
for nn, b in enumerate(bbaxis):
assert b is None
target = [
[150.0, 119.999, 930.0, 11.111],
[150.0, 1080.0, 930.0, 0.0],
[150.0, 119.9999, 11.111, 960.0],
[1068.8888, 119.9999, 11.111, 960.0]
]
for nn, b in enumerate(bbspines):
targetbb = mtransforms.Bbox.from_bounds(*target[nn])
assert_allclose(b.bounds, targetbb.bounds, atol=1e-2)
target = [150.0, 119.99999999999997, 930.0, 960.0]
targetbb = mtransforms.Bbox.from_bounds(*target)
assert_allclose(bbax.bounds, targetbb.bounds, atol=1e-2)
target = [150., 120., 930., 960.]
targetbb = mtransforms.Bbox.from_bounds(*target)
assert_allclose(bbtb.bounds, targetbb.bounds, atol=1e-2)
def test_displaced_spine():
with rc_context({'_internal.classic_mode': False}):
fig, ax = plt.subplots(dpi=200, figsize=(6, 6))
ax.set(xticklabels=[], yticklabels=[])
ax.spines.bottom.set_position(('axes', -0.1))
fig.canvas.draw()
bbaxis, bbspines, bbax, bbtb = color_boxes(fig, ax)
targets = [
[150., 24., 930., 11.111111],
[150.0, 1080.0, 930.0, 0.0],
[150.0, 119.9999, 11.111, 960.0],
[1068.8888, 119.9999, 11.111, 960.0]
]
for target, bbspine in zip(targets, bbspines):
targetbb = mtransforms.Bbox.from_bounds(*target)
assert_allclose(bbspine.bounds, targetbb.bounds, atol=1e-2)
target = [150.0, 119.99999999999997, 930.0, 960.0]
targetbb = mtransforms.Bbox.from_bounds(*target)
assert_allclose(bbax.bounds, targetbb.bounds, atol=1e-2)
target = [150., 24., 930., 1056.]
targetbb = mtransforms.Bbox.from_bounds(*target)
assert_allclose(bbtb.bounds, targetbb.bounds, atol=1e-2)
def test_tickdirs():
"""
Switch the tickdirs and make sure the bboxes switch with them
"""
targets = [[[150.0, 120.0, 930.0, 11.1111],
[150.0, 120.0, 11.111, 960.0]],
[[150.0, 108.8889, 930.0, 11.111111111111114],
[138.889, 120, 11.111, 960.0]],
[[150.0, 114.44444444444441, 930.0, 11.111111111111114],
[144.44444444444446, 119.999, 11.111, 960.0]]]
for dnum, dirs in enumerate(['in', 'out', 'inout']):
with rc_context({'_internal.classic_mode': False}):
fig, ax = plt.subplots(dpi=200, figsize=(6, 6))
ax.tick_params(direction=dirs)
fig.canvas.draw()
bbaxis, bbspines, bbax, bbtb = color_boxes(fig, ax)
for nn, num in enumerate([0, 2]):
targetbb = mtransforms.Bbox.from_bounds(*targets[dnum][nn])
assert_allclose(
bbspines[num].bounds, targetbb.bounds, atol=1e-2)
def test_minor_accountedfor():
with rc_context({'_internal.classic_mode': False}):
fig, ax = plt.subplots(dpi=200, figsize=(6, 6))
fig.canvas.draw()
ax.tick_params(which='both', direction='out')
bbaxis, bbspines, bbax, bbtb = color_boxes(fig, ax)
bbaxis, bbspines, bbax, bbtb = color_boxes(fig, ax)
targets = [[150.0, 108.88888888888886, 930.0, 11.111111111111114],
[138.8889, 119.9999, 11.1111, 960.0]]
for n in range(2):
targetbb = mtransforms.Bbox.from_bounds(*targets[n])
assert_allclose(
bbspines[n * 2].bounds, targetbb.bounds, atol=1e-2)
fig, ax = plt.subplots(dpi=200, figsize=(6, 6))
fig.canvas.draw()
ax.tick_params(which='both', direction='out')
ax.minorticks_on()
ax.tick_params(axis='both', which='minor', length=30)
fig.canvas.draw()
bbaxis, bbspines, bbax, bbtb = color_boxes(fig, ax)
targets = [[150.0, 36.66666666666663, 930.0, 83.33333333333334],
[66.6667, 120.0, 83.3333, 960.0]]
for n in range(2):
targetbb = mtransforms.Bbox.from_bounds(*targets[n])
assert_allclose(
bbspines[n * 2].bounds, targetbb.bounds, atol=1e-2)
@check_figures_equal()
def test_axis_bool_arguments(fig_test, fig_ref):
# Test if False and "off" give the same
fig_test.add_subplot(211).axis(False)
fig_ref.add_subplot(211).axis("off")
# Test if True after False gives the same as "on"
ax = fig_test.add_subplot(212)
ax.axis(False)
ax.axis(True)
fig_ref.add_subplot(212).axis("on")
def test_axis_extent_arg():
fig, ax = plt.subplots()
xmin = 5
xmax = 10
ymin = 15
ymax = 20
extent = ax.axis([xmin, xmax, ymin, ymax])
# test that the docstring is correct
assert tuple(extent) == (xmin, xmax, ymin, ymax)
# test that limits were set per the docstring
assert (xmin, xmax) == ax.get_xlim()
assert (ymin, ymax) == ax.get_ylim()
def test_axis_extent_arg2():
# Same as test_axis_extent_arg, but with keyword arguments
fig, ax = plt.subplots()
xmin = 5
xmax = 10
ymin = 15
ymax = 20
extent = ax.axis(xmin=xmin, xmax=xmax, ymin=ymin, ymax=ymax)
# test that the docstring is correct
assert tuple(extent) == (xmin, xmax, ymin, ymax)
# test that limits were set per the docstring
assert (xmin, xmax) == ax.get_xlim()
assert (ymin, ymax) == ax.get_ylim()
def test_hist_auto_bins():
_, bins, _ = plt.hist([[1, 2, 3], [3, 4, 5, 6]], bins='auto')
assert bins[0] <= 1
assert bins[-1] >= 6
def test_hist_nan_data():
fig, (ax1, ax2) = plt.subplots(2)
data = [1, 2, 3]
nan_data = data + [np.nan]
bins, edges, _ = ax1.hist(data)
with np.errstate(invalid='ignore'):
nanbins, nanedges, _ = ax2.hist(nan_data)
np.testing.assert_allclose(bins, nanbins)
np.testing.assert_allclose(edges, nanedges)
def test_hist_range_and_density():
_, bins, _ = plt.hist(np.random.rand(10), "auto",
range=(0, 1), density=True)
assert bins[0] == 0
assert bins[-1] == 1
def test_bar_errbar_zorder():
# Check that the zorder of errorbars is always greater than the bar they
# are plotted on
fig, ax = plt.subplots()
x = [1, 2, 3]
barcont = ax.bar(x=x, height=x, yerr=x, capsize=5, zorder=3)
data_line, caplines, barlinecols = barcont.errorbar.lines
for bar in barcont.patches:
for capline in caplines:
assert capline.zorder > bar.zorder
for barlinecol in barlinecols:
assert barlinecol.zorder > bar.zorder
def test_set_ticks_inverted():
fig, ax = plt.subplots()
ax.invert_xaxis()
ax.set_xticks([.3, .7])
assert ax.get_xlim() == (1, 0)
ax.set_xticks([-1])
assert ax.get_xlim() == (1, -1)
def test_aspect_nonlinear_adjustable_box():
fig = plt.figure(figsize=(10, 10)) # Square.
ax = fig.add_subplot()
ax.plot([.4, .6], [.4, .6]) # Set minpos to keep logit happy.
ax.set(xscale="log", xlim=(1, 10),
yscale="logit", ylim=(1/11, 1/1001),
aspect=1, adjustable="box")
ax.margins(0)
pos = fig.transFigure.transform_bbox(ax.get_position())
assert pos.height / pos.width == pytest.approx(2)
def test_aspect_nonlinear_adjustable_datalim():
fig = plt.figure(figsize=(10, 10)) # Square.
ax = fig.add_axes((.1, .1, .8, .8)) # Square.
ax.plot([.4, .6], [.4, .6]) # Set minpos to keep logit happy.
ax.set(xscale="log", xlim=(1, 100),
yscale="logit", ylim=(1 / 101, 1 / 11),
aspect=1, adjustable="datalim")
ax.margins(0)
ax.apply_aspect()
assert ax.get_xlim() == pytest.approx([1*10**(1/2), 100/10**(1/2)])
assert ax.get_ylim() == (1 / 101, 1 / 11)
def test_box_aspect():
# Test if axes with box_aspect=1 has same dimensions
# as axes with aspect equal and adjustable="box"
fig1, ax1 = plt.subplots()
axtwin = ax1.twinx()
axtwin.plot([12, 344])
ax1.set_box_aspect(1)
assert ax1.get_box_aspect() == 1.0
fig2, ax2 = plt.subplots()
ax2.margins(0)
ax2.plot([0, 2], [6, 8])
ax2.set_aspect("equal", adjustable="box")
fig1.canvas.draw()
fig2.canvas.draw()
bb1 = ax1.get_position()
bbt = axtwin.get_position()
bb2 = ax2.get_position()
assert_array_equal(bb1.extents, bb2.extents)
assert_array_equal(bbt.extents, bb2.extents)
def test_box_aspect_custom_position():
    # Test that axes with a custom position and box_aspect behave the
    # same regardless of the order in which they are set.
fig1, ax1 = plt.subplots()
ax1.set_position([0.1, 0.1, 0.9, 0.2])
fig1.canvas.draw()
ax1.set_box_aspect(1.)
fig2, ax2 = plt.subplots()
ax2.set_box_aspect(1.)
fig2.canvas.draw()
ax2.set_position([0.1, 0.1, 0.9, 0.2])
fig1.canvas.draw()
fig2.canvas.draw()
bb1 = ax1.get_position()
bb2 = ax2.get_position()
assert_array_equal(bb1.extents, bb2.extents)
def test_bbox_aspect_axes_init():
# Test that box_aspect can be given to axes init and produces
# all equal square axes.
fig, axs = plt.subplots(2, 3, subplot_kw=dict(box_aspect=1),
constrained_layout=True)
fig.canvas.draw()
renderer = fig.canvas.get_renderer()
sizes = []
for ax in axs.flat:
bb = ax.get_window_extent(renderer)
sizes.extend([bb.width, bb.height])
assert_allclose(sizes, sizes[0])
def test_set_aspect_negative():
fig, ax = plt.subplots()
with pytest.raises(ValueError, match="must be finite and positive"):
ax.set_aspect(-1)
with pytest.raises(ValueError, match="must be finite and positive"):
ax.set_aspect(0)
with pytest.raises(ValueError, match="must be finite and positive"):
ax.set_aspect(np.inf)
with pytest.raises(ValueError, match="must be finite and positive"):
ax.set_aspect(-np.inf)
def test_redraw_in_frame():
fig, ax = plt.subplots(1, 1)
ax.plot([1, 2, 3])
fig.canvas.draw()
ax.redraw_in_frame()
def test_invisible_axes_events():
# invisible axes should not respond to events...
fig, ax = plt.subplots()
assert fig.canvas.inaxes((200, 200)) is not None
ax.set_visible(False)
assert fig.canvas.inaxes((200, 200)) is None
def test_xtickcolor_is_not_markercolor():
plt.rcParams['lines.markeredgecolor'] = 'white'
ax = plt.axes()
ticks = ax.xaxis.get_major_ticks()
for tick in ticks:
assert tick.tick1line.get_markeredgecolor() != 'white'
def test_ytickcolor_is_not_markercolor():
plt.rcParams['lines.markeredgecolor'] = 'white'
ax = plt.axes()
ticks = ax.yaxis.get_major_ticks()
for tick in ticks:
assert tick.tick1line.get_markeredgecolor() != 'white'
@pytest.mark.parametrize('axis', ('x', 'y'))
@pytest.mark.parametrize('auto', (True, False, None))
def test_unautoscale(axis, auto):
fig, ax = plt.subplots()
x = np.arange(100)
y = np.linspace(-.1, .1, 100)
ax.scatter(y, x)
get_autoscale_on = getattr(ax, f'get_autoscale{axis}_on')
set_lim = getattr(ax, f'set_{axis}lim')
get_lim = getattr(ax, f'get_{axis}lim')
post_auto = get_autoscale_on() if auto is None else auto
set_lim((-0.5, 0.5), auto=auto)
assert post_auto == get_autoscale_on()
fig.canvas.draw()
assert_array_equal(get_lim(), (-0.5, 0.5))
@check_figures_equal()
def test_polar_interpolation_steps_variable_r(fig_test, fig_ref):
l, = fig_test.add_subplot(projection="polar").plot([0, np.pi/2], [1, 2])
l.get_path()._interpolation_steps = 100
fig_ref.add_subplot(projection="polar").plot(
np.linspace(0, np.pi/2, 101), np.linspace(1, 2, 101))
@mpl.style.context('default')
def test_autoscale_tiny_sticky():
fig, ax = plt.subplots()
ax.bar(0, 1e-9)
fig.canvas.draw()
assert ax.get_ylim() == (0, 1.05e-9)
def test_xtickcolor_is_not_xticklabelcolor():
plt.rcParams['xtick.color'] = 'yellow'
plt.rcParams['xtick.labelcolor'] = 'blue'
ax = plt.axes()
ticks = ax.xaxis.get_major_ticks()
for tick in ticks:
assert tick.tick1line.get_color() == 'yellow'
assert tick.label1.get_color() == 'blue'
def test_ytickcolor_is_not_yticklabelcolor():
plt.rcParams['ytick.color'] = 'yellow'
plt.rcParams['ytick.labelcolor'] = 'blue'
ax = plt.axes()
ticks = ax.yaxis.get_major_ticks()
for tick in ticks:
assert tick.tick1line.get_color() == 'yellow'
assert tick.label1.get_color() == 'blue'
def test_xaxis_offsetText_color():
plt.rcParams['xtick.labelcolor'] = 'blue'
ax = plt.axes()
assert ax.xaxis.offsetText.get_color() == 'blue'
plt.rcParams['xtick.color'] = 'yellow'
plt.rcParams['xtick.labelcolor'] = 'inherit'
ax = plt.axes()
assert ax.xaxis.offsetText.get_color() == 'yellow'
def test_yaxis_offsetText_color():
plt.rcParams['ytick.labelcolor'] = 'green'
ax = plt.axes()
assert ax.yaxis.offsetText.get_color() == 'green'
plt.rcParams['ytick.color'] = 'red'
plt.rcParams['ytick.labelcolor'] = 'inherit'
ax = plt.axes()
assert ax.yaxis.offsetText.get_color() == 'red'
@pytest.mark.parametrize('size', [size for size in mfont_manager.font_scalings
if size is not None] + [8, 10, 12])
@mpl.style.context('default')
def test_relative_ticklabel_sizes(size):
mpl.rcParams['xtick.labelsize'] = size
mpl.rcParams['ytick.labelsize'] = size
fig, ax = plt.subplots()
fig.canvas.draw()
for name, axis in zip(['x', 'y'], [ax.xaxis, ax.yaxis]):
for tick in axis.get_major_ticks():
assert tick.label1.get_size() == axis._get_tick_label_size(name)
def test_multiplot_autoscale():
fig = plt.figure()
ax1, ax2 = fig.subplots(2, 1, sharex='all')
ax1.plot([18000, 18250, 18500, 18750], [2, 3, 2, 3])
ax2.axhspan(-5, 5)
xlim = ax1.get_xlim()
assert np.allclose(xlim, [18000, 18800])
def test_sharing_does_not_link_positions():
fig = plt.figure()
ax0 = fig.add_subplot(221)
ax1 = fig.add_axes((.6, .6, .3, .3), sharex=ax0)
init_pos = ax1.get_position()
fig.subplots_adjust(left=0)
assert (ax1.get_position().get_points() == init_pos.get_points()).all()
@check_figures_equal(extensions=["pdf"])
def test_2dcolor_plot(fig_test, fig_ref):
color = np.array([0.1, 0.2, 0.3])
# plot with 1D-color:
axs = fig_test.subplots(5)
axs[0].plot([1, 2], [1, 2], c=color.reshape(-1))
with pytest.warns(match="argument looks like a single numeric RGB"):
axs[1].scatter([1, 2], [1, 2], c=color.reshape(-1))
axs[2].step([1, 2], [1, 2], c=color.reshape(-1))
axs[3].hist(np.arange(10), color=color.reshape(-1))
axs[4].bar(np.arange(10), np.arange(10), color=color.reshape(-1))
# plot with 2D-color:
axs = fig_ref.subplots(5)
axs[0].plot([1, 2], [1, 2], c=color.reshape((1, -1)))
axs[1].scatter([1, 2], [1, 2], c=color.reshape((1, -1)))
axs[2].step([1, 2], [1, 2], c=color.reshape((1, -1)))
axs[3].hist(np.arange(10), color=color.reshape((1, -1)))
axs[4].bar(np.arange(10), np.arange(10), color=color.reshape((1, -1)))
@check_figures_equal()
def test_shared_axes_clear(fig_test, fig_ref):
x = np.arange(0.0, 2*np.pi, 0.01)
y = np.sin(x)
axs = fig_ref.subplots(2, 2, sharex=True, sharey=True)
for ax in axs.flat:
ax.plot(x, y)
axs = fig_test.subplots(2, 2, sharex=True, sharey=True)
for ax in axs.flat:
ax.clear()
ax.plot(x, y)
def test_shared_axes_retick():
fig, axs = plt.subplots(2, 2, sharex='all', sharey='all')
for ax in axs.flat:
ax.plot([0, 2], 'o-')
axs[0, 0].set_xticks([-0.5, 0, 1, 1.5]) # should affect all axes xlims
for ax in axs.flat:
assert ax.get_xlim() == axs[0, 0].get_xlim()
axs[0, 0].set_yticks([-0.5, 0, 2, 2.5]) # should affect all axes ylims
for ax in axs.flat:
assert ax.get_ylim() == axs[0, 0].get_ylim()
@pytest.mark.parametrize('ha', ['left', 'center', 'right'])
def test_ylabel_ha_with_position(ha):
fig = Figure()
ax = fig.subplots()
ax.set_ylabel("test", y=1, ha=ha)
ax.yaxis.set_label_position("right")
assert ax.yaxis.label.get_ha() == ha
def test_bar_label_location_vertical():
ax = plt.gca()
xs, heights = [1, 2], [3, -4]
rects = ax.bar(xs, heights)
labels = ax.bar_label(rects)
assert labels[0].xy == (xs[0], heights[0])
assert labels[0].get_horizontalalignment() == 'center'
assert labels[0].get_verticalalignment() == 'bottom'
assert labels[1].xy == (xs[1], heights[1])
assert labels[1].get_horizontalalignment() == 'center'
assert labels[1].get_verticalalignment() == 'top'
def test_bar_label_location_vertical_yinverted():
ax = plt.gca()
ax.invert_yaxis()
xs, heights = [1, 2], [3, -4]
rects = ax.bar(xs, heights)
labels = ax.bar_label(rects)
assert labels[0].xy == (xs[0], heights[0])
assert labels[0].get_horizontalalignment() == 'center'
assert labels[0].get_verticalalignment() == 'top'
assert labels[1].xy == (xs[1], heights[1])
assert labels[1].get_horizontalalignment() == 'center'
assert labels[1].get_verticalalignment() == 'bottom'
def test_bar_label_location_horizontal():
ax = plt.gca()
ys, widths = [1, 2], [3, -4]
rects = ax.barh(ys, widths)
labels = ax.bar_label(rects)
assert labels[0].xy == (widths[0], ys[0])
assert labels[0].get_horizontalalignment() == 'left'
assert labels[0].get_verticalalignment() == 'center'
assert labels[1].xy == (widths[1], ys[1])
assert labels[1].get_horizontalalignment() == 'right'
assert labels[1].get_verticalalignment() == 'center'
def test_bar_label_location_horizontal_yinverted():
ax = plt.gca()
ax.invert_yaxis()
ys, widths = [1, 2], [3, -4]
rects = ax.barh(ys, widths)
labels = ax.bar_label(rects)
assert labels[0].xy == (widths[0], ys[0])
assert labels[0].get_horizontalalignment() == 'left'
assert labels[0].get_verticalalignment() == 'center'
assert labels[1].xy == (widths[1], ys[1])
assert labels[1].get_horizontalalignment() == 'right'
assert labels[1].get_verticalalignment() == 'center'
def test_bar_label_location_horizontal_xinverted():
ax = plt.gca()
ax.invert_xaxis()
ys, widths = [1, 2], [3, -4]
rects = ax.barh(ys, widths)
labels = ax.bar_label(rects)
assert labels[0].xy == (widths[0], ys[0])
assert labels[0].get_horizontalalignment() == 'right'
assert labels[0].get_verticalalignment() == 'center'
assert labels[1].xy == (widths[1], ys[1])
assert labels[1].get_horizontalalignment() == 'left'
assert labels[1].get_verticalalignment() == 'center'
def test_bar_label_location_horizontal_xyinverted():
ax = plt.gca()
ax.invert_xaxis()
ax.invert_yaxis()
ys, widths = [1, 2], [3, -4]
rects = ax.barh(ys, widths)
labels = ax.bar_label(rects)
assert labels[0].xy == (widths[0], ys[0])
assert labels[0].get_horizontalalignment() == 'right'
assert labels[0].get_verticalalignment() == 'center'
assert labels[1].xy == (widths[1], ys[1])
assert labels[1].get_horizontalalignment() == 'left'
assert labels[1].get_verticalalignment() == 'center'
def test_bar_label_location_center():
ax = plt.gca()
ys, widths = [1, 2], [3, -4]
rects = ax.barh(ys, widths)
labels = ax.bar_label(rects, label_type='center')
assert labels[0].xy == (0.5, 0.5)
assert labels[0].get_horizontalalignment() == 'center'
assert labels[0].get_verticalalignment() == 'center'
assert labels[1].xy == (0.5, 0.5)
assert labels[1].get_horizontalalignment() == 'center'
assert labels[1].get_verticalalignment() == 'center'
@image_comparison(['test_centered_bar_label_nonlinear.svg'])
def test_centered_bar_label_nonlinear():
_, ax = plt.subplots()
bar_container = ax.barh(['c', 'b', 'a'], [1_000, 5_000, 7_000])
ax.set_xscale('log')
ax.set_xlim(1, None)
ax.bar_label(bar_container, label_type='center')
ax.set_axis_off()
def test_centered_bar_label_label_beyond_limits():
fig, ax = plt.subplots()
last = 0
for label, value in zip(['a', 'b', 'c'], [10, 20, 50]):
bar_container = ax.barh('col', value, label=label, left=last)
ax.bar_label(bar_container, label_type='center')
last += value
ax.set_xlim(None, 20)
fig.draw_without_rendering()
def test_bar_label_location_errorbars():
ax = plt.gca()
xs, heights = [1, 2], [3, -4]
rects = ax.bar(xs, heights, yerr=1)
labels = ax.bar_label(rects)
assert labels[0].xy == (xs[0], heights[0] + 1)
assert labels[0].get_horizontalalignment() == 'center'
assert labels[0].get_verticalalignment() == 'bottom'
assert labels[1].xy == (xs[1], heights[1] - 1)
assert labels[1].get_horizontalalignment() == 'center'
assert labels[1].get_verticalalignment() == 'top'
@pytest.mark.parametrize('fmt', [
'%.2f', '{:.2f}', '{:.2f}'.format
])
def test_bar_label_fmt(fmt):
ax = plt.gca()
rects = ax.bar([1, 2], [3, -4])
labels = ax.bar_label(rects, fmt=fmt)
assert labels[0].get_text() == '3.00'
assert labels[1].get_text() == '-4.00'
def test_bar_label_fmt_error():
ax = plt.gca()
rects = ax.bar([1, 2], [3, -4])
with pytest.raises(TypeError, match='str or callable'):
_ = ax.bar_label(rects, fmt=10)
def test_bar_label_labels():
ax = plt.gca()
rects = ax.bar([1, 2], [3, -4])
labels = ax.bar_label(rects, labels=['A', 'B'])
assert labels[0].get_text() == 'A'
assert labels[1].get_text() == 'B'
def test_bar_label_nan_ydata():
ax = plt.gca()
bars = ax.bar([2, 3], [np.nan, 1])
labels = ax.bar_label(bars)
assert [l.get_text() for l in labels] == ['', '1']
assert labels[0].xy == (2, 0)
assert labels[0].get_verticalalignment() == 'bottom'
def test_bar_label_nan_ydata_inverted():
ax = plt.gca()
ax.yaxis_inverted()
bars = ax.bar([2, 3], [np.nan, 1])
labels = ax.bar_label(bars)
assert [l.get_text() for l in labels] == ['', '1']
assert labels[0].xy == (2, 0)
assert labels[0].get_verticalalignment() == 'bottom'
def test_bar_label_padding():
"""Test that bar_label accepts both float and array-like padding."""
ax = plt.gca()
xs, heights = [1, 2], [3, 4]
rects = ax.bar(xs, heights)
labels1 = ax.bar_label(rects, padding=5) # test float value
assert labels1[0].xyann[1] == 5
assert labels1[1].xyann[1] == 5
labels2 = ax.bar_label(rects, padding=[2, 8]) # test array-like values
assert labels2[0].xyann[1] == 2
assert labels2[1].xyann[1] == 8
with pytest.raises(ValueError, match="padding must be of length"):
ax.bar_label(rects, padding=[1, 2, 3])
def test_nan_barlabels():
fig, ax = plt.subplots()
bars = ax.bar([1, 2, 3], [np.nan, 1, 2], yerr=[0.2, 0.4, 0.6])
labels = ax.bar_label(bars)
assert [l.get_text() for l in labels] == ['', '1', '2']
assert np.allclose(ax.get_ylim(), (0.0, 3.0))
fig, ax = plt.subplots()
bars = ax.bar([1, 2, 3], [0, 1, 2], yerr=[0.2, np.nan, 0.6])
labels = ax.bar_label(bars)
assert [l.get_text() for l in labels] == ['0', '1', '2']
assert np.allclose(ax.get_ylim(), (-0.5, 3.0))
fig, ax = plt.subplots()
bars = ax.bar([1, 2, 3], [np.nan, 1, 2], yerr=[np.nan, np.nan, 0.6])
labels = ax.bar_label(bars)
assert [l.get_text() for l in labels] == ['', '1', '2']
assert np.allclose(ax.get_ylim(), (0.0, 3.0))
def test_patch_bounds(): # PR 19078
fig, ax = plt.subplots()
ax.add_patch(mpatches.Wedge((0, -1), 1.05, 60, 120, width=0.1))
bot = 1.9*np.sin(15*np.pi/180)**2
np.testing.assert_array_almost_equal_nulp(
np.array((-0.525, -(bot+0.05), 1.05, bot+0.1)), ax.dataLim.bounds, 16)
@mpl.style.context('default')
def test_warn_ignored_scatter_kwargs():
with pytest.warns(UserWarning,
match=r"You passed an edgecolor/edgecolors"):
plt.scatter([0], [0], marker="+", s=500, facecolor="r", edgecolor="b")
def test_artist_sublists():
fig, ax = plt.subplots()
lines = [ax.plot(np.arange(i, i + 5))[0] for i in range(6)]
col = ax.scatter(np.arange(5), np.arange(5))
im = ax.imshow(np.zeros((5, 5)))
patch = ax.add_patch(mpatches.Rectangle((0, 0), 5, 5))
text = ax.text(0, 0, 'foo')
# Get items, which should not be mixed.
assert list(ax.collections) == [col]
assert list(ax.images) == [im]
assert list(ax.lines) == lines
assert list(ax.patches) == [patch]
assert not ax.tables
assert list(ax.texts) == [text]
# Get items should work like lists/tuple.
assert ax.lines[0] is lines[0]
assert ax.lines[-1] is lines[-1]
with pytest.raises(IndexError, match='out of range'):
ax.lines[len(lines) + 1]
# Adding to other lists should produce a regular list.
assert ax.lines + [1, 2, 3] == [*lines, 1, 2, 3]
assert [1, 2, 3] + ax.lines == [1, 2, 3, *lines]
# Adding to other tuples should produce a regular tuples.
assert ax.lines + (1, 2, 3) == (*lines, 1, 2, 3)
assert (1, 2, 3) + ax.lines == (1, 2, 3, *lines)
# Lists should be empty after removing items.
col.remove()
assert not ax.collections
im.remove()
assert not ax.images
patch.remove()
assert not ax.patches
assert not ax.tables
text.remove()
assert not ax.texts
for ln in ax.lines:
ln.remove()
assert len(ax.lines) == 0
def test_empty_line_plots():
# Incompatible nr columns, plot "nothing"
x = np.ones(10)
y = np.ones((10, 0))
_, ax = plt.subplots()
line = ax.plot(x, y)
assert len(line) == 0
# Ensure plot([],[]) creates line
_, ax = plt.subplots()
line = ax.plot([], [])
assert len(line) == 1
@pytest.mark.parametrize('fmt, match', (
("f", r"'f' is not a valid format string \(unrecognized character 'f'\)"),
("o+", r"'o\+' is not a valid format string \(two marker symbols\)"),
(":-", r"':-' is not a valid format string \(two linestyle symbols\)"),
("rk", r"'rk' is not a valid format string \(two color symbols\)"),
(":o-r", r"':o-r' is not a valid format string \(two linestyle symbols\)"),
("C", r"'C' is not a valid format string \('C' must be followed by a number\)"),
(".C", r"'.C' is not a valid format string \('C' must be followed by a number\)"),
))
@pytest.mark.parametrize("data", [None, {"string": range(3)}])
def test_plot_format_errors(fmt, match, data):
fig, ax = plt.subplots()
if data is not None:
match = match.replace("not", "neither a data key nor")
with pytest.raises(ValueError, match=r"\A" + match + r"\Z"):
ax.plot("string", fmt, data=data)
def test_plot_format():
fig, ax = plt.subplots()
line = ax.plot([1, 2, 3], '1.0')
assert line[0].get_color() == (1.0, 1.0, 1.0, 1.0)
assert line[0].get_marker() == 'None'
fig, ax = plt.subplots()
line = ax.plot([1, 2, 3], '1')
assert line[0].get_marker() == '1'
fig, ax = plt.subplots()
line = ax.plot([1, 2], [1, 2], '1.0', "1")
fig.canvas.draw()
assert line[0].get_color() == (1.0, 1.0, 1.0, 1.0)
assert ax.get_yticklabels()[0].get_text() == '1'
fig, ax = plt.subplots()
line = ax.plot([1, 2], [1, 2], '1', "1.0")
fig.canvas.draw()
assert line[0].get_marker() == '1'
assert ax.get_yticklabels()[0].get_text() == '1.0'
fig, ax = plt.subplots()
line = ax.plot([1, 2, 3], 'k3')
assert line[0].get_marker() == '3'
assert line[0].get_color() == 'k'
fig, ax = plt.subplots()
line = ax.plot([1, 2, 3], '.C12:')
assert line[0].get_marker() == '.'
assert line[0].get_color() == mcolors.to_rgba('C12')
assert line[0].get_linestyle() == ':'
def test_automatic_legend():
fig, ax = plt.subplots()
ax.plot("a", "b", data={"d": 2})
leg = ax.legend()
fig.canvas.draw()
assert leg.get_texts()[0].get_text() == 'a'
assert ax.get_yticklabels()[0].get_text() == 'a'
fig, ax = plt.subplots()
ax.plot("a", "b", "c", data={"d": 2})
leg = ax.legend()
fig.canvas.draw()
assert leg.get_texts()[0].get_text() == 'b'
assert ax.get_xticklabels()[0].get_text() == 'a'
assert ax.get_yticklabels()[0].get_text() == 'b'
def test_plot_errors():
with pytest.raises(TypeError, match=r"plot\(\) got an unexpected keyword"):
plt.plot([1, 2, 3], x=1)
with pytest.raises(ValueError, match=r"plot\(\) with multiple groups"):
plt.plot([1, 2, 3], [1, 2, 3], [2, 3, 4], [2, 3, 4], label=['1', '2'])
with pytest.raises(ValueError, match="x and y must have same first"):
plt.plot([1, 2, 3], [1])
with pytest.raises(ValueError, match="x and y can be no greater than"):
plt.plot(np.ones((2, 2, 2)))
with pytest.raises(ValueError, match="Using arbitrary long args with"):
plt.plot("a", "b", "c", "d", data={"a": 2})
def test_clim():
ax = plt.figure().add_subplot()
for plot_method in [
partial(ax.scatter, range(3), range(3), c=range(3)),
partial(ax.imshow, [[0, 1], [2, 3]]),
partial(ax.pcolor, [[0, 1], [2, 3]]),
partial(ax.pcolormesh, [[0, 1], [2, 3]]),
partial(ax.pcolorfast, [[0, 1], [2, 3]]),
]:
clim = (7, 8)
norm = plot_method(clim=clim).norm
assert (norm.vmin, norm.vmax) == clim
def test_bezier_autoscale():
# Check that bezier curves autoscale to their curves, and not their
# control points
verts = [[-1, 0],
[0, -1],
[1, 0],
[1, 0]]
codes = [mpath.Path.MOVETO,
mpath.Path.CURVE3,
mpath.Path.CURVE3,
mpath.Path.CLOSEPOLY]
p = mpath.Path(verts, codes)
fig, ax = plt.subplots()
ax.add_patch(mpatches.PathPatch(p))
ax.autoscale()
# Bottom ylim should be at the edge of the curve (-0.5), and not include
# the control point (at -1)
assert ax.get_ylim()[0] == -0.5
def test_small_autoscale():
# Check that paths with small values autoscale correctly #24097.
verts = np.array([
[-5.45, 0.00], [-5.45, 0.00], [-5.29, 0.00], [-5.29, 0.00],
[-5.13, 0.00], [-5.13, 0.00], [-4.97, 0.00], [-4.97, 0.00],
[-4.81, 0.00], [-4.81, 0.00], [-4.65, 0.00], [-4.65, 0.00],
[-4.49, 0.00], [-4.49, 0.00], [-4.33, 0.00], [-4.33, 0.00],
[-4.17, 0.00], [-4.17, 0.00], [-4.01, 0.00], [-4.01, 0.00],
[-3.85, 0.00], [-3.85, 0.00], [-3.69, 0.00], [-3.69, 0.00],
[-3.53, 0.00], [-3.53, 0.00], [-3.37, 0.00], [-3.37, 0.00],
[-3.21, 0.00], [-3.21, 0.01], [-3.05, 0.01], [-3.05, 0.01],
[-2.89, 0.01], [-2.89, 0.01], [-2.73, 0.01], [-2.73, 0.02],
[-2.57, 0.02], [-2.57, 0.04], [-2.41, 0.04], [-2.41, 0.04],
[-2.25, 0.04], [-2.25, 0.06], [-2.09, 0.06], [-2.09, 0.08],
[-1.93, 0.08], [-1.93, 0.10], [-1.77, 0.10], [-1.77, 0.12],
[-1.61, 0.12], [-1.61, 0.14], [-1.45, 0.14], [-1.45, 0.17],
[-1.30, 0.17], [-1.30, 0.19], [-1.14, 0.19], [-1.14, 0.22],
[-0.98, 0.22], [-0.98, 0.25], [-0.82, 0.25], [-0.82, 0.27],
[-0.66, 0.27], [-0.66, 0.29], [-0.50, 0.29], [-0.50, 0.30],
[-0.34, 0.30], [-0.34, 0.32], [-0.18, 0.32], [-0.18, 0.33],
[-0.02, 0.33], [-0.02, 0.32], [0.13, 0.32], [0.13, 0.33], [0.29, 0.33],
[0.29, 0.31], [0.45, 0.31], [0.45, 0.30], [0.61, 0.30], [0.61, 0.28],
[0.77, 0.28], [0.77, 0.25], [0.93, 0.25], [0.93, 0.22], [1.09, 0.22],
[1.09, 0.19], [1.25, 0.19], [1.25, 0.17], [1.41, 0.17], [1.41, 0.15],
[1.57, 0.15], [1.57, 0.12], [1.73, 0.12], [1.73, 0.10], [1.89, 0.10],
[1.89, 0.08], [2.05, 0.08], [2.05, 0.07], [2.21, 0.07], [2.21, 0.05],
[2.37, 0.05], [2.37, 0.04], [2.53, 0.04], [2.53, 0.02], [2.69, 0.02],
[2.69, 0.02], [2.85, 0.02], [2.85, 0.01], [3.01, 0.01], [3.01, 0.01],
[3.17, 0.01], [3.17, 0.00], [3.33, 0.00], [3.33, 0.00], [3.49, 0.00],
[3.49, 0.00], [3.65, 0.00], [3.65, 0.00], [3.81, 0.00], [3.81, 0.00],
[3.97, 0.00], [3.97, 0.00], [4.13, 0.00], [4.13, 0.00], [4.29, 0.00],
[4.29, 0.00], [4.45, 0.00], [4.45, 0.00], [4.61, 0.00], [4.61, 0.00],
[4.77, 0.00], [4.77, 0.00], [4.93, 0.00], [4.93, 0.00],
])
minx = np.min(verts[:, 0])
miny = np.min(verts[:, 1])
maxx = np.max(verts[:, 0])
maxy = np.max(verts[:, 1])
p = mpath.Path(verts)
fig, ax = plt.subplots()
ax.add_patch(mpatches.PathPatch(p))
ax.autoscale()
assert ax.get_xlim()[0] <= minx
assert ax.get_xlim()[1] >= maxx
assert ax.get_ylim()[0] <= miny
assert ax.get_ylim()[1] >= maxy
def test_get_xticklabel():
fig, ax = plt.subplots()
ax.plot(np.arange(10))
for ind in range(10):
assert ax.get_xticklabels()[ind].get_text() == f'{ind}'
assert ax.get_yticklabels()[ind].get_text() == f'{ind}'
def test_bar_leading_nan():
barx = np.arange(3, dtype=float)
barheights = np.array([0.5, 1.5, 2.0])
barstarts = np.array([0.77]*3)
barx[0] = np.nan
fig, ax = plt.subplots()
bars = ax.bar(barx, barheights, bottom=barstarts)
hbars = ax.barh(barx, barheights, left=barstarts)
for bar_set in (bars, hbars):
        # the first bar's location should contain a nan
nanful, *rest = bar_set
assert (~np.isfinite(nanful.xy)).any()
assert np.isfinite(nanful.get_width())
for b in rest:
assert np.isfinite(b.xy).all()
assert np.isfinite(b.get_width())
@check_figures_equal()
def test_bar_all_nan(fig_test, fig_ref):
mpl.style.use("mpl20")
ax_test = fig_test.subplots()
ax_ref = fig_ref.subplots()
ax_test.bar([np.nan], [np.nan])
ax_test.bar([1], [1])
ax_ref.bar([1], [1]).remove()
ax_ref.bar([1], [1])
@image_comparison(["extent_units.png"], style="mpl20")
def test_extent_units():
_, axs = plt.subplots(2, 2)
date_first = np.datetime64('2020-01-01', 'D')
date_last = np.datetime64('2020-01-11', 'D')
arr = [[i+j for i in range(10)] for j in range(10)]
axs[0, 0].set_title('Date extents on y axis')
im = axs[0, 0].imshow(arr, origin='lower',
extent=[1, 11, date_first, date_last],
cmap=mpl.colormaps["plasma"])
axs[0, 1].set_title('Date extents on x axis (Day of Jan 2020)')
im = axs[0, 1].imshow(arr, origin='lower',
extent=[date_first, date_last, 1, 11],
cmap=mpl.colormaps["plasma"])
axs[0, 1].xaxis.set_major_formatter(mdates.DateFormatter('%d'))
im = axs[1, 0].imshow(arr, origin='lower',
extent=[date_first, date_last,
date_first, date_last],
cmap=mpl.colormaps["plasma"])
axs[1, 0].xaxis.set_major_formatter(mdates.DateFormatter('%d'))
axs[1, 0].set(xlabel='Day of Jan 2020')
im = axs[1, 1].imshow(arr, origin='lower',
cmap=mpl.colormaps["plasma"])
im.set_extent([date_last, date_first, date_last, date_first])
axs[1, 1].xaxis.set_major_formatter(mdates.DateFormatter('%d'))
axs[1, 1].set(xlabel='Day of Jan 2020')
with pytest.raises(TypeError, match=r"set_extent\(\) got an unexpected"):
im.set_extent([2, 12, date_first, date_last], clip=False)
def test_cla_clears_children_axes_and_fig():
fig, ax = plt.subplots()
lines = ax.plot([], [], [], [])
img = ax.imshow([[1]])
for art in lines + [img]:
assert art.axes is ax
assert art.get_figure() is fig
ax.clear()
for art in lines + [img]:
assert art.axes is None
assert art.get_figure() is None
def test_child_axes_removal():
fig, ax = plt.subplots()
marginal = ax.inset_axes([1, 0, .1, 1], sharey=ax)
marginal_twin = marginal.twinx()
marginal.remove()
ax.set(xlim=(-1, 1), ylim=(10, 20))
def test_scatter_color_repr_error():
def get_next_color(): # pragma: no cover
return 'blue' # currently unused
msg = (
r"'c' argument must be a color, a sequence of colors"
r", or a sequence of numbers, not 'red\\n'"
)
with pytest.raises(ValueError, match=msg):
c = 'red\n'
mpl.axes.Axes._parse_scatter_color_args(
c, None, kwargs={}, xsize=2, get_next_color_func=get_next_color)
def test_zorder_and_explicit_rasterization():
fig, ax = plt.subplots()
ax.set_rasterization_zorder(5)
ln, = ax.plot(range(5), rasterized=True, zorder=1)
with io.BytesIO() as b:
fig.savefig(b, format='pdf')
@image_comparison(["preset_clip_paths.png"], remove_text=True, style="mpl20",
tol=0 if platform.machine() == 'x86_64' else 0.027)
def test_preset_clip_paths():
fig, ax = plt.subplots()
poly = mpl.patches.Polygon(
[[1, 0], [0, 1], [-1, 0], [0, -1]], facecolor="#ddffdd",
edgecolor="#00ff00", linewidth=2, alpha=0.5)
ax.add_patch(poly)
line = mpl.lines.Line2D((-1, 1), (0.5, 0.5), clip_on=True, clip_path=poly)
line.set_path_effects([patheffects.withTickedStroke()])
ax.add_artist(line)
line = mpl.lines.Line2D((-1, 1), (-0.5, -0.5), color='r', clip_on=True,
clip_path=poly)
ax.add_artist(line)
poly2 = mpl.patches.Polygon(
[[-1, 1], [0, 1], [0, -0.25]], facecolor="#beefc0", alpha=0.3,
edgecolor="#faded0", linewidth=2, clip_on=True, clip_path=poly)
ax.add_artist(poly2)
# When text clipping works, the "Annotation" text should be clipped
ax.annotate('Annotation', (-0.75, -0.75), xytext=(0.1, 0.75),
arrowprops={'color': 'k'}, clip_on=True, clip_path=poly)
poly3 = mpl.patches.Polygon(
[[0, 0], [0, 0.5], [0.5, 0.5], [0.5, 0]], facecolor="g", edgecolor="y",
linewidth=2, alpha=0.3, clip_on=True, clip_path=poly)
fig.add_artist(poly3, clip=True)
ax.set_xlim(-1, 1)
ax.set_ylim(-1, 1)
@mpl.style.context('default')
def test_rc_axes_label_formatting():
mpl.rcParams['axes.labelcolor'] = 'red'
mpl.rcParams['axes.labelsize'] = 20
mpl.rcParams['axes.labelweight'] = 'bold'
ax = plt.axes()
assert ax.xaxis.label.get_color() == 'red'
assert ax.xaxis.label.get_fontsize() == 20
assert ax.xaxis.label.get_fontweight() == 'bold'
@check_figures_equal()
def test_ecdf(fig_test, fig_ref):
data = np.array([0, -np.inf, -np.inf, np.inf, 1, 1, 2])
weights = range(len(data))
axs_test = fig_test.subplots(1, 2)
for ax, orientation in zip(axs_test, ["vertical", "horizontal"]):
l0 = ax.ecdf(data, orientation=orientation)
l1 = ax.ecdf("d", "w", data={"d": np.ma.array(data), "w": weights},
orientation=orientation,
complementary=True, compress=True, ls=":")
assert len(l0.get_xdata()) == (~np.isnan(data)).sum() + 1
assert len(l1.get_xdata()) == len({*data[~np.isnan(data)]}) + 1
axs_ref = fig_ref.subplots(1, 2)
axs_ref[0].plot([-np.inf, -np.inf, -np.inf, 0, 1, 1, 2, np.inf],
np.arange(8) / 7, ds="steps-post")
axs_ref[0].plot([-np.inf, 0, 1, 2, np.inf, np.inf],
np.array([21, 20, 18, 14, 3, 0]) / 21,
ds="steps-pre", ls=":")
axs_ref[1].plot(np.arange(8) / 7,
[-np.inf, -np.inf, -np.inf, 0, 1, 1, 2, np.inf],
ds="steps-pre")
axs_ref[1].plot(np.array([21, 20, 18, 14, 3, 0]) / 21,
[-np.inf, 0, 1, 2, np.inf, np.inf],
ds="steps-post", ls=":")
def test_ecdf_invalid():
with pytest.raises(ValueError):
plt.ecdf([1, np.nan])
with pytest.raises(ValueError):
plt.ecdf(np.ma.array([1, 2], mask=[True, False]))
def test_fill_between_axes_limits():
fig, ax = plt.subplots()
x = np.arange(0, 4 * np.pi, 0.01)
y = 0.1*np.sin(x)
threshold = 0.075
ax.plot(x, y, color='black')
original_lims = (ax.get_xlim(), ax.get_ylim())
ax.axhline(threshold, color='green', lw=2, alpha=0.7)
ax.fill_between(x, 0, 1, where=y > threshold,
color='green', alpha=0.5, transform=ax.get_xaxis_transform())
assert (ax.get_xlim(), ax.get_ylim()) == original_lims
def test_tick_param_labelfont():
fig, ax = plt.subplots()
ax.plot([1, 2, 3, 4], [1, 2, 3, 4])
ax.set_xlabel('X label in Impact font', fontname='Impact')
ax.set_ylabel('Y label in xkcd script', fontname='xkcd script')
ax.tick_params(color='r', labelfontfamily='monospace')
plt.title('Title in sans-serif')
for text in ax.get_xticklabels():
assert text.get_fontfamily()[0] == 'monospace'
def test_set_secondary_axis_color():
fig, ax = plt.subplots()
sax = ax.secondary_xaxis("top", color="red")
assert mcolors.same_color(sax.spines["bottom"].get_edgecolor(), "red")
assert mcolors.same_color(sax.spines["top"].get_edgecolor(), "red")
assert mcolors.same_color(sax.xaxis.get_tick_params()["color"], "red")
assert mcolors.same_color(sax.xaxis.get_tick_params()["labelcolor"], "red")
assert mcolors.same_color(sax.xaxis.label.get_color(), "red")
def test_xylim_changed_shared():
fig, axs = plt.subplots(2, sharex=True, sharey=True)
events = []
axs[1].callbacks.connect("xlim_changed", events.append)
axs[1].callbacks.connect("ylim_changed", events.append)
axs[0].set(xlim=[1, 3], ylim=[2, 4])
assert events == [axs[1], axs[1]]
@image_comparison(["axhvlinespan_interpolation.png"], style="default")
def test_axhvlinespan_interpolation():
ax = plt.figure().add_subplot(projection="polar")
ax.set_axis_off()
ax.axvline(.1, c="C0")
ax.axvspan(.2, .3, fc="C1")
ax.axvspan(.4, .5, .1, .2, fc="C2")
ax.axhline(1, c="C0", alpha=.5)
ax.axhspan(.8, .9, fc="C1", alpha=.5)
ax.axhspan(.6, .7, .8, .9, fc="C2", alpha=.5)
@check_figures_equal()
@pytest.mark.parametrize("which", ("x", "y"))
def test_axes_clear_behavior(fig_ref, fig_test, which):
"""Test that the given tick params are not reset by ax.clear()."""
ax_test = fig_test.subplots()
ax_ref = fig_ref.subplots()
    # the following tick param values are each chosen to create a visible
    # difference from their defaults
target = {
"direction": "in",
"length": 10,
"width": 10,
"color": "xkcd:wine red",
"pad": 0,
"labelfontfamily": "serif",
"zorder": 7,
"labelrotation": 45,
"labelcolor": "xkcd:shocking pink",
# this overrides color + labelcolor, skip
# colors: ,
"grid_color": "xkcd:fluorescent green",
"grid_alpha": 0.5,
"grid_linewidth": 3,
"grid_linestyle": ":",
"bottom": False,
"top": True,
"left": False,
"right": True,
"labelbottom": True,
"labeltop": True,
"labelleft": True,
"labelright": True,
}
ax_ref.tick_params(axis=which, **target)
ax_test.tick_params(axis=which, **target)
ax_test.clear()
ax_ref.grid(True)
ax_test.grid(True)
@pytest.mark.skipif(
sys.version_info[:3] == (3, 13, 0) and sys.version_info.releaselevel != "final",
reason="https://github.com/python/cpython/issues/124538",
)
def test_axes_clear_reference_cycle():
def assert_not_in_reference_cycle(start):
        # Breadth-first search; assert that we never re-encounter the starting node
to_visit = deque([start])
explored = set()
while len(to_visit) > 0:
parent = to_visit.popleft()
for child in gc.get_referents(parent):
if id(child) in explored:
continue
assert child is not start
explored.add(id(child))
to_visit.append(child)
fig = Figure()
ax = fig.add_subplot()
points = np.random.rand(1000)
ax.plot(points, points)
ax.scatter(points, points)
ax_children = ax.get_children()
fig.clear() # This should break the reference cycle
# Care most about the objects that scale with number of points
big_artists = [
a for a in ax_children
if isinstance(a, (Line2D, PathCollection))
]
assert len(big_artists) > 0
for big_artist in big_artists:
assert_not_in_reference_cycle(big_artist)
assert len(ax_children) > 0
for child in ax_children:
# Make sure this doesn't raise because the child is already removed.
try:
child.remove()
except NotImplementedError:
pass # not implemented is expected for some artists
def test_boxplot_tick_labels():
# Test the renamed `tick_labels` parameter.
# Test for deprecation of old name `labels`.
np.random.seed(19680801)
data = np.random.random((10, 3))
fig, axs = plt.subplots(nrows=1, ncols=2, sharey=True)
# Should get deprecation warning for `labels`
with pytest.warns(mpl.MatplotlibDeprecationWarning,
match='has been renamed \'tick_labels\''):
axs[0].boxplot(data, labels=['A', 'B', 'C'])
assert [l.get_text() for l in axs[0].get_xticklabels()] == ['A', 'B', 'C']
# Test the new tick_labels parameter
axs[1].boxplot(data, tick_labels=['A', 'B', 'C'])
assert [l.get_text() for l in axs[1].get_xticklabels()] == ['A', 'B', 'C']
@needs_usetex
@check_figures_equal()
def test_latex_pie_percent(fig_test, fig_ref):
data = [20, 10, 70]
ax = fig_test.subplots()
ax.pie(data, autopct="%1.0f%%", textprops={'usetex': True})
ax1 = fig_ref.subplots()
ax1.pie(data, autopct=r"%1.0f\%%", textprops={'usetex': True})
@check_figures_equal()
def test_violinplot_orientation(fig_test, fig_ref):
# Test the `orientation : {'vertical', 'horizontal'}`
# parameter and deprecation of `vert: bool`.
fig, axs = plt.subplots(nrows=1, ncols=3)
np.random.seed(19680801)
all_data = [np.random.normal(0, std, 100) for std in range(6, 10)]
axs[0].violinplot(all_data) # Default vertical plot.
# xticks and yticks should be at their default position.
assert all(axs[0].get_xticks() == np.array(
[0.5, 1., 1.5, 2., 2.5, 3., 3.5, 4., 4.5]))
assert all(axs[0].get_yticks() == np.array(
[-30., -20., -10., 0., 10., 20., 30.]))
# Horizontal plot using new `orientation` keyword.
axs[1].violinplot(all_data, orientation='horizontal')
# xticks and yticks should be swapped.
assert all(axs[1].get_xticks() == np.array(
[-30., -20., -10., 0., 10., 20., 30.]))
assert all(axs[1].get_yticks() == np.array(
[0.5, 1., 1.5, 2., 2.5, 3., 3.5, 4., 4.5]))
plt.close()
# Deprecation of `vert: bool` keyword
with pytest.warns(mpl.MatplotlibDeprecationWarning,
match='vert: bool was deprecated in Matplotlib 3.11'):
# Compare images between a figure that
# uses vert and one that uses orientation.
ax_ref = fig_ref.subplots()
ax_ref.violinplot(all_data, vert=False)
ax_test = fig_test.subplots()
ax_test.violinplot(all_data, orientation='horizontal')
@check_figures_equal()
def test_boxplot_orientation(fig_test, fig_ref):
# Test the `orientation : {'vertical', 'horizontal'}`
# parameter and deprecation of `vert: bool`.
fig, axs = plt.subplots(nrows=1, ncols=2)
np.random.seed(19680801)
all_data = [np.random.normal(0, std, 100) for std in range(6, 10)]
axs[0].boxplot(all_data) # Default vertical plot.
# xticks and yticks should be at their default position.
assert all(axs[0].get_xticks() == np.array(
[1, 2, 3, 4]))
assert all(axs[0].get_yticks() == np.array(
[-30., -20., -10., 0., 10., 20., 30.]))
# Horizontal plot using new `orientation` keyword.
axs[1].boxplot(all_data, orientation='horizontal')
# xticks and yticks should be swapped.
assert all(axs[1].get_xticks() == np.array(
[-30., -20., -10., 0., 10., 20., 30.]))
assert all(axs[1].get_yticks() == np.array(
[1, 2, 3, 4]))
plt.close()
# Deprecation of `vert: bool` keyword and
# 'boxplot.vertical' rcparam.
with pytest.warns(mpl.MatplotlibDeprecationWarning,
match='was deprecated in Matplotlib 3.10'):
# Compare images between a figure that
# uses vert and one that uses orientation.
with mpl.rc_context({'boxplot.vertical': False}):
ax_ref = fig_ref.subplots()
ax_ref.boxplot(all_data)
ax_test = fig_test.subplots()
ax_test.boxplot(all_data, orientation='horizontal')
@image_comparison(["use_colorizer_keyword.png"],
tol=0 if platform.machine() == 'x86_64' else 0.05)
def test_use_colorizer_keyword():
# test using the colorizer keyword
np.random.seed(0)
rand_x = np.random.random(100)
rand_y = np.random.random(100)
c = np.arange(25, dtype='float32').reshape((5, 5))
fig, axes = plt.subplots(3, 4)
norm = mpl.colors.Normalize(4, 20)
cl = mpl.colorizer.Colorizer(norm=norm, cmap='RdBu')
axes[0, 0].scatter(c, c, c=c, colorizer=cl)
axes[0, 1].hexbin(rand_x, rand_y, colorizer=cl, gridsize=(2, 2))
axes[0, 2].imshow(c, colorizer=cl)
axes[0, 3].pcolor(c, colorizer=cl)
axes[1, 0].pcolormesh(c, colorizer=cl)
axes[1, 1].pcolorfast(c, colorizer=cl) # style = image
axes[1, 2].pcolorfast((0, 1, 2, 3, 4, 5), (0, 1, 2, 3, 5, 6), c,
colorizer=cl) # style = pcolorimage
axes[1, 3].pcolorfast(c.T, c, c[:4, :4], colorizer=cl) # style = quadmesh
axes[2, 0].contour(c, colorizer=cl)
axes[2, 1].contourf(c, colorizer=cl)
axes[2, 2].tricontour(c.T.ravel(), c.ravel(), c.ravel(), colorizer=cl)
axes[2, 3].tricontourf(c.T.ravel(), c.ravel(), c.ravel(), colorizer=cl)
fig.figimage(np.repeat(np.repeat(c, 15, axis=0), 15, axis=1), colorizer=cl)
remove_ticks_and_titles(fig)
def test_wrong_use_colorizer():
# test using the colorizer keyword and norm or cmap
np.random.seed(0)
rand_x = np.random.random(100)
rand_y = np.random.random(100)
c = np.arange(25, dtype='float32').reshape((5, 5))
fig, axes = plt.subplots(3, 4)
norm = mpl.colors.Normalize(4, 20)
cl = mpl.colorizer.Colorizer(norm=norm, cmap='RdBu')
match_str = "The `colorizer` keyword cannot be used simultaneously"
kwrds = [{'vmin': 0}, {'vmax': 0}, {'norm': 'log'}, {'cmap': 'viridis'}]
for kwrd in kwrds:
with pytest.raises(ValueError, match=match_str):
axes[0, 0].scatter(c, c, c=c, colorizer=cl, **kwrd)
for kwrd in kwrds:
with pytest.raises(ValueError, match=match_str):
axes[0, 1].hexbin(rand_x, rand_y, colorizer=cl, gridsize=(2, 2), **kwrd)
for kwrd in kwrds:
with pytest.raises(ValueError, match=match_str):
axes[0, 2].imshow(c, colorizer=cl, **kwrd)
for kwrd in kwrds:
with pytest.raises(ValueError, match=match_str):
axes[0, 3].pcolor(c, colorizer=cl, **kwrd)
for kwrd in kwrds:
with pytest.raises(ValueError, match=match_str):
axes[1, 0].pcolormesh(c, colorizer=cl, **kwrd)
for kwrd in kwrds:
with pytest.raises(ValueError, match=match_str):
axes[1, 1].pcolorfast(c, colorizer=cl, **kwrd) # style = image
for kwrd in kwrds:
with pytest.raises(ValueError, match=match_str):
axes[1, 2].pcolorfast((0, 1, 2, 3, 4, 5), (0, 1, 2, 3, 5, 6), c,
colorizer=cl, **kwrd) # style = pcolorimage
for kwrd in kwrds:
with pytest.raises(ValueError, match=match_str):
axes[1, 3].pcolorfast(c.T, c, c[:4, :4], colorizer=cl, **kwrd) # quadmesh
for kwrd in kwrds:
with pytest.raises(ValueError, match=match_str):
axes[2, 0].contour(c, colorizer=cl, **kwrd)
for kwrd in kwrds:
with pytest.raises(ValueError, match=match_str):
axes[2, 1].contourf(c, colorizer=cl, **kwrd)
for kwrd in kwrds:
with pytest.raises(ValueError, match=match_str):
axes[2, 2].tricontour(c.T.ravel(), c.ravel(), c.ravel(), colorizer=cl,
**kwrd)
for kwrd in kwrds:
with pytest.raises(ValueError, match=match_str):
axes[2, 3].tricontourf(c.T.ravel(), c.ravel(), c.ravel(), colorizer=cl,
**kwrd)
for kwrd in kwrds:
with pytest.raises(ValueError, match=match_str):
fig.figimage(c, colorizer=cl, **kwrd)
def test_bar_color_precedence():
# Test the precedence of 'color' and 'facecolor' in bar plots
fig, ax = plt.subplots()
# case 1: no color specified
bars = ax.bar([1, 2, 3], [4, 5, 6])
for bar in bars:
assert mcolors.same_color(bar.get_facecolor(), 'blue')
# case 2: Only 'color'
bars = ax.bar([11, 12, 13], [4, 5, 6], color='red')
for bar in bars:
assert mcolors.same_color(bar.get_facecolor(), 'red')
# case 3: Only 'facecolor'
bars = ax.bar([21, 22, 23], [4, 5, 6], facecolor='yellow')
for bar in bars:
assert mcolors.same_color(bar.get_facecolor(), 'yellow')
# case 4: 'facecolor' and 'color'
bars = ax.bar([31, 32, 33], [4, 5, 6], color='red', facecolor='green')
for bar in bars:
assert mcolors.same_color(bar.get_facecolor(), 'green')
@check_figures_equal()
def test_axes_set_position_external_bbox_unchanged(fig_test, fig_ref):
# From #29410: Modifying Axes' position also alters the original Bbox
# object used for initialization
bbox = mtransforms.Bbox([[0.0, 0.0], [1.0, 1.0]])
ax_test = fig_test.add_axes(bbox)
ax_test.set_position([0.25, 0.25, 0.5, 0.5])
assert (bbox.x0, bbox.y0, bbox.width, bbox.height) == (0.0, 0.0, 1.0, 1.0)
ax_ref = fig_ref.add_axes((0.25, 0.25, 0.5, 0.5))
def test_bar_shape_mismatch():
x = ["foo", "bar"]
height = [1, 2, 3]
error_message = (
r"Mismatch is between 'x' with shape \(2,\) and 'height' with shape \(3,\)"
)
with pytest.raises(ValueError, match=error_message):
plt.bar(x, height)
def test_caps_color():
# Creates a simple plot with error bars and a specified ecolor
x = np.linspace(0, 10, 10)
mpl.rcParams['lines.markeredgecolor'] = 'green'
ecolor = 'red'
fig, ax = plt.subplots()
errorbars = ax.errorbar(x, np.sin(x), yerr=0.1, ecolor=ecolor)
# Tests if the caps have the specified color
for cap in errorbars[2]:
assert mcolors.same_color(cap.get_edgecolor(), ecolor)
def test_caps_no_ecolor():
# Creates a simple plot with error bars without specifying ecolor
x = np.linspace(0, 10, 10)
mpl.rcParams['lines.markeredgecolor'] = 'green'
fig, ax = plt.subplots()
errorbars = ax.errorbar(x, np.sin(x), yerr=0.1)
# Tests if the caps have the default color (blue)
for cap in errorbars[2]:
assert mcolors.same_color(cap.get_edgecolor(), "blue")
def test_pie_non_finite_values():
fig, ax = plt.subplots()
df = [5, float('nan'), float('inf')]
with pytest.raises(ValueError, match='Wedge sizes must be finite numbers'):
ax.pie(df, labels=['A', 'B', 'C'])
def test_pie_all_zeros():
fig, ax = plt.subplots()
with pytest.raises(ValueError, match="All wedge sizes are zero"):
ax.pie([0, 0], labels=["A", "B"])
| _Translation |
python | getsentry__sentry | src/sentry/auth/providers/google/views.py | {
"start": 637,
"end": 3130
} | class ____(AuthView):
def __init__(
self, domains: list[str] | None, version: str | None, *args: Any, **kwargs: Any
) -> None:
self.domains = domains
self.version = version
super().__init__(*args, **kwargs)
def dispatch(self, request: HttpRequest, pipeline: AuthHelper) -> HttpResponseBase:
data: dict[str, Any] | None = pipeline.fetch_state("data")
assert data is not None
try:
id_token = data["id_token"]
except KeyError:
logger.exception("Missing id_token in OAuth response: %s", data)
return pipeline.error(ERR_INVALID_RESPONSE)
try:
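            # The id_token is a JWT (header.payload.signature); only the
            # base64url-encoded payload segment is parsed below.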
_, payload_b, _ = map(urlsafe_b64decode, id_token.split(".", 2))
except Exception as exc:
logger.exception("Unable to decode id_token: %s", exc)
return pipeline.error(ERR_INVALID_RESPONSE)
try:
payload: dict[str, Any] = orjson.loads(payload_b)
except Exception as exc:
logger.exception("Unable to decode id_token payload: %s", exc)
return pipeline.error(ERR_INVALID_RESPONSE)
if not payload.get("email"):
logger.error("Missing email in id_token payload: %s", id_token)
return pipeline.error(ERR_INVALID_RESPONSE)
        # legacy (unversioned) configs derive the domain from the email
        # address instead of the hosted-domain ('hd') claim
domain: str | None = None
if self.version is None:
domain = extract_domain(payload["email"])
else:
domain = payload.get("hd")
if domain is None:
return pipeline.error(ERR_INVALID_DOMAIN % (domain,))
if domain in DOMAIN_BLOCKLIST:
return pipeline.error(ERR_INVALID_DOMAIN % (domain,))
if self.domains and domain not in self.domains:
return pipeline.error(ERR_INVALID_DOMAIN % (domain,))
pipeline.bind_state("domain", domain)
pipeline.bind_state("user", payload)
return pipeline.next_step()
def google_configure_view(
request: HttpRequest, organization: RpcOrganization, auth_provider: RpcAuthProvider
) -> DeferredResponse:
config = auth_provider.config
if config.get("domain"):
domains: list[str] | None
domains = [config["domain"]]
else:
domains = config.get("domains")
return DeferredResponse("sentry_auth_google/configure.html", {"domains": domains or []})
def extract_domain(email: str) -> str:
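    # e.g. extract_domain("jane@mail.example.com") -> "mail.example.com"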
return email.rsplit("@", 1)[-1]
| FetchUser |
python | docker__docker-py | docker/transport/sshconn.py | {
"start": 2515,
"end": 3181
} | class ____(urllib3.connection.HTTPConnection):
def __init__(self, ssh_transport=None, timeout=60, host=None):
super().__init__(
'localhost', timeout=timeout
)
self.ssh_transport = ssh_transport
self.timeout = timeout
self.ssh_host = host
def connect(self):
if self.ssh_transport:
sock = self.ssh_transport.open_session()
sock.settimeout(self.timeout)
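            # `docker system dial-stdio` proxies the remote daemon's API
            # socket over this channel's stdin/stdout, so HTTP requests can
            # travel over SSH without exposing a TCP port.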
sock.exec_command('docker system dial-stdio')
else:
sock = SSHSocket(self.ssh_host)
sock.settimeout(self.timeout)
sock.connect()
self.sock = sock
| SSHConnection |
python | google__pytype | pytype/rewrite/abstract/classes_test.py | {
"start": 178,
"end": 2031
} | class ____(test_utils.ContextfulTestBase):
def test_get_attribute(self):
x = self.ctx.consts[5]
cls = classes.SimpleClass(self.ctx, 'X', {'x': x})
self.assertEqual(cls.get_attribute('x'), x)
def test_get_nonexistent_attribute(self):
cls = classes.SimpleClass(self.ctx, 'X', {})
self.assertIsNone(cls.get_attribute('x'))
def test_get_parent_attribute(self):
x = self.ctx.consts[5]
parent = classes.SimpleClass(self.ctx, 'Parent', {'x': x})
child = classes.SimpleClass(self.ctx, 'Child', {}, bases=[parent])
self.assertEqual(child.get_attribute('x'), x)
def test_instantiate(self):
cls = classes.SimpleClass(self.ctx, 'X', {})
instance = cls.instantiate()
self.assertEqual(instance.cls, cls)
def test_call(self):
cls = classes.SimpleClass(self.ctx, 'X', {})
instance = cls.call(functions.Args()).get_return_value()
self.assertEqual(instance.cls, cls)
def test_mro(self):
parent = classes.SimpleClass(self.ctx, 'Parent', {})
child = classes.SimpleClass(self.ctx, 'Child', {}, bases=[parent])
self.assertEqual(child.mro(), [child, parent, self.ctx.types[object]])
def test_metaclass(self):
type_type = cast(classes.SimpleClass, self.ctx.types[type])
meta = classes.SimpleClass(self.ctx, 'Meta', {}, bases=[type_type])
cls = classes.SimpleClass(self.ctx, 'C', {}, keywords={'metaclass': meta})
self.assertEqual(cls.metaclass, meta)
def test_inherited_metaclass(self):
type_type = cast(classes.SimpleClass, self.ctx.types[type])
meta = classes.SimpleClass(self.ctx, 'Meta', {}, bases=[type_type])
parent = classes.SimpleClass(self.ctx, 'Parent', {},
keywords={'metaclass': meta})
child = classes.SimpleClass(self.ctx, 'Child', {}, bases=[parent])
self.assertEqual(child.metaclass, meta)
| ClassTest |
python | huggingface__transformers | tests/repo_utils/test_check_copies.py | {
"start": 2654,
"end": 2944
} | class ____(BertCopyPreTrainedModel):
def __init__(self, config):
super().__init__()
self.bertcopy = BertCopyEncoder(config)
@add_docstring(BERTCOPY_DOCSTRING)
def forward(self, x):
return self.bertcopy(x)
"""
MOCK_DUMMY_BERT_CODE_MATCH = """
| BertCopyModel |
python | plotly__plotly.py | plotly/graph_objs/histogram/marker/colorbar/_title.py | {
"start": 233,
"end": 4035
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "histogram.marker.colorbar"
_path_str = "histogram.marker.colorbar.title"
_valid_props = {"font", "side", "text"}
@property
def font(self):
"""
Sets this color bar's title font.
The 'font' property is an instance of Font
that may be specified as:
- An instance of :class:`plotly.graph_objs.histogram.marker.colorbar.title.Font`
- A dict of string/value properties that will be passed
to the Font constructor
Returns
-------
plotly.graph_objs.histogram.marker.colorbar.title.Font
"""
return self["font"]
@font.setter
def font(self, val):
self["font"] = val
@property
def side(self):
"""
Determines the location of color bar's title with respect to
        the color bar. Defaults to "top" when `orientation` is "v" and
        defaults to "right" when `orientation` is "h".
The 'side' property is an enumeration that may be specified as:
- One of the following enumeration values:
['right', 'top', 'bottom']
Returns
-------
Any
"""
return self["side"]
@side.setter
def side(self, val):
self["side"] = val
@property
def text(self):
"""
Sets the title of the color bar.
The 'text' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["text"]
@text.setter
def text(self, val):
self["text"] = val
@property
def _prop_descriptions(self):
return """\
font
Sets this color bar's title font.
side
Determines the location of color bar's title with
respect to the color bar. Defaults to "top" when
            `orientation` is "v" and defaults to "right" when
            `orientation` is "h".
text
Sets the title of the color bar.
"""
def __init__(self, arg=None, font=None, side=None, text=None, **kwargs):
"""
Construct a new Title object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.histogram.mark
er.colorbar.Title`
font
Sets this color bar's title font.
side
Determines the location of color bar's title with
respect to the color bar. Defaults to "top" when
            `orientation` is "v" and defaults to "right" when
            `orientation` is "h".
text
Sets the title of the color bar.
Returns
-------
Title
"""
super().__init__("title")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.histogram.marker.colorbar.Title
constructor must be a dict or
an instance of :class:`plotly.graph_objs.histogram.marker.colorbar.Title`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("font", arg, font)
self._set_property("side", arg, side)
self._set_property("text", arg, text)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
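# Usage sketch (standard graph_objs pattern; values illustrative):
#   import plotly.graph_objects as go
#   title = go.histogram.marker.colorbar.Title(text="Counts", side="top")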
| Title |
python | pytorch__pytorch | test/dynamo/test_modules.py | {
"start": 1796,
"end": 2197
} | class ____(torch.nn.Module):
def __init__(self, activation):
super().__init__()
self.linear1 = torch.nn.Linear(10, 10)
self.activation = activation
def forward(self, x):
x = self.linear1(x)
if self.activation is not None:
x = self.activation(x)
if self.activation is None:
x = torch.sigmoid(x)
return x
| FnMemberCmp |
python | pydata__xarray | xarray/backends/scipy_.py | {
"start": 3474,
"end": 5736
} | class ____(netcdf_file_base):
# scipy.io.netcdf_file.close() incorrectly closes file objects that
# were passed in as constructor arguments:
# https://github.com/scipy/scipy/issues/13905
# Instead of closing such files, only call flush(), which is
# equivalent as long as the netcdf_file object is not mmapped.
# This suffices to keep BytesIO objects open long enough to read
# their contents from to_netcdf(), but underlying files still get
# closed when the netcdf_file is garbage collected (via __del__),
# and will need to be fixed upstream in scipy.
def close(self):
if hasattr(self, "fp") and not self.fp.closed:
self.flush()
self.fp.seek(0) # allow file to be read again
def __del__(self):
# Remove the __del__ method, which in scipy is aliased to close().
# These files need to be closed explicitly by xarray.
pass
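# Rough sketch of the intended lifecycle (assumed from the comments above):
# a BytesIO target passed to to_netcdf() is flushed and rewound on close()
# so its contents stay readable, while real file handles are closed by
# xarray explicitly rather than by garbage collection.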
def _open_scipy_netcdf(filename, mode, mmap, version, flush_only=False):
import scipy.io
netcdf_file = flush_only_netcdf_file if flush_only else scipy.io.netcdf_file
# if the string ends with .gz, then gunzip and open as netcdf file
if isinstance(filename, str) and filename.endswith(".gz"):
try:
return netcdf_file(
gzip.open(filename), mode=mode, mmap=mmap, version=version
)
except TypeError as e:
# TODO: gzipped loading only works with NetCDF3 files.
errmsg = e.args[0]
if "is not a valid NetCDF 3 file" in errmsg:
raise ValueError(
"gzipped file loading only supports NetCDF 3 files."
) from e
else:
raise
try:
return netcdf_file(filename, mode=mode, mmap=mmap, version=version)
except TypeError as e: # netcdf3 message is obscure in this case
errmsg = e.args[0]
if "is not a valid NetCDF 3 file" in errmsg:
msg = """
If this is a NetCDF4 file, you may need to install the
netcdf4 library, e.g.,
$ pip install netcdf4
"""
errmsg += msg
raise TypeError(errmsg) from e
else:
raise
| flush_only_netcdf_file |
python | pypa__pipenv | pipenv/utils/dependencies.py | {
"start": 39601,
"end": 48536
} | class ____:
"""Handles processing and environment variable expansion in VCS URLs."""
ENV_VAR_PATTERN = re.compile(r"\${([^}]+)}|\$([a-zA-Z_][a-zA-Z0-9_]*)")
@classmethod
def expand_env_vars(cls, value: str) -> str:
"""
Expands environment variables in a string, with detailed error handling.
Supports both ${VAR} and $VAR syntax.
"""
def _replace_var(match):
var_name = match.group(1) or match.group(2)
if var_name not in os.environ:
raise PipenvUsageError(
f"Environment variable '${var_name}' not found. "
"Please ensure all required environment variables are set."
)
return os.environ[var_name]
try:
return cls.ENV_VAR_PATTERN.sub(_replace_var, value)
except Exception as e:
raise PipenvUsageError(f"Error expanding environment variables: {str(e)}")
@classmethod
def process_vcs_url(cls, url: str) -> str:
"""
Processes a VCS URL, expanding environment variables in individual components.
Handles URLs of the form: vcs+protocol://username:password@hostname/path
"""
parsed = urlparse(url)
# Process each component separately
netloc_parts = parsed.netloc.split("@")
if len(netloc_parts) > 1:
# Handle auth information
auth, host = netloc_parts
if ":" in auth:
username, password = auth.split(":")
username = cls.expand_env_vars(username)
password = cls.expand_env_vars(password)
auth = f"{username}:{password}"
else:
auth = cls.expand_env_vars(auth)
netloc = f"{auth}@{host}"
else:
netloc = cls.expand_env_vars(parsed.netloc)
# Reconstruct URL with processed components
processed_parts = list(parsed)
processed_parts[1] = netloc # Update netloc
processed_parts[2] = cls.expand_env_vars(parsed.path) # Update path
return urlunparse(tuple(processed_parts))
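# Illustrative call (hypothetical credentials, not from this module):
#   os.environ.update(GIT_USER="bot", GIT_TOKEN="s3cret")
#   VCSURLProcessor.process_vcs_url(
#       "git+https://${GIT_USER}:${GIT_TOKEN}@github.com/org/repo.git")
#   -> "git+https://bot:s3cret@github.com/org/repo.git"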
def install_req_from_pipfile(name: str, pipfile: Dict[str, Any]) -> Tuple[Any, Any, str]:
"""
Creates an InstallRequirement from a name and a pipfile entry.
Enhanced to handle environment variables within VCS URLs.
"""
_pipfile = {}
vcs = None
if hasattr(pipfile, "keys"):
_pipfile = dict(pipfile).copy()
else:
vcs = next(iter([vcs for vcs in VCS_LIST if pipfile.startswith(f"{vcs}+")]), None)
if vcs is not None:
_pipfile[vcs] = pipfile
else: # normal named requirement
_pipfile["version"] = pipfile
extras = _pipfile.get("extras", [])
extras_str = f"[{','.join(extras)}]" if extras else ""
if not vcs:
vcs = next(iter([vcs for vcs in VCS_LIST if vcs in _pipfile]), None)
if vcs:
try:
vcs_url = _pipfile[vcs]
subdirectory = _pipfile.get("subdirectory", "")
if subdirectory:
subdirectory = f"#subdirectory={subdirectory}"
# Process VCS URL with environment variable handling
vcs_url, fallback_ref = normalize_vcs_url(vcs_url)
ref = _pipfile.get("ref", fallback_ref)
# Construct requirement string
req_str = f"{vcs_url}@{ref}{extras_str}"
if not req_str.startswith(f"{vcs}+"):
req_str = f"{vcs}+{req_str}"
if _pipfile.get("editable", False):
req_str = f"-e {name}{extras_str} @ {req_str}{subdirectory}"
else:
req_str = f"{name}{extras_str} @ {req_str}{subdirectory}"
except PipenvUsageError as e:
raise PipenvUsageError(
f"Error processing VCS URL for requirement '{name}': {str(e)}"
)
else:
# Handle non-VCS requirements (unchanged)
req_str = handle_non_vcs_requirement(name, _pipfile, extras_str)
# Create InstallRequirement
install_req, _ = expansive_install_req_from_line(
req_str,
comes_from=None,
use_pep517=False,
isolated=False,
hash_options={"hashes": _pipfile.get("hashes", [])},
constraint=False,
expand_env=True,
)
markers = PipenvMarkers.from_pipfile(name, _pipfile)
return install_req, markers, req_str
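# Sketch of the non-VCS path (hypothetical Pipfile entry; assumes get_version
# returns the bare specifier string):
#   install_req_from_pipfile("requests", {"version": ">=2.31", "extras": ["socks"]})
#   builds req_str == "requests[socks]>=2.31"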
def handle_non_vcs_requirement(
name: str, _pipfile: Dict[str, Any], extras_str: str
) -> str:
"""Helper function to handle non-VCS requirements."""
if "path" in _pipfile:
return file_path_from_pipfile(_pipfile["path"], _pipfile)
elif "file" in _pipfile:
return file_path_from_pipfile(_pipfile["file"], _pipfile)
else:
version = get_version(_pipfile)
if is_star(version) or version == "==*":
version = ""
req_str = f"{name}{extras_str}{version}"
markers = PipenvMarkers.from_pipfile(name, _pipfile)
if markers:
req_str = f"{req_str};{markers}"
return req_str
def from_pipfile(name, pipfile):
install_req, markers, req_str = install_req_from_pipfile(name, pipfile)
if markers:
markers = str(markers)
install_req.markers = Marker(markers)
# Construct the requirement string for your Requirement class
extras_str = ""
if install_req.req and install_req.req.extras:
extras_str = f"[{','.join(install_req.req.extras)}]"
specifier = install_req.req.specifier if install_req.req else ""
req_str = f"{install_req.name}{extras_str}{specifier}"
if install_req.markers:
req_str += f"; {install_req.markers}"
# Create the Requirement instance
cls_inst = Requirement(req_str)
return cls_inst
def get_constraints_from_deps(deps):
"""Get constraints from dictionary-formatted dependency"""
constraints = set()
for dep_name, dep_version in deps.items():
c = None
# Constraints cannot contain extras
dep_name = dep_name.split("[", 1)[0]
# Creating a constraint as a canonical name plus a version specifier
if isinstance(dep_version, str):
if dep_version and not is_star(dep_version):
if COMPARE_OP.match(dep_version) is None:
dep_version = f"=={dep_version}"
c = f"{canonicalize_name(dep_name)}{dep_version}"
else:
c = canonicalize_name(dep_name)
elif not any(k in dep_version for k in ["path", "file", "uri"]):
if dep_version.get("skip_resolver") is True:
continue
version = dep_version.get("version", None)
if version and not is_star(version):
if COMPARE_OP.match(version) is None:
                    version = f"=={version}"
c = f"{canonicalize_name(dep_name)}{version}"
else:
c = canonicalize_name(dep_name)
if c:
constraints.add(c)
return constraints
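# Illustrative result:
#   get_constraints_from_deps({"requests": "2.31.0", "rich": "*"})
#   -> {"requests==2.31.0", "rich"}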
def prepare_constraint_file(
constraints,
directory=None,
sources=None,
pip_args=None,
):
if not directory:
directory = create_tracked_tempdir(suffix="-requirements", prefix="pipenv-")
constraints = set(constraints)
constraints_file = NamedTemporaryFile(
mode="w",
prefix="pipenv-",
suffix="-constraints.txt",
dir=directory,
delete=False,
)
if sources and pip_args:
skip_args = ("build-isolation", "use-pep517", "cache-dir")
args_to_add = [
arg for arg in pip_args if not any(bad_arg in arg for bad_arg in skip_args)
]
requirementstxt_sources = " ".join(args_to_add) if args_to_add else ""
requirementstxt_sources = requirementstxt_sources.replace(" --", "\n--")
constraints_file.write(f"{requirementstxt_sources}\n")
if constraints:
constraints_file.write("\n".join(constraints))
constraints_file.close()
return constraints_file.name
def is_required_version(version, specified_version):
"""Check to see if there's a hard requirement for version
number provided in the Pipfile.
"""
# Certain packages may be defined with multiple values.
if isinstance(specified_version, dict):
specified_version = specified_version.get("version", "")
if specified_version.startswith("=="):
return version.strip() == specified_version.split("==")[1].strip()
return True
def is_editable(pipfile_entry):
if hasattr(pipfile_entry, "get"):
return pipfile_entry.get("editable", False)
return False
@contextmanager
def locked_repository(requirement):
if not requirement.is_vcs:
return
src_dir = create_tracked_tempdir(prefix="pipenv-", suffix="-src")
with requirement.req.locked_vcs_repo(src_dir=src_dir) as repo:
yield repo
| VCSURLProcessor |
python | run-llama__llama_index | llama-index-core/llama_index/core/callbacks/schema.py | {
"start": 337,
"end": 1417
} | class ____(str, Enum):
"""
Callback manager event types.
Attributes:
CHUNKING: Logs for the before and after of text splitting.
NODE_PARSING: Logs for the documents and the nodes that they are parsed into.
EMBEDDING: Logs for the number of texts embedded.
LLM: Logs for the template and response of LLM calls.
QUERY: Keeps track of the start and end of each query.
RETRIEVE: Logs for the nodes retrieved for a query.
SYNTHESIZE: Logs for the result for synthesize calls.
TREE: Logs for the summary and level of summaries generated.
SUB_QUESTION: Logs for a generated sub question and answer.
"""
CHUNKING = "chunking"
NODE_PARSING = "node_parsing"
EMBEDDING = "embedding"
LLM = "llm"
QUERY = "query"
RETRIEVE = "retrieve"
SYNTHESIZE = "synthesize"
TREE = "tree"
SUB_QUESTION = "sub_question"
TEMPLATING = "templating"
FUNCTION_CALL = "function_call"
RERANKING = "reranking"
EXCEPTION = "exception"
AGENT_STEP = "agent_step"
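    # Usage sketch (assumed, mirroring typical CallbackManager calls):
    #   callback_manager.on_event_start(CBEventType.LLM, payload={...})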
| CBEventType |
python | Textualize__textual | src/textual/css/scalar_animation.py | {
"start": 339,
"end": 3159
} | class ____(Animation):
def __init__(
self,
widget: Widget,
styles: StylesBase,
start_time: float,
attribute: str,
value: ScalarOffset | Scalar,
duration: float | None,
speed: float | None,
easing: EasingFunction,
on_complete: CallbackType | None = None,
level: AnimationLevel = "full",
):
assert (
speed is not None or duration is not None
), "One of speed or duration required"
self.widget = widget
self.styles = styles
self.start_time = start_time
self.attribute = attribute
self.final_value = value
self.easing = easing
self.on_complete = on_complete
self.level = level
size = widget.outer_size
viewport = widget.app.size
self.start = getattr(styles, attribute).resolve(size, viewport)
self.destination = value.resolve(size, viewport)
if speed is not None:
distance = self.start.get_distance_to(self.destination)
self.duration = distance / speed
else:
assert duration is not None, "Duration expected to be non-None"
self.duration = duration
def __call__(
self, time: float, app_animation_level: AnimationLevel = "full"
) -> bool:
factor = min(1.0, (time - self.start_time) / self.duration)
eased_factor = self.easing(factor)
if (
eased_factor >= 1
or app_animation_level == "none"
or app_animation_level == "basic"
and self.level == "full"
):
setattr(self.styles, self.attribute, self.final_value)
return True
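        # Resolved offsets expose blend(); resolved scalars are plain
        # numbers, so fall back to linear interpolation between endpoints.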
if hasattr(self.start, "blend"):
value = self.start.blend(self.destination, eased_factor)
else:
value = self.start + (self.destination - self.start) * eased_factor
current = self.styles.get_rule(self.attribute)
if current != value:
setattr(self.styles, self.attribute, value)
return False
async def stop(self, complete: bool = True) -> None:
"""Stop the animation.
Args:
complete: Flag to say if the animation should be taken to completion.
Note:
[`on_complete`][Animation.on_complete] will be called regardless
of the value provided for `complete`.
"""
if complete:
setattr(self.styles, self.attribute, self.final_value)
await self.invoke_callback()
def __eq__(self, other: object) -> bool:
if isinstance(other, ScalarAnimation):
return (
self.final_value == other.final_value
and self.duration == other.duration
)
return False
| ScalarAnimation |
python | django__django | tests/timezones/tests.py | {
"start": 30239,
"end": 39363
} | class ____(SimpleTestCase):
# Backend-specific notes:
    # - JSON supports only milliseconds; microseconds will be truncated.
# - PyYAML dumps the UTC offset correctly for timezone-aware datetimes.
# When PyYAML < 5.3 loads this representation, it subtracts the offset
# and returns a naive datetime object in UTC. PyYAML 5.3+ loads timezones
# correctly.
# Tests are adapted to take these quirks into account.
def assert_python_contains_datetime(self, objects, dt):
self.assertEqual(objects[0]["fields"]["dt"], dt)
def assert_json_contains_datetime(self, json, dt):
self.assertIn('"fields": {"dt": "%s"}' % dt, json)
def assert_xml_contains_datetime(self, xml, dt):
field = parseString(xml).getElementsByTagName("field")[0]
self.assertXMLEqual(field.childNodes[0].wholeText, dt)
def assert_yaml_contains_datetime(self, yaml, dt):
# Depending on the yaml dumper, '!timestamp' might be absent
self.assertRegex(yaml, r"\n fields: {dt: !(!timestamp)? '%s'}" % re.escape(dt))
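        # e.g. a serialized event renders roughly as:
        #   fields: {dt: !!timestamp '2011-09-01 13:20:30'}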
def test_naive_datetime(self):
dt = datetime.datetime(2011, 9, 1, 13, 20, 30)
data = serializers.serialize("python", [Event(dt=dt)])
self.assert_python_contains_datetime(data, dt)
obj = next(serializers.deserialize("python", data)).object
self.assertEqual(obj.dt, dt)
data = serializers.serialize("json", [Event(dt=dt)])
self.assert_json_contains_datetime(data, "2011-09-01T13:20:30")
obj = next(serializers.deserialize("json", data)).object
self.assertEqual(obj.dt, dt)
data = serializers.serialize("xml", [Event(dt=dt)])
self.assert_xml_contains_datetime(data, "2011-09-01T13:20:30")
obj = next(serializers.deserialize("xml", data)).object
self.assertEqual(obj.dt, dt)
if not isinstance(
serializers.get_serializer("yaml"), serializers.BadSerializer
):
data = serializers.serialize(
"yaml", [Event(dt=dt)], default_flow_style=None
)
self.assert_yaml_contains_datetime(data, "2011-09-01 13:20:30")
obj = next(serializers.deserialize("yaml", data)).object
self.assertEqual(obj.dt, dt)
def test_naive_datetime_with_microsecond(self):
dt = datetime.datetime(2011, 9, 1, 13, 20, 30, 405060)
data = serializers.serialize("python", [Event(dt=dt)])
self.assert_python_contains_datetime(data, dt)
obj = next(serializers.deserialize("python", data)).object
self.assertEqual(obj.dt, dt)
data = serializers.serialize("json", [Event(dt=dt)])
self.assert_json_contains_datetime(data, "2011-09-01T13:20:30.405")
obj = next(serializers.deserialize("json", data)).object
self.assertEqual(obj.dt, dt.replace(microsecond=405000))
data = serializers.serialize("xml", [Event(dt=dt)])
self.assert_xml_contains_datetime(data, "2011-09-01T13:20:30.405060")
obj = next(serializers.deserialize("xml", data)).object
self.assertEqual(obj.dt, dt)
if not isinstance(
serializers.get_serializer("yaml"), serializers.BadSerializer
):
data = serializers.serialize(
"yaml", [Event(dt=dt)], default_flow_style=None
)
self.assert_yaml_contains_datetime(data, "2011-09-01 13:20:30.405060")
obj = next(serializers.deserialize("yaml", data)).object
self.assertEqual(obj.dt, dt)
def test_aware_datetime_with_microsecond(self):
dt = datetime.datetime(2011, 9, 1, 17, 20, 30, 405060, tzinfo=ICT)
data = serializers.serialize("python", [Event(dt=dt)])
self.assert_python_contains_datetime(data, dt)
obj = next(serializers.deserialize("python", data)).object
self.assertEqual(obj.dt, dt)
data = serializers.serialize("json", [Event(dt=dt)])
self.assert_json_contains_datetime(data, "2011-09-01T17:20:30.405+07:00")
obj = next(serializers.deserialize("json", data)).object
self.assertEqual(obj.dt, dt.replace(microsecond=405000))
data = serializers.serialize("xml", [Event(dt=dt)])
self.assert_xml_contains_datetime(data, "2011-09-01T17:20:30.405060+07:00")
obj = next(serializers.deserialize("xml", data)).object
self.assertEqual(obj.dt, dt)
if not isinstance(
serializers.get_serializer("yaml"), serializers.BadSerializer
):
data = serializers.serialize(
"yaml", [Event(dt=dt)], default_flow_style=None
)
self.assert_yaml_contains_datetime(data, "2011-09-01 17:20:30.405060+07:00")
obj = next(serializers.deserialize("yaml", data)).object
if HAS_YAML and yaml.__version__ < "5.3":
self.assertEqual(obj.dt.replace(tzinfo=UTC), dt)
else:
self.assertEqual(obj.dt, dt)
def test_aware_datetime_in_utc(self):
dt = datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC)
data = serializers.serialize("python", [Event(dt=dt)])
self.assert_python_contains_datetime(data, dt)
obj = next(serializers.deserialize("python", data)).object
self.assertEqual(obj.dt, dt)
data = serializers.serialize("json", [Event(dt=dt)])
self.assert_json_contains_datetime(data, "2011-09-01T10:20:30Z")
obj = next(serializers.deserialize("json", data)).object
self.assertEqual(obj.dt, dt)
data = serializers.serialize("xml", [Event(dt=dt)])
self.assert_xml_contains_datetime(data, "2011-09-01T10:20:30+00:00")
obj = next(serializers.deserialize("xml", data)).object
self.assertEqual(obj.dt, dt)
if not isinstance(
serializers.get_serializer("yaml"), serializers.BadSerializer
):
data = serializers.serialize(
"yaml", [Event(dt=dt)], default_flow_style=None
)
self.assert_yaml_contains_datetime(data, "2011-09-01 10:20:30+00:00")
obj = next(serializers.deserialize("yaml", data)).object
self.assertEqual(obj.dt.replace(tzinfo=UTC), dt)
def test_aware_datetime_in_local_timezone(self):
dt = datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT)
data = serializers.serialize("python", [Event(dt=dt)])
self.assert_python_contains_datetime(data, dt)
obj = next(serializers.deserialize("python", data)).object
self.assertEqual(obj.dt, dt)
data = serializers.serialize("json", [Event(dt=dt)])
self.assert_json_contains_datetime(data, "2011-09-01T13:20:30+03:00")
obj = next(serializers.deserialize("json", data)).object
self.assertEqual(obj.dt, dt)
data = serializers.serialize("xml", [Event(dt=dt)])
self.assert_xml_contains_datetime(data, "2011-09-01T13:20:30+03:00")
obj = next(serializers.deserialize("xml", data)).object
self.assertEqual(obj.dt, dt)
if not isinstance(
serializers.get_serializer("yaml"), serializers.BadSerializer
):
data = serializers.serialize(
"yaml", [Event(dt=dt)], default_flow_style=None
)
self.assert_yaml_contains_datetime(data, "2011-09-01 13:20:30+03:00")
obj = next(serializers.deserialize("yaml", data)).object
if HAS_YAML and yaml.__version__ < "5.3":
self.assertEqual(obj.dt.replace(tzinfo=UTC), dt)
else:
self.assertEqual(obj.dt, dt)
def test_aware_datetime_in_other_timezone(self):
dt = datetime.datetime(2011, 9, 1, 17, 20, 30, tzinfo=ICT)
data = serializers.serialize("python", [Event(dt=dt)])
self.assert_python_contains_datetime(data, dt)
obj = next(serializers.deserialize("python", data)).object
self.assertEqual(obj.dt, dt)
data = serializers.serialize("json", [Event(dt=dt)])
self.assert_json_contains_datetime(data, "2011-09-01T17:20:30+07:00")
obj = next(serializers.deserialize("json", data)).object
self.assertEqual(obj.dt, dt)
data = serializers.serialize("xml", [Event(dt=dt)])
self.assert_xml_contains_datetime(data, "2011-09-01T17:20:30+07:00")
obj = next(serializers.deserialize("xml", data)).object
self.assertEqual(obj.dt, dt)
if not isinstance(
serializers.get_serializer("yaml"), serializers.BadSerializer
):
data = serializers.serialize(
"yaml", [Event(dt=dt)], default_flow_style=None
)
self.assert_yaml_contains_datetime(data, "2011-09-01 17:20:30+07:00")
obj = next(serializers.deserialize("yaml", data)).object
if HAS_YAML and yaml.__version__ < "5.3":
self.assertEqual(obj.dt.replace(tzinfo=UTC), dt)
else:
self.assertEqual(obj.dt, dt)
@override_settings(DATETIME_FORMAT="c", TIME_ZONE="Africa/Nairobi", USE_TZ=True)
| SerializationTests |
python | getsentry__sentry | src/sentry/rules/history/endpoints/project_rule_group_history.py | {
"start": 1252,
"end": 2066
} | class ____(Serializer):
def get_attrs(
self, item_list: Sequence[RuleGroupHistory], user: Any, **kwargs: Any
) -> MutableMapping[Any, Any]:
serialized_groups = {
g["id"]: g for g in serialize([item.group for item in item_list], user)
}
return {
history: {"group": serialized_groups[str(history.group.id)]} for history in item_list
}
def serialize(
self, obj: RuleGroupHistory, attrs: Mapping[Any, Any], user: Any, **kwargs: Any
) -> RuleGroupHistoryResponse:
return {
"group": attrs["group"],
"count": obj.count,
"lastTriggered": obj.last_triggered,
"eventId": obj.event_id,
}
@extend_schema(tags=["issue_alerts"])
@region_silo_endpoint
| RuleGroupHistorySerializer |
python | anthropics__anthropic-sdk-python | src/anthropic/types/beta/beta_server_tool_use_block.py | {
"start": 518,
"end": 941
} | class ____(BaseModel):
id: str
caller: Caller
"""Tool invocation directly from the model."""
input: Dict[str, object]
name: Literal[
"web_search",
"web_fetch",
"code_execution",
"bash_code_execution",
"text_editor_code_execution",
"tool_search_tool_regex",
"tool_search_tool_bm25",
]
type: Literal["server_tool_use"]
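    # Construction sketch (values illustrative; `caller` must satisfy the
    # Caller model referenced above):
    #   BetaServerToolUseBlock(id="srvtoolu_01", caller=..., name="web_search",
    #       input={"query": "weather"}, type="server_tool_use")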
| BetaServerToolUseBlock |
python | pypa__pip | tests/unit/test_vcs.py | {
"start": 27699,
"end": 34967
} | class ____(TestCase):
def setUp(self) -> None:
patcher = mock.patch("pip._internal.vcs.versioncontrol.call_subprocess")
self.addCleanup(patcher.stop)
self.call_subprocess_mock = patcher.start()
# Test Data.
self.url = "git+http://username:password@git.example.com/"
self.svn = Git()
self.rev_options = RevOptions(Git)
self.dest = "/tmp/test"
def test_fetch_new(self) -> None:
with mock.patch.object(self.svn, "get_git_version", return_value=(2, 17)):
with mock.patch.object(
self.svn, "update_submodules"
) as update_submodules_mock:
self.svn.fetch_new(
self.dest, hide_url(self.url), self.rev_options, verbosity=1
)
assert self.call_subprocess_mock.call_args_list[0][0][0] == [
"git",
"clone",
"--filter=blob:none",
hide_url("git+http://username:password@git.example.com/"),
"/tmp/test",
]
update_submodules_mock.assert_called_with(self.dest, verbosity=1)
def test_fetch_new_legacy(self) -> None:
with mock.patch.object(self.svn, "get_git_version", return_value=(1, 0)):
with mock.patch.object(
self.svn, "update_submodules"
) as update_submodules_mock:
self.svn.fetch_new(
self.dest, hide_url(self.url), self.rev_options, verbosity=1
)
assert self.call_subprocess_mock.call_args_list[0][0][0] == [
"git",
"clone",
hide_url("git+http://username:password@git.example.com/"),
"/tmp/test",
]
update_submodules_mock.assert_called_with(self.dest, verbosity=1)
def test_fetch_new_legacy_quiet(self) -> None:
with mock.patch.object(self.svn, "get_git_version", return_value=(1, 0)):
with mock.patch.object(
self.svn, "update_submodules"
) as update_submodules_mock:
self.svn.fetch_new(
self.dest, hide_url(self.url), self.rev_options, verbosity=0
)
assert self.call_subprocess_mock.call_args_list[0][0][0] == [
"git",
"clone",
"--quiet",
hide_url("git+http://username:password@git.example.com/"),
"/tmp/test",
]
update_submodules_mock.assert_called_with(self.dest, verbosity=0)
def test_fetch_new_quiet(self) -> None:
with mock.patch.object(self.svn, "get_git_version", return_value=(2, 17)):
with mock.patch.object(
self.svn, "update_submodules"
) as update_submodules_mock:
self.svn.fetch_new(
self.dest, hide_url(self.url), self.rev_options, verbosity=0
)
assert self.call_subprocess_mock.call_args_list[0][0][0] == [
"git",
"clone",
"--filter=blob:none",
"--quiet",
hide_url("git+http://username:password@git.example.com/"),
"/tmp/test",
]
update_submodules_mock.assert_called_with(self.dest, verbosity=0)
def test_switch(self) -> None:
with mock.patch.object(self.svn, "update_submodules") as update_submodules_mock:
self.svn.switch(
self.dest, hide_url(self.url), self.rev_options, verbosity=1
)
assert self.call_subprocess_mock.call_args_list[1][0][0] == [
"git",
"checkout",
"HEAD",
]
update_submodules_mock.assert_called_with(self.dest, verbosity=1)
def test_switch_quiet(self) -> None:
with mock.patch.object(self.svn, "update_submodules") as update_submodules_mock:
self.svn.switch(
self.dest, hide_url(self.url), self.rev_options, verbosity=0
)
assert self.call_subprocess_mock.call_args_list[1][0][0] == [
"git",
"checkout",
"-q",
"HEAD",
]
update_submodules_mock.assert_called_with(self.dest, verbosity=0)
def test_update(self) -> None:
with mock.patch.object(self.svn, "get_git_version", return_value=(1, 9)):
with mock.patch.object(
self.svn, "update_submodules"
) as update_submodules_mock:
self.svn.update(
self.dest, hide_url(self.url), self.rev_options, verbosity=1
)
assert self.call_subprocess_mock.call_args_list[0][0][0] == [
"git",
"fetch",
"--tags",
]
assert self.call_subprocess_mock.call_args_list[2][0][0] == [
"git",
"reset",
"--hard",
"HEAD",
]
update_submodules_mock.assert_called_with(self.dest, verbosity=1)
def test_update_legacy(self) -> None:
with mock.patch.object(self.svn, "get_git_version", return_value=(1, 8)):
with mock.patch.object(
self.svn, "update_submodules"
) as update_submodules_mock:
self.svn.update(
self.dest, hide_url(self.url), self.rev_options, verbosity=1
)
assert self.call_subprocess_mock.call_args_list[0][0][0] == [
"git",
"fetch",
]
assert self.call_subprocess_mock.call_args_list[2][0][0] == [
"git",
"reset",
"--hard",
"HEAD",
]
update_submodules_mock.assert_called_with(self.dest, verbosity=1)
def test_update_legacy_quiet(self) -> None:
with mock.patch.object(self.svn, "get_git_version", return_value=(1, 9)):
with mock.patch.object(
self.svn, "update_submodules"
) as update_submodules_mock:
self.svn.update(
self.dest, hide_url(self.url), self.rev_options, verbosity=0
)
assert self.call_subprocess_mock.call_args_list[0][0][0] == [
"git",
"fetch",
"--tags",
"-q",
]
assert self.call_subprocess_mock.call_args_list[2][0][0] == [
"git",
"reset",
"--hard",
"-q",
"HEAD",
]
update_submodules_mock.assert_called_with(self.dest, verbosity=0)
def test_update_quiet(self) -> None:
with mock.patch.object(self.svn, "get_git_version", return_value=(1, 8)):
with mock.patch.object(
self.svn, "update_submodules"
) as update_submodules_mock:
self.svn.update(
self.dest, hide_url(self.url), self.rev_options, verbosity=0
)
assert self.call_subprocess_mock.call_args_list[0][0][0] == [
"git",
"fetch",
"-q",
]
assert self.call_subprocess_mock.call_args_list[2][0][0] == [
"git",
"reset",
"--hard",
"-q",
"HEAD",
]
update_submodules_mock.assert_called_with(self.dest, verbosity=0)
| TestGitArgs |
python | pytorch__pytorch | torch/testing/_internal/common_quantization.py | {
"start": 87230,
"end": 87687
} | class ____(torch.nn.Module):
r"""A Module that uses a dynamic QAT by default."""
def __init__(self, qconfig=None):
super().__init__()
self.qconfig = qconfig or default_dynamic_qat_qconfig
self.fc1 = torch.nn.Linear(5, 1).to(dtype=torch.float)
self.fc2 = torch.nn.Linear(1, 10).to(dtype=torch.float)
def forward(self, x):
x = self.fc1(x)
x = self.fc2(x)
return x
| ManualLinearDynamicQATModel |
python | scipy__scipy | tools/gh_lists.py | {
"start": 5480,
"end": 9018
} | class ____:
def __init__(self, auth=False):
self.headers = {'User-Agent': 'gh_lists.py',
'Accept': 'application/vnd.github.v3+json'}
if auth:
self.authenticate()
req = self.urlopen('https://api.github.com/rate_limit')
try:
if req.getcode() != 200:
raise RuntimeError()
info = json.loads(req.read().decode('utf-8'))
finally:
req.close()
self.ratelimit_remaining = int(info['rate']['remaining'])
self.ratelimit_reset = float(info['rate']['reset'])
def authenticate(self):
print("Input a Github API access token.\n"
"Personal tokens can be created at https://github.com/settings/tokens\n"
"This script does not require any permissions (so don't give it any).",
file=sys.stderr, flush=True)
print("Access token: ", file=sys.stderr, end='', flush=True)
token = input()
self.headers['Authorization'] = f'token {token.strip()}'
def urlopen(self, url, auth=None):
assert url.startswith('https://')
req = Request(url, headers=self.headers)
return urlopen(req, timeout=60)
def get_multipage(self, url):
data = []
while url:
page_data, info, next_url = self.get(url)
data += page_data
url = next_url
return data
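    # e.g. get_multipage("https://api.github.com/repos/scipy/scipy/issues?state=closed")
    # follows the rel="next" Link headers until the listing is exhausted.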
def get(self, url):
while True:
# Wait until rate limit
while self.ratelimit_remaining == 0 and self.ratelimit_reset > time.time():
s = self.ratelimit_reset + 5 - time.time()
if s <= 0:
break
print(
"[gh_lists] rate limit exceeded: waiting until {} ({} s remaining)"
.format(datetime.datetime.fromtimestamp(self.ratelimit_reset)
.strftime('%Y-%m-%d %H:%M:%S'),
int(s)),
file=sys.stderr, flush=True
)
time.sleep(min(5*60, s))
# Get page
print("[gh_lists] get:", url, file=sys.stderr, flush=True)
try:
req = self.urlopen(url)
try:
code = req.getcode()
info = req.info()
data = json.loads(req.read().decode('utf-8'))
finally:
req.close()
except HTTPError as err:
code = err.getcode()
info = err.info()
data = None
if code not in (200, 403):
raise RuntimeError()
# Parse reply
next_url = None
if 'Link' in info:
m = re.search('<([^<>]*)>; rel="next"', info['Link'])
if m:
next_url = m.group(1)
# Update rate limit info
if 'X-RateLimit-Remaining' in info:
self.ratelimit_remaining = int(info['X-RateLimit-Remaining'])
if 'X-RateLimit-Reset' in info:
self.ratelimit_reset = float(info['X-RateLimit-Reset'])
# Deal with rate limit exceeded
if code != 200 or data is None:
if self.ratelimit_remaining == 0:
continue
else:
raise RuntimeError()
# Done.
return data, info, next_url
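# Example usage (an illustrative sketch; the URL is hypothetical): instantiate the
# client above and call get_multipage() with a GitHub API listing URL, e.g.
# 'https://api.github.com/repos/scipy/scipy/issues?state=closed'. get() parses the
# Link header for pagination and waits out the API rate limit when it is exhausted.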
if __name__ == "__main__":
sys.exit(main())
| GithubGet |
python | walkccc__LeetCode | solutions/490. The Maze/490.py | {
"start": 0,
"end": 747
} | class ____:
def hasPath(
self,
maze: list[list[int]],
start: list[int],
destination: list[int],
) -> bool:
DIRS = ((0, 1), (1, 0), (0, -1), (-1, 0))
m = len(maze)
n = len(maze[0])
q = collections.deque([(start[0], start[1])])
seen = {(start[0], start[1])}
def isValid(x: int, y: int) -> bool:
return 0 <= x < m and 0 <= y < n and maze[x][y] == 0
while q:
i, j = q.popleft()
for dx, dy in DIRS:
x = i
y = j
while isValid(x + dx, y + dy):
x += dx
y += dy
if [x, y] == destination:
return True
if (x, y) in seen:
continue
q.append((x, y))
seen.add((x, y))
return False
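    # Worked example (LeetCode 490, example 1): in the maze
    #   [[0,0,1,0,0],[0,0,0,0,0],[0,0,0,1,0],[1,1,0,1,1],[0,0,0,0,0]]
    # a ball starting at [0, 4] can stop at [4, 4], so hasPath returns True.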
| Solution |
python | apache__airflow | providers/apache/spark/src/airflow/providers/apache/spark/operators/spark_sql.py | {
"start": 1113,
"end": 4583
} | class ____(BaseOperator):
"""
Execute Spark SQL query.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:SparkSqlOperator`
:param sql: The SQL query to execute. (templated)
:param conf: arbitrary Spark configuration property
:param conn_id: connection_id string
:param total_executor_cores: (Standalone & Mesos only) Total cores for all
executors (Default: all the available cores on the worker)
:param executor_cores: (Standalone & YARN only) Number of cores per
executor (Default: 2)
:param executor_memory: Memory per executor (e.g. 1000M, 2G) (Default: 1G)
:param keytab: Full path to the file that contains the keytab
:param master: spark://host:port, mesos://host:port, yarn, or local
(Default: The ``host`` and ``port`` set in the Connection, or ``"yarn"``)
:param name: Name of the job
:param num_executors: Number of executors to launch
:param verbose: Whether to pass the verbose flag to spark-sql
:param yarn_queue: The YARN queue to submit to
(Default: The ``queue`` value set in the Connection, or ``"default"``)
"""
template_fields: Sequence[str] = ("sql",)
template_ext: Sequence[str] = (".sql", ".hql")
template_fields_renderers = {"sql": "sql"}
def __init__(
self,
*,
sql: str,
conf: dict[str, Any] | str | None = None,
conn_id: str = "spark_sql_default",
total_executor_cores: int | None = None,
executor_cores: int | None = None,
executor_memory: str | None = None,
keytab: str | None = None,
principal: str | None = None,
master: str | None = None,
name: str = "default-name",
num_executors: int | None = None,
verbose: bool = True,
yarn_queue: str | None = None,
**kwargs: Any,
) -> None:
super().__init__(**kwargs)
self.sql = sql
self._conf = conf
self._conn_id = conn_id
self._total_executor_cores = total_executor_cores
self._executor_cores = executor_cores
self._executor_memory = executor_memory
self._keytab = keytab
self._principal = principal
self._master = master
self._name = name
self._num_executors = num_executors
self._verbose = verbose
self._yarn_queue = yarn_queue
self._hook: SparkSqlHook | None = None
def execute(self, context: Context) -> None:
"""Call the SparkSqlHook to run the provided sql query."""
if self._hook is None:
self._hook = self._get_hook()
self._hook.run_query()
def on_kill(self) -> None:
if self._hook is None:
self._hook = self._get_hook()
self._hook.kill()
def _get_hook(self) -> SparkSqlHook:
"""Get SparkSqlHook."""
return SparkSqlHook(
sql=self.sql,
conf=self._conf,
conn_id=self._conn_id,
total_executor_cores=self._total_executor_cores,
executor_cores=self._executor_cores,
executor_memory=self._executor_memory,
keytab=self._keytab,
principal=self._principal,
name=self._name,
num_executors=self._num_executors,
master=self._master,
verbose=self._verbose,
yarn_queue=self._yarn_queue,
)
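    # Example DAG usage (an illustrative sketch; the task id, SQL, and connection
    # are hypothetical):
    #   count_rows = SparkSqlOperator(
    #       task_id="count_rows",
    #       sql="SELECT COUNT(*) FROM src",
    #       conn_id="spark_sql_default",
    #   )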
| SparkSqlOperator |
python | spyder-ide__spyder | spyder/utils/syntaxhighlighters.py | {
"start": 47391,
"end": 48711
} | class ____(BaseSH):
"""Fortran Syntax Highlighter"""
# Syntax highlighting rules:
PROG = re.compile(make_fortran_patterns(), re.S|re.I)
IDPROG = re.compile(r"\s+(\w+)", re.S)
# Syntax highlighting states (from one text block to another):
NORMAL = 0
def __init__(self, parent, font=None, color_scheme=None):
BaseSH.__init__(self, parent, font, color_scheme)
def highlight_block(self, text):
"""Implement highlight specific for Fortran."""
text = str(text)
self.setFormat(0, qstring_length(text), self.formats["normal"])
index = 0
for match in self.PROG.finditer(text):
for key, value in list(match.groupdict().items()):
if value:
start, end = get_span(match, key)
index += end-start
self.setFormat(start, end-start, self.formats[key])
if value.lower() in ("subroutine", "module", "function"):
match1 = self.IDPROG.match(text, end)
if match1:
start1, end1 = get_span(match1, 1)
self.setFormat(start1, end1-start1,
self.formats["definition"])
self.highlight_extras(text)
| FortranSH |
python | huggingface__transformers | src/transformers/models/falcon_h1/modeling_falcon_h1.py | {
"start": 57348,
"end": 68631
} | class ____(FalconH1PreTrainedModel):
def __init__(self, config: FalconH1Config):
super().__init__(config)
self.padding_idx = config.pad_token_id
self.vocab_size = config.vocab_size
self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
decoder_layers = []
for i in range(config.num_hidden_layers):
decoder_layers.append(FalconH1DecoderLayer(config, layer_idx=i))
self.layers = nn.ModuleList(decoder_layers)
self._attn_implementation = config._attn_implementation
self.final_layernorm = FalconH1RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
self.rotary_emb = FalconH1RotaryEmbedding(config=config)
self.embedding_multiplier = config.embedding_multiplier
self.lm_head_multiplier = config.lm_head_multiplier
self.gradient_checkpointing = False
# Compute the MuP vector once and register it for all layers
mup_vector = compute_mup_vector(config)
for layer in self.layers:
layer.mamba.register_buffer("mup_vector", mup_vector, persistent=False)
# Initialize weights and apply final processing
self.post_init()
@can_return_tuple
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[FalconHybridMambaAttentionDynamicCache] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
cache_position: Optional[torch.LongTensor] = None,
**kwargs, # NOOP kwargs, for now
) -> Union[tuple, BaseModelOutputWithPast]:
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
use_cache = use_cache if use_cache is not None else self.config.use_cache
if (input_ids is None) ^ (inputs_embeds is not None):
raise ValueError("You must specify exactly one of input_ids or inputs_embeds")
if self.gradient_checkpointing and self.training and use_cache:
logger.warning_once(
"`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`."
)
use_cache = False
if inputs_embeds is None:
inputs_embeds = self.embed_tokens(input_ids) * self.embedding_multiplier
hidden_states = inputs_embeds
if use_cache and past_key_values is None:
logger.warning_once(
"FalconH1 requires an initialized `FalconHybridMambaAttentionDynamicCache` to return a cache. None was "
"provided, so no cache will be returned."
)
if cache_position is None:
cache_position = torch.arange(hidden_states.shape[1], device=hidden_states.device)
if position_ids is None:
position_ids = cache_position.unsqueeze(0)
causal_mask = self._update_causal_mask(
attention_mask, inputs_embeds, cache_position, past_key_values, output_attentions
)
mamba_mask = self._update_mamba_mask(attention_mask, cache_position)
position_embeddings = self.rotary_emb(hidden_states, position_ids=position_ids)
all_hidden_states = () if output_hidden_states else None
all_self_attns = () if output_attentions else None
for decoder_layer in self.layers:
if output_hidden_states:
all_hidden_states += (hidden_states,)
layer_outputs = decoder_layer(
hidden_states,
attention_mask=causal_mask,
mamba_attention_mask=mamba_mask,
position_ids=position_ids,
past_key_values=past_key_values,
output_attentions=output_attentions,
use_cache=use_cache,
cache_position=cache_position,
position_embeddings=position_embeddings,
)
hidden_states = layer_outputs[0]
if output_attentions:
if layer_outputs[1] is not None:
# append attentions only of attention layers. Mamba layers return `None` as the attention weights
all_self_attns += (layer_outputs[1],)
hidden_states = self.final_layernorm(hidden_states)
# add hidden states from the last decoder layer
if output_hidden_states:
all_hidden_states += (hidden_states,)
if past_key_values and not past_key_values.has_previous_state:
past_key_values.has_previous_state = True
next_cache = None if not use_cache else past_key_values
return BaseModelOutputWithPast(
last_hidden_state=hidden_states,
past_key_values=next_cache,
hidden_states=all_hidden_states,
attentions=all_self_attns,
)
def _update_mamba_mask(self, attention_mask, cache_position):
"""
No need for zeroing states when
1. Cached forward
2. Attending to all inputs
"""
mamba_mask = attention_mask
if cache_position[0] > 0 or (attention_mask is not None and torch.all(attention_mask == 1)):
mamba_mask = None
return mamba_mask
def _update_causal_mask(
self,
attention_mask: torch.Tensor,
input_tensor: torch.Tensor,
cache_position: torch.Tensor,
past_key_values: FalconHybridMambaAttentionDynamicCache,
output_attentions: bool,
):
if self.config._attn_implementation == "flash_attention_2":
if attention_mask is not None and 0.0 in attention_mask:
return attention_mask
return None
# For SDPA, when possible, we will rely on its `is_causal` argument instead of its `attn_mask` argument, in
# order to dispatch on Flash Attention 2. This feature is not compatible with static cache, as SDPA will fail
# to infer the attention mask.
past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
# When output attentions is True, sdpa implementation's forward method calls the eager implementation's forward
if self.config._attn_implementation == "sdpa" and not output_attentions:
if AttentionMaskConverter._ignore_causal_mask_sdpa(
attention_mask,
inputs_embeds=input_tensor,
past_key_values_length=past_seen_tokens,
is_training=self.training,
):
return None
dtype = input_tensor.dtype
sequence_length = input_tensor.shape[1]
target_length = (
attention_mask.shape[-1]
if isinstance(attention_mask, torch.Tensor)
else past_seen_tokens + sequence_length + 1
)
# In case the provided `attention` mask is 2D, we generate a causal mask here (4D).
causal_mask = self._prepare_4d_causal_attention_mask_with_cache_position(
attention_mask,
sequence_length=sequence_length,
target_length=target_length,
dtype=dtype,
cache_position=cache_position,
batch_size=input_tensor.shape[0],
)
if (
self.config._attn_implementation == "sdpa"
and attention_mask is not None
and attention_mask.device.type in ["cuda", "xpu", "npu"]
and not output_attentions
):
# Attend to all tokens in fully masked rows in the causal_mask, for example the relevant first rows when
# using left padding. This is required by F.scaled_dot_product_attention memory-efficient attention path.
# Details: https://github.com/pytorch/pytorch/issues/110213
min_dtype = torch.finfo(dtype).min
causal_mask = AttentionMaskConverter._unmask_unattended(causal_mask, min_dtype)
return causal_mask
@staticmethod
def _prepare_4d_causal_attention_mask_with_cache_position(
attention_mask: torch.Tensor,
sequence_length: int,
target_length: int,
dtype: torch.dtype,
cache_position: torch.Tensor,
batch_size: int,
**kwargs,
):
"""
Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape
`(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing.
Args:
attention_mask (`torch.Tensor`):
A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape
`(batch_size, 1, query_length, key_value_length)`.
sequence_length (`int`):
The sequence length being processed.
target_length (`int`):
The target length: when generating with static cache, the mask should be as long as the static cache,
to account for the 0 padding, the part of the cache that is not filled yet.
dtype (`torch.dtype`):
The dtype to use for the 4D attention mask.
cache_position (`torch.Tensor`):
Indices depicting the position of the input sequence tokens in the sequence.
batch_size (`torch.Tensor`):
Batch size.
"""
if attention_mask is not None and attention_mask.dim() == 4:
# In this case we assume that the mask comes already in inverted form and requires no inversion or slicing.
causal_mask = attention_mask
else:
min_dtype = torch.finfo(dtype).min
causal_mask = torch.full(
(sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=cache_position.device
)
if sequence_length != 1:
causal_mask = torch.triu(causal_mask, diagonal=1)
causal_mask *= torch.arange(target_length, device=cache_position.device) > cache_position.reshape(-1, 1)
causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1)
if attention_mask is not None:
causal_mask = causal_mask.clone() # copy to contiguous memory for in-place edit
mask_length = attention_mask.shape[-1]
padding_attention_mask = (attention_mask[:, None, None, :] == attention_mask[:, None, :, None])[
:, :, -sequence_length:, :
].to(dtype)
padding_mask = causal_mask[:, :, :, :mask_length] + padding_attention_mask
padding_mask = padding_mask == 0
causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill(
padding_mask, min_dtype
)
return causal_mask
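        # Worked example (illustrative): with sequence_length=2, target_length=4 and
        # cache_position=[2, 3], the causal part lets row 0 attend to key positions
        # 0..2 and row 1 to 0..3; padded key positions are then re-masked to min_dtype.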
@auto_docstring
| FalconH1Model |
python | scikit-learn__scikit-learn | sklearn/exceptions.py | {
"start": 1167,
"end": 1895
} | class ____(ValueError, AttributeError):
"""Exception class to raise if estimator is used before fitting.
This class inherits from both ValueError and AttributeError to help with
exception handling and backward compatibility.
Examples
--------
>>> from sklearn.svm import LinearSVC
>>> from sklearn.exceptions import NotFittedError
>>> try:
... LinearSVC().predict([[1, 2], [2, 3], [3, 4]])
... except NotFittedError as e:
... print(repr(e))
NotFittedError("This LinearSVC instance is not fitted yet. Call 'fit' with
appropriate arguments before using this estimator."...)
.. versionchanged:: 0.18
Moved from sklearn.utils.validation.
"""
| NotFittedError |
python | ansible__ansible | lib/ansible/errors/__init__.py | {
"start": 15110,
"end": 16353
} | class ____(AnsibleRuntimeError):
"""An error due to attempted storage of an unsupported variable type."""
@classmethod
def from_value(cls, *, obj: t.Any) -> t.Self:
# avoid an incorrect error message when `obj` is a type
type_name = type(obj).__name__ if isinstance(obj, type) else native_type_name(obj)
return cls(message=f'Type {type_name!r} is unsupported for variable storage.', obj=obj)
def __getattr__(name: str) -> t.Any:
"""Inject import-time deprecation warnings."""
from ..utils.display import Display
match name:
case 'AnsibleFilterTypeError':
Display().deprecated(
msg=f"Importing {name!r} is deprecated.",
help_text=f"Import {AnsibleTypeError.__name__!r} instead.",
version="2.23",
)
return AnsibleTypeError
case '_AnsibleActionDone':
Display().deprecated(
msg=f"Importing {name!r} is deprecated.",
help_text="Return directly from action plugins instead.",
version="2.23",
)
return _ActionDone
raise AttributeError(f'module {__name__!r} has no attribute {name!r}')
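# Example (illustrative): `from ansible.errors import AnsibleFilterTypeError` still
# resolves via the module-level __getattr__ above: it emits a deprecation warning
# and returns AnsibleTypeError.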
| AnsibleVariableTypeError |
python | apache__airflow | providers/singularity/tests/unit/singularity/operators/test_singularity.py | {
"start": 1037,
"end": 6294
} | class ____:
@mock.patch("airflow.providers.singularity.operators.singularity.Client")
def test_execute(self, client_mock):
instance = mock.Mock(
autospec=Instance,
**{
"start.return_value": 0,
"stop.return_value": 0,
},
)
client_mock.instance.return_value = instance
client_mock.execute.return_value = {"return_code": 0, "message": "message"}
task = SingularityOperator(task_id="task-id", image="docker://busybox", command="echo hello")
task.execute({})
client_mock.instance.assert_called_once_with("docker://busybox", options=[], args=None, start=False)
client_mock.execute.assert_called_once_with(mock.ANY, "echo hello", return_result=True)
execute_args, _ = client_mock.execute.call_args
assert execute_args[0] is instance
instance.start.assert_called_once_with()
instance.stop.assert_called_once_with()
@pytest.mark.parametrize("command", [pytest.param("", id="empty"), pytest.param(None, id="none")])
def test_command_is_required(self, command):
task = SingularityOperator(task_id="task-id", image="docker://busybox", command=command)
with pytest.raises(AirflowException, match="You must define a command."):
task.execute({})
@mock.patch("airflow.providers.singularity.operators.singularity.Client")
def test_image_should_be_pulled_when_not_exists(self, client_mock):
instance = mock.Mock(
autospec=Instance,
**{
"start.return_value": 0,
"stop.return_value": 0,
},
)
client_mock.pull.return_value = "/tmp/busybox_latest.sif"
client_mock.instance.return_value = instance
client_mock.execute.return_value = {"return_code": 0, "message": "message"}
task = SingularityOperator(
task_id="task-id",
image="docker://busybox",
command="echo hello",
pull_folder="/tmp",
force_pull=True,
)
task.execute({})
client_mock.instance.assert_called_once_with(
"/tmp/busybox_latest.sif", options=[], args=None, start=False
)
client_mock.pull.assert_called_once_with("docker://busybox", stream=True, pull_folder="/tmp")
client_mock.execute.assert_called_once_with(mock.ANY, "echo hello", return_result=True)
@pytest.mark.parametrize(
("volumes", "expected_options"),
[
(
None,
[],
),
(
[],
[],
),
(
["AAA"],
["--bind", "AAA"],
),
(
["AAA", "BBB"],
["--bind", "AAA", "--bind", "BBB"],
),
(
["AAA", "BBB", "CCC"],
["--bind", "AAA", "--bind", "BBB", "--bind", "CCC"],
),
],
)
@mock.patch("airflow.providers.singularity.operators.singularity.Client")
def test_bind_options(self, client_mock, volumes, expected_options):
instance = mock.Mock(
autospec=Instance,
**{
"start.return_value": 0,
"stop.return_value": 0,
},
)
client_mock.pull.return_value = "docker://busybox"
client_mock.instance.return_value = instance
client_mock.execute.return_value = {"return_code": 0, "message": "message"}
task = SingularityOperator(
task_id="task-id",
image="docker://busybox",
command="echo hello",
force_pull=True,
volumes=volumes,
)
task.execute({})
client_mock.instance.assert_called_once_with(
"docker://busybox", options=expected_options, args=None, start=False
)
@pytest.mark.parametrize(
("working_dir", "expected_working_dir"),
[
(
None,
[],
),
(
"",
["--workdir", ""],
),
(
"/work-dir/",
["--workdir", "/work-dir/"],
),
],
)
@mock.patch("airflow.providers.singularity.operators.singularity.Client")
def test_working_dir(self, client_mock, working_dir, expected_working_dir):
instance = mock.Mock(
autospec=Instance,
**{
"start.return_value": 0,
"stop.return_value": 0,
},
)
client_mock.pull.return_value = "docker://busybox"
client_mock.instance.return_value = instance
client_mock.execute.return_value = {"return_code": 0, "message": "message"}
task = SingularityOperator(
task_id="task-id",
image="docker://busybox",
command="echo hello",
force_pull=True,
working_dir=working_dir,
)
task.execute({})
client_mock.instance.assert_called_once_with(
"docker://busybox", options=expected_working_dir, args=None, start=False
)
| TestSingularityOperator |
python | kamyu104__LeetCode-Solutions | Python/sum-root-to-leaf-numbers.py | {
"start": 181,
"end": 642
} | class ____(object):
# @param root, a tree node
# @return an integer
def sumNumbers(self, root):
return self.sumNumbersRecu(root, 0)
def sumNumbersRecu(self, root, num):
if root is None:
return 0
if root.left is None and root.right is None:
return num * 10 + root.val
return self.sumNumbersRecu(root.left, num * 10 + root.val) + self.sumNumbersRecu(root.right, num * 10 + root.val)
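    # Worked example: for the tree [1, 2, 3] the root-to-leaf numbers are 12 and 13,
    # so sumNumbers returns 25.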
| Solution |
python | ray-project__ray | doc/source/serve/doc_code/grpc_proxy/user_defined_protos_pb2_grpc.py | {
"start": 234,
"end": 1478
} | class ____(object):
"""Missing associated documentation comment in .proto file."""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.__call__ = channel.unary_unary(
"/userdefinedprotos.UserDefinedService/__call__",
request_serializer=user__defined__protos__pb2.UserDefinedMessage.SerializeToString, # noqa: E501
response_deserializer=user__defined__protos__pb2.UserDefinedResponse.FromString, # noqa: E501
)
self.Multiplexing = channel.unary_unary(
"/userdefinedprotos.UserDefinedService/Multiplexing",
request_serializer=user__defined__protos__pb2.UserDefinedMessage2.SerializeToString, # noqa: E501
response_deserializer=user__defined__protos__pb2.UserDefinedResponse2.FromString, # noqa: E501
)
self.Streaming = channel.unary_stream(
"/userdefinedprotos.UserDefinedService/Streaming",
request_serializer=user__defined__protos__pb2.UserDefinedMessage.SerializeToString, # noqa: E501
response_deserializer=user__defined__protos__pb2.UserDefinedResponse.FromString, # noqa: E501
)
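    # Example usage (an illustrative sketch; the channel target and module alias
    # are hypothetical):
    #   channel = grpc.insecure_channel("localhost:9000")
    #   stub = <this stub class>(channel)
    #   resp = stub.Multiplexing(user_defined_protos_pb2.UserDefinedMessage2())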
| UserDefinedServiceStub |
python | huggingface__transformers | src/transformers/models/canine/tokenization_canine.py | {
"start": 1924,
"end": 5868
} | class ____(PreTrainedTokenizer):
r"""
Construct a CANINE tokenizer (i.e. a character splitter). It turns text into a sequence of characters, and then
converts each character into its Unicode code point.
[`CanineTokenizer`] inherits from [`PreTrainedTokenizer`].
Refer to superclass [`PreTrainedTokenizer`] for usage examples and documentation concerning parameters.
Args:
model_max_length (`int`, *optional*, defaults to 2048):
The maximum sentence length the model accepts.
"""
def __init__(
self,
bos_token=chr(CLS),
eos_token=chr(SEP),
sep_token=chr(SEP),
cls_token=chr(CLS),
pad_token=chr(PAD),
mask_token=chr(MASK),
add_prefix_space=False,
model_max_length=2048,
**kwargs,
):
bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
# Creates a mapping for looking up the IDs of special symbols.
self._special_codepoints: dict[str, int] = {}
for codepoint, name in SPECIAL_CODEPOINTS.items():
self._special_codepoints[name] = codepoint
# Creates a mapping for looking up the string forms of special symbol IDs.
self._special_codepoint_strings: dict[int, str] = {
codepoint: name for name, codepoint in self._special_codepoints.items()
}
self._unicode_vocab_size = UNICODE_VOCAB_SIZE
self._num_special_tokens = len(self._special_codepoints)
super().__init__(
bos_token=bos_token,
eos_token=eos_token,
sep_token=sep_token,
cls_token=cls_token,
pad_token=pad_token,
mask_token=mask_token,
add_prefix_space=add_prefix_space,
model_max_length=model_max_length,
token_type_ids_pattern="all_zeros",
token_type_ids_include_special_tokens=True,
special_tokens_pattern="cls_sep",
**kwargs,
)
@property
def vocab_size(self) -> int:
return self._unicode_vocab_size
def get_vocab(self):
vocab = {chr(i): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder)
return vocab
def _tokenize(self, text: str) -> list[str]:
"""Tokenize a string (i.e. perform character splitting)."""
return list(text)
def _convert_token_to_id(self, token: str) -> int:
"""Converts a token (i.e. a Unicode character) in an id (i.e. its integer Unicode code point value)."""
try:
return ord(token)
except TypeError:
raise ValueError(f"invalid token: '{token}'")
def _convert_id_to_token(self, index: int) -> str:
"""
Converts a Unicode code point (integer) in a token (str). In case it's a special code point, convert to
human-readable format.
"""
try:
if index in SPECIAL_CODEPOINTS:
return SPECIAL_CODEPOINTS[index]
return chr(index)
except TypeError:
raise ValueError(f"invalid id: {index}")
def convert_tokens_to_string(self, tokens):
return "".join(tokens)
__all__ = ["CanineTokenizer"]
| CanineTokenizer |
python | django__django | django/contrib/postgres/search.py | {
"start": 2150,
"end": 2272
} | class ____(CheckPostgresInstalledMixin, Field):
def db_type(self, connection):
return "tsquery"
| SearchQueryField |
python | apache__airflow | providers/fab/src/airflow/providers/fab/auth_manager/models/__init__.py | {
"start": 3680,
"end": 4106
} | class ____(Model):
"""Represents permission actions such as `can_read`."""
__tablename__ = "ab_permission"
id: Mapped[int] = mapped_column(
Integer,
Sequence("ab_permission_id_seq", start=1, increment=1, minvalue=1, cycle=False),
primary_key=True,
)
name: Mapped[str] = mapped_column(String(100), unique=True, nullable=False)
def __repr__(self):
return self.name
| Action |
python | pytorch__pytorch | torch/_inductor/codegen/cpp.py | {
"start": 21763,
"end": 23144
} | class ____:
def __init__(self, func_name: str = ""):
self.func_name = func_name
self.current_node: Optional[torch.fx.Node] = None
self.opt_ctx: Optional[OptimizationContext] = None
def __enter__(self):
assert V.interpreter
assert V.interpreter.current_node
self.current_node = V.interpreter.current_node
assert self.current_node is not None
if OptimizationContext.key in self.current_node.meta:
self.opt_ctx = self.current_node.meta[OptimizationContext.key]
else:
self.opt_ctx = OptimizationContext()
assert self.opt_ctx is not None
self.opt_ctx.ops_name = self.func_name
return self
def __exit__(self, exc_type, exc_val, exc_tb):
assert self.current_node
assert self.opt_ctx
self.current_node.meta[OptimizationContext.key] = self.opt_ctx
def get_opt_ctx(self):
return self.opt_ctx
def get_fx_node(self):
assert self.current_node
return self.current_node
def decltype_promoted(*args):
assert not any(isinstance(arg, CppCSEVariable) and arg.is_vec for arg in args), (
"Promotion of vector types is not supported"
)
if (dt := get_promote_dtype(args)) is not None:
return DTYPE_TO_CPP[dt]
else:
return f"decltype({args[0]})"
| RecordOptimizationContext |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/typedDict18.py | {
"start": 213,
"end": 421
} | class ____(TypedDict, Generic[_T1, _T2]):
a: dict[_T1, _T2]
b: _T1
v1_1: TD1[str, int] = {"a": {"x": 3}, "b": "y"}
# This should generate an error.
v1_2: TD1[str, str] = {"a": {"x": 3}, "b": "y"}
| TD1 |
python | PyCQA__flake8 | src/flake8/style_guide.py | {
"start": 675,
"end": 850
} | class ____(enum.Enum):
"""Enum representing an explicitly or implicitly ignored code."""
Explicitly = "explicitly ignored"
Implicitly = "implicitly ignored"
| Ignored |
python | openai__openai-python | src/openai/types/responses/custom_tool.py | {
"start": 283,
"end": 736
} | class ____(BaseModel):
name: str
"""The name of the custom tool, used to identify it in tool calls."""
type: Literal["custom"]
"""The type of the custom tool. Always `custom`."""
description: Optional[str] = None
"""Optional description of the custom tool, used to provide more context."""
format: Optional[CustomToolInputFormat] = None
"""The input format for the custom tool. Default is unconstrained text."""
| CustomTool |
python | pytorch__pytorch | test/onnx/test_models_onnxruntime.py | {
"start": 5536,
"end": 14299
} | class ____(onnx_test_common._TestONNXRuntime):
@skipIfUnsupportedMinOpsetVersion(11)
@skipScriptTest() # Faster RCNN model is not scriptable
def test_faster_rcnn(self):
model = faster_rcnn.fasterrcnn_resnet50_fpn(
pretrained=False, pretrained_backbone=True, min_size=200, max_size=300
)
model.eval()
x1 = torch.randn(3, 200, 300, requires_grad=True)
x2 = torch.randn(3, 200, 300, requires_grad=True)
self.run_test(model, ([x1, x2],), rtol=1e-3, atol=1e-5)
self.run_test(
model,
([x1, x2],),
input_names=["images_tensors"],
output_names=["outputs"],
dynamic_axes={"images_tensors": [0, 1, 2], "outputs": [0, 1, 2]},
rtol=1e-3,
atol=1e-5,
)
dummy_image = [torch.ones(3, 100, 100) * 0.3]
images, test_images = _get_test_images()
self.run_test(
model,
(images,),
additional_test_inputs=[(images,), (test_images,), (dummy_image,)],
input_names=["images_tensors"],
output_names=["outputs"],
dynamic_axes={"images_tensors": [0, 1, 2], "outputs": [0, 1, 2]},
rtol=1e-3,
atol=1e-5,
)
self.run_test(
model,
(dummy_image,),
additional_test_inputs=[(dummy_image,), (images,)],
input_names=["images_tensors"],
output_names=["outputs"],
dynamic_axes={"images_tensors": [0, 1, 2], "outputs": [0, 1, 2]},
rtol=1e-3,
atol=1e-5,
)
@unittest.skip("Failing after ONNX 1.13.0")
@skipIfUnsupportedMinOpsetVersion(11)
@skipScriptTest()
def test_mask_rcnn(self):
model = mask_rcnn.maskrcnn_resnet50_fpn(
pretrained=False, pretrained_backbone=True, min_size=200, max_size=300
)
images, test_images = _get_test_images()
self.run_test(model, (images,), rtol=1e-3, atol=1e-5)
self.run_test(
model,
(images,),
input_names=["images_tensors"],
output_names=["boxes", "labels", "scores", "masks"],
dynamic_axes={
"images_tensors": [0, 1, 2],
"boxes": [0, 1],
"labels": [0],
"scores": [0],
"masks": [0, 1, 2],
},
rtol=1e-3,
atol=1e-5,
)
dummy_image = [torch.ones(3, 100, 100) * 0.3]
self.run_test(
model,
(images,),
additional_test_inputs=[(images,), (test_images,), (dummy_image,)],
input_names=["images_tensors"],
output_names=["boxes", "labels", "scores", "masks"],
dynamic_axes={
"images_tensors": [0, 1, 2],
"boxes": [0, 1],
"labels": [0],
"scores": [0],
"masks": [0, 1, 2],
},
rtol=1e-3,
atol=1e-5,
)
self.run_test(
model,
(dummy_image,),
additional_test_inputs=[(dummy_image,), (images,)],
input_names=["images_tensors"],
output_names=["boxes", "labels", "scores", "masks"],
dynamic_axes={
"images_tensors": [0, 1, 2],
"boxes": [0, 1],
"labels": [0],
"scores": [0],
"masks": [0, 1, 2],
},
rtol=1e-3,
atol=1e-5,
)
@unittest.skip("Failing, see https://github.com/pytorch/pytorch/issues/66528")
@skipIfUnsupportedMinOpsetVersion(11)
@skipScriptTest()
def test_keypoint_rcnn(self):
model = keypoint_rcnn.keypointrcnn_resnet50_fpn(
pretrained=False, pretrained_backbone=False, min_size=200, max_size=300
)
images, test_images = _get_test_images()
self.run_test(model, (images,), rtol=1e-3, atol=1e-5)
self.run_test(
model,
(images,),
input_names=["images_tensors"],
output_names=["outputs1", "outputs2", "outputs3", "outputs4"],
dynamic_axes={"images_tensors": [0, 1, 2]},
rtol=1e-3,
atol=1e-5,
)
dummy_images = [torch.ones(3, 100, 100) * 0.3]
self.run_test(
model,
(images,),
additional_test_inputs=[(images,), (test_images,), (dummy_images,)],
input_names=["images_tensors"],
output_names=["outputs1", "outputs2", "outputs3", "outputs4"],
dynamic_axes={"images_tensors": [0, 1, 2]},
rtol=5e-3,
atol=1e-5,
)
self.run_test(
model,
(dummy_images,),
additional_test_inputs=[(dummy_images,), (test_images,)],
input_names=["images_tensors"],
output_names=["outputs1", "outputs2", "outputs3", "outputs4"],
dynamic_axes={"images_tensors": [0, 1, 2]},
rtol=5e-3,
atol=1e-5,
)
@skipIfUnsupportedMinOpsetVersion(11)
@skipScriptTest()
def test_roi_heads(self):
class RoIHeadsModule(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.transform = _init_test_generalized_rcnn_transform()
self.rpn = _init_test_rpn()
self.roi_heads = _init_test_roi_heads_faster_rcnn()
def forward(self, images, features: Mapping[str, torch.Tensor]):
original_image_sizes = [
(img.shape[-1], img.shape[-2]) for img in images
]
images_m = image_list.ImageList(
images, [(i.shape[-1], i.shape[-2]) for i in images]
)
proposals, _ = self.rpn(images_m, features)
detections, _ = self.roi_heads(
features, proposals, images_m.image_sizes
)
detections = self.transform.postprocess(
detections, images_m.image_sizes, original_image_sizes
)
return detections
images = torch.rand(2, 3, 100, 100)
features = _get_features(images)
images2 = torch.rand(2, 3, 150, 150)
test_features = _get_features(images2)
model = RoIHeadsModule()
model.eval()
model(images, features)
self.run_test(
model,
(images, features),
input_names=["input1", "input2", "input3", "input4", "input5", "input6"],
dynamic_axes={
"input1": [0, 1, 2, 3],
"input2": [0, 1, 2, 3],
"input3": [0, 1, 2, 3],
"input4": [0, 1, 2, 3],
"input5": [0, 1, 2, 3],
"input6": [0, 1, 2, 3],
},
additional_test_inputs=[(images, features), (images2, test_features)],
)
@skipScriptTest() # TODO: #75625
@skipIfUnsupportedMinOpsetVersion(20)
def test_transformer_encoder(self):
class MyModule(torch.nn.Module):
def __init__(self, ninp, nhead, nhid, dropout, nlayers):
super().__init__()
encoder_layers = nn.TransformerEncoderLayer(ninp, nhead, nhid, dropout)
self.transformer_encoder = nn.TransformerEncoder(
encoder_layers, nlayers
)
def forward(self, input):
return self.transformer_encoder(input)
x = torch.rand(10, 32, 512)
self.run_test(MyModule(512, 8, 2048, 0.0, 3), (x,), atol=1e-5)
@skipScriptTest()
def test_mobilenet_v3(self):
model = torchvision.models.quantization.mobilenet_v3_large(pretrained=False)
dummy_input = torch.randn(1, 3, 224, 224)
self.run_test(model, (dummy_input,))
@skipIfUnsupportedMinOpsetVersion(11)
@skipScriptTest()
def test_shufflenet_v2_dynamic_axes(self):
model = torchvision.models.shufflenet_v2_x0_5(weights=None)
dummy_input = torch.randn(1, 3, 224, 224, requires_grad=True)
test_inputs = torch.randn(3, 3, 224, 224, requires_grad=True)
self.run_test(
model,
(dummy_input,),
additional_test_inputs=[(dummy_input,), (test_inputs,)],
input_names=["input_images"],
output_names=["outputs"],
dynamic_axes={
"input_images": {0: "batch_size"},
"output": {0: "batch_size"},
},
rtol=1e-3,
atol=1e-5,
)
if __name__ == "__main__":
common_utils.run_tests()
| TestModelsONNXRuntime |
python | crytic__slither | slither/vyper_parsing/ast/types.py | {
"start": 1676,
"end": 1913
} | class ____(Definition):
name: str
args: Optional[Arguments]
returns: Optional[List[ASTNode]]
body: List[ASTNode]
decorators: Optional[List[ASTNode]]
pos: Optional[any] # not sure what this is
@dataclass
| FunctionDef |
python | pandas-dev__pandas | pandas/tests/indexes/interval/test_indexing.py | {
"start": 388,
"end": 1747
} | class ____:
def test_getitem(self, closed):
idx = IntervalIndex.from_arrays((0, 1, np.nan), (1, 2, np.nan), closed=closed)
assert idx[0] == Interval(0.0, 1.0, closed=closed)
assert idx[1] == Interval(1.0, 2.0, closed=closed)
assert isna(idx[2])
result = idx[0:1]
expected = IntervalIndex.from_arrays((0.0,), (1.0,), closed=closed)
tm.assert_index_equal(result, expected)
result = idx[0:2]
expected = IntervalIndex.from_arrays((0.0, 1), (1.0, 2.0), closed=closed)
tm.assert_index_equal(result, expected)
result = idx[1:3]
expected = IntervalIndex.from_arrays(
(1.0, np.nan), (2.0, np.nan), closed=closed
)
tm.assert_index_equal(result, expected)
def test_getitem_2d_deprecated(self):
# GH#30588 multi-dim indexing is deprecated, but raising is also acceptable
idx = IntervalIndex.from_breaks(range(11), closed="right")
with pytest.raises(ValueError, match="multi-dimensional indexing not allowed"):
idx[:, None]
with pytest.raises(ValueError, match="multi-dimensional indexing not allowed"):
# GH#44051
idx[True]
with pytest.raises(ValueError, match="multi-dimensional indexing not allowed"):
# GH#44051
idx[False]
| TestGetItem |
python | mlflow__mlflow | mlflow/utils/async_logging/async_artifacts_logging_queue.py | {
"start": 510,
"end": 9984
} | class ____:
"""
This is a queue based run data processor that queue incoming data and process it using a single
worker thread. This class is used to process artifacts saving in async fashion.
Args:
logging_func: A callable function that takes in three arguments:
- filename: The name of the artifact file.
- artifact_path: The path to the artifact.
- artifact: The artifact to be logged.
"""
def __init__(
self, artifact_logging_func: Callable[[str, str, Union["PIL.Image.Image"]], None]
) -> None:
self._queue: Queue[RunArtifact] = Queue()
self._lock = threading.RLock()
self._artifact_logging_func = artifact_logging_func
self._stop_data_logging_thread_event = threading.Event()
self._is_activated = False
def _at_exit_callback(self) -> None:
"""Callback function to be executed when the program is exiting.
Stops the data processing thread and waits for the queue to be drained. Finally, shuts down
the thread pools used for data logging and artifact processing status check.
"""
try:
# Stop the data processing thread
self._stop_data_logging_thread_event.set()
# Waits till logging queue is drained.
self._artifact_logging_thread.join()
self._artifact_logging_worker_threadpool.shutdown(wait=True)
self._artifact_status_check_threadpool.shutdown(wait=True)
except Exception as e:
_logger.error(f"Encountered error while trying to finish logging: {e}")
def flush(self) -> None:
"""Flush the async logging queue.
Calling this method will flush the queue to ensure all the data are logged.
"""
# Stop the data processing thread.
self._stop_data_logging_thread_event.set()
# Waits till logging queue is drained.
self._artifact_logging_thread.join()
self._artifact_logging_worker_threadpool.shutdown(wait=True)
self._artifact_status_check_threadpool.shutdown(wait=True)
# Restart the thread to listen to incoming data after flushing.
self._stop_data_logging_thread_event.clear()
self._set_up_logging_thread()
def _logging_loop(self) -> None:
"""
        Continuously logs run data until `self._stop_data_logging_thread_event` is set.
If an exception occurs during logging, a `MlflowException` is raised.
"""
try:
while not self._stop_data_logging_thread_event.is_set():
self._log_artifact()
# Drain the queue after the stop event is set.
while not self._queue.empty():
self._log_artifact()
except Exception as e:
from mlflow.exceptions import MlflowException
raise MlflowException(f"Exception inside the run data logging thread: {e}")
def _log_artifact(self) -> None:
"""Process the run's artifacts in the running runs queues.
For each run in the running runs queues, this method retrieves the next artifact of run
from the queue and processes it by calling the `_artifact_logging_func` method with the run
ID and artifact. If the artifact is empty, it is skipped. After processing the artifact,
the processed watermark is updated and the artifact event is set.
If an exception occurs during processing, the exception is logged and the artifact event
is set with the exception. If the queue is empty, it is ignored.
"""
try:
run_artifact = self._queue.get(timeout=1)
except Empty:
# Ignore empty queue exception
return
def logging_func(run_artifact):
try:
self._artifact_logging_func(
filename=run_artifact.filename,
artifact_path=run_artifact.artifact_path,
artifact=run_artifact.artifact,
)
# Signal the artifact processing is done.
run_artifact.completion_event.set()
except Exception as e:
_logger.error(f"Failed to log artifact {run_artifact.filename}. Exception: {e}")
run_artifact.exception = e
run_artifact.completion_event.set()
self._artifact_logging_worker_threadpool.submit(logging_func, run_artifact)
def _wait_for_artifact(self, artifact: RunArtifact) -> None:
"""Wait for given artifacts to be processed by the logging thread.
Args:
artifact: The artifact to wait for.
Raises:
Exception: If an exception occurred while processing the artifact.
"""
artifact.completion_event.wait()
if artifact.exception:
raise artifact.exception
def __getstate__(self):
"""Return the state of the object for pickling.
This method is called by the `pickle` module when the object is being pickled. It returns a
dictionary containing the object's state, with non-picklable attributes removed.
Returns:
dict: A dictionary containing the object's state.
"""
state = self.__dict__.copy()
del state["_queue"]
del state["_lock"]
del state["_is_activated"]
if "_stop_data_logging_thread_event" in state:
del state["_stop_data_logging_thread_event"]
if "_artifact_logging_thread" in state:
del state["_artifact_logging_thread"]
if "_artifact_logging_worker_threadpool" in state:
del state["_artifact_logging_worker_threadpool"]
if "_artifact_status_check_threadpool" in state:
del state["_artifact_status_check_threadpool"]
return state
def __setstate__(self, state):
"""Set the state of the object from a given state dictionary.
        It recreates the non-picklable attributes removed by `self.__getstate__()`.
Args:
state (dict): A dictionary containing the state of the object.
Returns:
None
"""
self.__dict__.update(state)
self._queue = Queue()
self._lock = threading.RLock()
self._is_activated = False
self._artifact_logging_thread = None
self._artifact_logging_worker_threadpool = None
self._artifact_status_check_threadpool = None
self._stop_data_logging_thread_event = threading.Event()
def log_artifacts_async(self, filename, artifact_path, artifact) -> RunOperations:
"""Asynchronously logs runs artifacts.
Args:
filename: Filename of the artifact to be logged.
artifact_path: Directory within the run's artifact directory in which to log the
artifact.
artifact: The artifact to be logged.
Returns:
mlflow.utils.async_utils.RunOperations: An object that encapsulates the
asynchronous operation of logging the artifact of run data.
The object contains a list of `concurrent.futures.Future` objects that can be used
to check the status of the operation and retrieve any exceptions
that occurred during the operation.
"""
from mlflow import MlflowException
if not self._is_activated:
raise MlflowException("AsyncArtifactsLoggingQueue is not activated.")
artifact = RunArtifact(
filename=filename,
artifact_path=artifact_path,
artifact=artifact,
completion_event=threading.Event(),
)
self._queue.put(artifact)
operation_future = self._artifact_status_check_threadpool.submit(
self._wait_for_artifact, artifact
)
return RunOperations(operation_futures=[operation_future])
def is_active(self) -> bool:
return self._is_activated
def _set_up_logging_thread(self) -> None:
"""Sets up the logging thread.
If the logging thread is already set up, this method does nothing.
"""
with self._lock:
self._artifact_logging_thread = threading.Thread(
target=self._logging_loop,
name="MLflowAsyncArtifactsLoggingLoop",
daemon=True,
)
self._artifact_logging_worker_threadpool = ThreadPoolExecutor(
max_workers=5,
thread_name_prefix="MLflowArtifactsLoggingWorkerPool",
)
self._artifact_status_check_threadpool = ThreadPoolExecutor(
max_workers=5,
thread_name_prefix="MLflowAsyncArtifactsLoggingStatusCheck",
)
self._artifact_logging_thread.start()
def activate(self) -> None:
"""Activates the async logging queue
1. Initializes queue draining thread.
2. Initializes threads for checking the status of logged artifact.
3. Registering an atexit callback to ensure that any remaining log data
is flushed before the program exits.
If the queue is already activated, this method does nothing.
"""
with self._lock:
if self._is_activated:
return
self._set_up_logging_thread()
atexit.register(self._at_exit_callback)
self._is_activated = True
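    # Example usage (an illustrative sketch; the callback is hypothetical):
    #   queue = AsyncArtifactsLoggingQueue(artifact_logging_func=save_image)
    #   queue.activate()
    #   op = queue.log_artifacts_async("plot.png", "figures", image)
    #   op.wait()  # blocks until the artifact is logged or re-raises its exception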
| AsyncArtifactsLoggingQueue |
python | tensorflow__tensorflow | tensorflow/python/ops/gradients_test.py | {
"start": 20171,
"end": 26707
} | class ____(test_util.TensorFlowTestCase):
@classmethod
def XSquarePlusB(cls, x, b):
return x * x + b
@classmethod
def XSquarePlusBGradient(cls, x, b, g):
# Perturb gradients (multiply by 2), so we can test that this was called.
g *= 2.0
return g * 2.0 * x, g
@classmethod
def _PythonGradient(cls, op, grad):
# Perturb gradients (multiply by 3), so we can test that this was called.
grad *= 3.0
return grad * op.inputs[0] * 2.0, grad
@classmethod
def _GetFunc(cls, **kwargs):
    return framework_function.Defun(
        dtypes.float32, dtypes.float32, **kwargs)(cls.XSquarePlusB)
def _GetFuncGradients(self, f, x_value, b_value):
x = constant_op.constant(x_value, name="x")
b = constant_op.constant(b_value, name="b")
y = f(x, b)
grads = gradients.gradients(y, [x, b])
return self.evaluate(grads)
def testFunctionGradientsBasic(self):
g = ops.Graph()
with g.as_default():
f = self._GetFunc()
# Get gradients (should add SymbolicGradient node for function).
grads = self._GetFuncGradients(f, [2.0], [1.0])
self.assertAllEqual([4.0], grads[0])
self.assertAllEqual([1.0], grads[1])
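    # Expected values: y = x**2 + b, so dy/dx = 2x = 4.0 at x = 2.0 and dy/db = 1.0.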
def testFunctionGradientsComposition(self):
with ops.Graph().as_default():
f = self._GetFunc()
x = constant_op.constant([2.0], name="x")
b1 = constant_op.constant([1.0], name="b1")
b2 = constant_op.constant([1.0], name="b2")
y = f(f(x, b1), b2)
# Build gradient graph (should add SymbolicGradient node for function).
grads = gradients.gradients(y, [x, b1])
self.assertAllEqual([40.0], self.evaluate(grads)[0])
self.assertAllEqual([10.0], self.evaluate(grads)[1])
def testFunctionGradientsWithGradFunc(self):
g = ops.Graph()
with g.as_default():
grad_func = framework_function.Defun(dtypes.float32, dtypes.float32,
dtypes.float32)(
self.XSquarePlusBGradient)
f = self._GetFunc(grad_func=grad_func)
# Get gradients (should add SymbolicGradient node for function, which
# uses the grad_func above, which multiplies all gradients by 2).
grads = self._GetFuncGradients(f, [2.0], [1.0])
self.assertAllEqual([4.0 * 2], grads[0])
self.assertAllEqual([1.0 * 2], grads[1])
def testFunctionGradientWithRegistration(self):
g = ops.Graph()
with g.as_default():
f = self._GetFunc(python_grad_func=self._PythonGradient)
# Get gradients, using the python gradient function. It multiplies the
# gradients by 3.
grads = self._GetFuncGradients(f, [2.0], [1.0])
self.assertAllEqual([4.0 * 3], grads[0])
self.assertAllEqual([1.0 * 3], grads[1])
def testFunctionGradientWithGradFuncAndRegistration(self):
g = ops.Graph()
with g.as_default():
grad_func = framework_function.Defun(dtypes.float32, dtypes.float32,
dtypes.float32)(
self.XSquarePlusBGradient)
with self.assertRaisesRegex(ValueError, "Gradient defined twice"):
f = self._GetFunc(
grad_func=grad_func, python_grad_func=self._PythonGradient)
f.add_to_graph(ops.Graph())
def testGradientWrtCaptured(self):
with ops.Graph().as_default():
x = constant_op.constant(1.0, name="x")
@def_function.function
def Foo():
y = math_ops.multiply(x, 2.0, name="y")
g = gradients_impl.gradients(y, x)
return g[0]
f = Foo()
self.assertEqual(self.evaluate(f), 2.0)
def testGradientOfCaptured(self):
with ops.Graph().as_default():
x = constant_op.constant(1.0, name="x")
y = math_ops.multiply(x, 2.0, name="y")
@framework_function.Defun()
def Foo():
g = gradients_impl.gradients(y, x)
return g[0]
f = Foo()
self.assertEqual(self.evaluate(f), 2.0)
def testCapturedResourceVariable(self):
with ops.Graph().as_default():
var = resource_variable_ops.ResourceVariable(1.0, name="var")
@def_function.function
def Foo():
y = math_ops.multiply(var, 2.0, name="y")
g = gradients_impl.gradients(y, var)
return g[0]
f = Foo()
self.evaluate(variables.global_variables_initializer())
self.assertEqual(self.evaluate(f), 2.0)
def testCapturedNested(self):
with ops.Graph().as_default():
x1 = constant_op.constant(1.0, name="x1")
x2 = constant_op.constant(2.0, name="x2")
x3 = math_ops.multiply(x1, x2, name="x3")
@def_function.function
def Outer():
outer1 = array_ops.identity(x1, name="outer1")
@def_function.function
def Inner():
inner1 = array_ops.identity(outer1, name="inner1")
inner2 = array_ops.identity(x2, name="inner2")
inner3 = array_ops.identity(x3, name="inner3")
return gradients_impl.gradients([inner1, inner2, inner3, x1],
[x1, x2])
return Inner()
x1_grad, x2_grad = Outer()
# 1.0 + None + 2.0 + 1.0 = 4.0
self.assertEqual(self.evaluate(x1_grad), 4.0)
# None + 1.0 + 1.0 + None = 2.0
self.assertEqual(self.evaluate(x2_grad), 2.0)
def testCapturedFromFunction(self):
with ops.Graph().as_default():
x = constant_op.constant(1.0, name="x")
@def_function.function
def Outer():
y = math_ops.multiply(x, 2.0, name="y")
@def_function.function
def Inner():
z = math_ops.multiply(y, 3.0, name="z")
g = gradients_impl.gradients(z, y)
return g[0]
return Inner()
z_grad = Outer()
self.assertEqual(self.evaluate(z_grad), 3.0)
def testCapturedEagerTensors(self):
# Test that we can handle captured eager tensors unrelated to the gradient
# computation (i.e. we need to ignore them).
# TODO(skyewm): make it an error if you try to take the gradient wrt a
# captured EagerTensor
with context.eager_mode():
c = constant_op.constant(2.0, name="c")
@def_function.function
def Foo():
x = constant_op.constant(10.0, name="x")
y = math_ops.multiply(x, c, name="y")
# Regression test for b/122564611.
z = math_ops.multiply(c, y, name="z")
g = gradients_impl.gradients(z, x)
return g[0]
self.assertEqual(Foo().numpy(), 4.0)
| FunctionGradientsTest |
python | bokeh__bokeh | tests/unit/bokeh/test_objects.py | {
"start": 4230,
"end": 4853
} | class ____:
def test_references_large(self) -> None:
root, objects = large_plot(10)
assert set(root.references()) == objects
def test_references_deep(self) -> None:
root = DeepModel()
objects = {root}
parent = root
# in a previous implementation, about 400 would blow max
# recursion depth, so we double that and a little bit,
# here.
for _ in range(900):
model = DeepModel()
objects.add(model)
parent.child = model
parent = model
assert set(root.references()) == objects
| TestCollectModels |
python | tensorflow__tensorflow | tensorflow/python/distribute/tpu_strategy_model_parallelism_test.py | {
"start": 3405,
"end": 20931
} | class ____(
strategy_test_lib.DistributionTestBase,
strategy_test_lib.TwoDeviceDistributionTestBase,
parameterized.TestCase):
@parameterized.named_parameters([("packed", True), ("unpacked", False)])
def test_spmd_variable_structure(self, enable_packing):
strategy, num_replicas = get_tpu_strategy(enable_spmd=True)
# pylint: disable=protected-access
if enable_packing:
self.assertTrue(strategy._enable_packed_variable_in_eager_mode,
"packed variables should be enabled by default")
else:
strategy._enable_packed_variable_in_eager_mode = False
# pylint: enable=protected-access
tensor = constant_op.constant([[0., 1.], [2., 3.]])
# Test TPUMirroredVariable and TPUSyncOnReadVariable
with strategy.scope():
v = variables.Variable(
tensor, name="v", synchronization=vs.VariableSynchronization.ON_READ)
w = variables.Variable(
tensor, name="w", synchronization=vs.VariableSynchronization.ON_WRITE)
def test_read(x):
@def_function.function
def fn():
return x.read_value()
results = strategy.run(fn)
results = strategy.experimental_local_results(results)
for i in range(num_replicas):
self.assertAllClose(results[i], tensor)
def test_structure(values):
for i, value in enumerate(values):
self.assertIsInstance(
value, tpu_replicated_variable.TPUReplicatedVariable)
packed_var = getattr(value, "_packed_var", None)
if enable_packing:
if i == 0:
self.assertIsInstance(packed_var, packed.PackedDistributedVariable)
else:
self.assertIs(packed_var, values[0]._packed_var, # pylint: disable=protected-access
"all vals should share the same packed var instance")
else:
self.assertIsNone(packed_var)
if enable_packing:
# pylint: disable=protected-access
resources = sum((value._vars for value in values), [])
dist_vars = packed_var._distributed_variables
# pylint: enable=protected-access
self.assertLen(resources, len(dist_vars))
for dist_var, resource in zip(dist_vars, resources):
self.assertIs(dist_var, resource)
test_read(v)
test_structure(v.values)
test_read(w)
test_structure(w.values)
@unittest.skip("Non-SPMD model parallelism is no longer supported")
def test_logical_device_assignment(self):
strategy, num_replicas = get_tpu_strategy()
with strategy.scope():
v = variables.Variable(2.)
with strategy.extended.experimental_logical_device(1):
w = variables.Variable(3.)
self.assertLen(strategy.experimental_local_results(v), num_replicas)
self.assertLen(strategy.experimental_local_results(w), num_replicas)
self.assertEqual("/job:localhost/replica:0/task:0/device:TPU:0",
strategy.experimental_local_results(v)[0].device)
self.assertEqual("/job:localhost/replica:0/task:0/device:TPU:1",
strategy.experimental_local_results(w)[0].device)
logical_devices = []
@def_function.function
def f(x):
replica_ctx = distribute_lib.get_replica_context()
with replica_ctx.experimental_logical_device(0):
y = v * x
with replica_ctx.experimental_logical_device(1):
z = w * y
logical_devices.append((y.device, z.device))
return z
result = strategy.run(f, args=(5.,))
self.assertEqual(
[("/device:TPU_REPLICATED_CORE:0", "/device:TPU_REPLICATED_CORE:1")],
logical_devices)
with self.cached_session():
self.evaluate(variables.global_variables_initializer())
self.assertEqual(30. * num_replicas,
self.evaluate(strategy.reduce("SUM", result, axis=None)))
@unittest.skip("Non-SPMD model parallelism is no longer supported")
def test_paritioned_model_checkpointing(self):
class PartitionedModel(module.Module):
def __init__(self, v, w):
super(PartitionedModel, self).__init__()
assert distribute_lib.has_strategy()
strategy = distribute_lib.get_strategy()
with strategy.extended.experimental_logical_device(0):
self.v = variables.Variable(v)
with strategy.extended.experimental_logical_device(1):
self.w = variables.Variable(w)
def __call__(self, x):
replica_ctx = distribute_lib.get_replica_context()
with replica_ctx.experimental_logical_device(0):
y = self.v * x
with replica_ctx.experimental_logical_device(1):
z = self.w * y
return z
def change_weights_op(self, v_new, w_new):
return control_flow_ops.group(
[self.v.assign(v_new), self.w.assign(w_new)])
strategy, num_replicas = get_tpu_strategy()
with strategy.scope():
model = PartitionedModel(2., 3.)
checkpoint_dir = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_dir, "ckpt")
checkpoint = util.Checkpoint(model=model)
with self.cached_session() as sess:
self.evaluate(variables.global_variables_initializer())
checkpoint.save(file_prefix=checkpoint_prefix)
self.evaluate(model.change_weights_op(1., 4.))
result = strategy.run(def_function.function(model), args=(5.0,))
self.assertEqual(20. * num_replicas,
self.evaluate(strategy.reduce("SUM", result, axis=None)))
status = checkpoint.restore(
checkpoint_management.latest_checkpoint(checkpoint_dir))
status.run_restore_ops(sess) # must run restore op in non-eager mode.
status.assert_consumed()
status.assert_existing_objects_matched()
result = strategy.run(def_function.function(model), args=(5.0,))
self.assertEqual(30. * num_replicas,
self.evaluate(strategy.reduce("SUM", result, axis=None)))
def test_spmd_cannot_assign_tensor_to_logical_device(self):
strategy, _ = get_tpu_strategy(enable_spmd=True)
x = constant_op.constant([0, 1])
with self.assertRaises(ValueError):
strategy.experimental_assign_to_logical_device(x, 0)
def test_spmd_variable_created_from_callable(self):
    initializer = lambda: random_ops.random_normal(shape=(16, 16))
    strategy, _ = get_tpu_strategy(enable_spmd=True)
    with strategy.scope():
      w = variables.Variable(initializer)
value0 = w.values[0]
for v in value0.variables:
self.assertAllEqual(v, value0.variables[0])
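    # The loop above checks SPMD replication: every per-logical-core shard in
    # `value0.variables` should hold the same tensor as shard 0.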
def test_spmd_variable_read(self):
batch_size = 32
num_feature_in = 16
num_feature_out = 8
x = random_ops.random_uniform((batch_size, num_feature_in),
dtype=dtypes.float32)
w_init = random_ops.random_uniform((num_feature_in, num_feature_out),
dtype=dtypes.float32)
strategy, num_replicas = get_tpu_strategy(enable_spmd=True)
with strategy.scope():
w = variables.Variable(w_init, dtype=dtypes.float32)
self.assertEqual(w.values[0].variables[0].shape.as_list(),
[num_feature_in, num_feature_out])
self.assertEqual(w.shape.as_list(), [num_feature_in, num_feature_out])
def step_fn(batch_features):
predict = math_ops.matmul(batch_features, w)
return predict
@def_function.function
def train_fn(batch_features):
return strategy.run(step_fn, args=(batch_features,))
result = train_fn(x)
self.assertAllClose(
strategy.reduce("SUM", result, axis=None),
math_ops.matmul(x, w_init) * num_replicas,
rtol=5e-03,
atol=5e-03)
def test_spmd_variable_read_init_scope(self):
strategy, _ = get_tpu_strategy(enable_spmd=True)
with strategy.scope():
v = variables.Variable(array_ops.ones((4, 4), dtype=dtypes.float32))
@def_function.function
def read_v():
with ops.init_scope():
return v.read_value()
result = strategy.reduce("MEAN", strategy.run(read_v), axis=None)
self.assertAllClose(result, v.read_value())
def test_spmd_variable_update(self):
batch_size = 1024
num_feature_in = 256
x = random_ops.random_uniform((batch_size, num_feature_in),
dtype=dtypes.float32)
w_init = random_ops.random_uniform((batch_size, num_feature_in),
dtype=dtypes.float32)
strategy, num_replicas = get_tpu_strategy(enable_spmd=True)
with strategy.scope():
w = variables.Variable(w_init, dtype=dtypes.float32)
self.assertIsInstance(w, tpu_values.TPUMirroredVariable)
self.assertTrue(w._is_replicated_or_sharded_to_logical_cores())
def make_strategy_run(fn):
def run(value):
return strategy.run(fn, args=(value,))
return def_function.function(run)
result = make_strategy_run(w.assign)(x)
self.assertAllClose(
strategy.reduce("SUM", result, axis=None), x * num_replicas)
delta = random_ops.random_uniform((batch_size, num_feature_in),
dtype=dtypes.float32)
result = make_strategy_run(w.assign_sub)(delta)
x -= delta
self.assertAllClose(
strategy.reduce("SUM", result, axis=None), x * num_replicas)
delta = random_ops.random_uniform((batch_size, num_feature_in),
dtype=dtypes.float32)
result = make_strategy_run(w.assign_add)(delta)
x += delta
self.assertAllClose(
strategy.reduce("SUM", result, axis=None), x * num_replicas)
def test_spmd_variable_eager_update(self):
batch_size = 32
num_feature_in = 16
x = random_ops.random_uniform((batch_size, num_feature_in),
dtype=dtypes.float32)
w_init = random_ops.random_uniform((batch_size, num_feature_in),
dtype=dtypes.float32)
strategy, _ = get_tpu_strategy(enable_spmd=True)
with strategy.scope():
w = variables.Variable(w_init, dtype=dtypes.float32)
w.assign(x)
result = w.numpy()
self.assertAllClose(result, x)
x1 = random_ops.random_uniform((batch_size, num_feature_in),
dtype=dtypes.float32)
w.assign_sub(x1)
result = w.numpy()
self.assertAllClose(result, x - x1)
x2 = random_ops.random_uniform((batch_size, num_feature_in),
dtype=dtypes.float32)
w.assign(x)
w.assign_add(x2)
result = w.numpy()
self.assertAllClose(result, x + x2)
def test_spmd_model_checkpointing(self):
class LinearModel(module.Module):
def __init__(self, w):
super(LinearModel, self).__init__()
self.w = variables.Variable(w)
def __call__(self, x):
return math_ops.matmul(x, self.w)
def change_weights_op(self, w_new):
return self.w.assign(w_new)
batch_size = 32
num_feature_in = 16
num_feature_out = 8
w1 = random_ops.random_uniform((num_feature_in, num_feature_out),
dtype=dtypes.float32)
w2 = random_ops.random_uniform((num_feature_in, num_feature_out),
dtype=dtypes.float32)
x = random_ops.random_uniform((batch_size, num_feature_in),
dtype=dtypes.float32)
strategy, num_replicas = get_tpu_strategy(enable_spmd=True)
with strategy.scope():
model = LinearModel(w1)
checkpoint_dir = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_dir, "ckpt")
checkpoint = util.Checkpoint(model=model)
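    # step_fn splits the input two ways along the feature axis across the
    # logical devices.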
@def_function.function
def step_fn(x):
x = strategy.experimental_split_to_logical_devices(x, [1, 2])
return model(x)
with self.cached_session() as sess:
self.evaluate(variables.global_variables_initializer())
checkpoint.save(file_prefix=checkpoint_prefix)
self.evaluate(model.change_weights_op(w2))
result = strategy.run(step_fn, args=(x,))
self.assertAllClose(
math_ops.matmul(x, w2) * num_replicas,
self.evaluate(strategy.reduce("SUM", result, axis=None)),
rtol=5e-3,
atol=5e-3)
status = checkpoint.restore(
checkpoint_management.latest_checkpoint(checkpoint_dir))
status.run_restore_ops(sess) # must run restore op in non-eager mode.
status.assert_consumed()
status.assert_existing_objects_matched()
result = strategy.run(step_fn, args=(x,))
self.assertAllClose(
math_ops.matmul(x, w1) * num_replicas,
self.evaluate(strategy.reduce("SUM", result, axis=None)),
rtol=5e-3,
atol=5e-3)
def test_spmd_with_summary(self):
original_device_placement = config.get_soft_device_placement()
config.set_soft_device_placement(True)
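    # Soft device placement lets summary ops be placed on the host under TPU SPMD.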
strategy, _ = get_tpu_strategy(enable_spmd=True)
summary_dir = self.get_temp_dir()
writer = summary_ops.create_file_writer_v2(summary_dir)
const_multiple = 2
num_iters = 10
expected_event_count = num_iters + 1
with strategy.scope():
step = variables.Variable(1, dtype=dtypes.int64)
@def_function.function
def run():
with writer.as_default():
with summary_ops.record_if(True):
summary_ops.scalar("result", step * const_multiple, step=step)
step.assign_add(1)
for _ in range(num_iters):
strategy.run(run, args=())
for val in step.values:
for var in val.variables:
self.assertAllEqual(expected_event_count, var)
events = summary_test_util.events_from_logdir(summary_dir)
self.assertLen(events, expected_event_count)
# Event[0] is generic metadata and summary_ops data starts at event[1].
for logged_step in range(1, expected_event_count):
self.assertEqual(events[logged_step].summary.value[0].simple_value,
logged_step * const_multiple)
config.set_soft_device_placement(original_device_placement)
  # Tests SPMD with outside compilation: one case uses replicated sharding of
  # the input tensor and the other uses split sharding.
@parameterized.parameters([False, True])
def test_spmd_with_outside_comp(self, split):
strategy, num_replicas = get_tpu_strategy(enable_spmd=True)
def host_inc(x):
return x + 1
@def_function.function
def fn(x):
if split:
x = strategy.experimental_split_to_logical_devices(x, [1, 2])
y = x + 1
z = tpu_replication.outside_compilation(host_inc, y)
a = z + 1
return a
arg = constant_op.constant(0, shape=(2, 2), dtype=dtypes.int64)
result = strategy.run(fn, args=(arg,))
self.assertAllEqual(
(arg + 3) * num_replicas,
self.evaluate(strategy.reduce("SUM", result, axis=None)))
# Tests auto_to_manual_spmd_partition and manual_to_auto_spmd_partition.
# The internal versions of these ops are XlaSpmdFullToShardShape and
# XlaSpmdShardToFullShape.
def test_manual_sharding_ops(self):
strategy, num_replicas = get_tpu_strategy(enable_spmd=True)
@def_function.function
def fn(x):
x_split = strategy.experimental_split_to_logical_devices(x, [1, 2])
split_sharding = xla_sharding.get_op_sharding(x_split.op)
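      # Switch to manual sharding: each logical core now operates on its local shard.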
x_manual = xla_sharding.auto_to_manual_spmd_partition(
x_split, split_sharding
)
y_manual = x_manual + 1
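      # Return to automatic SPMD partitioning, restoring the full (2, 2) shape.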
y_split = xla_sharding.manual_to_auto_spmd_partition(
y_manual, split_sharding, (2, 2)
)
return y_split
arg = constant_op.constant(0, shape=(2, 2), dtype=dtypes.int64)
result = strategy.run(fn, args=(arg,))
self.assertAllEqual(
(arg + 1) * num_replicas,
self.evaluate(strategy.reduce("SUM", result, axis=None)),
)
# Test mapping of a host-side function onto each shard.
def test_spmd_with_map_outside_comp_inc(self):
strategy, num_replicas = get_tpu_strategy(enable_spmd=True)
def host_inc(x):
return x + 1
@def_function.function
def fn(a):
b = strategy.experimental_split_to_logical_devices(a, [2, 1])
c = tpu_replication.experimental_map_outside_compilation(host_inc, b)
d = strategy.experimental_split_to_logical_devices(c, [2, 1])
return d
arg = constant_op.constant(
[[0, 1], [2, 3]], shape=(2, 2), dtype=dtypes.int64
)
result = strategy.run(fn, args=(arg,))
expected = (arg + 1) * num_replicas
self.assertAllEqual(
expected, self.evaluate(strategy.reduce("SUM", result, axis=None))
)
# Test mapping of an l2_normalize host-side function onto each shard. This is
# not a point-wise function so the result is different from ordinary outside
# compilation.
def test_spmd_with_map_outside_comp_l2norm(self):
strategy, num_replicas = get_tpu_strategy(enable_spmd=True)
def host_norm(x):
return nn.l2_normalize(x)
@def_function.function
def fn(a):
b = strategy.experimental_split_to_logical_devices(a, [2, 1])
c = tpu_replication.experimental_map_outside_compilation(host_norm, b)
d = strategy.experimental_split_to_logical_devices(c, [2, 1])
return d
arg = constant_op.constant([[0, 1], [2, 3]], dtype=dtypes.float32)
result = strategy.run(fn, args=(arg,))
expected = nn.l2_normalize(arg, axis=1) * num_replicas
self.assertAllEqual(
expected, self.evaluate(strategy.reduce("SUM", result, axis=None))
)
if __name__ == "__main__":
test.main()
| TPUStrategyModelParallelismTest |
python | cython__cython | Cython/Debugger/libcython.py | {
"start": 35800,
"end": 35932
} | class ____(CyUp):
"""
Go down a Cython, Python or relevant C frame.
"""
name = 'cy down'
_command = 'down'
| CyDown |
python | openai__openai-python | src/openai/types/beta/realtime/realtime_response_usage.py | {
"start": 232,
"end": 566
} | class ____(BaseModel):
audio_tokens: Optional[int] = None
"""The number of audio tokens used in the Response."""
cached_tokens: Optional[int] = None
"""The number of cached tokens used in the Response."""
text_tokens: Optional[int] = None
"""The number of text tokens used in the Response."""
| InputTokenDetails |
python | bokeh__bokeh | src/bokeh/models/widgets/inputs.py | {
"start": 10252,
"end": 10602
} | class ____(ToggleInput):
""" A checkbox-like widget. """
# explicit __init__ to support Init signatures
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
on_icon = Nullable(IconLike, default=None, help="""
""")
off_icon = Nullable(IconLike, default=None, help="""
""")
| Switch |
python | django__django | tests/migrations/test_migrations_squashed_complex_multi_apps/app1/2_squashed_3.py | {
"start": 35,
"end": 282
} | class ____(migrations.Migration):
replaces = [
("app1", "2_auto"),
("app1", "3_auto"),
]
dependencies = [("app1", "1_auto"), ("app2", "2_auto")]
operations = [migrations.RunPython(migrations.RunPython.noop)]
| Migration |
python | pytorch__pytorch | torch/_dynamo/variables/functions.py | {
"start": 29954,
"end": 31134
} | class ____(BaseUserFunctionVariable):
def __init__(
self, fn: types.BuiltinMethodType, is_constant: bool = False, **kwargs: Any
) -> None:
super().__init__(**kwargs)
assert isinstance(fn, types.BuiltinMethodType)
self.fn = fn
@staticmethod
def is_supported_builtin_method(obj: Any) -> bool:
method_self = obj.__self__
method_name = obj.__name__
# TODO(anijain2305) - Add support for more builtin methods
# Supports tuple.__new__ and frozenset({....}).__contains__
return (method_self is tuple and method_name == "__new__") or (
type(method_self) is frozenset and method_name == "__contains__"
)
def call_function(
self,
tx: "InstructionTranslator",
args: Sequence[VariableTracker],
kwargs: dict[str, VariableTracker],
) -> VariableTracker:
method_self = self.fn.__self__
name = self.fn.__name__
obj_source = self.source and AttrSource(self.source, "__self__")
obj_vt = VariableTracker.build(tx, method_self, obj_source)
return obj_vt.call_method(tx, name, args, kwargs)
| BuiltinMethodVariable |
python | Lightning-AI__lightning | tests/tests_fabric/utilities/test_data.py | {
"start": 4319,
"end": 4438
} | class ____(DataLoader):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
| NoneDataLoader |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/refurb/FURB180.py | {
"start": 783,
"end": 854
} | class ____(B0, abc.ABC, B1):
@abstractmethod
def foo(self): pass
| A7 |
python | PyCQA__pylint | doc/data/messages/i/init-is-generator/bad.py | {
"start": 0,
"end": 140
} | class ____:
def __init__(self, worms): # [init-is-generator]
yield from worms
apple = Fruit(["Fahad", "Anisha", "Tabatha"])
| Fruit |
python | langchain-ai__langchain | libs/langchain/langchain_classic/chains/moderation.py | {
"start": 394,
"end": 4372
} | class ____(Chain):
"""Pass input through a moderation endpoint.
To use, you should have the `openai` python package installed, and the
environment variable `OPENAI_API_KEY` set with your API key.
Any parameters that are valid to be passed to the openai.create call can be passed
in, even if not explicitly saved on this class.
Example:
```python
from langchain_classic.chains import OpenAIModerationChain
moderation = OpenAIModerationChain()
```
"""
client: Any = None
async_client: Any = None
model_name: str | None = None
"""Moderation model name to use."""
error: bool = False
"""Whether or not to error if bad content was found."""
input_key: str = "input"
output_key: str = "output"
openai_api_key: str | None = None
openai_organization: str | None = None
openai_pre_1_0: bool = Field(default=False)
@model_validator(mode="before")
@classmethod
def validate_environment(cls, values: dict) -> Any:
"""Validate that api key and python package exists in environment."""
openai_api_key = get_from_dict_or_env(
values,
"openai_api_key",
"OPENAI_API_KEY",
)
openai_organization = get_from_dict_or_env(
values,
"openai_organization",
"OPENAI_ORGANIZATION",
default="",
)
try:
import openai
openai.api_key = openai_api_key
if openai_organization:
openai.organization = openai_organization
values["openai_pre_1_0"] = False
try:
check_package_version("openai", gte_version="1.0")
except ValueError:
values["openai_pre_1_0"] = True
if values["openai_pre_1_0"]:
values["client"] = openai.Moderation # type: ignore[attr-defined]
else:
values["client"] = openai.OpenAI(api_key=openai_api_key)
values["async_client"] = openai.AsyncOpenAI(api_key=openai_api_key)
except ImportError as e:
msg = (
"Could not import openai python package. "
"Please install it with `pip install openai`."
)
raise ImportError(msg) from e
return values
@property
def input_keys(self) -> list[str]:
"""Expect input key."""
return [self.input_key]
@property
def output_keys(self) -> list[str]:
"""Return output key."""
return [self.output_key]
def _moderate(self, text: str, results: Any) -> str:
condition = results["flagged"] if self.openai_pre_1_0 else results.flagged
if condition:
error_str = "Text was found that violates OpenAI's content policy."
if self.error:
raise ValueError(error_str)
return error_str
return text
@override
def _call(
self,
inputs: dict[str, Any],
run_manager: CallbackManagerForChainRun | None = None,
) -> dict[str, Any]:
text = inputs[self.input_key]
if self.openai_pre_1_0:
results = self.client.create(text)
output = self._moderate(text, results["results"][0])
else:
results = self.client.moderations.create(input=text)
output = self._moderate(text, results.results[0])
return {self.output_key: output}
async def _acall(
self,
inputs: dict[str, Any],
run_manager: AsyncCallbackManagerForChainRun | None = None,
) -> dict[str, Any]:
if self.openai_pre_1_0:
return await super()._acall(inputs, run_manager=run_manager)
text = inputs[self.input_key]
results = await self.async_client.moderations.create(input=text)
output = self._moderate(text, results.results[0])
return {self.output_key: output}
| OpenAIModerationChain |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/hooks/bigquery.py | {
"start": 62828,
"end": 63907
} | class ____(LoggingMixin):
"""
BigQuery cursor.
The BigQuery base cursor contains helper methods to execute queries against
BigQuery. The methods can be used directly by operators, in cases where a
PEP 249 cursor isn't needed.
"""
def __init__(
self,
service: Any,
project_id: str,
hook: BigQueryHook,
use_legacy_sql: bool = True,
api_resource_configs: dict | None = None,
location: str | None = None,
num_retries: int = 5,
labels: dict | None = None,
) -> None:
super().__init__()
self.service = service
self.project_id = project_id
self.use_legacy_sql = use_legacy_sql
if api_resource_configs:
_validate_value("api_resource_configs", api_resource_configs, dict)
self.api_resource_configs: dict = api_resource_configs or {}
self.running_job_id: str | None = None
self.location = location
self.num_retries = num_retries
self.labels = labels
self.hook = hook
| BigQueryBaseCursor |
python | ray-project__ray | python/ray/_private/thirdparty/dacite/exceptions.py | {
"start": 1073,
"end": 1317
} | class ____(DaciteFieldError):
def __init__(self, field_path: Optional[str] = None):
super().__init__(field_path=field_path)
def __str__(self) -> str:
return f'missing value for field "{self.field_path}"'
| MissingValueError |
python | apache__airflow | providers/asana/tests/unit/asana/hooks/test_asana.py | {
"start": 1016,
"end": 10962
} | class ____:
"""
Tests for AsanaHook Asana client retrieval
"""
def test_asana_client_retrieved(self):
"""
Test that we successfully retrieve an Asana client given a Connection with complete information.
:return: None
"""
with patch.object(
AsanaHook, "get_connection", return_value=Connection(conn_type="asana", password="test")
):
hook = AsanaHook()
client = hook.get_conn()
assert isinstance(client, ApiClient)
def test_missing_password_raises(self):
"""
Test that the Asana hook raises an exception if password not provided in connection.
:return: None
"""
with patch.object(AsanaHook, "get_connection", return_value=Connection(conn_type="asana")):
hook = AsanaHook()
with pytest.raises(ValueError, match="password"):
hook.get_conn()
def test_merge_create_task_parameters_default_project(self):
"""
Test that merge_create_task_parameters correctly merges the default and method parameters when we
do not override the default project.
:return: None
"""
conn = Connection(conn_type="asana", password="test", extra='{"extra__asana__project": "1"}')
with patch.object(AsanaHook, "get_connection", return_value=conn):
hook = AsanaHook()
expected_merged_params = {"name": "test", "projects": ["1"]}
assert hook._merge_create_task_parameters("test", {}) == expected_merged_params
def test_merge_create_task_parameters_specified_project(self):
"""
Test that merge_create_task_parameters correctly merges the default and method parameters when we
override the default project.
:return: None
"""
conn = Connection(conn_type="asana", password="test", extra='{"extra__asana__project": "1"}')
with patch.object(AsanaHook, "get_connection", return_value=conn):
hook = AsanaHook()
expected_merged_params = {"name": "test", "projects": ["1", "2"]}
assert hook._merge_create_task_parameters("test", {"projects": ["1", "2"]}) == expected_merged_params
def test_merge_create_task_parameters_specified_workspace(self):
"""
Test that merge_create_task_parameters correctly merges the default and method parameters when we
do not override the default workspace.
:return: None
"""
conn = Connection(conn_type="asana", password="test", extra='{"extra__asana__workspace": "1"}')
with patch.object(AsanaHook, "get_connection", return_value=conn):
hook = AsanaHook()
expected_merged_params = {"name": "test", "workspace": "1"}
assert hook._merge_create_task_parameters("test", {}) == expected_merged_params
def test_merge_create_task_parameters_default_project_overrides_default_workspace(self):
"""
Test that merge_create_task_parameters uses the default project over the default workspace
if it is available
:return: None
"""
conn = Connection(
conn_type="asana",
password="test",
extra='{"extra__asana__workspace": "1", "extra__asana__project": "1"}',
)
with patch.object(AsanaHook, "get_connection", return_value=conn):
hook = AsanaHook()
expected_merged_params = {"name": "test", "projects": ["1"]}
assert hook._merge_create_task_parameters("test", {}) == expected_merged_params
def test_merge_create_task_parameters_specified_project_overrides_default_workspace(self):
"""
Test that merge_create_task_parameters uses the method parameter project over the default workspace
if it is available
:return: None
"""
conn = Connection(
conn_type="asana",
password="test",
extra='{"extra__asana__workspace": "1"}',
)
with patch.object(AsanaHook, "get_connection", return_value=conn):
hook = AsanaHook()
expected_merged_params = {"name": "test", "projects": ["2"]}
assert hook._merge_create_task_parameters("test", {"projects": ["2"]}) == expected_merged_params
def test_merge_find_task_parameters_default_project(self):
"""
Test that merge_find_task_parameters correctly merges the default and method parameters when we
do not override the default project.
:return: None
"""
conn = Connection(conn_type="asana", password="test", extra='{"extra__asana__project": "1"}')
with patch.object(AsanaHook, "get_connection", return_value=conn):
hook = AsanaHook()
expected_merged_params = {"project": "1"}
assert hook._merge_find_task_parameters({}) == expected_merged_params
def test_merge_find_task_parameters_specified_project(self):
"""
Test that merge_find_task_parameters correctly merges the default and method parameters when we
do override the default project.
:return: None
"""
conn = Connection(conn_type="asana", password="test", extra='{"extra__asana__project": "1"}')
with patch.object(AsanaHook, "get_connection", return_value=conn):
hook = AsanaHook()
expected_merged_params = {"project": "2"}
assert hook._merge_find_task_parameters({"project": "2"}) == expected_merged_params
def test_merge_find_task_parameters_default_workspace(self):
"""
Test that merge_find_task_parameters correctly merges the default and method parameters when we
do not override the default workspace.
:return: None
"""
conn = Connection(conn_type="asana", password="test", extra='{"extra__asana__workspace": "1"}')
with patch.object(AsanaHook, "get_connection", return_value=conn):
hook = AsanaHook()
expected_merged_params = {"workspace": "1", "assignee": "1"}
assert hook._merge_find_task_parameters({"assignee": "1"}) == expected_merged_params
def test_merge_find_task_parameters_specified_workspace(self):
"""
Test that merge_find_task_parameters correctly merges the default and method parameters when we
do override the default workspace.
:return: None
"""
conn = Connection(conn_type="asana", password="test", extra='{"extra__asana__workspace": "1"}')
with patch.object(AsanaHook, "get_connection", return_value=conn):
hook = AsanaHook()
expected_merged_params = {"workspace": "2", "assignee": "1"}
assert hook._merge_find_task_parameters({"workspace": "2", "assignee": "1"}) == expected_merged_params
def test_merge_find_task_parameters_default_project_overrides_workspace(self):
"""
Test that merge_find_task_parameters uses the default project over the workspace if it is available
:return: None
"""
conn = Connection(
conn_type="asana",
password="test",
extra='{"extra__asana__workspace": "1", "extra__asana__project": "1"}',
)
with patch.object(AsanaHook, "get_connection", return_value=conn):
hook = AsanaHook()
expected_merged_params = {"project": "1"}
assert hook._merge_find_task_parameters({}) == expected_merged_params
def test_merge_find_task_parameters_specified_project_overrides_workspace(self):
"""
Test that merge_find_task_parameters uses the method parameter project over the default workspace
if it is available
:return: None
"""
conn = Connection(
conn_type="asana",
password="test",
extra='{"extra__asana__workspace": "1"}',
)
with patch.object(AsanaHook, "get_connection", return_value=conn):
hook = AsanaHook()
expected_merged_params = {"project": "2"}
assert hook._merge_find_task_parameters({"project": "2"}) == expected_merged_params
def test_merge_project_parameters(self):
"""
Tests that default workspace is used if not overridden
:return:
"""
conn = Connection(conn_type="asana", password="test", extra='{"extra__asana__workspace": "1"}')
with patch.object(AsanaHook, "get_connection", return_value=conn):
hook = AsanaHook()
expected_merged_params = {"workspace": "1", "name": "name"}
assert hook._merge_project_parameters({"name": "name"}) == expected_merged_params
def test_merge_project_parameters_override(self):
"""
Tests that default workspace is successfully overridden
:return:
"""
conn = Connection(conn_type="asana", password="test", extra='{"extra__asana__workspace": "1"}')
with patch.object(AsanaHook, "get_connection", return_value=conn):
hook = AsanaHook()
expected_merged_params = {"workspace": "2"}
assert hook._merge_project_parameters({"workspace": "2"}) == expected_merged_params
@pytest.mark.parametrize(
"uri",
[
pytest.param(
"a://?extra__asana__workspace=abc&extra__asana__project=abc",
id="prefix",
),
pytest.param("a://?workspace=abc&project=abc", id="no-prefix"),
],
)
def test_backcompat_prefix_works(self, uri):
with patch.dict(os.environ, {"AIRFLOW_CONN_MY_CONN": uri}):
hook = AsanaHook("my_conn")
assert hook.workspace == "abc"
assert hook.project == "abc"
def test_backcompat_prefix_both_prefers_short(self):
with patch.dict(
os.environ,
{"AIRFLOW_CONN_MY_CONN": "a://?workspace=non-prefixed&extra__asana__workspace=prefixed"},
):
hook = AsanaHook("my_conn")
assert hook.workspace == "non-prefixed"
| TestAsanaHook |
python | scrapy__scrapy | tests/test_utils_signal.py | {
"start": 3100,
"end": 3363
} | class ____(TestSendCatchLogAsync):
def ok_handler(self, arg, handlers_called):
handlers_called.add(self.ok_handler)
assert arg == "test"
d = defer.Deferred()
call_later(0, d.callback, "OK")
return d
| TestSendCatchLogAsync2 |
python | cython__cython | Cython/Compiler/ExprNodes.py | {
"start": 458332,
"end": 459035
} | class ____(DictNode):
def __init__(self, pos, env):
local_vars = sorted([
entry.name for entry in env.entries.values() if entry.name])
items = [LocalsDictItemNode(
pos, key=IdentifierStringNode(pos, value=var),
value=NameNode(pos, name=var, allow_null=True))
for var in local_vars]
DictNode.__init__(self, pos, key_value_pairs=items,
exclude_null_values=True)
def analyse_types(self, env):
node = super().analyse_types(env)
node.key_value_pairs = [ i for i in node.key_value_pairs
if i.value is not None ]
return node
| FuncLocalsExprNode |
python | python-pillow__Pillow | src/PIL/McIdasImagePlugin.py | {
"start": 617,
"end": 1877
} | class ____(ImageFile.ImageFile):
format = "MCIDAS"
format_description = "McIdas area file"
def _open(self) -> None:
# parse area file directory
assert self.fp is not None
s = self.fp.read(256)
if not _accept(s) or len(s) != 256:
msg = "not an McIdas area file"
raise SyntaxError(msg)
self.area_descriptor_raw = s
self.area_descriptor = w = [0, *struct.unpack("!64i", s)]
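        # A leading zero makes the 64 directory words 1-indexed, matching the spec.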
# get mode
if w[11] == 1:
mode = rawmode = "L"
elif w[11] == 2:
mode = rawmode = "I;16B"
elif w[11] == 4:
# FIXME: add memory map support
mode = "I"
rawmode = "I;32B"
else:
msg = "unsupported McIdas format"
raise SyntaxError(msg)
self._mode = mode
self._size = w[10], w[9]
offset = w[34] + w[15]
stride = w[15] + w[10] * w[11] * w[14]
self.tile = [
ImageFile._Tile("raw", (0, 0) + self.size, offset, (rawmode, stride, 1))
]
# --------------------------------------------------------------------
# registry
Image.register_open(McIdasImageFile.format, McIdasImageFile, _accept)
# no default extension
| McIdasImageFile |
python | spack__spack | lib/spack/spack/vendor/ruamel/yaml/tokens.py | {
"start": 8090,
"end": 8168
} | class ____(Token):
__slots__ = ()
id = '<document end>'
| DocumentEndToken |
python | pennersr__django-allauth | allauth/socialaccount/providers/auth0/views.py | {
"start": 228,
"end": 1062
} | class ____(OAuth2Adapter):
provider_id = "auth0"
settings = app_settings.PROVIDERS.get(provider_id, {})
provider_base_url = settings.get("AUTH0_URL")
access_token_url = "{0}/oauth/token".format(provider_base_url)
authorize_url = "{0}/authorize".format(provider_base_url)
profile_url = "{0}/userinfo".format(provider_base_url)
def complete_login(self, request, app, token, response):
extra_data = (
get_adapter()
.get_requests_session()
.get(self.profile_url, params={"access_token": token.token})
.json()
)
return self.get_provider().sociallogin_from_response(request, extra_data)
oauth2_login = OAuth2LoginView.adapter_view(Auth0OAuth2Adapter)
oauth2_callback = OAuth2CallbackView.adapter_view(Auth0OAuth2Adapter)
| Auth0OAuth2Adapter |
python | getsentry__sentry | src/sentry/replays/usecases/ingest/event_parser.py | {
"start": 1321,
"end": 1660
} | class ____:
canvas_sizes: list[int]
click_events: list[ClickEvent]
multiclick_events: list[MultiClickEvent]
hydration_errors: list[HydrationError]
mutation_events: list[MutationEvent]
options_events: list[dict[str, Any]]
request_response_sizes: list[tuple[Any, Any]]
tap_events: list[TapEvent]
| ParsedEventMeta |
python | numpy__numpy | numpy/polynomial/tests/test_legendre.py | {
"start": 784,
"end": 1082
} | class ____:
def test_legdomain(self):
assert_equal(leg.legdomain, [-1, 1])
def test_legzero(self):
assert_equal(leg.legzero, [0])
def test_legone(self):
assert_equal(leg.legone, [1])
def test_legx(self):
assert_equal(leg.legx, [0, 1])
| TestConstants |
python | huggingface__transformers | tests/models/cohere/test_modeling_cohere.py | {
"start": 5841,
"end": 6852
} | class ____(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
all_model_classes = (CohereModel, CohereForCausalLM) if is_torch_available() else ()
pipeline_model_mapping = (
{
"feature-extraction": CohereModel,
"text-generation": CohereForCausalLM,
}
if is_torch_available()
else {}
)
# Need to use `0.8` instead of `0.9` for `test_cpu_offload`
# This is because we are hitting edge cases with the causal_mask buffer
model_split_percents = [0.5, 0.7, 0.8]
def setUp(self):
self.model_tester = CohereModelTester(self)
self.config_tester = ConfigTester(self, config_class=CohereConfig, hidden_size=37)
def test_config(self):
self.config_tester.run_common_tests()
def test_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs)
@require_torch
@slow
| CohereModelTest |
python | pyca__cryptography | tests/x509/test_x509_ext.py | {
"start": 152394,
"end": 160552
} | class ____:
def test_invalid_distribution_points(self):
with pytest.raises(TypeError):
x509.FreshestCRL(
["notadistributionpoint"] # type:ignore[list-item]
)
def test_iter_len(self):
fcrl = x509.FreshestCRL(
[
x509.DistributionPoint(
[x509.UniformResourceIdentifier("http://domain")],
None,
None,
None,
),
]
)
assert len(fcrl) == 1
assert list(fcrl) == [
x509.DistributionPoint(
[x509.UniformResourceIdentifier("http://domain")],
None,
None,
None,
),
]
def test_iter_input(self):
points = [
x509.DistributionPoint(
[x509.UniformResourceIdentifier("http://domain")],
None,
None,
None,
),
]
fcrl = x509.FreshestCRL(iter(points))
assert list(fcrl) == points
def test_repr(self):
fcrl = x509.FreshestCRL(
[
x509.DistributionPoint(
[x509.UniformResourceIdentifier("ftp://domain")],
None,
frozenset([x509.ReasonFlags.key_compromise]),
None,
),
]
)
assert repr(fcrl) == (
"<FreshestCRL([<DistributionPoint(full_name=[<Unifo"
"rmResourceIdentifier(value='ftp://domain')>], relative"
"_name=None, reasons=frozenset({<ReasonFlags.key_compromise: "
"'keyCompromise'>}), crl_issuer=None)>])>"
)
def test_eq(self):
fcrl = x509.FreshestCRL(
[
x509.DistributionPoint(
[x509.UniformResourceIdentifier("ftp://domain")],
None,
frozenset(
[
x509.ReasonFlags.key_compromise,
x509.ReasonFlags.ca_compromise,
]
),
[x509.UniformResourceIdentifier("uri://thing")],
),
]
)
fcrl2 = x509.FreshestCRL(
[
x509.DistributionPoint(
[x509.UniformResourceIdentifier("ftp://domain")],
None,
frozenset(
[
x509.ReasonFlags.key_compromise,
x509.ReasonFlags.ca_compromise,
]
),
[x509.UniformResourceIdentifier("uri://thing")],
),
]
)
assert fcrl == fcrl2
def test_ne(self):
fcrl = x509.FreshestCRL(
[
x509.DistributionPoint(
[x509.UniformResourceIdentifier("ftp://domain")],
None,
frozenset(
[
x509.ReasonFlags.key_compromise,
x509.ReasonFlags.ca_compromise,
]
),
[x509.UniformResourceIdentifier("uri://thing")],
),
]
)
fcrl2 = x509.FreshestCRL(
[
x509.DistributionPoint(
[x509.UniformResourceIdentifier("ftp://domain2")],
None,
frozenset(
[
x509.ReasonFlags.key_compromise,
x509.ReasonFlags.ca_compromise,
]
),
[x509.UniformResourceIdentifier("uri://thing")],
),
]
)
fcrl3 = x509.FreshestCRL(
[
x509.DistributionPoint(
[x509.UniformResourceIdentifier("ftp://domain")],
None,
frozenset([x509.ReasonFlags.key_compromise]),
[x509.UniformResourceIdentifier("uri://thing")],
),
]
)
fcrl4 = x509.FreshestCRL(
[
x509.DistributionPoint(
[x509.UniformResourceIdentifier("ftp://domain")],
None,
frozenset(
[
x509.ReasonFlags.key_compromise,
x509.ReasonFlags.ca_compromise,
]
),
[x509.UniformResourceIdentifier("uri://thing2")],
),
]
)
assert fcrl != fcrl2
assert fcrl != fcrl3
assert fcrl != fcrl4
assert fcrl != object()
def test_hash(self):
fcrl = x509.FreshestCRL(
[
x509.DistributionPoint(
[x509.UniformResourceIdentifier("ftp://domain")],
None,
frozenset(
[
x509.ReasonFlags.key_compromise,
x509.ReasonFlags.ca_compromise,
]
),
[x509.UniformResourceIdentifier("uri://thing")],
),
]
)
fcrl2 = x509.FreshestCRL(
[
x509.DistributionPoint(
[x509.UniformResourceIdentifier("ftp://domain")],
None,
frozenset(
[
x509.ReasonFlags.key_compromise,
x509.ReasonFlags.ca_compromise,
]
),
[x509.UniformResourceIdentifier("uri://thing")],
),
]
)
fcrl3 = x509.FreshestCRL(
[
x509.DistributionPoint(
[x509.UniformResourceIdentifier("ftp://domain")],
None,
frozenset([x509.ReasonFlags.key_compromise]),
[x509.UniformResourceIdentifier("uri://thing")],
),
]
)
assert hash(fcrl) == hash(fcrl2)
assert hash(fcrl) != hash(fcrl3)
def test_indexing(self):
fcrl = x509.FreshestCRL(
[
x509.DistributionPoint(
None,
None,
None,
[x509.UniformResourceIdentifier("uri://thing")],
),
x509.DistributionPoint(
None,
None,
None,
[x509.UniformResourceIdentifier("uri://thing2")],
),
x509.DistributionPoint(
None,
None,
None,
[x509.UniformResourceIdentifier("uri://thing3")],
),
x509.DistributionPoint(
None,
None,
None,
[x509.UniformResourceIdentifier("uri://thing4")],
),
x509.DistributionPoint(
None,
None,
None,
[x509.UniformResourceIdentifier("uri://thing5")],
),
]
)
assert fcrl[-1] == fcrl[4]
assert fcrl[2:6:2] == [fcrl[2], fcrl[4]]
def test_public_bytes(self):
ext = x509.FreshestCRL(
[
x509.DistributionPoint(
[x509.UniformResourceIdentifier("ftp://domain")],
None,
frozenset([x509.ReasonFlags.key_compromise]),
None,
),
]
)
assert (
ext.public_bytes()
== b"0\x180\x16\xa0\x10\xa0\x0e\x86\x0cftp://domain\x81\x02\x06@"
)
| TestFreshestCRL |
python | readthedocs__readthedocs.org | readthedocs/config/models.py | {
"start": 1099,
"end": 1726
} | class ____(ConfigBaseModel):
"""Object used for `build.jobs` key."""
pre_checkout: list[str] = []
post_checkout: list[str] = []
pre_system_dependencies: list[str] = []
post_system_dependencies: list[str] = []
pre_create_environment: list[str] = []
create_environment: list[str] | None = None
post_create_environment: list[str] = []
pre_install: list[str] = []
install: list[str] | None = None
post_install: list[str] = []
pre_build: list[str] = []
build: BuildJobsBuildTypes = BuildJobsBuildTypes()
post_build: list[str] = []
# TODO: rename this class to `Build`
| BuildJobs |
python | tiangolo__fastapi | tests/test_response_model_as_return_annotation.py | {
"start": 356,
"end": 397
} | class ____(BaseUser):
surname: str
| User |
python | modin-project__modin | modin/conftest.py | {
"start": 5323,
"end": 6581
} | class ____(BaseQueryCompiler):
def __init__(self, modin_frame):
self._modin_frame = modin_frame
storage_format = property(
lambda self: "Base", doc=BaseQueryCompiler.storage_format.__doc__
)
engine = property(lambda self: "Python", doc=BaseQueryCompiler.engine.__doc__)
def finalize(self):
self._modin_frame.finalize()
def execute(self):
self.finalize()
self._modin_frame.wait_computations()
@classmethod
def from_pandas(cls, df, data_cls):
return cls(data_cls.from_pandas(df))
@classmethod
def from_arrow(cls, at, data_cls):
return cls(data_cls.from_arrow(at))
def free(self):
pass
def to_interchange_dataframe(
self, nan_as_null: bool = False, allow_copy: bool = True
):
raise NotImplementedError(
"The selected execution does not implement the DataFrame exchange protocol."
)
@classmethod
def from_interchange_dataframe(cls, df, data_cls):
raise NotImplementedError(
"The selected execution does not implement the DataFrame exchange protocol."
)
to_pandas = PandasQueryCompiler.to_pandas
default_to_pandas = PandasQueryCompiler.default_to_pandas
| TestQC |
python | facebook__pyre-check | source/interprocedural_analyses/taint/test/integration/decorator_location.py | {
"start": 2520,
"end": 3960
} | class ____:
def return_source(self) -> int:
return _test_source()
def identity(f: Callable) -> Callable:
# The return type is wrongly written as `Callable`.
@wraps(f)
def inner(*args, **kwargs) -> Callable:
return f(*args, **kwargs)
return inner
@identity
def return_foo() -> Foo:
return Foo()
def call_return_foo() -> None:
# TODO(T215330919): False negative
foo = return_foo()
_test_sink(foo.return_source())
def main() -> None:
# Properly finds all issues.
decorated_logging_logging2(_test_source())
# Does NOT find the issue (false negative).
decorated_skip_this_decorator(_test_source())
# Finds the issue to the decorator but not the inner function.
decorated_logging2_skip_this_decorator(_test_source())
# Properly finds the issue.
decorated_ignore_this_decorator(_test_source())
# Properly finds the issue.
decorated_ignore_this_decorator_factory(_test_source())
# Properly finds the issue.
decorated_ignore_this_decorator_class(_test_source())
    # Does NOT find the issue (false negative).
decorated_ignore_then_skip_decorator(_test_source())
# Properly finds all issues.
decorated_logging_ignore_this_decorator(_test_source())
# No issue because this `x` is not passed to `handle_request`.
handle_request("hello", _test_source(), 42)
handle_request(_test_source(), 42, 42)
call_return_foo()
| Foo |
python | django__django | tests/delete_regress/models.py | {
"start": 1889,
"end": 2051
} | class ____(models.Model):
food = models.ForeignKey(Food, models.CASCADE, to_field="name")
meal = models.CharField(max_length=20)
# Models for #15776
| Eaten |
python | matplotlib__matplotlib | lib/matplotlib/patheffects.py | {
"start": 10050,
"end": 12178
} | class ____(AbstractPathEffect):
"""A simple shadow via a line."""
def __init__(self, offset=(2, -2),
shadow_color='k', alpha=0.3, rho=0.3, **kwargs):
"""
Parameters
----------
offset : (float, float), default: (2, -2)
The (x, y) offset to apply to the path, in points.
shadow_color : :mpltype:`color`, default: 'black'
The shadow color.
A value of ``None`` takes the original artist's color
with a scale factor of *rho*.
alpha : float, default: 0.3
The alpha transparency of the created shadow patch.
rho : float, default: 0.3
A scale factor to apply to the rgbFace color if *shadow_color*
is ``None``.
**kwargs
Extra keywords are stored and passed through to
:meth:`!AbstractPathEffect._update_gc`.
"""
super().__init__(offset)
if shadow_color is None:
self._shadow_color = shadow_color
else:
self._shadow_color = mcolors.to_rgba(shadow_color)
self._alpha = alpha
self._rho = rho
#: The dictionary of keywords to update the graphics collection with.
self._gc = kwargs
def draw_path(self, renderer, gc, tpath, affine, rgbFace):
"""
Overrides the standard draw_path to add the shadow offset and
necessary color changes for the shadow.
"""
gc0 = renderer.new_gc() # Don't modify gc, but a copy!
gc0.copy_properties(gc)
if self._shadow_color is None:
r, g, b = (gc0.get_foreground() or (1., 1., 1.))[:3]
# Scale the colors by a factor to improve the shadow effect.
shadow_rgbFace = (r * self._rho, g * self._rho, b * self._rho)
else:
shadow_rgbFace = self._shadow_color
gc0.set_foreground(shadow_rgbFace)
gc0.set_alpha(self._alpha)
gc0 = self._update_gc(gc0, self._gc)
renderer.draw_path(
gc0, tpath, affine + self._offset_transform(renderer))
gc0.restore()
| SimpleLineShadow |
python | MongoEngine__mongoengine | mongoengine/fields.py | {
"start": 79042,
"end": 79589
} | class ____(GeoJsonBaseField):
"""A GeoJSON field storing a polygon of longitude and latitude coordinates.
The data is represented as:
.. code-block:: js
{'type' : 'Polygon' ,
'coordinates' : [[[x1, y1], [x1, y1] ... [xn, yn]],
[[x1, y1], [x1, y1] ... [xn, yn]]}
You can either pass a dict with the full information or a list
    of LineStrings, where the first LineString is the exterior boundary and
    the rest are holes.
Requires mongodb >= 2.4
"""
_type = "Polygon"
| PolygonField |
python | django-debug-toolbar__django-debug-toolbar | tests/test_csp_rendering.py | {
"start": 1725,
"end": 8101
} | class ____(IntegrationTestCase):
"""Testing if `csp-nonce` renders."""
def setUp(self):
super().setUp()
self.parser = HTMLParser()
def _fail_if_missing(self, root, path, namespaces, nonce):
"""
Search elements, fail if a `nonce` attribute is missing on them.
"""
elements = root.findall(path=path, namespaces=namespaces)
for item in elements:
if item.attrib.get("nonce") != nonce:
raise self.failureException(f"{item} has no nonce attribute.")
def _fail_if_found(self, root, path, namespaces):
"""
Search elements, fail if a `nonce` attribute is found on them.
"""
elements = root.findall(path=path, namespaces=namespaces)
for item in elements:
if "nonce" in item.attrib:
raise self.failureException(f"{item} has a nonce attribute.")
def _fail_on_invalid_html(self, content, parser):
"""Fail if the passed HTML is invalid."""
if parser.errors:
default_msg = ["Content is invalid HTML:"]
lines = content.split(b"\n")
for position, error_code, data_vars in parser.errors:
default_msg.append(f" {E[error_code]}" % data_vars)
default_msg.append(f" {lines[position[0] - 1]!r}")
msg = self._formatMessage(None, "\n".join(default_msg))
raise self.failureException(msg)
def test_exists(self):
"""A `nonce` should exist when using the `CSPMiddleware`."""
for middleware in VALID_MIDDLEWARE_VARIATIONS:
with self.settings(MIDDLEWARE=middleware):
response = self.client.get(path="/csp_view/")
self.assertEqual(response.status_code, 200)
html_root = self.parser.parse(stream=response.content)
self._fail_on_invalid_html(content=response.content, parser=self.parser)
self.assertContains(response, "djDebug")
namespaces = get_namespaces(element=html_root)
nonce = get_csp_nonce(response.context["request"])
assert nonce is not None
self._fail_if_missing(
root=html_root, path=".//link", namespaces=namespaces, nonce=nonce
)
self._fail_if_missing(
root=html_root, path=".//script", namespaces=namespaces, nonce=nonce
)
def test_does_not_exist_nonce_wasnt_used(self):
"""
A `nonce` should not exist even when using the `CSPMiddleware`
if the view didn't access the request's CSP nonce.
"""
for middleware in VALID_MIDDLEWARE_VARIATIONS:
with self.settings(MIDDLEWARE=middleware):
response = self.client.get(path="/regular/basic/")
self.assertEqual(response.status_code, 200)
html_root = self.parser.parse(stream=response.content)
self._fail_on_invalid_html(content=response.content, parser=self.parser)
self.assertContains(response, "djDebug")
namespaces = get_namespaces(element=html_root)
self._fail_if_found(
root=html_root, path=".//link", namespaces=namespaces
)
self._fail_if_found(
root=html_root, path=".//script", namespaces=namespaces
)
@override_settings(
DEBUG_TOOLBAR_CONFIG={"DISABLE_PANELS": set()},
)
def test_redirects_exists(self):
for middleware in VALID_MIDDLEWARE_VARIATIONS:
with self.settings(MIDDLEWARE=middleware):
response = self.client.get(path="/csp_view/")
self.assertEqual(response.status_code, 200)
html_root = self.parser.parse(stream=response.content)
self._fail_on_invalid_html(content=response.content, parser=self.parser)
self.assertContains(response, "djDebug")
namespaces = get_namespaces(element=html_root)
context = response.context
nonce = str(context["toolbar"].csp_nonce)
self._fail_if_missing(
root=html_root, path=".//link", namespaces=namespaces, nonce=nonce
)
self._fail_if_missing(
root=html_root, path=".//script", namespaces=namespaces, nonce=nonce
)
def test_panel_content_nonce_exists(self):
store = get_store()
for middleware in VALID_MIDDLEWARE_VARIATIONS:
with self.settings(MIDDLEWARE=middleware):
response = self.client.get(path="/csp_view/")
self.assertEqual(response.status_code, 200)
request_ids = list(store.request_ids())
toolbar = DebugToolbar.fetch(request_ids[-1])
panels_to_check = ["HistoryPanel", "TimerPanel"]
for panel in panels_to_check:
content = toolbar.get_panel_by_id(panel).content
html_root = self.parser.parse(stream=content)
namespaces = get_namespaces(element=html_root)
nonce = str(toolbar.csp_nonce)
self._fail_if_missing(
root=html_root,
path=".//link",
namespaces=namespaces,
nonce=nonce,
)
self._fail_if_missing(
root=html_root,
path=".//script",
namespaces=namespaces,
nonce=nonce,
)
def test_missing(self):
"""A `nonce` should not exist when not using the `CSPMiddleware`."""
response = self.client.get(path="/regular/basic/")
self.assertEqual(response.status_code, 200)
html_root = self.parser.parse(stream=response.content)
self._fail_on_invalid_html(content=response.content, parser=self.parser)
self.assertContains(response, "djDebug")
namespaces = get_namespaces(element=html_root)
self._fail_if_found(root=html_root, path=".//link", namespaces=namespaces)
self._fail_if_found(root=html_root, path=".//script", namespaces=namespaces)
| CspRenderingTestCase |
python | kubernetes-client__python | kubernetes/client/api/resource_v1beta1_api.py | {
"start": 543,
"end": 450723
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def create_device_class(self, body, **kwargs): # noqa: E501
"""create_device_class # noqa: E501
create a DeviceClass # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_device_class(body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param V1beta1DeviceClass body: (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1beta1DeviceClass
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.create_device_class_with_http_info(body, **kwargs) # noqa: E501
def create_device_class_with_http_info(self, body, **kwargs): # noqa: E501
"""create_device_class # noqa: E501
create a DeviceClass # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_device_class_with_http_info(body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param V1beta1DeviceClass body: (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
        :param _return_http_data_only: response data without status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1beta1DeviceClass, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'body',
'pretty',
'dry_run',
'field_manager',
'field_validation'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method create_device_class" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'body' is set
if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
local_var_params['body'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `body` when calling `create_device_class`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/resource.k8s.io/v1beta1/deviceclasses', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1beta1DeviceClass', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def create_namespaced_resource_claim(self, namespace, body, **kwargs): # noqa: E501
"""create_namespaced_resource_claim # noqa: E501
create a ResourceClaim # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_namespaced_resource_claim(namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1beta1ResourceClaim body: (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1beta1ResourceClaim
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.create_namespaced_resource_claim_with_http_info(namespace, body, **kwargs) # noqa: E501
def create_namespaced_resource_claim_with_http_info(self, namespace, body, **kwargs): # noqa: E501
"""create_namespaced_resource_claim # noqa: E501
create a ResourceClaim # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_namespaced_resource_claim_with_http_info(namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1beta1ResourceClaim body: (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or equal to 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
:param _return_http_data_only: if True, return the response data only,
                                without the HTTP status code and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1beta1ResourceClaim, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'namespace',
'body',
'pretty',
'dry_run',
'field_manager',
'field_validation'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method create_namespaced_resource_claim" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'namespace' is set
if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
local_var_params['namespace'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `namespace` when calling `create_namespaced_resource_claim`") # noqa: E501
# verify the required parameter 'body' is set
if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
local_var_params['body'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `body` when calling `create_namespaced_resource_claim`") # noqa: E501
collection_formats = {}
path_params = {}
if 'namespace' in local_var_params:
path_params['namespace'] = local_var_params['namespace'] # noqa: E501
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/resource.k8s.io/v1beta1/namespaces/{namespace}/resourceclaims', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1beta1ResourceClaim', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
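
    # Sketch of the *_with_http_info variant, which (per the docstring above)
    # returns a (data, status_code, headers) tuple. Reuses the hypothetical
    # `api` and `claim` from the sketch above.
    #
    #     data, status, headers = (
    #         api.create_namespaced_resource_claim_with_http_info(
    #             "default", claim))
    #     print(status)                       # typically 201 Created on success
    #     print(headers.get("Content-Type"))  # HTTPHeaderDict access
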
def create_namespaced_resource_claim_template(self, namespace, body, **kwargs): # noqa: E501
"""create_namespaced_resource_claim_template # noqa: E501
create a ResourceClaimTemplate # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_namespaced_resource_claim_template(namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1beta1ResourceClaimTemplate body: (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or equal to 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1beta1ResourceClaimTemplate
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.create_namespaced_resource_claim_template_with_http_info(namespace, body, **kwargs) # noqa: E501
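
    # Sketch of the async_req path documented above: the call returns
    # immediately with a thread-like handle, and .get() blocks for the
    # result. The template body is a hypothetical placeholder.
    #
    #     template = {
    #         "apiVersion": "resource.k8s.io/v1beta1",
    #         "kind": "ResourceClaimTemplate",
    #         "metadata": {"name": "example-claim-template"},
    #         "spec": {"spec": {"devices": {"requests": [
    #             {"name": "gpu", "deviceClassName": "example-device-class"},
    #         ]}}},
    #     }
    #
    #     thread = api.create_namespaced_resource_claim_template(
    #         "default", template, async_req=True)
    #     result = thread.get()  # blocks until the request completes
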
def create_namespaced_resource_claim_template_with_http_info(self, namespace, body, **kwargs): # noqa: E501
"""create_namespaced_resource_claim_template # noqa: E501
create a ResourceClaimTemplate # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_namespaced_resource_claim_template_with_http_info(namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1beta1ResourceClaimTemplate body: (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or equal to 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
:param _return_http_data_only: if True, return the response data only,
                                without the HTTP status code and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1beta1ResourceClaimTemplate, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'namespace',
'body',
'pretty',
'dry_run',
'field_manager',
'field_validation'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method create_namespaced_resource_claim_template" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'namespace' is set
if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
local_var_params['namespace'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `namespace` when calling `create_namespaced_resource_claim_template`") # noqa: E501
# verify the required parameter 'body' is set
if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
local_var_params['body'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `body` when calling `create_namespaced_resource_claim_template`") # noqa: E501
collection_formats = {}
path_params = {}
if 'namespace' in local_var_params:
path_params['namespace'] = local_var_params['namespace'] # noqa: E501
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/resource.k8s.io/v1beta1/namespaces/{namespace}/resourceclaimtemplates', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1beta1ResourceClaimTemplate', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
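
    # Sketch of field_validation as documented above: "Strict" makes the
    # server reject bodies with unknown or duplicate fields instead of
    # silently dropping them. Reuses the hypothetical `template` above.
    #
    #     created = api.create_namespaced_resource_claim_template(
    #         "default", template, field_validation="Strict")
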
def create_resource_slice(self, body, **kwargs): # noqa: E501
"""create_resource_slice # noqa: E501
create a ResourceSlice # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_resource_slice(body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param V1beta1ResourceSlice body: (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or equal to 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1beta1ResourceSlice
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.create_resource_slice_with_http_info(body, **kwargs) # noqa: E501
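
    # ResourceSlice is cluster-scoped, so no namespace argument is taken.
    # The spec below is a rough, hypothetical sketch of a node-local slice;
    # consult the V1beta1ResourceSlice schema for the authoritative shape.
    #
    #     slice_body = {
    #         "apiVersion": "resource.k8s.io/v1beta1",
    #         "kind": "ResourceSlice",
    #         "metadata": {"name": "example-node-slice"},
    #         "spec": {
    #             "driver": "example.driver.k8s.io",
    #             "nodeName": "example-node",
    #             "pool": {"name": "example-node", "generation": 1,
    #                      "resourceSliceCount": 1},
    #             "devices": [{"name": "gpu-0", "basic": {}}],
    #         },
    #     }
    #
    #     created = api.create_resource_slice(
    #         slice_body, field_manager="example-controller")
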
def create_resource_slice_with_http_info(self, body, **kwargs): # noqa: E501
"""create_resource_slice # noqa: E501
create a ResourceSlice # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_resource_slice_with_http_info(body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param V1beta1ResourceSlice body: (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or equal to 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
:param _return_http_data_only: if True, return the response data only,
                                without the HTTP status code and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1beta1ResourceSlice, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'body',
'pretty',
'dry_run',
'field_manager',
'field_validation'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method create_resource_slice" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'body' is set
if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
local_var_params['body'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `body` when calling `create_resource_slice`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/resource.k8s.io/v1beta1/resourceslices', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1beta1ResourceSlice', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def delete_collection_device_class(self, **kwargs): # noqa: E501
"""delete_collection_device_class # noqa: E501
delete collection of DeviceClass # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_collection_device_class(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server-defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue), and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid, whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart its list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error; the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent with the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param int grace_period_seconds: The duration in seconds before the object should be deleted. The value must be a non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per-object value if not specified. Zero means delete immediately.
:param bool ignore_store_read_error_with_cluster_breaking_potential: if set to true, it will trigger an unsafe deletion of the resource in case the normal deletion flow fails with a corrupt object error. A resource is considered corrupt if it can not be retrieved from the underlying storage successfully because of a) its data can not be transformed e.g. decryption failure, or b) it fails to decode into an object. NOTE: unsafe deletion ignores finalizer constraints, skips precondition checks, and removes the object from the storage. WARNING: This may potentially break the cluster if the workload associated with the resource being unsafe-deleted relies on normal deletion flow. Use only if you REALLY know what you are doing. The default value is false, and the user must opt in to enable it
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is the maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested number of items (possibly zero items) in the event all requested objects are filtered out, and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list, the version of the object that was present at the time the first list result was calculated is returned.
:param bool orphan_dependents: Deprecated: please use PropagationPolicy; this field will be deprecated in 1.7. Should the dependent objects be orphaned? If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
:param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
:param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
:param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with the `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When the `sendInitialEvents` option is set, we require the `resourceVersionMatch` option to also be set. The semantics of the watch request are as follows: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\", and the bookmark event is sent when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\", and the bookmark event is sent when the state is synced at least to the moment when the request started being processed. - `resourceVersionMatch` set to any other value or unset: an Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param V1DeleteOptions body:
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1Status
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.delete_collection_device_class_with_http_info(**kwargs) # noqa: E501
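
    # Sketch of a scoped collection delete: label_selector narrows the set
    # of DeviceClasses and dry_run="All" previews the effect without
    # persisting. The label is a hypothetical placeholder; the call returns
    # a V1Status.
    #
    #     status = api.delete_collection_device_class(
    #         label_selector="example.com/tier=test", dry_run="All")
    #     print(status.status)
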
def delete_collection_device_class_with_http_info(self, **kwargs): # noqa: E501
"""delete_collection_device_class # noqa: E501
delete collection of DeviceClass # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_collection_device_class_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server-defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue), and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid, whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart its list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error; the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent with the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param int grace_period_seconds: The duration in seconds before the object should be deleted. The value must be a non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per-object value if not specified. Zero means delete immediately.
:param bool ignore_store_read_error_with_cluster_breaking_potential: if set to true, it will trigger an unsafe deletion of the resource in case the normal deletion flow fails with a corrupt object error. A resource is considered corrupt if it can not be retrieved from the underlying storage successfully because of a) its data can not be transformed e.g. decryption failure, or b) it fails to decode into an object. NOTE: unsafe deletion ignores finalizer constraints, skips precondition checks, and removes the object from the storage. WARNING: This may potentially break the cluster if the workload associated with the resource being unsafe-deleted relies on normal deletion flow. Use only if you REALLY know what you are doing. The default value is false, and the user must opt in to enable it
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is the maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested number of items (possibly zero items) in the event all requested objects are filtered out, and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list, the version of the object that was present at the time the first list result was calculated is returned.
:param bool orphan_dependents: Deprecated: please use PropagationPolicy; this field will be deprecated in 1.7. Should the dependent objects be orphaned? If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
:param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
:param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
:param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with the `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When the `sendInitialEvents` option is set, we require the `resourceVersionMatch` option to also be set. The semantics of the watch request are as follows: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\", and the bookmark event is sent when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\", and the bookmark event is sent when the state is synced at least to the moment when the request started being processed. - `resourceVersionMatch` set to any other value or unset: an Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param V1DeleteOptions body:
:param _return_http_data_only: if True, return the response data only,
                                without the HTTP status code and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1Status, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'pretty',
'_continue',
'dry_run',
'field_selector',
'grace_period_seconds',
'ignore_store_read_error_with_cluster_breaking_potential',
'label_selector',
'limit',
'orphan_dependents',
'propagation_policy',
'resource_version',
'resource_version_match',
'send_initial_events',
'timeout_seconds',
'body'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_collection_device_class" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if '_continue' in local_var_params and local_var_params['_continue'] is not None: # noqa: E501
query_params.append(('continue', local_var_params['_continue'])) # noqa: E501
if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None: # noqa: E501
query_params.append(('fieldSelector', local_var_params['field_selector'])) # noqa: E501
if 'grace_period_seconds' in local_var_params and local_var_params['grace_period_seconds'] is not None: # noqa: E501
query_params.append(('gracePeriodSeconds', local_var_params['grace_period_seconds'])) # noqa: E501
if 'ignore_store_read_error_with_cluster_breaking_potential' in local_var_params and local_var_params['ignore_store_read_error_with_cluster_breaking_potential'] is not None: # noqa: E501
query_params.append(('ignoreStoreReadErrorWithClusterBreakingPotential', local_var_params['ignore_store_read_error_with_cluster_breaking_potential'])) # noqa: E501
if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None: # noqa: E501
query_params.append(('labelSelector', local_var_params['label_selector'])) # noqa: E501
if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501
query_params.append(('limit', local_var_params['limit'])) # noqa: E501
if 'orphan_dependents' in local_var_params and local_var_params['orphan_dependents'] is not None: # noqa: E501
query_params.append(('orphanDependents', local_var_params['orphan_dependents'])) # noqa: E501
if 'propagation_policy' in local_var_params and local_var_params['propagation_policy'] is not None: # noqa: E501
query_params.append(('propagationPolicy', local_var_params['propagation_policy'])) # noqa: E501
if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None: # noqa: E501
query_params.append(('resourceVersion', local_var_params['resource_version'])) # noqa: E501
if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None: # noqa: E501
query_params.append(('resourceVersionMatch', local_var_params['resource_version_match'])) # noqa: E501
if 'send_initial_events' in local_var_params and local_var_params['send_initial_events'] is not None: # noqa: E501
query_params.append(('sendInitialEvents', local_var_params['send_initial_events'])) # noqa: E501
if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None: # noqa: E501
query_params.append(('timeoutSeconds', local_var_params['timeout_seconds'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/resource.k8s.io/v1beta1/deviceclasses', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Status', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
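
    # Per the _preload_content parameter documented above, passing False
    # yields the raw response object instead of a deserialized V1Status; its
    # .data attribute holds the undecoded body bytes.
    #
    #     raw = api.delete_collection_device_class(
    #         label_selector="example.com/tier=test", _preload_content=False)
    #     print(raw.data)
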
def delete_collection_namespaced_resource_claim(self, namespace, **kwargs): # noqa: E501
"""delete_collection_namespaced_resource_claim # noqa: E501
delete collection of ResourceClaim # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_collection_namespaced_resource_claim(namespace, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server-defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue), and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid, whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart its list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error; the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent with the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param int grace_period_seconds: The duration in seconds before the object should be deleted. The value must be a non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per-object value if not specified. Zero means delete immediately.
:param bool ignore_store_read_error_with_cluster_breaking_potential: if set to true, it will trigger an unsafe deletion of the resource in case the normal deletion flow fails with a corrupt object error. A resource is considered corrupt if it can not be retrieved from the underlying storage successfully because of a) its data can not be transformed e.g. decryption failure, or b) it fails to decode into an object. NOTE: unsafe deletion ignores finalizer constraints, skips precondition checks, and removes the object from the storage. WARNING: This may potentially break the cluster if the workload associated with the resource being unsafe-deleted relies on normal deletion flow. Use only if you REALLY know what you are doing. The default value is false, and the user must opt in to enable it
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is the maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested number of items (possibly zero items) in the event all requested objects are filtered out, and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list, the version of the object that was present at the time the first list result was calculated is returned.
:param bool orphan_dependents: Deprecated: please use PropagationPolicy; this field will be deprecated in 1.7. Should the dependent objects be orphaned? If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
:param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
:param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
:param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with the `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When the `sendInitialEvents` option is set, we require the `resourceVersionMatch` option to also be set. The semantics of the watch request are as follows: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\", and the bookmark event is sent when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\", and the bookmark event is sent when the state is synced at least to the moment when the request started being processed. - `resourceVersionMatch` set to any other value or unset: an Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param V1DeleteOptions body:
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1Status
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.delete_collection_namespaced_resource_claim_with_http_info(namespace, **kwargs) # noqa: E501
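
    # Sketch combining namespace scoping with the cascading-deletion options
    # documented above; the namespace and selector are hypothetical.
    #
    #     status = api.delete_collection_namespaced_resource_claim(
    #         "default",
    #         label_selector="example.com/owner=batch-job",
    #         propagation_policy="Foreground",
    #         grace_period_seconds=0)
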
def delete_collection_namespaced_resource_claim_with_http_info(self, namespace, **kwargs): # noqa: E501
"""delete_collection_namespaced_resource_claim # noqa: E501
delete collection of ResourceClaim # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_collection_namespaced_resource_claim_with_http_info(namespace, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server-defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue), and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid, whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart its list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error; the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent with the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param int grace_period_seconds: The duration in seconds before the object should be deleted. The value must be a non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per-object value if not specified. Zero means delete immediately.
:param bool ignore_store_read_error_with_cluster_breaking_potential: if set to true, it will trigger an unsafe deletion of the resource in case the normal deletion flow fails with a corrupt object error. A resource is considered corrupt if it can not be retrieved from the underlying storage successfully because of a) its data can not be transformed e.g. decryption failure, or b) it fails to decode into an object. NOTE: unsafe deletion ignores finalizer constraints, skips precondition checks, and removes the object from the storage. WARNING: This may potentially break the cluster if the workload associated with the resource being unsafe-deleted relies on normal deletion flow. Use only if you REALLY know what you are doing. The default value is false, and the user must opt in to enable it
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is the maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested number of items (possibly zero items) in the event all requested objects are filtered out, and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list, the version of the object that was present at the time the first list result was calculated is returned.
:param bool orphan_dependents: Deprecated: please use PropagationPolicy; this field will be deprecated in 1.7. Should the dependent objects be orphaned? If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
:param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset.
:param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset.
:param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with the `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When the `sendInitialEvents` option is set, we require the `resourceVersionMatch` option to also be set. The semantics of the watch request are as follows: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\", and the bookmark event is sent when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\", and the bookmark event is sent when the state is synced at least to the moment when the request started being processed. - If `resourceVersionMatch` is set to any other value or unset, an Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param V1DeleteOptions body:
:param _return_http_data_only: response data only, without status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1Status, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'namespace',
'pretty',
'_continue',
'dry_run',
'field_selector',
'grace_period_seconds',
'ignore_store_read_error_with_cluster_breaking_potential',
'label_selector',
'limit',
'orphan_dependents',
'propagation_policy',
'resource_version',
'resource_version_match',
'send_initial_events',
'timeout_seconds',
'body'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_collection_namespaced_resource_claim" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'namespace' is set
if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
local_var_params['namespace'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `namespace` when calling `delete_collection_namespaced_resource_claim`") # noqa: E501
collection_formats = {}
path_params = {}
if 'namespace' in local_var_params:
path_params['namespace'] = local_var_params['namespace'] # noqa: E501
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if '_continue' in local_var_params and local_var_params['_continue'] is not None: # noqa: E501
query_params.append(('continue', local_var_params['_continue'])) # noqa: E501
if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None: # noqa: E501
query_params.append(('fieldSelector', local_var_params['field_selector'])) # noqa: E501
if 'grace_period_seconds' in local_var_params and local_var_params['grace_period_seconds'] is not None: # noqa: E501
query_params.append(('gracePeriodSeconds', local_var_params['grace_period_seconds'])) # noqa: E501
if 'ignore_store_read_error_with_cluster_breaking_potential' in local_var_params and local_var_params['ignore_store_read_error_with_cluster_breaking_potential'] is not None: # noqa: E501
query_params.append(('ignoreStoreReadErrorWithClusterBreakingPotential', local_var_params['ignore_store_read_error_with_cluster_breaking_potential'])) # noqa: E501
if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None: # noqa: E501
query_params.append(('labelSelector', local_var_params['label_selector'])) # noqa: E501
if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501
query_params.append(('limit', local_var_params['limit'])) # noqa: E501
if 'orphan_dependents' in local_var_params and local_var_params['orphan_dependents'] is not None: # noqa: E501
query_params.append(('orphanDependents', local_var_params['orphan_dependents'])) # noqa: E501
if 'propagation_policy' in local_var_params and local_var_params['propagation_policy'] is not None: # noqa: E501
query_params.append(('propagationPolicy', local_var_params['propagation_policy'])) # noqa: E501
if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None: # noqa: E501
query_params.append(('resourceVersion', local_var_params['resource_version'])) # noqa: E501
if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None: # noqa: E501
query_params.append(('resourceVersionMatch', local_var_params['resource_version_match'])) # noqa: E501
if 'send_initial_events' in local_var_params and local_var_params['send_initial_events'] is not None: # noqa: E501
query_params.append(('sendInitialEvents', local_var_params['send_initial_events'])) # noqa: E501
if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None: # noqa: E501
query_params.append(('timeoutSeconds', local_var_params['timeout_seconds'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/resource.k8s.io/v1beta1/namespaces/{namespace}/resourceclaims', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Status', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
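# A hedged usage sketch for delete_collection_namespaced_resource_claim.
# The bootstrap below, the "default" namespace, and the label selector value
# are illustrative assumptions, not part of this generated API (the class is
# assumed to be exposed as kubernetes.client.ResourceV1beta1Api):
#
#   from kubernetes import client, config
#   config.load_kube_config()  # load credentials from ~/.kube/config
#   api = client.ResourceV1beta1Api()
#   status = api.delete_collection_namespaced_resource_claim(
#       "default", label_selector="app=demo")  # returns V1Status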
def delete_collection_namespaced_resource_claim_template(self, namespace, **kwargs): # noqa: E501
"""delete_collection_namespaced_resource_claim_template # noqa: E501
delete collection of ResourceClaimTemplate # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_collection_namespaced_resource_claim_template(namespace, async_req=True)
>>> result = thread.get()
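A minimal synchronous sketch; the namespace "default" and the label
selector value are illustrative assumptions:
>>> status = api.delete_collection_namespaced_resource_claim_template(
...     "default", label_selector="app=demo")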
:param async_req bool: execute request asynchronously
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue), and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid, whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart its list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error; the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent with the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be a non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per-object value if not specified; zero means delete immediately.
:param bool ignore_store_read_error_with_cluster_breaking_potential: If set to true, it will trigger an unsafe deletion of the resource in case the normal deletion flow fails with a corrupt object error. A resource is considered corrupt if it cannot be retrieved from the underlying storage successfully because (a) its data cannot be transformed, e.g. decryption failure, or (b) it fails to decode into an object. NOTE: unsafe deletion ignores finalizer constraints, skips precondition checks, and removes the object from the storage. WARNING: This may potentially break the cluster if the workload associated with the resource being unsafe-deleted relies on the normal deletion flow. Use only if you REALLY know what you are doing. The default value is false, and the user must opt in to enable it.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested number of items (down to zero items) in the event all requested objects are filtered out, and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure it sees all possible objects. If objects are updated during a chunked list, the version of the object that was present at the time the first list result was calculated is returned.
:param bool orphan_dependents: Deprecated: please use PropagationPolicy; this field will be deprecated in 1.7. Should the dependent objects be orphaned? If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
:param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset.
:param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset.
:param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with the `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When the `sendInitialEvents` option is set, we require the `resourceVersionMatch` option to also be set. The semantics of the watch request are as follows: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\", and the bookmark event is sent when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\", and the bookmark event is sent when the state is synced at least to the moment when the request started being processed. - If `resourceVersionMatch` is set to any other value or unset, an Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param V1DeleteOptions body:
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1Status
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.delete_collection_namespaced_resource_claim_template_with_http_info(namespace, **kwargs) # noqa: E501
def delete_collection_namespaced_resource_claim_template_with_http_info(self, namespace, **kwargs): # noqa: E501
"""delete_collection_namespaced_resource_claim_template # noqa: E501
delete collection of ResourceClaimTemplate # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_collection_namespaced_resource_claim_template_with_http_info(namespace, async_req=True)
>>> result = thread.get()
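As a hedged sketch, the raw response triple can be unpacked directly
(the namespace value is illustrative):
>>> data, status_code, headers = api.delete_collection_namespaced_resource_claim_template_with_http_info("default")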
:param async_req bool: execute request asynchronously
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue), and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid, whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart its list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error; the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent with the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be a non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per-object value if not specified; zero means delete immediately.
:param bool ignore_store_read_error_with_cluster_breaking_potential: If set to true, it will trigger an unsafe deletion of the resource in case the normal deletion flow fails with a corrupt object error. A resource is considered corrupt if it cannot be retrieved from the underlying storage successfully because (a) its data cannot be transformed, e.g. decryption failure, or (b) it fails to decode into an object. NOTE: unsafe deletion ignores finalizer constraints, skips precondition checks, and removes the object from the storage. WARNING: This may potentially break the cluster if the workload associated with the resource being unsafe-deleted relies on the normal deletion flow. Use only if you REALLY know what you are doing. The default value is false, and the user must opt in to enable it.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested number of items (down to zero items) in the event all requested objects are filtered out, and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure it sees all possible objects. If objects are updated during a chunked list, the version of the object that was present at the time the first list result was calculated is returned.
:param bool orphan_dependents: Deprecated: please use PropagationPolicy; this field will be deprecated in 1.7. Should the dependent objects be orphaned? If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
:param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset.
:param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset.
:param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with the `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When the `sendInitialEvents` option is set, we require the `resourceVersionMatch` option to also be set. The semantics of the watch request are as follows: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\", and the bookmark event is sent when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\", and the bookmark event is sent when the state is synced at least to the moment when the request started being processed. - If `resourceVersionMatch` is set to any other value or unset, an Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param V1DeleteOptions body:
:param _return_http_data_only: response data only, without status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1Status, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'namespace',
'pretty',
'_continue',
'dry_run',
'field_selector',
'grace_period_seconds',
'ignore_store_read_error_with_cluster_breaking_potential',
'label_selector',
'limit',
'orphan_dependents',
'propagation_policy',
'resource_version',
'resource_version_match',
'send_initial_events',
'timeout_seconds',
'body'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_collection_namespaced_resource_claim_template" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'namespace' is set
if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
local_var_params['namespace'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `namespace` when calling `delete_collection_namespaced_resource_claim_template`") # noqa: E501
collection_formats = {}
path_params = {}
if 'namespace' in local_var_params:
path_params['namespace'] = local_var_params['namespace'] # noqa: E501
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if '_continue' in local_var_params and local_var_params['_continue'] is not None: # noqa: E501
query_params.append(('continue', local_var_params['_continue'])) # noqa: E501
if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None: # noqa: E501
query_params.append(('fieldSelector', local_var_params['field_selector'])) # noqa: E501
if 'grace_period_seconds' in local_var_params and local_var_params['grace_period_seconds'] is not None: # noqa: E501
query_params.append(('gracePeriodSeconds', local_var_params['grace_period_seconds'])) # noqa: E501
if 'ignore_store_read_error_with_cluster_breaking_potential' in local_var_params and local_var_params['ignore_store_read_error_with_cluster_breaking_potential'] is not None: # noqa: E501
query_params.append(('ignoreStoreReadErrorWithClusterBreakingPotential', local_var_params['ignore_store_read_error_with_cluster_breaking_potential'])) # noqa: E501
if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None: # noqa: E501
query_params.append(('labelSelector', local_var_params['label_selector'])) # noqa: E501
if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501
query_params.append(('limit', local_var_params['limit'])) # noqa: E501
if 'orphan_dependents' in local_var_params and local_var_params['orphan_dependents'] is not None: # noqa: E501
query_params.append(('orphanDependents', local_var_params['orphan_dependents'])) # noqa: E501
if 'propagation_policy' in local_var_params and local_var_params['propagation_policy'] is not None: # noqa: E501
query_params.append(('propagationPolicy', local_var_params['propagation_policy'])) # noqa: E501
if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None: # noqa: E501
query_params.append(('resourceVersion', local_var_params['resource_version'])) # noqa: E501
if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None: # noqa: E501
query_params.append(('resourceVersionMatch', local_var_params['resource_version_match'])) # noqa: E501
if 'send_initial_events' in local_var_params and local_var_params['send_initial_events'] is not None: # noqa: E501
query_params.append(('sendInitialEvents', local_var_params['send_initial_events'])) # noqa: E501
if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None: # noqa: E501
query_params.append(('timeoutSeconds', local_var_params['timeout_seconds'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/resource.k8s.io/v1beta1/namespaces/{namespace}/resourceclaimtemplates', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Status', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def delete_collection_resource_slice(self, **kwargs): # noqa: E501
"""delete_collection_resource_slice # noqa: E501
delete collection of ResourceSlice # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_collection_resource_slice(async_req=True)
>>> result = thread.get()
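A minimal synchronous sketch; ResourceSlice is cluster-scoped, so no
namespace argument is passed (the label selector value is an
illustrative assumption):
>>> status = api.delete_collection_resource_slice(label_selector="app=demo")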
:param async_req bool: execute request asynchronously
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue), and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid, whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart its list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error; the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent with the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be a non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per-object value if not specified; zero means delete immediately.
:param bool ignore_store_read_error_with_cluster_breaking_potential: If set to true, it will trigger an unsafe deletion of the resource in case the normal deletion flow fails with a corrupt object error. A resource is considered corrupt if it cannot be retrieved from the underlying storage successfully because (a) its data cannot be transformed, e.g. decryption failure, or (b) it fails to decode into an object. NOTE: unsafe deletion ignores finalizer constraints, skips precondition checks, and removes the object from the storage. WARNING: This may potentially break the cluster if the workload associated with the resource being unsafe-deleted relies on the normal deletion flow. Use only if you REALLY know what you are doing. The default value is false, and the user must opt in to enable it.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested number of items (down to zero items) in the event all requested objects are filtered out, and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure it sees all possible objects. If objects are updated during a chunked list, the version of the object that was present at the time the first list result was calculated is returned.
:param bool orphan_dependents: Deprecated: please use PropagationPolicy; this field will be deprecated in 1.7. Should the dependent objects be orphaned? If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
:param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset.
:param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset.
:param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with the `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When the `sendInitialEvents` option is set, we require the `resourceVersionMatch` option to also be set. The semantics of the watch request are as follows: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\", and the bookmark event is sent when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\", and the bookmark event is sent when the state is synced at least to the moment when the request started being processed. - If `resourceVersionMatch` is set to any other value or unset, an Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param V1DeleteOptions body:
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1Status
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.delete_collection_resource_slice_with_http_info(**kwargs) # noqa: E501
def delete_collection_resource_slice_with_http_info(self, **kwargs): # noqa: E501
"""delete_collection_resource_slice # noqa: E501
delete collection of ResourceSlice # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_collection_resource_slice_with_http_info(async_req=True)
>>> result = thread.get()
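A hedged sketch of unpacking the raw response triple (all parameters
are optional for this cluster-scoped call):
>>> data, status_code, headers = api.delete_collection_resource_slice_with_http_info()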
:param async_req bool: execute request asynchronously
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue), and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid, whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart its list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error; the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent with the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be a non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per-object value if not specified; zero means delete immediately.
:param bool ignore_store_read_error_with_cluster_breaking_potential: If set to true, it will trigger an unsafe deletion of the resource in case the normal deletion flow fails with a corrupt object error. A resource is considered corrupt if it cannot be retrieved from the underlying storage successfully because (a) its data cannot be transformed, e.g. decryption failure, or (b) it fails to decode into an object. NOTE: unsafe deletion ignores finalizer constraints, skips precondition checks, and removes the object from the storage. WARNING: This may potentially break the cluster if the workload associated with the resource being unsafe-deleted relies on the normal deletion flow. Use only if you REALLY know what you are doing. The default value is false, and the user must opt in to enable it.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested number of items (down to zero items) in the event all requested objects are filtered out, and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure it sees all possible objects. If objects are updated during a chunked list, the version of the object that was present at the time the first list result was calculated is returned.
:param bool orphan_dependents: Deprecated: please use PropagationPolicy; this field will be deprecated in 1.7. Should the dependent objects be orphaned? If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
:param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset.
:param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset.
:param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with the `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When the `sendInitialEvents` option is set, we require the `resourceVersionMatch` option to also be set. The semantics of the watch request are as follows: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\", and the bookmark event is sent when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\", and the bookmark event is sent when the state is synced at least to the moment when the request started being processed. - If `resourceVersionMatch` is set to any other value or unset, an Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param V1DeleteOptions body:
:param _return_http_data_only: response data only, without status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1Status, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'pretty',
'_continue',
'dry_run',
'field_selector',
'grace_period_seconds',
'ignore_store_read_error_with_cluster_breaking_potential',
'label_selector',
'limit',
'orphan_dependents',
'propagation_policy',
'resource_version',
'resource_version_match',
'send_initial_events',
'timeout_seconds',
'body'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_collection_resource_slice" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if '_continue' in local_var_params and local_var_params['_continue'] is not None: # noqa: E501
query_params.append(('continue', local_var_params['_continue'])) # noqa: E501
if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None: # noqa: E501
query_params.append(('fieldSelector', local_var_params['field_selector'])) # noqa: E501
if 'grace_period_seconds' in local_var_params and local_var_params['grace_period_seconds'] is not None: # noqa: E501
query_params.append(('gracePeriodSeconds', local_var_params['grace_period_seconds'])) # noqa: E501
if 'ignore_store_read_error_with_cluster_breaking_potential' in local_var_params and local_var_params['ignore_store_read_error_with_cluster_breaking_potential'] is not None: # noqa: E501
query_params.append(('ignoreStoreReadErrorWithClusterBreakingPotential', local_var_params['ignore_store_read_error_with_cluster_breaking_potential'])) # noqa: E501
if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None: # noqa: E501
query_params.append(('labelSelector', local_var_params['label_selector'])) # noqa: E501
if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501
query_params.append(('limit', local_var_params['limit'])) # noqa: E501
if 'orphan_dependents' in local_var_params and local_var_params['orphan_dependents'] is not None: # noqa: E501
query_params.append(('orphanDependents', local_var_params['orphan_dependents'])) # noqa: E501
if 'propagation_policy' in local_var_params and local_var_params['propagation_policy'] is not None: # noqa: E501
query_params.append(('propagationPolicy', local_var_params['propagation_policy'])) # noqa: E501
if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None: # noqa: E501
query_params.append(('resourceVersion', local_var_params['resource_version'])) # noqa: E501
if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None: # noqa: E501
query_params.append(('resourceVersionMatch', local_var_params['resource_version_match'])) # noqa: E501
if 'send_initial_events' in local_var_params and local_var_params['send_initial_events'] is not None: # noqa: E501
query_params.append(('sendInitialEvents', local_var_params['send_initial_events'])) # noqa: E501
if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None: # noqa: E501
query_params.append(('timeoutSeconds', local_var_params['timeout_seconds'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/resource.k8s.io/v1beta1/resourceslices', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Status', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def delete_device_class(self, name, **kwargs): # noqa: E501
"""delete_device_class # noqa: E501
delete a DeviceClass # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_device_class(name, async_req=True)
>>> result = thread.get()
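A minimal synchronous sketch; the DeviceClass name "gpu.example.com" is
an illustrative assumption:
>>> device_class = api.delete_device_class("gpu.example.com")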
:param async_req bool: execute request asynchronously
:param str name: name of the DeviceClass (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be a non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per-object value if not specified; zero means delete immediately.
:param bool ignore_store_read_error_with_cluster_breaking_potential: If set to true, it will trigger an unsafe deletion of the resource in case the normal deletion flow fails with a corrupt object error. A resource is considered corrupt if it cannot be retrieved from the underlying storage successfully because (a) its data cannot be transformed, e.g. decryption failure, or (b) it fails to decode into an object. NOTE: unsafe deletion ignores finalizer constraints, skips precondition checks, and removes the object from the storage. WARNING: This may potentially break the cluster if the workload associated with the resource being unsafe-deleted relies on the normal deletion flow. Use only if you REALLY know what you are doing. The default value is false, and the user must opt in to enable it.
:param bool orphan_dependents: Deprecated: please use PropagationPolicy; this field will be deprecated in 1.7. Should the dependent objects be orphaned? If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
:param V1DeleteOptions body:
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1beta1DeviceClass
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.delete_device_class_with_http_info(name, **kwargs) # noqa: E501
def delete_device_class_with_http_info(self, name, **kwargs): # noqa: E501
"""delete_device_class # noqa: E501
delete a DeviceClass # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_device_class_with_http_info(name, async_req=True)
>>> result = thread.get()
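A hedged sketch of the raw-response variant (the DeviceClass name is
illustrative):
>>> data, status_code, headers = api.delete_device_class_with_http_info("gpu.example.com")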
:param async_req bool: execute request asynchronously
:param str name: name of the DeviceClass (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be a non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per-object value if not specified; zero means delete immediately.
:param bool ignore_store_read_error_with_cluster_breaking_potential: If set to true, it will trigger an unsafe deletion of the resource in case the normal deletion flow fails with a corrupt object error. A resource is considered corrupt if it cannot be retrieved from the underlying storage successfully because (a) its data cannot be transformed, e.g. decryption failure, or (b) it fails to decode into an object. NOTE: unsafe deletion ignores finalizer constraints, skips precondition checks, and removes the object from the storage. WARNING: This may potentially break the cluster if the workload associated with the resource being unsafe-deleted relies on the normal deletion flow. Use only if you REALLY know what you are doing. The default value is false, and the user must opt in to enable it.
:param bool orphan_dependents: Deprecated: please use PropagationPolicy; this field will be deprecated in 1.7. Should the dependent objects be orphaned? If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
:param V1DeleteOptions body:
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1beta1DeviceClass, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'name',
'pretty',
'dry_run',
'grace_period_seconds',
'ignore_store_read_error_with_cluster_breaking_potential',
'orphan_dependents',
'propagation_policy',
'body'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_device_class" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'name' is set
if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
local_var_params['name'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `name` when calling `delete_device_class`") # noqa: E501
collection_formats = {}
path_params = {}
if 'name' in local_var_params:
path_params['name'] = local_var_params['name'] # noqa: E501
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
if 'grace_period_seconds' in local_var_params and local_var_params['grace_period_seconds'] is not None: # noqa: E501
query_params.append(('gracePeriodSeconds', local_var_params['grace_period_seconds'])) # noqa: E501
if 'ignore_store_read_error_with_cluster_breaking_potential' in local_var_params and local_var_params['ignore_store_read_error_with_cluster_breaking_potential'] is not None: # noqa: E501
query_params.append(('ignoreStoreReadErrorWithClusterBreakingPotential', local_var_params['ignore_store_read_error_with_cluster_breaking_potential'])) # noqa: E501
if 'orphan_dependents' in local_var_params and local_var_params['orphan_dependents'] is not None: # noqa: E501
query_params.append(('orphanDependents', local_var_params['orphan_dependents'])) # noqa: E501
if 'propagation_policy' in local_var_params and local_var_params['propagation_policy'] is not None: # noqa: E501
query_params.append(('propagationPolicy', local_var_params['propagation_policy'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/resource.k8s.io/v1beta1/deviceclasses/{name}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1beta1DeviceClass', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
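    # Illustrative usage sketch, written as comments so the generated module
    # stays importable; it is not part of the generated client. It assumes a
    # cluster reachable through a local kubeconfig, an existing DeviceClass
    # named "example-class", and that this API class is exposed as
    # kubernetes.client.ResourceV1beta1Api.
    #
    #   from kubernetes import client, config
    #
    #   config.load_kube_config()
    #   api = client.ResourceV1beta1Api()
    #
    #   # Synchronous call; returns the deleted V1beta1DeviceClass.
    #   deleted = api.delete_device_class(
    #       "example-class",
    #       body=client.V1DeleteOptions(propagation_policy="Background"),
    #   )
    #
    #   # Asynchronous variant, as documented above.
    #   thread = api.delete_device_class("example-class", async_req=True)
    #   deleted = thread.get()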
def delete_namespaced_resource_claim(self, name, namespace, **kwargs): # noqa: E501
"""delete_namespaced_resource_claim # noqa: E501
delete a ResourceClaim # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_namespaced_resource_claim(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the ResourceClaim (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
:param bool ignore_store_read_error_with_cluster_breaking_potential: if set to true, it will trigger an unsafe deletion of the resource in case the normal deletion flow fails with a corrupt object error. A resource is considered corrupt if it can not be retrieved from the underlying storage successfully because of a) its data can not be transformed e.g. decryption failure, or b) it fails to decode into an object. NOTE: unsafe deletion ignores finalizer constraints, skips precondition checks, and removes the object from the storage. WARNING: This may potentially break the cluster if the workload associated with the resource being unsafe-deleted relies on normal deletion flow. Use only if you REALLY know what you are doing. The default value is false, and the user must opt in to enable it
:param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
:param V1DeleteOptions body:
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1beta1ResourceClaim
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.delete_namespaced_resource_claim_with_http_info(name, namespace, **kwargs) # noqa: E501
def delete_namespaced_resource_claim_with_http_info(self, name, namespace, **kwargs): # noqa: E501
"""delete_namespaced_resource_claim # noqa: E501
delete a ResourceClaim # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_namespaced_resource_claim_with_http_info(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the ResourceClaim (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
:param bool ignore_store_read_error_with_cluster_breaking_potential: if set to true, it will trigger an unsafe deletion of the resource in case the normal deletion flow fails with a corrupt object error. A resource is considered corrupt if it can not be retrieved from the underlying storage successfully because of a) its data can not be transformed e.g. decryption failure, or b) it fails to decode into an object. NOTE: unsafe deletion ignores finalizer constraints, skips precondition checks, and removes the object from the storage. WARNING: This may potentially break the cluster if the workload associated with the resource being unsafe-deleted relies on normal deletion flow. Use only if you REALLY know what you are doing. The default value is false, and the user must opt in to enable it
:param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
:param V1DeleteOptions body:
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1beta1ResourceClaim, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'name',
'namespace',
'pretty',
'dry_run',
'grace_period_seconds',
'ignore_store_read_error_with_cluster_breaking_potential',
'orphan_dependents',
'propagation_policy',
'body'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_namespaced_resource_claim" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'name' is set
if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
local_var_params['name'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `name` when calling `delete_namespaced_resource_claim`") # noqa: E501
# verify the required parameter 'namespace' is set
if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
local_var_params['namespace'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `namespace` when calling `delete_namespaced_resource_claim`") # noqa: E501
collection_formats = {}
path_params = {}
if 'name' in local_var_params:
path_params['name'] = local_var_params['name'] # noqa: E501
if 'namespace' in local_var_params:
path_params['namespace'] = local_var_params['namespace'] # noqa: E501
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
if 'grace_period_seconds' in local_var_params and local_var_params['grace_period_seconds'] is not None: # noqa: E501
query_params.append(('gracePeriodSeconds', local_var_params['grace_period_seconds'])) # noqa: E501
if 'ignore_store_read_error_with_cluster_breaking_potential' in local_var_params and local_var_params['ignore_store_read_error_with_cluster_breaking_potential'] is not None: # noqa: E501
query_params.append(('ignoreStoreReadErrorWithClusterBreakingPotential', local_var_params['ignore_store_read_error_with_cluster_breaking_potential'])) # noqa: E501
if 'orphan_dependents' in local_var_params and local_var_params['orphan_dependents'] is not None: # noqa: E501
query_params.append(('orphanDependents', local_var_params['orphan_dependents'])) # noqa: E501
if 'propagation_policy' in local_var_params and local_var_params['propagation_policy'] is not None: # noqa: E501
query_params.append(('propagationPolicy', local_var_params['propagation_policy'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/resource.k8s.io/v1beta1/namespaces/{namespace}/resourceclaims/{name}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1beta1ResourceClaim', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
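    # Illustrative usage sketch (comments only, not part of the generated
    # client), reusing the `api` object from the sketch after
    # delete_device_class_with_http_info. It assumes a ResourceClaim named
    # "example-claim" exists in the "default" namespace.
    #
    #   claim = api.delete_namespaced_resource_claim(
    #       name="example-claim",
    #       namespace="default",
    #       grace_period_seconds=0,
    #   )
    #   print(claim.metadata.name)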
def delete_namespaced_resource_claim_template(self, name, namespace, **kwargs): # noqa: E501
"""delete_namespaced_resource_claim_template # noqa: E501
delete a ResourceClaimTemplate # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_namespaced_resource_claim_template(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the ResourceClaimTemplate (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
:param bool ignore_store_read_error_with_cluster_breaking_potential: if set to true, it will trigger an unsafe deletion of the resource in case the normal deletion flow fails with a corrupt object error. A resource is considered corrupt if it can not be retrieved from the underlying storage successfully because of a) its data can not be transformed e.g. decryption failure, or b) it fails to decode into an object. NOTE: unsafe deletion ignores finalizer constraints, skips precondition checks, and removes the object from the storage. WARNING: This may potentially break the cluster if the workload associated with the resource being unsafe-deleted relies on normal deletion flow. Use only if you REALLY know what you are doing. The default value is false, and the user must opt in to enable it
:param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
:param V1DeleteOptions body:
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1beta1ResourceClaimTemplate
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.delete_namespaced_resource_claim_template_with_http_info(name, namespace, **kwargs) # noqa: E501
def delete_namespaced_resource_claim_template_with_http_info(self, name, namespace, **kwargs): # noqa: E501
"""delete_namespaced_resource_claim_template # noqa: E501
delete a ResourceClaimTemplate # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_namespaced_resource_claim_template_with_http_info(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the ResourceClaimTemplate (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
:param bool ignore_store_read_error_with_cluster_breaking_potential: if set to true, it will trigger an unsafe deletion of the resource in case the normal deletion flow fails with a corrupt object error. A resource is considered corrupt if it can not be retrieved from the underlying storage successfully because of a) its data can not be transformed e.g. decryption failure, or b) it fails to decode into an object. NOTE: unsafe deletion ignores finalizer constraints, skips precondition checks, and removes the object from the storage. WARNING: This may potentially break the cluster if the workload associated with the resource being unsafe-deleted relies on normal deletion flow. Use only if you REALLY know what you are doing. The default value is false, and the user must opt in to enable it
:param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
:param V1DeleteOptions body:
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1beta1ResourceClaimTemplate, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'name',
'namespace',
'pretty',
'dry_run',
'grace_period_seconds',
'ignore_store_read_error_with_cluster_breaking_potential',
'orphan_dependents',
'propagation_policy',
'body'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_namespaced_resource_claim_template" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'name' is set
if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
local_var_params['name'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `name` when calling `delete_namespaced_resource_claim_template`") # noqa: E501
# verify the required parameter 'namespace' is set
if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
local_var_params['namespace'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `namespace` when calling `delete_namespaced_resource_claim_template`") # noqa: E501
collection_formats = {}
path_params = {}
if 'name' in local_var_params:
path_params['name'] = local_var_params['name'] # noqa: E501
if 'namespace' in local_var_params:
path_params['namespace'] = local_var_params['namespace'] # noqa: E501
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
if 'grace_period_seconds' in local_var_params and local_var_params['grace_period_seconds'] is not None: # noqa: E501
query_params.append(('gracePeriodSeconds', local_var_params['grace_period_seconds'])) # noqa: E501
if 'ignore_store_read_error_with_cluster_breaking_potential' in local_var_params and local_var_params['ignore_store_read_error_with_cluster_breaking_potential'] is not None: # noqa: E501
query_params.append(('ignoreStoreReadErrorWithClusterBreakingPotential', local_var_params['ignore_store_read_error_with_cluster_breaking_potential'])) # noqa: E501
if 'orphan_dependents' in local_var_params and local_var_params['orphan_dependents'] is not None: # noqa: E501
query_params.append(('orphanDependents', local_var_params['orphan_dependents'])) # noqa: E501
if 'propagation_policy' in local_var_params and local_var_params['propagation_policy'] is not None: # noqa: E501
query_params.append(('propagationPolicy', local_var_params['propagation_policy'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/resource.k8s.io/v1beta1/namespaces/{namespace}/resourceclaimtemplates/{name}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1beta1ResourceClaimTemplate', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
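    # Illustrative usage sketch (comments only, not part of the generated
    # client), reusing the `api` object from the earlier sketches. It assumes
    # a ResourceClaimTemplate named "example-template" in the "default"
    # namespace; dry_run="All" exercises the server-side dry-run behaviour
    # documented above, so nothing is actually deleted.
    #
    #   tmpl = api.delete_namespaced_resource_claim_template(
    #       name="example-template",
    #       namespace="default",
    #       dry_run="All",
    #   )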
def delete_resource_slice(self, name, **kwargs): # noqa: E501
"""delete_resource_slice # noqa: E501
delete a ResourceSlice # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_resource_slice(name, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the ResourceSlice (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
:param bool ignore_store_read_error_with_cluster_breaking_potential: if set to true, it will trigger an unsafe deletion of the resource in case the normal deletion flow fails with a corrupt object error. A resource is considered corrupt if it can not be retrieved from the underlying storage successfully because of a) its data can not be transformed e.g. decryption failure, or b) it fails to decode into an object. NOTE: unsafe deletion ignores finalizer constraints, skips precondition checks, and removes the object from the storage. WARNING: This may potentially break the cluster if the workload associated with the resource being unsafe-deleted relies on normal deletion flow. Use only if you REALLY know what you are doing. The default value is false, and the user must opt in to enable it
:param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
:param V1DeleteOptions body:
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1beta1ResourceSlice
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.delete_resource_slice_with_http_info(name, **kwargs) # noqa: E501
def delete_resource_slice_with_http_info(self, name, **kwargs): # noqa: E501
"""delete_resource_slice # noqa: E501
delete a ResourceSlice # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_resource_slice_with_http_info(name, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the ResourceSlice (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
:param bool ignore_store_read_error_with_cluster_breaking_potential: if set to true, it will trigger an unsafe deletion of the resource in case the normal deletion flow fails with a corrupt object error. A resource is considered corrupt if it can not be retrieved from the underlying storage successfully because of a) its data can not be transformed e.g. decryption failure, or b) it fails to decode into an object. NOTE: unsafe deletion ignores finalizer constraints, skips precondition checks, and removes the object from the storage. WARNING: This may potentially break the cluster if the workload associated with the resource being unsafe-deleted relies on normal deletion flow. Use only if you REALLY know what you are doing. The default value is false, and the user must opt in to enable it
:param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
:param V1DeleteOptions body:
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1beta1ResourceSlice, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'name',
'pretty',
'dry_run',
'grace_period_seconds',
'ignore_store_read_error_with_cluster_breaking_potential',
'orphan_dependents',
'propagation_policy',
'body'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_resource_slice" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'name' is set
if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
local_var_params['name'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `name` when calling `delete_resource_slice`") # noqa: E501
collection_formats = {}
path_params = {}
if 'name' in local_var_params:
path_params['name'] = local_var_params['name'] # noqa: E501
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
if 'grace_period_seconds' in local_var_params and local_var_params['grace_period_seconds'] is not None: # noqa: E501
query_params.append(('gracePeriodSeconds', local_var_params['grace_period_seconds'])) # noqa: E501
if 'ignore_store_read_error_with_cluster_breaking_potential' in local_var_params and local_var_params['ignore_store_read_error_with_cluster_breaking_potential'] is not None: # noqa: E501
query_params.append(('ignoreStoreReadErrorWithClusterBreakingPotential', local_var_params['ignore_store_read_error_with_cluster_breaking_potential'])) # noqa: E501
if 'orphan_dependents' in local_var_params and local_var_params['orphan_dependents'] is not None: # noqa: E501
query_params.append(('orphanDependents', local_var_params['orphan_dependents'])) # noqa: E501
if 'propagation_policy' in local_var_params and local_var_params['propagation_policy'] is not None: # noqa: E501
query_params.append(('propagationPolicy', local_var_params['propagation_policy'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/resource.k8s.io/v1beta1/resourceslices/{name}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1beta1ResourceSlice', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
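    # Illustrative usage sketch (comments only, not part of the generated
    # client), reusing the `api` object from the earlier sketches. It assumes
    # a cluster-scoped ResourceSlice named "example-slice". With
    # _preload_content=False the raw HTTP response is returned instead of a
    # deserialized V1beta1ResourceSlice, as described in the docstring above.
    #
    #   raw = api.delete_resource_slice("example-slice", _preload_content=False)
    #   print(raw.status, raw.data)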
def get_api_resources(self, **kwargs): # noqa: E501
"""get_api_resources # noqa: E501
get available resources # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_api_resources(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1APIResourceList
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.get_api_resources_with_http_info(**kwargs) # noqa: E501
def get_api_resources_with_http_info(self, **kwargs): # noqa: E501
"""get_api_resources # noqa: E501
get available resources # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_api_resources_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1APIResourceList, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method get_api_resources" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/resource.k8s.io/v1beta1/', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1APIResourceList', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
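    # Illustrative usage sketch (comments only, not part of the generated
    # client), reusing the `api` object from the earlier sketches. It prints
    # the resource kinds served under resource.k8s.io/v1beta1.
    #
    #   resource_list = api.get_api_resources()
    #   for resource in resource_list.resources:
    #       print(resource.name, resource.kind, resource.namespaced)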
def list_device_class(self, **kwargs): # noqa: E501
"""list_device_class # noqa: E501
list or watch objects of kind DeviceClass # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_device_class(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
:param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
        :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When the `sendInitialEvents` option is set, the `resourceVersionMatch` option must also be set. The semantics of the watch request are as follows: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is sent when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is sent when the state is synced at least to the moment when the request started being processed. - `resourceVersionMatch` set to any other value or unset, an Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1beta1DeviceClassList
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.list_device_class_with_http_info(**kwargs) # noqa: E501
def list_device_class_with_http_info(self, **kwargs): # noqa: E501
"""list_device_class # noqa: E501
list or watch objects of kind DeviceClass # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_device_class_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
:param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
        :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When the `sendInitialEvents` option is set, the `resourceVersionMatch` option must also be set. The semantics of the watch request are as follows: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is sent when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is sent when the state is synced at least to the moment when the request started being processed. - `resourceVersionMatch` set to any other value or unset, an Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1beta1DeviceClassList, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'pretty',
'allow_watch_bookmarks',
'_continue',
'field_selector',
'label_selector',
'limit',
'resource_version',
'resource_version_match',
'send_initial_events',
'timeout_seconds',
'watch'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method list_device_class" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if 'allow_watch_bookmarks' in local_var_params and local_var_params['allow_watch_bookmarks'] is not None: # noqa: E501
query_params.append(('allowWatchBookmarks', local_var_params['allow_watch_bookmarks'])) # noqa: E501
if '_continue' in local_var_params and local_var_params['_continue'] is not None: # noqa: E501
query_params.append(('continue', local_var_params['_continue'])) # noqa: E501
if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None: # noqa: E501
query_params.append(('fieldSelector', local_var_params['field_selector'])) # noqa: E501
if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None: # noqa: E501
query_params.append(('labelSelector', local_var_params['label_selector'])) # noqa: E501
if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501
query_params.append(('limit', local_var_params['limit'])) # noqa: E501
if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None: # noqa: E501
query_params.append(('resourceVersion', local_var_params['resource_version'])) # noqa: E501
if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None: # noqa: E501
query_params.append(('resourceVersionMatch', local_var_params['resource_version_match'])) # noqa: E501
if 'send_initial_events' in local_var_params and local_var_params['send_initial_events'] is not None: # noqa: E501
query_params.append(('sendInitialEvents', local_var_params['send_initial_events'])) # noqa: E501
if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None: # noqa: E501
query_params.append(('timeoutSeconds', local_var_params['timeout_seconds'])) # noqa: E501
if 'watch' in local_var_params and local_var_params['watch'] is not None: # noqa: E501
query_params.append(('watch', local_var_params['watch'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch', 'application/cbor-seq']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/resource.k8s.io/v1beta1/deviceclasses', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1beta1DeviceClassList', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)

def list_namespaced_resource_claim(self, namespace, **kwargs): # noqa: E501
"""list_namespaced_resource_claim # noqa: E501
list or watch objects of kind ResourceClaim # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_namespaced_resource_claim(namespace, async_req=True)
>>> result = thread.get()
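
A hedged watch sketch (not part of the generated documentation):
streaming claim events via kubernetes.watch; assumes a configured
client, and the 'default' namespace and 10s timeout are illustrative:
>>> from kubernetes import watch
>>> w = watch.Watch()
>>> for event in w.stream(api.list_namespaced_resource_claim,
...                       namespace='default', timeout_seconds=10):
...     print(event['type'], event['object'].metadata.name)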
:param async_req bool: execute request asynchronously
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag, and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart its list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error; the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent with the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested number of items (up to zero items) in the event all requested objects are filtered out, and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure it sees all possible objects. If objects are updated during a chunked list, the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
:param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
:param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with the `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When the `sendInitialEvents` option is set, the `resourceVersionMatch` option must also be set. The semantics of the watch request are as follows: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is sent when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is sent when the state is synced at least to the moment when the request started being processed. - if `resourceVersionMatch` is set to any other value or left unset, an Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If a single
number is provided, it will be the total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1beta1ResourceClaimList
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.list_namespaced_resource_claim_with_http_info(namespace, **kwargs) # noqa: E501

def list_namespaced_resource_claim_with_http_info(self, namespace, **kwargs): # noqa: E501
"""list_namespaced_resource_claim # noqa: E501
list or watch objects of kind ResourceClaim # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_namespaced_resource_claim_with_http_info(namespace, async_req=True)
>>> result = thread.get()
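
A hedged sketch (not part of the generated documentation) of the tuple
return on a synchronous call; assumes a configured client, and the
'default' namespace and 200 status are illustrative:
>>> data, status, headers = api.list_namespaced_resource_claim_with_http_info('default')
>>> status
200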
:param async_req bool: execute request asynchronously
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag, and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart its list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error; the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent with the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested number of items (up to zero items) in the event all requested objects are filtered out, and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure it sees all possible objects. If objects are updated during a chunked list, the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
:param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
:param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with the `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When the `sendInitialEvents` option is set, the `resourceVersionMatch` option must also be set. The semantics of the watch request are as follows: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is sent when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is sent when the state is synced at least to the moment when the request started being processed. - if `resourceVersionMatch` is set to any other value or left unset, an Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:param _return_http_data_only: return response data only, without the
HTTP status code and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If a single
number is provided, it will be the total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1beta1ResourceClaimList, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'namespace',
'pretty',
'allow_watch_bookmarks',
'_continue',
'field_selector',
'label_selector',
'limit',
'resource_version',
'resource_version_match',
'send_initial_events',
'timeout_seconds',
'watch'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method list_namespaced_resource_claim" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'namespace' is set
if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
local_var_params['namespace'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `namespace` when calling `list_namespaced_resource_claim`") # noqa: E501
collection_formats = {}
path_params = {}
if 'namespace' in local_var_params:
path_params['namespace'] = local_var_params['namespace'] # noqa: E501
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if 'allow_watch_bookmarks' in local_var_params and local_var_params['allow_watch_bookmarks'] is not None: # noqa: E501
query_params.append(('allowWatchBookmarks', local_var_params['allow_watch_bookmarks'])) # noqa: E501
if '_continue' in local_var_params and local_var_params['_continue'] is not None: # noqa: E501
query_params.append(('continue', local_var_params['_continue'])) # noqa: E501
if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None: # noqa: E501
query_params.append(('fieldSelector', local_var_params['field_selector'])) # noqa: E501
if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None: # noqa: E501
query_params.append(('labelSelector', local_var_params['label_selector'])) # noqa: E501
if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501
query_params.append(('limit', local_var_params['limit'])) # noqa: E501
if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None: # noqa: E501
query_params.append(('resourceVersion', local_var_params['resource_version'])) # noqa: E501
if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None: # noqa: E501
query_params.append(('resourceVersionMatch', local_var_params['resource_version_match'])) # noqa: E501
if 'send_initial_events' in local_var_params and local_var_params['send_initial_events'] is not None: # noqa: E501
query_params.append(('sendInitialEvents', local_var_params['send_initial_events'])) # noqa: E501
if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None: # noqa: E501
query_params.append(('timeoutSeconds', local_var_params['timeout_seconds'])) # noqa: E501
if 'watch' in local_var_params and local_var_params['watch'] is not None: # noqa: E501
query_params.append(('watch', local_var_params['watch'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch', 'application/cbor-seq']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/resource.k8s.io/v1beta1/namespaces/{namespace}/resourceclaims', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1beta1ResourceClaimList', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)

def list_namespaced_resource_claim_template(self, namespace, **kwargs): # noqa: E501
"""list_namespaced_resource_claim_template # noqa: E501
list or watch objects of kind ResourceClaimTemplate # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_namespaced_resource_claim_template(namespace, async_req=True)
>>> result = thread.get()
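
A hedged selector sketch (not part of the generated documentation);
assumes a configured client, and the namespace and label key/value are
illustrative:
>>> templates = api.list_namespaced_resource_claim_template(
...     'default', label_selector='app=my-workload')
>>> [t.metadata.name for t in templates.items]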
:param async_req bool: execute request asynchronously
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag, and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart its list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error; the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent with the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested number of items (up to zero items) in the event all requested objects are filtered out, and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure it sees all possible objects. If objects are updated during a chunked list, the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
:param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
:param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with the `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When the `sendInitialEvents` option is set, the `resourceVersionMatch` option must also be set. The semantics of the watch request are as follows: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is sent when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is sent when the state is synced at least to the moment when the request started being processed. - if `resourceVersionMatch` is set to any other value or left unset, an Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If a single
number is provided, it will be the total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1beta1ResourceClaimTemplateList
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.list_namespaced_resource_claim_template_with_http_info(namespace, **kwargs) # noqa: E501

def list_namespaced_resource_claim_template_with_http_info(self, namespace, **kwargs): # noqa: E501
"""list_namespaced_resource_claim_template # noqa: E501
list or watch objects of kind ResourceClaimTemplate # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_namespaced_resource_claim_template_with_http_info(namespace, async_req=True)
>>> result = thread.get()
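
A hedged sketch (not part of the generated documentation) of skipping
deserialization; assumes a configured client, and note that with
_preload_content=False the raw response object is returned directly:
>>> raw = api.list_namespaced_resource_claim_template_with_http_info(
...     'default', _preload_content=False)
>>> body = raw.data  # undecoded response bytes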
:param async_req bool: execute request asynchronously
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag, and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart its list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error; the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent with the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested number of items (up to zero items) in the event all requested objects are filtered out, and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure it sees all possible objects. If objects are updated during a chunked list, the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
:param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
:param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with the `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When the `sendInitialEvents` option is set, the `resourceVersionMatch` option must also be set. The semantics of the watch request are as follows: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is sent when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is sent when the state is synced at least to the moment when the request started being processed. - if `resourceVersionMatch` is set to any other value or left unset, an Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:param _return_http_data_only: return response data only, without the
HTTP status code and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If a single
number is provided, it will be the total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1beta1ResourceClaimTemplateList, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'namespace',
'pretty',
'allow_watch_bookmarks',
'_continue',
'field_selector',
'label_selector',
'limit',
'resource_version',
'resource_version_match',
'send_initial_events',
'timeout_seconds',
'watch'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method list_namespaced_resource_claim_template" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'namespace' is set
if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
local_var_params['namespace'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `namespace` when calling `list_namespaced_resource_claim_template`") # noqa: E501
collection_formats = {}
path_params = {}
if 'namespace' in local_var_params:
path_params['namespace'] = local_var_params['namespace'] # noqa: E501
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if 'allow_watch_bookmarks' in local_var_params and local_var_params['allow_watch_bookmarks'] is not None: # noqa: E501
query_params.append(('allowWatchBookmarks', local_var_params['allow_watch_bookmarks'])) # noqa: E501
if '_continue' in local_var_params and local_var_params['_continue'] is not None: # noqa: E501
query_params.append(('continue', local_var_params['_continue'])) # noqa: E501
if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None: # noqa: E501
query_params.append(('fieldSelector', local_var_params['field_selector'])) # noqa: E501
if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None: # noqa: E501
query_params.append(('labelSelector', local_var_params['label_selector'])) # noqa: E501
if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501
query_params.append(('limit', local_var_params['limit'])) # noqa: E501
if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None: # noqa: E501
query_params.append(('resourceVersion', local_var_params['resource_version'])) # noqa: E501
if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None: # noqa: E501
query_params.append(('resourceVersionMatch', local_var_params['resource_version_match'])) # noqa: E501
if 'send_initial_events' in local_var_params and local_var_params['send_initial_events'] is not None: # noqa: E501
query_params.append(('sendInitialEvents', local_var_params['send_initial_events'])) # noqa: E501
if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None: # noqa: E501
query_params.append(('timeoutSeconds', local_var_params['timeout_seconds'])) # noqa: E501
if 'watch' in local_var_params and local_var_params['watch'] is not None: # noqa: E501
query_params.append(('watch', local_var_params['watch'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch', 'application/cbor-seq']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/resource.k8s.io/v1beta1/namespaces/{namespace}/resourceclaimtemplates', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1beta1ResourceClaimTemplateList', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)

def list_resource_claim_for_all_namespaces(self, **kwargs): # noqa: E501
"""list_resource_claim_for_all_namespaces # noqa: E501
list or watch objects of kind ResourceClaim # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_resource_claim_for_all_namespaces(async_req=True)
>>> result = thread.get()
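
A hedged pagination sketch (not part of the generated documentation)
using the limit/continue protocol described below; assumes a configured
client, and the page size of 50 is illustrative:
>>> page = api.list_resource_claim_for_all_namespaces(limit=50)
>>> while True:
...     for claim in page.items:
...         print(claim.metadata.namespace, claim.metadata.name)
...     if not page.metadata._continue:
...         break
...     page = api.list_resource_claim_for_all_namespaces(
...         limit=50, _continue=page.metadata._continue)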
:param async_req bool: execute request asynchronously
:param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag, and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart its list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error; the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent with the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested number of items (up to zero items) in the event all requested objects are filtered out, and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure it sees all possible objects. If objects are updated during a chunked list, the version of the object that was present at the time the first list result was calculated is returned.
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
:param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
:param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with the `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When the `sendInitialEvents` option is set, the `resourceVersionMatch` option must also be set. The semantics of the watch request are as follows: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is sent when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is sent when the state is synced at least to the moment when the request started being processed. - if `resourceVersionMatch` is set to any other value or left unset, an Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If a single
number is provided, it will be the total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1beta1ResourceClaimList
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.list_resource_claim_for_all_namespaces_with_http_info(**kwargs) # noqa: E501

def list_resource_claim_for_all_namespaces_with_http_info(self, **kwargs): # noqa: E501
"""list_resource_claim_for_all_namespaces # noqa: E501
list or watch objects of kind ResourceClaim # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_resource_claim_for_all_namespaces_with_http_info(async_req=True)
>>> result = thread.get()
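
A hedged async sketch (not part of the generated documentation) reusing
the thread-based pattern above; assumes a configured client, and the
limit of 10 is illustrative:
>>> thread = api.list_resource_claim_for_all_namespaces_with_http_info(
...     async_req=True, limit=10)
>>> data, status, headers = thread.get()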
:param async_req bool: execute request asynchronously
:param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag, and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart its list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error; the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent with the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested number of items (up to zero items) in the event all requested objects are filtered out, and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure it sees all possible objects. If objects are updated during a chunked list, the version of the object that was present at the time the first list result was calculated is returned.
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
:param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
:param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with the `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When the `sendInitialEvents` option is set, the `resourceVersionMatch` option must also be set. The semantics of the watch request are as follows: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is sent when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is sent when the state is synced at least to the moment when the request started being processed. - if `resourceVersionMatch` is set to any other value or left unset, an Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:param _return_http_data_only: return response data only, without the
HTTP status code and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If a single
number is provided, it will be the total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1beta1ResourceClaimList, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'allow_watch_bookmarks',
'_continue',
'field_selector',
'label_selector',
'limit',
'pretty',
'resource_version',
'resource_version_match',
'send_initial_events',
'timeout_seconds',
'watch'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method list_resource_claim_for_all_namespaces" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'allow_watch_bookmarks' in local_var_params and local_var_params['allow_watch_bookmarks'] is not None: # noqa: E501
query_params.append(('allowWatchBookmarks', local_var_params['allow_watch_bookmarks'])) # noqa: E501
if '_continue' in local_var_params and local_var_params['_continue'] is not None: # noqa: E501
query_params.append(('continue', local_var_params['_continue'])) # noqa: E501
if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None: # noqa: E501
query_params.append(('fieldSelector', local_var_params['field_selector'])) # noqa: E501
if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None: # noqa: E501
query_params.append(('labelSelector', local_var_params['label_selector'])) # noqa: E501
if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501
query_params.append(('limit', local_var_params['limit'])) # noqa: E501
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None: # noqa: E501
query_params.append(('resourceVersion', local_var_params['resource_version'])) # noqa: E501
if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None: # noqa: E501
query_params.append(('resourceVersionMatch', local_var_params['resource_version_match'])) # noqa: E501
if 'send_initial_events' in local_var_params and local_var_params['send_initial_events'] is not None: # noqa: E501
query_params.append(('sendInitialEvents', local_var_params['send_initial_events'])) # noqa: E501
if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None: # noqa: E501
query_params.append(('timeoutSeconds', local_var_params['timeout_seconds'])) # noqa: E501
if 'watch' in local_var_params and local_var_params['watch'] is not None: # noqa: E501
query_params.append(('watch', local_var_params['watch'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch', 'application/cbor-seq']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/resource.k8s.io/v1beta1/resourceclaims', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1beta1ResourceClaimList', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)

def list_resource_claim_template_for_all_namespaces(self, **kwargs): # noqa: E501
"""list_resource_claim_template_for_all_namespaces # noqa: E501
list or watch objects of kind ResourceClaimTemplate # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_resource_claim_template_for_all_namespaces(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset.
:param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset.
:param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with the `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When the `sendInitialEvents` option is set, the `resourceVersionMatch` option must also be set. The semantics of the watch request are as follows: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is sent when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is sent when the state is synced at least to the moment when the request started being processed. - `resourceVersionMatch` set to any other value or unset: Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1beta1ResourceClaimTemplateList
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.list_resource_claim_template_for_all_namespaces_with_http_info(**kwargs) # noqa: E501
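
    # Usage sketch (illustrative only, not part of the generated API surface;
    # assumes a loadable kubeconfig and a cluster serving resource.k8s.io/v1beta1):
    #
    #     from kubernetes import client, config
    #
    #     config.load_kube_config()
    #     api = client.ResourceV1beta1Api()
    #     templates = api.list_resource_claim_template_for_all_namespaces(limit=50)
    #     for tpl in templates.items:
    #         print(tpl.metadata.namespace, tpl.metadata.name)
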
def list_resource_claim_template_for_all_namespaces_with_http_info(self, **kwargs): # noqa: E501
"""list_resource_claim_template_for_all_namespaces # noqa: E501
list or watch objects of kind ResourceClaimTemplate # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_resource_claim_template_for_all_namespaces_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart its list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error; the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent with the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested number of items (up to zero items) in the event all requested objects are filtered out, and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure it sees all possible objects. If objects are updated during a chunked list, the version of the object that was present at the time the first list result was calculated is returned.
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset.
:param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset.
:param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with the `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When the `sendInitialEvents` option is set, the `resourceVersionMatch` option must also be set. The semantics of the watch request are as follows: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is sent when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is sent when the state is synced at least to the moment when the request started being processed. - `resourceVersionMatch` set to any other value or unset: Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:param _return_http_data_only: return the response data only, without the
                               status code and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1beta1ResourceClaimTemplateList, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'allow_watch_bookmarks',
'_continue',
'field_selector',
'label_selector',
'limit',
'pretty',
'resource_version',
'resource_version_match',
'send_initial_events',
'timeout_seconds',
'watch'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method list_resource_claim_template_for_all_namespaces" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'allow_watch_bookmarks' in local_var_params and local_var_params['allow_watch_bookmarks'] is not None: # noqa: E501
query_params.append(('allowWatchBookmarks', local_var_params['allow_watch_bookmarks'])) # noqa: E501
if '_continue' in local_var_params and local_var_params['_continue'] is not None: # noqa: E501
query_params.append(('continue', local_var_params['_continue'])) # noqa: E501
if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None: # noqa: E501
query_params.append(('fieldSelector', local_var_params['field_selector'])) # noqa: E501
if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None: # noqa: E501
query_params.append(('labelSelector', local_var_params['label_selector'])) # noqa: E501
if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501
query_params.append(('limit', local_var_params['limit'])) # noqa: E501
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None: # noqa: E501
query_params.append(('resourceVersion', local_var_params['resource_version'])) # noqa: E501
if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None: # noqa: E501
query_params.append(('resourceVersionMatch', local_var_params['resource_version_match'])) # noqa: E501
if 'send_initial_events' in local_var_params and local_var_params['send_initial_events'] is not None: # noqa: E501
query_params.append(('sendInitialEvents', local_var_params['send_initial_events'])) # noqa: E501
if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None: # noqa: E501
query_params.append(('timeoutSeconds', local_var_params['timeout_seconds'])) # noqa: E501
if 'watch' in local_var_params and local_var_params['watch'] is not None: # noqa: E501
query_params.append(('watch', local_var_params['watch'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch', 'application/cbor-seq']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/resource.k8s.io/v1beta1/resourceclaimtemplates', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1beta1ResourceClaimTemplateList', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
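
    # Sketch: the *_with_http_info variant returns a (data, status, headers)
    # tuple, which is useful when the caller needs response metadata
    # (illustrative; assumes a configured `api` instance as above):
    #
    #     data, status, headers = \
    #         api.list_resource_claim_template_for_all_namespaces_with_http_info()
    #     assert status == 200
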
def list_resource_slice(self, **kwargs): # noqa: E501
"""list_resource_slice # noqa: E501
list or watch objects of kind ResourceSlice # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_resource_slice(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart its list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error; the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent with the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested number of items (up to zero items) in the event all requested objects are filtered out, and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure it sees all possible objects. If objects are updated during a chunked list, the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset.
:param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset.
:param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with the `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When the `sendInitialEvents` option is set, the `resourceVersionMatch` option must also be set. The semantics of the watch request are as follows: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is sent when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is sent when the state is synced at least to the moment when the request started being processed. - `resourceVersionMatch` set to any other value or unset: Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1beta1ResourceSliceList
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.list_resource_slice_with_http_info(**kwargs) # noqa: E501
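
    # Sketch: because this method accepts `watch` and `timeout_seconds`, it can
    # be passed to kubernetes.watch.Watch().stream() to consume ResourceSlice
    # events (illustrative; assumes a configured `api` instance):
    #
    #     from kubernetes import watch
    #
    #     w = watch.Watch()
    #     for event in w.stream(api.list_resource_slice, timeout_seconds=60):
    #         print(event["type"], event["object"].metadata.name)
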
def list_resource_slice_with_http_info(self, **kwargs): # noqa: E501
"""list_resource_slice # noqa: E501
list or watch objects of kind ResourceSlice # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_resource_slice_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart its list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error; the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent with the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested number of items (up to zero items) in the event all requested objects are filtered out, and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure it sees all possible objects. If objects are updated during a chunked list, the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset.
:param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset.
:param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with the `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When the `sendInitialEvents` option is set, the `resourceVersionMatch` option must also be set. The semantics of the watch request are as follows: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is sent when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is sent when the state is synced at least to the moment when the request started being processed. - `resourceVersionMatch` set to any other value or unset: Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:param _return_http_data_only: return the response data only, without the
                               status code and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1beta1ResourceSliceList, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'pretty',
'allow_watch_bookmarks',
'_continue',
'field_selector',
'label_selector',
'limit',
'resource_version',
'resource_version_match',
'send_initial_events',
'timeout_seconds',
'watch'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method list_resource_slice" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if 'allow_watch_bookmarks' in local_var_params and local_var_params['allow_watch_bookmarks'] is not None: # noqa: E501
query_params.append(('allowWatchBookmarks', local_var_params['allow_watch_bookmarks'])) # noqa: E501
if '_continue' in local_var_params and local_var_params['_continue'] is not None: # noqa: E501
query_params.append(('continue', local_var_params['_continue'])) # noqa: E501
if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None: # noqa: E501
query_params.append(('fieldSelector', local_var_params['field_selector'])) # noqa: E501
if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None: # noqa: E501
query_params.append(('labelSelector', local_var_params['label_selector'])) # noqa: E501
if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501
query_params.append(('limit', local_var_params['limit'])) # noqa: E501
if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None: # noqa: E501
query_params.append(('resourceVersion', local_var_params['resource_version'])) # noqa: E501
if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None: # noqa: E501
query_params.append(('resourceVersionMatch', local_var_params['resource_version_match'])) # noqa: E501
if 'send_initial_events' in local_var_params and local_var_params['send_initial_events'] is not None: # noqa: E501
query_params.append(('sendInitialEvents', local_var_params['send_initial_events'])) # noqa: E501
if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None: # noqa: E501
query_params.append(('timeoutSeconds', local_var_params['timeout_seconds'])) # noqa: E501
if 'watch' in local_var_params and local_var_params['watch'] is not None: # noqa: E501
query_params.append(('watch', local_var_params['watch'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch', 'application/cbor-seq']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/resource.k8s.io/v1beta1/resourceslices', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1beta1ResourceSliceList', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
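
    # Sketch: paginating with `limit` and the server-issued continue token;
    # `metadata._continue` is the list-metadata attribute used by this
    # client's models (illustrative; assumes a configured `api` instance):
    #
    #     token = None
    #     while True:
    #         page = api.list_resource_slice(limit=100, _continue=token)
    #         for item in page.items:
    #             print(item.metadata.name)
    #         token = page.metadata._continue
    #         if not token:
    #             break
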
def patch_device_class(self, name, body, **kwargs): # noqa: E501
"""patch_device_class # noqa: E501
partially update the specified DeviceClass # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_device_class(name, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the DeviceClass (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be no more than 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
:param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
:param bool force: Force is going to \"force\" Apply requests. It means the user will re-acquire conflicting fields owned by other people. The force flag must be unset for non-apply patch requests.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1beta1DeviceClass
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.patch_device_class_with_http_info(name, body, **kwargs) # noqa: E501
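
    # Sketch: a minimal merge-style patch body for a DeviceClass. The patch
    # content type actually sent is negotiated by the underlying api_client,
    # so treat this as illustrative rather than a guarantee of
    # strategic-merge semantics (the class name below is hypothetical):
    #
    #     patched = api.patch_device_class(
    #         "example-device-class",
    #         {"metadata": {"labels": {"tier": "gpu"}}},
    #     )
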
def patch_device_class_with_http_info(self, name, body, **kwargs): # noqa: E501
"""patch_device_class # noqa: E501
partially update the specified DeviceClass # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_device_class_with_http_info(name, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the DeviceClass (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be no more than 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
:param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
:param bool force: Force is going to \"force\" Apply requests. It means the user will re-acquire conflicting fields owned by other people. The force flag must be unset for non-apply patch requests.
:param _return_http_data_only: return the response data only, without the
                               status code and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1beta1DeviceClass, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'name',
'body',
'pretty',
'dry_run',
'field_manager',
'field_validation',
'force'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method patch_device_class" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'name' is set
if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
local_var_params['name'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `name` when calling `patch_device_class`") # noqa: E501
# verify the required parameter 'body' is set
if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
local_var_params['body'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `body` when calling `patch_device_class`") # noqa: E501
collection_formats = {}
path_params = {}
if 'name' in local_var_params:
path_params['name'] = local_var_params['name'] # noqa: E501
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
if 'force' in local_var_params and local_var_params['force'] is not None: # noqa: E501
query_params.append(('force', local_var_params['force'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json', 'application/apply-patch+yaml', 'application/apply-patch+cbor']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/resource.k8s.io/v1beta1/deviceclasses/{name}', 'PATCH',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1beta1DeviceClass', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)

    def patch_namespaced_resource_claim(self, name, namespace, body, **kwargs):  # noqa: E501
"""patch_namespaced_resource_claim # noqa: E501
partially update the specified ResourceClaim # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_namespaced_resource_claim(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the ResourceClaim (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be no more than 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
:param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
:param bool force: Force is going to \"force\" Apply requests. It means the user will re-acquire conflicting fields owned by other people. The force flag must be unset for non-apply patch requests.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1beta1ResourceClaim
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.patch_namespaced_resource_claim_with_http_info(name, namespace, body, **kwargs) # noqa: E501
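
    # Sketch: a server-side dry run of a ResourceClaim patch, exercising the
    # `dry_run` parameter documented above (illustrative; the name and
    # namespace are hypothetical):
    #
    #     api.patch_namespaced_resource_claim(
    #         "example-claim",
    #         "default",
    #         {"metadata": {"annotations": {"owner": "team-a"}}},
    #         dry_run="All",
    #     )
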
def patch_namespaced_resource_claim_with_http_info(self, name, namespace, body, **kwargs): # noqa: E501
"""patch_namespaced_resource_claim # noqa: E501
partially update the specified ResourceClaim # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_namespaced_resource_claim_with_http_info(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the ResourceClaim (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be no more than 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
:param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
:param bool force: Force is going to \"force\" Apply requests. It means the user will re-acquire conflicting fields owned by other people. The force flag must be unset for non-apply patch requests.
:param _return_http_data_only: return the response data only, without the
                               status code and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1beta1ResourceClaim, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'name',
'namespace',
'body',
'pretty',
'dry_run',
'field_manager',
'field_validation',
'force'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method patch_namespaced_resource_claim" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'name' is set
if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
local_var_params['name'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `name` when calling `patch_namespaced_resource_claim`") # noqa: E501
# verify the required parameter 'namespace' is set
if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
local_var_params['namespace'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `namespace` when calling `patch_namespaced_resource_claim`") # noqa: E501
# verify the required parameter 'body' is set
if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
local_var_params['body'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `body` when calling `patch_namespaced_resource_claim`") # noqa: E501
collection_formats = {}
path_params = {}
if 'name' in local_var_params:
path_params['name'] = local_var_params['name'] # noqa: E501
if 'namespace' in local_var_params:
path_params['namespace'] = local_var_params['namespace'] # noqa: E501
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
if 'force' in local_var_params and local_var_params['force'] is not None: # noqa: E501
query_params.append(('force', local_var_params['force'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json', 'application/apply-patch+yaml', 'application/apply-patch+cbor']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/resource.k8s.io/v1beta1/namespaces/{namespace}/resourceclaims/{name}', 'PATCH',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1beta1ResourceClaim', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)

    def patch_namespaced_resource_claim_status(self, name, namespace, body, **kwargs):  # noqa: E501
"""patch_namespaced_resource_claim_status # noqa: E501
partially update status of the specified ResourceClaim # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_namespaced_resource_claim_status(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the ResourceClaim (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be no more than 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
:param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
:param bool force: Force is going to \"force\" Apply requests. It means the user will re-acquire conflicting fields owned by other people. The force flag must be unset for non-apply patch requests.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1beta1ResourceClaim
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.patch_namespaced_resource_claim_status_with_http_info(name, namespace, body, **kwargs) # noqa: E501
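
    # Sketch: patching the status subresource updates only `.status`; callers
    # typically need RBAC on resourceclaims/status for this to succeed
    # (illustrative; the name, namespace, and status body are hypothetical):
    #
    #     api.patch_namespaced_resource_claim_status(
    #         "example-claim",
    #         "default",
    #         {"status": {"allocation": None}},
    #     )
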
def patch_namespaced_resource_claim_status_with_http_info(self, name, namespace, body, **kwargs): # noqa: E501
"""patch_namespaced_resource_claim_status # noqa: E501
partially update status of the specified ResourceClaim # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_namespaced_resource_claim_status_with_http_info(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the ResourceClaim (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be no more than 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
:param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
:param bool force: Force is going to \"force\" Apply requests. It means the user will re-acquire conflicting fields owned by other people. The force flag must be unset for non-apply patch requests.
:param _return_http_data_only: return the response data only, without the
                               status code and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number is provided, it will be the total
                                 request timeout. It can also be a pair
                                 (tuple) of (connection, read) timeouts.
:return: tuple(V1beta1ResourceClaim, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'name',
'namespace',
'body',
'pretty',
'dry_run',
'field_manager',
'field_validation',
'force'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method patch_namespaced_resource_claim_status" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'name' is set
if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
local_var_params['name'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `name` when calling `patch_namespaced_resource_claim_status`") # noqa: E501
# verify the required parameter 'namespace' is set
if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
local_var_params['namespace'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `namespace` when calling `patch_namespaced_resource_claim_status`") # noqa: E501
# verify the required parameter 'body' is set
if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
local_var_params['body'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `body` when calling `patch_namespaced_resource_claim_status`") # noqa: E501
collection_formats = {}
path_params = {}
if 'name' in local_var_params:
path_params['name'] = local_var_params['name'] # noqa: E501
if 'namespace' in local_var_params:
path_params['namespace'] = local_var_params['namespace'] # noqa: E501
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
if 'force' in local_var_params and local_var_params['force'] is not None: # noqa: E501
query_params.append(('force', local_var_params['force'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json', 'application/apply-patch+yaml', 'application/apply-patch+cbor']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/resource.k8s.io/v1beta1/namespaces/{namespace}/resourceclaims/{name}/status', 'PATCH',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1beta1ResourceClaim', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
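
    # Usage sketch (editorial addition, not generated code): patching a
    # ResourceClaim's status subresource with a JSON merge patch. The claim
    # name, namespace, and patch body are illustrative assumptions.
    #
    #   from kubernetes import client, config
    #   config.load_kube_config()
    #   api = client.ResourceV1beta1Api()
    #   patch = {"metadata": {"annotations": {"example.com/note": "patched"}}}
    #   claim = api.patch_namespaced_resource_claim_status(
    #       name="example-claim", namespace="default", body=patch)
    #   print(claim.metadata.name)
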
def patch_namespaced_resource_claim_template(self, name, namespace, body, **kwargs): # noqa: E501
"""patch_namespaced_resource_claim_template # noqa: E501
partially update the specified ResourceClaimTemplate # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_namespaced_resource_claim_template(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the ResourceClaimTemplate (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or equal to 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
:param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
        :param bool force: Force is going to \"force\" Apply requests. It means the user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number is provided, it will be the total
                                 request timeout. It can also be a pair
                                 (tuple) of (connection, read) timeouts.
:return: V1beta1ResourceClaimTemplate
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.patch_namespaced_resource_claim_template_with_http_info(name, namespace, body, **kwargs) # noqa: E501
def patch_namespaced_resource_claim_template_with_http_info(self, name, namespace, body, **kwargs): # noqa: E501
"""patch_namespaced_resource_claim_template # noqa: E501
partially update the specified ResourceClaimTemplate # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_namespaced_resource_claim_template_with_http_info(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the ResourceClaimTemplate (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or equal to 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
:param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
        :param bool force: Force is going to \"force\" Apply requests. It means the user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
        :param _return_http_data_only: if True, return the response data
                                       only, without the status code and
                                       headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number is provided, it will be the total
                                 request timeout. It can also be a pair
                                 (tuple) of (connection, read) timeouts.
:return: tuple(V1beta1ResourceClaimTemplate, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'name',
'namespace',
'body',
'pretty',
'dry_run',
'field_manager',
'field_validation',
'force'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method patch_namespaced_resource_claim_template" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'name' is set
if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
local_var_params['name'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `name` when calling `patch_namespaced_resource_claim_template`") # noqa: E501
# verify the required parameter 'namespace' is set
if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
local_var_params['namespace'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `namespace` when calling `patch_namespaced_resource_claim_template`") # noqa: E501
# verify the required parameter 'body' is set
if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
local_var_params['body'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `body` when calling `patch_namespaced_resource_claim_template`") # noqa: E501
collection_formats = {}
path_params = {}
if 'name' in local_var_params:
path_params['name'] = local_var_params['name'] # noqa: E501
if 'namespace' in local_var_params:
path_params['namespace'] = local_var_params['namespace'] # noqa: E501
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
if 'force' in local_var_params and local_var_params['force'] is not None: # noqa: E501
query_params.append(('force', local_var_params['force'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json', 'application/apply-patch+yaml', 'application/apply-patch+cbor']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/resource.k8s.io/v1beta1/namespaces/{namespace}/resourceclaimtemplates/{name}', 'PATCH',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1beta1ResourceClaimTemplate', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
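
    # Usage sketch (assumed, unverified): server-side apply of a
    # ResourceClaimTemplate. field_manager is required for apply requests and
    # force=True takes over fields owned by other managers; whether the
    # apply-patch content type is negotiated automatically depends on the
    # ApiClient, so treat this as a sketch rather than a verified recipe.
    #
    #   template = {
    #       "apiVersion": "resource.k8s.io/v1beta1",
    #       "kind": "ResourceClaimTemplate",
    #       "metadata": {"name": "example-template"},
    #   }
    #   result = api.patch_namespaced_resource_claim_template(
    #       name="example-template", namespace="default", body=template,
    #       field_manager="example-controller", force=True)
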
def patch_resource_slice(self, name, body, **kwargs): # noqa: E501
"""patch_resource_slice # noqa: E501
partially update the specified ResourceSlice # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_resource_slice(name, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the ResourceSlice (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or equal to 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
:param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
        :param bool force: Force is going to \"force\" Apply requests. It means the user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number is provided, it will be the total
                                 request timeout. It can also be a pair
                                 (tuple) of (connection, read) timeouts.
:return: V1beta1ResourceSlice
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.patch_resource_slice_with_http_info(name, body, **kwargs) # noqa: E501
def patch_resource_slice_with_http_info(self, name, body, **kwargs): # noqa: E501
"""patch_resource_slice # noqa: E501
partially update the specified ResourceSlice # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_resource_slice_with_http_info(name, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the ResourceSlice (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or equal to 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
:param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
        :param bool force: Force is going to \"force\" Apply requests. It means the user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
        :param _return_http_data_only: if True, return the response data
                                       only, without the status code and
                                       headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number is provided, it will be the total
                                 request timeout. It can also be a pair
                                 (tuple) of (connection, read) timeouts.
:return: tuple(V1beta1ResourceSlice, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'name',
'body',
'pretty',
'dry_run',
'field_manager',
'field_validation',
'force'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method patch_resource_slice" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'name' is set
if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
local_var_params['name'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `name` when calling `patch_resource_slice`") # noqa: E501
# verify the required parameter 'body' is set
if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
local_var_params['body'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `body` when calling `patch_resource_slice`") # noqa: E501
collection_formats = {}
path_params = {}
if 'name' in local_var_params:
path_params['name'] = local_var_params['name'] # noqa: E501
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
if 'force' in local_var_params and local_var_params['force'] is not None: # noqa: E501
query_params.append(('force', local_var_params['force'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json', 'application/apply-patch+yaml', 'application/apply-patch+cbor']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/resource.k8s.io/v1beta1/resourceslices/{name}', 'PATCH',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1beta1ResourceSlice', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
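
    # Usage sketch: async_req=True returns a thread-like object whose get()
    # yields the deserialized result, as the docstrings above illustrate.
    # The slice name and label patch are placeholders.
    #
    #   thread = api.patch_resource_slice(
    #       name="example-slice",
    #       body={"metadata": {"labels": {"tier": "gpu"}}},
    #       async_req=True)
    #   slice_obj = thread.get()  # blocks until the PATCH completes
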
def read_device_class(self, name, **kwargs): # noqa: E501
"""read_device_class # noqa: E501
read the specified DeviceClass # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_device_class(name, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the DeviceClass (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number is provided, it will be the total
                                 request timeout. It can also be a pair
                                 (tuple) of (connection, read) timeouts.
:return: V1beta1DeviceClass
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.read_device_class_with_http_info(name, **kwargs) # noqa: E501
def read_device_class_with_http_info(self, name, **kwargs): # noqa: E501
"""read_device_class # noqa: E501
read the specified DeviceClass # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_device_class_with_http_info(name, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the DeviceClass (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
        :param _return_http_data_only: if True, return the response data
                                       only, without the status code and
                                       headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number is provided, it will be the total
                                 request timeout. It can also be a pair
                                 (tuple) of (connection, read) timeouts.
:return: tuple(V1beta1DeviceClass, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'name',
'pretty'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method read_device_class" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'name' is set
if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
local_var_params['name'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `name` when calling `read_device_class`") # noqa: E501
collection_formats = {}
path_params = {}
if 'name' in local_var_params:
path_params['name'] = local_var_params['name'] # noqa: E501
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/resource.k8s.io/v1beta1/deviceclasses/{name}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1beta1DeviceClass', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
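
    # Usage sketch: with _preload_content=False the raw urllib3.HTTPResponse
    # is returned instead of a deserialized V1beta1DeviceClass, which is
    # useful for custom parsing. The class name is a placeholder.
    #
    #   raw = api.read_device_class(name="example-class",
    #                               _preload_content=False)
    #   body_bytes = raw.data  # undecoded response body
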
def read_namespaced_resource_claim(self, name, namespace, **kwargs): # noqa: E501
"""read_namespaced_resource_claim # noqa: E501
read the specified ResourceClaim # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_namespaced_resource_claim(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the ResourceClaim (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number is provided, it will be the total
                                 request timeout. It can also be a pair
                                 (tuple) of (connection, read) timeouts.
:return: V1beta1ResourceClaim
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.read_namespaced_resource_claim_with_http_info(name, namespace, **kwargs) # noqa: E501
def read_namespaced_resource_claim_with_http_info(self, name, namespace, **kwargs): # noqa: E501
"""read_namespaced_resource_claim # noqa: E501
read the specified ResourceClaim # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_namespaced_resource_claim_with_http_info(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the ResourceClaim (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
        :param _return_http_data_only: if True, return the response data
                                       only, without the status code and
                                       headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number is provided, it will be the total
                                 request timeout. It can also be a pair
                                 (tuple) of (connection, read) timeouts.
:return: tuple(V1beta1ResourceClaim, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'name',
'namespace',
'pretty'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method read_namespaced_resource_claim" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'name' is set
if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
local_var_params['name'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `name` when calling `read_namespaced_resource_claim`") # noqa: E501
# verify the required parameter 'namespace' is set
if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
local_var_params['namespace'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `namespace` when calling `read_namespaced_resource_claim`") # noqa: E501
collection_formats = {}
path_params = {}
if 'name' in local_var_params:
path_params['name'] = local_var_params['name'] # noqa: E501
if 'namespace' in local_var_params:
path_params['namespace'] = local_var_params['namespace'] # noqa: E501
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/resource.k8s.io/v1beta1/namespaces/{namespace}/resourceclaims/{name}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1beta1ResourceClaim', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
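
    # Usage sketch: _request_timeout accepts either a single number (total
    # timeout) or a (connection, read) tuple, per the docstrings above.
    #
    #   claim = api.read_namespaced_resource_claim(
    #       name="example-claim", namespace="default",
    #       _request_timeout=(3, 10))  # 3s to connect, 10s to read
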
def read_namespaced_resource_claim_status(self, name, namespace, **kwargs): # noqa: E501
"""read_namespaced_resource_claim_status # noqa: E501
read status of the specified ResourceClaim # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_namespaced_resource_claim_status(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the ResourceClaim (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number is provided, it will be the total
                                 request timeout. It can also be a pair
                                 (tuple) of (connection, read) timeouts.
:return: V1beta1ResourceClaim
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.read_namespaced_resource_claim_status_with_http_info(name, namespace, **kwargs) # noqa: E501
def read_namespaced_resource_claim_status_with_http_info(self, name, namespace, **kwargs): # noqa: E501
"""read_namespaced_resource_claim_status # noqa: E501
read status of the specified ResourceClaim # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_namespaced_resource_claim_status_with_http_info(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the ResourceClaim (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
        :param _return_http_data_only: if True, return the response data
                                       only, without the status code and
                                       headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number is provided, it will be the total
                                 request timeout. It can also be a pair
                                 (tuple) of (connection, read) timeouts.
:return: tuple(V1beta1ResourceClaim, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'name',
'namespace',
'pretty'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method read_namespaced_resource_claim_status" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'name' is set
if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
local_var_params['name'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `name` when calling `read_namespaced_resource_claim_status`") # noqa: E501
# verify the required parameter 'namespace' is set
if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
local_var_params['namespace'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `namespace` when calling `read_namespaced_resource_claim_status`") # noqa: E501
collection_formats = {}
path_params = {}
if 'name' in local_var_params:
path_params['name'] = local_var_params['name'] # noqa: E501
if 'namespace' in local_var_params:
path_params['namespace'] = local_var_params['namespace'] # noqa: E501
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/resource.k8s.io/v1beta1/namespaces/{namespace}/resourceclaims/{name}/status', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1beta1ResourceClaim', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
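
    # Usage sketch: read calls raise ApiException on non-2xx responses, so a
    # missing ResourceClaim surfaces as status 404 rather than a None result.
    #
    #   from kubernetes.client.rest import ApiException
    #   try:
    #       status = api.read_namespaced_resource_claim_status(
    #           name="missing-claim", namespace="default")
    #   except ApiException as exc:
    #       if exc.status == 404:
    #           status = None  # claim does not exist
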
def read_namespaced_resource_claim_template(self, name, namespace, **kwargs): # noqa: E501
"""read_namespaced_resource_claim_template # noqa: E501
read the specified ResourceClaimTemplate # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_namespaced_resource_claim_template(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the ResourceClaimTemplate (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number is provided, it will be the total
                                 request timeout. It can also be a pair
                                 (tuple) of (connection, read) timeouts.
:return: V1beta1ResourceClaimTemplate
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.read_namespaced_resource_claim_template_with_http_info(name, namespace, **kwargs) # noqa: E501
def read_namespaced_resource_claim_template_with_http_info(self, name, namespace, **kwargs): # noqa: E501
"""read_namespaced_resource_claim_template # noqa: E501
read the specified ResourceClaimTemplate # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_namespaced_resource_claim_template_with_http_info(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the ResourceClaimTemplate (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
        :param _return_http_data_only: if True, return the response data
                                       only, without the status code and
                                       headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number is provided, it will be the total
                                 request timeout. It can also be a pair
                                 (tuple) of (connection, read) timeouts.
:return: tuple(V1beta1ResourceClaimTemplate, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'name',
'namespace',
'pretty'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method read_namespaced_resource_claim_template" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'name' is set
if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
local_var_params['name'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `name` when calling `read_namespaced_resource_claim_template`") # noqa: E501
# verify the required parameter 'namespace' is set
if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
local_var_params['namespace'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `namespace` when calling `read_namespaced_resource_claim_template`") # noqa: E501
collection_formats = {}
path_params = {}
if 'name' in local_var_params:
path_params['name'] = local_var_params['name'] # noqa: E501
if 'namespace' in local_var_params:
path_params['namespace'] = local_var_params['namespace'] # noqa: E501
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/resource.k8s.io/v1beta1/namespaces/{namespace}/resourceclaimtemplates/{name}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1beta1ResourceClaimTemplate', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def read_resource_slice(self, name, **kwargs): # noqa: E501
"""read_resource_slice # noqa: E501
read the specified ResourceSlice # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_resource_slice(name, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the ResourceSlice (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number is provided, it will be the total
                                 request timeout. It can also be a pair
                                 (tuple) of (connection, read) timeouts.
:return: V1beta1ResourceSlice
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.read_resource_slice_with_http_info(name, **kwargs) # noqa: E501
def read_resource_slice_with_http_info(self, name, **kwargs): # noqa: E501
"""read_resource_slice # noqa: E501
read the specified ResourceSlice # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_resource_slice_with_http_info(name, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the ResourceSlice (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
        :param _return_http_data_only: if True, return the response data
                                       only, without the status code and
                                       headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number is provided, it will be the total
                                 request timeout. It can also be a pair
                                 (tuple) of (connection, read) timeouts.
:return: tuple(V1beta1ResourceSlice, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'name',
'pretty'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method read_resource_slice" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'name' is set
if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
local_var_params['name'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `name` when calling `read_resource_slice`") # noqa: E501
collection_formats = {}
path_params = {}
if 'name' in local_var_params:
path_params['name'] = local_var_params['name'] # noqa: E501
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/resource.k8s.io/v1beta1/resourceslices/{name}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1beta1ResourceSlice', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
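
    # Usage sketch: the *_with_http_info variants return a
    # (data, status_code, headers) tuple, which is handy when the caller
    # also needs the response headers.
    #
    #   data, status_code, headers = api.read_resource_slice_with_http_info(
    #       name="example-slice")
    #   assert status_code == 200
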
def replace_device_class(self, name, body, **kwargs): # noqa: E501
"""replace_device_class # noqa: E501
replace the specified DeviceClass # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_device_class(name, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the DeviceClass (required)
:param V1beta1DeviceClass body: (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or equal to 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number is provided, it will be the total
                                 request timeout. It can also be a pair
                                 (tuple) of (connection, read) timeouts.
:return: V1beta1DeviceClass
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.replace_device_class_with_http_info(name, body, **kwargs) # noqa: E501
def replace_device_class_with_http_info(self, name, body, **kwargs): # noqa: E501
"""replace_device_class # noqa: E501
replace the specified DeviceClass # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_device_class_with_http_info(name, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the DeviceClass (required)
:param V1beta1DeviceClass body: (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or equal to 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
        :param _return_http_data_only: if True, return the response data
                                       only, without the status code and
                                       headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number is provided, it will be the total
                                 request timeout. It can also be a pair
                                 (tuple) of (connection, read) timeouts.
:return: tuple(V1beta1DeviceClass, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'name',
'body',
'pretty',
'dry_run',
'field_manager',
'field_validation'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method replace_device_class" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'name' is set
if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
local_var_params['name'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `name` when calling `replace_device_class`") # noqa: E501
# verify the required parameter 'body' is set
if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
local_var_params['body'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `body` when calling `replace_device_class`") # noqa: E501
collection_formats = {}
path_params = {}
if 'name' in local_var_params:
path_params['name'] = local_var_params['name'] # noqa: E501
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/resource.k8s.io/v1beta1/deviceclasses/{name}', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1beta1DeviceClass', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def replace_namespaced_resource_claim(self, name, namespace, body, **kwargs): # noqa: E501
"""replace_namespaced_resource_claim # noqa: E501
replace the specified ResourceClaim # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_namespaced_resource_claim(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the ResourceClaim (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1beta1ResourceClaim body: (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or equal to 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1beta1ResourceClaim
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.replace_namespaced_resource_claim_with_http_info(name, namespace, body, **kwargs) # noqa: E501
def replace_namespaced_resource_claim_with_http_info(self, name, namespace, body, **kwargs): # noqa: E501
"""replace_namespaced_resource_claim # noqa: E501
replace the specified ResourceClaim # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_namespaced_resource_claim_with_http_info(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the ResourceClaim (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1beta1ResourceClaim body: (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or equal to 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
:param _return_http_data_only: response data only, without status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1beta1ResourceClaim, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'name',
'namespace',
'body',
'pretty',
'dry_run',
'field_manager',
'field_validation'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method replace_namespaced_resource_claim" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'name' is set
if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
local_var_params['name'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `name` when calling `replace_namespaced_resource_claim`") # noqa: E501
# verify the required parameter 'namespace' is set
if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
local_var_params['namespace'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `namespace` when calling `replace_namespaced_resource_claim`") # noqa: E501
# verify the required parameter 'body' is set
if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
local_var_params['body'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `body` when calling `replace_namespaced_resource_claim`") # noqa: E501
collection_formats = {}
path_params = {}
if 'name' in local_var_params:
path_params['name'] = local_var_params['name'] # noqa: E501
if 'namespace' in local_var_params:
path_params['namespace'] = local_var_params['namespace'] # noqa: E501
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/resource.k8s.io/v1beta1/namespaces/{namespace}/resourceclaims/{name}', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1beta1ResourceClaim', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def replace_namespaced_resource_claim_status(self, name, namespace, body, **kwargs): # noqa: E501
"""replace_namespaced_resource_claim_status # noqa: E501
replace status of the specified ResourceClaim # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_namespaced_resource_claim_status(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the ResourceClaim (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1beta1ResourceClaim body: (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or equal to 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1beta1ResourceClaim
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.replace_namespaced_resource_claim_status_with_http_info(name, namespace, body, **kwargs) # noqa: E501
def replace_namespaced_resource_claim_status_with_http_info(self, name, namespace, body, **kwargs): # noqa: E501
"""replace_namespaced_resource_claim_status # noqa: E501
replace status of the specified ResourceClaim # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_namespaced_resource_claim_status_with_http_info(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the ResourceClaim (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1beta1ResourceClaim body: (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or equal to 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
:param _return_http_data_only: response data only, without status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1beta1ResourceClaim, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'name',
'namespace',
'body',
'pretty',
'dry_run',
'field_manager',
'field_validation'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method replace_namespaced_resource_claim_status" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'name' is set
if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
local_var_params['name'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `name` when calling `replace_namespaced_resource_claim_status`") # noqa: E501
# verify the required parameter 'namespace' is set
if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
local_var_params['namespace'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `namespace` when calling `replace_namespaced_resource_claim_status`") # noqa: E501
# verify the required parameter 'body' is set
if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
local_var_params['body'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `body` when calling `replace_namespaced_resource_claim_status`") # noqa: E501
collection_formats = {}
path_params = {}
if 'name' in local_var_params:
path_params['name'] = local_var_params['name'] # noqa: E501
if 'namespace' in local_var_params:
path_params['namespace'] = local_var_params['namespace'] # noqa: E501
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/resource.k8s.io/v1beta1/namespaces/{namespace}/resourceclaims/{name}/status', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1beta1ResourceClaim', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def replace_namespaced_resource_claim_template(self, name, namespace, body, **kwargs): # noqa: E501
"""replace_namespaced_resource_claim_template # noqa: E501
replace the specified ResourceClaimTemplate # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_namespaced_resource_claim_template(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the ResourceClaimTemplate (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1beta1ResourceClaimTemplate body: (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or equal to 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1beta1ResourceClaimTemplate
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.replace_namespaced_resource_claim_template_with_http_info(name, namespace, body, **kwargs) # noqa: E501
def replace_namespaced_resource_claim_template_with_http_info(self, name, namespace, body, **kwargs): # noqa: E501
"""replace_namespaced_resource_claim_template # noqa: E501
replace the specified ResourceClaimTemplate # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_namespaced_resource_claim_template_with_http_info(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the ResourceClaimTemplate (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1beta1ResourceClaimTemplate body: (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or equal to 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
:param _return_http_data_only: response data only, without status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1beta1ResourceClaimTemplate, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'name',
'namespace',
'body',
'pretty',
'dry_run',
'field_manager',
'field_validation'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method replace_namespaced_resource_claim_template" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'name' is set
if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
local_var_params['name'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `name` when calling `replace_namespaced_resource_claim_template`") # noqa: E501
# verify the required parameter 'namespace' is set
if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
local_var_params['namespace'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `namespace` when calling `replace_namespaced_resource_claim_template`") # noqa: E501
# verify the required parameter 'body' is set
if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
local_var_params['body'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `body` when calling `replace_namespaced_resource_claim_template`") # noqa: E501
collection_formats = {}
path_params = {}
if 'name' in local_var_params:
path_params['name'] = local_var_params['name'] # noqa: E501
if 'namespace' in local_var_params:
path_params['namespace'] = local_var_params['namespace'] # noqa: E501
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/resource.k8s.io/v1beta1/namespaces/{namespace}/resourceclaimtemplates/{name}', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1beta1ResourceClaimTemplate', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def replace_resource_slice(self, name, body, **kwargs): # noqa: E501
"""replace_resource_slice # noqa: E501
replace the specified ResourceSlice # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_resource_slice(name, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the ResourceSlice (required)
:param V1beta1ResourceSlice body: (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or equal to 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1beta1ResourceSlice
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.replace_resource_slice_with_http_info(name, body, **kwargs) # noqa: E501
def replace_resource_slice_with_http_info(self, name, body, **kwargs): # noqa: E501
"""replace_resource_slice # noqa: E501
replace the specified ResourceSlice # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_resource_slice_with_http_info(name, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the ResourceSlice (required)
:param V1beta1ResourceSlice body: (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or equal to 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
:param _return_http_data_only: response data only, without status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1beta1ResourceSlice, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'name',
'body',
'pretty',
'dry_run',
'field_manager',
'field_validation'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method replace_resource_slice" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'name' is set
if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
local_var_params['name'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `name` when calling `replace_resource_slice`") # noqa: E501
# verify the required parameter 'body' is set
if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
local_var_params['body'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `body` when calling `replace_resource_slice`") # noqa: E501
collection_formats = {}
path_params = {}
if 'name' in local_var_params:
path_params['name'] = local_var_params['name'] # noqa: E501
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/resource.k8s.io/v1beta1/resourceslices/{name}', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1beta1ResourceSlice', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
| ResourceV1beta1Api |
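The generated client above follows the usual OpenAPI pattern: each public method returns only the deserialized object, its _with_http_info twin also returns the status code and headers, and both funnel into api_client.call_api. A minimal usage sketch, assuming a reachable cluster with the resource.k8s.io/v1beta1 API enabled and that ResourceV1beta1Api is exposed on kubernetes.client in your client version; "example-slice" and slice_obj are hypothetical placeholders:

from kubernetes import client, config

# Load credentials from ~/.kube/config; inside a pod, use
# config.load_incluster_config() instead.
config.load_kube_config()
api = client.ResourceV1beta1Api()

# slice_obj stands in for a V1beta1ResourceSlice fetched and mutated earlier;
# PUT replaces the whole object, so the body must carry metadata.resourceVersion.
updated = api.replace_resource_slice(
    name="example-slice",
    body=slice_obj,
    dry_run="All",               # validate server-side without persisting
    field_validation="Strict",   # reject unknown or duplicate fields
)

# The *_with_http_info variant also surfaces status code and headers:
obj, status, headers = api.replace_resource_slice_with_http_info(
    name="example-slice", body=slice_obj
)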
python | getsentry__sentry | tests/sentry/workflow_engine/processors/test_data_condition.py | {
"start": 215,
"end": 2142
} | class ____(TestCase):
def setUp(self) -> None:
self.slow_config = {
"interval": "1d",
"value": 7,
}
def test_simple(self) -> None:
conditions = [
self.create_data_condition(type=Condition.EQUAL), # fast
self.create_data_condition(type=Condition.EQUAL), # fast
self.create_data_condition(
type=Condition.EVENT_FREQUENCY_COUNT, comparison=self.slow_config
), # slow
]
fast_conditions, slow_conditions = split_conditions_by_speed(conditions)
assert fast_conditions == [conditions[0], conditions[1]]
assert slow_conditions == [conditions[2]]
def test_only_fast_conditions(self) -> None:
conditions = [
self.create_data_condition(type=Condition.EQUAL), # fast
self.create_data_condition(type=Condition.EQUAL), # fast
]
fast_conditions, slow_conditions = split_conditions_by_speed(conditions)
assert fast_conditions == [conditions[0], conditions[1]]
assert slow_conditions == []
def test_only_slow_conditions(self) -> None:
conditions = [
self.create_data_condition(
type=Condition.EVENT_FREQUENCY_COUNT, comparison=self.slow_config
), # slow
self.create_data_condition(
type=Condition.EVENT_FREQUENCY_COUNT, comparison=self.slow_config
), # slow
]
fast_conditions, slow_conditions = split_conditions_by_speed(conditions)
assert slow_conditions == [conditions[0], conditions[1]]
assert fast_conditions == []
def test_no_conditions(self) -> None:
conditions: list[DataCondition] = []
fast_conditions, slow_conditions = split_conditions_by_speed(conditions)
assert fast_conditions == []
assert slow_conditions == []
| SplitConditionsBySpeedTest |
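The tests above pin down the contract of split_conditions_by_speed: an order-preserving partition of DataCondition objects into fast conditions and slow, frequency-style ones. A hedged sketch of an equivalent partition; the SLOW_CONDITION_TYPES set and the membership test are assumptions, since the real helper in sentry.workflow_engine may classify types differently:

# Hypothetical reimplementation for illustration only.
SLOW_CONDITION_TYPES = {"event_frequency_count"}  # only the type the tests exercise

def split_conditions_by_speed(conditions):
    """Partition conditions into (fast, slow), preserving input order."""
    fast, slow = [], []
    for condition in conditions:
        bucket = slow if condition.type in SLOW_CONDITION_TYPES else fast
        bucket.append(condition)
    return fast, slow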
python | numba__numba | numba/tests/test_analysis.py | {
"start": 19917,
"end": 30814
} | class ____(TestBranchPruneBase, SerialMixin):
# Really important thing to remember... the branch on predicates end up as
# POP_JUMP_IF_<bool> and the targets are backwards compared to normal, i.e.
# the true condition is far jump and the false the near i.e. `if x` would
# end up in Numba IR as e.g. `branch x 10, 6`.
_TRUTHY = (1, "String", True, 7.4, 3j)
_FALSEY = (0, "", False, 0.0, 0j, None)
def _literal_const_sample_generator(self, pyfunc, consts):
"""
This takes a python function, pyfunc, and manipulates its co_const
__code__ member to create a new function with different co_consts as
supplied in argument consts.
consts is a dict {index: value} of co_const tuple index to constant
value used to update a pyfunc clone's co_const.
"""
pyfunc_code = pyfunc.__code__
# translate consts spec to update the constants
co_consts = {k: v for k, v in enumerate(pyfunc_code.co_consts)}
for k, v in consts.items():
co_consts[k] = v
new_consts = tuple([v for _, v in sorted(co_consts.items())])
# create code object with mutation
new_code = pyfunc_code.replace(co_consts=new_consts)
# get function
return pytypes.FunctionType(new_code, globals())
def test_literal_const_code_gen(self):
def impl(x):
_CONST1 = "PLACEHOLDER1"
if _CONST1:
return 3.14159
else:
_CONST2 = "PLACEHOLDER2"
return _CONST2 + 4
if PYVERSION in ((3, 14), ):
# The order of the __code__.co_consts changes with 3.14
new = self._literal_const_sample_generator(impl, {0:0, 2:20})
elif PYVERSION in ((3, 10), (3, 11), (3, 12), (3, 13)):
new = self._literal_const_sample_generator(impl, {1:0, 3:20})
else:
raise NotImplementedError(PYVERSION)
iconst = impl.__code__.co_consts
nconst = new.__code__.co_consts
if PYVERSION in ((3, 14), ):
self.assertEqual(iconst, ("PLACEHOLDER1", 3.14159,
"PLACEHOLDER2"))
self.assertEqual(nconst, (0, 3.14159, 20))
elif PYVERSION in ((3, 10), (3, 11), (3, 12), (3, 13)):
self.assertEqual(iconst, (None, "PLACEHOLDER1", 3.14159,
"PLACEHOLDER2", 4))
self.assertEqual(nconst, (None, 0, 3.14159, 20, 4))
else:
raise NotImplementedError(PYVERSION)
self.assertEqual(impl(None), 3.14159)
self.assertEqual(new(None), 24)
def test_single_if_const(self):
def impl(x):
_CONST1 = "PLACEHOLDER1"
if _CONST1:
return 3.14159
for c_inp, prune in (self._TRUTHY, False), (self._FALSEY, True):
for const in c_inp:
if PYVERSION in ((3, 14), ):
# The order of the __code__.co_consts changes with 3.14
func = self._literal_const_sample_generator(impl,
{0: const})
elif PYVERSION in ((3, 10), (3, 11), (3, 12), (3, 13)):
func = self._literal_const_sample_generator(impl,
{1: const})
else:
raise NotImplementedError(PYVERSION)
self.assert_prune(func, (types.NoneType('none'),), [prune],
None)
def test_single_if_negate_const(self):
def impl(x):
_CONST1 = "PLACEHOLDER1"
if not _CONST1:
return 3.14159
for c_inp, prune in (self._TRUTHY, False), (self._FALSEY, True):
for const in c_inp:
if PYVERSION in ((3, 14), ):
# The order of the __code__.co_consts changes with 3.14
func = self._literal_const_sample_generator(impl,
{0: const})
elif PYVERSION in ((3, 10), (3, 11), (3, 12), (3, 13)):
func = self._literal_const_sample_generator(impl,
{1: const})
else:
raise NotImplementedError(PYVERSION)
self.assert_prune(func, (types.NoneType('none'),), [prune],
None)
def test_single_if_else_const(self):
def impl(x):
_CONST1 = "PLACEHOLDER1"
if _CONST1:
return 3.14159
else:
return 1.61803
for c_inp, prune in (self._TRUTHY, False), (self._FALSEY, True):
for const in c_inp:
if PYVERSION in ((3, 14), ):
# The order of the __code__.co_consts changes with 3.14
func = self._literal_const_sample_generator(impl,
{0: const})
elif PYVERSION in ((3, 10), (3, 11), (3, 12), (3, 13)):
func = self._literal_const_sample_generator(impl,
{1: const})
else:
raise NotImplementedError(PYVERSION)
self.assert_prune(func, (types.NoneType('none'),), [prune],
None)
def test_single_if_else_negate_const(self):
def impl(x):
_CONST1 = "PLACEHOLDER1"
if not _CONST1:
return 3.14159
else:
return 1.61803
for c_inp, prune in (self._TRUTHY, False), (self._FALSEY, True):
for const in c_inp:
if PYVERSION in ((3, 14), ):
# The order of the __code__.co_consts changes with 3.14
func = self._literal_const_sample_generator(impl,
{0: const})
elif PYVERSION in ((3, 10), (3, 11), (3, 12), (3, 13)):
func = self._literal_const_sample_generator(impl,
{1: const})
else:
raise NotImplementedError(PYVERSION)
self.assert_prune(func, (types.NoneType('none'),), [prune],
None)
def test_single_if_freevar(self):
for c_inp, prune in (self._TRUTHY, False), (self._FALSEY, True):
for const in c_inp:
def func(x):
if const:
return 3.14159, const
self.assert_prune(func, (types.NoneType('none'),), [prune],
None)
def test_single_if_negate_freevar(self):
for c_inp, prune in (self._TRUTHY, False), (self._FALSEY, True):
for const in c_inp:
def func(x):
if not const:
return 3.14159, const
self.assert_prune(func, (types.NoneType('none'),), [prune],
None)
def test_single_if_else_freevar(self):
for c_inp, prune in (self._TRUTHY, False), (self._FALSEY, True):
for const in c_inp:
def func(x):
if const:
return 3.14159, const
else:
return 1.61803, const
self.assert_prune(func, (types.NoneType('none'),), [prune],
None)
def test_single_if_else_negate_freevar(self):
for c_inp, prune in (self._TRUTHY, False), (self._FALSEY, True):
for const in c_inp:
def func(x):
if not const:
return 3.14159, const
else:
return 1.61803, const
self.assert_prune(func, (types.NoneType('none'),), [prune],
None)
# globals in this section have absurd names after their test usecase names
# so as to prevent collisions and permit tests to run in parallel
def test_single_if_global(self):
global c_test_single_if_global
for c_inp, prune in (self._TRUTHY, False), (self._FALSEY, True):
for c in c_inp:
c_test_single_if_global = c
def func(x):
if c_test_single_if_global:
return 3.14159, c_test_single_if_global
self.assert_prune(func, (types.NoneType('none'),), [prune],
None)
def test_single_if_negate_global(self):
global c_test_single_if_negate_global
for c_inp, prune in (self._TRUTHY, False), (self._FALSEY, True):
for c in c_inp:
c_test_single_if_negate_global = c
def func(x):
if c_test_single_if_negate_global:
return 3.14159, c_test_single_if_negate_global
self.assert_prune(func, (types.NoneType('none'),), [prune],
None)
def test_single_if_else_global(self):
global c_test_single_if_else_global
for c_inp, prune in (self._TRUTHY, False), (self._FALSEY, True):
for c in c_inp:
c_test_single_if_else_global = c
def func(x):
if c_test_single_if_else_global:
return 3.14159, c_test_single_if_else_global
else:
return 1.61803, c_test_single_if_else_global
self.assert_prune(func, (types.NoneType('none'),), [prune],
None)
def test_single_if_else_negate_global(self):
global c_test_single_if_else_negate_global
for c_inp, prune in (self._TRUTHY, False), (self._FALSEY, True):
for c in c_inp:
c_test_single_if_else_negate_global = c
def func(x):
if not c_test_single_if_else_negate_global:
return 3.14159, c_test_single_if_else_negate_global
else:
return 1.61803, c_test_single_if_else_negate_global
self.assert_prune(func, (types.NoneType('none'),), [prune],
None)
def test_issue_5618(self):
@njit
def foo():
values = np.zeros(1)
tmp = 666
if tmp:
values[0] = tmp
return values
self.assertPreciseEqual(foo.py_func()[0], 666.)
self.assertPreciseEqual(foo()[0], 666.)
| TestBranchPrunePredicates |
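The const-mutation helper above leans on plain CPython machinery rather than anything Numba-specific: CodeType.replace (Python 3.8+) clones a code object with selected fields swapped, and types.FunctionType binds the clone to a globals dict. A standalone sketch of the same trick:

import types

def f():
    return "PLACEHOLDER"

code = f.__code__
# co_consts is an immutable tuple; build a new one with the constant swapped.
new_consts = tuple(42 if c == "PLACEHOLDER" else c for c in code.co_consts)
g = types.FunctionType(code.replace(co_consts=new_consts), globals())

assert f() == "PLACEHOLDER"
assert g() == 42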
python | django__django | tests/gis_tests/gdal_tests/test_srs.py | {
"start": 242,
"end": 8393
} | class ____:
def __init__(self, wkt, **kwargs):
self.wkt = wkt
for key, value in kwargs.items():
setattr(self, key, value)
# Some Spatial Reference examples
srlist = (
TestSRS(
'GEOGCS["WGS 84",DATUM["WGS_1984",SPHEROID["WGS 84",6378137,298.257223563,'
'AUTHORITY["EPSG","7030"]],TOWGS84[0,0,0,0,0,0,0],AUTHORITY["EPSG","6326"]],'
'PRIMEM["Greenwich",0,AUTHORITY["EPSG","8901"]],UNIT["degree",'
'0.0174532925199433,AUTHORITY["EPSG","9122"]],AXIS["Latitude",NORTH],'
'AXIS["Longitude",EAST],AUTHORITY["EPSG","4326"]]',
epsg=4326,
projected=False,
geographic=True,
local=False,
lin_name="unknown",
ang_name="degree",
lin_units=1.0,
ang_units=0.0174532925199,
auth={
None: ("EPSG", "4326"), # Top-level authority.
"GEOGCS": ("EPSG", "4326"),
"spheroid": ("EPSG", "7030"),
},
attr=(
("DATUM", "WGS_1984"),
(("SPHEROID", 1), "6378137"),
("primem|authority", "EPSG"),
),
),
TestSRS(
'PROJCS["NAD83 / Texas South Central",'
'GEOGCS["NAD83",DATUM["North_American_Datum_1983",'
'SPHEROID["GRS 1980",6378137,298.257222101,AUTHORITY["EPSG","7019"]],'
'AUTHORITY["EPSG","6269"]],PRIMEM["Greenwich",0,AUTHORITY["EPSG","8901"]],'
'UNIT["degree",0.0174532925199433,AUTHORITY["EPSG","9122"]],'
'AUTHORITY["EPSG","4269"]],PROJECTION["Lambert_Conformal_Conic_2SP"],'
'PARAMETER["standard_parallel_1",30.2833333333333],'
'PARAMETER["standard_parallel_2",28.3833333333333],'
'PARAMETER["latitude_of_origin",27.8333333333333],'
'PARAMETER["central_meridian",-99],PARAMETER["false_easting",600000],'
'PARAMETER["false_northing",4000000],UNIT["metre",1,AUTHORITY["EPSG","9001"]],'
'AXIS["Easting",EAST],AXIS["Northing",NORTH],AUTHORITY["EPSG","32140"]]',
epsg=32140,
projected=True,
geographic=False,
local=False,
lin_name="metre",
ang_name="degree",
lin_units=1.0,
ang_units=0.0174532925199,
auth={
None: ("EPSG", "32140"), # Top-level authority.
"PROJCS": ("EPSG", "32140"),
"spheroid": ("EPSG", "7019"),
"unit": ("EPSG", "9001"),
},
attr=(
("DATUM", "North_American_Datum_1983"),
(("SPHEROID", 2), "298.257222101"),
("PROJECTION", "Lambert_Conformal_Conic_2SP"),
),
),
TestSRS(
'PROJCS["NAD83 / Texas South Central (ftUS)",'
'GEOGCS["NAD83",DATUM["North_American_Datum_1983",'
'SPHEROID["GRS 1980",6378137,298.257222101,AUTHORITY["EPSG","7019"]],'
'AUTHORITY["EPSG","6269"]],'
'PRIMEM["Greenwich",0],'
'UNIT["Degree",0.0174532925199433]],PROJECTION["Lambert_Conformal_Conic_2SP"],'
'PARAMETER["false_easting",1968500],'
'PARAMETER["false_northing",13123333.3333333],'
'PARAMETER["central_meridian",-99],'
'PARAMETER["standard_parallel_1",28.3833333333333],'
'PARAMETER["standard_parallel_2",30.2833333333333],'
'PARAMETER["latitude_of_origin",27.8333333333333],'
'UNIT["US survey foot",0.304800609601219],AXIS["Easting",EAST],'
'AXIS["Northing",NORTH]]',
epsg=None,
projected=True,
geographic=False,
local=False,
lin_name="US survey foot",
ang_name="Degree",
lin_units=0.3048006096012192,
ang_units=0.0174532925199,
auth={
None: (None, None), # Top-level authority.
"PROJCS": (None, None),
},
attr=(
("PROJCS|GeOgCs|spheroid", "GRS 1980"),
(("projcs", 9), "UNIT"),
(("projcs", 11), "AXIS"),
),
),
# This is really ESRI format, not WKT -- but the import should work the
# same
TestSRS(
'LOCAL_CS["Non-Earth (Meter)",LOCAL_DATUM["Local Datum",32767],'
'UNIT["Meter",1],AXIS["X",EAST],AXIS["Y",NORTH]]',
esri=True,
epsg=None,
projected=False,
geographic=False,
local=True,
lin_name="Meter",
ang_name="degree",
lin_units=1.0,
ang_units=0.0174532925199,
attr=(("LOCAL_DATUM", "Local Datum"),),
),
)
# Well-Known Names
well_known = (
TestSRS(
'GEOGCS["WGS 84",DATUM["WGS_1984",SPHEROID["WGS 84",6378137,298.257223563,'
'AUTHORITY["EPSG","7030"]],TOWGS84[0,0,0,0,0,0,0],AUTHORITY["EPSG","6326"]],'
'PRIMEM["Greenwich",0,AUTHORITY["EPSG","8901"]],'
'UNIT["degree",0.01745329251994328,'
'AUTHORITY["EPSG","9122"]],AUTHORITY["EPSG","4326"]]',
wk="WGS84",
name="WGS 84",
attrs=(("GEOGCS|AUTHORITY", 1, "4326"), ("SPHEROID", "WGS 84")),
),
TestSRS(
'GEOGCS["WGS 72",DATUM["WGS_1972",SPHEROID["WGS 72",6378135,298.26,'
'AUTHORITY["EPSG","7043"]],AUTHORITY["EPSG","6322"]],'
'PRIMEM["Greenwich",0,AUTHORITY["EPSG","8901"]],'
'UNIT["degree",0.01745329251994328,AUTHORITY["EPSG","9122"]],'
'AUTHORITY["EPSG","4322"]]',
wk="WGS72",
name="WGS 72",
attrs=(("GEOGCS|AUTHORITY", 1, "4322"), ("SPHEROID", "WGS 72")),
),
TestSRS(
'GEOGCS["NAD27",DATUM["North_American_Datum_1927",'
'SPHEROID["Clarke 1866",6378206.4,294.9786982138982,'
'AUTHORITY["EPSG","7008"]],AUTHORITY["EPSG","6267"]],'
'PRIMEM["Greenwich",0,AUTHORITY["EPSG","8901"]],'
'UNIT["degree",0.01745329251994328,AUTHORITY["EPSG","9122"]],'
'AUTHORITY["EPSG","4267"]]',
wk="NAD27",
name="NAD27",
attrs=(("GEOGCS|AUTHORITY", 1, "4267"), ("SPHEROID", "Clarke 1866")),
),
TestSRS(
'GEOGCS["NAD83",DATUM["North_American_Datum_1983",'
'SPHEROID["GRS 1980",6378137,298.257222101,'
'AUTHORITY["EPSG","7019"]],AUTHORITY["EPSG","6269"]],'
'PRIMEM["Greenwich",0,AUTHORITY["EPSG","8901"]],'
'UNIT["degree",0.01745329251994328,AUTHORITY["EPSG","9122"]],'
'AUTHORITY["EPSG","4269"]]',
wk="NAD83",
name="NAD83",
attrs=(("GEOGCS|AUTHORITY", 1, "4269"), ("SPHEROID", "GRS 1980")),
),
TestSRS(
'PROJCS["NZGD49 / Karamea Circuit",GEOGCS["NZGD49",'
'DATUM["New_Zealand_Geodetic_Datum_1949",'
'SPHEROID["International 1924",6378388,297,'
'AUTHORITY["EPSG","7022"]],'
"TOWGS84[59.47,-5.04,187.44,0.47,-0.1,1.024,-4.5993],"
'AUTHORITY["EPSG","6272"]],PRIMEM["Greenwich",0,'
'AUTHORITY["EPSG","8901"]],UNIT["degree",0.01745329251994328,'
'AUTHORITY["EPSG","9122"]],AUTHORITY["EPSG","4272"]],'
'PROJECTION["Transverse_Mercator"],'
'PARAMETER["latitude_of_origin",-41.28991152777778],'
'PARAMETER["central_meridian",172.1090281944444],'
'PARAMETER["scale_factor",1],PARAMETER["false_easting",300000],'
'PARAMETER["false_northing",700000],'
'UNIT["metre",1,AUTHORITY["EPSG","9001"]],AUTHORITY["EPSG","27216"]]',
wk="EPSG:27216",
name="NZGD49 / Karamea Circuit",
attrs=(
("PROJECTION", "Transverse_Mercator"),
("SPHEROID", "International 1924"),
),
),
)
bad_srlist = (
"Foobar",
'OOJCS["NAD83 / Texas South Central",GEOGCS["NAD83",'
'DATUM["North_American_Datum_1983",'
'SPHEROID["GRS 1980",6378137,298.257222101,AUTHORITY["EPSG","7019"]],'
'AUTHORITY["EPSG","6269"]],PRIMEM["Greenwich",0,AUTHORITY["EPSG","8901"]],'
'UNIT["degree",0.01745329251994328,AUTHORITY["EPSG","9122"]],'
'AUTHORITY["EPSG","4269"]],PROJECTION["Lambert_Conformal_Conic_2SP"],'
'PARAMETER["standard_parallel_1",30.28333333333333],'
'PARAMETER["standard_parallel_2",28.38333333333333],'
'PARAMETER["latitude_of_origin",27.83333333333333],'
'PARAMETER["central_meridian",-99],PARAMETER["false_easting",600000],'
'PARAMETER["false_northing",4000000],UNIT["metre",1,'
'AUTHORITY["EPSG","9001"]],AUTHORITY["EPSG","32140"]]',
)
| TestSRS |
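These fixtures are consumed by GDAL's SpatialReference wrapper; a minimal sketch of the attributes they exercise, assuming GDAL is installed and django.contrib.gis.gdal is importable:

from django.contrib.gis.gdal import SpatialReference

srs = SpatialReference("WGS84")  # well-known name; raw WKT works too
print(srs.projected, srs.geographic, srs.local)  # False True False
print(srs.name)                                  # WGS 84
print(srs["SPHEROID"])                           # node lookup by WKT path
print(srs.auth_name(None), srs.auth_code(None))  # top-level authority: EPSG 4326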
python | langchain-ai__langchain | libs/langchain/langchain_classic/output_parsers/pandas_dataframe.py | {
"start": 348,
"end": 7019
} | class ____(BaseOutputParser[dict[str, Any]]):
"""Parse an output using Pandas DataFrame format."""
"""The Pandas DataFrame to parse."""
dataframe: Any
@field_validator("dataframe")
@classmethod
def _validate_dataframe(cls, val: Any) -> Any:
import pandas as pd
if issubclass(type(val), pd.DataFrame):
return val
if pd.DataFrame(val).empty:
msg = "DataFrame cannot be empty."
raise ValueError(msg)
msg = "Wrong type for 'dataframe', must be a subclass \
of Pandas DataFrame (pd.DataFrame)"
raise TypeError(msg)
def parse_array(
self,
array: str,
original_request_params: str,
) -> tuple[list[int | str], str]:
"""Parse the array from the request parameters.
Args:
array: The array string to parse.
original_request_params: The original request parameters string.
Returns:
A tuple containing the parsed array and the stripped request parameters.
Raises:
OutputParserException: If the array format is invalid or cannot be parsed.
"""
parsed_array: list[int | str] = []
# Check if the format is [1,3,5]
if re.match(r"\[\d+(,\s*\d+)*\]", array):
parsed_array = [int(i) for i in re.findall(r"\d+", array)]
# Check if the format is [1..5]
elif re.match(r"\[(\d+)\.\.(\d+)\]", array):
match = re.match(r"\[(\d+)\.\.(\d+)\]", array)
if match:
start, end = map(int, match.groups())
parsed_array = list(range(start, end + 1))
else:
msg = f"Unable to parse the array provided in {array}. \
Please check the format instructions."
raise OutputParserException(msg)
# Check if the format is ["column_name"]
elif re.match(r"\[[a-zA-Z0-9_]+(?:,[a-zA-Z0-9_]+)*\]", array):
match = re.match(r"\[[a-zA-Z0-9_]+(?:,[a-zA-Z0-9_]+)*\]", array)
if match:
parsed_array = list(map(str, match.group().strip("[]").split(",")))
else:
msg = f"Unable to parse the array provided in {array}. \
Please check the format instructions."
raise OutputParserException(msg)
# Validate the array
if not parsed_array:
msg = f"Invalid array format in '{original_request_params}'. \
Please check the format instructions."
raise OutputParserException(msg)
if (
isinstance(parsed_array[0], int)
and parsed_array[-1] > self.dataframe.index.max()
):
msg = f"The maximum index {parsed_array[-1]} exceeds the maximum index of \
the Pandas DataFrame {self.dataframe.index.max()}."
raise OutputParserException(msg)
return parsed_array, original_request_params.split("[")[0]
@override
def parse(self, request: str) -> dict[str, Any]:
stripped_request_params = None
splitted_request = request.strip().split(":")
if len(splitted_request) != 2: # noqa: PLR2004
msg = f"Request '{request}' is not correctly formatted. \
Please refer to the format instructions."
raise OutputParserException(msg)
result = {}
try:
request_type, request_params = splitted_request
if request_type in {"Invalid column", "Invalid operation"}:
msg = f"{request}. Please check the format instructions."
raise OutputParserException(msg)
array_exists = re.search(r"(\[.*?\])", request_params)
if array_exists:
parsed_array, stripped_request_params = self.parse_array(
array_exists.group(1),
request_params,
)
if request_type == "column":
filtered_df = self.dataframe[
self.dataframe.index.isin(parsed_array)
]
if len(parsed_array) == 1:
result[stripped_request_params] = filtered_df[
stripped_request_params
].iloc[parsed_array[0]]
else:
result[stripped_request_params] = filtered_df[
stripped_request_params
]
elif request_type == "row":
filtered_df = self.dataframe[
self.dataframe.columns.intersection(parsed_array)
]
if len(parsed_array) == 1:
result[stripped_request_params] = filtered_df.iloc[
int(stripped_request_params)
][parsed_array[0]]
else:
result[stripped_request_params] = filtered_df.iloc[
int(stripped_request_params)
]
else:
filtered_df = self.dataframe[
self.dataframe.index.isin(parsed_array)
]
result[request_type] = getattr(
filtered_df[stripped_request_params],
request_type,
)()
elif request_type == "column":
result[request_params] = self.dataframe[request_params]
elif request_type == "row":
result[request_params] = self.dataframe.iloc[int(request_params)]
else:
result[request_type] = getattr(
self.dataframe[request_params],
request_type,
)()
except (AttributeError, IndexError, KeyError) as e:
if request_type not in {"column", "row"}:
msg = f"Unsupported request type '{request_type}'. \
Please check the format instructions."
raise OutputParserException(msg) from e
msg = f"""Requested index {
request_params
if stripped_request_params is None
else stripped_request_params
} is out of bounds."""
raise OutputParserException(msg) from e
return result
@override
def get_format_instructions(self) -> str:
return PANDAS_DATAFRAME_FORMAT_INSTRUCTIONS.format(
columns=", ".join(self.dataframe.columns),
)
| PandasDataFrameOutputParser |
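The parse method above accepts request_type:request_params strings, optionally suffixed with an index array in [1,3,5] or [0..2] form. A minimal usage sketch, assuming pandas is installed and the module import path matches the file path shown in this entry:

import pandas as pd
from langchain_classic.output_parsers.pandas_dataframe import (
    PandasDataFrameOutputParser,
)

df = pd.DataFrame({"age": [21, 35, 40], "score": [0.5, 0.9, 0.7]})
parser = PandasDataFrameOutputParser(dataframe=df)

print(parser.parse("column:age"))      # {'age': <full 'age' column>}
print(parser.parse("row:1"))           # {'1': <row 1 as a Series>}
print(parser.parse("mean:age[0..2]"))  # {'mean': 32.0}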