language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | doocs__leetcode | solution/0900-0999/0937.Reorder Data in Log Files/Solution.py | {
"start": 0,
"end": 245
} | class ____:
def reorderLogFiles(self, logs: List[str]) -> List[str]:
def f(log: str):
id_, rest = log.split(" ", 1)
return (0, rest, id_) if rest[0].isalpha() else (1,)
return sorted(logs, key=f)
| Solution |
python | sympy__sympy | sympy/physics/quantum/state.py | {
"start": 1608,
"end": 7555
} | class ____(QExpr):
"""Abstract base class for general abstract states in quantum mechanics.
All other state classes defined will need to inherit from this class. It
carries the basic structure for all other states such as dual, _eval_adjoint
and label.
This is an abstract base class and you should not instantiate it directly,
instead use State.
"""
@classmethod
def _operators_to_state(self, ops, **options):
""" Returns the eigenstate instance for the passed operators.
This method should be overridden in subclasses. It will handle being
passed either an Operator instance or set of Operator instances. It
should return the corresponding state INSTANCE or simply raise a
NotImplementedError. See cartesian.py for an example.
"""
raise NotImplementedError("Cannot map operators to states in this class. Method not implemented!")
def _state_to_operators(self, op_classes, **options):
""" Returns the operators which this state instance is an eigenstate
of.
This method should be overridden in subclasses. It will be called on
state instances and be passed the operator classes that we wish to make
into instances. The state instance will then transform the classes
appropriately, or raise a NotImplementedError if it cannot return
operator instances. See cartesian.py for examples,
"""
raise NotImplementedError(
"Cannot map this state to operators. Method not implemented!")
@property
def operators(self):
"""Return the operator(s) that this state is an eigenstate of"""
from .operatorset import state_to_operators # import internally to avoid circular import errors
return state_to_operators(self)
def _enumerate_state(self, num_states, **options):
raise NotImplementedError("Cannot enumerate this state!")
def _represent_default_basis(self, **options):
return self._represent(basis=self.operators)
def _apply_operator(self, op, **options):
return None
#-------------------------------------------------------------------------
# Dagger/dual
#-------------------------------------------------------------------------
@property
def dual(self):
"""Return the dual state of this one."""
return self.dual_class()._new_rawargs(self.hilbert_space, *self.args)
@classmethod
def dual_class(self):
"""Return the class used to construct the dual."""
raise NotImplementedError(
'dual_class must be implemented in a subclass'
)
def _eval_adjoint(self):
"""Compute the dagger of this state using the dual."""
return self.dual
#-------------------------------------------------------------------------
# Printing
#-------------------------------------------------------------------------
def _pretty_brackets(self, height, use_unicode=True):
# Return pretty printed brackets for the state
# Ideally, this could be done by pform.parens but it does not support the angled < and >
# Setup for unicode vs ascii
if use_unicode:
lbracket, rbracket = getattr(self, 'lbracket_ucode', ""), getattr(self, 'rbracket_ucode', "")
slash, bslash, vert = '\N{BOX DRAWINGS LIGHT DIAGONAL UPPER RIGHT TO LOWER LEFT}', \
'\N{BOX DRAWINGS LIGHT DIAGONAL UPPER LEFT TO LOWER RIGHT}', \
'\N{BOX DRAWINGS LIGHT VERTICAL}'
else:
lbracket, rbracket = getattr(self, 'lbracket', ""), getattr(self, 'rbracket', "")
slash, bslash, vert = '/', '\\', '|'
# If height is 1, just return brackets
if height == 1:
return stringPict(lbracket), stringPict(rbracket)
# Make height even
height += (height % 2)
brackets = []
for bracket in lbracket, rbracket:
# Create left bracket
if bracket in {_lbracket, _lbracket_ucode}:
bracket_args = [ ' ' * (height//2 - i - 1) +
slash for i in range(height // 2)]
bracket_args.extend(
[' ' * i + bslash for i in range(height // 2)])
# Create right bracket
elif bracket in {_rbracket, _rbracket_ucode}:
bracket_args = [ ' ' * i + bslash for i in range(height // 2)]
bracket_args.extend([ ' ' * (
height//2 - i - 1) + slash for i in range(height // 2)])
# Create straight bracket
elif bracket in {_straight_bracket, _straight_bracket_ucode}:
bracket_args = [vert] * height
else:
raise ValueError(bracket)
brackets.append(
stringPict('\n'.join(bracket_args), baseline=height//2))
return brackets
def _sympystr(self, printer, *args):
contents = self._print_contents(printer, *args)
return '%s%s%s' % (getattr(self, 'lbracket', ""), contents, getattr(self, 'rbracket', ""))
def _pretty(self, printer, *args):
from sympy.printing.pretty.stringpict import prettyForm
# Get brackets
pform = self._print_contents_pretty(printer, *args)
lbracket, rbracket = self._pretty_brackets(
pform.height(), printer._use_unicode)
# Put together state
pform = prettyForm(*pform.left(lbracket))
pform = prettyForm(*pform.right(rbracket))
return pform
def _latex(self, printer, *args):
contents = self._print_contents_latex(printer, *args)
# The extra {} brackets are needed to get matplotlib's latex
# rendered to render this properly.
return '{%s%s%s}' % (getattr(self, 'lbracket_latex', ""), contents, getattr(self, 'rbracket_latex', ""))
| StateBase |
python | tornadoweb__tornado | tornado/httpclient.py | {
"start": 28784,
"end": 30195
} | class ____(Exception):
"""Exception thrown for an unsuccessful HTTP request.
Attributes:
* ``code`` - HTTP error integer error code, e.g. 404. Error code 599 is
used when no HTTP response was received, e.g. for a timeout.
* ``response`` - `HTTPResponse` object, if any.
Note that if ``follow_redirects`` is False, redirects become HTTPErrors,
and you can look at ``error.response.headers['Location']`` to see the
destination of the redirect.
.. versionchanged:: 5.1
Renamed from ``HTTPError`` to ``HTTPClientError`` to avoid collisions with
`tornado.web.HTTPError`. The name ``tornado.httpclient.HTTPError`` remains
as an alias.
"""
def __init__(
self,
code: int,
message: Optional[str] = None,
response: Optional[HTTPResponse] = None,
) -> None:
self.code = code
self.message = message or httputil.responses.get(code, "Unknown")
self.response = response
super().__init__(code, message, response)
def __str__(self) -> str:
return "HTTP %d: %s" % (self.code, self.message)
# There is a cyclic reference between self and self.response,
# which breaks the default __repr__ implementation.
# (especially on pypy, which doesn't have the same recursion
# detection as cpython).
__repr__ = __str__
HTTPError = HTTPClientError
| HTTPClientError |
python | python-pillow__Pillow | src/PIL/GribStubImagePlugin.py | {
"start": 720,
"end": 1759
} | class ____(ImageFile.StubImageFile):
format = "GRIB"
format_description = "GRIB"
def _open(self) -> None:
if not _accept(self.fp.read(8)):
msg = "Not a GRIB file"
raise SyntaxError(msg)
self.fp.seek(-8, os.SEEK_CUR)
# make something up
self._mode = "F"
self._size = 1, 1
loader = self._load()
if loader:
loader.open(self)
def _load(self) -> ImageFile.StubHandler | None:
return _handler
def _save(im: Image.Image, fp: IO[bytes], filename: str | bytes) -> None:
if _handler is None or not hasattr(_handler, "save"):
msg = "GRIB save handler not installed"
raise OSError(msg)
_handler.save(im, fp, filename)
# --------------------------------------------------------------------
# Registry
Image.register_open(GribStubImageFile.format, GribStubImageFile, _accept)
Image.register_save(GribStubImageFile.format, _save)
Image.register_extension(GribStubImageFile.format, ".grib")
| GribStubImageFile |
python | numpy__numpy | tools/swig/test/testVector.py | {
"start": 13556,
"end": 15145
} | class ____(VectorTestCase):
def __init__(self, methodName="runTest"):
VectorTestCase.__init__(self, methodName)
self.typeStr = "double"
self.typeCode = "d"
######################################################################
if __name__ == "__main__":
# Build the test suite
suite = unittest.TestSuite()
suite.addTest(unittest.TestLoader().loadTestsFromTestCase( scharTestCase))
suite.addTest(unittest.TestLoader().loadTestsFromTestCase( ucharTestCase))
suite.addTest(unittest.TestLoader().loadTestsFromTestCase( shortTestCase))
suite.addTest(unittest.TestLoader().loadTestsFromTestCase( ushortTestCase))
suite.addTest(unittest.TestLoader().loadTestsFromTestCase( intTestCase))
suite.addTest(unittest.TestLoader().loadTestsFromTestCase( uintTestCase))
suite.addTest(unittest.TestLoader().loadTestsFromTestCase( longTestCase))
suite.addTest(unittest.TestLoader().loadTestsFromTestCase( ulongTestCase))
suite.addTest(unittest.TestLoader().loadTestsFromTestCase( longLongTestCase))
suite.addTest(unittest.TestLoader().loadTestsFromTestCase(ulongLongTestCase))
suite.addTest(unittest.TestLoader().loadTestsFromTestCase( floatTestCase))
suite.addTest(unittest.TestLoader().loadTestsFromTestCase( doubleTestCase))
# Execute the test suite
print("Testing 1D Functions of Module Vector")
print("NumPy version", np.__version__)
print()
result = unittest.TextTestRunner(verbosity=2).run(suite)
sys.exit(bool(result.errors + result.failures))
| doubleTestCase |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/flake8_boolean_trap/FBT.py | {
"start": 2807,
"end": 3312
} | class ____:
force: InitVar[bool] = False
def __post_init__(self, force: bool) -> None:
print(force)
Fit(force=True)
# https://github.com/astral-sh/ruff/issues/10356
from django.db.models import Case, Q, Value, When
qs.annotate(
is_foo_or_bar=Case(
When(Q(is_foo=True) | Q(is_bar=True)),
then=Value(True),
),
default=Value(False),
)
# https://github.com/astral-sh/ruff/issues/10485
from pydantic import Field
from pydantic_settings import BaseSettings
| Fit |
python | realpython__materials | hashtable/04_load_factor/hashtable.py | {
"start": 127,
"end": 3698
} | class ____:
@classmethod
def from_dict(cls, dictionary, capacity=None):
hash_table = cls(capacity or len(dictionary))
for key, value in dictionary.items():
hash_table[key] = value
return hash_table
def __init__(self, capacity=8, load_factor_threshold=0.6):
if capacity < 1:
raise ValueError("Capacity must be a positive number")
if not (0 < load_factor_threshold <= 1):
raise ValueError("Load factor must be a number between (0, 1]")
self._slots = capacity * [None]
self._load_factor_threshold = load_factor_threshold
def __len__(self):
return len(self.pairs)
def __iter__(self):
yield from self.keys
def __delitem__(self, key):
for index, pair in self._probe(key):
if pair is None:
raise KeyError(key)
if pair is DELETED:
continue
if pair.key == key:
self._slots[index] = DELETED
break
else:
raise KeyError(key)
def __setitem__(self, key, value):
if self.load_factor >= self._load_factor_threshold:
self._resize_and_rehash()
for index, pair in self._probe(key):
if pair is DELETED:
continue
if pair is None or pair.key == key:
self._slots[index] = Pair(key, value)
break
def __getitem__(self, key):
for _, pair in self._probe(key):
if pair is None:
raise KeyError(key)
if pair is DELETED:
continue
if pair.key == key:
return pair.value
raise KeyError(key)
def __contains__(self, key):
try:
self[key]
except KeyError:
return False
else:
return True
def __eq__(self, other):
if self is other:
return True
if type(self) is not type(other):
return False
return set(self.pairs) == set(other.pairs)
def __str__(self):
pairs = []
for key, value in self.pairs:
pairs.append(f"{key!r}: {value!r}")
return "{" + ", ".join(pairs) + "}"
def __repr__(self):
cls = self.__class__.__name__
return f"{cls}.from_dict({str(self)})"
def copy(self):
return HashTable.from_dict(dict(self.pairs), self.capacity)
def get(self, key, default=None):
try:
return self[key]
except KeyError:
return default
@property
def pairs(self):
return {pair for pair in self._slots if pair not in (None, DELETED)}
@property
def values(self):
return [pair.value for pair in self.pairs]
@property
def keys(self):
return {pair.key for pair in self.pairs}
@property
def capacity(self):
return len(self._slots)
@property
def load_factor(self):
occupied_or_deleted = [slot for slot in self._slots if slot]
return len(occupied_or_deleted) / self.capacity
def _index(self, key):
return hash(key) % self.capacity
def _probe(self, key):
index = self._index(key)
for _ in range(self.capacity):
yield index, self._slots[index]
index = (index + 1) % self.capacity
def _resize_and_rehash(self):
copy = HashTable(capacity=self.capacity * 2)
for key, value in self.pairs:
copy[key] = value
self._slots = copy._slots
| HashTable |
python | django__django | tests/admin_views/models.py | {
"start": 25343,
"end": 25542
} | class ____(models.Model):
work_at = models.ForeignKey(Restaurant, models.CASCADE)
name = models.CharField(max_length=50)
surname = models.CharField(max_length=50)
# Models for #23329
| Worker |
python | HypothesisWorks__hypothesis | hypothesis-python/src/hypothesis/core.py | {
"start": 67007,
"end": 100219
} | class ____:
"""This object is provided as the .hypothesis attribute on @given tests.
Downstream users can reassign its attributes to insert custom logic into
the execution of each case, for example by converting an async into a
sync function.
This must be an attribute of an attribute, because reassignment of a
first-level attribute would not be visible to Hypothesis if the function
had been decorated before the assignment.
See https://github.com/HypothesisWorks/hypothesis/issues/1257 for more
information.
"""
inner_test: Any
_get_fuzz_target: Any
_given_kwargs: Any
@property
def fuzz_one_input(
self,
) -> Callable[[bytes | bytearray | memoryview | BinaryIO], bytes | None]:
"""
Run the test as a fuzz target, driven with the ``buffer`` of bytes.
Depending on the passed ``buffer`` one of three things will happen:
* If the bytestring was invalid, for example because it was too short or was
filtered out by |assume| or |.filter|, |fuzz_one_input| returns ``None``.
* If the bytestring was valid and the test passed, |fuzz_one_input| returns
a canonicalised and pruned bytestring which will replay that test case.
This is provided as an option to improve the performance of mutating
fuzzers, but can safely be ignored.
* If the test *failed*, i.e. raised an exception, |fuzz_one_input| will
add the pruned buffer to :ref:`the Hypothesis example database <database>`
and then re-raise that exception. All you need to do to reproduce,
minimize, and de-duplicate all the failures found via fuzzing is run
your test suite!
To reduce the performance impact of database writes, |fuzz_one_input| only
records failing inputs which would be valid shrinks for a known failure -
meaning writes are somewhere between constant and log(N) rather than linear
in runtime. However, this tracking only works within a persistent fuzzing
process; for forkserver fuzzers we recommend ``database=None`` for the main
run, and then replaying with a database enabled if you need to analyse
failures.
Note that the interpretation of both input and output bytestrings is
specific to the exact version of Hypothesis you are using and the strategies
given to the test, just like the :ref:`database <database>` and
|@reproduce_failure|.
Interaction with |@settings|
----------------------------
|fuzz_one_input| uses just enough of Hypothesis' internals to drive your
test function with a bytestring, and most settings therefore have no effect
in this mode. We recommend running your tests the usual way before fuzzing
to get the benefits of health checks, as well as afterwards to replay,
shrink, deduplicate, and report whatever errors were discovered.
* |settings.database| *is* used by |fuzz_one_input| - adding failures to
the database to be replayed when
you next run your tests is our preferred reporting mechanism and response
to `the 'fuzzer taming' problem <https://blog.regehr.org/archives/925>`__.
* |settings.verbosity| and |settings.stateful_step_count| work as usual.
* The |~settings.deadline|, |~settings.derandomize|, |~settings.max_examples|,
|~settings.phases|, |~settings.print_blob|, |~settings.report_multiple_bugs|,
and |~settings.suppress_health_check| settings do not affect |fuzz_one_input|.
Example Usage
-------------
.. code-block:: python
@given(st.text())
def test_foo(s): ...
# This is a traditional fuzz target - call it with a bytestring,
# or a binary IO object, and it runs the test once.
fuzz_target = test_foo.hypothesis.fuzz_one_input
# For example:
fuzz_target(b"\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00")
fuzz_target(io.BytesIO(b"\\x01"))
.. tip::
If you expect to discover many failures while using |fuzz_one_input|,
consider wrapping your database with |BackgroundWriteDatabase|, for
low-overhead writes of failures.
.. tip::
| Want an integrated workflow for your team's local tests, CI, and continuous fuzzing?
| Use `HypoFuzz <https://hypofuzz.com/>`__ to fuzz your whole test suite, and find more bugs with the same tests!
.. seealso::
See also the :doc:`/how-to/external-fuzzers` how-to.
"""
# Note: most users, if they care about fuzzer performance, will access the
# property and assign it to a local variable to move the attribute lookup
# outside their fuzzing loop / before the fork point. We cache it anyway,
# so that naive or unusual use-cases get the best possible performance too.
try:
return self.__cached_target # type: ignore
except AttributeError:
self.__cached_target = self._get_fuzz_target()
return self.__cached_target
@overload
def given(
_: EllipsisType, /
) -> Callable[
[Callable[..., Coroutine[Any, Any, None] | None]], Callable[[], None]
]: # pragma: no cover
...
@overload
def given(
*_given_arguments: SearchStrategy[Any],
) -> Callable[
[Callable[..., Coroutine[Any, Any, None] | None]], Callable[..., None]
]: # pragma: no cover
...
@overload
def given(
**_given_kwargs: SearchStrategy[Any] | EllipsisType,
) -> Callable[
[Callable[..., Coroutine[Any, Any, None] | None]], Callable[..., None]
]: # pragma: no cover
...
def given(
*_given_arguments: SearchStrategy[Any] | EllipsisType,
**_given_kwargs: SearchStrategy[Any] | EllipsisType,
) -> Callable[[Callable[..., Coroutine[Any, Any, None] | None]], Callable[..., None]]:
"""
The |@given| decorator turns a function into a Hypothesis test. This is the
main entry point to Hypothesis.
.. seealso::
See also the :doc:`/tutorial/introduction` tutorial, which introduces
defining Hypothesis tests with |@given|.
.. _given-arguments:
Arguments to ``@given``
-----------------------
Arguments to |@given| may be either positional or keyword arguments:
.. code-block:: python
@given(st.integers(), st.floats())
def test_one(x, y):
pass
@given(x=st.integers(), y=st.floats())
def test_two(x, y):
pass
If using keyword arguments, the arguments may appear in any order, as with
standard Python functions:
.. code-block:: python
# different order, but still equivalent to before
@given(y=st.floats(), x=st.integers())
def test(x, y):
assert isinstance(x, int)
assert isinstance(y, float)
If |@given| is provided fewer positional arguments than the decorated test,
the test arguments are filled in on the right side, leaving the leftmost
positional arguments unfilled:
.. code-block:: python
@given(st.integers(), st.floats())
def test(manual_string, y, z):
assert manual_string == "x"
assert isinstance(y, int)
assert isinstance(z, float)
# `test` is now a callable which takes one argument `manual_string`
test("x")
# or equivalently:
test(manual_string="x")
The reason for this "from the right" behavior is to support using |@given|
with instance methods, by automatically passing through ``self``:
.. code-block:: python
class MyTest(TestCase):
@given(st.integers())
def test(self, x):
assert isinstance(self, MyTest)
assert isinstance(x, int)
If (and only if) using keyword arguments, |@given| may be combined with
``**kwargs`` or ``*args``:
.. code-block:: python
@given(x=integers(), y=integers())
def test(x, **kwargs):
assert "y" in kwargs
@given(x=integers(), y=integers())
def test(x, *args, **kwargs):
assert args == ()
assert "x" not in kwargs
assert "y" in kwargs
It is an error to:
* Mix positional and keyword arguments to |@given|.
* Use |@given| with a function that has a default value for an argument.
* Use |@given| with positional arguments with a function that uses ``*args``,
``**kwargs``, or keyword-only arguments.
The function returned by given has all the same arguments as the original
test, minus those that are filled in by |@given|. See the :ref:`notes on
framework compatibility <framework-compatibility>` for how this interacts
with features of other testing libraries, such as :pypi:`pytest` fixtures.
"""
if currently_in_test_context():
fail_health_check(
Settings(),
"Nesting @given tests results in quadratic generation and shrinking "
"behavior, and can usually be more cleanly expressed by replacing the "
"inner function with an st.data() parameter on the outer @given."
"\n\n"
"If it is difficult or impossible to refactor this test to remove the "
"nested @given, you can disable this health check with "
"@settings(suppress_health_check=[HealthCheck.nested_given]) on the "
"outer @given. See "
"https://hypothesis.readthedocs.io/en/latest/reference/api.html#hypothesis.HealthCheck "
"for details.",
HealthCheck.nested_given,
)
def run_test_as_given(test):
if inspect.isclass(test):
# Provide a meaningful error to users, instead of exceptions from
# internals that assume we're dealing with a function.
raise InvalidArgument("@given cannot be applied to a class")
if (
"_pytest" in sys.modules
and "_pytest.fixtures" in sys.modules
and (
tuple(map(int, sys.modules["_pytest"].__version__.split(".")[:2]))
>= (8, 4)
)
and isinstance(
test, sys.modules["_pytest.fixtures"].FixtureFunctionDefinition
)
): # pragma: no cover # covered by pytest/test_fixtures, but not by cover/
raise InvalidArgument("@given cannot be applied to a pytest fixture")
given_arguments = tuple(_given_arguments)
given_kwargs = dict(_given_kwargs)
original_sig = get_signature(test)
if given_arguments == (Ellipsis,) and not given_kwargs:
# user indicated that they want to infer all arguments
given_kwargs = {
p.name: Ellipsis
for p in original_sig.parameters.values()
if p.kind in (p.POSITIONAL_OR_KEYWORD, p.KEYWORD_ONLY)
}
given_arguments = ()
check_invalid = is_invalid_test(
test, original_sig, given_arguments, given_kwargs
)
# If the argument check found problems, return a dummy test function
# that will raise an error if it is actually called.
if check_invalid is not None:
return check_invalid
# Because the argument check succeeded, we can convert @given's
# positional arguments into keyword arguments for simplicity.
if given_arguments:
assert not given_kwargs
posargs = [
p.name
for p in original_sig.parameters.values()
if p.kind is p.POSITIONAL_OR_KEYWORD
]
given_kwargs = dict(
list(zip(posargs[::-1], given_arguments[::-1], strict=False))[::-1]
)
# These have been converted, so delete them to prevent accidental use.
del given_arguments
new_signature = new_given_signature(original_sig, given_kwargs)
# Use type information to convert "infer" arguments into appropriate strategies.
if ... in given_kwargs.values():
hints = get_type_hints(test)
for name in [name for name, value in given_kwargs.items() if value is ...]:
if name not in hints:
return _invalid(
f"passed {name}=... for {test.__name__}, but {name} has "
"no type annotation",
test=test,
given_kwargs=given_kwargs,
)
given_kwargs[name] = st.from_type(hints[name])
# only raise if the same thread uses two different executors, not if two
# different threads use different executors.
thread_local = ThreadLocal(prev_self=lambda: not_set)
# maps thread_id to whether that thread overlaps in execution with any
# other thread in this @given. We use this to detect whether an @given is
# being run from multiple different threads at once, which informs
# decisions like whether to raise DeadlineExceeded or HealthCheck.too_slow.
thread_overlap: dict[int, bool] = {}
thread_overlap_lock = Lock()
@impersonate(test)
@define_function_signature(test.__name__, test.__doc__, new_signature)
def wrapped_test(*arguments, **kwargs):
# Tell pytest to omit the body of this function from tracebacks
__tracebackhide__ = True
with thread_overlap_lock:
for overlap_thread_id in thread_overlap:
thread_overlap[overlap_thread_id] = True
threadid = threading.get_ident()
# if there are existing threads when this thread starts, then
# this thread starts at an overlapped state.
has_existing_threads = len(thread_overlap) > 0
thread_overlap[threadid] = has_existing_threads
try:
test = wrapped_test.hypothesis.inner_test
if getattr(test, "is_hypothesis_test", False):
raise InvalidArgument(
f"You have applied @given to the test {test.__name__} more than "
"once, which wraps the test several times and is extremely slow. "
"A similar effect can be gained by combining the arguments "
"of the two calls to given. For example, instead of "
"@given(booleans()) @given(integers()), you could write "
"@given(booleans(), integers())"
)
settings = wrapped_test._hypothesis_internal_use_settings
random = get_random_for_wrapped_test(test, wrapped_test)
arguments, kwargs, stuff = process_arguments_to_given(
wrapped_test,
arguments,
kwargs,
given_kwargs,
new_signature.parameters,
)
if (
inspect.iscoroutinefunction(test)
and get_executor(stuff.selfy) is default_executor
):
# See https://github.com/HypothesisWorks/hypothesis/issues/3054
# If our custom executor doesn't handle coroutines, or we return an
# awaitable from a non-async-def function, we just rely on the
# return_value health check. This catches most user errors though.
raise InvalidArgument(
"Hypothesis doesn't know how to run async test functions like "
f"{test.__name__}. You'll need to write a custom executor, "
"or use a library like pytest-asyncio or pytest-trio which can "
"handle the translation for you.\n See https://hypothesis."
"readthedocs.io/en/latest/details.html#custom-function-execution"
)
runner = stuff.selfy
if isinstance(stuff.selfy, TestCase) and test.__name__ in dir(TestCase):
fail_health_check(
settings,
f"You have applied @given to the method {test.__name__}, which is "
"used by the unittest runner but is not itself a test. "
"This is not useful in any way.",
HealthCheck.not_a_test_method,
)
if bad_django_TestCase(runner): # pragma: no cover
# Covered by the Django tests, but not the pytest coverage task
raise InvalidArgument(
"You have applied @given to a method on "
f"{type(runner).__qualname__}, but this "
"class does not inherit from the supported versions in "
"`hypothesis.extra.django`. Use the Hypothesis variants "
"to ensure that each example is run in a separate "
"database transaction."
)
nonlocal thread_local
# Check selfy really is self (not e.g. a mock) before we health-check
cur_self = (
stuff.selfy
if getattr(type(stuff.selfy), test.__name__, None) is wrapped_test
else None
)
if thread_local.prev_self is not_set:
thread_local.prev_self = cur_self
elif cur_self is not thread_local.prev_self:
fail_health_check(
settings,
f"The method {test.__qualname__} was called from multiple "
"different executors. This may lead to flaky tests and "
"nonreproducible errors when replaying from database."
"\n\n"
"Unlike most health checks, HealthCheck.differing_executors "
"warns about a correctness issue with your test. We "
"therefore recommend fixing the underlying issue, rather "
"than suppressing this health check. However, if you are "
"confident this health check can be safely disabled, you can "
"do so with "
"@settings(suppress_health_check=[HealthCheck.differing_executors]). "
"See "
"https://hypothesis.readthedocs.io/en/latest/reference/api.html#hypothesis.HealthCheck "
"for details.",
HealthCheck.differing_executors,
)
state = StateForActualGivenExecution(
stuff,
test,
settings,
random,
wrapped_test,
thread_overlap=thread_overlap,
)
# If there was a @reproduce_failure decorator, use it to reproduce
# the error (or complain that we couldn't). Either way, this will
# always raise some kind of error.
if (
reproduce_failure := wrapped_test._hypothesis_internal_use_reproduce_failure
) is not None:
expected_version, failure = reproduce_failure
if expected_version != __version__:
raise InvalidArgument(
"Attempting to reproduce a failure from a different "
f"version of Hypothesis. This failure is from {expected_version}, but "
f"you are currently running {__version__!r}. Please change your "
"Hypothesis version to a matching one."
)
try:
state.execute_once(
ConjectureData.for_choices(decode_failure(failure)),
print_example=True,
is_final=True,
)
raise DidNotReproduce(
"Expected the test to raise an error, but it "
"completed successfully."
)
except StopTest:
raise DidNotReproduce(
"The shape of the test data has changed in some way "
"from where this blob was defined. Are you sure "
"you're running the same test?"
) from None
except UnsatisfiedAssumption:
raise DidNotReproduce(
"The test data failed to satisfy an assumption in the "
"test. Have you added it since this blob was generated?"
) from None
# There was no @reproduce_failure, so start by running any explicit
# examples from @example decorators.
if errors := list(
execute_explicit_examples(
state, wrapped_test, arguments, kwargs, original_sig
)
):
# If we're not going to report multiple bugs, we would have
# stopped running explicit examples at the first failure.
assert len(errors) == 1 or state.settings.report_multiple_bugs
# If an explicit example raised a 'skip' exception, ensure it's never
# wrapped up in an exception group. Because we break out of the loop
# immediately on finding a skip, if present it's always the last error.
if isinstance(errors[-1][1], skip_exceptions_to_reraise()):
# Covered by `test_issue_3453_regression`, just in a subprocess.
del errors[:-1] # pragma: no cover
_raise_to_user(errors, state.settings, [], " in explicit examples")
# If there were any explicit examples, they all ran successfully.
# The next step is to use the Conjecture engine to run the test on
# many different inputs.
ran_explicit_examples = (
Phase.explicit in state.settings.phases
and getattr(wrapped_test, "hypothesis_explicit_examples", ())
)
SKIP_BECAUSE_NO_EXAMPLES = unittest.SkipTest(
"Hypothesis has been told to run no examples for this test."
)
if not (
Phase.reuse in settings.phases or Phase.generate in settings.phases
):
if not ran_explicit_examples:
raise SKIP_BECAUSE_NO_EXAMPLES
return
try:
if isinstance(runner, TestCase) and hasattr(runner, "subTest"):
subTest = runner.subTest
try:
runner.subTest = types.MethodType(fake_subTest, runner)
state.run_engine()
finally:
runner.subTest = subTest
else:
state.run_engine()
except BaseException as e:
# The exception caught here should either be an actual test
# failure (or BaseExceptionGroup), or some kind of fatal error
# that caused the engine to stop.
generated_seed = (
wrapped_test._hypothesis_internal_use_generated_seed
)
with local_settings(settings):
if not (state.failed_normally or generated_seed is None):
if running_under_pytest:
report(
f"You can add @seed({generated_seed}) to this test or "
f"run pytest with --hypothesis-seed={generated_seed} "
"to reproduce this failure."
)
else:
report(
f"You can add @seed({generated_seed}) to this test to "
"reproduce this failure."
)
# The dance here is to avoid showing users long tracebacks
# full of Hypothesis internals they don't care about.
# We have to do this inline, to avoid adding another
# internal stack frame just when we've removed the rest.
#
# Using a variable for our trimmed error ensures that the line
# which will actually appear in tracebacks is as clear as
# possible - "raise the_error_hypothesis_found".
the_error_hypothesis_found = e.with_traceback(
None
if isinstance(e, BaseExceptionGroup)
else get_trimmed_traceback()
)
raise the_error_hypothesis_found
if not (ran_explicit_examples or state.ever_executed):
raise SKIP_BECAUSE_NO_EXAMPLES
finally:
with thread_overlap_lock:
del thread_overlap[threadid]
def _get_fuzz_target() -> (
Callable[[bytes | bytearray | memoryview | BinaryIO], bytes | None]
):
# Because fuzzing interfaces are very performance-sensitive, we use a
# somewhat more complicated structure here. `_get_fuzz_target()` is
# called by the `HypothesisHandle.fuzz_one_input` property, allowing
# us to defer our collection of the settings, random instance, and
# reassignable `inner_test` (etc) until `fuzz_one_input` is accessed.
#
# We then share the performance cost of setting up `state` between
# many invocations of the target. We explicitly force `deadline=None`
# for performance reasons, saving ~40% the runtime of an empty test.
test = wrapped_test.hypothesis.inner_test
settings = Settings(
parent=wrapped_test._hypothesis_internal_use_settings, deadline=None
)
random = get_random_for_wrapped_test(test, wrapped_test)
_args, _kwargs, stuff = process_arguments_to_given(
wrapped_test, (), {}, given_kwargs, new_signature.parameters
)
assert not _args
assert not _kwargs
state = StateForActualGivenExecution(
stuff,
test,
settings,
random,
wrapped_test,
thread_overlap=thread_overlap,
)
database_key = function_digest(test) + b".secondary"
# We track the minimal-so-far example for each distinct origin, so
# that we track log-n instead of n examples for long runs. In particular
# it means that we saturate for common errors in long runs instead of
# storing huge volumes of low-value data.
minimal_failures: dict = {}
def fuzz_one_input(
buffer: bytes | bytearray | memoryview | BinaryIO,
) -> bytes | None:
# This inner part is all that the fuzzer will actually run,
# so we keep it as small and as fast as possible.
if isinstance(buffer, io.IOBase):
buffer = buffer.read(BUFFER_SIZE)
assert isinstance(buffer, (bytes, bytearray, memoryview))
data = ConjectureData(
random=None,
provider=BytestringProvider,
provider_kw={"bytestring": buffer},
)
try:
state.execute_once(data)
status = Status.VALID
except StopTest:
status = data.status
return None
except UnsatisfiedAssumption:
status = Status.INVALID
return None
except BaseException:
known = minimal_failures.get(data.interesting_origin)
if settings.database is not None and (
known is None or sort_key(data.nodes) <= sort_key(known)
):
settings.database.save(
database_key, choices_to_bytes(data.choices)
)
minimal_failures[data.interesting_origin] = data.nodes
status = Status.INTERESTING
raise
finally:
if observability_enabled():
data.freeze()
tc = make_testcase(
run_start=state._start_timestamp,
property=state.test_identifier,
data=data,
how_generated="fuzz_one_input",
representation=state._string_repr,
arguments=data._observability_args,
timing=state._timing_features,
coverage=None,
status=status,
backend_metadata=data.provider.observe_test_case(),
)
deliver_observation(tc)
state._timing_features = {}
assert isinstance(data.provider, BytestringProvider)
return bytes(data.provider.drawn)
fuzz_one_input.__doc__ = HypothesisHandle.fuzz_one_input.__doc__
return fuzz_one_input
# After having created the decorated test function, we need to copy
# over some attributes to make the switch as seamless as possible.
for attrib in dir(test):
if not (attrib.startswith("_") or hasattr(wrapped_test, attrib)):
setattr(wrapped_test, attrib, getattr(test, attrib))
wrapped_test.is_hypothesis_test = True
if hasattr(test, "_hypothesis_internal_settings_applied"):
# Used to check if @settings is applied twice.
wrapped_test._hypothesis_internal_settings_applied = True
wrapped_test._hypothesis_internal_use_seed = getattr(
test, "_hypothesis_internal_use_seed", None
)
wrapped_test._hypothesis_internal_use_settings = (
getattr(test, "_hypothesis_internal_use_settings", None) or Settings.default
)
wrapped_test._hypothesis_internal_use_reproduce_failure = getattr(
test, "_hypothesis_internal_use_reproduce_failure", None
)
wrapped_test.hypothesis = HypothesisHandle(test, _get_fuzz_target, given_kwargs)
return wrapped_test
return run_test_as_given
def find(
specifier: SearchStrategy[Ex],
condition: Callable[[Any], bool],
*,
settings: Settings | None = None,
random: Random | None = None,
database_key: bytes | None = None,
) -> Ex:
"""Returns the minimal example from the given strategy ``specifier`` that
matches the predicate function ``condition``."""
if settings is None:
settings = Settings(max_examples=2000)
settings = Settings(
settings, suppress_health_check=list(HealthCheck), report_multiple_bugs=False
)
if database_key is None and settings.database is not None:
# Note: The database key is not guaranteed to be unique. If not, replaying
# of database examples may fail to reproduce due to being replayed on the
# wrong condition.
database_key = function_digest(condition)
if not isinstance(specifier, SearchStrategy):
raise InvalidArgument(
f"Expected SearchStrategy but got {specifier!r} of "
f"type {type(specifier).__name__}"
)
specifier.validate()
last: list[Ex] = []
@settings
@given(specifier)
def test(v):
if condition(v):
last[:] = [v]
raise Found
if random is not None:
test = seed(random.getrandbits(64))(test)
test._hypothesis_internal_database_key = database_key # type: ignore
try:
test()
except Found:
return last[0]
raise NoSuchExample(get_pretty_function_description(condition))
| HypothesisHandle |
python | getsentry__sentry | src/sentry/api/endpoints/project_transaction_threshold.py | {
"start": 1342,
"end": 4477
} | class ____(ProjectEndpoint):
owner = ApiOwner.DATA_BROWSING
publish_status = {
"DELETE": ApiPublishStatus.PRIVATE,
"GET": ApiPublishStatus.PRIVATE,
"POST": ApiPublishStatus.PRIVATE,
}
permission_classes = (ProjectSettingPermission,)
def has_feature(self, project, request):
return features.has(
"organizations:performance-view", project.organization, actor=request.user
)
def get(self, request: Request, project) -> Response:
if not self.has_feature(project, request):
return self.respond(status=status.HTTP_404_NOT_FOUND)
try:
project_threshold = ProjectTransactionThreshold.objects.get(
project=project,
organization=project.organization,
)
except ProjectTransactionThreshold.DoesNotExist:
return Response(
data={"projectId": str(project.id), **DEFAULT_THRESHOLD},
status=status.HTTP_200_OK,
)
return Response(
serialize(
project_threshold,
request.user,
),
status.HTTP_200_OK,
)
def post(self, request: Request, project) -> Response:
if not self.has_feature(project, request):
return self.respond(status=status.HTTP_404_NOT_FOUND)
serializer = ProjectTransactionThresholdSerializer(data=request.data)
if not serializer.is_valid():
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
data = serializer.validated_data
defaults = {"edited_by_id": request.user.id}
if data.get("threshold") is not None:
defaults["threshold"] = data.get("threshold")
if data.get("metric") is not None:
defaults["metric"] = data.get("metric")
project_threshold, created = ProjectTransactionThreshold.objects.update_or_create(
project=project,
organization=project.organization,
create_defaults={
"threshold": data.get("threshold", 300),
"metric": data.get("metric", TransactionMetric.DURATION.value),
"edited_by_id": request.user.id,
},
defaults=defaults,
)
return Response(
serialize(
project_threshold,
request.user,
),
status=status.HTTP_201_CREATED if created else status.HTTP_200_OK,
)
def delete(self, request: Request, project) -> Response:
if not self.has_feature(project, request):
return self.respond(status=status.HTTP_404_NOT_FOUND)
try:
project_threshold = ProjectTransactionThreshold.objects.get(
project=project,
organization=project.organization,
)
except ProjectTransactionThreshold.DoesNotExist:
return Response(status=status.HTTP_204_NO_CONTENT)
project_threshold.delete()
return Response(status=status.HTTP_204_NO_CONTENT)
| ProjectTransactionThresholdEndpoint |
python | pydantic__pydantic | tests/mypy/outputs/mypy-plugin-strict_ini/plugin_fail_baseConfig.py | {
"start": 8527,
"end": 8816
} | class ____(BaseModel):
x: int
y: str
class Config:
alias_generator = None
frozen = True
extra = Extra.forbid
frozenmodel = FrozenModel(x=1, y='b')
frozenmodel.y = 'a'
# MYPY: error: Property "y" defined in "FrozenModel" is read-only [misc]
| FrozenModel |
python | joke2k__faker | faker/providers/job/uk_UA/__init__.py | {
"start": 218,
"end": 3727
} | class ____(BaseProvider):
jobs = [
# А
"Агроном",
"Адвокат",
"Актор",
"Акушер",
"Антрополог",
"Архітектор",
"Археолог",
"Астронавт",
"Астроном",
"Астрофізик",
# Б
"Бібліограф",
"Біолог",
"Бізнесмен",
"Ботанік",
"Будник",
"Бухгалтер",
"Бібліотекар",
# В
"Ветеринар",
"Випробувач",
"Водій",
"Вчитель",
"Візажист",
# Г
"Географ",
"Геолог",
"Геофізик",
"Гицель",
"Гінеколог",
"Гірник",
"Гірничий інженер",
"Головний меркшейдер",
"Графік",
"Громадський діяч",
# Ґ
"Ґрунтознавець",
# Д
"Державний службовець",
"Дерун",
"Детектив",
"Дизайнер",
"Дипломат",
"Диригент",
"Доцент",
"Драматург",
"Ді-джей",
"Дантист",
# Е
"Економіст",
"Електрик",
"Електромонтер",
"Електромонтажник",
"Електрослюсар",
"Електротехнік",
"Епідеміолог",
"Етнограф",
# Є
"Євнух",
"Єгер",
# Ж
"Журналіст",
"Живописець",
# З
"Золотар",
"Зоолог",
# І
"Інженер",
"Історик",
# К
"Каскадер",
"Кінорежисер",
"Клавішник",
"Клоун",
"Композитор",
"Конструктор",
"Краєзнавець",
"Кушнір",
"Кіноактор",
"Кінокритик",
"Кінорежисер",
"Кур'єр",
"Кухар",
"Кінолог",
"Круп'є",
# Л
"Льотчик",
"Лікар",
"Літературознавець",
"Локсмайстер",
# М
"Математик",
"Машиніст",
"Медик",
"Менеджер",
"Мистецтвознавець",
"Мірошник",
"Мікробіолог",
"Мінералог",
"Мовознавець",
"Модель",
"Модельєр",
"Музикант",
"Музикознавець",
"Музичний редактор",
"Маркетолог",
"М'ясник",
# Н
"Намотувальник",
"Науковець",
"Няня",
"Нотаріус",
# П
"Палеонтолог",
"Паралегал",
"Парфюмер",
"Патологоанатом",
"Педагог",
"Пекар",
"Перекладач",
"Петрограф",
"Письменник",
"Піаніст",
"Підприємець",
"Пілот",
"Правник",
"Програміст",
"Провізор",
"Прокурор",
"Промисловець",
"Професор",
"Психолог",
"Публіцист",
"Продюсер",
"Паблік рилейшнз",
# Р
"Режисер",
"Різноробочий",
"Реабілітолог",
"Редактор",
"Реставратор",
"Ріелтор",
# С
"Сантехнік",
"Складальник",
"Скульптор",
"Соціолог",
"Співак",
"Сценарист",
"Стропальник",
"Стоматолог",
"Слідчий",
"Стиліст",
"Секретар",
# Ф
"Фармацевт",
"Фермер",
"Фізик",
"Філолог",
"Фольклорист",
"Фотограф",
"Фрілансер",
"Футболіст",
"Флорист",
# Х
"Хімік",
"Художник",
"Хореограф",
# Ш
"Шериф",
"Швачка",
"Штукатур",
# Ю
"Ювелір",
"Юрист",
]
| Provider |
python | ray-project__ray | python/ray/tune/tests/test_tune_restore.py | {
"start": 936,
"end": 1640
} | class ____(Callback):
def __init__(self, driver_semaphore, trainer_semaphore):
self.driver_semaphore = driver_semaphore
self.trainer_semaphore = trainer_semaphore
def on_step_end(self, iteration, trials, **info):
self.driver_semaphore.release() # Driver should continue
self.trainer_semaphore.acquire() # Wait until released
def _run(local_dir, driver_semaphore, trainer_semaphore):
def _train(config):
for i in range(7):
ray.tune.report(dict(val=i))
tune.run(
_train,
storage_path=local_dir,
name="interrupt",
callbacks=[SteppingCallback(driver_semaphore, trainer_semaphore)],
)
| SteppingCallback |
python | getsentry__sentry | src/sentry/preprod/api/models/project_preprod_build_details_models.py | {
"start": 2282,
"end": 2641
} | class ____(BaseModel):
state: Literal[PreprodArtifactSizeMetrics.SizeAnalysisState.FAILED] = (
PreprodArtifactSizeMetrics.SizeAnalysisState.FAILED
)
error_code: int
error_message: str
SizeInfo = Annotated[
SizeInfoPending | SizeInfoProcessing | SizeInfoCompleted | SizeInfoFailed,
Field(discriminator="state"),
]
| SizeInfoFailed |
python | pyqtgraph__pyqtgraph | pyqtgraph/flowchart/library/Display.py | {
"start": 218,
"end": 4038
} | class ____(Node):
"""Connection to PlotWidget. Will plot arrays, and display event lists."""
nodeName = 'PlotWidget'
sigPlotChanged = QtCore.Signal(object)
def __init__(self, name):
Node.__init__(self, name, terminals={'In': {'io': 'in', 'multi': True}})
self.plot = None # currently selected plot
self.plots = {} # list of available plots user may select from
self.ui = None
self.items = {}
def disconnected(self, localTerm, remoteTerm):
if localTerm is self['In'] and remoteTerm in self.items:
self.plot.removeItem(self.items[remoteTerm])
del self.items[remoteTerm]
def setPlot(self, plot):
#print "======set plot"
if plot == self.plot:
return
# clear data from previous plot
if self.plot is not None:
for vid in list(self.items.keys()):
self.plot.removeItem(self.items[vid])
del self.items[vid]
self.plot = plot
self.updateUi()
self.update()
self.sigPlotChanged.emit(self)
def getPlot(self):
return self.plot
def process(self, In, display=True):
if display and self.plot is not None:
items = set()
# Add all new input items to selected plot
for name, vals in In.items():
if vals is None:
continue
if type(vals) is not list:
vals = [vals]
for val in vals:
vid = id(val)
if vid in self.items and self.items[vid].scene() is self.plot.scene():
# Item is already added to the correct scene
# possible bug: what if two plots occupy the same scene? (should
# rarely be a problem because items are removed from a plot before
# switching).
items.add(vid)
else:
# Add the item to the plot, or generate a new item if needed.
if isinstance(val, QtWidgets.QGraphicsItem):
self.plot.addItem(val)
item = val
else:
item = self.plot.plot(val)
self.items[vid] = item
items.add(vid)
# Any left-over items that did not appear in the input must be removed
for vid in list(self.items.keys()):
if vid not in items:
self.plot.removeItem(self.items[vid])
del self.items[vid]
def processBypassed(self, args):
if self.plot is None:
return
for item in list(self.items.values()):
self.plot.removeItem(item)
self.items = {}
def ctrlWidget(self):
if self.ui is None:
self.ui = ComboBox()
self.ui.currentIndexChanged.connect(self.plotSelected)
self.updateUi()
return self.ui
def plotSelected(self, index):
self.setPlot(self.ui.value())
def setPlotList(self, plots):
"""
Specify the set of plots (PlotWidget or PlotItem) that the user may
select from.
*plots* must be a dictionary of {name: plot} pairs.
"""
self.plots = plots
self.updateUi()
def updateUi(self):
# sets list and automatically preserves previous selection
self.ui.setItems(self.plots)
try:
self.ui.setValue(self.plot)
except ValueError:
pass
| PlotWidgetNode |
python | getsentry__sentry | src/sentry/analytics/events/issue_mark_reviewed.py | {
"start": 76,
"end": 267
} | class ____(analytics.Event):
user_id: int | None = None
default_user_id: int
organization_id: int
group_id: int
analytics.register(IssueMarkReviewedEvent)
| IssueMarkReviewedEvent |
python | pytorch__pytorch | torch/_inductor/codegen/cuda/cutlass_utils.py | {
"start": 15415,
"end": 17111
} | class ____:
# Helper class for Benchmarking and Testing CUTLASS Kernels in isolation.
# Can be used to capture the sourcecode passed to CUDACodeCache.compile
def __init__(self):
self.sources = []
self._compile_patch = None
def __enter__(self, *args, **kwargs):
import unittest.mock as mock
import torch._inductor.codecache
_compile_method_orig = torch._inductor.codecache.CUDACodeCache.compile
def my_compile(
source_code, dst_file_ext, extra_args: Optional[list[str]] = None
):
self.sources.append(source_code)
return _compile_method_orig(source_code, dst_file_ext)
# pyrefly: ignore [bad-assignment]
self._compile_patch = mock.patch(
"torch._inductor.codecache.CUDACodeCache.compile", my_compile
)
self._compile_patch.__enter__(*args, **kwargs) # type: ignore[union-attr]
return self
def __exit__(self, *args, **kwargs):
self._compile_patch.__exit__(*args, **kwargs) # type: ignore[union-attr]
def cuda_standalone_runner_compile_command(srcpath: Path, exepath: Path):
# returns command string to compile a (captured) CUDA GEMM Kernel source to a standalone executable that's ready to run
# Passes the correct preprocessor define to nvcc to ensure the standalone runner is enabled.
from torch._inductor.codecache import cuda_compile_command
extra_args = ["-DGENERATE_STANDALONE_RUNNER=1", "-DCUTLASS_DEBUG_TRACE_LEVEL=1"]
compile_command = cuda_compile_command(
[str(srcpath)], str(exepath), "exe", extra_args=extra_args
)
return compile_command
| CUDACompileSourceCapturingContext |
python | django__django | tests/staticfiles_tests/test_management.py | {
"start": 19912,
"end": 23275
} | class ____(CollectionTestCase):
"""
Test warning in ``collectstatic`` output when a file is skipped because a
previous file was already written to the same path.
"""
# If this string is in the collectstatic output, it means the warning we're
# looking for was emitted.
warning_string = "Found another file"
def _collectstatic_output(self, verbosity=3, **kwargs):
"""
Run collectstatic, and capture and return the output.
"""
out = StringIO()
call_command(
"collectstatic",
interactive=False,
verbosity=verbosity,
stdout=out,
**kwargs,
)
return out.getvalue()
def test_no_warning(self):
"""
There isn't a warning if there isn't a duplicate destination.
"""
output = self._collectstatic_output(clear=True)
self.assertNotIn(self.warning_string, output)
def test_warning_at_verbosity_2(self):
"""
There is a warning when there are duplicate destinations at verbosity
2+.
"""
with tempfile.TemporaryDirectory() as static_dir:
duplicate = os.path.join(static_dir, "test", "file.txt")
os.mkdir(os.path.dirname(duplicate))
with open(duplicate, "w+") as f:
f.write("duplicate of file.txt")
with self.settings(STATICFILES_DIRS=[static_dir]):
output = self._collectstatic_output(clear=True, verbosity=2)
self.assertIn(self.warning_string, output)
def test_no_warning_at_verbosity_1(self):
"""
There is no individual warning at verbosity 1, but summary is shown.
"""
with tempfile.TemporaryDirectory() as static_dir:
duplicate = os.path.join(static_dir, "test", "file.txt")
os.mkdir(os.path.dirname(duplicate))
with open(duplicate, "w+") as f:
f.write("duplicate of file.txt")
with self.settings(STATICFILES_DIRS=[static_dir]):
output = self._collectstatic_output(clear=True, verbosity=1)
self.assertNotIn(self.warning_string, output)
self.assertIn("1 skipped due to conflict", output)
def test_summary_multiple_conflicts(self):
"""
Summary shows correct count for multiple conflicts.
"""
with tempfile.TemporaryDirectory() as static_dir:
duplicate1 = os.path.join(static_dir, "test", "file.txt")
os.makedirs(os.path.dirname(duplicate1))
with open(duplicate1, "w+") as f:
f.write("duplicate of file.txt")
duplicate2 = os.path.join(static_dir, "test", "file1.txt")
with open(duplicate2, "w+") as f:
f.write("duplicate of file1.txt")
duplicate3 = os.path.join(static_dir, "test", "nonascii.css")
shutil.copy2(duplicate1, duplicate3)
with self.settings(STATICFILES_DIRS=[static_dir]):
output = self._collectstatic_output(clear=True, verbosity=1)
self.assertIn("3 skipped due to conflict", output)
@override_settings(
STORAGES={
**settings.STORAGES,
STATICFILES_STORAGE_ALIAS: {
"BACKEND": "staticfiles_tests.storage.DummyStorage"
},
}
)
| TestCollectionOverwriteWarning |
python | ApeWorX__ape | src/ape/managers/converters.py | {
"start": 2962,
"end": 3692
} | class ____(ConverterAPI):
"""
A converter that converts an :class:`~ape.api.address.BaseAddress`
to a :class`~ape.types.address.AddressType`.
"""
def is_convertible(self, value: Any) -> bool:
return isinstance(value, BaseAddress)
def convert(self, value: BaseAddress) -> AddressType:
"""
Convert the given value to :class:`~ape.types.address.AddressType`.
Args:
value (str): The value to convert.
Returns:
:class:`~ape.types.address.AddressType`: An alias to
`ChecksumAddress <https://eth-typing.readthedocs.io/en/latest/types.html#checksumaddress>`__. # noqa: E501
"""
return value.address
| AddressAPIConverter |
python | getsentry__sentry | src/sentry/api/exceptions.py | {
"start": 2508,
"end": 2860
} | class ____(SentryAPIException):
status_code = status.HTTP_401_UNAUTHORIZED
code = "member-disabled-over-limit"
message = "Organization over member limit"
def __init__(self, organization):
super().__init__(
next=reverse("sentry-organization-disabled-member", args=[organization.slug])
)
| MemberDisabledOverLimit |
python | Pylons__pyramid | tests/test_session.py | {
"start": 21224,
"end": 21440
} | class ____:
def dumps(self, value):
return base64.b64encode(json.dumps(value).encode('utf-8'))
def loads(self, value):
return json.loads(base64.b64decode(value).decode('utf-8'))
| DummySerializer |
python | ApeWorX__ape | src/ape/exceptions.py | {
"start": 1553,
"end": 1856
} | class ____(AccountsError):
"""
Raised when attempting to add an account using an alias
that already maps to another account.
"""
def __init__(self, alias: str):
self.alias = alias
super().__init__(f"Account with alias '{alias}' already in use.")
| AliasAlreadyInUseError |
python | run-llama__llama_index | llama-index-integrations/readers/llama-index-readers-opendal/llama_index/readers/opendal/base.py | {
"start": 420,
"end": 2728
} | class ____(BaseReader):
"""General reader for any opendal operator."""
def __init__(
self,
scheme: str,
path: str = "/",
file_extractor: Optional[Dict[str, Union[str, BaseReader]]] = None,
**kwargs,
) -> None:
"""
Initialize opendal operator, along with credentials if needed.
Args:
scheme (str): the scheme of the service
path (str): the path of the data. If none is provided,
this loader will iterate through the entire bucket. If path is endswith `/`, this loader will iterate through the entire dir. Otherwise, this loeader will load the file.
file_extractor (Optional[Dict[str, BaseReader]]): A mapping of file
extension to a BaseReader class that specifies how to convert that file
to text. See `SimpleDirectoryReader` for more details.
"""
import opendal
super().__init__()
self.path = path
self.file_extractor = file_extractor
self.op = opendal.AsyncOperator(scheme, **kwargs)
def load_data(self) -> List[Document]:
"""Load file(s) from OpenDAL."""
with tempfile.TemporaryDirectory() as temp_dir:
if not self.path.endswith("/"):
asyncio.run(download_file_from_opendal(self.op, temp_dir, self.path))
else:
asyncio.run(download_dir_from_opendal(self.op, temp_dir, self.path))
loader = SimpleDirectoryReader(temp_dir, file_extractor=self.file_extractor)
return loader.load_data()
async def download_file_from_opendal(op: Any, temp_dir: str, path: str) -> str:
"""Download file from OpenDAL."""
import opendal
op = cast(opendal.AsyncOperator, op)
suffix = Path(path).suffix
filepath = f"{temp_dir}/{next(tempfile._get_candidate_names())}{suffix}"
async with op.open_reader(path) as r:
with open(filepath, "wb") as w:
w.write(await r.read())
return filepath
async def download_dir_from_opendal(op: Any, temp_dir: str, dir: str) -> str:
"""Download directory from opendal."""
import opendal
op = cast(opendal.AsyncOperator, op)
async for obj in await op.scan(dir):
await download_file_from_opendal(op, temp_dir, obj.path)
| OpendalReader |
python | joke2k__faker | faker/providers/lorem/tl_PH/__init__.py | {
"start": 49,
"end": 325
} | class ____(FilPhProvider):
"""Implement lorem provider for ``tl_PH`` locale.
There is no difference from the |FilPhLoremProvider|.
.. |FilPhLoremProvider| replace::
:meth:`FilPhLoremProvider <faker.providers.lorem.fil_PH.Provider>`
"""
pass
| Provider |
python | mlflow__mlflow | tests/sagemaker/mock/__init__.py | {
"start": 259,
"end": 336
} | class ____(NamedTuple):
resource: Any
arn: str
| SageMakerResourceWithArn |
python | PyCQA__pylint | doc/data/messages/i/invalid-field-call/bad.py | {
"start": 54,
"end": 263
} | class ____:
a: float
b: float
c: float
field(init=False) # [invalid-field-call]
def __post_init__(self):
self.c = self.a + self.b
print(field(init=False)) # [invalid-field-call]
| C |
python | docker__docker-py | docker/types/services.py | {
"start": 26340,
"end": 27430
} | class ____(dict):
"""
Config reference to be used as part of a :py:class:`ContainerSpec`.
Describes how a config is made accessible inside the service's
containers.
Args:
config_id (string): Config's ID
config_name (string): Config's name as defined at its creation.
filename (string): Name of the file containing the config. Defaults
to the config's name if not specified.
uid (string): UID of the config file's owner. Default: 0
gid (string): GID of the config file's group. Default: 0
mode (int): File access mode inside the container. Default: 0o444
"""
@check_resource('config_id')
def __init__(self, config_id, config_name, filename=None, uid=None,
gid=None, mode=0o444):
self['ConfigName'] = config_name
self['ConfigID'] = config_id
self['File'] = {
'Name': filename or config_name,
'UID': uid or '0',
'GID': gid or '0',
'Mode': mode
}
| ConfigReference |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 232010,
"end": 232534
} | class ____(sgqlc.types.Input):
"""Ordering options for Enterprise Server user account email
connections.
"""
__schema__ = github_schema
__field_names__ = ("field", "direction")
field = sgqlc.types.Field(sgqlc.types.non_null(EnterpriseServerUserAccountEmailOrderField), graphql_name="field")
"""The field to order emails by."""
direction = sgqlc.types.Field(sgqlc.types.non_null(OrderDirection), graphql_name="direction")
"""The ordering direction."""
| EnterpriseServerUserAccountEmailOrder |
python | pydata__xarray | xarray/tests/test_concat.py | {
"start": 15606,
"end": 43698
} | class ____:
@pytest.fixture
def data(self, request) -> Dataset:
use_extension_array = request.param if hasattr(request, "param") else False
return create_test_data(use_extension_array=use_extension_array).drop_dims(
"dim3"
)
def rectify_dim_order(self, data: Dataset, dataset) -> Dataset:
# return a new dataset with all variable dimensions transposed into
# the order in which they are found in `data`
return Dataset(
{k: v.transpose(*data[k].dims) for k, v in dataset.data_vars.items()},
dataset.coords,
attrs=dataset.attrs,
)
@pytest.mark.parametrize("coords", ["different", "minimal"])
@pytest.mark.parametrize(
"dim,data", [["dim1", True], ["dim2", False]], indirect=["data"]
)
def test_concat_simple(self, data: Dataset, dim, coords) -> None:
datasets = [g for _, g in data.groupby(dim)]
assert_identical(data, concat(datasets, dim, coords=coords, compat="equals"))
def test_concat_merge_variables_present_in_some_datasets(
self, data: Dataset
) -> None:
# coordinates present in some datasets but not others
ds1 = Dataset(data_vars={"a": ("y", [0.1])}, coords={"x": 0.1})
ds2 = Dataset(data_vars={"a": ("y", [0.2])}, coords={"z": 0.2})
actual = concat([ds1, ds2], dim="y", coords="minimal")
expected = Dataset({"a": ("y", [0.1, 0.2])}, coords={"x": 0.1, "z": 0.2})
assert_identical(expected, actual)
# data variables present in some datasets but not others
split_data = [data.isel(dim1=slice(3)), data.isel(dim1=slice(3, None))]
data0, data1 = deepcopy(split_data)
data1["foo"] = ("bar", np.random.randn(10))
actual = concat([data0, data1], "dim1", data_vars="minimal")
expected = data.copy().assign(foo=data1.foo)
assert_identical(expected, actual)
# expand foo
actual = concat([data0, data1], "dim1", data_vars="all")
foo = np.ones((8, 10), dtype=data1.foo.dtype) * np.nan
foo[3:] = data1.foo.values[None, ...]
expected = data.copy().assign(foo=(["dim1", "bar"], foo))
assert_identical(expected, actual)
@pytest.mark.parametrize("data", [False], indirect=["data"])
def test_concat_2(self, data: Dataset) -> None:
dim = "dim2"
datasets = [g.squeeze(dim) for _, g in data.groupby(dim, squeeze=False)]
concat_over = [k for k, v in data.coords.items() if dim in v.dims and k != dim]
actual = concat(datasets, data[dim], coords=concat_over)
assert_identical(data, self.rectify_dim_order(data, actual))
@pytest.mark.parametrize("coords", ["different", "minimal", "all"])
@pytest.mark.parametrize("dim", ["dim1", "dim2"])
def test_concat_coords_kwarg(
self, data: Dataset, dim: str, coords: Literal["all", "minimal", "different"]
) -> None:
data = data.copy(deep=True)
# make sure the coords argument behaves as expected
data.coords["extra"] = ("dim4", np.arange(3))
datasets = [g for _, g in data.groupby(dim)]
actual = concat(
datasets, data[dim], coords=coords, data_vars="all", compat="equals"
)
if coords == "all":
expected = np.array([data["extra"].values for _ in range(data.sizes[dim])])
assert_array_equal(actual["extra"].values, expected)
else:
assert_equal(data["extra"], actual["extra"])
def test_concat(self, data: Dataset) -> None:
split_data = [
data.isel(dim1=slice(3)),
data.isel(dim1=3),
data.isel(dim1=slice(4, None)),
]
assert_identical(data, concat(split_data, "dim1"))
def test_concat_dim_precedence(self, data: Dataset) -> None:
# verify that the dim argument takes precedence over
# concatenating dataset variables of the same name
dim = (2 * data["dim1"]).rename("dim1")
datasets = [g for _, g in data.groupby("dim1", squeeze=False)]
expected = data.copy()
expected["dim1"] = dim
assert_identical(expected, concat(datasets, dim))
def test_concat_data_vars_typing(self) -> None:
# Testing typing, can be removed if the next function works with annotations.
data = Dataset({"foo": ("x", np.random.randn(10))})
objs: list[Dataset] = [data.isel(x=slice(5)), data.isel(x=slice(5, None))]
actual = concat(objs, dim="x", data_vars="minimal")
assert_identical(data, actual)
@pytest.mark.parametrize("data_vars", ["minimal", "different", "all", [], ["foo"]])
def test_concat_data_vars(self, data_vars) -> None:
data = Dataset({"foo": ("x", np.random.randn(10))})
objs: list[Dataset] = [data.isel(x=slice(5)), data.isel(x=slice(5, None))]
actual = concat(objs, dim="x", data_vars=data_vars, compat="equals")
assert_identical(data, actual)
@pytest.mark.parametrize("coords", ["different", "all", ["c"]])
def test_concat_coords(self, coords) -> None:
data = Dataset({"foo": ("x", np.random.randn(10))})
expected = data.assign_coords(c=("x", [0] * 5 + [1] * 5))
objs = [
data.isel(x=slice(5)).assign_coords(c=0),
data.isel(x=slice(5, None)).assign_coords(c=1),
]
if coords == "different":
actual = concat(objs, dim="x", coords=coords, compat="equals")
else:
actual = concat(objs, dim="x", coords=coords)
assert_identical(expected, actual)
@pytest.mark.parametrize("coords", ["minimal", []])
def test_concat_coords_raises_merge_error(self, coords) -> None:
data = Dataset({"foo": ("x", np.random.randn(10))})
objs = [
data.isel(x=slice(5)).assign_coords(c=0),
data.isel(x=slice(5, None)).assign_coords(c=1),
]
with pytest.raises(merge.MergeError, match="conflicting values"):
concat(objs, dim="x", coords=coords, compat="equals")
@pytest.mark.parametrize("data_vars", ["different", "all", ["foo"]])
def test_concat_constant_index(self, data_vars) -> None:
# GH425
ds1 = Dataset({"foo": 1.5}, {"y": 1})
ds2 = Dataset({"foo": 2.5}, {"y": 1})
expected = Dataset({"foo": ("y", [1.5, 2.5]), "y": [1, 1]})
if data_vars == "different":
actual = concat([ds1, ds2], "y", data_vars=data_vars, compat="equals")
else:
actual = concat([ds1, ds2], "y", data_vars=data_vars)
assert_identical(expected, actual)
def test_concat_constant_index_None(self) -> None:
ds1 = Dataset({"foo": 1.5}, {"y": 1})
ds2 = Dataset({"foo": 2.5}, {"y": 1})
actual = concat([ds1, ds2], "new_dim", data_vars=None, compat="equals")
expected = Dataset(
{"foo": ("new_dim", [1.5, 2.5])},
coords={"y": 1},
)
assert_identical(actual, expected)
def test_concat_constant_index_minimal(self) -> None:
ds1 = Dataset({"foo": 1.5}, {"y": 1})
ds2 = Dataset({"foo": 2.5}, {"y": 1})
with set_options(use_new_combine_kwarg_defaults=False):
with pytest.raises(merge.MergeError, match="conflicting values"):
concat([ds1, ds2], dim="new_dim", data_vars="minimal")
with set_options(use_new_combine_kwarg_defaults=True):
with pytest.raises(
ValueError, match="data_vars='minimal' and coords='minimal'"
):
concat([ds1, ds2], dim="new_dim", data_vars="minimal")
def test_concat_size0(self) -> None:
data = create_test_data()
split_data = [data.isel(dim1=slice(0, 0)), data]
actual = concat(split_data, "dim1")
assert_identical(data, actual)
actual = concat(split_data[::-1], "dim1")
assert_identical(data, actual)
    def test_concat_autoalign(self) -> None:
        """Datasets with different "x" labels are outer-joined (NaN-filled) first."""
        ds1 = Dataset({"foo": DataArray([1, 2], coords=[("x", [1, 2])])})
        ds2 = Dataset({"foo": DataArray([1, 2], coords=[("x", [1, 3])])})
        actual = concat([ds1, ds2], "y", data_vars="all", join="outer")
        # Union of labels is [1, 2, 3]; each input contributes NaN where it has
        # no value for a label.
        expected = Dataset(
            {
                "foo": DataArray(
                    [[1, 2, np.nan], [1, np.nan, 2]],
                    dims=["y", "x"],
                    coords={"x": [1, 2, 3]},
                )
            }
        )
        assert_identical(expected, actual)
def test_concat_errors(self) -> None:
data = create_test_data()
split_data = [data.isel(dim1=slice(3)), data.isel(dim1=slice(3, None))]
with pytest.raises(ValueError, match=r"must supply at least one"):
concat([], "dim1")
with pytest.raises(ValueError, match=r"Cannot specify both .*='different'"):
concat(
[data, data], dim="concat_dim", data_vars="different", compat="override"
)
with pytest.raises(ValueError, match=r"must supply at least one"):
concat([], "dim1")
with pytest.raises(ValueError, match=r"are not found in the coordinates"):
concat([data, data], "new_dim", coords=["not_found"])
with pytest.raises(ValueError, match=r"are not found in the data variables"):
concat([data, data], "new_dim", data_vars=["not_found"])
with pytest.raises(ValueError, match=r"global attributes not"):
# call deepcopy separately to get unique attrs
data0 = deepcopy(split_data[0])
data1 = deepcopy(split_data[1])
data1.attrs["foo"] = "bar"
concat([data0, data1], "dim1", compat="identical")
assert_identical(data, concat([data0, data1], "dim1", compat="equals"))
with pytest.raises(ValueError, match=r"compat.* invalid"):
concat(split_data, "dim1", compat="foobar") # type: ignore[call-overload]
with pytest.raises(ValueError, match=r"compat.* invalid"):
concat(split_data, "dim1", compat="minimal")
with pytest.raises(ValueError, match=r"unexpected value for"):
concat([data, data], "new_dim", coords="foobar")
with pytest.raises(
ValueError, match=r"coordinate in some datasets but not others"
):
concat([Dataset({"x": 0}), Dataset({"x": [1]})], dim="z")
with pytest.raises(
ValueError, match=r"coordinate in some datasets but not others"
):
concat([Dataset({"x": 0}), Dataset({}, {"x": 1})], dim="z")
    def test_concat_join_kwarg(self) -> None:
        """Each join= option aligns the non-concat dim "y" as documented."""
        # The inputs share the concat dim "x" but have different "y" labels
        # (0 vs 0.0001), so alignment along "y" is what is being exercised.
        ds1 = Dataset({"a": (("x", "y"), [[0]])}, coords={"x": [0], "y": [0]})
        ds2 = Dataset({"a": (("x", "y"), [[0]])}, coords={"x": [1], "y": [0.0001]})
        expected: dict[JoinOptions, Any] = {}
        expected["outer"] = Dataset(
            {"a": (("x", "y"), [[0, np.nan], [np.nan, 0]])},
            {"x": [0, 1], "y": [0, 0.0001]},
        )
        expected["inner"] = Dataset(
            {"a": (("x", "y"), [[], []])}, {"x": [0, 1], "y": []}
        )
        expected["left"] = Dataset(
            {"a": (("x", "y"), np.array([0, np.nan], ndmin=2).T)},
            coords={"x": [0, 1], "y": [0]},
        )
        expected["right"] = Dataset(
            {"a": (("x", "y"), np.array([np.nan, 0], ndmin=2).T)},
            coords={"x": [0, 1], "y": [0.0001]},
        )
        expected["override"] = Dataset(
            {"a": (("x", "y"), np.array([0, 0], ndmin=2).T)},
            coords={"x": [0, 1], "y": [0]},
        )
        # join="exact" must refuse to align the mismatched "y" labels.
        with pytest.raises(ValueError, match=r"cannot align.*exact.*dimensions.*'y'"):
            actual = concat([ds1, ds2], join="exact", dim="x")
        for join, expected_item in expected.items():
            actual = concat([ds1, ds2], join=join, dim="x")
            assert_equal(actual, expected_item)
        # regression test for #3681
        actual = concat(
            [ds1.drop_vars("x"), ds2.drop_vars("x")], join="override", dim="y"
        )
        expected2 = Dataset(
            {"a": (("x", "y"), np.array([0, 0], ndmin=2))}, coords={"y": [0, 0.0001]}
        )
        assert_identical(actual, expected2)
    # Each case: (combine_attrs, attrs of ds1, attrs of ds2,
    #             expected merged attrs, whether a conflict error is expected).
    @pytest.mark.parametrize(
        "combine_attrs, var1_attrs, var2_attrs, expected_attrs, expect_exception",
        [
            (
                "no_conflicts",
                {"a": 1, "b": 2},
                {"a": 1, "c": 3},
                {"a": 1, "b": 2, "c": 3},
                False,
            ),
            ("no_conflicts", {"a": 1, "b": 2}, {}, {"a": 1, "b": 2}, False),
            ("no_conflicts", {}, {"a": 1, "c": 3}, {"a": 1, "c": 3}, False),
            (
                "no_conflicts",
                {"a": 1, "b": 2},
                {"a": 4, "c": 3},
                {"a": 1, "b": 2, "c": 3},
                True,
            ),
            ("drop", {"a": 1, "b": 2}, {"a": 1, "c": 3}, {}, False),
            ("identical", {"a": 1, "b": 2}, {"a": 1, "b": 2}, {"a": 1, "b": 2}, False),
            ("identical", {"a": 1, "b": 2}, {"a": 1, "c": 3}, {"a": 1, "b": 2}, True),
            (
                "override",
                {"a": 1, "b": 2},
                {"a": 4, "b": 5, "c": 3},
                {"a": 1, "b": 2},
                False,
            ),
            (
                "drop_conflicts",
                {"a": 41, "b": 42, "c": 43},
                {"b": 2, "c": 43, "d": 44},
                {"a": 41, "c": 43, "d": 44},
                False,
            ),
            (
                lambda attrs, context: {"a": -1, "b": 0, "c": 1} if any(attrs) else {},
                {"a": 41, "b": 42, "c": 43},
                {"b": 2, "c": 43, "d": 44},
                {"a": -1, "b": 0, "c": 1},
                False,
            ),
        ],
    )
    def test_concat_combine_attrs_kwarg(
        self, combine_attrs, var1_attrs, var2_attrs, expected_attrs, expect_exception
    ):
        """combine_attrs controls how *dataset-level* attrs are merged by concat."""
        ds1 = Dataset({"a": ("x", [0])}, coords={"x": [0]}, attrs=var1_attrs)
        ds2 = Dataset({"a": ("x", [0])}, coords={"x": [1]}, attrs=var2_attrs)
        if expect_exception:
            # Conflicting attrs must raise an error naming the combine_attrs mode.
            with pytest.raises(ValueError, match=f"combine_attrs='{combine_attrs}'"):
                concat([ds1, ds2], dim="x", combine_attrs=combine_attrs)
        else:
            actual = concat([ds1, ds2], dim="x", combine_attrs=combine_attrs)
            expected = Dataset(
                {"a": ("x", [0, 0])}, {"x": [0, 1]}, attrs=expected_attrs
            )
            assert_identical(actual, expected)
    # Each case: (combine_attrs, attrs of the first variable, attrs of the
    #             second, expected merged attrs, whether an error is expected).
    @pytest.mark.parametrize(
        "combine_attrs, attrs1, attrs2, expected_attrs, expect_exception",
        [
            (
                "no_conflicts",
                {"a": 1, "b": 2},
                {"a": 1, "c": 3},
                {"a": 1, "b": 2, "c": 3},
                False,
            ),
            ("no_conflicts", {"a": 1, "b": 2}, {}, {"a": 1, "b": 2}, False),
            ("no_conflicts", {}, {"a": 1, "c": 3}, {"a": 1, "c": 3}, False),
            (
                "no_conflicts",
                {"a": 1, "b": 2},
                {"a": 4, "c": 3},
                {"a": 1, "b": 2, "c": 3},
                True,
            ),
            ("drop", {"a": 1, "b": 2}, {"a": 1, "c": 3}, {}, False),
            ("identical", {"a": 1, "b": 2}, {"a": 1, "b": 2}, {"a": 1, "b": 2}, False),
            ("identical", {"a": 1, "b": 2}, {"a": 1, "c": 3}, {"a": 1, "b": 2}, True),
            (
                "override",
                {"a": 1, "b": 2},
                {"a": 4, "b": 5, "c": 3},
                {"a": 1, "b": 2},
                False,
            ),
            (
                "drop_conflicts",
                {"a": 41, "b": 42, "c": 43},
                {"b": 2, "c": 43, "d": 44},
                {"a": 41, "c": 43, "d": 44},
                False,
            ),
            (
                lambda attrs, context: {"a": -1, "b": 0, "c": 1} if any(attrs) else {},
                {"a": 41, "b": 42, "c": 43},
                {"b": 2, "c": 43, "d": 44},
                {"a": -1, "b": 0, "c": 1},
                False,
            ),
        ],
    )
    def test_concat_combine_attrs_kwarg_variables(
        self, combine_attrs, attrs1, attrs2, expected_attrs, expect_exception
    ):
        """check that combine_attrs is used on data variables and coords"""
        # Attrs are attached to the variable "a" and the coord "x", not to the
        # datasets, so this exercises the per-variable attrs-merging path.
        ds1 = Dataset({"a": ("x", [0], attrs1)}, coords={"x": ("x", [0], attrs1)})
        ds2 = Dataset({"a": ("x", [0], attrs2)}, coords={"x": ("x", [1], attrs2)})
        if expect_exception:
            with pytest.raises(ValueError, match=f"combine_attrs='{combine_attrs}'"):
                concat([ds1, ds2], dim="x", combine_attrs=combine_attrs)
        else:
            actual = concat([ds1, ds2], dim="x", combine_attrs=combine_attrs)
            expected = Dataset(
                {"a": ("x", [0, 0], expected_attrs)},
                {"x": ("x", [0, 1], expected_attrs)},
            )
            assert_identical(actual, expected)
def test_concat_promote_shape_with_mixed_dims_within_variables(self) -> None:
objs = [Dataset({}, {"x": 0}), Dataset({"x": [1]})]
actual = concat(objs, "x")
expected = Dataset({"x": [0, 1]})
assert_identical(actual, expected)
objs = [Dataset({"x": [0]}), Dataset({}, {"x": 1})]
actual = concat(objs, "x")
assert_identical(actual, expected)
def test_concat_promote_shape_with_mixed_dims_between_variables(self) -> None:
objs = [Dataset({"x": [2], "y": 3}), Dataset({"x": [4], "y": 5})]
actual = concat(objs, "x", data_vars="all")
expected = Dataset({"x": [2, 4], "y": ("x", [3, 5])})
assert_identical(actual, expected)
def test_concat_promote_shape_with_mixed_dims_in_coord_variable(self) -> None:
objs = [Dataset({"x": [0]}, {"y": -1}), Dataset({"x": [1]}, {"y": ("x", [-2])})]
actual = concat(objs, "x")
expected = Dataset({"x": [0, 1]}, {"y": ("x", [-1, -2])})
assert_identical(actual, expected)
    def test_concat_promote_shape_for_scalars_with_mixed_lengths_along_concat_dim(
        self,
    ) -> None:
        """A scalar coord is repeated to match each input's length along the concat dim."""
        # values should repeat
        objs = [Dataset({"x": [0]}, {"y": -1}), Dataset({"x": [1, 2]}, {"y": -2})]
        actual = concat(objs, "x", coords="different", compat="equals")
        # -2 appears twice because its dataset spans two entries along "x".
        expected = Dataset({"x": [0, 1, 2]}, {"y": ("x", [-1, -2, -2])})
        assert_identical(actual, expected)
        # coords="all" yields the same result without needing compat="equals".
        actual = concat(objs, "x", coords="all")
        assert_identical(actual, expected)
    def test_concat_promote_shape_broadcast_1d_x_1d_goes_to_2d(self) -> None:
        """Variables 1d along different dims ("x" vs "y") broadcast to a 2d result."""
        objs = [
            Dataset({"z": ("x", [-1])}, {"x": [0], "y": [0]}),
            Dataset({"z": ("y", [1])}, {"x": [1], "y": [0]}),
        ]
        actual = concat(objs, "x")
        expected = Dataset({"z": (("x", "y"), [[-1], [1]])}, {"x": [0, 1], "y": [0]})
        assert_identical(actual, expected)
    def test_concat_promote_shape_with_scalar_coordinates(self) -> None:
        """A scalar object-valued coord (pd.Interval) concats with a 1d one."""
        # regression GH6384
        objs = [
            Dataset({}, {"x": pd.Interval(-1, 0, closed="right")}),
            Dataset({"x": [pd.Interval(0, 1, closed="right")]}),
        ]
        actual = concat(objs, "x")
        expected = Dataset(
            {
                "x": [
                    pd.Interval(-1, 0, closed="right"),
                    pd.Interval(0, 1, closed="right"),
                ]
            }
        )
        assert_identical(actual, expected)
    def test_concat_promote_shape_with_coordinates_of_particular_dtypes(self) -> None:
        """datetime64 dtype survives promoting a 0d coord and a DatetimeIndex results."""
        # regression GH6416 (coord dtype) and GH6434
        time_data1 = np.array(["2022-01-01", "2022-02-01"], dtype="datetime64[ns]")
        # 0d datetime64 value: promotion must not degrade it to object dtype.
        time_data2 = np.array("2022-03-01", dtype="datetime64[ns]")
        time_expected = np.array(
            ["2022-01-01", "2022-02-01", "2022-03-01"], dtype="datetime64[ns]"
        )
        objs = [Dataset({}, {"time": time_data1}), Dataset({}, {"time": time_data2})]
        actual = concat(objs, "time")
        expected = Dataset({}, {"time": time_expected})
        assert_identical(actual, expected)
        assert isinstance(actual.indexes["time"], pd.DatetimeIndex)
    def test_concat_do_not_promote(self) -> None:
        """Identical non-concat scalar coords stay scalar; conflicting ones error."""
        # GH438
        objs = [
            Dataset({"y": ("t", [1])}, {"x": 1, "t": [0]}),
            Dataset({"y": ("t", [2])}, {"x": 1, "t": [0]}),
        ]
        # "x" is identical (1 in both), so it must remain a scalar coord.
        expected = Dataset({"y": ("t", [1, 2])}, {"x": 1, "t": [0, 0]})
        actual = concat(objs, "t")
        assert_identical(expected, actual)
        objs = [
            Dataset({"y": ("t", [1])}, {"x": 1, "t": [0]}),
            Dataset({"y": ("t", [2])}, {"x": 2, "t": [0]}),
        ]
        # "x" differs (1 vs 2) and coords="minimal" forbids concatenating it.
        with pytest.raises(ValueError):
            concat(objs, "t", coords="minimal")
def test_concat_dim_is_variable(self) -> None:
objs = [Dataset({"x": 0}), Dataset({"x": 1})]
coord = Variable("y", [3, 4], attrs={"foo": "bar"})
expected = Dataset({"x": ("y", [0, 1]), "y": coord})
actual = concat(objs, coord, data_vars="all")
assert_identical(actual, expected)
def test_concat_dim_is_dataarray(self) -> None:
objs = [Dataset({"x": 0}), Dataset({"x": 1})]
coord = DataArray([3, 4], dims="y", attrs={"foo": "bar"})
expected = Dataset({"x": ("y", [0, 1]), "y": coord})
actual = concat(objs, coord, data_vars="all")
assert_identical(actual, expected)
    def test_concat_multiindex(self) -> None:
        """Splitting and re-concatenating along a MultiIndex dim round-trips it."""
        midx = pd.MultiIndex.from_product([[1, 2, 3], ["a", "b"]])
        midx_coords = Coordinates.from_pandas_multiindex(midx, "x")
        expected = Dataset(coords=midx_coords)
        actual = concat(
            [expected.isel(x=slice(2)), expected.isel(x=slice(2, None))], "x"
        )
        assert expected.equals(actual)
        # The recombined index must still be a MultiIndex, not a flattened one.
        assert isinstance(actual.x.to_index(), pd.MultiIndex)
    def test_concat_along_new_dim_multiindex(self) -> None:
        """Concat along a brand-new dim keeps an existing MultiIndex and its levels."""
        # see https://github.com/pydata/xarray/issues/6881
        level_names = ["x_level_0", "x_level_1"]
        midx = pd.MultiIndex.from_product([[1, 2, 3], ["a", "b"]], names=level_names)
        midx_coords = Coordinates.from_pandas_multiindex(midx, "x")
        ds = Dataset(coords=midx_coords)
        concatenated = concat([ds], "new")
        actual = list(concatenated.xindexes.get_all_coords("x"))
        # The index must still cover the dim coord plus both level coords.
        expected = ["x"] + level_names
        assert actual == expected
    @pytest.mark.parametrize("fill_value", [dtypes.NA, 2, 2.0, {"a": 2, "b": 1}])
    def test_concat_fill_value(self, fill_value) -> None:
        """fill_value (scalar or per-variable dict) fills gaps from the outer join."""
        datasets = [
            Dataset({"a": ("x", [2, 3]), "b": ("x", [-2, 1])}, {"x": [1, 2]}),
            Dataset({"a": ("x", [1, 2]), "b": ("x", [3, -1])}, {"x": [0, 1]}),
        ]
        if fill_value == dtypes.NA:
            # if we supply the default, we expect the missing value for a
            # float array
            fill_value_a = fill_value_b = np.nan
        elif isinstance(fill_value, dict):
            # A dict maps each variable name to its own fill value.
            fill_value_a = fill_value["a"]
            fill_value_b = fill_value["b"]
        else:
            fill_value_a = fill_value_b = fill_value
        expected = Dataset(
            {
                "a": (("t", "x"), [[fill_value_a, 2, 3], [1, 2, fill_value_a]]),
                "b": (("t", "x"), [[fill_value_b, -2, 1], [3, -1, fill_value_b]]),
            },
            {"x": [0, 1, 2]},
        )
        actual = concat(
            datasets, dim="t", fill_value=fill_value, data_vars="all", join="outer"
        )
        assert_identical(actual, expected)
    @pytest.mark.parametrize("dtype", [str, bytes])
    @pytest.mark.parametrize("dim", ["x1", "x2"])
    def test_concat_str_dtype(self, dtype, dim) -> None:
        """String/bytes coord dtype is preserved whether or not it is the concat dim."""
        data = np.arange(4).reshape([2, 2])
        da1 = Dataset(
            {
                "data": (["x1", "x2"], data),
                "x1": [0, 1],
                "x2": np.array(["a", "b"], dtype=dtype),
            }
        )
        da2 = Dataset(
            {
                "data": (["x1", "x2"], data),
                "x1": np.array([1, 2]),
                "x2": np.array(["c", "d"], dtype=dtype),
            }
        )
        actual = concat([da1, da2], dim=dim, join="outer")
        # The outer join must not degrade the string coord to object dtype.
        assert np.issubdtype(actual.x2.dtype, dtype)
    def test_concat_avoids_index_auto_creation(self) -> None:
        """concat must not auto-create indexes or access lazy coordinate data."""
        # ConcatenatableArray raises on data access, so any index auto-creation
        # (which would require reading the coord values) fails the test.
        # TODO once passing indexes={} directly to Dataset constructor is allowed then no need to create coords first
        coords = Coordinates(
            {"x": ConcatenatableArray(np.array([1, 2, 3]))}, indexes={}
        )
        datasets = [
            Dataset(
                {"a": (["x", "y"], ConcatenatableArray(np.zeros((3, 3))))},
                coords=coords,
            )
            for _ in range(2)
        ]
        # should not raise on concat
        combined = concat(datasets, dim="x")
        assert combined["a"].shape == (6, 3)
        assert combined["a"].dims == ("x", "y")
        # nor have auto-created any indexes
        assert combined.indexes == {}
        # should not raise on stack
        combined = concat(datasets, dim="z", data_vars="all")
        assert combined["a"].shape == (2, 3, 3)
        assert combined["a"].dims == ("z", "x", "y")
        # nor have auto-created any indexes
        assert combined.indexes == {}
    def test_concat_avoids_index_auto_creation_new_1d_coord(self) -> None:
        """create_index_for_new_dim=False skips index creation for a promoted coord."""
        # create 0D coordinates (without indexes)
        datasets = [
            Dataset(
                coords={"x": ConcatenatableArray(np.array(10))},
            )
            for _ in range(2)
        ]
        # Creating an index requires reading the coord data, which
        # ConcatenatableArray forbids.
        with pytest.raises(UnexpectedDataAccess):
            concat(datasets, dim="x", create_index_for_new_dim=True)
        # should not raise on concat iff create_index_for_new_dim=False
        combined = concat(datasets, dim="x", create_index_for_new_dim=False)
        assert combined["x"].shape == (2,)
        assert combined["x"].dims == ("x",)
        # nor have auto-created any indexes
        assert combined.indexes == {}
    def test_concat_promote_shape_without_creating_new_index(self) -> None:
        """Shape promotion of index-less coords must not create an index either."""
        # different shapes but neither have indexes
        ds1 = Dataset(coords={"x": 0})
        ds2 = Dataset(data_vars={"x": [1]}).drop_indexes("x")
        actual = concat([ds1, ds2], dim="x", create_index_for_new_dim=False)
        expected = Dataset(data_vars={"x": [0, 1]}).drop_indexes("x")
        # check_default_indexes=False: we explicitly expect "x" to lack an index.
        assert_identical(actual, expected, check_default_indexes=False)
        assert actual.indexes == {}
@requires_scipy_or_netCDF4
def test_concat_combine_attrs_nan_after_netcdf_roundtrip(self, tmp_path) -> None:
# Test for issue #10833: NaN attributes should be preserved
# with combine_attrs="drop_conflicts" after NetCDF roundtrip
import numpy as np
# Create arrays with matching NaN fill_value attribute
ds1 = Dataset(
{"a": ("x", [0, 1])},
attrs={"fill_value": np.nan, "sensor": "G18", "field": "CTH"},
)
ds2 = Dataset(
{"a": ("x", [2, 3])},
attrs={"fill_value": np.nan, "sensor": "G16", "field": "CTH"},
)
# Save to NetCDF and reload (converts Python float NaN to NumPy scalar NaN)
path1 = tmp_path / "ds1.nc"
path2 = tmp_path / "ds2.nc"
ds1.to_netcdf(path1)
ds2.to_netcdf(path2)
ds1_loaded = open_dataset(path1)
ds2_loaded = open_dataset(path2)
# Verify that NaN attributes are preserved after concat
actual = concat(
[ds1_loaded, ds2_loaded], dim="y", combine_attrs="drop_conflicts"
)
# fill_value should be preserved (not dropped) since both have NaN
assert "fill_value" in actual.attrs
assert np.isnan(actual.attrs["fill_value"])
# field should be preserved (identical in both)
assert actual.attrs["field"] == "CTH"
# sensor should be dropped (conflicts)
assert "sensor" not in actual.attrs
ds1_loaded.close()
ds2_loaded.close()
| TestConcatDataset |
python | run-llama__llama_index | llama-index-core/llama_index/core/indices/property_graph/transformations/simple_llm.py | {
"start": 612,
"end": 4222
} | class ____(TransformComponent):
"""
Extract triples from a graph.
Uses an LLM and a simple prompt + output parsing to extract paths (i.e. triples) from text.
Args:
llm (LLM):
The language model to use.
extract_prompt (Union[str, PromptTemplate]):
The prompt to use for extracting triples.
parse_fn (callable):
A function to parse the output of the language model.
num_workers (int):
The number of workers to use for parallel processing.
max_paths_per_chunk (int):
The maximum number of paths to extract per chunk.
"""
llm: LLM
extract_prompt: PromptTemplate
parse_fn: Callable
num_workers: int
max_paths_per_chunk: int
def __init__(
self,
llm: Optional[LLM] = None,
extract_prompt: Optional[Union[str, PromptTemplate]] = None,
parse_fn: Callable = default_parse_triplets_fn,
max_paths_per_chunk: int = 10,
num_workers: int = 4,
) -> None:
"""Init params."""
from llama_index.core import Settings
if isinstance(extract_prompt, str):
extract_prompt = PromptTemplate(extract_prompt)
super().__init__(
llm=llm or Settings.llm,
extract_prompt=extract_prompt or DEFAULT_KG_TRIPLET_EXTRACT_PROMPT,
parse_fn=parse_fn,
num_workers=num_workers,
max_paths_per_chunk=max_paths_per_chunk,
)
@classmethod
def class_name(cls) -> str:
return "SimpleLLMPathExtractor"
def __call__(
self, nodes: Sequence[BaseNode], show_progress: bool = False, **kwargs: Any
) -> Sequence[BaseNode]:
"""Extract triples from nodes."""
return asyncio.run(self.acall(nodes, show_progress=show_progress, **kwargs))
async def _aextract(self, node: BaseNode) -> BaseNode:
"""Extract triples from a node."""
assert hasattr(node, "text")
text = node.get_content(metadata_mode=MetadataMode.LLM)
try:
llm_response = await self.llm.apredict(
self.extract_prompt,
text=text,
max_knowledge_triplets=self.max_paths_per_chunk,
)
triples = self.parse_fn(llm_response)
except ValueError:
triples = []
existing_nodes = node.metadata.pop(KG_NODES_KEY, [])
existing_relations = node.metadata.pop(KG_RELATIONS_KEY, [])
metadata = node.metadata.copy()
for subj, rel, obj in triples:
subj_node = EntityNode(name=subj, properties=metadata)
obj_node = EntityNode(name=obj, properties=metadata)
rel_node = Relation(
label=rel,
source_id=subj_node.id,
target_id=obj_node.id,
properties=metadata,
)
existing_nodes.extend([subj_node, obj_node])
existing_relations.append(rel_node)
node.metadata[KG_NODES_KEY] = existing_nodes
node.metadata[KG_RELATIONS_KEY] = existing_relations
return node
async def acall(
self, nodes: Sequence[BaseNode], show_progress: bool = False, **kwargs: Any
) -> Sequence[BaseNode]:
"""Extract triples from nodes async."""
jobs = []
for node in nodes:
jobs.append(self._aextract(node))
return await run_jobs(
jobs,
workers=self.num_workers,
show_progress=show_progress,
desc="Extracting paths from text",
)
| SimpleLLMPathExtractor |
python | ipython__ipython | IPython/core/magics/__init__.py | {
"start": 1338,
"end": 1619
} | class ____(Magics):
"""Placeholder for user-defined magics to be added at runtime.
All magics are eventually merged into a single namespace at runtime, but we
use this class to isolate the magics defined dynamically by the user into
their own class.
"""
| UserMagics |
python | realpython__materials | django-markdown/dmd_app/models.py | {
"start": 31,
"end": 302
} | class ____(models.Model):
title = models.CharField(max_length=100)
content = models.TextField()
slug = models.SlugField(blank=True)
class Meta:
verbose_name_plural = "Markdown content"
def __str__(self):
return self.title
| MarkdownContent |
python | takluyver__flit | flit_core/flit_core/config.py | {
"start": 1580,
"end": 6860
} | class ____(ConfigError):
def __str__(self):
return ('Please specify console_scripts entry points, or [scripts] in '
'flit config, not both.')
def prep_toml_config(d, path):
"""Validate config loaded from pyproject.toml and prepare common metadata
Returns a LoadedConfig object.
"""
dtool = d.get('tool', {}).get('flit', {})
if 'metadata' in dtool:
raise ConfigError(
"The [tool.flit.metadata] table is no longer supported. "
"Switch to the standard [project] table or require flit_core<4 "
"to build this package."
)
if ('scripts' in dtool) or ('entrypoints' in dtool):
raise ConfigError(
"The [tool.flit.scripts] and [tool.flit.entrypoints] tables are no "
"longer supported. Use [project.scripts], [project.gui-scripts] or"
"[project.entry-points] as replacements."
)
if 'project' not in d:
raise ConfigError("No [project] table found in pyproject.toml")
loaded_cfg = read_pep621_metadata(d['project'], path)
module_tbl = dtool.get('module', {})
if 'name' in module_tbl:
loaded_cfg.module = module_tbl['name']
if 'import-names' in d['project']:
import_names_from_config = [
s.split(';')[0] for s in loaded_cfg.metadata['import_name']
]
if import_names_from_config != [loaded_cfg.module]:
raise ConfigError(
f"Specified import-names {import_names_from_config} do not match "
f"the module present ({loaded_cfg.module})"
)
else:
loaded_cfg.metadata['import_name'] = [loaded_cfg.module]
namespace_parts = loaded_cfg.module.split('.')[:-1]
nspkgs_from_mod_name = [
'.'.join(namespace_parts[:i]) for i in range(1, len(namespace_parts) + 1)
]
if 'import-namespaces' in d['project']:
nspkgs_from_config = [
s.split(';')[0] for s in loaded_cfg.metadata['import_namespace']
]
if set(nspkgs_from_config) != set(nspkgs_from_mod_name):
raise ConfigError(
f"Specified import-namespaces {nspkgs_from_config} do not match "
f"the namespace packages present ({nspkgs_from_mod_name})"
)
else:
loaded_cfg.metadata['import_namespace'] = nspkgs_from_mod_name
unknown_sections = set(dtool) - {'module', 'sdist', 'external-data'}
unknown_sections = [s for s in unknown_sections if not s.lower().startswith('x-')]
if unknown_sections:
raise ConfigError('Unexpected tables in pyproject.toml: ' + ', '.join(
f'[tool.flit.{s}]' for s in unknown_sections
))
if 'sdist' in dtool:
unknown_keys = set(dtool['sdist']) - {'include', 'exclude'}
if unknown_keys:
raise ConfigError(
"Unknown keys in [tool.flit.sdist]:" + ", ".join(unknown_keys)
)
loaded_cfg.sdist_include_patterns = _check_glob_patterns(
dtool['sdist'].get('include', []), 'include'
)
exclude = [
"**/__pycache__",
"**.pyc",
] + dtool['sdist'].get('exclude', [])
loaded_cfg.sdist_exclude_patterns = _check_glob_patterns(
exclude, 'exclude'
)
data_dir = dtool.get('external-data', {}).get('directory', None)
if data_dir is not None:
toml_key = "tool.flit.external-data.directory"
if not isinstance(data_dir, str):
raise ConfigError(f"{toml_key} must be a string")
normp = osp.normpath(data_dir)
if isabs_ish(normp):
raise ConfigError(f"{toml_key} cannot be an absolute path")
if normp.startswith('..' + os.sep):
raise ConfigError(
f"{toml_key} cannot point outside the directory containing pyproject.toml"
)
if normp == '.':
raise ConfigError(
f"{toml_key} cannot refer to the directory containing pyproject.toml"
)
loaded_cfg.data_directory = path.parent / data_dir
if not loaded_cfg.data_directory.is_dir():
raise ConfigError(f"{toml_key} must refer to a directory")
return loaded_cfg
def _check_glob_patterns(pats, clude):
"""Check and normalise glob patterns for sdist include/exclude"""
if not isinstance(pats, list):
raise ConfigError(f"sdist {clude} patterns must be a list")
# Windows filenames can't contain these (nor * or ?, but they are part of
# glob patterns) - https://stackoverflow.com/a/31976060/434217
bad_chars = re.compile(r'[\000-\037<>:"\\]')
normed = []
for p in pats:
if bad_chars.search(p):
raise ConfigError(
f'{clude} pattern {p!r} contains bad characters (<>:\"\\ or control characters)'
)
normp = osp.normpath(p)
if isabs_ish(normp):
raise ConfigError(
f'{clude} pattern {p!r} is an absolute path'
)
if normp.startswith('..' + os.sep):
raise ConfigError(
f'{clude} pattern {p!r} points out of the directory containing pyproject.toml'
)
normed.append(normp)
return normed
| EntryPointsConflict |
python | getsentry__sentry | tests/sentry/db/models/test_utils.py | {
"start": 223,
"end": 2816
} | class ____(TestCase):
def test_works_with_standard_attrs(self) -> None:
org = self.create_organization()
assert is_model_attr_cached(org, "name") is True
def test_creation_association(self) -> None:
detector = self.create_detector()
assert is_model_attr_cached(detector, "workflow_condition_group") is False
detector.workflow_condition_group = self.create_data_condition_group()
detector.save()
refetched_detector = Detector.objects.get(id=detector.id)
# Detector maintains the association in memory
assert is_model_attr_cached(detector, "workflow_condition_group") is True
# When refetched, the association is not cached
assert is_model_attr_cached(refetched_detector, "workflow_condition_group") is False
def test_select_related(self) -> None:
detector = self.create_detector()
detector.workflow_condition_group = self.create_data_condition_group()
self.create_data_condition(
condition_group=detector.workflow_condition_group,
condition_result=75,
)
detector.save()
refetched_detector = (
Detector.objects.filter(id=detector.id)
.select_related("workflow_condition_group")
.first()
)
assert refetched_detector is not None
assert is_model_attr_cached(refetched_detector, "workflow_condition_group") is True
def test_prefetch(self) -> None:
detector = self.create_detector()
detector.workflow_condition_group = self.create_data_condition_group()
self.create_data_condition(
condition_group=detector.workflow_condition_group,
condition_result=75,
)
detector.save()
refetched_detector = (
Detector.objects.filter(id=detector.id)
.select_related("workflow_condition_group")
.prefetch_related("workflow_condition_group__conditions")
.first()
)
assert refetched_detector is not None
assert refetched_detector.workflow_condition_group is not None
assert is_model_attr_cached(refetched_detector, "workflow_condition_group") is True
assert (
is_model_attr_cached(refetched_detector.workflow_condition_group, "conditions") is True
)
# use the same model, but different query to ensure cache check is correct
another = Detector.objects.get(id=detector.id)
assert is_model_attr_cached(another, "workflow_condition_group") is False
| TestIsModelAttrCached |
python | sqlalchemy__sqlalchemy | test/orm/test_unitofwork.py | {
"start": 94849,
"end": 96884
} | class ____(fixtures.MappedTest):
@classmethod
def define_tables(cls, metadata):
Table(
"parent",
metadata,
Column("pid", Integer, primary_key=True),
Column("pdata", String(30)),
)
Table(
"child",
metadata,
Column("cid", Integer, primary_key=True),
Column("pid", Integer, ForeignKey("parent.pid")),
Column("cdata", String(30)),
)
@classmethod
def setup_classes(cls):
class P(cls.Comparable):
pass
class C(P):
pass
def test_row_switch_no_child_table(self):
P, C, parent, child = (
self.classes.P,
self.classes.C,
self.tables.parent,
self.tables.child,
)
self.mapper_registry.map_imperatively(P, parent)
self.mapper_registry.map_imperatively(C, child, inherits=P)
sess = fixture_session()
c1 = C(pid=1, cid=1, pdata="c1", cdata="c1")
sess.add(c1)
sess.flush()
# establish a row switch between c1 and c2.
# c2 has no value for the "child" table
c2 = C(pid=1, cid=1, pdata="c2")
sess.add(c2)
sess.delete(c1)
self.assert_sql_execution(
testing.db,
sess.flush,
CompiledSQL(
"UPDATE parent SET pdata=:pdata "
"WHERE parent.pid = :parent_pid",
{"pdata": "c2", "parent_pid": 1},
),
# this fires as of [ticket:1362], since we synchronzize
# PK/FKs on UPDATES. c2 is new so the history shows up as
# pure added, update occurs. If a future change limits the
# sync operation during _save_obj().update, this is safe to remove
# again.
CompiledSQL(
"UPDATE child SET pid=:pid WHERE child.cid = :child_cid",
{"pid": 1, "child_cid": 1},
),
)
| InheritingRowSwitchTest |
python | apache__thrift | lib/py/src/transport/TZlibTransport.py | {
"start": 1044,
"end": 2585
} | class ____:
"""Factory transport that builds zlib compressed transports.
This factory caches the last single client/transport that it was passed
and returns the same TZlibTransport object that was created.
This caching means the TServer class will get the _same_ transport
object for both input and output transports from this factory.
(For non-threaded scenarios only, since the cache only holds one object)
The purpose of this caching is to allocate only one TZlibTransport where
only one is really needed (since it must have separate read/write buffers),
and makes the statistics from getCompSavings() and getCompRatio()
easier to understand.
"""
# class scoped cache of last transport given and zlibtransport returned
_last_trans = None
_last_z = None
def getTransport(self, trans, compresslevel=9):
"""Wrap a transport, trans, with the TZlibTransport
compressed transport class, returning a new
transport to the caller.
@param compresslevel: The zlib compression level, ranging
from 0 (no compression) to 9 (best compression). Defaults to 9.
@type compresslevel: int
This method returns a TZlibTransport which wraps the
passed C{trans} TTransport derived instance.
"""
if trans == self._last_trans:
return self._last_z
ztrans = TZlibTransport(trans, compresslevel)
self._last_trans = trans
self._last_z = ztrans
return ztrans
| TZlibTransportFactory |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 936567,
"end": 936951
} | class ____(sgqlc.types.Type):
"""An edge in a connection."""
__schema__ = github_schema
__field_names__ = ("cursor", "node")
cursor = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="cursor")
"""A cursor for use in pagination."""
node = sgqlc.types.Field("Repository", graphql_name="node")
"""The item at the end of the edge."""
| RepositoryEdge |
python | langchain-ai__langchain | libs/partners/openai/tests/integration_tests/chat_models/test_responses_standard.py | {
"start": 453,
"end": 4544
} | class ____(TestOpenAIStandard):
@property
def chat_model_class(self) -> type[BaseChatModel]:
return ChatOpenAI
@property
def chat_model_params(self) -> dict:
return {"model": "gpt-4o-mini", "use_responses_api": True}
@property
def supports_image_tool_message(self) -> bool:
return True
@pytest.mark.xfail(reason="Unsupported.")
def test_stop_sequence(self, model: BaseChatModel) -> None:
super().test_stop_sequence(model)
def invoke_with_cache_read_input(self, *, stream: bool = False) -> AIMessage:
with Path.open(REPO_ROOT_DIR / "README.md") as f:
readme = f.read()
input_ = f"""What's langchain? Here's the langchain README:
{readme}
"""
llm = ChatOpenAI(model="gpt-4.1-mini", use_responses_api=True)
_invoke(llm, input_, stream)
# invoke twice so first invocation is cached
return _invoke(llm, input_, stream)
def invoke_with_reasoning_output(self, *, stream: bool = False) -> AIMessage:
llm = ChatOpenAI(
model="o4-mini",
reasoning={"effort": "medium", "summary": "auto"},
use_responses_api=True,
)
input_ = "What was the 3rd highest building in 2000?"
return _invoke(llm, input_, stream)
@pytest.mark.flaky(retries=3, delay=1)
def test_openai_pdf_inputs(self, model: BaseChatModel) -> None:
"""Test that the model can process PDF inputs."""
super().test_openai_pdf_inputs(model)
# Responses API additionally supports files via URL
url = "https://www.w3.org/WAI/ER/tests/xhtml/testfiles/resources/pdf/dummy.pdf"
message = HumanMessage(
[
{"type": "text", "text": "What is the document title, verbatim?"},
{"type": "file", "url": url},
]
)
_ = model.invoke([message])
# Test OpenAI Responses format
message = HumanMessage(
[
{"type": "text", "text": "What is the document title, verbatim?"},
{"type": "input_file", "file_url": url},
]
)
_ = model.invoke([message])
@property
def supports_pdf_tool_message(self) -> bool:
# OpenAI requires a filename for PDF inputs
# For now, we test with filename in OpenAI-specific tests
return False
def test_openai_pdf_tool_messages(self, model: BaseChatModel) -> None:
"""Test that the model can process PDF inputs in `ToolMessage` objects."""
url = "https://www.w3.org/WAI/ER/tests/xhtml/testfiles/resources/pdf/dummy.pdf"
pdf_data = base64.b64encode(httpx.get(url).content).decode("utf-8")
tool_message = ToolMessage(
content_blocks=[
{
"type": "file",
"base64": pdf_data,
"mime_type": "application/pdf",
"extras": {"filename": "my-pdf"}, # specify filename
},
],
tool_call_id="1",
name="random_pdf",
)
messages = [
HumanMessage(
"Get a random PDF using the tool and relay the title verbatim."
),
AIMessage(
[],
tool_calls=[
{
"type": "tool_call",
"id": "1",
"name": "random_pdf",
"args": {},
}
],
),
tool_message,
]
def random_pdf() -> str:
"""Return a random PDF."""
return ""
_ = model.bind_tools([random_pdf]).invoke(messages)
def _invoke(llm: ChatOpenAI, input_: str, stream: bool) -> AIMessage:
if stream:
full = None
for chunk in llm.stream(input_):
full = full + chunk if full else chunk # type: ignore[operator]
return cast(AIMessage, full)
return cast(AIMessage, llm.invoke(input_))
| TestOpenAIResponses |
python | pytorch__pytorch | torch/_higher_order_ops/map.py | {
"start": 4061,
"end": 10025
} | class ____(torch.autograd.Function):
@staticmethod
# pyrefly: ignore [bad-override]
def forward(ctx, f, num_mapped_args, *flat_args):
ctx._f = f
ctx._num_mapped_args = num_mapped_args
ctx._num_pos_args = len(flat_args) - num_mapped_args
# We snapshot the dispatch keys in forward for materializing the
# the bw_graph in backward.
ctx._fw_include_key_set = torch._C._dispatch_tls_local_include_set()
ctx._fw_exclude_key_set = torch._C._dispatch_tls_local_exclude_set()
save_tensors_and_symints_for_backward(ctx, flat_args)
with torch._C._AutoDispatchBelowAutograd():
return (
*map_impl(f, flat_args[:num_mapped_args], flat_args[num_mapped_args:]),
)
@staticmethod
def backward(ctx, *flat_grads):
fw_args = saved_tensors_and_symints(ctx)
num_mapped_args = ctx._num_mapped_args
num_pos_args = ctx._num_pos_args
num_grads = len(flat_grads)
fw_mapped_args, pos_args = split_into_chunks(
fw_args,
[
num_mapped_args,
num_pos_args,
],
)
bw_f = create_bw_fn(ctx._f, fw_args)
grads_tensor_masks = []
# Create a wrapper around thefor the bw_f
def bw_f_wrapper(*args):
nonlocal grads_tensor_masks
# Dissect args and re-order them for the ``ctx._bw_f``
# args provided to the wrapper are composed of [*fw_mapped_args, *flat_grads, *pos_args]
# The content of ``bw_f_tangents`` are the upstream gradients, i.e. flat_grads
# The content of ``bw_f_primals`` are the fw_args, i.e., [*fw_mapped_args, *pos_args]
# The bw_f requires *bw_f_primals, *bw_f_tangents
fw_m_args, bw_f_tangents, pos_args = split_into_chunks(
args, [num_mapped_args, num_grads, num_pos_args]
)
bw_f_primals = *fw_m_args, *pos_args
gradients = bw_f(*bw_f_primals, *bw_f_tangents)
grads_tensor_masks = [
True if isinstance(out, torch.Tensor) else out for out in gradients
]
return filter_with_masks(gradients, grads_tensor_masks)
def construct_args_single_step_bw():
unwrapped_mapped_xs = pytree.tree_map(_from_fun, fw_mapped_args)
example_xs = _unstack_pytree(unwrapped_mapped_xs)[0]
unwrapped_grads = pytree.tree_map(_from_fun, flat_grads)
example_grads = _unstack_pytree(unwrapped_grads)[0]
example_pos_args = [
_from_fun(arg) if isinstance(arg, torch.Tensor) else arg
for arg in pos_args
]
return *example_xs, *example_grads, *example_pos_args
with suspend_functionalization(), disable_functional_mode():
with disable_proxy_modes_tracing():
args_single_step_bw = construct_args_single_step_bw()
# TODO: we need to materialize the bw graphs because dynamo is unable to
# trace through the joint function when torch.compile torch.autograd.grad.
fn_bw_gm = materialize_as_graph(
bw_f_wrapper,
args_single_step_bw,
ctx._fw_include_key_set,
ctx._fw_exclude_key_set,
force_enable_grad=True,
)
grads = map_impl(fn_bw_gm, fw_mapped_args + flat_grads, pos_args)
return None, None, *fill_none_with_masks(grads, grads_tensor_masks)
def trace_map(proxy_mode, func_overload, f, xs, pos_args):
with disable_proxy_modes_tracing():
example_input = _unstack_pytree(xs)[0]
body_graph = f
body_graph = reenter_make_fx(body_graph)(*example_input, *pos_args)
next_name = proxy_mode.tracer.get_fresh_qualname("body_graph_")
proxy_mode.tracer.root.register_module(next_name, body_graph)
fake_outs = map_impl(body_graph, xs, pos_args)
node_args = (body_graph, list(xs), list(pos_args))
proxy_args = pytree.tree_map(proxy_mode.tracer.unwrap_proxy, node_args)
out_proxy = proxy_mode.tracer.create_proxy(
"call_function", func_overload, proxy_args, {}, name="map_impl"
)
return track_tensor_tree(
fake_outs, out_proxy, constant=None, tracer=proxy_mode.tracer
)
@map_impl.py_impl(DispatchKey.CompositeExplicitAutograd)
def map_dense(f, xs, pos_args):
pytrees = [f(*inp, *pos_args) for inp in _unstack_pytree(xs)]
return _stack_pytree(pytrees)
@map_impl.py_autograd_impl
def map_autograd(f, xs, pos_args):
num_mapped_args = len(xs)
flat_out = MapAutogradOp.apply(f, num_mapped_args, *xs, *pos_args)
return flat_out
@map_impl.py_impl(ProxyTorchDispatchMode)
def map_proxy_torch_dispatch_mode(mode, f, xs, args):
return trace_map(mode, map_impl, f, xs, args)
@map_impl.py_impl(FakeTensorMode)
def map_fake_tensor_mode(mode, f, xs, args):
with mode:
return map_dense(f, xs, args)
@map_impl.py_functionalize_impl
def map_functionalize(ctx, f, xs, pos_args):
from torch._higher_order_ops.utils import _check_alias_and_mutation
unwrapped_xs = ctx.unwrap_tensors(xs)
unwrapped_args = ctx.unwrap_tensors(pos_args)
wrapped_fn = ctx.functionalize(_maybe_run_with_interpreter(f))
with ctx.redispatch_to_next():
example_inputs = (*_unstack_pytree(unwrapped_xs)[0], *unwrapped_args)
pre_dispatch = hasattr(ctx, "mode") and ctx.mode.pre_dispatch
_check_alias_and_mutation(f, example_inputs, "map", pre_dispatch)
map_return = map_impl(wrapped_fn, unwrapped_xs, unwrapped_args)
return ctx.wrap_tensors(map_return)
def _fake_map(f, x, *args):
from functorch.experimental.control_flow import _stack_pytree, _unstack_pytree
x_pytrees = _unstack_pytree(x)
zs = []
for xp in x_pytrees:
zs.append(f(xp, *args))
return _stack_pytree(zs)
| MapAutogradOp |
python | getsentry__sentry | src/sentry/migrations/0927_dashboard_add_unique_constraint_user_dashboard.py | {
"start": 155,
"end": 1763
} | class ____(CheckedMigration):
# This flag is used to mark that a migration shouldn't be automatically run in production.
# This should only be used for operations where it's safe to run the migration after your
# code has deployed. So this should not be used for most operations that alter the schema
# of a table.
# Here are some things that make sense to mark as post deployment:
# - Large data migrations. Typically we want these to be run manually so that they can be
# monitored and not block the deploy for a long period of time while they run.
# - Adding indexes to large tables. Since this can take a long time, we'd generally prefer to
# run this outside deployments so that we don't block them. Note that while adding an index
# is a schema change, it's completely safe to run the operation after the code has deployed.
# Once deployed, run these manually via: https://develop.sentry.dev/database-migrations/#migration-deployment
is_post_deployment = False
dependencies = [
("sentry", "0926_dashboard_favorite_defer_position_constraint"),
]
operations = [
migrations.SeparateDatabaseAndState(
database_operations=[
migrations.AddConstraint(
model_name="dashboardfavoriteuser",
constraint=models.UniqueConstraint(
fields=("user_id", "dashboard_id"),
name="sentry_dashboardfavoriteuser_user_id_dashboard_id_2c7267a5_uniq",
),
)
]
)
]
| Migration |
python | apache__airflow | providers/google/tests/unit/google/cloud/operators/test_bigquery.py | {
"start": 4156,
"end": 11979
} | class ____:
@mock.patch("airflow.providers.google.cloud.operators.bigquery.BigQueryHook")
def test_execute(self, mock_hook):
operator = BigQueryCreateTableOperator(
task_id=TASK_ID,
dataset_id=TEST_DATASET,
project_id=TEST_GCP_PROJECT_ID,
table_id=TEST_TABLE_ID,
table_resource={},
)
operator.execute(context=MagicMock())
mock_hook.return_value.create_table.assert_called_once_with(
dataset_id=TEST_DATASET,
project_id=TEST_GCP_PROJECT_ID,
table_id=TEST_TABLE_ID,
table_resource={},
exists_ok=False,
schema_fields=None,
location=None,
timeout=None,
)
@mock.patch("airflow.providers.google.cloud.operators.bigquery.BigQueryHook")
def test_create_view(self, mock_hook):
body = {
"tableReference": {
"tableId": TEST_TABLE_ID,
"projectId": TEST_GCP_PROJECT_ID,
"datasetId": TEST_DATASET,
},
"view": VIEW_DEFINITION,
}
operator = BigQueryCreateTableOperator(
task_id=TASK_ID,
dataset_id=TEST_DATASET,
project_id=TEST_GCP_PROJECT_ID,
table_id=TEST_TABLE_ID,
table_resource=body,
)
operator.execute(context=MagicMock())
@mock.patch("airflow.providers.google.cloud.operators.bigquery.BigQueryHook")
def test_create_materialized_view(self, mock_hook):
body = {
"tableReference": {
"tableId": TEST_TABLE_ID,
"projectId": TEST_GCP_PROJECT_ID,
"datasetId": TEST_DATASET,
},
"materializedView": MATERIALIZED_VIEW_DEFINITION,
}
operator = BigQueryCreateTableOperator(
task_id=TASK_ID,
dataset_id=TEST_DATASET,
project_id=TEST_GCP_PROJECT_ID,
table_id=TEST_TABLE_ID,
table_resource=body,
)
operator.execute(context=MagicMock())
mock_hook.return_value.create_table.assert_called_once_with(
dataset_id=TEST_DATASET,
project_id=TEST_GCP_PROJECT_ID,
table_id=TEST_TABLE_ID,
schema_fields=None,
table_resource=body,
exists_ok=False,
location=None,
timeout=None,
)
@mock.patch("airflow.providers.google.cloud.operators.bigquery.BigQueryHook")
def test_create_clustered_table(self, mock_hook):
schema_fields = [
{"name": "emp_name", "type": "STRING", "mode": "REQUIRED"},
{"name": "date_hired", "type": "DATE", "mode": "REQUIRED"},
{"name": "date_birth", "type": "DATE", "mode": "NULLABLE"},
]
time_partitioning = {"type": "DAY", "field": "date_hired"}
cluster_fields = ["date_birth"]
body = {
"tableReference": {
"tableId": TEST_TABLE_ID,
"projectId": TEST_GCP_PROJECT_ID,
"datasetId": TEST_DATASET,
},
"schema": schema_fields,
"timePartitioning": time_partitioning,
"clusterFields": cluster_fields,
}
operator = BigQueryCreateTableOperator(
task_id=TASK_ID,
dataset_id=TEST_DATASET,
project_id=TEST_GCP_PROJECT_ID,
table_id=TEST_TABLE_ID,
table_resource=body,
)
operator.execute(context=MagicMock())
mock_hook.return_value.create_table.assert_called_once_with(
dataset_id=TEST_DATASET,
project_id=TEST_GCP_PROJECT_ID,
table_id=TEST_TABLE_ID,
table_resource=body,
exists_ok=False,
schema_fields=None,
timeout=None,
location=None,
)
@pytest.mark.parametrize(
("if_exists", "is_conflict", "expected_error", "log_msg"),
[
("ignore", False, None, None),
("log", False, None, None),
("log", True, None, f"Table {TEST_DATASET}.{TEST_TABLE_ID} already exists."),
("fail", False, None, None),
("fail", True, AirflowException, None),
("skip", False, None, None),
("skip", True, AirflowSkipException, None),
],
)
@mock.patch("airflow.providers.google.cloud.operators.bigquery.BigQueryHook")
def test_create_existing_table(self, mock_hook, caplog, if_exists, is_conflict, expected_error, log_msg):
body = {
"tableReference": {
"tableId": TEST_TABLE_ID,
"projectId": TEST_GCP_PROJECT_ID,
"datasetId": TEST_DATASET,
},
"view": VIEW_DEFINITION,
}
operator = BigQueryCreateTableOperator(
task_id=TASK_ID,
dataset_id=TEST_DATASET,
project_id=TEST_GCP_PROJECT_ID,
table_id=TEST_TABLE_ID,
table_resource=body,
if_exists=if_exists,
)
if is_conflict:
mock_hook.return_value.create_table.side_effect = Conflict("any")
else:
mock_hook.return_value.create_table.side_effect = None
if expected_error is not None:
with pytest.raises(expected_error):
operator.execute(context=MagicMock())
else:
operator.execute(context=MagicMock())
if log_msg is not None:
assert log_msg in caplog.text
@mock.patch("airflow.providers.google.cloud.operators.bigquery.BigQueryHook")
def test_get_openlineage_facets_on_complete(self, mock_hook):
schema_fields = [
{"name": "field1", "type": "STRING", "description": "field1 description"},
{"name": "field2", "type": "INTEGER"},
]
table_resource = {
"tableReference": {
"projectId": TEST_GCP_PROJECT_ID,
"datasetId": TEST_DATASET,
"tableId": TEST_TABLE_ID,
},
"description": "Table description.",
"schema": {"fields": schema_fields},
}
mock_hook.return_value.create_table.return_value = Table.from_api_repr(table_resource)
operator = BigQueryCreateTableOperator(
task_id=TASK_ID,
dataset_id=TEST_DATASET,
project_id=TEST_GCP_PROJECT_ID,
table_id=TEST_TABLE_ID,
table_resource=table_resource,
)
operator.execute(context=MagicMock())
mock_hook.return_value.create_table.assert_called_once_with(
dataset_id=TEST_DATASET,
project_id=TEST_GCP_PROJECT_ID,
table_id=TEST_TABLE_ID,
table_resource=table_resource,
exists_ok=False,
schema_fields=None,
location=None,
timeout=None,
)
result = operator.get_openlineage_facets_on_complete(None)
assert not result.run_facets
assert not result.job_facets
assert not result.inputs
assert len(result.outputs) == 1
assert result.outputs[0].namespace == BIGQUERY_NAMESPACE
assert result.outputs[0].name == f"{TEST_GCP_PROJECT_ID}.{TEST_DATASET}.{TEST_TABLE_ID}"
assert result.outputs[0].facets == {
"schema": SchemaDatasetFacet(
fields=[
SchemaDatasetFacetFields(name="field1", type="STRING", description="field1 description"),
SchemaDatasetFacetFields(name="field2", type="INTEGER"),
]
),
"documentation": DocumentationDatasetFacet(description="Table description."),
}
| TestBigQueryCreateTableOperator |
python | mlflow__mlflow | mlflow/genai/judges/tools/get_span_performance_and_timing_report.py | {
"start": 643,
"end": 944
} | class ____:
"""Timing data for a single span."""
span_id: str
name: str
span_type: str
total_duration_s: float
self_duration_s: float
child_duration_s: float
span_number: str
parent_number: str | None
ancestors: list[str]
depth: int
@dataclass
| SpanTimingData |
python | kamyu104__LeetCode-Solutions | Python/amount-of-time-for-binary-tree-to-be-infected.py | {
"start": 2516,
"end": 3683
} | class ____(object):
def amountOfTime(self, root, start):
"""
:type root: Optional[TreeNode]
:type start: int
:rtype: int
"""
def bfs(root):
adj = collections.defaultdict(list)
q = [root]
while q:
new_q = []
for u in q:
for v in (u.left, u.right):
if v is None:
continue
adj[u.val].append(v.val)
adj[v.val].append(u.val)
new_q.append(v)
q = new_q
return adj
def bfs2(adj, start):
result = -1
q = [start]
lookup = {start}
while q:
new_q = []
for u in q:
for v in adj[u]:
if v in lookup:
continue
lookup.add(v)
new_q.append(v)
q = new_q
result += 1
return result
adj = bfs(root)
return bfs2(adj, start)
| Solution3 |
python | django__django | tests/db_functions/comparison/test_nullif.py | {
"start": 211,
"end": 1718
} | class ____(TestCase):
@classmethod
def setUpTestData(cls):
Author.objects.create(name="John Smith", alias="smithj")
Author.objects.create(name="Rhonda", alias="Rhonda")
def test_basic(self):
authors = Author.objects.annotate(nullif=NullIf("alias", "name")).values_list(
"nullif"
)
self.assertCountEqual(
authors,
[
("smithj",),
(
(
""
if connection.features.interprets_empty_strings_as_nulls
else None
),
),
],
)
def test_null_argument(self):
authors = Author.objects.annotate(
nullif=NullIf("name", Value(None))
).values_list("nullif")
self.assertCountEqual(authors, [("John Smith",), ("Rhonda",)])
def test_too_few_args(self):
msg = "'NullIf' takes exactly 2 arguments (1 given)"
with self.assertRaisesMessage(TypeError, msg):
NullIf("name")
@skipUnless(connection.vendor == "oracle", "Oracle specific test for NULL-literal")
def test_null_literal(self):
msg = "Oracle does not allow Value(None) for expression1."
with self.assertRaisesMessage(ValueError, msg):
list(
Author.objects.annotate(nullif=NullIf(Value(None), "name")).values_list(
"nullif"
)
)
| NullIfTests |
python | Textualize__textual | docs/blog/snippets/2022-12-07-responsive-app-background-task/nonblocking01.py | {
"start": 448,
"end": 1068
} | class ____(App[None]):
BINDINGS = [("l", "load", "Load data")]
CSS = """
Grid {
grid-size: 2;
}
"""
def compose(self) -> ComposeResult:
yield Grid(
ColourChanger(),
VerticalScroll(id="log"),
)
yield Footer()
def action_load(self) -> None:
asyncio.create_task(self._do_long_operation())
async def _do_long_operation(self) -> None:
self.query_one("#log").mount(Label("Starting ⏳")) # (1)!
await asyncio.sleep(5) # (2)!
self.query_one("#log").mount(Label("Data loaded ✅")) # (3)!
MyApp().run()
| MyApp |
python | doocs__leetcode | lcof/面试题19. 正则表达式匹配/Solution2.py | {
"start": 0,
"end": 583
} | class ____:
def isMatch(self, s: str, p: str) -> bool:
m, n = len(s), len(p)
f = [[False] * (n + 1) for _ in range(m + 1)]
f[0][0] = True
for i in range(m + 1):
for j in range(1, n + 1):
if p[j - 1] == "*":
f[i][j] = f[i][j - 2]
if i > 0 and (p[j - 2] == "." or s[i - 1] == p[j - 2]):
f[i][j] |= f[i - 1][j]
elif i > 0 and (p[j - 1] == "." or s[i - 1] == p[j - 1]):
f[i][j] = f[i - 1][j - 1]
return f[m][n]
| Solution |
python | langchain-ai__langchain | libs/core/langchain_core/runnables/base.py | {
"start": 213944,
"end": 214080
} | class ____(Protocol[Input, Output]):
def __call__(self, _in: Input, /, *, config: RunnableConfig) -> Output: ...
| _RunnableCallableSync |
python | aio-libs__aiohttp | tests/test_websocket_parser.py | {
"start": 21437,
"end": 23193
} | class ____:
def test_ctor(self) -> None:
err = WebSocketError(WSCloseCode.PROTOCOL_ERROR, "Something invalid")
assert err.code == WSCloseCode.PROTOCOL_ERROR
assert str(err) == "Something invalid"
def test_pickle(self) -> None:
err = WebSocketError(WSCloseCode.PROTOCOL_ERROR, "Something invalid")
err.foo = "bar" # type: ignore[attr-defined]
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
pickled = pickle.dumps(err, proto)
err2 = pickle.loads(pickled)
assert err2.code == WSCloseCode.PROTOCOL_ERROR
assert str(err2) == "Something invalid"
assert err2.foo == "bar"
def test_flow_control_binary(
protocol: BaseProtocol,
out_low_limit: WebSocketDataQueue,
parser_low_limit: PatchableWebSocketReader,
) -> None:
large_payload = b"b" * (1 + 16 * 2)
large_payload_size = len(large_payload)
parser_low_limit._handle_frame(True, WSMsgType.BINARY, large_payload, 0)
res = out_low_limit._buffer[0]
assert res == WSMessageBinary(data=large_payload, size=large_payload_size, extra="")
assert protocol._reading_paused is True
def test_flow_control_multi_byte_text(
protocol: BaseProtocol,
out_low_limit: WebSocketDataQueue,
parser_low_limit: PatchableWebSocketReader,
) -> None:
large_payload_text = "𒀁" * (1 + 16 * 2)
large_payload = large_payload_text.encode("utf-8")
large_payload_size = len(large_payload)
parser_low_limit._handle_frame(True, WSMsgType.TEXT, large_payload, 0)
res = out_low_limit._buffer[0]
assert res == WSMessageText(
data=large_payload_text, size=large_payload_size, extra=""
)
assert protocol._reading_paused is True
| TestWebSocketError |
python | MorvanZhou__Reinforcement-learning-with-tensorflow | contents/10_A3C/A3C_RNN.py | {
"start": 890,
"end": 5384
} | class ____(object):
def __init__(self, scope, globalAC=None):
if scope == GLOBAL_NET_SCOPE: # get global network
with tf.variable_scope(scope):
self.s = tf.placeholder(tf.float32, [None, N_S], 'S')
self.a_params, self.c_params = self._build_net(scope)[-2:]
else: # local net, calculate losses
with tf.variable_scope(scope):
self.s = tf.placeholder(tf.float32, [None, N_S], 'S')
self.a_his = tf.placeholder(tf.float32, [None, N_A], 'A')
self.v_target = tf.placeholder(tf.float32, [None, 1], 'Vtarget')
mu, sigma, self.v, self.a_params, self.c_params = self._build_net(scope)
td = tf.subtract(self.v_target, self.v, name='TD_error')
with tf.name_scope('c_loss'):
self.c_loss = tf.reduce_mean(tf.square(td))
with tf.name_scope('wrap_a_out'):
mu, sigma = mu * A_BOUND[1], sigma + 1e-4
normal_dist = tf.distributions.Normal(mu, sigma)
with tf.name_scope('a_loss'):
log_prob = normal_dist.log_prob(self.a_his)
exp_v = log_prob * tf.stop_gradient(td)
entropy = normal_dist.entropy() # encourage exploration
self.exp_v = ENTROPY_BETA * entropy + exp_v
self.a_loss = tf.reduce_mean(-self.exp_v)
with tf.name_scope('choose_a'): # use local params to choose action
self.A = tf.clip_by_value(tf.squeeze(normal_dist.sample(1), axis=[0, 1]), A_BOUND[0], A_BOUND[1])
with tf.name_scope('local_grad'):
self.a_grads = tf.gradients(self.a_loss, self.a_params)
self.c_grads = tf.gradients(self.c_loss, self.c_params)
with tf.name_scope('sync'):
with tf.name_scope('pull'):
self.pull_a_params_op = [l_p.assign(g_p) for l_p, g_p in zip(self.a_params, globalAC.a_params)]
self.pull_c_params_op = [l_p.assign(g_p) for l_p, g_p in zip(self.c_params, globalAC.c_params)]
with tf.name_scope('push'):
self.update_a_op = OPT_A.apply_gradients(zip(self.a_grads, globalAC.a_params))
self.update_c_op = OPT_C.apply_gradients(zip(self.c_grads, globalAC.c_params))
def _build_net(self, scope):
w_init = tf.random_normal_initializer(0., .1)
with tf.variable_scope('critic'): # only critic controls the rnn update
cell_size = 64
s = tf.expand_dims(self.s, axis=1,
name='timely_input') # [time_step, feature] => [time_step, batch, feature]
rnn_cell = tf.contrib.rnn.BasicRNNCell(cell_size)
self.init_state = rnn_cell.zero_state(batch_size=1, dtype=tf.float32)
outputs, self.final_state = tf.nn.dynamic_rnn(
cell=rnn_cell, inputs=s, initial_state=self.init_state, time_major=True)
cell_out = tf.reshape(outputs, [-1, cell_size], name='flatten_rnn_outputs') # joined state representation
l_c = tf.layers.dense(cell_out, 50, tf.nn.relu6, kernel_initializer=w_init, name='lc')
v = tf.layers.dense(l_c, 1, kernel_initializer=w_init, name='v') # state value
with tf.variable_scope('actor'): # state representation is based on critic
l_a = tf.layers.dense(cell_out, 80, tf.nn.relu6, kernel_initializer=w_init, name='la')
mu = tf.layers.dense(l_a, N_A, tf.nn.tanh, kernel_initializer=w_init, name='mu')
sigma = tf.layers.dense(l_a, N_A, tf.nn.softplus, kernel_initializer=w_init, name='sigma')
a_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=scope + '/actor')
c_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=scope + '/critic')
return mu, sigma, v, a_params, c_params
def update_global(self, feed_dict): # run by a local
SESS.run([self.update_a_op, self.update_c_op], feed_dict) # local grads applies to global net
def pull_global(self): # run by a local
SESS.run([self.pull_a_params_op, self.pull_c_params_op])
def choose_action(self, s, cell_state): # run by a local
s = s[np.newaxis, :]
a, cell_state = SESS.run([self.A, self.final_state], {self.s: s, self.init_state: cell_state})
return a, cell_state
| ACNet |
python | ansible__ansible | lib/ansible/_internal/_templating/_marker_behaviors.py | {
"start": 525,
"end": 1157
} | class ____(MarkerBehavior):
"""
The default behavior when encountering a `Marker` value during concatenation or finalization.
This always raises the template-internal `MarkerError` exception.
"""
def handle_marker(self, value: Marker) -> t.Any:
value.trip()
# FAIL_ON_MARKER_BEHAVIOR
# _DETONATE_MARKER_BEHAVIOR - internal singleton since it's the default and nobody should need to reference it, or make it an actual singleton
FAIL_ON_UNDEFINED: t.Final = FailingMarkerBehavior() # no sense in making many instances...
@dataclasses.dataclass(kw_only=True, slots=True, frozen=True)
| FailingMarkerBehavior |
python | kubernetes-client__python | kubernetes/client/api/networking_v1beta1_api.py | {
"start": 543,
"end": 213296
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def create_ip_address(self, body, **kwargs): # noqa: E501
"""create_ip_address # noqa: E501
create an IPAddress # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_ip_address(body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param V1beta1IPAddress body: (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1beta1IPAddress
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.create_ip_address_with_http_info(body, **kwargs) # noqa: E501
def create_ip_address_with_http_info(self, body, **kwargs): # noqa: E501
"""create_ip_address # noqa: E501
create an IPAddress # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_ip_address_with_http_info(body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param V1beta1IPAddress body: (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1beta1IPAddress, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'body',
'pretty',
'dry_run',
'field_manager',
'field_validation'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method create_ip_address" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'body' is set
if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
local_var_params['body'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `body` when calling `create_ip_address`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/networking.k8s.io/v1beta1/ipaddresses', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1beta1IPAddress', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def create_service_cidr(self, body, **kwargs):  # noqa: E501
    """create_service_cidr  # noqa: E501

    create a ServiceCIDR. Synchronous by default; pass async_req=True
    to perform the request asynchronously and get the request thread
    back instead of the response.

    >>> thread = api.create_service_cidr(body, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param V1beta1ServiceCIDR body: (required)
    :param str pretty: If 'true', the output is pretty printed.
    :param str dry_run: When present, modifications are not persisted. Valid values: - All: all dry run stages will be processed
    :param str field_manager: Name associated with the actor or entity making these changes.
    :param str field_validation: How the server handles unknown or duplicate fields in the request body (Ignore / Warn / Strict).
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
                             without reading/decoding. Default is True.
    :param _request_timeout: total request timeout in seconds, or a
                             (connection, read) timeout pair.
    :return: V1beta1ServiceCIDR
             If the method is called asynchronously,
             returns the request thread.
    """
    # Delegate to the *_with_http_info variant, asking it for just the
    # deserialized payload rather than the (data, status, headers) tuple.
    options = dict(kwargs, _return_http_data_only=True)
    return self.create_service_cidr_with_http_info(body, **options)  # noqa: E501
def create_service_cidr_with_http_info(self, body, **kwargs):  # noqa: E501
    """create_service_cidr  # noqa: E501

    create a ServiceCIDR. Synchronous by default; pass async_req=True
    to perform the request asynchronously and get the request thread
    back instead of the response.

    >>> thread = api.create_service_cidr_with_http_info(body, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param V1beta1ServiceCIDR body: (required)
    :param str pretty: If 'true', the output is pretty printed.
    :param str dry_run: When present, modifications are not persisted. Valid values: - All: all dry run stages will be processed
    :param str field_manager: Name associated with the actor or entity making these changes.
    :param str field_validation: How the server handles unknown or duplicate fields in the request body (Ignore / Warn / Strict).
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
                             without reading/decoding. Default is True.
    :param _request_timeout: total request timeout in seconds, or a
                             (connection, read) timeout pair.
    :return: tuple(V1beta1ServiceCIDR, status_code(int), headers(HTTPHeaderDict))
             If the method is called asynchronously,
             returns the request thread.
    """
    # python argument name -> wire (query string) name, in wire order.
    query_arg_names = (
        ('pretty', 'pretty'),
        ('dry_run', 'dryRun'),
        ('field_manager', 'fieldManager'),
        ('field_validation', 'fieldValidation'),
    )

    # Reject keyword arguments this endpoint does not understand.
    recognized = {name for name, _ in query_arg_names}
    recognized.update((
        'async_req',
        '_return_http_data_only',
        '_preload_content',
        '_request_timeout',
    ))
    for key in kwargs:
        if key not in recognized:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method create_service_cidr" % key
            )

    # verify the required parameter 'body' is set
    if self.api_client.client_side_validation and body is None:
        raise ApiValueError("Missing the required parameter `body` when calling `create_service_cidr`")  # noqa: E501

    # Only explicitly supplied, non-None values go on the query string.
    query_params = [
        (wire, kwargs[name])
        for name, wire in query_arg_names
        if kwargs.get(name) is not None
    ]

    # HTTP header `Accept`
    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor'])  # noqa: E501
    }

    return self.api_client.call_api(
        '/apis/networking.k8s.io/v1beta1/servicecidrs', 'POST',
        {},                # no path parameters for this endpoint
        query_params,
        header_params,
        body=body,
        post_params=[],    # no form parameters
        files={},          # no file uploads
        response_type='V1beta1ServiceCIDR',  # noqa: E501
        auth_settings=['BearerToken'],  # noqa: E501
        async_req=kwargs.get('async_req'),
        _return_http_data_only=kwargs.get('_return_http_data_only'),  # noqa: E501
        _preload_content=kwargs.get('_preload_content', True),
        _request_timeout=kwargs.get('_request_timeout'),
        collection_formats={})
def delete_collection_ip_address(self, **kwargs):  # noqa: E501
    """delete_collection_ip_address  # noqa: E501

    delete collection of IPAddress. Synchronous by default; pass
    async_req=True to perform the request asynchronously and get the
    request thread back instead of the response.

    >>> thread = api.delete_collection_ip_address(async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str pretty: If 'true', the output is pretty printed.
    :param str _continue: Continuation token from a previous list result; must be used with otherwise-identical query parameters.
    :param str dry_run: When present, modifications are not persisted. Valid values: - All: all dry run stages will be processed
    :param str field_selector: Restrict the list of returned objects by their fields. Defaults to everything.
    :param int grace_period_seconds: Non-negative seconds before the object is deleted; zero means delete immediately.
    :param bool ignore_store_read_error_with_cluster_breaking_potential: If true, unsafely delete objects whose storage reads fail (corrupt objects). Use with extreme care.
    :param str label_selector: Restrict the list of returned objects by their labels. Defaults to everything.
    :param int limit: Maximum number of responses for a list call; combine with `continue` for chunked listing.
    :param bool orphan_dependents: Deprecated: use propagation_policy instead. Whether dependents are orphaned.
    :param str propagation_policy: Garbage-collection policy: 'Orphan', 'Background' or 'Foreground'. Mutually exclusive with orphan_dependents.
    :param str resource_version: resourceVersion constraint on what versions the request may be served from.
    :param str resource_version_match: How resourceVersion is applied to list calls.
    :param bool send_initial_events: With watch=true, begin the stream with synthetic events for the current state, then a Bookmark event.
    :param int timeout_seconds: Timeout for the list/watch call regardless of activity.
    :param V1DeleteOptions body:
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
                             without reading/decoding. Default is True.
    :param _request_timeout: total request timeout in seconds, or a
                             (connection, read) timeout pair.
    :return: V1Status
             If the method is called asynchronously,
             returns the request thread.
    """
    # Delegate to the *_with_http_info variant, asking it for just the
    # deserialized payload rather than the (data, status, headers) tuple.
    options = dict(kwargs, _return_http_data_only=True)
    return self.delete_collection_ip_address_with_http_info(**options)  # noqa: E501
def delete_collection_ip_address_with_http_info(self, **kwargs):  # noqa: E501
    """delete_collection_ip_address  # noqa: E501

    delete collection of IPAddress. Synchronous by default; pass
    async_req=True to perform the request asynchronously and get the
    request thread back instead of the response.

    >>> thread = api.delete_collection_ip_address_with_http_info(async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str pretty: If 'true', the output is pretty printed.
    :param str _continue: Continuation token from a previous list result; must be used with otherwise-identical query parameters.
    :param str dry_run: When present, modifications are not persisted. Valid values: - All: all dry run stages will be processed
    :param str field_selector: Restrict the list of returned objects by their fields. Defaults to everything.
    :param int grace_period_seconds: Non-negative seconds before the object is deleted; zero means delete immediately.
    :param bool ignore_store_read_error_with_cluster_breaking_potential: If true, unsafely delete objects whose storage reads fail (corrupt objects). Use with extreme care.
    :param str label_selector: Restrict the list of returned objects by their labels. Defaults to everything.
    :param int limit: Maximum number of responses for a list call; combine with `continue` for chunked listing.
    :param bool orphan_dependents: Deprecated: use propagation_policy instead. Whether dependents are orphaned.
    :param str propagation_policy: Garbage-collection policy: 'Orphan', 'Background' or 'Foreground'. Mutually exclusive with orphan_dependents.
    :param str resource_version: resourceVersion constraint on what versions the request may be served from.
    :param str resource_version_match: How resourceVersion is applied to list calls.
    :param bool send_initial_events: With watch=true, begin the stream with synthetic events for the current state, then a Bookmark event.
    :param int timeout_seconds: Timeout for the list/watch call regardless of activity.
    :param V1DeleteOptions body:
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
                             without reading/decoding. Default is True.
    :param _request_timeout: total request timeout in seconds, or a
                             (connection, read) timeout pair.
    :return: tuple(V1Status, status_code(int), headers(HTTPHeaderDict))
             If the method is called asynchronously,
             returns the request thread.
    """
    # python argument name -> wire (query string) name, in wire order.
    query_arg_names = (
        ('pretty', 'pretty'),
        ('_continue', 'continue'),
        ('dry_run', 'dryRun'),
        ('field_selector', 'fieldSelector'),
        ('grace_period_seconds', 'gracePeriodSeconds'),
        ('ignore_store_read_error_with_cluster_breaking_potential', 'ignoreStoreReadErrorWithClusterBreakingPotential'),
        ('label_selector', 'labelSelector'),
        ('limit', 'limit'),
        ('orphan_dependents', 'orphanDependents'),
        ('propagation_policy', 'propagationPolicy'),
        ('resource_version', 'resourceVersion'),
        ('resource_version_match', 'resourceVersionMatch'),
        ('send_initial_events', 'sendInitialEvents'),
        ('timeout_seconds', 'timeoutSeconds'),
    )

    # Reject keyword arguments this endpoint does not understand.
    recognized = {name for name, _ in query_arg_names}
    recognized.update((
        'body',
        'async_req',
        '_return_http_data_only',
        '_preload_content',
        '_request_timeout',
    ))
    for key in kwargs:
        if key not in recognized:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method delete_collection_ip_address" % key
            )

    # Only explicitly supplied, non-None values go on the query string.
    query_params = [
        (wire, kwargs[name])
        for name, wire in query_arg_names
        if kwargs.get(name) is not None
    ]

    # HTTP header `Accept`
    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor'])  # noqa: E501
    }

    return self.api_client.call_api(
        '/apis/networking.k8s.io/v1beta1/ipaddresses', 'DELETE',
        {},                # no path parameters for this endpoint
        query_params,
        header_params,
        body=kwargs.get('body'),
        post_params=[],    # no form parameters
        files={},          # no file uploads
        response_type='V1Status',  # noqa: E501
        auth_settings=['BearerToken'],  # noqa: E501
        async_req=kwargs.get('async_req'),
        _return_http_data_only=kwargs.get('_return_http_data_only'),  # noqa: E501
        _preload_content=kwargs.get('_preload_content', True),
        _request_timeout=kwargs.get('_request_timeout'),
        collection_formats={})
def delete_collection_service_cidr(self, **kwargs): # noqa: E501
"""delete_collection_service_cidr # noqa: E501
delete collection of ServiceCIDR # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_collection_service_cidr(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
:param bool ignore_store_read_error_with_cluster_breaking_potential: if set to true, it will trigger an unsafe deletion of the resource in case the normal deletion flow fails with a corrupt object error. A resource is considered corrupt if it can not be retrieved from the underlying storage successfully because of a) its data can not be transformed e.g. decryption failure, or b) it fails to decode into an object. NOTE: unsafe deletion ignores finalizer constraints, skips precondition checks, and removes the object from the storage. WARNING: This may potentially break the cluster if the workload associated with the resource being unsafe-deleted relies on normal deletion flow. Use only if you REALLY know what you are doing. The default value is false, and the user must opt in to enable it
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
:param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
:param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
:param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param V1DeleteOptions body:
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1Status
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.delete_collection_service_cidr_with_http_info(**kwargs) # noqa: E501
def delete_collection_service_cidr_with_http_info(self, **kwargs): # noqa: E501
"""delete_collection_service_cidr # noqa: E501
delete collection of ServiceCIDR # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_collection_service_cidr_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
:param bool ignore_store_read_error_with_cluster_breaking_potential: if set to true, it will trigger an unsafe deletion of the resource in case the normal deletion flow fails with a corrupt object error. A resource is considered corrupt if it can not be retrieved from the underlying storage successfully because of a) its data can not be transformed e.g. decryption failure, or b) it fails to decode into an object. NOTE: unsafe deletion ignores finalizer constraints, skips precondition checks, and removes the object from the storage. WARNING: This may potentially break the cluster if the workload associated with the resource being unsafe-deleted relies on normal deletion flow. Use only if you REALLY know what you are doing. The default value is false, and the user must opt in to enable it
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
:param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
:param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
:param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param V1DeleteOptions body:
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1Status, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'pretty',
'_continue',
'dry_run',
'field_selector',
'grace_period_seconds',
'ignore_store_read_error_with_cluster_breaking_potential',
'label_selector',
'limit',
'orphan_dependents',
'propagation_policy',
'resource_version',
'resource_version_match',
'send_initial_events',
'timeout_seconds',
'body'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_collection_service_cidr" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if '_continue' in local_var_params and local_var_params['_continue'] is not None: # noqa: E501
query_params.append(('continue', local_var_params['_continue'])) # noqa: E501
if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None: # noqa: E501
query_params.append(('fieldSelector', local_var_params['field_selector'])) # noqa: E501
if 'grace_period_seconds' in local_var_params and local_var_params['grace_period_seconds'] is not None: # noqa: E501
query_params.append(('gracePeriodSeconds', local_var_params['grace_period_seconds'])) # noqa: E501
if 'ignore_store_read_error_with_cluster_breaking_potential' in local_var_params and local_var_params['ignore_store_read_error_with_cluster_breaking_potential'] is not None: # noqa: E501
query_params.append(('ignoreStoreReadErrorWithClusterBreakingPotential', local_var_params['ignore_store_read_error_with_cluster_breaking_potential'])) # noqa: E501
if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None: # noqa: E501
query_params.append(('labelSelector', local_var_params['label_selector'])) # noqa: E501
if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501
query_params.append(('limit', local_var_params['limit'])) # noqa: E501
if 'orphan_dependents' in local_var_params and local_var_params['orphan_dependents'] is not None: # noqa: E501
query_params.append(('orphanDependents', local_var_params['orphan_dependents'])) # noqa: E501
if 'propagation_policy' in local_var_params and local_var_params['propagation_policy'] is not None: # noqa: E501
query_params.append(('propagationPolicy', local_var_params['propagation_policy'])) # noqa: E501
if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None: # noqa: E501
query_params.append(('resourceVersion', local_var_params['resource_version'])) # noqa: E501
if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None: # noqa: E501
query_params.append(('resourceVersionMatch', local_var_params['resource_version_match'])) # noqa: E501
if 'send_initial_events' in local_var_params and local_var_params['send_initial_events'] is not None: # noqa: E501
query_params.append(('sendInitialEvents', local_var_params['send_initial_events'])) # noqa: E501
if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None: # noqa: E501
query_params.append(('timeoutSeconds', local_var_params['timeout_seconds'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/networking.k8s.io/v1beta1/servicecidrs', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Status', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def delete_ip_address(self, name, **kwargs): # noqa: E501
"""delete_ip_address # noqa: E501
delete an IPAddress # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_ip_address(name, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the IPAddress (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
:param bool ignore_store_read_error_with_cluster_breaking_potential: if set to true, it will trigger an unsafe deletion of the resource in case the normal deletion flow fails with a corrupt object error. A resource is considered corrupt if it can not be retrieved from the underlying storage successfully because of a) its data can not be transformed e.g. decryption failure, or b) it fails to decode into an object. NOTE: unsafe deletion ignores finalizer constraints, skips precondition checks, and removes the object from the storage. WARNING: This may potentially break the cluster if the workload associated with the resource being unsafe-deleted relies on normal deletion flow. Use only if you REALLY know what you are doing. The default value is false, and the user must opt in to enable it
:param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
:param V1DeleteOptions body:
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1Status
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.delete_ip_address_with_http_info(name, **kwargs) # noqa: E501
def delete_ip_address_with_http_info(self, name, **kwargs): # noqa: E501
"""delete_ip_address # noqa: E501
delete an IPAddress # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_ip_address_with_http_info(name, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the IPAddress (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
:param bool ignore_store_read_error_with_cluster_breaking_potential: if set to true, it will trigger an unsafe deletion of the resource in case the normal deletion flow fails with a corrupt object error. A resource is considered corrupt if it can not be retrieved from the underlying storage successfully because of a) its data can not be transformed e.g. decryption failure, or b) it fails to decode into an object. NOTE: unsafe deletion ignores finalizer constraints, skips precondition checks, and removes the object from the storage. WARNING: This may potentially break the cluster if the workload associated with the resource being unsafe-deleted relies on normal deletion flow. Use only if you REALLY know what you are doing. The default value is false, and the user must opt in to enable it
:param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
:param V1DeleteOptions body:
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1Status, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'name',
'pretty',
'dry_run',
'grace_period_seconds',
'ignore_store_read_error_with_cluster_breaking_potential',
'orphan_dependents',
'propagation_policy',
'body'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_ip_address" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'name' is set
if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
local_var_params['name'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `name` when calling `delete_ip_address`") # noqa: E501
collection_formats = {}
path_params = {}
if 'name' in local_var_params:
path_params['name'] = local_var_params['name'] # noqa: E501
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
if 'grace_period_seconds' in local_var_params and local_var_params['grace_period_seconds'] is not None: # noqa: E501
query_params.append(('gracePeriodSeconds', local_var_params['grace_period_seconds'])) # noqa: E501
if 'ignore_store_read_error_with_cluster_breaking_potential' in local_var_params and local_var_params['ignore_store_read_error_with_cluster_breaking_potential'] is not None: # noqa: E501
query_params.append(('ignoreStoreReadErrorWithClusterBreakingPotential', local_var_params['ignore_store_read_error_with_cluster_breaking_potential'])) # noqa: E501
if 'orphan_dependents' in local_var_params and local_var_params['orphan_dependents'] is not None: # noqa: E501
query_params.append(('orphanDependents', local_var_params['orphan_dependents'])) # noqa: E501
if 'propagation_policy' in local_var_params and local_var_params['propagation_policy'] is not None: # noqa: E501
query_params.append(('propagationPolicy', local_var_params['propagation_policy'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/networking.k8s.io/v1beta1/ipaddresses/{name}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Status', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def delete_service_cidr(self, name, **kwargs):  # noqa: E501
    """delete_service_cidr  # noqa: E501

    Delete a ServiceCIDR by name.  The call is synchronous by default;
    pass async_req=True to receive a thread whose .get() yields the
    result.  # noqa: E501

    >>> thread = api.delete_service_cidr(name, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the ServiceCIDR (required)
    :param str pretty: pretty-print the output when 'true'
    :param str dry_run: when present, modifications are not persisted; the only valid value is 'All'
    :param int grace_period_seconds: non-negative seconds to wait before deletion; zero deletes immediately; defaults to the per-object value
    :param bool ignore_store_read_error_with_cluster_breaking_potential: opt in to unsafe deletion of a corrupt object; skips finalizer constraints and precondition checks (default false)
    :param bool orphan_dependents: deprecated, mutually exclusive with propagation_policy; adds/removes the \"orphan\" finalizer
    :param str propagation_policy: garbage-collection policy, one of 'Orphan', 'Background' or 'Foreground'; mutually exclusive with orphan_dependents
    :param V1DeleteOptions body:
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
                             without reading/decoding the response data
    :param _request_timeout: total request timeout, or a
                             (connection, read) tuple of timeouts
    :return: V1Status
             If the method is called asynchronously,
             returns the request thread.
    """
    # Delegate to the *_with_http_info variant, keeping only the payload.
    return self.delete_service_cidr_with_http_info(
        name, **dict(kwargs, _return_http_data_only=True))  # noqa: E501
def delete_service_cidr_with_http_info(self, name, **kwargs):  # noqa: E501
    """delete_service_cidr  # noqa: E501

    Delete a ServiceCIDR by name, returning the response body together
    with the HTTP status code and headers.  Synchronous by default;
    pass async_req=True to receive a thread whose .get() yields the
    result.  # noqa: E501

    >>> thread = api.delete_service_cidr_with_http_info(name, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the ServiceCIDR (required)
    :param str pretty: pretty-print the output when 'true'
    :param str dry_run: when present, modifications are not persisted; the only valid value is 'All'
    :param int grace_period_seconds: non-negative seconds to wait before deletion; zero deletes immediately; defaults to the per-object value
    :param bool ignore_store_read_error_with_cluster_breaking_potential: opt in to unsafe deletion of a corrupt object; skips finalizer constraints and precondition checks (default false)
    :param bool orphan_dependents: deprecated, mutually exclusive with propagation_policy; adds/removes the \"orphan\" finalizer
    :param str propagation_policy: garbage-collection policy, one of 'Orphan', 'Background' or 'Foreground'; mutually exclusive with orphan_dependents
    :param V1DeleteOptions body:
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
                             without reading/decoding the response data
    :param _request_timeout: total request timeout, or a
                             (connection, read) tuple of timeouts
    :return: tuple(V1Status, status_code(int), headers(HTTPHeaderDict))
             If the method is called asynchronously,
             returns the request thread.
    """
    params = locals()
    # Keywords this endpoint accepts, plus the generic request controls.
    accepted = [
        'name',
        'pretty',
        'dry_run',
        'grace_period_seconds',
        'ignore_store_read_error_with_cluster_breaking_potential',
        'orphan_dependents',
        'propagation_policy',
        'body',
        'async_req',
        '_return_http_data_only',
        '_preload_content',
        '_request_timeout'
    ]
    for key, val in six.iteritems(params['kwargs']):
        if key not in accepted:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method delete_service_cidr" % key
            )
        params[key] = val
    del params['kwargs']

    # verify the required parameter 'name' is set
    if self.api_client.client_side_validation and params.get('name') is None:  # noqa: E501
        raise ApiValueError("Missing the required parameter `name` when calling `delete_service_cidr`")  # noqa: E501

    path_params = {'name': params['name']} if 'name' in params else {}

    # Map accepted python keywords onto their wire names, skipping unset ones.
    query_params = []
    for attr, wire_name in (
        ('pretty', 'pretty'),
        ('dry_run', 'dryRun'),
        ('grace_period_seconds', 'gracePeriodSeconds'),
        ('ignore_store_read_error_with_cluster_breaking_potential',
         'ignoreStoreReadErrorWithClusterBreakingPotential'),
        ('orphan_dependents', 'orphanDependents'),
        ('propagation_policy', 'propagationPolicy'),
    ):
        value = params.get(attr)
        if value is not None:
            query_params.append((wire_name, value))

    header_params = {
        # HTTP header `Accept`
        'Accept': self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor'])  # noqa: E501
    }

    return self.api_client.call_api(
        '/apis/networking.k8s.io/v1beta1/servicecidrs/{name}', 'DELETE',
        path_params,
        query_params,
        header_params,
        body=params.get('body'),
        post_params=[],
        files={},
        response_type='V1Status',  # noqa: E501
        auth_settings=['BearerToken'],  # Authentication setting
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={})
def get_api_resources(self, **kwargs):  # noqa: E501
    """get_api_resources  # noqa: E501

    List the resources available under the networking.k8s.io/v1beta1
    API group.  Synchronous by default; pass async_req=True to receive
    a thread whose .get() yields the result.  # noqa: E501

    >>> thread = api.get_api_resources(async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
                             without reading/decoding the response data
    :param _request_timeout: total request timeout, or a
                             (connection, read) tuple of timeouts
    :return: V1APIResourceList
             If the method is called asynchronously,
             returns the request thread.
    """
    # Delegate to the *_with_http_info variant, keeping only the payload.
    return self.get_api_resources_with_http_info(
        **dict(kwargs, _return_http_data_only=True))  # noqa: E501
def get_api_resources_with_http_info(self, **kwargs):  # noqa: E501
    """get_api_resources  # noqa: E501

    List the resources available under the networking.k8s.io/v1beta1
    API group, returning the response body together with the HTTP
    status code and headers.  # noqa: E501

    >>> thread = api.get_api_resources_with_http_info(async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
                             without reading/decoding the response data
    :param _request_timeout: total request timeout, or a
                             (connection, read) tuple of timeouts
    :return: tuple(V1APIResourceList, status_code(int), headers(HTTPHeaderDict))
             If the method is called asynchronously,
             returns the request thread.
    """
    params = locals()
    # This endpoint takes no endpoint-specific keywords; only the
    # generic request controls are accepted.
    accepted = [
        'async_req',
        '_return_http_data_only',
        '_preload_content',
        '_request_timeout'
    ]
    for key, val in six.iteritems(params['kwargs']):
        if key not in accepted:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_api_resources" % key
            )
        params[key] = val
    del params['kwargs']

    header_params = {
        # HTTP header `Accept`
        'Accept': self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor'])  # noqa: E501
    }

    return self.api_client.call_api(
        '/apis/networking.k8s.io/v1beta1/', 'GET',
        {},   # no path parameters
        [],   # no query parameters
        header_params,
        body=None,
        post_params=[],
        files={},
        response_type='V1APIResourceList',  # noqa: E501
        auth_settings=['BearerToken'],  # Authentication setting
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={})
def list_ip_address(self, **kwargs):  # noqa: E501
    """list_ip_address  # noqa: E501

    List or watch objects of kind IPAddress.  Synchronous by default;
    pass async_req=True to receive a thread whose .get() yields the
    result.  # noqa: E501

    >>> thread = api.list_ip_address(async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str pretty: pretty-print the output when 'true'
    :param bool allow_watch_bookmarks: request BOOKMARK watch events; servers may ignore this flag; ignored when not watching
    :param str _continue: continuation token from a previous list call; must be paired with otherwise-identical query parameters; not supported when watch is true
    :param str field_selector: restrict returned objects by their fields; defaults to everything
    :param str label_selector: restrict returned objects by their labels; defaults to everything
    :param int limit: maximum number of responses for a list call; the server sets `continue` in the list metadata when more items exist; not supported when watch is true
    :param str resource_version: constraint on which resource versions may serve the request; see the Kubernetes resource-versions documentation; defaults to unset
    :param str resource_version_match: how resource_version is applied to list calls; recommended whenever resource_version is set; defaults to unset
    :param bool send_initial_events: with watch=true, begin the stream with synthetic events for the current collection state followed by a synthetic \"Bookmark\" event; requires resource_version_match to be set
    :param int timeout_seconds: limit on the duration of the list/watch call regardless of activity
    :param bool watch: stream add/update/remove notifications for the described resources; specify resource_version
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
                             without reading/decoding the response data
    :param _request_timeout: total request timeout, or a
                             (connection, read) tuple of timeouts
    :return: V1beta1IPAddressList
             If the method is called asynchronously,
             returns the request thread.
    """
    # Delegate to the *_with_http_info variant, keeping only the payload.
    return self.list_ip_address_with_http_info(
        **dict(kwargs, _return_http_data_only=True))  # noqa: E501
def list_ip_address_with_http_info(self, **kwargs):  # noqa: E501
    """list_ip_address  # noqa: E501

    List or watch objects of kind IPAddress, returning the response
    body together with the HTTP status code and headers.  Synchronous
    by default; pass async_req=True to receive a thread whose .get()
    yields the result.  # noqa: E501

    >>> thread = api.list_ip_address_with_http_info(async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str pretty: pretty-print the output when 'true'
    :param bool allow_watch_bookmarks: request BOOKMARK watch events; servers may ignore this flag; ignored when not watching
    :param str _continue: continuation token from a previous list call; must be paired with otherwise-identical query parameters; not supported when watch is true
    :param str field_selector: restrict returned objects by their fields; defaults to everything
    :param str label_selector: restrict returned objects by their labels; defaults to everything
    :param int limit: maximum number of responses for a list call; the server sets `continue` in the list metadata when more items exist; not supported when watch is true
    :param str resource_version: constraint on which resource versions may serve the request; see the Kubernetes resource-versions documentation; defaults to unset
    :param str resource_version_match: how resource_version is applied to list calls; recommended whenever resource_version is set; defaults to unset
    :param bool send_initial_events: with watch=true, begin the stream with synthetic events for the current collection state followed by a synthetic \"Bookmark\" event; requires resource_version_match to be set
    :param int timeout_seconds: limit on the duration of the list/watch call regardless of activity
    :param bool watch: stream add/update/remove notifications for the described resources; specify resource_version
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
                             without reading/decoding the response data
    :param _request_timeout: total request timeout, or a
                             (connection, read) tuple of timeouts
    :return: tuple(V1beta1IPAddressList, status_code(int), headers(HTTPHeaderDict))
             If the method is called asynchronously,
             returns the request thread.
    """
    params = locals()
    # Keywords this endpoint accepts, plus the generic request controls.
    accepted = [
        'pretty',
        'allow_watch_bookmarks',
        '_continue',
        'field_selector',
        'label_selector',
        'limit',
        'resource_version',
        'resource_version_match',
        'send_initial_events',
        'timeout_seconds',
        'watch',
        'async_req',
        '_return_http_data_only',
        '_preload_content',
        '_request_timeout'
    ]
    for key, val in six.iteritems(params['kwargs']):
        if key not in accepted:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method list_ip_address" % key
            )
        params[key] = val
    del params['kwargs']

    # Map accepted python keywords onto their wire names, skipping unset ones.
    query_params = []
    for attr, wire_name in (
        ('pretty', 'pretty'),
        ('allow_watch_bookmarks', 'allowWatchBookmarks'),
        ('_continue', 'continue'),
        ('field_selector', 'fieldSelector'),
        ('label_selector', 'labelSelector'),
        ('limit', 'limit'),
        ('resource_version', 'resourceVersion'),
        ('resource_version_match', 'resourceVersionMatch'),
        ('send_initial_events', 'sendInitialEvents'),
        ('timeout_seconds', 'timeoutSeconds'),
        ('watch', 'watch'),
    ):
        value = params.get(attr)
        if value is not None:
            query_params.append((wire_name, value))

    header_params = {
        # HTTP header `Accept` (includes the watch streaming media types)
        'Accept': self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch', 'application/cbor-seq'])  # noqa: E501
    }

    return self.api_client.call_api(
        '/apis/networking.k8s.io/v1beta1/ipaddresses', 'GET',
        {},   # no path parameters
        query_params,
        header_params,
        body=None,
        post_params=[],
        files={},
        response_type='V1beta1IPAddressList',  # noqa: E501
        auth_settings=['BearerToken'],  # Authentication setting
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={})
def list_service_cidr(self, **kwargs): # noqa: E501
"""list_service_cidr # noqa: E501
list or watch objects of kind ServiceCIDR # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_service_cidr(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
:param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
:param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1beta1ServiceCIDRList
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.list_service_cidr_with_http_info(**kwargs) # noqa: E501
def list_service_cidr_with_http_info(self, **kwargs): # noqa: E501
"""list_service_cidr # noqa: E501
list or watch objects of kind ServiceCIDR # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_service_cidr_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
:param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
:param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1beta1ServiceCIDRList, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'pretty',
'allow_watch_bookmarks',
'_continue',
'field_selector',
'label_selector',
'limit',
'resource_version',
'resource_version_match',
'send_initial_events',
'timeout_seconds',
'watch'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method list_service_cidr" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if 'allow_watch_bookmarks' in local_var_params and local_var_params['allow_watch_bookmarks'] is not None: # noqa: E501
query_params.append(('allowWatchBookmarks', local_var_params['allow_watch_bookmarks'])) # noqa: E501
if '_continue' in local_var_params and local_var_params['_continue'] is not None: # noqa: E501
query_params.append(('continue', local_var_params['_continue'])) # noqa: E501
if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None: # noqa: E501
query_params.append(('fieldSelector', local_var_params['field_selector'])) # noqa: E501
if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None: # noqa: E501
query_params.append(('labelSelector', local_var_params['label_selector'])) # noqa: E501
if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501
query_params.append(('limit', local_var_params['limit'])) # noqa: E501
if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None: # noqa: E501
query_params.append(('resourceVersion', local_var_params['resource_version'])) # noqa: E501
if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None: # noqa: E501
query_params.append(('resourceVersionMatch', local_var_params['resource_version_match'])) # noqa: E501
if 'send_initial_events' in local_var_params and local_var_params['send_initial_events'] is not None: # noqa: E501
query_params.append(('sendInitialEvents', local_var_params['send_initial_events'])) # noqa: E501
if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None: # noqa: E501
query_params.append(('timeoutSeconds', local_var_params['timeout_seconds'])) # noqa: E501
if 'watch' in local_var_params and local_var_params['watch'] is not None: # noqa: E501
query_params.append(('watch', local_var_params['watch'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch', 'application/cbor-seq']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/networking.k8s.io/v1beta1/servicecidrs', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1beta1ServiceCIDRList', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def patch_ip_address(self, name, body, **kwargs): # noqa: E501
"""patch_ip_address # noqa: E501
partially update the specified IPAddress # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_ip_address(name, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the IPAddress (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
:param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
:param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1beta1IPAddress
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.patch_ip_address_with_http_info(name, body, **kwargs) # noqa: E501
def patch_ip_address_with_http_info(self, name, body, **kwargs): # noqa: E501
"""patch_ip_address # noqa: E501
partially update the specified IPAddress # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_ip_address_with_http_info(name, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the IPAddress (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
:param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
:param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1beta1IPAddress, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'name',
'body',
'pretty',
'dry_run',
'field_manager',
'field_validation',
'force'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method patch_ip_address" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'name' is set
if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
local_var_params['name'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `name` when calling `patch_ip_address`") # noqa: E501
# verify the required parameter 'body' is set
if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
local_var_params['body'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `body` when calling `patch_ip_address`") # noqa: E501
collection_formats = {}
path_params = {}
if 'name' in local_var_params:
path_params['name'] = local_var_params['name'] # noqa: E501
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
if 'force' in local_var_params and local_var_params['force'] is not None: # noqa: E501
query_params.append(('force', local_var_params['force'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json', 'application/apply-patch+yaml', 'application/apply-patch+cbor']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/networking.k8s.io/v1beta1/ipaddresses/{name}', 'PATCH',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1beta1IPAddress', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def patch_service_cidr(self, name, body, **kwargs): # noqa: E501
"""patch_service_cidr # noqa: E501
partially update the specified ServiceCIDR # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_service_cidr(name, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the ServiceCIDR (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
:param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
:param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1beta1ServiceCIDR
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.patch_service_cidr_with_http_info(name, body, **kwargs) # noqa: E501
def patch_service_cidr_with_http_info(self, name, body, **kwargs): # noqa: E501
"""patch_service_cidr # noqa: E501
partially update the specified ServiceCIDR # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_service_cidr_with_http_info(name, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the ServiceCIDR (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
:param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
:param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1beta1ServiceCIDR, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'name',
'body',
'pretty',
'dry_run',
'field_manager',
'field_validation',
'force'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method patch_service_cidr" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'name' is set
if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
local_var_params['name'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `name` when calling `patch_service_cidr`") # noqa: E501
# verify the required parameter 'body' is set
if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
local_var_params['body'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `body` when calling `patch_service_cidr`") # noqa: E501
collection_formats = {}
path_params = {}
if 'name' in local_var_params:
path_params['name'] = local_var_params['name'] # noqa: E501
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
if 'force' in local_var_params and local_var_params['force'] is not None: # noqa: E501
query_params.append(('force', local_var_params['force'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json', 'application/apply-patch+yaml', 'application/apply-patch+cbor']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/networking.k8s.io/v1beta1/servicecidrs/{name}', 'PATCH',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1beta1ServiceCIDR', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def patch_service_cidr_status(self, name, body, **kwargs): # noqa: E501
"""patch_service_cidr_status # noqa: E501
partially update status of the specified ServiceCIDR # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_service_cidr_status(name, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the ServiceCIDR (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
:param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
:param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1beta1ServiceCIDR
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.patch_service_cidr_status_with_http_info(name, body, **kwargs) # noqa: E501
def patch_service_cidr_status_with_http_info(self, name, body, **kwargs): # noqa: E501
"""patch_service_cidr_status # noqa: E501
partially update status of the specified ServiceCIDR # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_service_cidr_status_with_http_info(name, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the ServiceCIDR (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
:param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
:param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1beta1ServiceCIDR, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'name',
'body',
'pretty',
'dry_run',
'field_manager',
'field_validation',
'force'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method patch_service_cidr_status" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'name' is set
if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
local_var_params['name'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `name` when calling `patch_service_cidr_status`") # noqa: E501
# verify the required parameter 'body' is set
if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
local_var_params['body'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `body` when calling `patch_service_cidr_status`") # noqa: E501
collection_formats = {}
path_params = {}
if 'name' in local_var_params:
path_params['name'] = local_var_params['name'] # noqa: E501
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
if 'force' in local_var_params and local_var_params['force'] is not None: # noqa: E501
query_params.append(('force', local_var_params['force'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json', 'application/apply-patch+yaml', 'application/apply-patch+cbor']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/networking.k8s.io/v1beta1/servicecidrs/{name}/status', 'PATCH',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1beta1ServiceCIDR', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def read_ip_address(self, name, **kwargs): # noqa: E501
"""read_ip_address # noqa: E501
read the specified IPAddress # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_ip_address(name, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the IPAddress (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1beta1IPAddress
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.read_ip_address_with_http_info(name, **kwargs) # noqa: E501
def read_ip_address_with_http_info(self, name, **kwargs): # noqa: E501
"""read_ip_address # noqa: E501
read the specified IPAddress # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_ip_address_with_http_info(name, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the IPAddress (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1beta1IPAddress, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'name',
'pretty'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method read_ip_address" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'name' is set
if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
local_var_params['name'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `name` when calling `read_ip_address`") # noqa: E501
collection_formats = {}
path_params = {}
if 'name' in local_var_params:
path_params['name'] = local_var_params['name'] # noqa: E501
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/networking.k8s.io/v1beta1/ipaddresses/{name}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1beta1IPAddress', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def read_service_cidr(self, name, **kwargs): # noqa: E501
"""read_service_cidr # noqa: E501
read the specified ServiceCIDR # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_service_cidr(name, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the ServiceCIDR (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1beta1ServiceCIDR
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.read_service_cidr_with_http_info(name, **kwargs) # noqa: E501
def read_service_cidr_with_http_info(self, name, **kwargs): # noqa: E501
"""read_service_cidr # noqa: E501
read the specified ServiceCIDR # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_service_cidr_with_http_info(name, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the ServiceCIDR (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1beta1ServiceCIDR, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'name',
'pretty'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method read_service_cidr" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'name' is set
if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
local_var_params['name'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `name` when calling `read_service_cidr`") # noqa: E501
collection_formats = {}
path_params = {}
if 'name' in local_var_params:
path_params['name'] = local_var_params['name'] # noqa: E501
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/networking.k8s.io/v1beta1/servicecidrs/{name}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1beta1ServiceCIDR', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def read_service_cidr_status(self, name, **kwargs): # noqa: E501
"""read_service_cidr_status # noqa: E501
read status of the specified ServiceCIDR # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_service_cidr_status(name, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the ServiceCIDR (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1beta1ServiceCIDR
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.read_service_cidr_status_with_http_info(name, **kwargs) # noqa: E501
def read_service_cidr_status_with_http_info(self, name, **kwargs): # noqa: E501
"""read_service_cidr_status # noqa: E501
read status of the specified ServiceCIDR # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_service_cidr_status_with_http_info(name, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the ServiceCIDR (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1beta1ServiceCIDR, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'name',
'pretty'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method read_service_cidr_status" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'name' is set
if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
local_var_params['name'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `name` when calling `read_service_cidr_status`") # noqa: E501
collection_formats = {}
path_params = {}
if 'name' in local_var_params:
path_params['name'] = local_var_params['name'] # noqa: E501
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/networking.k8s.io/v1beta1/servicecidrs/{name}/status', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1beta1ServiceCIDR', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def replace_ip_address(self, name, body, **kwargs): # noqa: E501
"""replace_ip_address # noqa: E501
replace the specified IPAddress # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_ip_address(name, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the IPAddress (required)
:param V1beta1IPAddress body: (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1beta1IPAddress
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.replace_ip_address_with_http_info(name, body, **kwargs) # noqa: E501
def replace_ip_address_with_http_info(self, name, body, **kwargs): # noqa: E501
"""replace_ip_address # noqa: E501
replace the specified IPAddress # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_ip_address_with_http_info(name, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the IPAddress (required)
:param V1beta1IPAddress body: (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1beta1IPAddress, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'name',
'body',
'pretty',
'dry_run',
'field_manager',
'field_validation'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method replace_ip_address" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'name' is set
if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
local_var_params['name'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `name` when calling `replace_ip_address`") # noqa: E501
# verify the required parameter 'body' is set
if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
local_var_params['body'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `body` when calling `replace_ip_address`") # noqa: E501
collection_formats = {}
path_params = {}
if 'name' in local_var_params:
path_params['name'] = local_var_params['name'] # noqa: E501
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/networking.k8s.io/v1beta1/ipaddresses/{name}', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1beta1IPAddress', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
# NOTE: auto-generated by OpenAPI Generator from the Kubernetes API spec.
# Behavioral changes belong in the generator templates, not in this file.
def replace_service_cidr(self, name, body, **kwargs):  # noqa: E501
    """replace_service_cidr  # noqa: E501

    replace the specified ServiceCIDR  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.replace_service_cidr(name, body, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the ServiceCIDR (required)
    :param V1beta1ServiceCIDR body: (required)
    :param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
    :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
    :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
    :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: V1beta1ServiceCIDR
             If the method is called asynchronously,
             returns the request thread.
    """
    # Convenience wrapper: return only the deserialized body; callers who
    # also need the status code and headers use *_with_http_info directly.
    kwargs['_return_http_data_only'] = True
    return self.replace_service_cidr_with_http_info(name, body, **kwargs)  # noqa: E501
# NOTE: auto-generated by OpenAPI Generator from the Kubernetes API spec.
# Behavioral changes belong in the generator templates, not in this file.
def replace_service_cidr_with_http_info(self, name, body, **kwargs):  # noqa: E501
    """replace_service_cidr  # noqa: E501

    replace the specified ServiceCIDR  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.replace_service_cidr_with_http_info(name, body, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the ServiceCIDR (required)
    :param V1beta1ServiceCIDR body: (required)
    :param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
    :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
    :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
    :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: tuple(V1beta1ServiceCIDR, status_code(int), headers(HTTPHeaderDict))
             If the method is called asynchronously,
             returns the request thread.
    """
    local_var_params = locals()

    all_params = [
        'name',
        'body',
        'pretty',
        'dry_run',
        'field_manager',
        'field_validation'
    ]
    all_params.extend(
        [
            'async_req',
            '_return_http_data_only',
            '_preload_content',
            '_request_timeout'
        ]
    )

    # Merge caller kwargs into the parameter dict, rejecting unknown names
    # so that typos fail loudly instead of being silently ignored.
    for key, val in six.iteritems(local_var_params['kwargs']):
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method replace_service_cidr" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']
    # verify the required parameter 'name' is set
    if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
                                                   local_var_params['name'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `name` when calling `replace_service_cidr`")  # noqa: E501
    # verify the required parameter 'body' is set
    if self.api_client.client_side_validation and ('body' not in local_var_params or  # noqa: E501
                                                   local_var_params['body'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `body` when calling `replace_service_cidr`")  # noqa: E501

    collection_formats = {}

    path_params = {}
    if 'name' in local_var_params:
        path_params['name'] = local_var_params['name']  # noqa: E501

    # Only parameters explicitly supplied (non-None) become query params.
    query_params = []
    if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
        query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
    if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None:  # noqa: E501
        query_params.append(('dryRun', local_var_params['dry_run']))  # noqa: E501
    if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None:  # noqa: E501
        query_params.append(('fieldManager', local_var_params['field_manager']))  # noqa: E501
    if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None:  # noqa: E501
        query_params.append(('fieldValidation', local_var_params['field_validation']))  # noqa: E501

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = None
    if 'body' in local_var_params:
        body_params = local_var_params['body']
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor'])  # noqa: E501

    # Authentication setting
    auth_settings = ['BearerToken']  # noqa: E501

    return self.api_client.call_api(
        '/apis/networking.k8s.io/v1beta1/servicecidrs/{name}', 'PUT',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='V1beta1ServiceCIDR',  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats)
# NOTE: auto-generated by OpenAPI Generator from the Kubernetes API spec.
# Behavioral changes belong in the generator templates, not in this file.
def replace_service_cidr_status(self, name, body, **kwargs):  # noqa: E501
    """replace_service_cidr_status  # noqa: E501

    replace status of the specified ServiceCIDR  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.replace_service_cidr_status(name, body, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the ServiceCIDR (required)
    :param V1beta1ServiceCIDR body: (required)
    :param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
    :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
    :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
    :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: V1beta1ServiceCIDR
             If the method is called asynchronously,
             returns the request thread.
    """
    # Convenience wrapper: return only the deserialized body; callers who
    # also need the status code and headers use *_with_http_info directly.
    kwargs['_return_http_data_only'] = True
    return self.replace_service_cidr_status_with_http_info(name, body, **kwargs)  # noqa: E501
# NOTE: auto-generated by OpenAPI Generator from the Kubernetes API spec.
# Behavioral changes belong in the generator templates, not in this file.
def replace_service_cidr_status_with_http_info(self, name, body, **kwargs):  # noqa: E501
    """replace_service_cidr_status  # noqa: E501

    replace status of the specified ServiceCIDR  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.replace_service_cidr_status_with_http_info(name, body, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the ServiceCIDR (required)
    :param V1beta1ServiceCIDR body: (required)
    :param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
    :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
    :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
    :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: tuple(V1beta1ServiceCIDR, status_code(int), headers(HTTPHeaderDict))
             If the method is called asynchronously,
             returns the request thread.
    """
    local_var_params = locals()

    all_params = [
        'name',
        'body',
        'pretty',
        'dry_run',
        'field_manager',
        'field_validation'
    ]
    all_params.extend(
        [
            'async_req',
            '_return_http_data_only',
            '_preload_content',
            '_request_timeout'
        ]
    )

    # Merge caller kwargs into the parameter dict, rejecting unknown names
    # so that typos fail loudly instead of being silently ignored.
    for key, val in six.iteritems(local_var_params['kwargs']):
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method replace_service_cidr_status" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']
    # verify the required parameter 'name' is set
    if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
                                                   local_var_params['name'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `name` when calling `replace_service_cidr_status`")  # noqa: E501
    # verify the required parameter 'body' is set
    if self.api_client.client_side_validation and ('body' not in local_var_params or  # noqa: E501
                                                   local_var_params['body'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `body` when calling `replace_service_cidr_status`")  # noqa: E501

    collection_formats = {}

    path_params = {}
    if 'name' in local_var_params:
        path_params['name'] = local_var_params['name']  # noqa: E501

    # Only parameters explicitly supplied (non-None) become query params.
    query_params = []
    if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
        query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
    if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None:  # noqa: E501
        query_params.append(('dryRun', local_var_params['dry_run']))  # noqa: E501
    if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None:  # noqa: E501
        query_params.append(('fieldManager', local_var_params['field_manager']))  # noqa: E501
    if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None:  # noqa: E501
        query_params.append(('fieldValidation', local_var_params['field_validation']))  # noqa: E501

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = None
    if 'body' in local_var_params:
        body_params = local_var_params['body']
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor'])  # noqa: E501

    # Authentication setting
    auth_settings = ['BearerToken']  # noqa: E501

    # Note the /status subresource: this updates only .status, not .spec.
    return self.api_client.call_api(
        '/apis/networking.k8s.io/v1beta1/servicecidrs/{name}/status', 'PUT',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='V1beta1ServiceCIDR',  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats)
# ---- file boundary (extraction artifact): the generated Kubernetes client
# code above belongs to class NetworkingV1beta1Api; the code below comes from
# django/template/defaulttags.py ----
class TemplateIfParser(IfParser):
    """IfParser specialization used by the ``{% if %}`` tag.

    Turns each raw token of the condition into a ``TemplateLiteral`` backed
    by a compiled filter expression, and reports parse failures as
    ``TemplateSyntaxError`` instead of the generic IfParser error.
    """

    # Raised by IfParser on malformed conditions so template authors see a
    # template error rather than an internal parser exception.
    error_class = TemplateSyntaxError

    def __init__(self, parser, *args, **kwargs):
        # Keep a handle on the template parser so create_var() can compile
        # filter expressions (e.g. ``articles|length``) in the condition.
        self.template_parser = parser
        super().__init__(*args, **kwargs)

    def create_var(self, value):
        """Wrap a condition token in a lazily-evaluated TemplateLiteral."""
        return TemplateLiteral(self.template_parser.compile_filter(value), value)
@register.tag("if")
def do_if(parser, token):
    """
    Conditionally render a block of template content.

    Render the first branch whose condition evaluates as true::

        {% if athlete_list %}
            Number of athletes: {{ athlete_list|count }}
        {% elif athlete_in_locker_room_list %}
            Athletes should be out of the locker room soon!
        {% else %}
            No athletes.
        {% endif %}

    Any number of ``{% elif %}`` clauses may follow the opening tag, and an
    optional ``{% else %}`` clause is rendered when every condition fails.

    Conditions support ``and``, ``or`` and ``not``, the comparison operators
    ``==``, ``!=``, ``>``, ``>=``, ``<``, ``<=``, membership tests ``in`` and
    ``not in``, and filtered variables (``{% if articles|length >= 5 %}``).
    Operator precedence follows Python, and operators must be separated from
    their operands by spaces (``{% if 1>2 %}`` is invalid).
    """
    conditions_nodelists = []
    # The opening {% if %} and every {% elif %} are parsed identically:
    # compile the condition, then consume the branch body.
    while True:
        condition = TemplateIfParser(parser, token.split_contents()[1:]).parse()
        nodelist = parser.parse(("elif", "else", "endif"))
        conditions_nodelists.append((condition, nodelist))
        token = parser.next_token()
        if not token.contents.startswith("elif"):
            break

    # Optional {% else %} branch, stored with a None condition.
    if token.contents == "else":
        conditions_nodelists.append((None, parser.parse(("endif",))))
        token = parser.next_token()

    # The only token left must be {% endif %}.
    if token.contents != "endif":
        raise TemplateSyntaxError(
            'Malformed template tag at line {}: "{}"'.format(
                token.lineno, token.contents
            )
        )

    return IfNode(conditions_nodelists)
@register.tag
def ifchanged(parser, token):
    """
    Render content only when a value differs from the previous loop pass.

    Used inside a loop, in one of two modes:

    1. Without arguments, compare the block's own rendered output against the
       previous iteration and emit it only when it changed::

           {% for date in days %}
               {% ifchanged %}<h3>{{ date|date:"F" }}</h3>{% endifchanged %}
           {% endfor %}

    2. With one or more variables, emit the block whenever any of those
       variables changed since the last iteration::

           {% ifchanged date.hour date.date %}{{ date.hour }}{% endifchanged %}

    An optional ``{% else %}`` section is rendered when nothing changed.
    """
    args = token.split_contents()[1:]
    nodelist_true = parser.parse(("else", "endifchanged"))
    token = parser.next_token()
    if token.contents == "else":
        nodelist_false = parser.parse(("endifchanged",))
        parser.delete_first_token()
    else:
        nodelist_false = NodeList()
    # Compile the watched expressions after the body has been consumed,
    # mirroring the parse order of the surrounding tag functions.
    compiled = [parser.compile_filter(arg) for arg in args]
    return IfChangedNode(nodelist_true, nodelist_false, *compiled)
def find_library(parser, name):
    """
    Look up a registered tag library on the parser by name.

    Raise TemplateSyntaxError listing all registered libraries when ``name``
    is unknown, so template authors can spot typos quickly.
    """
    if name in parser.libraries:
        return parser.libraries[name]
    raise TemplateSyntaxError(
        "'%s' is not a registered tag library. Must be one of:\n%s"
        % (
            name,
            "\n".join(sorted(parser.libraries)),
        ),
    )
def load_from_library(library, label, names):
    """
    Build a new Library holding only the named tags/filters from *library*.

    ``label`` is the library's name as written in the template, used in the
    error message. A name may match a tag, a filter, or both; a name matching
    neither raises TemplateSyntaxError.
    """
    subset = Library()
    for name in names:
        matched = False
        if name in library.tags:
            subset.tags[name] = library.tags[name]
            matched = True
        if name in library.filters:
            subset.filters[name] = library.filters[name]
            matched = True
        if not matched:
            raise TemplateSyntaxError(
                "'%s' is not a valid tag or filter in tag library '%s'"
                % (
                    name,
                    label,
                ),
            )
    return subset
@register.tag
def load(parser, token):
    """
    Make one or more custom tag libraries available to the parser.

    Load entire libraries::

        {% load news.photos %}

    or cherry-pick individual tags/filters from one library::

        {% load byline from news %}
    """
    # Split on whitespace directly: this tag takes literal names, never
    # template variables, so token.split_contents() machinery is unneeded.
    bits = token.contents.split()
    if len(bits) >= 4 and bits[-2] == "from":
        # "{% load a b ... from libname %}": import selected names only.
        library_name = bits[-1]
        library = find_library(parser, library_name)
        parser.add_library(load_from_library(library, library_name, bits[1:-2]))
    else:
        # "{% load lib1 lib2 ... %}": import each library wholesale.
        for library_name in bits[1:]:
            parser.add_library(find_library(parser, library_name))
    return LoadNode()
@register.tag
def lorem(parser, token):
    """
    Emit placeholder Latin text for use as template test data.

    Usage::

        {% lorem [count] [method] [random] %}

    ``count`` (default 1) is a number or variable giving how many items to
    generate. ``method`` is ``w`` for words, ``p`` for HTML paragraphs, or
    ``b`` for plain-text blocks (the default). Appending ``random`` skips
    the stock "Lorem ipsum dolor sit amet..." opening paragraph.

    Examples:

    * ``{% lorem %}`` — the common paragraph
    * ``{% lorem 3 p %}`` — the common paragraph plus two random ones in <p>
    * ``{% lorem 2 w random %}`` — two random Latin words
    """
    bits = list(token.split_contents())
    tagname = bits[0]
    # Arguments are consumed right-to-left: random flag, then method, then
    # count, each optional.
    common = True
    if bits[-1] == "random":
        bits.pop()
        common = False
    method = bits.pop() if bits[-1] in ("w", "p", "b") else "b"
    count = bits.pop() if len(bits) > 1 else "1"
    count = parser.compile_filter(count)
    # Anything left besides the tag name itself is a syntax error.
    if len(bits) != 1:
        raise TemplateSyntaxError("Incorrect format for %r tag" % tagname)
    return LoremNode(count, method, common)
@register.tag
def now(parser, token):
    """
    Output the current date/time, formatted with a PHP ``date()``-style
    format string (see https://php.net/date)::

        It is {% now "jS F Y H:i" %}

    Supports ``{% now "..." as varname %}`` to store the result instead of
    rendering it.
    """
    bits = token.split_contents()
    asvar = None
    if len(bits) == 4 and bits[-2] == "as":
        asvar = bits.pop()
        bits.pop()  # discard the literal "as"
    if len(bits) != 2:
        raise TemplateSyntaxError("'now' statement takes one argument")
    # Strip the surrounding quote characters from the format string.
    return NowNode(bits[1][1:-1], asvar)
@register.tag(name="partialdef")
def partialdef_func(parser, token):
    """
    Define a named partial for later reuse with ``{% partial %}``::

        {% partialdef partial_name %}
            Content goes here.
        {% endpartialdef %}

    The parsed nodelist is recorded under the "partials" key of the parser's
    extra data. Passing the optional ``inline`` argument additionally renders
    the partial's contents in place at the definition site.
    """
    bits = token.split_contents()
    # bits[0] is always "partialdef" (the tag name as invoked).
    if len(bits) == 3 and bits[2] == "inline":
        partial_name = bits[1]
        inline = True
    elif len(bits) == 3:
        raise TemplateSyntaxError(
            "The 'inline' argument does not have any parameters; either use "
            "'inline' or remove it completely."
        )
    elif len(bits) == 2:
        partial_name = bits[1]
        inline = False
    elif len(bits) == 1:
        raise TemplateSyntaxError("'partialdef' tag requires a name")
    else:
        raise TemplateSyntaxError("'partialdef' tag takes at most 2 arguments")

    def _source_pos(tok, index):
        # Token.position is only present for debug-mode tokens.
        pos = getattr(tok, "position", None)
        return pos[index] if isinstance(pos, tuple) else None

    # Accept both "{% endpartialdef %}" and the named form
    # "{% endpartialdef name %}".
    valid_endpartials = ("endpartialdef", f"endpartialdef {partial_name}")
    source_start = _source_pos(token, 0)
    nodelist = parser.parse(valid_endpartials)
    endpartial = parser.next_token()
    if endpartial.contents not in valid_endpartials:
        parser.invalid_block_tag(endpartial, "endpartialdef", valid_endpartials)
    source_end = _source_pos(endpartial, 1)

    # Register the partial, refusing duplicate names within one template.
    partials = parser.extra_data.setdefault("partials", {})
    if partial_name in partials:
        raise TemplateSyntaxError(
            f"Partial '{partial_name}' is already defined in the "
            f"'{parser.origin.name}' template."
        )
    partials[partial_name] = PartialTemplate(
        nodelist,
        parser.origin,
        partial_name,
        source_start=source_start,
        source_end=source_end,
    )
    return PartialDefNode(partial_name, inline, nodelist)
@register.tag(name="partial")
def partial_func(parser, token):
"""
Render a partial previously declared with the ``{% partialdef %}`` tag.
Usage::
{% partial partial_name %}
"""
match token.split_contents():
case "partial", partial_name:
extra_data = parser.extra_data
partial_mapping = DeferredSubDict(extra_data, "partials")
return PartialNode(partial_name, partial_mapping=partial_mapping)
case _:
raise TemplateSyntaxError("'partial' tag requires a single argument")
@register.simple_tag(name="querystring", takes_context=True)
def querystring(context, *args, **kwargs):
"""
Build a query string using `args` and `kwargs` arguments.
This tag constructs a new query string by adding, removing, or modifying
parameters from the given positional and keyword arguments. Positional
arguments must be mappings (such as `QueryDict` or `dict`), and
`request.GET` is used as the starting point if `args` is empty.
Keyword arguments are treated as an extra, final mapping. These mappings
are processed sequentially, with later arguments taking precedence.
A query string prefixed with `?` is returned.
Raise TemplateSyntaxError if a positional argument is not a mapping or if
keys are not strings.
For example::
{# Set a parameter on top of `request.GET` #}
{% querystring foo=3 %}
{# Remove a key from `request.GET` #}
{% querystring foo=None %}
{# Use with pagination #}
{% querystring page=page_obj.next_page_number %}
{# Use a custom ``QueryDict`` #}
{% querystring my_query_dict foo=3 %}
{# Use multiple positional and keyword arguments #}
{% querystring my_query_dict my_dict foo=3 bar=None %}
"""
if not args:
args = [context.request.GET]
params = QueryDict(mutable=True)
for d in [*args, kwargs]:
if not isinstance(d, Mapping):
raise TemplateSyntaxError(
"querystring requires mappings for positional arguments (got "
"%r instead)." % d
)
for key, value in d.items():
if not isinstance(key, str):
raise TemplateSyntaxError(
"querystring requires strings for mapping keys (got %r "
"instead)." % key
)
if value is None:
params.pop(key, None)
elif isinstance(value, Iterable) and not isinstance(value, str):
params.setlist(key, value)
else:
params[key] = value
query_string = params.urlencode() if params else ""
return f"?{query_string}"
@register.tag
def regroup(parser, token):
"""
Regroup a list of alike objects by a common attribute.
This complex tag is best illustrated by use of an example: say that
``musicians`` is a list of ``Musician`` objects that have ``name`` and
``instrument`` attributes, and you'd like to display a list that
looks like:
* Guitar:
* Django Reinhardt
* Emily Remler
* Piano:
* Lovie Austin
* Bud Powell
* Trumpet:
* Duke Ellington
The following snippet of template code would accomplish this dubious task::
{% regroup musicians by instrument as grouped %}
<ul>
{% for group in grouped %}
<li>{{ group.grouper }}
<ul>
{% for musician in group.list %}
<li>{{ musician.name }}</li>
{% endfor %}
</ul>
{% endfor %}
</ul>
As you can see, ``{% regroup %}`` populates a variable with a list of
objects with ``grouper`` and ``list`` attributes. ``grouper`` contains the
item that was grouped by; ``list`` contains the list of objects that share
that ``grouper``. In this case, ``grouper`` would be ``Guitar``, ``Piano``
and ``Trumpet``, and ``list`` is the list of musicians who play this
instrument.
Note that ``{% regroup %}`` does not work when the list to be grouped is
not sorted by the key you are grouping by! This means that if your list of
musicians was not sorted by instrument, you'd need to make sure it is
sorted before using it, i.e.::
{% regroup musicians|dictsort:"instrument" by instrument as grouped %}
"""
bits = token.split_contents()
if len(bits) != 6:
raise TemplateSyntaxError("'regroup' tag takes five arguments")
target = parser.compile_filter(bits[1])
if bits[2] != "by":
raise TemplateSyntaxError("second argument to 'regroup' tag must be 'by'")
if bits[4] != "as":
raise TemplateSyntaxError("next-to-last argument to 'regroup' tag must be 'as'")
var_name = bits[5]
# RegroupNode will take each item in 'target', put it in the context under
# 'var_name', evaluate 'var_name'.'expression' in the current context, and
# group by the resulting value. After all items are processed, it will
# save the final result in the context under 'var_name', thus clearing the
# temporary values. This hack is necessary because the template engine
# doesn't provide a context-aware equivalent of Python's getattr.
expression = parser.compile_filter(
var_name + VARIABLE_ATTRIBUTE_SEPARATOR + bits[3]
)
return RegroupNode(target, expression, var_name)
@register.tag
def resetcycle(parser, token):
"""
Reset a cycle tag.
If an argument is given, reset the last rendered cycle tag whose name
matches the argument, else reset the last rendered cycle tag (named or
unnamed).
"""
args = token.split_contents()
if len(args) > 2:
raise TemplateSyntaxError("%r tag accepts at most one argument." % args[0])
if len(args) == 2:
name = args[1]
try:
return ResetCycleNode(parser._named_cycle_nodes[name])
except (AttributeError, KeyError):
raise TemplateSyntaxError("Named cycle '%s' does not exist." % name)
try:
return ResetCycleNode(parser._last_cycle_node)
except AttributeError:
raise TemplateSyntaxError("No cycles in template.")
@register.tag
def spaceless(parser, token):
"""
Remove whitespace between HTML tags, including tab and newline characters.
Example usage::
{% spaceless %}
<p>
<a href="foo/">Foo</a>
</p>
{% endspaceless %}
This example returns this HTML::
<p><a href="foo/">Foo</a></p>
Only space between *tags* is normalized -- not space between tags and text.
In this example, the space around ``Hello`` isn't stripped::
{% spaceless %}
<strong>
Hello
</strong>
{% endspaceless %}
"""
nodelist = parser.parse(("endspaceless",))
parser.delete_first_token()
return SpacelessNode(nodelist)
@register.tag
def templatetag(parser, token):
"""
Output one of the bits used to compose template tags.
Since the template system has no concept of "escaping", to display one of
the bits used in template tags, you must use the ``{% templatetag %}`` tag.
The argument tells which template bit to output:
================== =======
Argument Outputs
================== =======
``openblock`` ``{%``
``closeblock`` ``%}``
``openvariable`` ``{{``
``closevariable`` ``}}``
``openbrace`` ``{``
``closebrace`` ``}``
``opencomment`` ``{#``
``closecomment`` ``#}``
================== =======
"""
# token.split_contents() isn't useful here because this tag doesn't accept
# variable as arguments.
bits = token.contents.split()
if len(bits) != 2:
raise TemplateSyntaxError("'templatetag' statement takes one argument")
tag = bits[1]
if tag not in TemplateTagNode.mapping:
raise TemplateSyntaxError(
"Invalid templatetag argument: '%s'."
" Must be one of: %s" % (tag, list(TemplateTagNode.mapping))
)
return TemplateTagNode(tag)
@register.tag
def url(parser, token):
r"""
Return an absolute URL matching the given view with its parameters.
This is a way to define links that aren't tied to a particular URL
configuration::
{% url "url_name" arg1 arg2 %}
or
{% url "url_name" name1=value1 name2=value2 %}
The first argument is a URL pattern name. Other arguments are
space-separated values that will be filled in place of positional and
keyword arguments in the URL. Don't mix positional and keyword arguments.
All arguments for the URL must be present.
For example, if you have a view ``app_name.views.client_details`` taking
the client's id and the corresponding line in a URLconf looks like this::
path(
'client/<int:id>/',
views.client_details,
name='client-detail-view',
)
and this app's URLconf is included into the project's URLconf under some
path::
path('clients/', include('app_name.urls'))
then in a template you can create a link for a certain client like this::
{% url "client-detail-view" client.id %}
The URL will look like ``/clients/client/123/``.
The first argument may also be the name of a template variable that will be
evaluated to obtain the view name or the URL name, e.g.::
{% with url_name="client-detail-view" %}
{% url url_name client.id %}
{% endwith %}
"""
bits = token.split_contents()
if len(bits) < 2:
raise TemplateSyntaxError(
"'%s' takes at least one argument, a URL pattern name." % bits[0]
)
viewname = parser.compile_filter(bits[1])
args = []
kwargs = {}
asvar = None
bits = bits[2:]
if len(bits) >= 2 and bits[-2] == "as":
asvar = bits[-1]
bits = bits[:-2]
for bit in bits:
match = kwarg_re.match(bit)
if not match:
raise TemplateSyntaxError("Malformed arguments to url tag")
name, value = match.groups()
if name:
kwargs[name] = parser.compile_filter(value)
else:
args.append(parser.compile_filter(value))
return URLNode(viewname, args, kwargs, asvar)
@register.tag
def verbatim(parser, token):
"""
Stop the template engine from rendering the contents of this block tag.
Usage::
{% verbatim %}
{% don't process this %}
{% endverbatim %}
You can also designate a specific closing tag block (allowing the
unrendered use of ``{% endverbatim %}``)::
{% verbatim myblock %}
...
{% endverbatim myblock %}
"""
nodelist = parser.parse(("endverbatim",))
parser.delete_first_token()
return VerbatimNode(nodelist.render(Context()))
@register.tag
def widthratio(parser, token):
"""
For creating bar charts and such. Calculate the ratio of a given value to a
maximum value, and then apply that ratio to a constant.
For example::
<img src="bar.png" alt="Bar"
height="10"
width="{% widthratio this_value max_value max_width %}">
If ``this_value`` is 175, ``max_value`` is 200, and ``max_width`` is 100,
the image in the above example will be 88 pixels wide
(because 175/200 = .875; .875 * 100 = 87.5 which is rounded up to 88).
In some cases you might want to capture the result of widthratio in a
variable. It can be useful for instance in a blocktranslate like this::
{% widthratio this_value max_value max_width as width %}
{% blocktranslate %}The width is: {{ width }}{% endblocktranslate %}
"""
bits = token.split_contents()
if len(bits) == 4:
tag, this_value_expr, max_value_expr, max_width = bits
asvar = None
elif len(bits) == 6:
tag, this_value_expr, max_value_expr, max_width, as_, asvar = bits
if as_ != "as":
raise TemplateSyntaxError(
"Invalid syntax in widthratio tag. Expecting 'as' keyword"
)
else:
raise TemplateSyntaxError("widthratio takes at least three arguments")
return WidthRatioNode(
parser.compile_filter(this_value_expr),
parser.compile_filter(max_value_expr),
parser.compile_filter(max_width),
asvar=asvar,
)
@register.tag("with")
def do_with(parser, token):
"""
Add one or more values to the context (inside of this block) for caching
and easy access.
For example::
{% with total=person.some_sql_method %}
{{ total }} object{{ total|pluralize }}
{% endwith %}
Multiple values can be added to the context::
{% with foo=1 bar=2 %}
...
{% endwith %}
The legacy format of ``{% with person.some_sql_method as total %}`` is
still accepted.
"""
bits = token.split_contents()
remaining_bits = bits[1:]
extra_context = token_kwargs(remaining_bits, parser, support_legacy=True)
if not extra_context:
raise TemplateSyntaxError(
"%r expected at least one variable assignment" % bits[0]
)
if remaining_bits:
raise TemplateSyntaxError(
"%r received an invalid token: %r" % (bits[0], remaining_bits[0])
)
nodelist = parser.parse(("endwith",))
parser.delete_first_token()
return WithNode(None, None, nodelist, extra_context=extra_context)
| TemplateIfParser |
python | mlflow__mlflow | tests/pyfunc/test_chat_agent.py | {
"start": 2970,
"end": 16030
} | class ____(ChatAgent):
def predict(
self, messages: list[ChatAgentMessage], context: ChatContext, custom_inputs: dict[str, Any]
) -> ChatAgentResponse:
mock_response = get_mock_response(messages)
return ChatAgentResponse(
**mock_response,
custom_outputs=custom_inputs,
)
def test_chat_agent_save_load(tmp_path):
model = SimpleChatAgent()
mlflow.pyfunc.save_model(python_model=model, path=tmp_path)
loaded_model = mlflow.pyfunc.load_model(tmp_path)
assert isinstance(loaded_model._model_impl, _ChatAgentPyfuncWrapper)
input_schema = loaded_model.metadata.get_input_schema()
output_schema = loaded_model.metadata.get_output_schema()
assert input_schema == CHAT_AGENT_INPUT_SCHEMA
assert output_schema == CHAT_AGENT_OUTPUT_SCHEMA
def test_chat_agent_save_load_dict_output(tmp_path):
model = SimpleDictChatAgent()
mlflow.pyfunc.save_model(python_model=model, path=tmp_path)
loaded_model = mlflow.pyfunc.load_model(tmp_path)
assert isinstance(loaded_model._model_impl, _ChatAgentPyfuncWrapper)
input_schema = loaded_model.metadata.get_input_schema()
output_schema = loaded_model.metadata.get_output_schema()
assert input_schema == CHAT_AGENT_INPUT_SCHEMA
assert output_schema == CHAT_AGENT_OUTPUT_SCHEMA
def test_chat_agent_trace(tmp_path):
model = SimpleChatAgent()
mlflow.pyfunc.save_model(python_model=model, path=tmp_path)
# predict() call during saving chat model should not generate a trace
assert len(get_traces()) == 0
loaded_model = mlflow.pyfunc.load_model(tmp_path)
messages = [{"role": "user", "content": "Hello!"}]
loaded_model.predict({"messages": messages})
traces = get_traces()
assert len(traces) == 1
assert traces[0].info.tags[TraceTagKey.TRACE_NAME] == "predict"
request = json.loads(traces[0].data.request)
assert [{k: v for k, v in msg.items() if k != "id"} for msg in request["messages"]] == [
{k: v for k, v in ChatAgentMessage(**msg).model_dump().items() if k != "id"}
for msg in messages
]
def test_chat_agent_save_throws_with_signature(tmp_path):
model = SimpleChatAgent()
with pytest.raises(MlflowException, match="Please remove the `signature` parameter"):
mlflow.pyfunc.save_model(
python_model=model,
path=tmp_path,
signature=ModelSignature(
inputs=Schema([ColSpec(name="test", type=DataType.string)]),
),
)
@pytest.mark.parametrize(
"ret",
[
"not a ChatAgentResponse",
{"dict": "with", "bad": "keys"},
{
"id": "1",
"created": 1,
"model": "m",
"choices": [{"bad": "choice"}],
"usage": {
"prompt_tokens": 10,
"completion_tokens": 10,
"total_tokens": 20,
},
},
],
)
def test_save_throws_on_invalid_output(tmp_path, ret):
class BadChatAgent(ChatAgent):
def predict(
self,
messages: list[ChatAgentMessage],
context: ChatContext,
custom_inputs: dict[str, Any],
) -> ChatAgentResponse:
return ret
model = BadChatAgent()
with pytest.raises(
MlflowException,
match=("Failed to save ChatAgent. Ensure your model's predict"),
):
mlflow.pyfunc.save_model(python_model=model, path=tmp_path)
def test_chat_agent_predict(tmp_path):
model = ChatAgentWithCustomInputs()
mlflow.pyfunc.save_model(python_model=model, path=tmp_path)
loaded_model = mlflow.pyfunc.load_model(tmp_path)
# test that a single dictionary will work
messages = [
{"role": "system", "content": "You are a helpful assistant"},
{"role": "user", "content": "Hello!"},
]
response = loaded_model.predict({"messages": messages})
assert response["messages"][0]["content"] == "You are a helpful assistant"
def test_chat_agent_works_with_infer_signature_input_example():
model = SimpleChatAgent()
input_example = {
"messages": [
{
"role": "system",
"content": "You are in helpful assistant!",
},
{
"role": "user",
"content": "What is Retrieval-augmented Generation?",
},
],
"context": {
"conversation_id": "123",
"user_id": "456",
},
"stream": False, # this is set by default
}
with mlflow.start_run():
model_info = mlflow.pyfunc.log_model(
name="model", python_model=model, input_example=input_example
)
assert model_info.signature.inputs == CHAT_AGENT_INPUT_SCHEMA
assert model_info.signature.outputs == CHAT_AGENT_OUTPUT_SCHEMA
mlflow_model = Model.load(model_info.model_uri)
local_path = _download_artifact_from_uri(model_info.model_uri)
loaded_input_example = mlflow_model.load_input_example(local_path)
# drop the generated UUID
loaded_input_example["messages"] = [
{k: v for k, v in msg.items() if k != "id"} for msg in loaded_input_example["messages"]
]
assert loaded_input_example == input_example
inference_payload = load_serving_example(model_info.model_uri)
response = pyfunc_serve_and_score_model(
model_uri=model_info.model_uri,
data=inference_payload,
content_type="application/json",
extra_args=["--env-manager", "local"],
)
expect_status_code(response, 200)
model_response = json.loads(response.content)
assert model_response["messages"][0]["content"] == "You are in helpful assistant!"
def test_chat_agent_logs_default_metadata_task():
model = SimpleChatAgent()
with mlflow.start_run():
model_info = mlflow.pyfunc.log_model(name="model", python_model=model)
assert model_info.signature.inputs == CHAT_AGENT_INPUT_SCHEMA
assert model_info.signature.outputs == CHAT_AGENT_OUTPUT_SCHEMA
assert model_info.metadata["task"] == "agent/v2/chat"
with mlflow.start_run():
model_info_with_override = mlflow.pyfunc.log_model(
name="model", python_model=model, metadata={"task": None}
)
assert model_info_with_override.metadata["task"] is None
def test_chat_agent_works_with_chat_agent_request_input_example():
model = SimpleChatAgent()
input_example_no_params = {"messages": [{"role": "user", "content": "What is rag?"}]}
with mlflow.start_run():
model_info = mlflow.pyfunc.log_model(
name="model", python_model=model, input_example=input_example_no_params
)
mlflow_model = Model.load(model_info.model_uri)
local_path = _download_artifact_from_uri(model_info.model_uri)
assert mlflow_model.load_input_example(local_path) == input_example_no_params
input_example_with_params = {
"messages": [{"role": "user", "content": "What is rag?"}],
"context": {"conversation_id": "121", "user_id": "123"},
}
with mlflow.start_run():
model_info = mlflow.pyfunc.log_model(
name="model", python_model=model, input_example=input_example_with_params
)
mlflow_model = Model.load(model_info.model_uri)
local_path = _download_artifact_from_uri(model_info.model_uri)
assert mlflow_model.load_input_example(local_path) == input_example_with_params
inference_payload = load_serving_example(model_info.model_uri)
response = pyfunc_serve_and_score_model(
model_uri=model_info.model_uri,
data=inference_payload,
content_type="application/json",
extra_args=["--env-manager", "local"],
)
expect_status_code(response, 200)
model_response = json.loads(response.content)
assert model_response["messages"][0]["content"] == "What is rag?"
def test_chat_agent_predict_stream(tmp_path):
model = SimpleChatAgent()
mlflow.pyfunc.save_model(python_model=model, path=tmp_path)
loaded_model = mlflow.pyfunc.load_model(tmp_path)
messages = [
{"role": "user", "content": "Hello!"},
]
responses = list(loaded_model.predict_stream({"messages": messages}))
for i, resp in enumerate(responses[:-1]):
assert resp["delta"]["content"] == f"message {i}"
def test_chat_agent_can_receive_and_return_custom():
messages = [{"role": "user", "content": "Hello!"}]
input_example = {
"messages": messages,
"custom_inputs": {"image_url": "example", "detail": "high", "other_dict": {"key": "value"}},
}
model = ChatAgentWithCustomInputs()
with mlflow.start_run():
model_info = mlflow.pyfunc.log_model(
name="model",
python_model=model,
input_example=input_example,
)
loaded_model = mlflow.pyfunc.load_model(model_info.model_uri)
# test that it works for normal pyfunc predict
response = loaded_model.predict(input_example)
assert response["custom_outputs"] == input_example["custom_inputs"]
# test that it works in serving
inference_payload = load_serving_example(model_info.model_uri)
response = pyfunc_serve_and_score_model(
model_uri=model_info.model_uri,
data=inference_payload,
content_type="application/json",
extra_args=["--env-manager", "local"],
)
serving_response = json.loads(response.content)
assert serving_response["custom_outputs"] == input_example["custom_inputs"]
def test_chat_agent_predict_wrapper():
model = ChatAgentWithCustomInputs()
dict_input_example = {
"messages": [{"role": "user", "content": "What is rag?"}],
"context": {"conversation_id": "121", "user_id": "123"},
"custom_inputs": {"image_url": "example", "detail": "high", "other_dict": {"key": "value"}},
}
chat_agent_request = ChatAgentRequest(**dict_input_example)
pydantic_input_example = (
chat_agent_request.messages,
chat_agent_request.context,
chat_agent_request.custom_inputs,
)
dict_input_response = model.predict(dict_input_example)
pydantic_input_response = model.predict(*pydantic_input_example)
assert dict_input_response.messages[0].id is not None
del dict_input_response.messages[0].id
assert pydantic_input_response.messages[0].id is not None
del pydantic_input_response.messages[0].id
assert dict_input_response == pydantic_input_response
no_context_dict_input_example = {**dict_input_example, "context": None}
no_context_pydantic_input_example = (
chat_agent_request.messages,
None,
chat_agent_request.custom_inputs,
)
dict_input_response = model.predict(no_context_dict_input_example)
pydantic_input_response = model.predict(*no_context_pydantic_input_example)
assert dict_input_response.messages[0].id is not None
del dict_input_response.messages[0].id
assert pydantic_input_response.messages[0].id is not None
del pydantic_input_response.messages[0].id
assert dict_input_response == pydantic_input_response
model = SimpleChatAgent()
dict_input_response = model.predict(dict_input_example)
pydantic_input_response = model.predict(*pydantic_input_example)
assert dict_input_response.messages[0].id is not None
del dict_input_response.messages[0].id
assert pydantic_input_response.messages[0].id is not None
del pydantic_input_response.messages[0].id
assert dict_input_response == pydantic_input_response
assert list(model.predict_stream(dict_input_example)) == list(
model.predict_stream(*pydantic_input_example)
)
with pytest.raises(MlflowException, match="Invalid dictionary input for a ChatAgent"):
model.predict({"malformed dict": "bad"})
with pytest.raises(MlflowException, match="Invalid dictionary input for a ChatAgent"):
model.predict_stream({"malformed dict": "bad"})
model = SimpleBadChatAgent()
with pytest.raises(pydantic.ValidationError, match="validation error for ChatAgentResponse"):
model.predict(dict_input_example)
with pytest.raises(pydantic.ValidationError, match="validation error for ChatAgentChunk"):
list(model.predict_stream(dict_input_example))
def test_chat_agent_predict_with_params(tmp_path):
# test to codify having params in the signature
# needed because `load_model_and_predict` in `utils/_capture_modules.py` expects a params field
model = SimpleChatAgent()
mlflow.pyfunc.save_model(python_model=model, path=tmp_path)
loaded_model = mlflow.pyfunc.load_model(tmp_path)
assert isinstance(loaded_model._model_impl, _ChatAgentPyfuncWrapper)
response = loaded_model.predict(CHAT_AGENT_INPUT_EXAMPLE, params=None)
assert response["messages"][0]["content"] == "Hello!"
responses = list(loaded_model.predict_stream(CHAT_AGENT_INPUT_EXAMPLE, params=None))
for i, resp in enumerate(responses[:-1]):
assert resp["delta"]["content"] == f"message {i}"
| ChatAgentWithCustomInputs |
python | openai__openai-python | src/openai/resources/containers/containers.py | {
"start": 1193,
"end": 8787
} | class ____(SyncAPIResource):
@cached_property
def files(self) -> Files:
return Files(self._client)
@cached_property
def with_raw_response(self) -> ContainersWithRawResponse:
"""
This property can be used as a prefix for any HTTP method call to return
the raw response object instead of the parsed content.
For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
"""
return ContainersWithRawResponse(self)
@cached_property
def with_streaming_response(self) -> ContainersWithStreamingResponse:
"""
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
For more information, see https://www.github.com/openai/openai-python#with_streaming_response
"""
return ContainersWithStreamingResponse(self)
def create(
self,
*,
name: str,
expires_after: container_create_params.ExpiresAfter | Omit = omit,
file_ids: SequenceNotStr[str] | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> ContainerCreateResponse:
"""
Create Container
Args:
name: Name of the container to create.
expires_after: Container expiration time in seconds relative to the 'anchor' time.
file_ids: IDs of files to copy to the container.
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
return self._post(
"/containers",
body=maybe_transform(
{
"name": name,
"expires_after": expires_after,
"file_ids": file_ids,
},
container_create_params.ContainerCreateParams,
),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=ContainerCreateResponse,
)
def retrieve(
self,
container_id: str,
*,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> ContainerRetrieveResponse:
"""
Retrieve Container
Args:
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
if not container_id:
raise ValueError(f"Expected a non-empty value for `container_id` but received {container_id!r}")
return self._get(
f"/containers/{container_id}",
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=ContainerRetrieveResponse,
)
def list(
self,
*,
after: str | Omit = omit,
limit: int | Omit = omit,
order: Literal["asc", "desc"] | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> SyncCursorPage[ContainerListResponse]:
"""List Containers
Args:
after: A cursor for use in pagination.
`after` is an object ID that defines your place
in the list. For instance, if you make a list request and receive 100 objects,
ending with obj_foo, your subsequent call can include after=obj_foo in order to
fetch the next page of the list.
limit: A limit on the number of objects to be returned. Limit can range between 1 and
100, and the default is 20.
order: Sort order by the `created_at` timestamp of the objects. `asc` for ascending
order and `desc` for descending order.
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
return self._get_api_list(
"/containers",
page=SyncCursorPage[ContainerListResponse],
options=make_request_options(
extra_headers=extra_headers,
extra_query=extra_query,
extra_body=extra_body,
timeout=timeout,
query=maybe_transform(
{
"after": after,
"limit": limit,
"order": order,
},
container_list_params.ContainerListParams,
),
),
model=ContainerListResponse,
)
def delete(
self,
container_id: str,
*,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> None:
"""
Delete Container
Args:
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
if not container_id:
raise ValueError(f"Expected a non-empty value for `container_id` but received {container_id!r}")
extra_headers = {"Accept": "*/*", **(extra_headers or {})}
return self._delete(
f"/containers/{container_id}",
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=NoneType,
)
| Containers |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 77863,
"end": 78580
} | class ____(sgqlc.types.Input):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = (
"repository_id",
"title",
"body",
"category_id",
"client_mutation_id",
)
repository_id = sgqlc.types.Field(
sgqlc.types.non_null(ID), graphql_name="repositoryId"
)
title = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="title")
body = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="body")
category_id = sgqlc.types.Field(sgqlc.types.non_null(ID), graphql_name="categoryId")
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
| CreateDiscussionInput |
python | cython__cython | runtests.py | {
"start": 80095,
"end": 81951
} | class ____(unittest.TestCase):
working_dir = "Demos/embed"
def setUp(self):
self.old_dir = os.getcwd()
os.chdir(self.working_dir)
os.system(
"make PYTHON='%s' clean > /dev/null" % sys.executable)
def tearDown(self):
try:
os.system(
"make PYTHON='%s' clean > /dev/null" % sys.executable)
except:
pass
os.chdir(self.old_dir)
def test_embed(self):
libname = sysconfig.get_config_var('LIBRARY')
libdir = sysconfig.get_config_var('LIBDIR')
if not os.path.isdir(libdir) or libname not in os.listdir(libdir):
libdir = os.path.join(os.path.dirname(sys.executable), '..', 'lib')
if not os.path.isdir(libdir) or libname not in os.listdir(libdir):
libdir = os.path.join(libdir, 'python%d.%d' % sys.version_info[:2], 'config')
if not os.path.isdir(libdir) or libname not in os.listdir(libdir):
# report the error for the original directory
libdir = sysconfig.get_config_var('LIBDIR')
cython = os.path.abspath(os.path.join('..', '..', 'cython.py'))
try:
subprocess.check_output([
"make",
"PYTHON='%s'" % sys.executable,
"CYTHON='%s'" % cython,
"LIBDIR1='%s'" % libdir,
"paths", "test",
],
stderr=subprocess.STDOUT,
)
except subprocess.CalledProcessError as err:
if err.output:
self.fail("EmbedTest failed: " + err.output.decode().strip())
raise
self.assertTrue(True) # :)
def load_listfile(filename):
# just reuse the FileListExclude implementation
return FileListExcluder(filename)
| EmbedTest |
python | pydantic__pydantic | pydantic-core/tests/conftest.py | {
"start": 3550,
"end": 5727
} | class ____:
def __init__(self, schema: bool, extra: bool):
assert schema or extra
self.schema = schema
self.validator_args = {'strict': True} if extra else {}
@pytest.fixture(
params=[
StrictModeType(schema=True, extra=False),
StrictModeType(schema=False, extra=True),
StrictModeType(schema=True, extra=True),
],
ids=['strict-schema', 'strict-extra', 'strict-both'],
)
def strict_mode_type(request) -> StrictModeType:
return request.param
@pytest.fixture
def tmp_work_path(tmp_path: Path):
"""
Create a temporary working directory.
"""
previous_cwd = Path.cwd()
os.chdir(tmp_path)
yield tmp_path
os.chdir(previous_cwd)
@pytest.fixture
def import_execute(request, tmp_work_path: Path):
def _import_execute(source: str, *, custom_module_name: str | None = None):
module_name = custom_module_name or request.node.name
module_path = tmp_work_path / f'{module_name}.py'
module_path.write_text(source)
spec = importlib.util.spec_from_file_location('__main__', str(module_path))
module = importlib.util.module_from_spec(spec)
try:
spec.loader.exec_module(module)
except KeyboardInterrupt:
print('KeyboardInterrupt')
else:
return module
return _import_execute
@pytest.fixture
def pydantic_version():
try:
import pydantic
# include major and minor version only
return '.'.join(pydantic.__version__.split('.')[:2])
except ImportError:
return 'latest'
def infinite_generator():
i = 0
while True:
yield i
i += 1
def assert_gc(test: Callable[[], bool], timeout: float = 10) -> None:
"""Helper to retry garbage collection until the test passes or timeout is
reached.
This is useful on free-threading where the GC collect call finishes before
all cleanup is done.
"""
start = now = time()
while now - start < timeout:
if test():
return
gc.collect()
sleep(0.1)
now = time()
raise AssertionError('Timeout waiting for GC')
| StrictModeType |
python | Pylons__pyramid | src/pyramid/httpexceptions.py | {
"start": 33423,
"end": 34563
} | class ____(HTTPClientError):
"""
subclass of :class:`~HTTPClientError`
This indicates that the server is unwilling to process
the request because its header fields are too large. The request MAY
be resubmitted after reducing the size of the request header fields.
RFC 6585.5
code: 431, title: Request Header Fields Too Large
"""
code = 431
title = 'Request Header Fields Too Large'
explanation = 'The requests header fields were too large.'
############################################################
# 5xx Server Error
############################################################
# Response status codes beginning with the digit "5" indicate cases in
# which the server is aware that it has erred or is incapable of
# performing the request. Except when responding to a HEAD request, the
# server SHOULD include an entity containing an explanation of the error
# situation, and whether it is a temporary or permanent condition. User
# agents SHOULD display any included entity to the user. These response
# codes are applicable to any request method.
| HTTPRequestHeaderFieldsTooLarge |
python | numba__numba | numba/tests/test_struct_ref.py | {
"start": 7579,
"end": 8431
} | class ____(MemoryLeakMixin, TestCase):
def test_overload_method(self):
@njit
def check(x):
vs = np.arange(10, dtype=np.float64)
ctr = 11
obj = MyStruct(vs, ctr)
return obj.testme(x)
x = 3
got = check(x)
expect = check.py_func(x)
self.assertPreciseEqual(got, expect)
def test_overload_attribute(self):
@njit
def check():
vs = np.arange(10, dtype=np.float64)
ctr = 11
obj = MyStruct(vs, ctr)
return obj.prop
got = check()
expect = check.py_func()
self.assertPreciseEqual(got, expect)
def caching_test_make(x, y):
struct = MyStruct(values=x, counter=y)
return struct
def caching_test_use(struct, z):
return struct.testme(z)
| TestStructRefExtending |
python | getsentry__sentry | src/sentry/users/models/user_option.py | {
"start": 5458,
"end": 10264
} | class ____(Model):
"""
User options apply only to a user, and optionally a project OR an organization.
Options which are specific to a plugin should namespace
their key. e.g. key='myplugin:optname'
Keeping user feature state
key: "feature:assignment"
value: { updated: datetime, state: bool }
where key is one of:
(please add to this list if adding new keys)
- clock_24_hours
- 12hr vs. 24hr
- issue:defaults
- only used in Jira, set default reporter field
- issues:defaults:jira
- unused
- issues:defaults:jira_server
- unused
- prefers_issue_details_streamlined_ui
- Whether the user prefers the new issue details experience (boolean)
- prefers_chonk_ui
- Whether the user prefers the new Chonk UI experience (boolean)
- language
- which language to display the app in
- mail:email
- which email address to send an email to
- reports:disabled-organizations
- which orgs to not send weekly reports to
- seen_release_broadcast
- unused
- self_assign_issue
- "Claim Unassigned Issues I've Resolved"
- self_notifications
- "Notify Me About My Own Activity"
- stacktrace_order
- default, most recent first, most recent last (see `StacktraceOrder` enum)
- subscribe_by_default
- "Only On Issues I Subscribe To", "Only On Deploys With My Commits"
- subscribe_notes
- unused
- timezone
- user's timezone to display timestamps
- theme
- dark, light, or default
- twilio:alert
- unused
- workflow_notifications
- unused
"""
__relocation_scope__ = RelocationScope.User
user = FlexibleForeignKey(settings.AUTH_USER_MODEL)
project_id = HybridCloudForeignKey("sentry.Project", null=True, on_delete="CASCADE")
organization_id = HybridCloudForeignKey("sentry.Organization", null=True, on_delete="CASCADE")
key = models.CharField(max_length=64)
value = models.JSONField(null=True)
objects: ClassVar[UserOptionManager] = UserOptionManager()
class Meta:
app_label = "sentry"
db_table = "sentry_useroption"
unique_together = (("user", "project_id", "key"), ("user", "organization_id", "key"))
__repr__ = sane_repr("user_id", "project_id", "organization_id", "key", "value")
@classmethod
def get_relocation_ordinal_fields(cls, json_model: Any) -> list[str] | None:
# "global" user options (those with no organization and/or project scope) get a custom
# ordinal; non-global ones use the default ordering.
org_id = json_model["fields"].get("organization_id", None)
project_id = json_model["fields"].get("project_id", None)
if org_id is None and project_id is None:
return ["user", "key"]
return None
def normalize_before_relocation_import(
self, pk_map: PrimaryKeyMap, scope: ImportScope, flags: ImportFlags
) -> int | None:
from sentry.users.models.user import User
old_user_id = self.user_id
old_pk = super().normalize_before_relocation_import(pk_map, scope, flags)
if old_pk is None:
return None
# If we are merging users, ignore the imported options and use the existing user's
# options instead.
if pk_map.get_kind(get_model_name(User), old_user_id) == ImportKind.Existing:
return None
return old_pk
def write_relocation_import(
self, scope: ImportScope, flags: ImportFlags
) -> tuple[int, ImportKind] | None:
# TODO(getsentry/team-ospo#190): This circular import is a bit gross. See if we can't find a
# better place for this logic to live.
from sentry.users.api.endpoints.user_details import UserOptionsSerializer
serializer_options = UserOptionsSerializer(data={self.key: self.value}, partial=True)
serializer_options.is_valid(raise_exception=True)
# TODO(getsentry/team-ospo#190): Find a more general solution to one-off indices such as
# this. We currently have this constraint on prod, but not in Django, probably from legacy
# SQL manipulation.
#
# Ensure that global (ie: `organization_id` and `project_id` both `NULL`) constraints are
# not duplicated on import.
if self.organization_id is None and self.project_id is None:
colliding_global_user_option = self.objects.filter(
user=self.user, key=self.key, organization_id__isnull=True, project_id__isnull=True
).first()
if colliding_global_user_option is not None:
return None
return super().write_relocation_import(scope, flags)
| UserOption |
python | apache__airflow | airflow-core/src/airflow/dag_processing/bundles/base.py | {
"start": 2769,
"end": 7946
} | class ____:
"""
Utility helper for removing stale bundles.
:meta private:
"""
def _parse_dt(self, val) -> DateTime | None:
try:
return pendulum.parse(val)
except ParserError:
return None
@staticmethod
def _filter_for_min_versions(val: list[TrackedBundleVersionInfo]) -> list[TrackedBundleVersionInfo]:
min_versions_to_keep = conf.getint(
section="dag_processor",
key="stale_bundle_cleanup_min_versions",
)
return sorted(val, key=attrgetter("dt"), reverse=True)[min_versions_to_keep:]
@staticmethod
def _filter_for_recency(val: list[TrackedBundleVersionInfo]) -> list[TrackedBundleVersionInfo]:
age_threshold = conf.getint(
section="dag_processor",
key="stale_bundle_cleanup_age_threshold",
)
ret = []
now = pendulum.now(tz=pendulum.UTC)
cutoff = now - timedelta(seconds=age_threshold)
for item in val:
if item.dt < cutoff:
ret.append(item)
return ret
def _find_all_tracking_files(self, bundle_name) -> list[TrackedBundleVersionInfo] | None:
tracking_dir = get_bundle_tracking_dir(bundle_name=bundle_name)
found: list[TrackedBundleVersionInfo] = []
if not tracking_dir.exists():
log.debug("bundle usage tracking directory does not exist. tracking_dir=%s", tracking_dir)
return None
for file in tracking_dir.iterdir():
log.debug("found bundle tracking file, path=%s", file)
version = file.name
dt_str = file.read_text()
dt = self._parse_dt(val=dt_str)
if not dt:
log.error(
"could not parse val as datetime bundle_name=%s val=%s version=%s",
bundle_name,
dt_str,
version,
)
continue
found.append(TrackedBundleVersionInfo(lock_file_path=file, version=version, dt=dt))
return found
@staticmethod
def _remove_stale_bundle(bundle_name: str, info: TrackedBundleVersionInfo) -> None:
bundle_version_path = get_bundle_version_path(
bundle_name=bundle_name,
version=info.version,
)
def log_info(msg):
log.info(
"%s bundle_name=%s bundle_version=%s bundle_path=%s lock_file=%s",
msg,
bundle_name,
info.version,
bundle_version_path,
info.lock_file_path,
)
try:
log_info("removing stale bundle.")
with open(info.lock_file_path, "a") as f:
flock(f, fcntl.LOCK_EX | fcntl.LOCK_NB) # exclusive lock, do not wait
# remove the actual bundle copy
shutil.rmtree(bundle_version_path)
# remove the lock file
os.remove(info.lock_file_path)
except BlockingIOError:
log_info("could not obtain lock. stale bundle will not be removed.")
return
def _find_candidates(self, found):
"""Remove the recently used bundles."""
candidates = self._filter_for_min_versions(found)
candidates = self._filter_for_recency(candidates)
if log.isEnabledFor(level=logging.DEBUG):
self._debug_candidates(candidates, found)
return candidates
@staticmethod
def _debug_candidates(candidates, found):
recently_used = list(set(found).difference(candidates))
if candidates:
log.debug(
"found removal candidates. candidates=%s recently_used=%s",
candidates,
recently_used,
)
else:
log.debug(
"no removal candidates found. candidates=%s recently_used=%s",
candidates,
recently_used,
)
def _remove_stale_bundle_versions_for_bundle(self, bundle_name: str):
log.info("checking bundle for stale versions. bundle_name=%s", bundle_name)
found = self._find_all_tracking_files(bundle_name=bundle_name)
if not found:
return
candidates = self._find_candidates(found)
for info in candidates:
self._remove_stale_bundle(bundle_name=bundle_name, info=info)
def remove_stale_bundle_versions(self):
"""
Remove bundles that are not in use and have not been used for some time.
We will keep last N used bundles, and bundles last used with in X time.
This isn't really necessary on worker types that don't share storage
with other processes.
"""
from airflow.dag_processing.bundles.manager import DagBundlesManager
log.info("checking for stale bundle versions locally")
bundles = list(DagBundlesManager().get_all_dag_bundles())
for bundle in bundles:
if not bundle.supports_versioning:
continue
self._remove_stale_bundle_versions_for_bundle(bundle_name=bundle.name)
| BundleUsageTrackingManager |
python | huggingface__transformers | src/transformers/models/auto/modeling_auto.py | {
"start": 88239,
"end": 88511
} | class ____(_BaseAutoModelClass):
_model_mapping = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
AutoModelForZeroShotObjectDetection = auto_class_update(
AutoModelForZeroShotObjectDetection, head_doc="zero-shot object detection"
)
| AutoModelForZeroShotObjectDetection |
python | charliermarsh__ruff | crates/ruff_python_parser/resources/valid/other/decorator.py | {
"start": 60,
"end": 173
} | class ____:
pass
@decorator
def f(): ...
@a.b.c
def f(): ...
@a
@a.b.c
def f(): ...
@a
@1 | 2
@a.b.c
| Test |
python | pandas-dev__pandas | asv_bench/benchmarks/io/csv.py | {
"start": 19219,
"end": 19561
} | class ____:
# GH 16798
def setup(self):
self.csv = StringIO(
"strings\n" + "\n".join(["x" * (1 << 20) for _ in range(2100)])
)
def peakmem_over_2gb_input(self):
read_csv(self.csv, engine="c", low_memory=False)
from ..pandas_vb_common import setup # noqa: F401 isort:skip
| ReadCSVCParserLowMemory |
python | tensorflow__tensorflow | tensorflow/compiler/tests/lstm_test.py | {
"start": 1653,
"end": 10381
} | class ____(test.TestCase):
def setUp(self):
# The tests for a single LSTM cell and LSTM layer use these values as
# inputs. We always set the dimensionality of num_inputs=1; thus batch_size
# actually represents the different input cases.
self._inputs = np.array([[-1.], [-.5], [0.], [.5], [1.]], np.float32)
self._batch_size = len(self._inputs)
def _NextC(self, inputs, weight, m_prev, c_prev):
"""Returns the next c states of an LSTM cell."""
x = (inputs + m_prev) * weight
return _Clip(_Clip(_Sigmoid(x) * c_prev) + _Clip(_Sigmoid(x) * np.tanh(x)))
def _NextM(self, inputs, weight, m_prev, c_prev):
"""Returns the next m states of an LSTM cell."""
x = (inputs + m_prev) * weight
return _Clip(_Sigmoid(x) * self._NextC(inputs, weight, m_prev, c_prev))
def _RunLSTMCell(self, basename, init_weights, m_prev_scalar, c_prev_scalar,
pad_scalar):
with self.session() as sess:
num_inputs = 1
num_nodes = 1
weights = init_weights(lstm.LSTMCellWeightsShape(num_inputs, num_nodes))
m_prev = constant_op.constant([[m_prev_scalar]] * self._batch_size)
c_prev = constant_op.constant([[c_prev_scalar]] * self._batch_size)
x = constant_op.constant(self._inputs)
pad = constant_op.constant([[pad_scalar]] * self._batch_size)
m, c = lstm.LSTMCell(weights, m_prev, c_prev, x, pad)
_DumpGraph(sess.graph, 'lstm_cell_%s_%d_%d_%d' %
(basename, m_prev_scalar, c_prev_scalar, pad_scalar))
# Initialize variables and run the unrolled LSTM step.
self.evaluate(variables.global_variables_initializer())
return self.evaluate([m, c])
@test_util.run_without_tensor_float_32('TF32 capable devices fail the test'
' due to reduced matmul precision')
def testLSTMCell(self):
# Run with all-0 weights, no padding.
m, c = self._RunLSTMCell('zeros', init_ops.zeros_initializer(), 0., 0., 0.)
self.assertAllClose(m, [[0.]] * self._batch_size)
self.assertAllClose(c, [[0.]] * self._batch_size)
m, c = self._RunLSTMCell('zeros', init_ops.zeros_initializer(), 0., 1., 0.)
self.assertAllClose(m, [[.25]] * self._batch_size)
self.assertAllClose(c, [[.5]] * self._batch_size)
m, c = self._RunLSTMCell('zeros', init_ops.zeros_initializer(), 1., 0., 0.)
self.assertAllClose(m, [[.0]] * self._batch_size)
self.assertAllClose(c, [[.0]] * self._batch_size)
m, c = self._RunLSTMCell('zeros', init_ops.zeros_initializer(), 1., 1., 0.)
self.assertAllClose(m, [[.25]] * self._batch_size)
self.assertAllClose(c, [[.5]] * self._batch_size)
# Run with all-1 weights, no padding.
for m_prev in [0., 1.]:
for c_prev in [0., 1.]:
m, c = self._RunLSTMCell('ones',
init_ops.ones_initializer(), m_prev, c_prev,
0.)
self.assertAllClose(m, self._NextM(self._inputs, 1., m_prev, c_prev))
self.assertAllClose(c, self._NextC(self._inputs, 1., m_prev, c_prev))
# Run with random weights.
for weight in np.random.rand(3):
weight_tf = constant_op.constant(weight, dtypes.float32)
random_weight = lambda shape, w=weight_tf: array_ops.fill(shape, w)
# No padding.
for m_prev in [0., 1.]:
for c_prev in [0., 1.]:
m, c = self._RunLSTMCell('random', random_weight, m_prev, c_prev, 0.)
self.assertAllClose(m,
self._NextM(self._inputs, weight, m_prev, c_prev))
self.assertAllClose(c,
self._NextC(self._inputs, weight, m_prev, c_prev))
# Set padding.
for m_prev in [0., 1.]:
for c_prev in [0., 1.]:
m, c = self._RunLSTMCell('random', random_weight, m_prev, c_prev, 1.)
self.assertAllClose(m, [[m_prev]] * self._batch_size)
self.assertAllClose(c, [[c_prev]] * self._batch_size)
def testLSTMLayerErrors(self):
num_inputs = 1
num_nodes = 1
seq_length = 3
weights = array_ops.zeros(lstm.LSTMCellWeightsShape(num_inputs, num_nodes))
m = constant_op.constant([[0.]] * self._batch_size)
c = constant_op.constant([[0.]] * self._batch_size)
x_seq = [constant_op.constant(self._inputs)] * seq_length
pad = constant_op.constant([[0.]] * self._batch_size)
with self.assertRaisesWithPredicateMatch(ValueError, 'length of x_seq'):
lstm.LSTMLayer('lstm', weights, m, c, x_seq, [pad])
with self.assertRaisesWithPredicateMatch(ValueError, 'length of x_seq'):
lstm.LSTMLayer('lstm', weights, m, c, x_seq, [pad] * 2)
with self.assertRaisesWithPredicateMatch(ValueError, 'length of x_seq'):
lstm.LSTMLayer('lstm', weights, m, c, x_seq, [pad] * 4)
def _RunLSTMLayer(self, basename, init_weights, m_init_scalar, c_init_scalar,
pad_scalar):
with self.session() as sess:
num_inputs = 1
num_nodes = 1
seq_length = 3
weights = init_weights(lstm.LSTMCellWeightsShape(num_inputs, num_nodes))
m_init = constant_op.constant([[m_init_scalar]] * self._batch_size)
c_init = constant_op.constant([[c_init_scalar]] * self._batch_size)
x_seq = [constant_op.constant(self._inputs)] * seq_length
pad_seq = [constant_op.constant([[pad_scalar]] * self._batch_size)
] * seq_length
out_seq = lstm.LSTMLayer('lstm', weights, m_init, c_init, x_seq, pad_seq)
_DumpGraph(sess.graph, 'lstm_layer_%s_%d_%d_%d' %
(basename, m_init_scalar, c_init_scalar, pad_scalar))
# Initialize variables and run the unrolled LSTM layer.
self.evaluate(variables.global_variables_initializer())
return self.evaluate(out_seq)
@test_util.run_without_tensor_float_32('TF32 capable devices fail the test'
' due to reduced matmul precision')
def testLSTMLayer(self):
# Run with all-0 weights, no padding.
o = self._RunLSTMLayer('zeros', init_ops.zeros_initializer(), 0., 0., 0.)
self.assertAllClose(o, [[[0.]] * self._batch_size] * 3)
o = self._RunLSTMLayer('zeros', init_ops.zeros_initializer(), 0., 1., 0.)
self.assertAllClose(o, [[[.25]] * self._batch_size,
[[.125]] * self._batch_size,
[[.0625]] * self._batch_size])
o = self._RunLSTMLayer('zeros', init_ops.zeros_initializer(), 1., 0., 0.)
self.assertAllClose(o, [[[0.]] * self._batch_size] * 3)
o = self._RunLSTMLayer('zeros', init_ops.zeros_initializer(), 1., 1., 0.)
self.assertAllClose(o, [[[.25]] * self._batch_size,
[[.125]] * self._batch_size,
[[.0625]] * self._batch_size])
# Run with all-1 weights, no padding.
weight1 = 1.
for m_init in [0., 1.]:
for c_init in [0., 1.]:
o = self._RunLSTMLayer('ones',
init_ops.ones_initializer(), m_init, c_init, 0.)
m0 = self._NextM(self._inputs, weight1, m_init, c_init)
c0 = self._NextC(self._inputs, weight1, m_init, c_init)
self.assertAllClose(o[0], m0)
m1 = self._NextM(self._inputs, weight1, m0, c0)
c1 = self._NextC(self._inputs, weight1, m0, c0)
self.assertAllClose(o[1], m1)
m2 = self._NextM(self._inputs, weight1, m1, c1)
self.assertAllClose(o[2], m2)
# Run with random weights.
for weight in np.random.rand(3):
weight_tf = constant_op.constant(weight, dtypes.float32)
random_weight = lambda shape, w=weight_tf: array_ops.fill(shape, w)
# No padding.
for m_init in [0., 1.]:
for c_init in [0., 1.]:
o = self._RunLSTMLayer('random', random_weight, m_init, c_init, 0.)
m0 = self._NextM(self._inputs, weight, m_init, c_init)
c0 = self._NextC(self._inputs, weight, m_init, c_init)
self.assertAllClose(o[0], m0)
m1 = self._NextM(self._inputs, weight, m0, c0)
c1 = self._NextC(self._inputs, weight, m0, c0)
self.assertAllClose(o[1], m1)
m2 = self._NextM(self._inputs, weight, m1, c1)
self.assertAllClose(o[2], m2)
# Set padding.
o = self._RunLSTMLayer('random', random_weight, 0., 0., 1.)
self.assertAllClose(o, [[[0.]] * self._batch_size] * 3)
o = self._RunLSTMLayer('random', random_weight, 0., 1., 1.)
self.assertAllClose(o, [[[0.]] * self._batch_size] * 3)
o = self._RunLSTMLayer('random', random_weight, 1., 0., 1.)
self.assertAllClose(o, [[[1.]] * self._batch_size] * 3)
o = self._RunLSTMLayer('random', random_weight, 1., 1., 1.)
self.assertAllClose(o, [[[1.]] * self._batch_size] * 3)
| LSTMTest |
python | django__django | tests/auth_tests/models/with_foreign_key.py | {
"start": 229,
"end": 515
} | class ____(BaseUserManager):
def create_superuser(self, username, email, group, password):
user = self.model(username_id=username, email_id=email, group_id=group)
user.set_password(password)
user.save(using=self._db)
return user
| CustomUserWithFKManager |
python | tensorflow__tensorflow | tensorflow/python/ops/ragged/ragged_gather_op_test.py | {
"start": 1493,
"end": 19868
} | class ____(test_util.TensorFlowTestCase, parameterized.TestCase):
@parameterized.named_parameters([
# Basic gather (axis=0 and batch_dims=0)
dict(testcase_name='Params1DTensor_Indices1DTensor',
params=['a', 'b', 'c', 'd', 'e'],
indices=[2, 0, 2, 1],
expected=['c', 'a', 'c', 'b']),
dict(testcase_name='Params1DTensor_Indices2DRagged',
params=['a', 'b', 'c', 'd', 'e'],
indices=[[3, 1, 2], [1], [], [0]],
expected=[['d', 'b', 'c'], ['b'], [], ['a']]),
dict(testcase_name='Params2DRagged_Indices0DTensor',
params=[['a', 'b'], ['c', 'd', 'e'], ['f'], [], ['g']],
indices=1,
expected=['c', 'd', 'e']),
dict(testcase_name='Params2DRagged_Indices1DTensor',
params=[['a', 'b', 'c'], ['d'], [], ['e']],
indices=[3, 1, 2, 1, 0],
expected=[
['e'], ['d'], [], ['d'], ['a', 'b', 'c']]),
dict(testcase_name='Params2DRagged_Indices2DRagged',
params=[['a', 'b', 'c'], ['d'], [], ['e']],
indices=[[3, 1, 2], [1], [], [0]],
expected=[
[['e'], ['d'], []], [['d']], [], [['a', 'b', 'c']]]),
dict(testcase_name='Params3DRagged_Indices2DTensor',
params=[
[['a', 'b'], []], [['c', 'd'], ['e'], ['f']], [['g']]],
indices=[[1, 2], [0, 1], [2, 2]],
indices_ragged_rank=0,
expected=[
[[['c', 'd'], ['e'], ['f']], [['g']]],
[[['a', 'b'], []], [['c', 'd'], ['e'], ['f']]],
[[['g']], [['g']]]]),
dict(testcase_name='Params3DRagged_Indices3DTensor',
params=[[['a', 'b'], []],
[['c', 'd'], ['e'], ['f']],
[['g']]],
indices=[[[1, 2], [0, 1], [2, 2]], [[0, 0], [1, 2], [0, 1]]],
indices_ragged_rank=0,
expected=[
[[[['c', 'd'], ['e'], ['f']], [['g']]],
[[['a', 'b'], []], [['c', 'd'], ['e'], ['f']]],
[[['g']], [['g']]]],
[[[['a', 'b'], []], [['a', 'b'], []]],
[[['c', 'd'], ['e'], ['f']], [['g']]],
[[['a', 'b'], []], [['c', 'd'], ['e'], ['f']]]]]),
dict(testcase_name='Params1DTensor_Indices4DRaggedRank2',
params=['a', 'b', 'c', 'd', 'e', 'f', 'g'],
indices=[[[[3, 4], [0, 6]], []],
[[[2, 1], [1, 0]], [[2, 5]], [[2, 3]]],
[[[1, 0]]]],
indices_ragged_rank=2,
expected=[
[[['d', 'e'], ['a', 'g']], []],
[[['c', 'b'], ['b', 'a']], [['c', 'f']], [['c', 'd']]],
[[['b', 'a']]]]),
# Batch gather (batch_dims=1)
dict(testcase_name='Batch1D_Params2DRagged_Indices1DTensor',
params=[['a', 'b'], ['c'], ['d', 'e', 'f', 'g'], ['h']],
indices=[1, 0, 3, 0],
batch_dims=1,
expected=['b', 'c', 'g', 'h']),
dict(testcase_name='Batch1D_Params2DRagged_Indices2DTensor',
params=[['a', 'b'], ['c'], ['d', 'e', 'f', 'g'], ['h']],
indices=[[1, 0], [0, 0], [3, 1], [0, 0]],
indices_ragged_rank=0,
batch_dims=1,
expected=[['b', 'a'], ['c', 'c'], ['g', 'e'], ['h', 'h']]),
dict(testcase_name='Batch1D_Params2DRagged_Indices2DRagged',
params=[['a', 'b'], ['c'], ['d', 'e', 'f', 'g'], ['h']],
indices=[[1, 0], [], [3, 2, 1], [0]],
batch_dims=1,
expected=[['b', 'a'], [], ['g', 'f', 'e'], ['h']]),
dict(testcase_name='Batch1D_Params3DRagged_Indices3DRagged',
params=[[['a'], ['b', 'c']],
[],
[['d', 'e', 'f'], ['g'], ['h', 'i'], ['j']],
[['k']]],
indices=[[[1, 0], []], [], [[3, 2, 1], [0]], [[0]]],
batch_dims=1,
expected=[[[['b', 'c'], ['a']], []],
[],
[[['j'], ['h', 'i'], ['g']], [['d', 'e', 'f']]],
[[['k']]]]),
# Batch gather (batch_dims=2)
dict(testcase_name='Batch2D_Params3DRagged_Indices2DRagged',
params=[[['a', 'b', 'c'], ['d', 'e'], ['f']],
[['g'], ['h', 'i']]],
indices=[[0, 1, 0], [0, 1]],
batch_dims=2,
expected=[['a', 'e', 'f'], ['g', 'i']]),
dict(testcase_name='Batch2D_Params3DRagged_Indices3DRagged',
params=[[['a', 'b', 'c'], ['d', 'e'], ['f']],
[['g'], ['h', 'i']]],
indices=[[[2, 1, 0], [1, 1], [0]], [[0], []]],
batch_dims=2,
expected=[[['c', 'b', 'a'], ['e', 'e'], ['f']], [['g'], []]]),
# Batch gather (batch_dims=3)
dict(testcase_name='Batch3D_Params4DRagged_Indices3DRagged',
params=[[[['a', 'b', 'c'], ['d', 'e'], ['f']],
[['g'], ['h', 'i']]], [[['j']]]],
indices=[[[0, 1, 0], [0, 1]], [[0]]],
batch_dims=3,
expected=[[['a', 'e', 'f'], ['g', 'i']], [['j']]]),
# Axis gather (axis=1)
dict(testcase_name='Params2DRagged_Indices0DTensor_axis_1',
params=[['a', 'b'], ['c', 'd', 'e'], ['f', 'g'], ['h', 'i', 'j'],
['k', 'l']],
indices=1,
axis=1,
expected=['b', 'd', 'g', 'i', 'l']),
dict(testcase_name='Params2DRagged_Indices1DTensor_axis_1',
params=[['a', 'b'], ['c', 'd', 'e'], ['f', 'g'], ['h', 'i', 'j'],
['k', 'l']],
indices=[1, 0],
axis=1,
expected=[['b', 'a'], ['d', 'c'], ['g', 'f'], ['i', 'h'],
['l', 'k']]),
dict(testcase_name='Params3DRagged_Indices0DTensor_axis_1',
params=[[['a', 'b'], ['c', 'd', 'e']],
[['f', 'g'], ['h', 'i', 'j'], ['k', 'l']]],
indices=1,
axis=1,
expected=[['c', 'd', 'e'], ['h', 'i', 'j']]),
dict(testcase_name='Params3DRagged_Indices1DTensor_axis_1',
params=[[['a', 'b'], ['c', 'd', 'e']],
[['f', 'g'], ['h', 'i', 'j'], ['k', 'l']]],
indices=[1, 0],
axis=1,
expected=[[['c', 'd', 'e'], ['a', 'b']],
[['h', 'i', 'j'], ['f', 'g']]]),
# Batch/axis gather, batch = 1, axis > batch
dict(testcase_name='Params3DRagged_Indices1DTensor_batch_1_axis_2',
params=[[['a', 'b'], ['c', 'd', 'e']],
[['f', 'g'], ['h', 'i', 'j'], ['k', 'l']]],
indices=[1, 0],
axis=2,
batch_dims=1,
expected=[['b', 'd'], ['f', 'h', 'k']]),
dict(testcase_name='Params4DRagged_Indices1DTensor_batch_1_axis_2',
params=[[[['a', 'b'], ['c', 'd', 'e']]],
[[['f', 'g']], [['h', 'i', 'j'], ['k', 'l']]]],
indices=[0, 1],
axis=2,
batch_dims=1,
expected=[[['a', 'b']],
[['h', 'i', 'j'], ['k', 'l']]]),
]) # pyformat: disable
def testRaggedGather(self,
params,
indices,
expected,
axis=None,
batch_dims=0,
params_ragged_rank=None,
indices_ragged_rank=None):
params = ragged_factory_ops.constant(params, ragged_rank=params_ragged_rank)
indices = ragged_factory_ops.constant(
indices, ragged_rank=indices_ragged_rank)
actual = ragged_gather_ops.gather(
params, indices, axis=axis, batch_dims=batch_dims)
self.assertAllEqual(actual, self._str_to_bytes(expected))
def _str_to_bytes(self, x):
if isinstance(x, list):
return [self._str_to_bytes(v) for v in x]
elif isinstance(x, str) and bytes is not str:
return bytes(x, 'utf-8')
else:
return x
def testOutOfBoundsError(self):
tensor_params = ['a', 'b', 'c']
tensor_indices = [0, 1, 2]
ragged_params = ragged_factory_ops.constant([['a', 'b'], ['c']])
ragged_indices = ragged_factory_ops.constant([[0, 3]])
with self.assertRaisesRegex(errors.InvalidArgumentError,
r'indices\[1\] = 3 is not in \[0, 3\)'):
self.evaluate(ragged_gather_ops.gather(tensor_params, ragged_indices))
with self.assertRaisesRegex(errors.InvalidArgumentError,
r'indices\[2\] = 2 is not in \[0, 2\)'):
self.evaluate(ragged_gather_ops.gather(ragged_params, tensor_indices))
with self.assertRaisesRegex(errors.InvalidArgumentError,
r'indices\[1\] = 3 is not in \[0, 2\)'):
self.evaluate(ragged_gather_ops.gather(ragged_params, ragged_indices))
def testUnknownIndicesRankError(self):
if context.executing_eagerly():
return
params = ragged_factory_ops.constant([], ragged_rank=1)
indices = constant_op.constant([0], dtype=dtypes.int64)
indices = array_ops.placeholder_with_default(indices, None)
self.assertRaisesRegex(ValueError,
r'rank\(indices\) must be known statically',
ragged_gather_ops.gather, params, indices)
# pylint: disable=bad-whitespace
@parameterized.parameters([
# params.shape=[2, None]; indices.shape=[3]
dict(
params = [[1.0, 2.0], [3.0, 4.0, 5.0]],
indices = [0, 0, 1],
expected_out = [[1.0, 2.0], [1.0, 2.0], [3.0, 4.0, 5.0]],
out_grad = [[0.1, 0.2], [0.3, 0.4], [0.5, 0.6, 0.7]],
expected_grad = [[0.4, 0.6], [0.5, 0.6, 0.7]]),
# params.shape=[2, None]; indices.shape=[0]
dict(
params = [[1, 2], [3, 4, 5]],
indices = [],
expected_out = [],
out_grad = [],
expected_grad = [[0, 0], [0, 0, 0]]),
# params.shape=[2, None]; indices.shape=[2, 2]
dict(
params = [[1.0, 2.0], [3.0, 4.0, 5.0]],
indices = [[0, 0], [1, 0]],
expected_out = [[[1.0, 2.0], [1.0, 2.0]],
[[3.0, 4.0, 5.0], [1.0, 2.0]]],
out_grad = [[[0.1, 0.2], [0.3, 0.4]],
[[0.5, 0.6, 0.7], [0.8, 0.9]]],
expected_grad = [[1.2, 1.5], [0.5, 0.6, 0.7]]),
# params.shape=[3, None, None]; indices.shape=[3]
dict(
params = [[[1, 2], [3, 4, 5]], [[6.0]], [[7.0, 8.0]]],
indices = [2, 1, 2],
expected_out = [[[7.0, 8.0]], [[6.0]], [[7.0, 8.0]]],
out_grad = [[[0.1, 0.2]], [[0.3]], [[0.4, 0.5]]],
expected_grad = [[[0, 0], [0, 0, 0]], [[0.3]], [[0.5, 0.7]]]),
# params.shape=[3, None, None]; indices.shape=[0]
dict(
params = [[[1, 2], [3, 4, 5]], [[6.0]], [[7.0, 8.0]]],
indices = [2, 1, 2],
expected_out = [[[7.0, 8.0]], [[6.0]], [[7.0, 8.0]]],
out_grad = [[[0.1, 0.2]], [[0.3]], [[0.4, 0.5]]],
expected_grad = [[[0, 0], [0, 0, 0]], [[0.3]], [[0.5, 0.7]]]),
# params.shape=[0, None]; indices.shape=[0]
dict(
params = [],
indices = [],
expected_out = [],
out_grad = [],
expected_grad = [],
params_ragged_rank = 1),
# params.shape=[2, None, 2]; indices.shape=[3]
dict(
params = [[[1, 2], [3, 4]], [], [[5, 6]]],
indices = [1, 1, 2, 0, 2],
expected_out = [[], [], [[5, 6]], [[1, 2], [3, 4]], [[5, 6]]],
out_grad = [[], [], [[1, 2]], [[3, 4], [5, 6]], [[7, 7]]],
expected_grad = [[[3, 4], [5, 6]], [], [[8, 9]]],
params_ragged_rank = 1),
]) # pyformat: disable
@test_util.run_deprecated_v1
def testGradient(self,
params,
indices,
expected_out,
out_grad,
expected_grad,
params_ragged_rank=None):
"""Tests that ragged_gather generates the right gradient.
Args:
params: The `params` that should be passed to `gather`.
indices: The `indices` that should be passed to `gather`.
expected_out: The expected value of `gather(params, indices)`.
`expected_out.shape = indices.shape + params.shape[1:]`.
out_grad: The value that should be fed in as the gradient for `out` when
testing the gradient of `ragged_gather`. Must have the same shape as
`expected_out`.
expected_grad: The expected gradient for that should be returned for
`params`. Must have the same shape as `params`.
params_ragged_rank: The ragged_rank of `params`.
"""
if context.executing_eagerly():
return
params = ragged_factory_ops.constant(
params, dtype=dtypes.float32, ragged_rank=params_ragged_rank)
indices = constant_op.constant(indices, dtype=dtypes.int32)
out_ragged_rank = params.ragged_rank + indices.shape.ndims - 1
out_grad = ragged_factory_ops.constant(
out_grad, dtype=dtypes.float32, ragged_rank=out_ragged_rank)
expected_out = ragged_factory_ops.constant(
expected_out, dtype=dtypes.float32, ragged_rank=out_ragged_rank)
expected_grad = ragged_factory_ops.constant(
expected_grad,
dtype=dtypes.float32,
ragged_rank=params.ragged_rank)
out = ragged_gather_ops.gather(params, indices)
self.assertAllClose(out, expected_out)
grads = gradients_impl.gradients(
out.flat_values,
(params.nested_row_splits + (params.flat_values, indices,)),
out_grad.flat_values)
param_nested_splits_grads = grads[:-2]
params_flat_values_grad = grads[-2]
indices_grad = grads[-1]
self.assertEqual(indices_grad, None)
for splits_grad in param_nested_splits_grads:
self.assertEqual(splits_grad, None)
# The gradient generates an IndexedSlices; convert back to a normal Tensor.
self.assertIsInstance(params_flat_values_grad, indexed_slices.IndexedSlices)
params_flat_values_grad = ops.convert_to_tensor(params_flat_values_grad)
params_grad = params.with_flat_values(params_flat_values_grad)
self.assertAllClose(params_grad, expected_grad, atol=2e-6, rtol=2e-6)
@parameterized.parameters([
# Basic gather (batch_dims == 0, axis == 0)
dict(params_shape=[3, 4], indices_shape=[], axis=0),
dict(params_shape=[3, 4], indices_shape=[5], axis=0),
dict(params_shape=[3, 4], indices_shape=[2, 5], axis=0),
# Gather over axis (axis > 0)
dict(params_shape=[3, 4], indices_shape=[], axis=1),
dict(params_shape=[3, 4], indices_shape=[2], axis=1),
dict(params_shape=[3, 4], indices_shape=[2, 5], axis=1),
dict(params_shape=[7, 3, 1], indices_shape=[2, 4], axis=1),
dict(params_shape=[3, 4, 5, 6], indices_shape=[2, 1, 7], axis=1),
dict(params_shape=[7, 3, 5], indices_shape=[], axis=2),
dict(params_shape=[7, 3, 5], indices_shape=[2], axis=2),
dict(params_shape=[7, 3, 5], indices_shape=[4, 2], axis=2),
dict(params_shape=[7, 3, 5, 6], indices_shape=[4, 2], axis=2),
dict(params_shape=[7, 3, 5, 6], indices_shape=[], axis=3),
dict(params_shape=[7, 3, 5, 6], indices_shape=[4], axis=3),
dict(params_shape=[7, 3, 5, 6], indices_shape=[8, 4], axis=3),
dict(params_shape=[7, 3, 5, 6], indices_shape=[2, 3, 2, 3], axis=3),
# Batched gather (batch_dims > 0)
dict(params_shape=[7, 3], indices_shape=[7], batch_dims=1),
dict(params_shape=[7, 3], indices_shape=[7, 5], batch_dims=1),
dict(params_shape=[5, 3], indices_shape=[5, 7, 4, 2], batch_dims=1),
dict(params_shape=[2, 3, 6], indices_shape=[2], batch_dims=1),
dict(params_shape=[7, 3, 6], indices_shape=[7, 5, 4, 2], batch_dims=1),
dict(params_shape=[7, 3, 5], indices_shape=[7, 3], batch_dims=2),
dict(params_shape=[7, 3, 5], indices_shape=[7, 3, 2], batch_dims=2),
dict(params_shape=[7, 3, 5, 6], indices_shape=[7, 3, 5], batch_dims=3),
dict(params_shape=[2, 3, 5, 6], indices_shape=[2, 3, 5, 7], batch_dims=3),
# Batched gather with axis (axis > batch_dims > 0)
dict(params_shape=[2, 3, 6], indices_shape=[2], axis=2, batch_dims=1),
dict(params_shape=[2, 3, 6], indices_shape=[2, 4], axis=2, batch_dims=1),
dict(
params_shape=[3, 1, 6, 7], indices_shape=[3, 4], axis=3,
batch_dims=1),
dict(
params_shape=[3, 2, 6, 7], indices_shape=[3, 4], axis=3,
batch_dims=1),
dict(
params_shape=[2, 3, 6, 7], indices_shape=[2, 3], axis=3,
batch_dims=2),
])
def testMatchesDenseGather(self,
params_shape,
indices_shape,
axis=None,
batch_dims=0):
# Build random params & indices matrics w/ the expected shapes.
if axis is None:
axis = batch_dims
params = np.random.randint(100, size=params_shape, dtype=np.int32)
indices = np.random.randint(
params_shape[axis], size=indices_shape, dtype=np.int32)
# Use array_ops.gather to get the expected value.
expected = array_ops.gather(
params, indices, axis=axis, batch_dims=batch_dims)
# Build ragged tensors with varying ragged_ranks from params & axis.
params_tensors = [params] + [
ragged_tensor.RaggedTensor.from_tensor(params, ragged_rank=i)
for i in range(1, len(params_shape))
]
indices_tensors = [indices] + [
ragged_tensor.RaggedTensor.from_tensor(indices, ragged_rank=i)
for i in range(1, len(indices_shape))
]
# For each combination of params & axis tensors, check that
# ragged_gather_ops.gather matches array_ops.gather.
for params_tensor in params_tensors:
for indices_tensor in indices_tensors:
actual = ragged_gather_ops.gather(
params_tensor, indices_tensor, axis=axis, batch_dims=batch_dims)
if isinstance(actual, ragged_tensor.RaggedTensor):
actual = actual.to_tensor()
self.assertAllEqual(
expected, actual, 'params.ragged_rank=%s, indices.ragged_rank=%s' %
(getattr(params_tensor, 'ragged_rank',
0), getattr(indices_tensor, 'ragged_rank', 0)))
if __name__ == '__main__':
googletest.main()
| RaggedGatherOpTest |
python | pytorch__pytorch | test/inductor/test_aot_inductor_package.py | {
"start": 2581,
"end": 39115
} | class ____(TestCase):
def check_model(
self: TestCase,
model,
example_inputs,
inductor_configs=None,
dynamic_shapes=None,
atol=None,
rtol=None,
) -> AOTICompiledModel:
with torch.no_grad():
torch.manual_seed(0)
model = model.to(self.device)
ref_model = copy.deepcopy(model)
ref_inputs = copy.deepcopy(example_inputs)
expected = ref_model(*ref_inputs)
inductor_configs = inductor_configs or {}
inductor_configs["aot_inductor.package_cpp_only"] = self.package_cpp_only
torch.manual_seed(0)
with WritableTempFile(suffix=".pt2") as f:
compiled_model = compile(
model,
example_inputs,
dynamic_shapes=dynamic_shapes,
inductor_configs=inductor_configs,
package_path=f.name,
)
actual = compiled_model(*example_inputs)
self.assertEqual(actual, expected, atol=atol, rtol=rtol)
return compiled_model
def check_package_cpp_only(self: TestCase) -> None:
"""
Check if cmake and make are available.
Skip self.package_cpp_only=False tests
"""
if not self.package_cpp_only:
raise unittest.SkipTest("Only meant to test cpp package")
if shutil.which("cmake") is None:
raise unittest.SkipTest("cmake is not available")
if shutil.which("make") is None:
raise unittest.SkipTest("make is not available")
def cmake_compile_and_run(self, base_dir):
custom_env = os.environ.copy()
custom_env["CMAKE_PREFIX_PATH"] = ":".join(
[str(Path(torch.__file__).parent)]
+ os.environ.get("CMAKE_PREFIX_PATH", "").split(":")
)
build_path = Path(base_dir) / "build"
build_path.mkdir()
subprocess.run(
["cmake", ".."],
cwd=build_path,
env=custom_env,
check=True,
)
subprocess.run(["make"], cwd=build_path, check=True)
result = subprocess.run(
["./build/main"],
cwd=base_dir,
check=True,
capture_output=True,
text=True,
)
return result
def cmake_compile(self, model, example_inputs, options, tmp_dir):
"""
Exports model, compiles it using AOTInductor, extracts the
generated files to tmp_dir, and builds the C++ code using CMake and Make.
Returns:
- build_path (Path): Path to the CMake build directory containing the compiled binary.
- tmp_path (Path): Path to the extracted model source directory.
"""
ep = torch.export.export(model, example_inputs)
package_path = torch._inductor.aoti_compile_and_package(
ep, inductor_configs=options
)
with (
zipfile.ZipFile(package_path, "r") as zip_ref,
):
filenames = zip_ref.namelist()
prefix = filenames[0].split("/")[0]
zip_ref.extractall(tmp_dir)
tmp_path = Path(tmp_dir) / prefix / "data" / "aotinductor" / "model"
self.assertTrue(tmp_path.exists())
# Create a build directory to run cmake
build_path = tmp_path / "build"
self.assertTrue(not build_path.exists())
build_path.mkdir()
custom_env = os.environ.copy()
custom_env["CMAKE_PREFIX_PATH"] = ":".join(
[str(Path(torch.__file__).parent)]
+ os.environ.get("CMAKE_PREFIX_PATH", "").split(":")
)
subprocess.run(
["cmake", ".."],
cwd=build_path,
env=custom_env,
check=True,
)
subprocess.run(["make"], cwd=build_path, check=True)
return build_path, tmp_path
def test_add(self):
class Model(torch.nn.Module):
def forward(self, x, y):
return x + y
example_inputs = (
torch.randn(10, 10, device=self.device),
torch.randn(10, 10, device=self.device),
)
self.check_model(Model(), example_inputs)
def test_remove_intermediate_files(self):
# For CUDA, generated cpp files contain absolute path to the generated cubin files.
# With the package artifact, that cubin path should be overridden at the run time,
# so removing those intermediate files in this test to verify that.
class Model(torch.nn.Module):
def forward(self, x, y):
return x + y
example_inputs = (
torch.randn(10, 10, device=self.device),
torch.randn(10, 10, device=self.device),
)
model = Model()
with torch.no_grad():
torch.manual_seed(0)
model = model.to(self.device)
ref_model = copy.deepcopy(model)
ref_inputs = copy.deepcopy(example_inputs)
expected = ref_model(*ref_inputs)
torch.manual_seed(0)
with WritableTempFile(suffix=".pt2") as f:
ep = torch.export.export(model, example_inputs, strict=True)
with fresh_cache():
# cubin files are removed when exiting this context
package_path = torch._inductor.aoti_compile_and_package(
ep,
package_path=f.name,
) # type: ignore[arg-type]
loaded = torch._inductor.aoti_load_package(package_path)
actual = loaded(*example_inputs)
self.assertEqual(actual, expected)
def test_linear(self):
class Model(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.linear = torch.nn.Linear(10, 10)
def forward(self, x, y):
return x + self.linear(y)
example_inputs = (
torch.randn(10, 10, device=self.device),
torch.randn(10, 10, device=self.device),
)
self.check_model(Model(), example_inputs)
@unittest.skipIf(IS_FBCODE, "cmake won't work in fbcode")
@unittest.skipIf(
TEST_CUDA and _get_torch_cuda_version() < (12, 6),
"Test is only supported on CUDA 12.6+",
)
def test_compile_after_package(self):
self.check_package_cpp_only()
class Model(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.linear = torch.nn.Linear(10, 10)
def forward(self, x, y):
return x + self.linear(y)
with torch.no_grad():
example_inputs = (
torch.randn(10, 10, device=self.device),
torch.randn(10, 10, device=self.device),
)
model = Model().to(device=self.device)
expected = model(*example_inputs)
options = {
"aot_inductor.package_cpp_only": self.package_cpp_only,
# Require kernels to be compiled into .o files
"aot_inductor.embed_kernel_binary": True,
}
with (
tempfile.TemporaryDirectory() as tmp_dir,
):
build_path, tmp_path = self.cmake_compile(
model, example_inputs, options, tmp_dir
)
if self.device == GPU_TYPE:
kernel_bin = get_kernel_bin_format(self.device)
self.assertTrue(not list(tmp_path.glob(f"*.{kernel_bin}")))
# Check if .cubin.o files exist and use unique kernel names
self.assertTrue(list(tmp_path.glob(f"triton_*.{kernel_bin}.o")))
# Check if the .so file was build successfully
so_path = build_path / "libaoti_model.so"
self.assertTrue(so_path.exists())
optimized = torch._export.aot_load(str(so_path), self.device)
actual = optimized(*example_inputs)
self.assertTrue(torch.allclose(actual, expected))
@unittest.skipIf(
torch.version.hip is None and _get_torch_cuda_version() < (12, 8),
"Test is only supported on CUDA 12.8+",
)
@unittest.skipIf(IS_FBCODE, "cmake won't work in fbcode")
@skipIfXpu # doesn't support multi-arch binary
def test_compile_after_package_multi_arch(self):
if self.device != GPU_TYPE:
raise unittest.SkipTest("Only meant to test GPU_TYPE")
self.check_package_cpp_only()
class Model(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.linear = torch.nn.Linear(10, 10)
def forward(self, x, y):
return x + self.linear(y)
with torch.no_grad():
example_inputs = (
torch.randn(10, 10, device=self.device),
torch.randn(10, 10, device=self.device),
)
model = Model().to(device=self.device)
expected = model(*example_inputs)
options = {
"aot_inductor.package_cpp_only": self.package_cpp_only,
# Expect kernel to be embedded in the final binary.
# We will make it the default behavior for the standalone mode.
"aot_inductor.emit_multi_arch_kernel": True,
"aot_inductor.embed_kernel_binary": True,
}
with (
tempfile.TemporaryDirectory() as tmp_dir,
):
build_path, _ = self.cmake_compile(
model, example_inputs, options, tmp_dir
)
# Check if the .so file was build successfully
so_path = build_path / "libaoti_model.so"
self.assertTrue(so_path.exists())
optimized = torch._export.aot_load(str(so_path), self.device)
actual = optimized(*example_inputs)
self.assertTrue(torch.allclose(actual, expected))
@unittest.skipIf(
_get_torch_cuda_version() < (12, 6), "Test is only supported on CUDA 12.6+"
)
@unittest.skipIf(IS_FBCODE, "cmake won't work in fbcode")
@skipIfXpu # build system may be different
@torch._inductor.config.patch("test_configs.use_libtorch", True)
def test_compile_after_package_static(self):
# compile_standalone will set package_cpp_only=True
self.check_package_cpp_only()
class Model(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.linear = torch.nn.Linear(10, 10)
def forward(self, x, y):
return x + self.linear(y)
with torch.no_grad():
example_inputs = (
torch.randn(10, 10, device=self.device),
torch.randn(10, 10, device=self.device),
)
model = Model().to(device=self.device)
# Test compilation when no name is passed in
options = {
"aot_inductor_mode.compile_standalone": True,
}
with (
tempfile.TemporaryDirectory() as tmp_dir,
):
build_path, _ = self.cmake_compile(
model, example_inputs, options, tmp_dir
)
# Check if the .a file was build successfully
a_path = build_path / "libaoti_model.a"
self.assertTrue(a_path.exists())
# Test compilation when model name is passed in
options = {
"aot_inductor_mode.compile_standalone": True,
"aot_inductor.model_name_for_generated_files": "linear",
}
with (
tempfile.TemporaryDirectory() as tmp_dir,
):
build_path, _ = self.cmake_compile(
model, example_inputs, options, tmp_dir
)
# Check if the .a file was build successfully
a_path = build_path / "liblinear.a"
self.assertTrue(a_path.exists())
# test invalid model name
options = {
"aot_inductor_mode.compile_standalone": True,
"aot_inductor.model_name_for_generated_files": "linear/linear",
}
with self.assertRaisesRegex(Exception, "Invalid AOTI model name"):
self.cmake_compile(model, example_inputs, options, "")
@unittest.skipIf(IS_FBCODE, "cmake won't work in fbcode")
@skipIfXpu # build system may be different
@torch._inductor.config.patch("test_configs.use_libtorch", True)
def test_compile_standalone_cos(self):
# compile_standalone will set package_cpp_only=True
self.check_package_cpp_only()
class Model(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
def forward(self, x):
return torch.cos(x)
with torch.no_grad():
example_inputs = (torch.randn(8, 32, device=self.device),)
model = Model().to(device=self.device)
# Test compilation when model name is passed in
options = {
"aot_inductor_mode.compile_standalone": True,
"aot_inductor.model_name_for_generated_files": "cos",
}
with (
tempfile.TemporaryDirectory() as tmp_dir,
):
build_path, _ = self.cmake_compile(
model, example_inputs, options, tmp_dir
)
# Check if the .a file was build successfully
a_path = build_path / "libcos.a"
self.assertTrue(a_path.exists())
@unittest.skipIf(
torch.version.hip is None and _get_torch_cuda_version() < (12, 6),
"Test is only supported on CUDA 12.6+",
)
@unittest.skipIf(IS_FBCODE, "cmake won't work in fbcode")
@skipIfXpu # doesn't support multi-arch binary
@torch._inductor.config.patch("test_configs.use_libtorch", True)
def test_compile_with_exporter(self):
self.check_package_cpp_only()
class Model1(torch.nn.Module):
def forward(self, x, y):
return x + y
class Model2(torch.nn.Module):
def forward(self, x, y):
return x - y
def default(*args, **kwargs):
return None
example_inputs = (
torch.ones(3, 3).to(self.device),
torch.ones(3, 3).to(self.device),
)
package = _ExportPackage()
m1 = Model1()
m2 = Model2()
exporter1 = package._exporter("Plus", m1)._define_overload("default", default)
exporter2 = package._exporter("Minus", m2)._define_overload("default", default)
exporter1(*example_inputs)
exporter2(*example_inputs)
for package_example_inputs in [True, False]:
with (
tempfile.TemporaryDirectory() as tmp_dir,
):
package._compiled_and_package(
tmp_dir + "/package.pt2", True, package_example_inputs
)
# Test compiling generated files
result = self.cmake_compile_and_run(tmp_dir)
if package_example_inputs:
if self.device == GPU_TYPE:
self.assertEqual(
result.stdout,
"output_tensor1\n 2 2 2\n 2 2 2\n 2 2 2\n[ CUDAFloatType{3,3} ]\noutput_tensor2\n 0 0 0\n"
" 0 0 0\n 0 0 0\n[ CUDAFloatType{3,3} ]\n",
)
else:
self.assertEqual(
result.stdout,
"output_tensor1\n 2 2 2\n 2 2 2\n 2 2 2\n[ CPUFloatType{3,3} ]\noutput_tensor2\n 0 0 0\n"
" 0 0 0\n 0 0 0\n[ CPUFloatType{3,3} ]\n",
)
@unittest.skipIf(
torch.version.hip is None and _get_torch_cuda_version() < (12, 6),
"Test is only supported on CUDA 12.6+",
)
@unittest.skipIf(IS_FBCODE, "cmake won't work in fbcode")
@skipIfXpu # doesn't support multi-arch binary
@torch._inductor.config.patch("test_configs.use_libtorch", True)
def test_compile_with_exporter_weights(self):
self.check_package_cpp_only()
class Model(torch.nn.Module):
def __init__(self):
super().__init__()
self.fc1 = torch.nn.Linear(3, 3)
def forward(self, x):
x = self.fc1(x)
return x
def default(*args, **kwargs):
return None
example_inputs = (torch.ones(3, 3).to(self.device),)
package = _ExportPackage()
m1 = Model().to(self.device)
exporter1 = package._exporter("Model", m1)._define_overload("default", default)
exporter1(*example_inputs)
expected_res = m1(*example_inputs)
package_example_inputs = True
with (
tempfile.TemporaryDirectory() as tmp_dir,
):
package._compiled_and_package(
tmp_dir + "/package.pt2", True, package_example_inputs
)
# Test compiling generated files
self.cmake_compile_and_run(tmp_dir)
tensor_model = torch.load(
tmp_dir + "/output_tensor1.pt", weights_only=False
)
true_res = next(iter(tensor_model.parameters()))
self.assertEqual(expected_res, true_res)
def test_metadata(self):
class Model(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.linear = torch.nn.Linear(10, 10)
def forward(self, x, y):
return x + self.linear(y)
example_inputs = (
torch.randn(10, 10, device=self.device),
torch.randn(10, 10, device=self.device),
)
metadata = {"dummy": "moo"}
with torch.no_grad():
torch.manual_seed(0)
model = Model().to(device=self.device)
ref_model = copy.deepcopy(model)
ref_inputs = copy.deepcopy(example_inputs)
expected = ref_model(*ref_inputs)
inductor_configs = {
"aot_inductor.package_cpp_only": self.package_cpp_only,
"aot_inductor.metadata": metadata,
}
with WritableTempFile(suffix=".pt2") as f:
ep = torch.export.export(model, example_inputs, strict=False)
package_path = torch._inductor.aoti_compile_and_package(
ep, package_path=f.name, inductor_configs=inductor_configs
) # type: ignore[arg-type]
# We can load the metadata w/o loading the actual package
loaded_metadata = (
torch._C._aoti.AOTIModelPackageLoader.load_metadata_from_package(
package_path, "model"
)
)
self.assertEqual(loaded_metadata.get("dummy"), "moo")
device = loaded_metadata["AOTI_DEVICE_KEY"]
current_device_info = torch._inductor.codecache.get_device_information(
device
)
for k, v in current_device_info.items():
self.assertTrue(k in loaded_metadata)
self.assertEqual(v, loaded_metadata[k])
compiled_model = torch._inductor.aoti_load_package(package_path)
actual = compiled_model(*example_inputs)
self.assertEqual(actual, expected)
loaded_metadata = compiled_model.get_metadata() # type: ignore[attr-defined]
self.assertEqual(loaded_metadata.get("dummy"), "moo")
def test_bool_input(self):
# Specialize on whichever branch the example input for b is
class Model(torch.nn.Module):
def forward(self, x, b):
if b:
return x * x
else:
return x + x
example_inputs = (torch.randn(3, 3, device=self.device), True)
self.check_model(Model(), example_inputs)
def test_multiple_methods(self):
options = {
"aot_inductor.package": True,
"aot_inductor.package_cpp_only": self.package_cpp_only,
}
class Model1(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
def forward(self, a, b):
return torch.cat([a, b], dim=0)
dim0_a = Dim("dim0_a", min=1, max=10)
dim0_b = Dim("dim0_b", min=1, max=20)
dynamic_shapes = {"a": {0: dim0_a}, "b": {0: dim0_b}}
example_inputs1 = (
torch.randn(2, 4, device=self.device),
torch.randn(3, 4, device=self.device),
)
ep1 = torch.export.export(
Model1(), example_inputs1, dynamic_shapes=dynamic_shapes, strict=True
)
aoti_files1 = torch._inductor.aot_compile(
ep1.module(), example_inputs1, options=options
)
class Model2(torch.nn.Module):
def __init__(self, device):
super().__init__()
self.device = device
def forward(self, x):
t = torch.tensor(x.size(-1), device=self.device, dtype=torch.float)
t = torch.sqrt(t * 3)
return x * t
example_inputs2 = (torch.randn(5, 5, device=self.device),)
ep2 = torch.export.export(Model2(self.device), example_inputs2, strict=True)
aoti_files2 = torch._inductor.aot_compile(
ep2.module(), example_inputs2, options=options
)
with WritableTempFile(suffix=".pt2") as f:
package_path = package_aoti(
f.name, {"model1": aoti_files1, "model2": aoti_files2}
)
loaded1 = load_package(package_path, "model1")
loaded2 = load_package(package_path, "model2")
self.assertEqual(loaded1(*example_inputs1), ep1.module()(*example_inputs1))
self.assertEqual(loaded2(*example_inputs2), ep2.module()(*example_inputs2))
@unittest.skipIf(not HAS_GPU, "requires gpu")
def test_duplicate_calls(self):
options = {
"aot_inductor.package": True,
}
device = GPU_TYPE
class Model1(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
def forward(self, a, b):
return torch.cat([a, b], dim=0)
dim0_a = Dim("dim0_a", min=1, max=10)
dim0_b = Dim("dim0_b", min=1, max=20)
dynamic_shapes = {"a": {0: dim0_a}, "b": {0: dim0_b}}
example_inputs1 = (
torch.randn(2, 4, device=device),
torch.randn(3, 4, device=device),
)
self.check_model(Model1(), example_inputs1)
ep1 = torch.export.export(
Model1(), example_inputs1, dynamic_shapes=dynamic_shapes, strict=True
)
aoti_files1 = torch._inductor.aot_compile(
ep1.module(), example_inputs1, options=options
)
device = "cpu"
example_inputs2 = (
torch.randn(2, 4, device=device),
torch.randn(3, 4, device=device),
)
ep2 = torch.export.export(
Model1(), example_inputs2, dynamic_shapes=dynamic_shapes, strict=True
)
aoti_files2 = torch._inductor.aot_compile(
ep2.module(), example_inputs2, options=options
)
with WritableTempFile(suffix=".pt2") as f:
package_path = package_aoti(
f.name, {"model1": aoti_files1, "model2": aoti_files2}
)
loaded1 = load_package(package_path, "model1")
loaded2 = load_package(package_path, "model2")
self.assertTrue(
torch.allclose(loaded1(*example_inputs1), ep1.module()(*example_inputs1))
)
self.assertTrue(
torch.allclose(loaded2(*example_inputs2), ep2.module()(*example_inputs2))
)
def test_specified_output_dir(self):
class Model(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
def forward(self, a, b):
return torch.cat([a, b], dim=0)
example_inputs = (
torch.randn(2, 4, device=self.device),
torch.randn(3, 4, device=self.device),
)
ep = torch.export.export(Model(), example_inputs, strict=True)
aoti_files = torch._inductor.aot_compile(
ep.module(),
example_inputs,
options={
"aot_inductor.output_path": "tmp_output_",
"aot_inductor.package": True,
"aot_inductor.package_cpp_only": self.package_cpp_only,
},
)
with WritableTempFile(suffix=".pt2") as f:
package_path = package_aoti(f.name, {"model1": aoti_files})
loaded = load_package(package_path, "model1")
self.assertTrue(
torch.allclose(loaded(*example_inputs), ep.module()(*example_inputs))
)
def test_save_buffer(self):
class Model(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
def forward(self, a, b):
return torch.cat([a, b], dim=0)
example_inputs = (
torch.randn(2, 4, device=self.device),
torch.randn(3, 4, device=self.device),
)
ep = torch.export.export(Model(), example_inputs, strict=True)
buffer = io.BytesIO()
buffer = torch._inductor.aoti_compile_and_package(ep, package_path=buffer) # type: ignore[arg-type]
for _ in range(2):
loaded = load_package(buffer)
self.assertTrue(
torch.allclose(loaded(*example_inputs), ep.module()(*example_inputs))
)
@skipif(
lambda device, package_cpp_only: package_cpp_only,
"No support for cpp only",
)
def test_package_without_weight(self):
class Model(torch.nn.Module):
def __init__(self, n, k, device):
super().__init__()
self.linear = torch.nn.Linear(k, n, device=device)
def forward(self, a):
return self.linear(a)
M, N, K = 128, 2048, 4096
model = Model(N, K, self.device)
example_inputs = (torch.randn(M, K, device=self.device),)
inductor_configs = {
"always_keep_tensor_constants": True,
"aot_inductor.package_constants_in_so": False,
}
compiled = compile(model, example_inputs, inductor_configs=inductor_configs)
self.assertEqual(
set(compiled.get_constant_fqns()), set(model.state_dict().keys())
)
compiled.load_constants(model.state_dict(), check_full_update=True)
test_inputs = torch.randn(M, K, device=self.device)
expected = model(test_inputs)
output = compiled(test_inputs)
self.assertEqual(expected, output)
@skipif(
lambda device, package_cpp_only: package_cpp_only,
"No support for cpp only",
)
def test_package_user_managed_weight(self):
class Model(torch.nn.Module):
def __init__(self, n, k, device):
super().__init__()
self.linear = torch.nn.Linear(k, n, device=device)
def forward(self, a):
return self.linear(a)
M, N, K = 128, 4096, 4096
model = Model(N, K, self.device)
example_inputs = (torch.randn(M, K, device=self.device),)
inductor_configs = {
"always_keep_tensor_constants": True,
"aot_inductor.package_constants_in_so": False,
}
compiled = compile(model, example_inputs, inductor_configs=inductor_configs)
self.assertEqual(
set(compiled.get_constant_fqns()), set(model.state_dict().keys())
)
compiled.load_constants(
model.state_dict(), check_full_update=True, user_managed=False
)
test_inputs = torch.randn(M, K, device=self.device)
expected = model(test_inputs)
output = compiled(test_inputs)
self.assertEqual(expected, output)
# Let's try to modify the weight in-place, result shouldn't change.
model.linear.weight.data *= 3.7
new_output = compiled(test_inputs)
self.assertEqual(new_output, output)
# Recreate a new model that we will test against user_managed=True
new_compiled = compile(model, example_inputs, inductor_configs=inductor_configs)
new_compiled.load_constants(
model.state_dict(), check_full_update=True, user_managed=True
)
expected = model(test_inputs)
new_output = new_compiled(test_inputs)
self.assertEqual(expected, new_output)
# Try to modify the weight in-place, result should change.
model.linear.weight.data *= 3.7
expected = model(test_inputs)
new_output = new_compiled(test_inputs)
self.assertEqual(new_output, expected)
def test_deepcopy_compiled_model(self):
class Model(torch.nn.Module):
def forward(self, x, y):
return x + y
example_inputs = (
torch.randn(10, 10, device=self.device),
torch.randn(10, 10, device=self.device),
)
model = Model()
compiled = compile(model, example_inputs)
copmiled_copy = copy.deepcopy(compiled)
expected = model(*example_inputs)
output = compiled(*example_inputs)
output_copy = copmiled_copy(*example_inputs)
self.assertEqual(expected, output)
self.assertEqual(expected, output_copy)
@skipif(
lambda device, package_cpp_only: package_cpp_only,
"No support for cpp only",
)
def test_update_weights(self):
class Model(torch.nn.Module):
def __init__(self, n, k, device):
super().__init__()
self.linear = torch.nn.Linear(k, n, device=device)
def forward(self, a):
return self.linear(a)
M, N, K = 128, 2048, 4096
model = Model(N, K, self.device)
example_inputs = (torch.randn(M, K, device=self.device),)
compiled = self.check_model(model, example_inputs)
new_state_dict = {
"linear.weight": torch.randn(N, K, device=self.device),
"linear.bias": torch.randn(N, device=self.device),
}
model.load_state_dict(new_state_dict)
compiled.load_constants(model.state_dict(), check_full_update=True)
test_inputs = torch.randn(M, K, device=self.device)
expected = model(test_inputs)
output = compiled(test_inputs)
self.assertEqual(expected, output)
@skipif(
lambda device, package_cpp_only: package_cpp_only,
"No support for cpp only",
)
def test_package_shared_weights(self):
options = {
"aot_inductor.package": True,
"aot_inductor.package_cpp_only": self.package_cpp_only,
"always_keep_tensor_constants": True,
"aot_inductor.package_constants_in_so": False,
"aot_inductor.package_constants_on_disk_format": "pickle_weights",
}
class Bar(torch.nn.Module):
def __init__(self, p1, p2):
super().__init__()
self.p1 = p1
self.register_buffer("p2", p2)
def forward(self):
self.p1 += 1
self.p2 += 1
return self.p1, self.p2
class Bar2(torch.nn.Module):
def __init__(self, p1, p2):
super().__init__()
self.p1 = p1
self.register_buffer("p2", p2[2:3])
def forward(self):
self.p1 += 3
self.p2 += 3
return self.p1, self.p2
x = torch.randn(3, 4)
y = torch.randn(3, 4)
buffer = torch.nn.Buffer(x.clone())
buffer2 = torch.nn.Buffer(y.clone())
bar1 = Bar(buffer, buffer2)
bar2 = Bar2(buffer, buffer2)
ep1 = torch.export.export(bar1, ())
ep2 = torch.export.export(bar2, ())
aoti_files1 = torch._inductor.aot_compile(ep1.module(), (), options=options)
aoti_files2 = torch._inductor.aot_compile(ep2.module(), (), options=options)
with WritableTempFile(suffix=".pt2") as f:
package_path = package_aoti(
f.name,
{"model1": aoti_files1, "model2": aoti_files2},
)
pt2_contents = load_pt2(package_path, load_weights_from_disk=True)
loaded1 = pt2_contents.aoti_runners["model1"]
loaded2 = pt2_contents.aoti_runners["model2"]
# note that loading like below doesn't work, because new weights will be loaded
# for each load_package call.
# loaded1 = load_package(package_path, "model1")
# loaded2 = load_package(package_path, "model2")
result_1_p1, result_1_p2 = loaded1()
self.assertEqual(result_1_p1, x + 1)
self.assertEqual(result_1_p2, y + 1)
result_2_p1, result_2_p2 = loaded2()
# the result already incremented by 1 from the run above
self.assertEqual(result_2_p1, x + 4)
self.assertEqual(result_2_p2, y[2:3] + 4)
# note that the returned result will not change though p2 changed
self.assertEqual(result_1_p2, y + 1)
# test shared weights but user managed
gm1 = ep1.module()
gm2 = ep2.module()
load_weights_to_pt2_contents(
pt2_contents, {"model1": gm1.state_dict(), "model2": gm2.state_dict()}
)
result_1_p1, result_1_p2 = loaded1()
self.assertEqual(result_1_p1, x + 1)
self.assertEqual(result_1_p2, y + 1)
self.assertEqual(gm1.p1, x + 1)
self.assertEqual(gm1.p2, y + 1)
@skipif(
lambda device, package_cpp_only: package_cpp_only,
"No support for cpp only",
)
def test_package_weights_on_disk_nested_module(self):
options = {
"aot_inductor.package": True,
"aot_inductor.package_cpp_only": self.package_cpp_only,
"always_keep_tensor_constants": True,
"aot_inductor.package_constants_in_so": False,
"aot_inductor.package_constants_on_disk_format": "pickle_weights",
}
# linear.weight's node name is linear_weight.
# This unit test tests that we package the right weight name
# `liear.weight`, but not `linear_weight`
class Bar(torch.nn.Module):
def __init__(self):
super().__init__()
self.linear = torch.nn.Linear(3, 3)
def forward(self, x):
return self.linear(x)
x = torch.randn(3, 3).to(self.device)
bar1 = Bar().to(self.device)
ep = torch.export.export(bar1, (x,))
package_path = torch._inductor.aoti_compile_and_package(
ep, inductor_configs=options
)
pt2_contents = load_pt2(package_path, load_weights_from_disk=True)
loaded1 = pt2_contents.aoti_runners["model"]
self.assertEqual(loaded1(x), bar1(x))
def test_loading_wrong_model(self):
class Model(torch.nn.Module):
def forward(self, x):
return x + 1
example_inputs = (torch.randn(10, 10, device=self.device),)
model = Model()
ep = torch.export.export(model, example_inputs)
package_path = torch._inductor.aoti_compile_and_package(ep)
with self.assertRaisesRegex(
RuntimeError,
"Failed to find a generated cpp file or so file for model 'forward' in the zip archive.",
):
load_package(package_path, model_name="forward")
if __name__ == "__main__":
from torch._inductor.test_case import run_tests
if HAS_GPU or sys.platform == "darwin":
run_tests(needs="filelock")
| TestAOTInductorPackage |
python | Lightning-AI__lightning | src/lightning/fabric/strategies/fsdp.py | {
"start": 34791,
"end": 41236
} | class ____(_BackwardSyncControl):
@override
def no_backward_sync(self, module: Module, enabled: bool) -> AbstractContextManager:
"""Blocks gradient synchronization inside the :class:`~torch.distributed.fsdp.FullyShardedDataParallel`
wrapper."""
if not enabled:
return nullcontext()
from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel
if not isinstance(module, FullyShardedDataParallel):
# the root must be wrapped
raise TypeError(
"Blocking backward sync is only possible if the module passed to"
f" `{type(self).__name__}.no_backward_sync` is wrapped in `FullyShardedDataParallel`."
f" Got: {module.__class__.__name__}."
)
return module.no_sync()
def _init_cpu_offload(cpu_offload: Optional[Union[bool, "CPUOffload"]]) -> "CPUOffload":
from torch.distributed.fsdp import CPUOffload
return cpu_offload if isinstance(cpu_offload, CPUOffload) else CPUOffload(offload_params=bool(cpu_offload))
def _init_sharding_strategy(sharding_strategy: "_SHARDING_STRATEGY", kwargs: dict) -> "ShardingStrategy":
from torch.distributed.fsdp import ShardingStrategy
if kwargs.get("process_group") is not None and kwargs.get("device_mesh") is not None:
raise ValueError(
"The arguments `FSDPStrategy(process_group=..., device_mesh=...)` are mutually exclusive."
"Pass only one of them."
)
strategy = ShardingStrategy[sharding_strategy.upper()] if isinstance(sharding_strategy, str) else sharding_strategy
if (
"HYBRID" in strategy.name
and kwargs.get("auto_wrap_policy") is None
and kwargs.get("process_group") is None
and kwargs.get("device_mesh") is None
):
raise RuntimeError(
"The hybrid sharding strategy requires you to pass at least one of the parameters: `auto_wrap_policy`,"
" `process_group` tuple, or `device_mesh`."
)
return strategy
def _optimizer_has_flat_params(optimizer: Optimizer) -> bool:
return any(
getattr(param, "_fsdp_flattened", False) for group in optimizer.param_groups for param in group["params"]
)
def _get_sharded_state_dict_context(module: Module) -> Generator[None, None, None]:
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp.api import ShardedOptimStateDictConfig, ShardedStateDictConfig, StateDictType
state_dict_config = ShardedStateDictConfig(offload_to_cpu=True)
optim_state_dict_config = ShardedOptimStateDictConfig(offload_to_cpu=True)
state_dict_type_context = FSDP.state_dict_type(
module=module,
state_dict_type=StateDictType.SHARDED_STATE_DICT,
state_dict_config=state_dict_config,
optim_state_dict_config=optim_state_dict_config,
)
return state_dict_type_context # type: ignore[return-value]
def _get_full_state_dict_context(
module: Module, world_size: int, rank0_only: bool = True
) -> Generator[None, None, None]:
from torch.distributed.fsdp import FullStateDictConfig, StateDictType
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp.api import FullOptimStateDictConfig
state_dict_config = FullStateDictConfig(offload_to_cpu=True, rank0_only=rank0_only)
optim_state_dict_config = FullOptimStateDictConfig(offload_to_cpu=True, rank0_only=rank0_only)
state_dict_type_context = FSDP.state_dict_type(
module=module,
state_dict_type=StateDictType.FULL_STATE_DICT,
state_dict_config=state_dict_config,
optim_state_dict_config=optim_state_dict_config,
)
return state_dict_type_context # type: ignore[return-value]
def _is_sharded_checkpoint(path: Path) -> bool:
"""A heuristic check to determine whether the path points to a directory with checkpoint shards."""
return path.is_dir() and (path / _METADATA_FILENAME).is_file()
def _is_full_checkpoint(path: Path) -> bool:
return path.is_file()
def _has_fsdp_modules(module: object) -> TypeGuard[Module]:
from torch.distributed.fsdp import FullyShardedDataParallel
return isinstance(module, Module) and any(isinstance(m, FullyShardedDataParallel) for m in module.modules())
def _move_torchmetrics_to_device(module: torch.nn.Module, device: torch.device) -> None:
# FSDP doesn't move modules without parameters (e.g. Metrics) to the device
# https://github.com/pytorch/pytorch/issues/113113
if not RequirementCache("torchmetrics"):
return
from torchmetrics import Metric
for metric in (m for m in module.modules() if isinstance(m, Metric)):
metric.to(device) # `.to()` is in-place
def _distributed_checkpoint_save(converted_state: dict[str, Any], path: Path) -> None:
if _TORCH_GREATER_EQUAL_2_3:
from torch.distributed.checkpoint import save
# let torch automatically infer the writer to use. This might also support fsspec paths in the future
# https://github.com/pytorch/pytorch/issues/118036
save(converted_state, checkpoint_id=path)
else: # deprecated
from torch.distributed.checkpoint import FileSystemWriter
if _TORCH_GREATER_EQUAL_2_2:
from torch.distributed.checkpoint import save
else:
from torch.distributed.checkpoint import save_state_dict as save
# FSDP's FileSystemWriter streams the tensors to disk to minimize memory peaks
writer = FileSystemWriter(path=path, single_file_per_rank=True)
save(converted_state, writer)
def _distributed_checkpoint_load(module_state: dict[str, Any], path: Path) -> None:
if _TORCH_GREATER_EQUAL_2_3:
from torch.distributed.checkpoint import load
# let torch automatically infer the reader to use. This might also support fsspec paths in the future
# https://github.com/pytorch/pytorch/issues/118036
load(module_state, checkpoint_id=path)
else: # deprecated
from torch.distributed.checkpoint import FileSystemReader
if _TORCH_GREATER_EQUAL_2_2:
from torch.distributed.checkpoint import load
else:
from torch.distributed.checkpoint import load_state_dict as load
reader = FileSystemReader(path=path)
load(module_state, reader)
| _FSDPBackwardSyncControl |
python | ansible__ansible | test/lib/ansible_test/_internal/cli/argparsing/parsers.py | {
"start": 16164,
"end": 17708
} | class ____(Parser, metaclass=abc.ABCMeta):
"""Base class for composite argument parsers that store their results in a namespace."""
def parse(self, state: ParserState) -> t.Any:
"""Parse the input from the given state and return the result."""
namespace = state.current_namespace
current = getattr(namespace, self.dest)
if current and self.limit_one:
if state.mode == ParserMode.PARSE:
raise ParserError('Option cannot be specified more than once.')
raise CompletionError('Option cannot be specified more than once.')
value = self.get_value(state)
if self.use_list:
if not current:
current = []
setattr(namespace, self.dest, current)
current.append(value)
else:
setattr(namespace, self.dest, value)
return value
def get_value(self, state: ParserState) -> t.Any:
"""Parse the input from the given state and return the result, without storing the result in the namespace."""
return super().parse(state)
@property
def use_list(self) -> bool:
"""True if the destination is a list, otherwise False."""
return False
@property
def limit_one(self) -> bool:
"""True if only one target is allowed, otherwise False."""
return not self.use_list
@property
@abc.abstractmethod
def dest(self) -> str:
"""The name of the attribute where the value should be stored."""
| NamespaceParser |
python | airbytehq__airbyte | airbyte-ci/connectors/connectors_qa/tests/unit_tests/test_checks/test_packaging.py | {
"start": 12065,
"end": 13299
} | class ____:
def test_fail_when_missing_metadata_docker_image_tag(self, mocker):
# Arrange
connector = mocker.MagicMock(metadata={})
# Act
result = packaging.CheckVersionFollowsSemver()._run(connector)
# Assert
assert result.status == CheckStatus.FAILED
assert f"dockerImageTag is missing in {consts.METADATA_FILE_NAME}"
def test_fail_when_version_is_not_semver(self, mocker):
# Arrange
connector = mocker.MagicMock(metadata={"dockerImageTag": "1.1"})
# Act
result = packaging.CheckVersionFollowsSemver()._run(connector)
# Assert
assert result.status == CheckStatus.FAILED
assert f"Connector version {connector.metadata['dockerImageTag']} does not follow semantic versioning" in result.message
def test_pass_when_version_follows_semver(self, mocker):
# Arrange
connector = mocker.MagicMock(metadata={"dockerImageTag": "1.1.1"})
# Act
result = packaging.CheckVersionFollowsSemver()._run(connector)
# Assert
assert result.status == CheckStatus.PASSED
assert "Connector version follows semantic versioning" in result.message
| TestCheckVersionFollowsSemver |
python | facebookresearch__faiss | tests/test_build_blocks.py | {
"start": 1822,
"end": 2160
} | class ____(unittest.TestCase):
def test_maplong2long(self):
keys = np.array([13, 45, 67], dtype=np.int64)
vals = np.array([3, 8, 2], dtype=np.int64)
m = faiss.MapLong2Long()
m.add(keys, vals)
assert np.all(m.search_multiple(keys) == vals)
assert m.search(12343) == -1
| TestMapLong2Long |
python | encode__django-rest-framework | tests/schemas/test_coreapi.py | {
"start": 2356,
"end": 5241
} | class ____(ModelViewSet):
pagination_class = ExamplePagination
permission_classes = [permissions.IsAuthenticatedOrReadOnly]
filter_backends = [filters.OrderingFilter]
serializer_class = ExampleSerializer
@action(methods=['post'], detail=True, serializer_class=AnotherSerializer)
def custom_action(self, request, pk):
"""
A description of custom action.
"""
raise NotImplementedError
@action(methods=['post'], detail=True, serializer_class=AnotherSerializerWithDictField)
def custom_action_with_dict_field(self, request, pk):
"""
A custom action using a dict field in the serializer.
"""
raise NotImplementedError
@action(methods=['post'], detail=True, serializer_class=AnotherSerializerWithListFields)
def custom_action_with_list_fields(self, request, pk):
"""
A custom action using both list field and list serializer in the serializer.
"""
raise NotImplementedError
@action(detail=False)
def custom_list_action(self, request):
raise NotImplementedError
@action(methods=['post', 'get'], detail=False, serializer_class=EmptySerializer)
def custom_list_action_multiple_methods(self, request):
"""Custom description."""
raise NotImplementedError
@custom_list_action_multiple_methods.mapping.delete
def custom_list_action_multiple_methods_delete(self, request):
"""Deletion description."""
raise NotImplementedError
@action(detail=False, schema=None)
def excluded_action(self, request):
pass
def get_serializer(self, *args, **kwargs):
assert self.request
assert self.action
return super().get_serializer(*args, **kwargs)
@action(methods=['get', 'post'], detail=False)
def documented_custom_action(self, request):
"""
get:
A description of the get method on the custom action.
post:
A description of the post method on the custom action.
"""
pass
@documented_custom_action.mapping.put
def put_documented_custom_action(self, request, *args, **kwargs):
"""
A description of the put method on the custom action from mapping.
"""
pass
with override_settings(REST_FRAMEWORK={'DEFAULT_SCHEMA_CLASS': 'rest_framework.schemas.AutoSchema'}):
if coreapi:
schema_view = get_schema_view(title='Example API')
else:
def schema_view(request):
pass
router = DefaultRouter()
router.register('example', ExampleViewSet, basename='example')
urlpatterns = [
path('', schema_view),
path('', include(router.urls))
]
@unittest.skipUnless(coreapi, 'coreapi is not installed')
@override_settings(ROOT_URLCONF=__name__, REST_FRAMEWORK={'DEFAULT_SCHEMA_CLASS': 'rest_framework.schemas.AutoSchema'})
| ExampleViewSet |
python | kamyu104__LeetCode-Solutions | Python/best-time-to-buy-and-sell-stock-iv.py | {
"start": 115,
"end": 3208
} | class ____(object):
def maxProfit(self, k, prices):
"""
:type k: int
:type prices: List[int]
:rtype: int
"""
def nth_element(nums, n, compare=lambda a, b: a < b):
def tri_partition(nums, left, right, target, compare):
mid = left
while mid <= right:
if nums[mid] == target:
mid += 1
elif compare(nums[mid], target):
nums[left], nums[mid] = nums[mid], nums[left]
left += 1
mid += 1
else:
nums[mid], nums[right] = nums[right], nums[mid]
right -= 1
return left, right
left, right = 0, len(nums)-1
while left <= right:
pivot_idx = random.randint(left, right)
pivot_left, pivot_right = tri_partition(nums, left, right, nums[pivot_idx], compare)
if pivot_left <= n <= pivot_right:
return
elif pivot_left > n:
right = pivot_left-1
else: # pivot_right < n.
left = pivot_right+1
profits = []
v_p_stk = [] # mono stack, where v is increasing and p is strictly decreasing
v, p = -1, -1
while p+1 < len(prices): # at most O(n) peaks, so v_p_stk and profits are both at most O(n) space
for v in xrange(p+1, len(prices)-1):
if prices[v] < prices[v+1]:
break
else:
v = len(prices)-1
for p in xrange(v, len(prices)-1):
if prices[p] > prices[p+1]:
break
else:
p = len(prices)-1
while v_p_stk and prices[v_p_stk[-1][0]] > prices[v]: # not overlapped
last_v, last_p = v_p_stk.pop()
profits.append(prices[last_p]-prices[last_v]) # count [prices[last_v], prices[last_p]] interval
while v_p_stk and prices[v_p_stk[-1][1]] <= prices[p]: # overlapped
# prices[last_v] <= prices[v] <= prices[last_p] <= prices[p],
# treat overlapped as [prices[v], prices[last_p]], [prices[last_v], prices[p]] intervals due to invariant max profit
last_v, last_p = v_p_stk.pop()
profits.append(prices[last_p]-prices[v]) # count [prices[v], prices[last_p]] interval
v = last_v
v_p_stk.append((v, p)) # keep [prices[last_v], prices[p]] interval to check overlapped
while v_p_stk:
last_v, last_p = v_p_stk.pop()
profits.append(prices[last_p]-prices[last_v]) # count [prices[last_v], prices[last_p]] interval
if k > len(profits):
k = len(profits)
else:
nth_element(profits, k-1, compare=lambda a, b: a > b)
return sum(profits[i] for i in xrange(k)) # top k profits of nonoverlapped intervals
# Time: O(k * n)
# Space: O(k)
| Solution |
python | huggingface__transformers | src/transformers/models/d_fine/modeling_d_fine.py | {
"start": 5399,
"end": 9339
} | class ____(nn.Module):
def __init__(self, config: DFineConfig):
"""
D-Fine version of multiscale deformable attention
"""
super().__init__()
self.d_model = config.d_model
self.n_heads = config.decoder_attention_heads
self.n_levels = config.num_feature_levels
self.offset_scale = config.decoder_offset_scale
self.decoder_method = config.decoder_method
self.n_points = config.decoder_n_points
if isinstance(self.n_points, list):
num_points_list = self.n_points
else:
num_points_list = [self.n_points for _ in range(self.n_levels)]
self.num_points_list = num_points_list
num_points_scale = [1 / n for n in self.num_points_list for _ in range(n)]
self.register_buffer("num_points_scale", torch.tensor(num_points_scale, dtype=torch.float32))
self.total_points = self.n_heads * sum(self.num_points_list)
self.sampling_offsets = nn.Linear(self.d_model, self.total_points * 2)
self.attention_weights = nn.Linear(self.d_model, self.total_points)
self.ms_deformable_attn_core = multi_scale_deformable_attention_v2
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
reference_points=None,
encoder_hidden_states=None,
spatial_shapes=None,
spatial_shapes_list=None,
) -> tuple[torch.Tensor, torch.Tensor]:
batch_size, num_queries, _ = hidden_states.shape
batch_size, sequence_length, _ = encoder_hidden_states.shape
if not is_torchdynamo_compiling() and (spatial_shapes[:, 0] * spatial_shapes[:, 1]).sum() != sequence_length:
raise ValueError(
"Make sure to align the spatial shapes with the sequence length of the encoder hidden states"
)
# Reshape for multi-head attention
value = encoder_hidden_states.reshape(batch_size, sequence_length, self.n_heads, self.d_model // self.n_heads)
if attention_mask is not None:
value = value.masked_fill(~attention_mask[..., None], float(0))
sampling_offsets: torch.Tensor = self.sampling_offsets(hidden_states)
sampling_offsets = sampling_offsets.reshape(
batch_size, num_queries, self.n_heads, sum(self.num_points_list), 2
)
attention_weights = self.attention_weights(hidden_states).reshape(
batch_size, num_queries, self.n_heads, sum(self.num_points_list)
)
attention_weights = F.softmax(attention_weights, dim=-1)
if reference_points.shape[-1] == 2:
offset_normalizer = torch.tensor(spatial_shapes)
offset_normalizer = offset_normalizer.flip([1]).reshape(1, 1, 1, self.n_levels, 1, 2)
sampling_locations = (
reference_points.reshape(batch_size, sequence_length, 1, self.n_levels, 1, 2)
+ sampling_offsets / offset_normalizer
)
elif reference_points.shape[-1] == 4:
# reference_points [8, 480, None, 1, 4]
# sampling_offsets [8, 480, 8, 12, 2]
num_points_scale = self.num_points_scale.to(dtype=hidden_states.dtype).unsqueeze(-1)
offset = sampling_offsets * num_points_scale * reference_points[:, :, None, :, 2:] * self.offset_scale
sampling_locations = reference_points[:, :, None, :, :2] + offset
else:
raise ValueError(
f"Last dim of reference_points must be 2 or 4, but get {reference_points.shape[-1]} instead."
)
output = self.ms_deformable_attn_core(
value,
spatial_shapes_list,
sampling_locations,
attention_weights,
self.num_points_list,
self.decoder_method,
)
return output, attention_weights
| DFineMultiscaleDeformableAttention |
python | numba__numba | numba/cuda/stubs.py | {
"start": 1507,
"end": 1794
} | class ____(Dim3):
'''
The block indices in the grid of thread blocks. Each index is an integer
spanning the range from 0 inclusive to the corresponding value of the
attribute in :attr:`numba.cuda.gridDim` exclusive.
'''
_description_ = '<blockIdx.{x,y,z}>'
| blockIdx |
python | anthropics__anthropic-sdk-python | tests/lib/streaming/test_messages.py | {
"start": 3355,
"end": 6388
} | class ____:
@pytest.mark.respx(base_url=base_url)
def test_basic_response(self, respx_mock: MockRouter) -> None:
respx_mock.post("/v1/messages").mock(
return_value=httpx.Response(200, content=get_response("basic_response.txt"))
)
with sync_client.messages.stream(
max_tokens=1024,
messages=[
{
"role": "user",
"content": "Say hello there!",
}
],
model="claude-3-opus-latest",
) as stream:
with pytest.warns(DeprecationWarning):
assert isinstance(cast(Any, stream), Stream)
assert_basic_response([event for event in stream], stream.get_final_message())
@pytest.mark.respx(base_url=base_url)
def test_context_manager(self, respx_mock: MockRouter) -> None:
respx_mock.post("/v1/messages").mock(
return_value=httpx.Response(200, content=get_response("basic_response.txt"))
)
with sync_client.messages.stream(
max_tokens=1024,
messages=[
{
"role": "user",
"content": "Say hello there!",
}
],
model="claude-3-opus-latest",
) as stream:
assert not stream.response.is_closed
# response should be closed even if the body isn't read
assert stream.response.is_closed
@pytest.mark.respx(base_url=base_url)
def test_deprecated_model_warning_stream(self, respx_mock: MockRouter) -> None:
for deprecated_model in DEPRECATED_MODELS:
respx_mock.post("/v1/messages").mock(
return_value=httpx.Response(200, content=get_response("basic_response.txt"))
)
with pytest.warns(DeprecationWarning, match=f"The model '{deprecated_model}' is deprecated"):
with sync_client.messages.stream(
max_tokens=1024,
messages=[{"role": "user", "content": "Hello"}],
model=deprecated_model,
) as stream:
# Consume the stream to ensure the warning is triggered
stream.until_done()
@pytest.mark.respx(base_url=base_url)
def test_tool_use(self, respx_mock: MockRouter) -> None:
respx_mock.post("/v1/messages").mock(
return_value=httpx.Response(200, content=get_response("tool_use_response.txt"))
)
with sync_client.messages.stream(
max_tokens=1024,
messages=[
{
"role": "user",
"content": "Say hello there!",
}
],
model="claude-sonnet-4-20250514",
) as stream:
with pytest.warns(DeprecationWarning):
assert isinstance(cast(Any, stream), Stream)
assert_tool_use_response([event for event in stream], stream.get_final_message())
| TestSyncMessages |
python | weaviate__weaviate-python-client | mock_tests/conftest.py | {
"start": 11719,
"end": 13345
} | class ____(weaviate_pb2_grpc.WeaviateServicer):
def Search(
self, request: search_get_pb2.SearchRequest, context: grpc.ServicerContext
) -> search_get_pb2.SearchReply:
context.set_code(grpc.StatusCode.PERMISSION_DENIED)
context.set_details("Permission denied")
return search_get_pb2.SearchReply()
def TenantsGet(
self, request: tenants_pb2.TenantsGetRequest, context: ServicerContext
) -> tenants_pb2.TenantsGetReply:
context.set_code(grpc.StatusCode.PERMISSION_DENIED)
context.set_details("Permission denied")
return tenants_pb2.TenantsGetReply()
def BatchObjects(
self, request: batch_pb2.BatchObjectsRequest, context: grpc.ServicerContext
) -> batch_pb2.BatchObjectsReply:
context.set_code(grpc.StatusCode.PERMISSION_DENIED)
context.set_details("Permission denied")
return batch_pb2.BatchObjectsReply()
def BatchDelete(
self, request: batch_delete_pb2.BatchDeleteRequest, context: grpc.ServicerContext
) -> batch_delete_pb2.BatchDeleteReply:
context.set_code(grpc.StatusCode.PERMISSION_DENIED)
context.set_details("Permission denied")
return batch_delete_pb2.BatchDeleteReply()
@pytest.fixture(scope="function")
def forbidden(
weaviate_client: weaviate.WeaviateClient, start_grpc_server: grpc.Server
) -> weaviate.collections.Collection:
service = MockForbiddenWeaviateService()
weaviate_pb2_grpc.add_WeaviateServicer_to_server(service, start_grpc_server)
return weaviate_client.collections.use("ForbiddenCollection")
| MockForbiddenWeaviateService |
python | donnemartin__interactive-coding-challenges | graphs_trees/bst_min/test_bst_min.py | {
"start": 153,
"end": 669
} | class ____(unittest.TestCase):
def test_bst_min(self):
min_bst = MinBst()
array = [0, 1, 2, 3, 4, 5, 6]
root = min_bst.create_min_bst(array)
self.assertEqual(height(root), 3)
min_bst = MinBst()
array = [0, 1, 2, 3, 4, 5, 6, 7]
root = min_bst.create_min_bst(array)
self.assertEqual(height(root), 4)
print('Success: test_bst_min')
def main():
test = TestBstMin()
test.test_bst_min()
if __name__ == '__main__':
main()
| TestBstMin |
python | facebook__pyre-check | source/interprocedural_analyses/taint/test/integration/attributes.py | {
"start": 2411,
"end": 2539
} | class ____:
buffer: List[str] = []
def test_issue_with_update_to_self_attribute(d: D):
d.buffer.append(_test_source())
| D |
python | doocs__leetcode | solution/3000-3099/3012.Minimize Length of Array Using Operations/Solution.py | {
"start": 0,
"end": 197
} | class ____:
def minimumArrayLength(self, nums: List[int]) -> int:
mi = min(nums)
if any(x % mi for x in nums):
return 1
return (nums.count(mi) + 1) // 2
| Solution |
python | astropy__astropy | astropy/modeling/projections.py | {
"start": 37481,
"end": 37654
} | class ____(Pix2SkyProjection, PseudoConic):
r"""
Polyconic projection - pixel to sky.
Corresponds to the ``PCO`` projection in FITS WCS.
"""
| Pix2Sky_Polyconic |
python | redis__redis-py | tests/test_asyncio/test_multidb/test_healthcheck.py | {
"start": 9790,
"end": 15115
} | class ____:
@pytest.mark.asyncio
async def test_database_is_healthy_when_bdb_matches_by_dns_name(
self, mock_client, mock_cb
):
"""
Ensures health check succeeds when /v1/bdbs contains an endpoint whose dns_name
matches database host, and availability endpoint returns success.
"""
host = "db1.example.com"
mock_client.get_connection_kwargs.return_value = {"host": host}
# Mock HttpClient used inside LagAwareHealthCheck
mock_http = AsyncMock()
mock_http.get.side_effect = [
# First call: list of bdbs
[
{
"uid": "bdb-1",
"endpoints": [
{"dns_name": host, "addr": ["10.0.0.1", "10.0.0.2"]},
],
}
],
# Second call: availability check (no JSON expected)
None,
]
hc = LagAwareHealthCheck(rest_api_port=1234, lag_aware_tolerance=150)
# Inject our mocked http client
hc._http_client = mock_http
db = Database(mock_client, mock_cb, 1.0, "https://healthcheck.example.com")
assert await hc.check_health(db) is True
# Base URL must be set correctly
assert hc._http_client.client.base_url == "https://healthcheck.example.com:1234"
# Calls: first to list bdbs, then to availability
assert mock_http.get.call_count == 2
first_call = mock_http.get.call_args_list[0]
second_call = mock_http.get.call_args_list[1]
assert first_call.args[0] == "/v1/bdbs"
assert (
second_call.args[0]
== "/v1/bdbs/bdb-1/availability?extend_check=lag&availability_lag_tolerance_ms=150"
)
assert second_call.kwargs.get("expect_json") is False
@pytest.mark.asyncio
async def test_database_is_healthy_when_bdb_matches_by_addr(
self, mock_client, mock_cb
):
"""
Ensures health check succeeds when endpoint addr list contains the database host.
"""
host_ip = "203.0.113.5"
mock_client.get_connection_kwargs.return_value = {"host": host_ip}
mock_http = AsyncMock()
mock_http.get.side_effect = [
[
{
"uid": "bdb-42",
"endpoints": [
{"dns_name": "not-matching.example.com", "addr": [host_ip]},
],
}
],
None,
]
hc = LagAwareHealthCheck()
hc._http_client = mock_http
db = Database(mock_client, mock_cb, 1.0, "https://healthcheck.example.com")
assert await hc.check_health(db) is True
assert mock_http.get.call_count == 2
assert (
mock_http.get.call_args_list[1].args[0]
== "/v1/bdbs/bdb-42/availability?extend_check=lag&availability_lag_tolerance_ms=5000"
)
@pytest.mark.asyncio
async def test_raises_value_error_when_no_matching_bdb(self, mock_client, mock_cb):
"""
Ensures health check raises ValueError when there's no bdb matching the database host.
"""
host = "db2.example.com"
mock_client.get_connection_kwargs.return_value = {"host": host}
mock_http = AsyncMock()
# Return bdbs that do not match host by dns_name nor addr
mock_http.get.return_value = [
{
"uid": "a",
"endpoints": [{"dns_name": "other.example.com", "addr": ["10.0.0.9"]}],
},
{
"uid": "b",
"endpoints": [
{"dns_name": "another.example.com", "addr": ["10.0.0.10"]}
],
},
]
hc = LagAwareHealthCheck()
hc._http_client = mock_http
db = Database(mock_client, mock_cb, 1.0, "https://healthcheck.example.com")
with pytest.raises(ValueError, match="Could not find a matching bdb"):
await hc.check_health(db)
# Only the listing call should have happened
mock_http.get.assert_called_once_with("/v1/bdbs")
@pytest.mark.asyncio
async def test_propagates_http_error_from_availability(self, mock_client, mock_cb):
"""
Ensures that any HTTP error raised by the availability endpoint is propagated.
"""
host = "db3.example.com"
mock_client.get_connection_kwargs.return_value = {"host": host}
mock_http = AsyncMock()
# First: list bdbs -> match by dns_name
mock_http.get.side_effect = [
[{"uid": "bdb-err", "endpoints": [{"dns_name": host, "addr": []}]}],
# Second: availability -> raise HttpError
HttpError(
url=f"https://{host}:9443/v1/bdbs/bdb-err/availability",
status=503,
message="busy",
),
]
hc = LagAwareHealthCheck()
hc._http_client = mock_http
db = Database(mock_client, mock_cb, 1.0, "https://healthcheck.example.com")
with pytest.raises(HttpError, match="busy") as e:
await hc.check_health(db)
assert e.status == 503
# Ensure both calls were attempted
assert mock_http.get.call_count == 2
| TestLagAwareHealthCheck |
python | huggingface__transformers | src/transformers/models/groupvit/modeling_groupvit.py | {
"start": 14353,
"end": 17597
} | class ____(nn.Module):
def __init__(self, config: GroupViTVisionConfig):
super().__init__()
self.patch_embeddings = GroupViTPatchEmbeddings(
image_size=config.image_size,
patch_size=config.patch_size,
num_channels=config.num_channels,
embed_dim=config.hidden_size,
)
num_patches = self.patch_embeddings.num_patches
self.position_embeddings = nn.Parameter(torch.zeros(1, num_patches, config.hidden_size))
self.dropout = nn.Dropout(config.dropout)
self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.patch_size = config.patch_size
self.config = config
def interpolate_pos_encoding(self, embeddings: torch.Tensor, height: int, width: int) -> torch.Tensor:
"""
This method allows to interpolate the pre-trained position encodings, to be able to use the model on higher resolution
images. This method is also adapted to support torch.jit tracing and no class embeddings.
Adapted from:
- https://github.com/facebookresearch/dino/blob/de9ee3df6cf39fac952ab558447af1fa1365362a/vision_transformer.py#L174-L194, and
- https://github.com/facebookresearch/dinov2/blob/e1277af2ba9496fbadf7aec6eba56e8d882d1e35/dinov2/models/vision_transformer.py#L179-L211
"""
num_patches = embeddings.shape[1]
num_positions = self.position_embeddings.shape[1]
# always interpolate when tracing to ensure the exported model works for dynamic input shapes
if not torch.jit.is_tracing() and num_patches == num_positions and height == width:
return self.position_embeddings
patch_pos_embed = self.position_embeddings
dim = embeddings.shape[-1]
new_height = height // self.patch_size
new_width = width // self.patch_size
sqrt_num_positions = torch_int(num_positions**0.5)
patch_pos_embed = patch_pos_embed.reshape(1, sqrt_num_positions, sqrt_num_positions, dim)
patch_pos_embed = patch_pos_embed.permute(0, 3, 1, 2)
patch_pos_embed = nn.functional.interpolate(
patch_pos_embed,
size=(new_height, new_width),
mode="bicubic",
align_corners=False,
)
patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim)
return patch_pos_embed
def forward(self, pixel_values: torch.Tensor, interpolate_pos_encoding: bool = False) -> torch.Tensor:
batch_size, num_channels, height, width = pixel_values.shape
embeddings = self.patch_embeddings(pixel_values, interpolate_pos_encoding=interpolate_pos_encoding)
embeddings = self.layernorm(embeddings)
batch_size, seq_len, _ = embeddings.size()
# add positional encoding to each token
if interpolate_pos_encoding:
embeddings = embeddings + self.interpolate_pos_encoding(embeddings, height, width)
else:
embeddings = embeddings + self.position_embeddings
embeddings = self.dropout(embeddings)
return embeddings
# Copied from transformers.models.clip.modeling_clip.CLIPTextEmbeddings with CLIP->GroupViT
| GroupViTVisionEmbeddings |
python | numpy__numpy | tools/swig/test/testMatrix.py | {
"start": 11513,
"end": 11776
} | class ____(MatrixTestCase):
def __init__(self, methodName="runTest"):
MatrixTestCase.__init__(self, methodName)
self.typeStr = "long"
self.typeCode = "l"
######################################################################
| longTestCase |
python | huggingface__transformers | src/transformers/models/blip/modeling_blip_text.py | {
"start": 9306,
"end": 9997
} | class ____(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
# Adapted from https://github.com/salesforce/BLIP/blob/main/models/med.py#242
| BlipTextSelfOutput |
python | astropy__astropy | astropy/coordinates/angles/errors.py | {
"start": 642,
"end": 1153
} | class ____(RangeError):
"""
Raised when an hour value is not in the range [0,24).
Parameters
----------
hour : int, float
Examples
--------
.. code-block:: python
if not 0 <= hr < 24:
raise IllegalHourError(hour)
"""
def __init__(self, hour):
self.hour = hour
def __str__(self):
return (
f"An invalid value for 'hours' was found ('{self.hour}'); must be in the"
" range [0,24)."
)
| IllegalHourError |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-retently/components.py | {
"start": 397,
"end": 735
} | class ____(DeclarativeAuthenticator):
config: Mapping[str, Any]
api_auth: ApiKeyAuthenticator
oauth: DeclarativeOauth2Authenticator
def __new__(cls, api_auth, oauth, config, *args, **kwargs):
if config["credentials"]["api_key"]:
return api_auth
else:
return oauth
| AuthenticatorRetently |
python | numpy__numpy | numpy/ma/core.py | {
"start": 5453,
"end": 25168
} | class ____(MAError):
"""
Class for mask related errors.
"""
pass
###############################################################################
# Filling options #
###############################################################################
# b: boolean - c: complex - f: floats - i: integer - O: object - S: string
default_filler = {'b': True,
'c': 1.e20 + 0.0j,
'f': 1.e20,
'i': 999999,
'O': '?',
'S': b'N/A',
'u': 999999,
'V': b'???',
'U': 'N/A',
'T': 'N/A'
}
# Add datetime64 and timedelta64 types
for v in ["Y", "M", "W", "D", "h", "m", "s", "ms", "us", "ns", "ps",
"fs", "as"]:
default_filler["M8[" + v + "]"] = np.datetime64("NaT", v)
default_filler["m8[" + v + "]"] = np.timedelta64("NaT", v)
float_types_list = [np.half, np.single, np.double, np.longdouble,
np.csingle, np.cdouble, np.clongdouble]
_minvals: dict[type, int] = {}
_maxvals: dict[type, int] = {}
for sctype in ntypes.sctypeDict.values():
scalar_dtype = np.dtype(sctype)
if scalar_dtype.kind in "Mm":
info = np.iinfo(np.int64)
min_val, max_val = info.min + 1, info.max
elif np.issubdtype(scalar_dtype, np.integer):
info = np.iinfo(sctype)
min_val, max_val = info.min, info.max
elif np.issubdtype(scalar_dtype, np.floating):
info = np.finfo(sctype)
min_val, max_val = info.min, info.max
elif scalar_dtype.kind == "b":
min_val, max_val = 0, 1
else:
min_val, max_val = None, None
_minvals[sctype] = min_val
_maxvals[sctype] = max_val
max_filler = _minvals
max_filler.update([(k, -np.inf) for k in float_types_list[:4]])
max_filler.update([(k, complex(-np.inf, -np.inf)) for k in float_types_list[-3:]])
min_filler = _maxvals
min_filler.update([(k, +np.inf) for k in float_types_list[:4]])
min_filler.update([(k, complex(+np.inf, +np.inf)) for k in float_types_list[-3:]])
del float_types_list
def _recursive_fill_value(dtype, f):
    """
    Recursively produce a fill value for `dtype`, calling f on scalar dtypes
    """
    if dtype.names is not None:
        # Structured dtype: build one fill value per field. Wrapping each
        # value in `array` applies NumPy casting rules, so e.g. 99999 can
        # serve as a fill value for an int8 field.
        field_vals = tuple(
            np.array(_recursive_fill_value(dtype[field], f))
            for field in dtype.names
        )
        # Index with () to decay the 0-d array to a void scalar.
        return np.array(field_vals, dtype=dtype)[()]
    if dtype.subdtype:
        # Subarray dtype: fill an array of the subarray shape with the
        # base type's scalar fill value.
        base, shape = dtype.subdtype
        return np.full(shape, _recursive_fill_value(base, f))
    return f(dtype)
def _get_dtype_of(obj):
    """Coerce a *_fill_value argument (dtype, array, or scalar) to a dtype."""
    if isinstance(obj, np.dtype):
        return obj
    if hasattr(obj, 'dtype'):
        # Arrays and scalars expose their dtype directly.
        return obj.dtype
    # Anything else: let numpy infer a dtype from the value.
    return np.asanyarray(obj).dtype
def default_fill_value(obj):
"""
Return the default fill value for the argument object.
The default filling value depends on the datatype of the input
array or the type of the input scalar:
=========== ========
datatype default
=========== ========
bool True
int 999999
float 1.e20
complex 1.e20+0j
object '?'
string 'N/A'
StringDType 'N/A'
=========== ========
For structured types, a structured scalar is returned, with each field the
default fill value for its type.
For subarray types, the fill value is an array of the same size containing
the default scalar fill value.
Parameters
----------
obj : ndarray, dtype or scalar
The array data-type or scalar for which the default fill value
is returned.
Returns
-------
fill_value : scalar
The default fill value.
Examples
--------
>>> import numpy as np
>>> np.ma.default_fill_value(1)
999999
>>> np.ma.default_fill_value(np.array([1.1, 2., np.pi]))
1e+20
>>> np.ma.default_fill_value(np.dtype(complex))
(1e+20+0j)
"""
def _scalar_fill_value(dtype):
if dtype.kind in 'Mm':
return default_filler.get(dtype.str[1:], '?')
else:
return default_filler.get(dtype.kind, '?')
dtype = _get_dtype_of(obj)
return _recursive_fill_value(dtype, _scalar_fill_value)
def _extremum_fill_value(obj, extremum, extremum_name):
    """Return the extremum-based fill value for `obj`'s dtype.

    `extremum` maps scalar numpy types to their filler values; structured
    and subarray dtypes are handled recursively. A scalar dtype with no
    entry in `extremum` raises TypeError.
    """
    def _scalar_fill_value(dtype):
        if dtype.type not in extremum:
            raise TypeError(
                f"Unsuitable type {dtype} for calculating {extremum_name}."
            ) from None
        return extremum[dtype.type]

    return _recursive_fill_value(_get_dtype_of(obj), _scalar_fill_value)
def minimum_fill_value(obj):
"""
Return the maximum value that can be represented by the dtype of an object.
This function is useful for calculating a fill value suitable for
taking the minimum of an array with a given dtype.
Parameters
----------
obj : ndarray, dtype or scalar
An object that can be queried for it's numeric type.
Returns
-------
val : scalar
The maximum representable value.
Raises
------
TypeError
If `obj` isn't a suitable numeric type.
See Also
--------
maximum_fill_value : The inverse function.
set_fill_value : Set the filling value of a masked array.
MaskedArray.fill_value : Return current fill value.
Examples
--------
>>> import numpy as np
>>> import numpy.ma as ma
>>> a = np.int8()
>>> ma.minimum_fill_value(a)
127
>>> a = np.int32()
>>> ma.minimum_fill_value(a)
2147483647
An array of numeric data can also be passed.
>>> a = np.array([1, 2, 3], dtype=np.int8)
>>> ma.minimum_fill_value(a)
127
>>> a = np.array([1, 2, 3], dtype=np.float32)
>>> ma.minimum_fill_value(a)
inf
"""
return _extremum_fill_value(obj, min_filler, "minimum")
def maximum_fill_value(obj):
"""
Return the minimum value that can be represented by the dtype of an object.
This function is useful for calculating a fill value suitable for
taking the maximum of an array with a given dtype.
Parameters
----------
obj : ndarray, dtype or scalar
An object that can be queried for it's numeric type.
Returns
-------
val : scalar
The minimum representable value.
Raises
------
TypeError
If `obj` isn't a suitable numeric type.
See Also
--------
minimum_fill_value : The inverse function.
set_fill_value : Set the filling value of a masked array.
MaskedArray.fill_value : Return current fill value.
Examples
--------
>>> import numpy as np
>>> import numpy.ma as ma
>>> a = np.int8()
>>> ma.maximum_fill_value(a)
-128
>>> a = np.int32()
>>> ma.maximum_fill_value(a)
-2147483648
An array of numeric data can also be passed.
>>> a = np.array([1, 2, 3], dtype=np.int8)
>>> ma.maximum_fill_value(a)
-128
>>> a = np.array([1, 2, 3], dtype=np.float32)
>>> ma.maximum_fill_value(a)
-inf
"""
return _extremum_fill_value(obj, max_filler, "maximum")
def _recursive_set_fill_value(fillvalue, dt):
    """
    Build a tuple fill value matching the structured dtype `dt`.

    Parameters
    ----------
    fillvalue : scalar or array_like
        Value(s) to distribute over the fields of `dt`; resized to one
        entry per field if shorter.
    dt : dtype
        Structured dtype for which to create the fill value.

    Returns
    -------
    tuple
        One (possibly nested) entry per field of `dt`.
    """
    per_field = np.resize(fillvalue, len(dt.names))
    out = []
    for val, name in zip(per_field, dt.names):
        field_dtype = dt[name]
        if field_dtype.subdtype:
            # Subarray field: convert based on the scalar base type.
            field_dtype = field_dtype.subdtype[0]
        if field_dtype.names is not None:
            # Nested structured field: recurse.
            out.append(tuple(_recursive_set_fill_value(val, field_dtype)))
        else:
            out.append(np.array(val, dtype=field_dtype).item())
    return tuple(out)
def _check_fill_value(fill_value, ndtype):
"""
Private function validating the given `fill_value` for the given dtype.
If fill_value is None, it is set to the default corresponding to the dtype.
If fill_value is not None, its value is forced to the given dtype.
The result is always a 0d array.
"""
ndtype = np.dtype(ndtype)
if fill_value is None:
fill_value = default_fill_value(ndtype)
# TODO: It seems better to always store a valid fill_value, the oddity
# about is that `_fill_value = None` would behave even more
# different then.
# (e.g. this allows arr_uint8.astype(int64) to have the default
# fill value again...)
# The one thing that changed in 2.0/2.1 around cast safety is that the
# default `int(99...)` is not a same-kind cast anymore, so if we
# have a uint, use the default uint.
if ndtype.kind == "u":
fill_value = np.uint(fill_value)
elif ndtype.names is not None:
if isinstance(fill_value, (ndarray, np.void)):
try:
fill_value = np.asarray(fill_value, dtype=ndtype)
except ValueError as e:
err_msg = "Unable to transform %s to dtype %s"
raise ValueError(err_msg % (fill_value, ndtype)) from e
else:
fill_value = np.asarray(fill_value, dtype=object)
fill_value = np.array(_recursive_set_fill_value(fill_value, ndtype),
dtype=ndtype)
elif isinstance(fill_value, str) and (ndtype.char not in 'OSTVU'):
# Note this check doesn't work if fill_value is not a scalar
err_msg = "Cannot set fill value of string with array of dtype %s"
raise TypeError(err_msg % ndtype)
else:
# In case we want to convert 1e20 to int.
# Also in case of converting string arrays.
try:
fill_value = np.asarray(fill_value, dtype=ndtype)
except (OverflowError, ValueError) as e:
# Raise TypeError instead of OverflowError or ValueError.
# OverflowError is seldom used, and the real problem here is
# that the passed fill_value is not compatible with the ndtype.
err_msg = "Cannot convert fill_value %s to dtype %s"
raise TypeError(err_msg % (fill_value, ndtype)) from e
return np.array(fill_value)
def set_fill_value(a, fill_value):
"""
Set the filling value of a, if a is a masked array.
This function changes the fill value of the masked array `a` in place.
If `a` is not a masked array, the function returns silently, without
doing anything.
Parameters
----------
a : array_like
Input array.
fill_value : dtype
Filling value. A consistency test is performed to make sure
the value is compatible with the dtype of `a`.
Returns
-------
None
Nothing returned by this function.
See Also
--------
maximum_fill_value : Return the default fill value for a dtype.
MaskedArray.fill_value : Return current fill value.
MaskedArray.set_fill_value : Equivalent method.
Examples
--------
>>> import numpy as np
>>> import numpy.ma as ma
>>> a = np.arange(5)
>>> a
array([0, 1, 2, 3, 4])
>>> a = ma.masked_where(a < 3, a)
>>> a
masked_array(data=[--, --, --, 3, 4],
mask=[ True, True, True, False, False],
fill_value=999999)
>>> ma.set_fill_value(a, -999)
>>> a
masked_array(data=[--, --, --, 3, 4],
mask=[ True, True, True, False, False],
fill_value=-999)
Nothing happens if `a` is not a masked array.
>>> a = list(range(5))
>>> a
[0, 1, 2, 3, 4]
>>> ma.set_fill_value(a, 100)
>>> a
[0, 1, 2, 3, 4]
>>> a = np.arange(5)
>>> a
array([0, 1, 2, 3, 4])
>>> ma.set_fill_value(a, 100)
>>> a
array([0, 1, 2, 3, 4])
"""
if isinstance(a, MaskedArray):
a.set_fill_value(fill_value)
def get_fill_value(a):
    """
    Return the filling value of a, if any. Otherwise, returns the
    default filling value for that type.

    Parameters
    ----------
    a : MaskedArray or array_like
        A masked array contributes its own ``fill_value`` attribute; any
        other object is mapped to the default fill value for its dtype.
    """
    if isinstance(a, MaskedArray):
        result = a.fill_value
    else:
        result = default_fill_value(a)
    return result
def common_fill_value(a, b):
"""
Return the common filling value of two masked arrays, if any.
If ``a.fill_value == b.fill_value``, return the fill value,
otherwise return None.
Parameters
----------
a, b : MaskedArray
The masked arrays for which to compare fill values.
Returns
-------
fill_value : scalar or None
The common fill value, or None.
Examples
--------
>>> import numpy as np
>>> x = np.ma.array([0, 1.], fill_value=3)
>>> y = np.ma.array([0, 1.], fill_value=3)
>>> np.ma.common_fill_value(x, y)
3.0
"""
t1 = get_fill_value(a)
t2 = get_fill_value(b)
if t1 == t2:
return t1
return None
def filled(a, fill_value=None):
"""
Return input as an `~numpy.ndarray`, with masked values replaced by
`fill_value`.
If `a` is not a `MaskedArray`, `a` itself is returned.
If `a` is a `MaskedArray` with no masked values, then ``a.data`` is
returned.
If `a` is a `MaskedArray` and `fill_value` is None, `fill_value` is set to
``a.fill_value``.
Parameters
----------
a : MaskedArray or array_like
An input object.
fill_value : array_like, optional.
Can be scalar or non-scalar. If non-scalar, the
resulting filled array should be broadcastable
over input array. Default is None.
Returns
-------
a : ndarray
The filled array.
See Also
--------
compressed
Examples
--------
>>> import numpy as np
>>> import numpy.ma as ma
>>> x = ma.array(np.arange(9).reshape(3, 3), mask=[[1, 0, 0],
... [1, 0, 0],
... [0, 0, 0]])
>>> x.filled()
array([[999999, 1, 2],
[999999, 4, 5],
[ 6, 7, 8]])
>>> x.filled(fill_value=333)
array([[333, 1, 2],
[333, 4, 5],
[ 6, 7, 8]])
>>> x.filled(fill_value=np.arange(3))
array([[0, 1, 2],
[0, 4, 5],
[6, 7, 8]])
"""
if hasattr(a, 'filled'):
return a.filled(fill_value)
elif isinstance(a, ndarray):
# Should we check for contiguity ? and a.flags['CONTIGUOUS']:
return a
elif isinstance(a, dict):
return np.array(a, 'O')
else:
return np.array(a)
def get_masked_subclass(*arrays):
    """
    Return the youngest subclass of MaskedArray from a list of (masked) arrays.

    In case of siblings, the first listed takes over.
    """
    if len(arrays) == 1:
        # Single input: its own class if it is already masked, else MaskedArray.
        arr = arrays[0]
        if isinstance(arr, MaskedArray):
            rcls = type(arr)
        else:
            rcls = MaskedArray
    else:
        arrcls = [type(a) for a in arrays]
        rcls = arrcls[0]
        if not issubclass(rcls, MaskedArray):
            rcls = MaskedArray
        # A class that subclasses the current pick is "younger" and wins;
        # siblings (neither subclasses the other) keep the earlier pick.
        for cls in arrcls[1:]:
            if issubclass(cls, rcls):
                rcls = cls
    # Don't return MaskedConstant as result: revert to MaskedArray
    if rcls.__name__ == 'MaskedConstant':
        return MaskedArray
    return rcls
def getdata(a, subok=True):
"""
Return the data of a masked array as an ndarray.
Return the data of `a` (if any) as an ndarray if `a` is a ``MaskedArray``,
else return `a` as a ndarray or subclass (depending on `subok`) if not.
Parameters
----------
a : array_like
Input ``MaskedArray``, alternatively a ndarray or a subclass thereof.
subok : bool
Whether to force the output to be a `pure` ndarray (False) or to
return a subclass of ndarray if appropriate (True, default).
See Also
--------
getmask : Return the mask of a masked array, or nomask.
getmaskarray : Return the mask of a masked array, or full array of False.
Examples
--------
>>> import numpy as np
>>> import numpy.ma as ma
>>> a = ma.masked_equal([[1,2],[3,4]], 2)
>>> a
masked_array(
data=[[1, --],
[3, 4]],
mask=[[False, True],
[False, False]],
fill_value=2)
>>> ma.getdata(a)
array([[1, 2],
[3, 4]])
Equivalently use the ``MaskedArray`` `data` attribute.
>>> a.data
array([[1, 2],
[3, 4]])
"""
try:
data = a._data
except AttributeError:
data = np.array(a, copy=None, subok=subok)
if not subok:
return data.view(ndarray)
return data
get_data = getdata
def fix_invalid(a, mask=nomask, copy=True, fill_value=None):
"""
Return input with invalid data masked and replaced by a fill value.
Invalid data means values of `nan`, `inf`, etc.
Parameters
----------
a : array_like
Input array, a (subclass of) ndarray.
mask : sequence, optional
Mask. Must be convertible to an array of booleans with the same
shape as `data`. True indicates a masked (i.e. invalid) data.
copy : bool, optional
Whether to use a copy of `a` (True) or to fix `a` in place (False).
Default is True.
fill_value : scalar, optional
Value used for fixing invalid data. Default is None, in which case
the ``a.fill_value`` is used.
Returns
-------
b : MaskedArray
The input array with invalid entries fixed.
Notes
-----
A copy is performed by default.
Examples
--------
>>> import numpy as np
>>> x = np.ma.array([1., -1, np.nan, np.inf], mask=[1] + [0]*3)
>>> x
masked_array(data=[--, -1.0, nan, inf],
mask=[ True, False, False, False],
fill_value=1e+20)
>>> np.ma.fix_invalid(x)
masked_array(data=[--, -1.0, --, --],
mask=[ True, False, True, True],
fill_value=1e+20)
>>> fixed = np.ma.fix_invalid(x)
>>> fixed.data
array([ 1.e+00, -1.e+00, 1.e+20, 1.e+20])
>>> x.data
array([ 1., -1., nan, inf])
"""
a = masked_array(a, copy=copy, mask=mask, subok=True)
invalid = np.logical_not(np.isfinite(a._data))
if not invalid.any():
return a
a._mask |= invalid
if fill_value is None:
fill_value = a.fill_value
a._data[invalid] = fill_value
return a
def is_string_or_list_of_strings(val):
    """True-ish if `val` is a str, or a non-empty list of str.

    Note: a non-str, non-list value or an empty list yields a falsy result
    (the original short-circuit semantics are preserved exactly, including
    returning the empty list itself rather than False).
    """
    if isinstance(val, str):
        return True
    # `builtins.all` is used explicitly -- presumably a module-level `all`
    # shadows the builtin elsewhere in this file; TODO confirm.
    return (isinstance(val, list) and val and
            builtins.all(isinstance(item, str) for item in val))
###############################################################################
# Ufuncs #
###############################################################################
ufunc_domain = {}
ufunc_fills = {}
| MaskError |
python | prompt-toolkit__python-prompt-toolkit | src/prompt_toolkit/layout/processors.py | {
"start": 19813,
"end": 20946
} | class ____(Processor):
"""
Make leading whitespace visible.
:param get_char: Callable that returns one character.
"""
def __init__(
self,
get_char: Callable[[], str] | None = None,
style: str = "class:leading-whitespace",
) -> None:
def default_get_char() -> str:
if "\xb7".encode(get_app().output.encoding(), "replace") == b"?":
return "."
else:
return "\xb7"
self.style = style
self.get_char = get_char or default_get_char
    def apply_transformation(self, ti: TransformationInput) -> Transformation:
        """Restyle the run of leading spaces on this line, replacing each
        with the marker character from ``self.get_char``."""
        fragments = ti.fragments
        # Walk through all the fragments.
        if fragments and fragment_list_to_text(fragments).startswith(" "):
            t = (self.style, self.get_char())
            # Explode so every fragment is a single character, then replace
            # each leading space until the first non-space character.
            fragments = explode_text_fragments(fragments)
            for i in range(len(fragments)):
                if fragments[i][1] == " ":
                    fragments[i] = t
                else:
                    break
        return Transformation(fragments)
| ShowLeadingWhiteSpaceProcessor |
python | fastapi__sqlmodel | sqlmodel/main.py | {
"start": 2639,
"end": 5700
} | class ____(PydanticFieldInfo): # type: ignore[misc]
# mypy - ignore that PydanticFieldInfo is @final
    def __init__(self, default: Any = Undefined, **kwargs: Any) -> None:
        """Store SQLModel-specific column options alongside pydantic's
        FieldInfo data.

        Column-level options (primary_key, nullable, foreign_key, ondelete,
        unique, index, sa_type, sa_column_args, sa_column_kwargs) are
        mutually exclusive with passing a fully built ``sa_column``; doing
        both raises RuntimeError.
        """
        # NOTE(review): defaults to False, but the sa_column guard below
        # tests `is not Undefined`; with this default the guard would fire
        # even when primary_key was never passed. Confirm intended default
        # (the sibling options default to Undefined).
        primary_key = kwargs.pop("primary_key", False)
        nullable = kwargs.pop("nullable", Undefined)
        foreign_key = kwargs.pop("foreign_key", Undefined)
        ondelete = kwargs.pop("ondelete", Undefined)
        # NOTE(review): same False-vs-Undefined concern as primary_key above.
        unique = kwargs.pop("unique", False)
        index = kwargs.pop("index", Undefined)
        sa_type = kwargs.pop("sa_type", Undefined)
        sa_column = kwargs.pop("sa_column", Undefined)
        sa_column_args = kwargs.pop("sa_column_args", Undefined)
        sa_column_kwargs = kwargs.pop("sa_column_kwargs", Undefined)
        # A prebuilt sa_column carries all column configuration itself, so
        # every other column option is rejected alongside it.
        if sa_column is not Undefined:
            if sa_column_args is not Undefined:
                raise RuntimeError(
                    "Passing sa_column_args is not supported when "
                    "also passing a sa_column"
                )
            if sa_column_kwargs is not Undefined:
                raise RuntimeError(
                    "Passing sa_column_kwargs is not supported when "
                    "also passing a sa_column"
                )
            if primary_key is not Undefined:
                raise RuntimeError(
                    "Passing primary_key is not supported when also passing a sa_column"
                )
            if nullable is not Undefined:
                raise RuntimeError(
                    "Passing nullable is not supported when also passing a sa_column"
                )
            if foreign_key is not Undefined:
                raise RuntimeError(
                    "Passing foreign_key is not supported when also passing a sa_column"
                )
            if ondelete is not Undefined:
                raise RuntimeError(
                    "Passing ondelete is not supported when also passing a sa_column"
                )
            if unique is not Undefined:
                raise RuntimeError(
                    "Passing unique is not supported when also passing a sa_column"
                )
            if index is not Undefined:
                raise RuntimeError(
                    "Passing index is not supported when also passing a sa_column"
                )
            if sa_type is not Undefined:
                raise RuntimeError(
                    "Passing sa_type is not supported when also passing a sa_column"
                )
        # ondelete configures the FK constraint, so it is meaningless
        # without a foreign_key.
        if ondelete is not Undefined:
            if foreign_key is Undefined:
                raise RuntimeError("ondelete can only be used with foreign_key")
        # Remaining kwargs are plain pydantic FieldInfo options.
        super().__init__(default=default, **kwargs)
        self.primary_key = primary_key
        self.nullable = nullable
        self.foreign_key = foreign_key
        self.ondelete = ondelete
        self.unique = unique
        self.index = index
        self.sa_type = sa_type
        self.sa_column = sa_column
        self.sa_column_args = sa_column_args
        self.sa_column_kwargs = sa_column_kwargs
| FieldInfo |
python | pytorch__pytorch | benchmarks/dynamo/timm_models.py | {
"start": 4959,
"end": 12089
} | class ____(BenchmarkRunner):
def __init__(self):
super().__init__()
self.suite_name = "timm_models"
@property
def _config(self):
return load_yaml_file("timm_models.yaml")
@property
def _skip(self):
return self._config["skip"]
@property
def skip_models_for_cpu(self):
return self._skip["device"]["cpu"]
@property
def skip_models_for_cpu_aarch64(self):
return self._skip["device"]["cpu_aarch64"]
@property
def skip_models(self):
return self._skip["all"]
@property
def force_amp_for_fp16_bf16_models(self):
return FORCE_AMP_FOR_FP16_BF16_MODELS
@property
def force_fp16_for_bf16_models(self):
return set()
@property
def get_output_amp_train_process_func(self):
return {}
@property
def skip_accuracy_check_as_eager_non_deterministic(self):
if self.args.accuracy and self.args.training:
return SKIP_ACCURACY_CHECK_AS_EAGER_NON_DETERMINISTIC_MODELS
return set()
@property
def guard_on_nn_module_models(self):
return {}
@property
def inline_inbuilt_nn_modules_models(self):
return {}
@download_retry_decorator
def _download_model(self, model_name):
model = create_model(
model_name,
in_chans=3,
scriptable=False,
num_classes=None,
drop_rate=0.0,
drop_path_rate=None,
drop_block_rate=None,
pretrained=True,
)
return model
def load_model(
self,
device,
model_name,
batch_size=None,
extra_args=None,
):
if self.args.enable_activation_checkpointing:
raise NotImplementedError(
"Activation checkpointing not implemented for Timm models"
)
is_training = self.args.training
use_eval_mode = self.args.use_eval_mode
channels_last = self._args.channels_last
model = self._download_model(model_name)
if model is None:
raise RuntimeError(f"Failed to load model '{model_name}'")
model.to(
device=device,
memory_format=torch.channels_last if channels_last else None,
)
data_config = resolve_data_config(
vars(self._args) if timmversion >= "0.8.0" else self._args,
model=model,
use_test_size=not is_training,
)
input_size = data_config["input_size"]
recorded_batch_size = TIMM_MODELS[model_name]
if model_name in BATCH_SIZE_DIVISORS:
recorded_batch_size = max(
int(recorded_batch_size / BATCH_SIZE_DIVISORS[model_name]), 1
)
batch_size = batch_size or recorded_batch_size
torch.manual_seed(1337)
input_tensor = torch.randint(
256, size=(batch_size,) + input_size, device=device
).to(dtype=torch.float32)
mean = torch.mean(input_tensor)
std_dev = torch.std(input_tensor)
example_inputs = (input_tensor - mean) / std_dev
if channels_last:
example_inputs = example_inputs.contiguous(
memory_format=torch.channels_last
)
example_inputs = [
example_inputs,
]
self.loss = torch.nn.CrossEntropyLoss().to(device)
if model_name in SCALED_COMPUTE_LOSS:
self.compute_loss = self.scaled_compute_loss
if is_training and not use_eval_mode:
model.train()
else:
model.eval()
self.validate_model(model, example_inputs)
return device, model_name, model, example_inputs, batch_size
def iter_model_names(self, args):
# for model_name in list_models(pretrained=True, exclude_filters=["*in21k"]):
model_names = sorted(TIMM_MODELS.keys())
start, end = self.get_benchmark_indices(len(model_names))
for index, model_name in enumerate(model_names):
if index < start or index >= end:
continue
if (
not re.search("|".join(args.filter), model_name, re.IGNORECASE)
or re.search("|".join(args.exclude), model_name, re.IGNORECASE)
or model_name in args.exclude_exact
or model_name in self.skip_models
):
continue
yield model_name
def pick_grad(self, name, is_training):
if is_training:
return torch.enable_grad()
else:
return torch.no_grad()
def use_larger_multiplier_for_smaller_tensor(self, name):
return name in REQUIRE_LARGER_MULTIPLIER_FOR_SMALLER_TENSOR
def get_tolerance_and_cosine_flag(self, is_training, current_device, name):
cosine = self.args.cosine
tolerance = 1e-3
if self.args.freezing and name in REQUIRE_HIGHER_TOLERANCE_FOR_FREEZING:
# the conv-batchnorm fusion used under freezing may cause relatively
# large numerical difference. We need are larger tolerance.
# Check https://github.com/pytorch/pytorch/issues/120545 for context
tolerance = 8 * 1e-2
if is_training:
from torch._inductor import config as inductor_config
if name == "beit_base_patch16_224":
tolerance = 16 * 1e-2
elif name in REQUIRE_EVEN_HIGHER_TOLERANCE or (
inductor_config.max_autotune
and name in REQUIRE_EVEN_HIGHER_TOLERANCE_MAX_AUTOTUNE
):
tolerance = 8 * 1e-2
elif name in REQUIRE_HIGHER_TOLERANCE or (
self.args.amp and name in REQUIRE_HIGHER_TOLERANCE_AMP
):
tolerance = 4 * 1e-2
else:
tolerance = 1e-2
return tolerance, cosine
def compute_loss(self, pred):
# High loss values make gradient checking harder, as small changes in
# accumulation order upsets accuracy checks.
return reduce_to_scalar_loss(pred)
def scaled_compute_loss(self, pred):
# Loss values need zoom out further.
return reduce_to_scalar_loss(pred) / 1000.0
def forward_pass(self, mod, inputs, collect_outputs=True):
with self.autocast(**self.autocast_arg):
return mod(*inputs)
def forward_and_backward_pass(self, mod, inputs, collect_outputs=True):
cloned_inputs = clone_inputs(inputs)
self.optimizer_zero_grad(mod)
with self.autocast(**self.autocast_arg):
pred = mod(*cloned_inputs)
if isinstance(pred, tuple):
pred = pred[0]
loss = self.compute_loss(pred)
self.grad_scaler.scale(loss).backward()
self.optimizer_step()
if collect_outputs:
return collect_results(mod, None, loss, cloned_inputs)
return None
def timm_main():
logging.basicConfig(level=logging.WARNING)
warnings.filterwarnings("ignore")
main(TimmRunner())
if __name__ == "__main__":
timm_main()
| TimmRunner |
python | coleifer__peewee | tests/model_save.py | {
"start": 309,
"end": 398
} | class ____(TestModel):
pk = IntegerField(primary_key=True)
value = IntegerField()
| T3 |
python | joke2k__faker | faker/providers/geo/pl_PL/__init__.py | {
"start": 41,
"end": 2893
} | class ____(GeoProvider):
# Source:
# https://latitude.to/map/pl/poland/cities/
land_coords = (
("52.22977", "21.01178", "Warszawa", "PL", "Europe/Warsaw"),
("51.75", "19.46667", "Łódź", "PL", "Europe/Warsaw"),
("50.06143", "19.93658", "Kraków", "PL", "Europe/Warsaw"),
("51.1", "17.03333", "Wrocław", "PL", "Europe/Warsaw"),
("52.40692", "16.92993", "Poznań", "PL", "Europe/Warsaw"),
("54.35205", "18.64637", "Gdańsk", "PL", "Europe/Warsaw"),
("53.42894", "14.55302", "Szczecin", "PL", "Europe/Warsaw"),
("53.1235", "18.00762", "Bydgoszcz", "PL", "Europe/Warsaw"),
("51.25", "22.56667", "Lublin", "PL", "Europe/Warsaw"),
("50.25841", "19.02754", "Katowice", "PL", "Europe/Warsaw"),
("53.13333", "23.16433", "Białystok", "PL", "Europe/Warsaw"),
("54.51889", "18.53188", "Gdynia", "PL", "Europe/Warsaw"),
("50.79646", "19.12409", "Częstochowa", "PL", "Europe/Warsaw"),
("50.28682", "19.10385", "Sosnowiec", "PL", "Europe/Warsaw"),
("51.40253", "21.14714", "Radom", "PL", "Europe/Warsaw"),
("53.01375", "18.59814", "Toruń", "PL", "Europe/Warsaw"),
("50.87033", "20.62752", "Kielce", "PL", "Europe/Warsaw"),
("50.29761", "18.67658", "Gliwice", "PL", "Europe/Warsaw"),
("50.32492", "18.78576", "Zabrze", "PL", "Europe/Warsaw"),
("50.34802", "18.93282", "Bytom", "PL", "Europe/Warsaw"),
("49.82245", "19.04686", "Bielsko-Biała", "PL", "Europe/Warsaw"),
("53.77995", "20.49416", "Olsztyn", "PL", "Europe/Warsaw"),
("50.04132", "21.99901", "Rzeszów", "PL", "Europe/Warsaw"),
("50.2584", "18.85632", "Ruda Śląska", "PL", "Europe/Warsaw"),
("50.09713", "18.54179", "Rybnik", "PL", "Europe/Warsaw"),
("50.31818", "19.2374", "Dąbrowa Górnicza", "PL", "Europe/Warsaw"),
("50.13717", "18.96641", "Tychy", "PL", "Europe/Warsaw"),
("50.67211", "17.92533", "Opole", "PL", "Europe/Warsaw"),
("54.1522", "19.40884", "Elbląg", "PL", "Europe/Warsaw"),
("52.54682", "19.70638", "Płock", "PL", "Europe/Warsaw"),
("50.77141", "16.28432", "Wałbrzych", "PL", "Europe/Warsaw"),
("52.73679", "15.22878", "Gorzów Wielkopolski", "PL", "Europe/Warsaw"),
("52.64817", "19.0678", "Włocławek", "PL", "Europe/Warsaw"),
("51.93548", "15.50643", "Zielona Góra", "PL", "Europe/Warsaw"),
("50.01381", "20.98698", "Tarnów", "PL", "Europe/Warsaw"),
("51.76109", "18.09102", "Kalisz", "PL", "Europe/Warsaw"),
("54.19438", "16.17222", "Koszalin", "PL", "Europe/Warsaw"),
("51.21006", "16.1619", "Legnica", "PL", "Europe/Warsaw"),
("53.48411", "18.75366", "Grudziądz", "PL", "Europe/Warsaw"),
("54.46405", "17.02872", "Słupsk", "PL", "Europe/Warsaw"),
)
| Provider |
python | TheAlgorithms__Python | maths/pythagoras.py | {
"start": 100,
"end": 712
} | class ____:
def __init__(self, x, y, z):
self.x = x
self.y = y
self.z = z
def __repr__(self) -> str:
return f"Point({self.x}, {self.y}, {self.z})"
def distance(a: "Point", b: "Point") -> float:
    """
    Return the Euclidean distance between two 3-D points.

    >>> point1 = Point(2, -1, 7)
    >>> point2 = Point(1, -3, 5)
    >>> print(f"Distance from {point1} to {point2} is {distance(point1, point2)}")
    Distance from Point(2, -1, 7) to Point(1, -3, 5) is 3.0
    """
    # The original wrapped the sum of squares in abs(), which is redundant:
    # a sum of real squares is never negative. math.dist computes the same
    # Euclidean norm directly.
    return math.dist((a.x, a.y, a.z), (b.x, b.y, b.z))
if __name__ == "__main__":
import doctest
doctest.testmod()
| Point |
python | dagster-io__dagster | python_modules/dagster/dagster_tests/core_tests/run_coordinator_tests/test_queued_run_coordinator.py | {
"start": 547,
"end": 8121
} | class ____:
"""You can extend this class to easily run these set of tests on any custom run coordinator
that subclasses the QueuedRunCoordinator. When extending, you simply need to override the
`coordinator` fixture and return your implementation of `QueuedRunCoordinator`.
For example:
```
class TestMyRunCoordinator(TestQueuedRunCoordinator):
@pytest.fixture(scope='function')
def coordinator(self, instance):
run_coordinator = MyRunCoordinator()
run_coordinator.register_instance(instance)
yield run_coordinator
```
"""
@pytest.fixture
def instance(self) -> Iterator[dg.DagsterInstance]:
overrides = {
"run_launcher": {"module": "dagster._core.test_utils", "class": "MockedRunLauncher"}
}
with dg.instance_for_test(overrides=overrides) as inst:
yield inst
@pytest.fixture
def coordinator(self, instance: DagsterInstance) -> Iterator[dg.QueuedRunCoordinator]:
run_coordinator = dg.QueuedRunCoordinator()
run_coordinator.register_instance(instance)
yield run_coordinator
@pytest.fixture(name="workspace")
def workspace_fixture(self, instance: DagsterInstance) -> Iterator[WorkspaceRequestContext]:
with get_bar_workspace(instance) as workspace:
yield workspace
@pytest.fixture(name="remote_job")
def remote_job_fixture(self, workspace: WorkspaceRequestContext) -> RemoteJob:
location = workspace.get_code_location("bar_code_location")
return location.get_repository("bar_repo").get_full_job("foo")
def create_run_for_test(
self, instance: DagsterInstance, remote_job: RemoteJob, **kwargs: object
) -> dg.DagsterRun:
job_args = merge_dicts(
{
"job_name": "foo",
"remote_job_origin": remote_job.get_remote_origin(),
"job_code_origin": remote_job.get_python_origin(),
},
kwargs,
)
return create_run_for_test(instance, **job_args)
def test_config(self):
with environ({"MAX_RUNS": "10", "DEQUEUE_INTERVAL": "7"}):
with dg.instance_for_test(
overrides={
"run_coordinator": {
"module": "dagster._core.run_coordinator",
"class": "QueuedRunCoordinator",
"config": {
"max_concurrent_runs": {
"env": "MAX_RUNS",
},
"tag_concurrency_limits": [
{"key": "foo", "value": "bar", "limit": 3},
{"key": "backfill", "limit": 2},
],
"dequeue_interval_seconds": {
"env": "DEQUEUE_INTERVAL",
},
"block_op_concurrency_limited_runs": {
"enabled": True,
"op_concurrency_slot_buffer": 1,
},
},
}
}
) as _:
pass
with pytest.raises(dg.DagsterInvalidConfigError):
with dg.instance_for_test(
overrides={
"run_coordinator": {
"module": "dagster._core.run_coordinator",
"class": "QueuedRunCoordinator",
"config": {
"tag_concurrency_limits": [
{"key": "backfill"},
],
},
}
}
) as instance:
print(instance.run_coordinator) # noqa: T201
def test_config_unique_value(self):
with environ({"MAX_RUNS": "10", "DEQUEUE_INTERVAL": "7"}):
with dg.instance_for_test(
overrides={
"run_coordinator": {
"module": "dagster._core.run_coordinator",
"class": "QueuedRunCoordinator",
"config": {
"max_concurrent_runs": {
"env": "MAX_RUNS",
},
"tag_concurrency_limits": [
{
"key": "foo",
"value": {"applyLimitPerUniqueValue": True},
"limit": 3,
},
{"key": "backfill", "limit": 2},
],
"dequeue_interval_seconds": {
"env": "DEQUEUE_INTERVAL",
},
},
}
}
) as _:
pass
def test_submit_run(self, instance, coordinator, workspace, remote_job):
run = self.create_run_for_test(instance, remote_job, status=DagsterRunStatus.NOT_STARTED)
returned_run = coordinator.submit_run(dg.SubmitRunContext(run, workspace))
assert returned_run.run_id == run.run_id
assert returned_run.status == DagsterRunStatus.QUEUED
assert len(instance.run_launcher.queue()) == 0
stored_run = instance.get_run_by_id(run.run_id)
assert stored_run.status == DagsterRunStatus.QUEUED
events = list(
instance.get_records_for_run(
run.run_id, of_type=DagsterEventType.PIPELINE_ENQUEUED
).records
)
assert len(events) == 1
run_enqueued_data = events[0].event_log_entry.dagster_event.run_enqueued_data
assert run_enqueued_data
assert run_enqueued_data.code_location_name is not None
assert run_enqueued_data.repository_name is not None
def test_submit_run_checks_status(self, instance, coordinator, workspace, remote_job):
run = self.create_run_for_test(instance, remote_job, status=DagsterRunStatus.QUEUED)
coordinator.submit_run(dg.SubmitRunContext(run, workspace))
# check that no enqueue event is reported (the submit run call is a no-op)
assert (
len(
instance.get_records_for_run(
run.run_id, of_type=DagsterEventType.PIPELINE_ENQUEUED
).records
)
== 0
)
def test_cancel_run(self, instance, coordinator, workspace, remote_job):
run = self.create_run_for_test(instance, remote_job, status=DagsterRunStatus.NOT_STARTED)
coordinator.submit_run(dg.SubmitRunContext(run, workspace))
coordinator.cancel_run(run.run_id)
stored_run = instance.get_run_by_id(run.run_id)
assert stored_run.status == DagsterRunStatus.CANCELED
def test_thread_config():
num = 16
with dg.instance_for_test(
overrides={
"run_coordinator": {
"module": "dagster._core.run_coordinator",
"class": "QueuedRunCoordinator",
"config": {
"dequeue_use_threads": True,
"dequeue_num_workers": num,
},
}
}
) as instance:
assert instance.run_coordinator.dequeue_num_workers == num # pyright: ignore[reportAttributeAccessIssue]
| TestQueuedRunCoordinator |
python | kennethreitz__tablib | src/tablib/formats/_df.py | {
"start": 118,
"end": 1124
} | class ____:
title = 'df'
extensions = ('df',)
@classmethod
def detect(cls, stream):
"""Returns True if given stream is a DataFrame."""
if DataFrame is None:
return False
elif isinstance(stream, DataFrame):
return True
try:
DataFrame(stream.read())
return True
except ValueError:
return False
@classmethod
def export_set(cls, dset, index=None):
"""Returns DataFrame representation of DataBook."""
if DataFrame is None:
raise NotImplementedError(
'DataFrame Format requires `pandas` to be installed.'
' Try `pip install "tablib[pandas]"`.')
dataframe = DataFrame(dset.dict, columns=dset.headers)
return dataframe
@classmethod
def import_set(cls, dset, in_stream):
"""Returns dataset from DataFrame."""
dset.wipe()
dset.dict = in_stream.to_dict(orient='records')
| DataFrameFormat |
python | python__mypy | mypyc/codegen/emit.py | {
"start": 3944,
"end": 4028
} | class ____:
"""Describes handling errors in unbox/cast operations."""
| ErrorHandler |
python | Textualize__textual | src/textual/css/styles.py | {
"start": 4973,
"end": 27681
} | class ____:
"""A common base class for Styles and RenderStyles"""
ANIMATABLE = {
"offset",
"padding",
"margin",
"width",
"height",
"min_width",
"min_height",
"max_width",
"max_height",
"auto_color",
"color",
"background",
"background_tint",
"opacity",
"position",
"text_opacity",
"tint",
"scrollbar_color",
"scrollbar_color_hover",
"scrollbar_color_active",
"scrollbar_background",
"scrollbar_background_hover",
"scrollbar_background_active",
"scrollbar_visibility",
"link_color",
"link_background",
"link_color_hover",
"link_background_hover",
"text_wrap",
"text_overflow",
"line_pad",
}
node: DOMNode | None = None
display = StringEnumProperty(VALID_DISPLAY, "block", layout=True, display=True)
"""Set the display of the widget, defining how it's rendered.
Valid values are "block" or "none".
"none" will hide and allow other widgets to fill the space that this widget would occupy.
Set to None to clear any value that was set at runtime.
Raises:
StyleValueError: If an invalid display is specified.
"""
visibility = StringEnumProperty(VALID_VISIBILITY, "visible", layout=True)
"""Set the visibility of the widget.
Valid values are "visible" or "hidden".
"hidden" will hide the widget, but reserve the space for this widget.
If you want to hide the widget and allow another widget to fill the space,
set the display attribute to "none" instead.
Set to None to clear any value that was set at runtime.
Raises:
StyleValueError: If an invalid visibility is specified.
"""
layout = LayoutProperty()
"""Set the layout of the widget, defining how its children are laid out.
Valid values are "grid", "stream", "horizontal", or "vertical" or None to clear any layout
that was set at runtime.
Raises:
MissingLayout: If an invalid layout is specified.
"""
auto_color = BooleanProperty(default=False)
"""Enable automatic picking of best contrasting color."""
color = ColorProperty(Color(255, 255, 255))
"""Set the foreground (text) color of the widget.
Supports `Color` objects but also strings e.g. "red" or "#ff0000".
You can also specify an opacity after a color e.g. "blue 10%"
"""
background = ColorProperty(Color(0, 0, 0, 0))
"""Set the background color of the widget.
Supports `Color` objects but also strings e.g. "red" or "#ff0000"
You can also specify an opacity after a color e.g. "blue 10%"
"""
background_tint = ColorProperty(Color(0, 0, 0, 0))
"""Set a color to tint (blend) with the background.
Supports `Color` objects but also strings e.g. "red" or "#ff0000"
You can also specify an opacity after a color e.g. "blue 10%"
"""
text_style = StyleFlagsProperty()
"""Set the text style of the widget using Rich StyleFlags.
e.g. `"bold underline"` or `"b u strikethrough"`.
"""
opacity = FractionalProperty(children=True)
"""Set the opacity of the widget, defining how it blends with the parent."""
text_opacity = FractionalProperty()
"""Set the opacity of the content within the widget against the widget's background."""
padding = SpacingProperty()
"""Set the padding (spacing between border and content) of the widget."""
margin = SpacingProperty()
"""Set the margin (spacing outside the border) of the widget."""
offset = OffsetProperty()
"""Set the offset of the widget relative to where it would have been otherwise."""
position = StringEnumProperty(VALID_POSITION, "relative")
"""If `relative` offset is applied to widgets current position, if `absolute` it is applied to (0, 0)."""
border = BorderProperty(layout=True)
"""Set the border of the widget e.g. ("round", "green") or "none"."""
border_top = BoxProperty(Color(0, 255, 0))
"""Set the top border of the widget e.g. ("round", "green") or "none"."""
border_right = BoxProperty(Color(0, 255, 0))
"""Set the right border of the widget e.g. ("round", "green") or "none"."""
border_bottom = BoxProperty(Color(0, 255, 0))
"""Set the bottom border of the widget e.g. ("round", "green") or "none"."""
border_left = BoxProperty(Color(0, 255, 0))
"""Set the left border of the widget e.g. ("round", "green") or "none"."""
border_title_align = StringEnumProperty(VALID_ALIGN_HORIZONTAL, "left")
"""The alignment of the border title text."""
border_subtitle_align = StringEnumProperty(VALID_ALIGN_HORIZONTAL, "right")
"""The alignment of the border subtitle text."""
outline = BorderProperty(layout=False)
"""Set the outline of the widget e.g. ("round", "green") or "none".
The outline is drawn *on top* of the widget, rather than around it like border.
"""
outline_top = BoxProperty(Color(0, 255, 0))
"""Set the top outline of the widget e.g. ("round", "green") or "none"."""
outline_right = BoxProperty(Color(0, 255, 0))
"""Set the right outline of the widget e.g. ("round", "green") or "none"."""
outline_bottom = BoxProperty(Color(0, 255, 0))
"""Set the bottom outline of the widget e.g. ("round", "green") or "none"."""
outline_left = BoxProperty(Color(0, 255, 0))
"""Set the left outline of the widget e.g. ("round", "green") or "none"."""
keyline = KeylineProperty()
"""Keyline parameters."""
box_sizing = StringEnumProperty(VALID_BOX_SIZING, "border-box", layout=True)
"""Box sizing method ("border-box" or "conetnt-box")"""
width = ScalarProperty(percent_unit=Unit.WIDTH)
"""Set the width of the widget."""
height = ScalarProperty(percent_unit=Unit.HEIGHT)
"""Set the height of the widget."""
min_width = ScalarProperty(percent_unit=Unit.WIDTH, allow_auto=False)
"""Set the minimum width of the widget."""
min_height = ScalarProperty(percent_unit=Unit.HEIGHT, allow_auto=False)
"""Set the minimum height of the widget."""
max_width = ScalarProperty(percent_unit=Unit.WIDTH, allow_auto=False)
"""Set the maximum width of the widget."""
max_height = ScalarProperty(percent_unit=Unit.HEIGHT, allow_auto=False)
"""Set the maximum height of the widget."""
dock = DockProperty()
"""Set which edge of the parent to dock this widget to e.g. "top", "left", "right", "bottom", "none".
"""
split = SplitProperty()
overflow_x = OverflowProperty(VALID_OVERFLOW, "hidden")
"""Control what happens when the content extends horizontally beyond the widget's width.
Valid values are "scroll", "hidden", or "auto".
"""
overflow_y = OverflowProperty(VALID_OVERFLOW, "hidden")
"""Control what happens when the content extends vertically beyond the widget's height.
Valid values are "scroll", "hidden", or "auto".
"""
layer = NameProperty()
layers = NameListProperty()
transitions = TransitionsProperty()
tint = ColorProperty("transparent")
"""Set the tint of the widget. This allows you apply an opaque color above the widget.
You can specify an opacity after a color e.g. "blue 10%"
"""
scrollbar_color = ScrollbarColorProperty("ansi_bright_magenta")
"""Set the color of the handle of the scrollbar."""
scrollbar_color_hover = ScrollbarColorProperty("ansi_yellow")
"""Set the color of the handle of the scrollbar when hovered."""
scrollbar_color_active = ScrollbarColorProperty("ansi_bright_yellow")
"""Set the color of the handle of the scrollbar when active (being dragged)."""
scrollbar_corner_color = ScrollbarColorProperty("#666666")
"""Set the color of the space between the horizontal and vertical scrollbars."""
scrollbar_background = ScrollbarColorProperty("#555555")
"""Set the background color of the scrollbar (the track that the handle sits on)."""
scrollbar_background_hover = ScrollbarColorProperty("#444444")
"""Set the background color of the scrollbar when hovered."""
scrollbar_background_active = ScrollbarColorProperty("black")
"""Set the background color of the scrollbar when active (being dragged)."""
scrollbar_gutter = StringEnumProperty(
VALID_SCROLLBAR_GUTTER, "auto", layout=True, refresh_children=True
)
"""Set to "stable" to reserve space for the scrollbar even when it's not visible.
This can prevent content from shifting when a scrollbar appears.
"""
scrollbar_size_vertical = IntegerProperty(default=2, layout=True)
"""Set the width of the vertical scrollbar (measured in cells)."""
scrollbar_size_horizontal = IntegerProperty(default=1, layout=True)
"""Set the height of the horizontal scrollbar (measured in cells)."""
scrollbar_visibility = StringEnumProperty(
VALID_SCROLLBAR_VISIBILITY, "visible", layout=True
)
"""Sets the visibility of the scrollbar."""
align_horizontal = StringEnumProperty(
VALID_ALIGN_HORIZONTAL, "left", layout=True, refresh_children=True
)
align_vertical = StringEnumProperty(
VALID_ALIGN_VERTICAL, "top", layout=True, refresh_children=True
)
align = AlignProperty()
content_align_horizontal = StringEnumProperty(VALID_ALIGN_HORIZONTAL, "left")
content_align_vertical = StringEnumProperty(VALID_ALIGN_VERTICAL, "top")
content_align = AlignProperty()
grid_rows = ScalarListProperty(percent_unit=Unit.HEIGHT, refresh_children=True)
grid_columns = ScalarListProperty(percent_unit=Unit.WIDTH, refresh_children=True)
grid_size_columns = IntegerProperty(default=1, layout=True, refresh_children=True)
grid_size_rows = IntegerProperty(default=0, layout=True, refresh_children=True)
grid_gutter_horizontal = IntegerProperty(
default=0, layout=True, refresh_children=True
)
grid_gutter_vertical = IntegerProperty(
default=0, layout=True, refresh_children=True
)
row_span = IntegerProperty(default=1, layout=True)
column_span = IntegerProperty(default=1, layout=True)
text_align: StringEnumProperty[TextAlign] = StringEnumProperty(
VALID_TEXT_ALIGN, "start"
)
link_color = ColorProperty("transparent")
auto_link_color = BooleanProperty(False)
link_background = ColorProperty("transparent")
link_style = StyleFlagsProperty()
link_color_hover = ColorProperty("transparent")
auto_link_color_hover = BooleanProperty(False)
link_background_hover = ColorProperty("transparent")
link_style_hover = StyleFlagsProperty()
auto_border_title_color = BooleanProperty(default=False)
border_title_color = ColorProperty(Color(255, 255, 255, 0))
border_title_background = ColorProperty(Color(0, 0, 0, 0))
border_title_style = StyleFlagsProperty()
auto_border_subtitle_color = BooleanProperty(default=False)
border_subtitle_color = ColorProperty(Color(255, 255, 255, 0))
border_subtitle_background = ColorProperty(Color(0, 0, 0, 0))
border_subtitle_style = StyleFlagsProperty()
hatch = HatchProperty()
"""Add a hatched background effect e.g. ("right", "yellow") or "none" to use no hatch.
"""
overlay = StringEnumProperty(
VALID_OVERLAY, "none", layout=True, refresh_parent=True
)
constrain_x: StringEnumProperty[Constrain] = StringEnumProperty(
VALID_CONSTRAIN, "none"
)
constrain_y: StringEnumProperty[Constrain] = StringEnumProperty(
VALID_CONSTRAIN, "none"
)
text_wrap: StringEnumProperty[TextWrap] = StringEnumProperty(
VALID_TEXT_WRAP, "wrap"
)
text_overflow: StringEnumProperty[TextOverflow] = StringEnumProperty(
VALID_TEXT_OVERFLOW, "fold"
)
expand: StringEnumProperty[Expand] = StringEnumProperty(VALID_EXPAND, "greedy")
line_pad = IntegerProperty(default=0, layout=True)
"""Padding added to left and right of lines."""
def __textual_animation__(
self,
attribute: str,
start_value: object,
value: object,
start_time: float,
duration: float | None,
speed: float | None,
easing: EasingFunction,
on_complete: CallbackType | None = None,
level: AnimationLevel = "full",
) -> ScalarAnimation | None:
if self.node is None:
return None
# Check we are animating a Scalar or Scalar offset
if isinstance(start_value, (Scalar, ScalarOffset)):
# If destination is a number, we can convert that to a scalar
if isinstance(value, (int, float)):
value = Scalar(value, Unit.CELLS, Unit.CELLS)
# We can only animate to Scalar
if not isinstance(value, (Scalar, ScalarOffset)):
return None
from textual.widget import Widget
assert isinstance(self.node, Widget)
return ScalarAnimation(
self.node,
self,
start_time,
attribute,
value,
duration=duration,
speed=speed,
easing=easing,
on_complete=(
partial(self.node.app.call_later, on_complete)
if on_complete is not None
else None
),
level=level,
)
return None
def __eq__(self, styles: object) -> bool:
"""Check that Styles contains the same rules."""
if not isinstance(styles, StylesBase):
return NotImplemented
return self.get_rules() == styles.get_rules()
def __getitem__(self, key: str) -> object:
if key not in RULE_NAMES_SET:
raise KeyError(key)
return getattr(self, key)
def get(self, key: str, default: object | None = None) -> object:
return getattr(self, key) if key in RULE_NAMES_SET else default
def __len__(self) -> int:
return len(RULE_NAMES)
def __iter__(self) -> Iterator[str]:
return iter(RULE_NAMES)
def __contains__(self, key: object) -> bool:
return key in RULE_NAMES_SET
def keys(self) -> Iterable[str]:
return RULE_NAMES
def values(self) -> Iterable[object]:
for key in RULE_NAMES:
yield getattr(self, key)
def items(self) -> Iterable[tuple[str, object]]:
for key in RULE_NAMES:
yield (key, getattr(self, key))
@property
def gutter(self) -> Spacing:
"""Get space around widget.
Returns:
Space around widget content.
"""
return self.padding + self.border.spacing
@property
def auto_dimensions(self) -> bool:
"""Check if width or height are set to 'auto'."""
has_rule = self.has_rule
return (has_rule("width") and self.width.is_auto) or ( # type: ignore
has_rule("height") and self.height.is_auto # type: ignore
)
@property
def is_relative_width(self, _relative_units={Unit.FRACTION, Unit.PERCENT}) -> bool:
"""Does the node have a relative width?"""
width = self.width
return width is not None and width.unit in _relative_units
@property
def is_relative_height(self, _relative_units={Unit.FRACTION, Unit.PERCENT}) -> bool:
"""Does the node have a relative width?"""
height = self.height
return height is not None and height.unit in _relative_units
@property
def is_auto_width(self, _auto=Unit.AUTO) -> bool:
"""Does the node have automatic width?"""
width = self.width
return width is not None and width.unit == _auto
@property
def is_auto_height(self, _auto=Unit.AUTO) -> bool:
"""Does the node have automatic height?"""
height = self.height
return height is not None and height.unit == _auto
@property
def is_dynamic_height(
self, _dynamic_units={Unit.AUTO, Unit.FRACTION, Unit.PERCENT}
) -> bool:
"""Does the node have a dynamic (not fixed) height?"""
height = self.height
return height is not None and height.unit in _dynamic_units
@property
def is_docked(self) -> bool:
"""Is the node docked?"""
return self.dock != "none"
@property
def is_split(self) -> bool:
"""Is the node split?"""
return self.split != "none"
def has_rule(self, rule_name: str) -> bool:
"""Check if a rule is set on this Styles object.
Args:
rule_name: Rule name.
Returns:
``True`` if the rules is present, otherwise ``False``.
"""
raise NotImplementedError()
def clear_rule(self, rule_name: str) -> bool:
"""Removes the rule from the Styles object, as if it had never been set.
Args:
rule_name: Rule name.
Returns:
``True`` if a rule was cleared, or ``False`` if the rule is already not set.
"""
raise NotImplementedError()
def get_rules(self) -> RulesMap:
"""Get the rules in a mapping.
Returns:
A TypedDict of the rules.
"""
raise NotImplementedError()
def set_rule(self, rule_name: str, value: object | None) -> bool:
"""Set a rule.
Args:
rule_name: Rule name.
value: New rule value.
Returns:
``True`` if the rule changed, otherwise ``False``.
"""
raise NotImplementedError()
def get_rule(self, rule_name: str, default: object = None) -> object:
"""Get an individual rule.
Args:
rule_name: Name of rule.
default: Default if rule does not exists.
Returns:
Rule value or default.
"""
raise NotImplementedError()
def refresh(
self,
*,
layout: bool = False,
children: bool = False,
parent: bool = False,
repaint: bool = True,
) -> None:
"""Mark the styles as requiring a refresh.
Args:
layout: Also require a layout.
children: Also refresh children.
parent: Also refresh the parent.
repaint: Repaint the widgets.
"""
def reset(self) -> None:
"""Reset the rules to initial state."""
def merge(self, other: StylesBase) -> None:
"""Merge values from another Styles.
Args:
other: A Styles object.
"""
def merge_rules(self, rules: RulesMap) -> None:
"""Merge rules into Styles.
Args:
rules: A mapping of rules.
"""
def get_render_rules(self) -> RulesMap:
"""Get rules map with defaults."""
# Get a dictionary of rules, going through the properties
rules = dict(zip(RULE_NAMES, _rule_getter(self)))
return cast(RulesMap, rules)
@classmethod
def is_animatable(cls, rule: str) -> bool:
"""Check if a given rule may be animated.
Args:
rule: Name of the rule.
Returns:
``True`` if the rule may be animated, otherwise ``False``.
"""
return rule in cls.ANIMATABLE
@classmethod
def parse(
cls, css: str, read_from: CSSLocation, *, node: DOMNode | None = None
) -> Styles:
"""Parse CSS and return a Styles object.
Args:
css: Textual CSS.
read_from: Location where the CSS was read from.
node: Node to associate with the Styles.
Returns:
A Styles instance containing result of parsing CSS.
"""
from textual.css.parse import parse_declarations
styles = parse_declarations(css, read_from)
styles.node = node
return styles
def _get_transition(self, key: str) -> Transition | None:
"""Get a transition.
Args:
key: Transition key.
Returns:
Transition object or None it no transition exists.
"""
if key in self.ANIMATABLE:
return self.transitions.get(key, None)
else:
return None
def _align_width(self, width: int, parent_width: int) -> int:
"""Align the width dimension.
Args:
width: Width of the content.
parent_width: Width of the parent container.
Returns:
An offset to add to the X coordinate.
"""
offset_x = 0
align_horizontal = self.align_horizontal
if align_horizontal != "left":
if align_horizontal == "center":
offset_x = (parent_width - width) // 2
else:
offset_x = parent_width - width
return offset_x
def _align_height(self, height: int, parent_height: int) -> int:
"""Align the height dimensions
Args:
height: Height of the content.
parent_height: Height of the parent container.
Returns:
An offset to add to the Y coordinate.
"""
offset_y = 0
align_vertical = self.align_vertical
if align_vertical != "top":
if align_vertical == "middle":
offset_y = (parent_height - height) // 2
else:
offset_y = parent_height - height
return offset_y
def _align_size(self, child: tuple[int, int], parent: tuple[int, int]) -> Offset:
"""Align a size according to alignment rules.
Args:
child: The size of the child (width, height)
parent: The size of the parent (width, height)
Returns:
Offset required to align the child.
"""
width, height = child
parent_width, parent_height = parent
return Offset(
self._align_width(width, parent_width),
self._align_height(height, parent_height),
)
@property
def partial_rich_style(self) -> Style:
"""Get the style properties associated with this node only (not including parents in the DOM).
Returns:
Rich Style object.
"""
style = Style(
color=(
self.color.rich_color
if self.has_rule("color") and self.color.a > 0
else None
),
bgcolor=(
self.background.rich_color
if self.has_rule("background") and self.background.a > 0
else None
),
)
style += self.text_style
return style
@rich.repr.auto
@dataclass
| StylesBase |
python | run-llama__llama_index | llama-index-instrumentation/src/llama_index_instrumentation/dispatcher.py | {
"start": 14626,
"end": 14953
} | class ____:
def __init__(self, root: Dispatcher) -> None:
self.dispatchers: Dict[str, Dispatcher] = {root.name: root}
def add_dispatcher(self, d: Dispatcher) -> None:
if d.name in self.dispatchers:
pass
else:
self.dispatchers[d.name] = d
Dispatcher.model_rebuild()
| Manager |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.