language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | pytorch__pytorch | torch/_inductor/ir.py | {
"start": 72758,
"end": 80146
} | class ____(MultiOutputReduction):
@classmethod
def create( # type: ignore[override]
cls,
device: torch.device,
dtype: torch.dtype,
inner_fns: Sequence[Callable[..., Any]],
ranges: list[Integer],
reduction_ranges: list[Integer],
reduction_type: ReductionType,
reduction_hint: ReductionHint = ReductionHint.DEFAULT,
) -> Sequence[Union[TensorBox, ShapeAsConstantBuffer]]:
assert reduction_type in ("welford_reduce", "welford_combine")
reduction_numel = V.graph.sizevars.simplify(sympy_product(reduction_ranges))
def const(val: int) -> Union[TensorBox, ShapeAsConstantBuffer]:
def inner_fn(idx: Sequence[Expr]) -> OpsValue:
return ops.constant(
val,
dtype,
)
return Pointwise.create(
device=device,
dtype=dtype,
inner_fn=inner_fn,
ranges=list(ranges),
)
if reduction_numel == 0:
mean = const(0)
m2 = const(0)
weight = const(0)
return mean, m2, weight
if reduction_numel == 1:
def copy(
loader: Callable[[Sequence[Expr], Sequence[Expr]], OpsValue],
) -> Union[TensorBox, ShapeAsConstantBuffer]:
def inner_fn(idx: Sequence[Expr]) -> OpsValue:
reduction_index = [sympy.S.Zero for _ in reduction_ranges]
return loader(idx, reduction_index)
return Pointwise.create(
device=device,
dtype=dtype,
inner_fn=inner_fn,
ranges=list(ranges),
)
if reduction_type == "welford_reduce":
return copy(inner_fns[0]), const(0), const(1)
else:
return tuple(copy(fn) for fn in inner_fns)
# TODO: Unrolled reduction
# if (
# isinstance(reduction_numel, Integer)
# and V.graph.sizevars.size_hint(reduction_numel)
# < config.unroll_reductions_threshold
# and sympy_product(ranges) != 1
# ):
# return Pointwise.create(
# device,
# dst_dtype,
# cls._unroll_reduction_fn(
# inner_fn, reduction_ranges, reduction_type, src_dtype,
# ),
# ranges,
# )
# triton doesn't support reduce to single element well, so break it up
hint, split = Reduction.num_splits(
device,
dtype,
dtype,
inner_fns[0],
ranges,
reduction_ranges,
reduction_type=reduction_type,
reduction_numel=reduction_numel,
)
# intermediate reduction in split can contain complex indexing,
# and num_splits will fail to correctly set the hint
# reuse the passed hint if available
if reduction_hint == ReductionHint.DEFAULT:
reduction_hint = hint
if split > 1:
# triton doesn't support reduce to single element well, so break it up
return cls.create_multilayer(
device,
dtype,
inner_fns,
ranges,
reduction_ranges,
reduction_type,
split,
reduction_hint,
)
results = [
TensorBox.create(
WelfordReduction(
device,
dtype,
inner_fns,
ranges,
reduction_ranges,
reduction_type,
dtype,
reduction_hint,
output_idx,
)
)
for output_idx in range(3)
]
for t in results:
t.realize()
return results
@staticmethod
def default_value(
reduction_type: str, dtype: torch.dtype
) -> Union[_NumLike, Sequence[_NumLike]]:
return (0, 0, 0)
@classmethod
def create_multilayer( # type: ignore[override]
cls,
device: torch.device,
dtype: torch.dtype,
inner_fns: Sequence[Callable[..., Any]],
ranges: list[Integer],
reduction_ranges: list[Integer],
reduction_type: ReductionType,
split: _IntLike,
reduction_hint: ReductionHint,
) -> Sequence[Union[TensorBox, ShapeAsConstantBuffer]]:
"""
Break a large reduction up into multiple smaller reductions
recursively
"""
reduction_numel = sympy_product(reduction_ranges)
need_mask = not V.graph.sizevars.statically_known_true(
sympy.Eq(reduction_numel % split, 0)
)
if need_mask and reduction_type != "welford_combine":
# If we need mask, then "welford_reduce" doesn't work because
# masked inputs shouldn't count towards the welford weight
def constant(
idx: Sequence[Expr], reduction_idx: Sequence[Expr], value: int
) -> OpsValue:
return ops.constant(value, dtype)
return cls.create_multilayer(
device=device,
dtype=dtype,
inner_fns=(
inner_fns[0],
partial(constant, value=0),
partial(constant, value=1),
),
ranges=ranges,
reduction_ranges=reduction_ranges,
reduction_type="welford_combine",
split=split,
reduction_hint=reduction_hint,
)
block_size = FloorDiv(reduction_numel + (split - 1), split)
intermediates = WelfordReduction.create(
device,
dtype,
tuple(
cls._multilayer_wrap_loader(
loader,
reduction_ranges,
reduction_numel,
split,
block_size,
default=0,
)
for loader in inner_fns
),
[*ranges, split],
[block_size],
reduction_type,
reduction_hint,
)
for i in intermediates:
i.realize()
def intermediate_loader_fn(
index: Sequence[Expr],
reduction_index: Sequence[Expr],
loader: Callable[[Sequence[Expr]], OpsValue],
) -> OpsValue:
return loader([*index, *reduction_index])
numel_hint = V.graph.sizevars.size_hint(sympy_product(ranges))
reduction_hint = cls._multilayer_second_step_hint(
split, numel_hint, reduction_hint
)
return WelfordReduction.create(
device,
dtype,
tuple(
partial(intermediate_loader_fn, loader=i.make_loader())
for i in intermediates
),
ranges,
[split],
# welford_reduce turns one input into three outputs, which are combined with welford_combine
"welford_combine",
reduction_hint,
)
@ir_dataclass
| WelfordReduction |
python | pytorch__pytorch | torch/testing/_internal/common_quantization.py | {
"start": 77682,
"end": 78557
} | class ____(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.sub1 = LinearReluModel()
self.sub2 = TwoLayerLinearModel()
self.fc3 = QuantWrapper(torch.nn.Linear(5, 5).to(dtype=torch.float))
self.fc3.qconfig = default_qconfig
self.sub2.qconfig = default_qconfig
custom_options = {"dtype": torch.quint8, "qscheme": torch.per_tensor_affine}
custom_qconfig = QConfig(
activation=default_observer.with_args(**custom_options),
weight=default_weight_observer,
)
self.sub2.fc1.qconfig = custom_qconfig
self.sub2.fc1 = QuantWrapper(self.sub2.fc1)
self.sub2.fc2 = QuantWrapper(self.sub2.fc2)
def forward(self, x):
x = self.sub1(x)
x = self.sub2(x)
x = self.fc3(x)
return x
| AnnotatedCustomConfigNestedModel |
python | bokeh__bokeh | src/bokeh/core/has_props.py | {
"start": 27744,
"end": 30307
} | class ____(TypedDict):
type: Literal["model"]
name: str
extends: NotRequired[Ref | None]
properties: NotRequired[list[PropertyDef]]
overrides: NotRequired[list[OverrideDef]]
def _HasProps_to_serializable(cls: type[HasProps], serializer: Serializer) -> Ref | ModelDef:
from ..model import DataModel, Model
from .types import ID
ref = Ref(id=ID(cls.__qualified_model__))
serializer.add_ref(cls, ref)
if not is_DataModel(cls):
return ref
# TODO: consider supporting mixin models
bases: list[type[HasProps]] = [ base for base in cls.__bases__ if issubclass(base, Model) and base != DataModel ]
if len(bases) == 0:
extends = None
elif len(bases) == 1:
[base] = bases
extends = serializer.encode(base)
else:
serializer.error("multiple bases are not supported")
properties: list[PropertyDef] = []
overrides: list[OverrideDef] = []
# TODO: don't use unordered sets
for prop_name in cls.__properties__:
descriptor = cls.lookup(prop_name)
kind = "Any" # TODO: serialize kinds
default = descriptor.property._default
if default is Undefined:
prop_def = PropertyDef(name=prop_name, kind=kind)
else:
if descriptor.is_unstable(default):
default = default()
prop_def = PropertyDef(name=prop_name, kind=kind, default=serializer.encode(default))
properties.append(prop_def)
for prop_name, default in getattr(cls, "__overridden_defaults__", {}).items():
overrides.append(OverrideDef(name=prop_name, default=serializer.encode(default)))
modeldef = ModelDef(
type="model",
name=cls.__qualified_model__,
)
if extends is not None:
modeldef["extends"] = extends
if properties:
modeldef["properties"] = properties
if overrides:
modeldef["overrides"] = overrides
return modeldef
Serializer.register(MetaHasProps, _HasProps_to_serializable)
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
_ABSTRACT_ADMONITION = '''
.. note::
This is an abstract base class used to help organize the hierarchy of Bokeh
model types. **It is not useful to instantiate on its own.**
'''
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
| ModelDef |
python | huggingface__transformers | src/transformers/testing_utils.py | {
"start": 95657,
"end": 97844
} | class ____(doctest.DocTestParser):
"""
Overwrites the DocTestParser from doctest to properly parse the codeblocks that are formatted with black. This
means that there are no extra lines at the end of our snippets. The `# doctest: +IGNORE_RESULT` marker is also
added anywhere a `load_dataset` call is made as a print would otherwise fail the corresponding line.
Tests involving cuda are skipped base on a naive pattern that should be updated if it is not enough.
"""
# This regular expression is used to find doctest examples in a
# string. It defines three groups: `source` is the source code
# (including leading indentation and prompts); `indent` is the
# indentation of the first (PS1) line of the source code; and
# `want` is the expected output (including leading indentation).
# fmt: off
_EXAMPLE_RE = re.compile(r'''
# Source consists of a PS1 line followed by zero or more PS2 lines.
(?P<source>
(?:^(?P<indent> [ ]*) >>> .*) # PS1 line
(?:\n [ ]* \.\.\. .*)*) # PS2 lines
\n?
# Want consists of any non-blank lines that do not start with PS1.
(?P<want> (?:(?![ ]*$) # Not a blank line
(?![ ]*>>>) # Not a line starting with PS1
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
(?:(?!```).)* # Match any character except '`' until a '```' is found (this is specific to HF because black removes the last line)
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
(?:\n|$) # Match a new line or end of string
)*)
''', re.MULTILINE | re.VERBOSE
)
# fmt: on
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
skip_cuda_tests: bool = bool(os.environ.get("SKIP_CUDA_DOCTEST", "0"))
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
def parse(self, string, name="<string>"):
"""
Overwrites the `parse` method to incorporate a skip for CUDA tests, and remove logs and dataset prints before
calling `super().parse`
"""
string = preprocess_string(string, self.skip_cuda_tests)
return super().parse(string, name)
| HfDocTestParser |
python | pypa__warehouse | tests/unit/accounts/test_core.py | {
"start": 3760,
"end": 9060
} | class ____:
def test_unauthenticated_userid(self):
request = pretend.stub()
assert accounts._unauthenticated_userid(request) is None
def test_includeme(monkeypatch):
multi_policy_obj = pretend.stub()
multi_policy_cls = pretend.call_recorder(lambda ps: multi_policy_obj)
monkeypatch.setattr(accounts, "MultiSecurityPolicy", multi_policy_cls)
session_policy_obj = pretend.stub()
session_policy_cls = pretend.call_recorder(lambda: session_policy_obj)
monkeypatch.setattr(accounts, "SessionSecurityPolicy", session_policy_cls)
basic_policy_obj = pretend.stub()
basic_policy_cls = pretend.call_recorder(lambda: basic_policy_obj)
monkeypatch.setattr(accounts, "BasicAuthSecurityPolicy", basic_policy_cls)
macaroon_policy_obj = pretend.stub()
macaroon_policy_cls = pretend.call_recorder(lambda: macaroon_policy_obj)
monkeypatch.setattr(accounts, "MacaroonSecurityPolicy", macaroon_policy_cls)
config = pretend.stub(
registry=pretend.stub(
settings={
"warehouse.account.user_login_ratelimit_string": "10 per 5 minutes",
"warehouse.account.ip_login_ratelimit_string": "10 per 5 minutes",
"warehouse.account.global_login_ratelimit_string": "1000 per 5 minutes",
"warehouse.account.2fa_user_ratelimit_string": "5 per 5 minutes, 20 per hour, 50 per day", # noqa: E501
"warehouse.account.2fa_ip_ratelimit_string": "10 per 5 minutes, 50 per hour", # noqa: E501
"warehouse.account.email_add_ratelimit_string": "2 per day",
"warehouse.account.verify_email_ratelimit_string": "3 per 6 hours",
"warehouse.account.password_reset_ratelimit_string": "5 per day",
"warehouse.account.accounts_search_ratelimit_string": "100 per hour",
}
),
register_service_factory=pretend.call_recorder(
lambda factory, iface, name=None: None
),
add_request_method=pretend.call_recorder(lambda f, name, reify=False: None),
set_security_policy=pretend.call_recorder(lambda p: None),
maybe_dotted=pretend.call_recorder(lambda path: path),
add_route_predicate=pretend.call_recorder(lambda name, cls: None),
add_periodic_task=pretend.call_recorder(lambda *a, **kw: None),
)
accounts.includeme(config)
assert config.register_service_factory.calls == [
pretend.call(database_login_factory, IUserService),
pretend.call(
TokenServiceFactory(name="password"), ITokenService, name="password"
),
pretend.call(TokenServiceFactory(name="email"), ITokenService, name="email"),
pretend.call(
TokenServiceFactory(name="two_factor"), ITokenService, name="two_factor"
),
pretend.call(
TokenServiceFactory(name="confirm_login"),
ITokenService,
name="confirm_login",
),
pretend.call(
TokenServiceFactory(name="remember_device"),
ITokenService,
name="remember_device",
),
pretend.call(
HaveIBeenPwnedPasswordBreachedService.create_service,
IPasswordBreachedService,
),
pretend.call(
HaveIBeenPwnedEmailBreachedService.create_service,
IEmailBreachedService,
),
pretend.call(NullDomainStatusService.create_service, IDomainStatusService),
pretend.call(RateLimit("10 per 5 minutes"), IRateLimiter, name="user.login"),
pretend.call(RateLimit("10 per 5 minutes"), IRateLimiter, name="ip.login"),
pretend.call(
RateLimit("1000 per 5 minutes"), IRateLimiter, name="global.login"
),
pretend.call(
RateLimit("5 per 5 minutes, 20 per hour, 50 per day"),
IRateLimiter,
name="2fa.user",
),
pretend.call(
RateLimit("10 per 5 minutes, 50 per hour"), IRateLimiter, name="2fa.ip"
),
pretend.call(RateLimit("2 per day"), IRateLimiter, name="email.add"),
pretend.call(RateLimit("5 per day"), IRateLimiter, name="password.reset"),
pretend.call(RateLimit("3 per 6 hours"), IRateLimiter, name="email.verify"),
pretend.call(RateLimit("100 per hour"), IRateLimiter, name="accounts.search"),
]
assert config.add_request_method.calls == [
pretend.call(accounts._user, name="user", reify=True),
pretend.call(accounts._oidc_publisher, name="oidc_publisher", reify=True),
pretend.call(accounts._oidc_claims, name="oidc_claims", reify=True),
pretend.call(
accounts._organization_access, name="organization_access", reify=True
),
pretend.call(accounts._unauthenticated_userid, name="_unauthenticated_userid"),
]
assert config.set_security_policy.calls == [pretend.call(multi_policy_obj)]
assert multi_policy_cls.calls == [
pretend.call(
[
session_policy_obj,
basic_policy_obj,
macaroon_policy_obj,
]
)
]
assert (
pretend.call(crontab(minute="*/20"), compute_user_metrics)
in config.add_periodic_task.calls
)
| TestUnauthenticatedUserid |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 933571,
"end": 934315
} | class ____(sgqlc.types.relay.Connection):
"""The connection type for User."""
__schema__ = github_schema
__field_names__ = ("edges", "nodes", "page_info", "total_count")
edges = sgqlc.types.Field(sgqlc.types.list_of("RepositoryCollaboratorEdge"), graphql_name="edges")
"""A list of edges."""
nodes = sgqlc.types.Field(sgqlc.types.list_of("User"), graphql_name="nodes")
"""A list of nodes."""
page_info = sgqlc.types.Field(sgqlc.types.non_null(PageInfo), graphql_name="pageInfo")
"""Information to aid in pagination."""
total_count = sgqlc.types.Field(sgqlc.types.non_null(Int), graphql_name="totalCount")
"""Identifies the total count of items in the connection."""
| RepositoryCollaboratorConnection |
python | jazzband__django-formtools | tests/tests.py | {
"start": 474,
"end": 1196
} | class ____(preview.FormPreview):
def parse_params(self, request, *args, **kwargs):
self.state['user'] = request.user
def get_context(self, request, form):
context = super().get_context(request, form)
context.update({'custom_context': True})
return context
def get_initial(self, request):
return {'field1': 'Works!'}
def done(self, request, cleaned_data):
return http.HttpResponse(success_string)
@override_settings(
TEMPLATES=[{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(os.path.dirname(__file__), 'templates')],
'APP_DIRS': True,
}],
ROOT_URLCONF='tests.urls',
)
| TestFormPreview |
python | doocs__leetcode | solution/0900-0999/0943.Find the Shortest Superstring/Solution.py | {
"start": 0,
"end": 1374
} | class ____:
def shortestSuperstring(self, words: List[str]) -> str:
n = len(words)
g = [[0] * n for _ in range(n)]
for i, a in enumerate(words):
for j, b in enumerate(words):
if i != j:
for k in range(min(len(a), len(b)), 0, -1):
if a[-k:] == b[:k]:
g[i][j] = k
break
dp = [[0] * n for _ in range(1 << n)]
p = [[-1] * n for _ in range(1 << n)]
for i in range(1 << n):
for j in range(n):
if (i >> j) & 1:
pi = i ^ (1 << j)
for k in range(n):
if (pi >> k) & 1:
v = dp[pi][k] + g[k][j]
if v > dp[i][j]:
dp[i][j] = v
p[i][j] = k
j = 0
for i in range(n):
if dp[-1][i] > dp[-1][j]:
j = i
arr = [j]
i = (1 << n) - 1
while p[i][j] != -1:
i, j = i ^ (1 << j), p[i][j]
arr.append(j)
arr = arr[::-1]
vis = set(arr)
arr.extend([j for j in range(n) if j not in vis])
ans = [words[arr[0]]] + [words[j][g[i][j] :] for i, j in pairwise(arr)]
return ''.join(ans)
| Solution |
python | sqlalchemy__sqlalchemy | test/sql/test_returning.py | {
"start": 23355,
"end": 24419
} | class ____(fixtures.TablesTest, AssertsExecutionResults):
"""test returning() works with columns that define 'key'."""
__requires__ = ("insert_returning",)
__sparse_driver_backend__ = True
@classmethod
def define_tables(cls, metadata):
Table(
"returning_tbl",
metadata,
Column(
"id",
Integer,
primary_key=True,
key="foo_id",
test_needs_autoincrement=True,
),
Column("data", String(20)),
)
@testing.exclude("postgresql", "<", (8, 2), "8.2+ feature")
def test_insert(self, connection):
table = self.tables.returning_tbl
result = connection.execute(
table.insert().returning(table.c.foo_id), dict(data="somedata")
)
row = result.first()._mapping
assert row[table.c.foo_id] == row["id"] == 1
result = connection.execute(table.select()).first()._mapping
assert row[table.c.foo_id] == row["id"] == 1
| KeyReturningTest |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-shopify/source_shopify/shopify_graphql/bulk/query.py | {
"start": 2253,
"end": 6814
} | class ____:
config: Mapping[str, Any]
parent_stream_cursor_alias: Optional[str] = None
@property
def shop_id(self) -> int:
return self.config.get("shop_id")
@property
def tools(self) -> BulkTools:
return BulkTools()
@property
@abstractmethod
def query_name(self) -> str:
"""
Defines the root graph node name to fetch from: https://shopify.dev/docs/api/admin-graphql
"""
@property
def record_composition(self) -> Optional[Mapping[str, Any]]:
"""
Example:
{
"new_record": "Collection", // the GQL Typename of the parent entity
"record_components": [
"CollectionPublication" // each `collection` has List `publications`
],
}
"""
return {}
@property
def sort_key(self) -> Optional[str]:
"""
The field name by which the records are ASC sorted, if defined.
"""
return None
@property
def supports_checkpointing(self) -> bool:
"""
The presence of `sort_key = "UPDATED_AT"` for a query instance, usually means,
the server-side BULK Job results are fetched and ordered correctly, suitable for checkpointing.
"""
return self.sort_key == "UPDATED_AT"
@property
def query_nodes(self) -> Optional[Union[List[Field], List[str]]]:
"""
Defines the fields for final graph selection.
https://shopify.dev/docs/api/admin-graphql
"""
return ["__typename", "id"]
def inject_parent_cursor_field(self, nodes: List[Field], key: str = "updatedAt", index: int = 2) -> List[Field]:
if self.parent_stream_cursor_alias:
# inject parent cursor key as alias to the `updatedAt` parent cursor field
nodes.insert(index, Field(name=key, alias=self.parent_stream_cursor_alias))
return nodes
def get(self, filter_field: Optional[str] = None, start: Optional[str] = None, end: Optional[str] = None) -> str:
# define filter query string, if passed
filter_query = f"{filter_field}:>='{start}' AND {filter_field}:<='{end}'" if filter_field else None
# building query
query: Query = self.query(filter_query)
# resolving
return self.resolve(query)
def query(self, filter_query: Optional[str] = None) -> Query:
"""
Overide this method, if you need to customize query build logic.
Output example to BULK query `<query_name>` with `filter query`:
{
<query_name>(query: "<filter_query>") {
edges {
node {
id
}
}
}
}
"""
# return the constructed query operation
return self.build(self.query_name, self.query_nodes, filter_query)
def build(
self,
name: str,
edges: Optional[Union[List[Field], List[InlineFragment], Field, InlineFragment]] = None,
filter_query: Optional[str] = None,
additional_query_args: Optional[Mapping[str, Any]] = None,
) -> Query:
"""
Defines the root of the graph with edges.
"""
query_args: List[Argument] = []
# constructing arguments
if filter_query:
query_args.append(Argument(name="query", value=f'"{filter_query}"'))
if self.sort_key:
query_args.append(Argument(name="sortKey", value=self.sort_key))
if additional_query_args:
for k, v in additional_query_args.items():
query_args.append(Argument(name=k, value=v))
# constructing edges
query_fields = [
Field(name="edges", fields=[Field(name="node", fields=edges if edges else ["id"])]),
]
# return constucted query
return Query(name=name, arguments=query_args, fields=query_fields)
def resolve(self, query: Query) -> str:
"""
Default query resolver from type(Operation) > type(str).
Overide this method to build multiple queries in one, if needed.
"""
# return the constructed query operation
return Operation(type="", queries=[query]).render()
def record_process_components(self, record: MutableMapping[str, Any]) -> Iterable[MutableMapping[str, Any]]:
"""
Defines how to process collected components, default `as is`.
"""
yield record
| ShopifyBulkQuery |
python | PyCQA__pylint | tests/functional/r/redefined/redefined_slots.py | {
"start": 698,
"end": 1059
} | class ____(Base, Base2):
"""Adding the `l`, `m`, `n` slots
Redefining the `a`, `b`, & `c` slot already defined in `Base`
Redefining the `i`, `j`, `k` slot already defined in `Base2`
"""
__slots__ = ("a", "b", "c", "i", "j", "k", "l", "m", "n") # [redefined-slots-in-subclass]
# https://github.com/pylint-dev/pylint/issues/6100
| Subclass3 |
python | viewflow__viewflow | viewflow/forms/renderers.py | {
"start": 3583,
"end": 3655
} | class ____(WidgetRenderer):
tag = "vf-field-textarea"
| TextareaRenderer |
python | xlwings__xlwings | xlwings/constants.py | {
"start": 66625,
"end": 66835
} | class ____:
xlAllChanges = 2 # from enum XlHighlightChangesTime
xlNotYetReviewed = 3 # from enum XlHighlightChangesTime
xlSinceMyLastSave = 1 # from enum XlHighlightChangesTime
| HighlightChangesTime |
python | pyinstaller__pyinstaller | bootloader/waflib/Tools/tex.py | {
"start": 1386,
"end": 10065
} | class ____(Task.Task):
bibtex_fun, _ = Task.compile_fun('${BIBTEX} ${BIBTEXFLAGS} ${SRCFILE}', shell=False)
bibtex_fun.__doc__ = """
Execute the program **bibtex**
"""
makeindex_fun, _ = Task.compile_fun('${MAKEINDEX} ${MAKEINDEXFLAGS} ${SRCFILE}', shell=False)
makeindex_fun.__doc__ = """
Execute the program **makeindex**
"""
makeglossaries_fun, _ = Task.compile_fun('${MAKEGLOSSARIES} ${SRCFILE}', shell=False)
makeglossaries_fun.__doc__ = """
Execute the program **makeglossaries**
"""
def exec_command(self, cmd, **kw):
if self.env.PROMPT_LATEX:
kw['stdout'] = kw['stderr'] = None
return super(tex, self).exec_command(cmd, **kw)
def scan_aux(self, node):
nodes = [node]
re_aux = re.compile(r'\\@input{(?P<file>[^{}]*)}', re.M)
def parse_node(node):
code = node.read()
for match in re_aux.finditer(code):
path = match.group('file')
found = node.parent.find_or_declare(path)
if found and found not in nodes:
Logs.debug('tex: found aux node %r', found)
nodes.append(found)
parse_node(found)
parse_node(node)
return nodes
def scan(self):
node = self.inputs[0]
nodes = []
names = []
seen = []
if not node:
return (nodes, names)
def parse_node(node):
if node in seen:
return
seen.append(node)
code = node.read()
for match in re_tex.finditer(code):
multibib = match.group('type')
if multibib and multibib.startswith('bibliography'):
multibib = multibib[len('bibliography'):]
if multibib.startswith('style'):
continue
else:
multibib = None
for path in match.group('file').split(','):
if path:
add_name = True
found = None
for k in exts_deps_tex:
for up in self.texinputs_nodes:
Logs.debug('tex: trying %s%s', path, k)
found = up.find_resource(path + k)
if found:
break
for tsk in self.generator.tasks:
if not found or found in tsk.outputs:
break
else:
nodes.append(found)
add_name = False
for ext in exts_tex:
if found.name.endswith(ext):
parse_node(found)
break
if found and multibib and found.name.endswith('.bib'):
try:
self.multibibs.append(found)
except AttributeError:
self.multibibs = [found]
if add_name:
names.append(path)
parse_node(node)
for x in nodes:
x.parent.get_bld().mkdir()
Logs.debug("tex: found the following : %s and names %s", nodes, names)
return (nodes, names)
def check_status(self, msg, retcode):
if retcode != 0:
raise Errors.WafError('%r command exit status %r' % (msg, retcode))
def info(self, *k, **kw):
try:
info = self.generator.bld.conf.logger.info
except AttributeError:
info = Logs.info
info(*k, **kw)
def bibfile(self):
for aux_node in self.aux_nodes:
try:
ct = aux_node.read()
except EnvironmentError:
Logs.error('Error reading %s: %r', aux_node.abspath())
continue
if g_bibtex_re.findall(ct):
self.info('calling bibtex')
self.env.env = {}
self.env.env.update(os.environ)
self.env.env.update({'BIBINPUTS': self.texinputs(), 'BSTINPUTS': self.texinputs()})
self.env.SRCFILE = aux_node.name[:-4]
self.check_status('error when calling bibtex', self.bibtex_fun())
for node in getattr(self, 'multibibs', []):
self.env.env = {}
self.env.env.update(os.environ)
self.env.env.update({'BIBINPUTS': self.texinputs(), 'BSTINPUTS': self.texinputs()})
self.env.SRCFILE = node.name[:-4]
self.check_status('error when calling bibtex', self.bibtex_fun())
def bibunits(self):
try:
bibunits = bibunitscan(self)
except OSError:
Logs.error('error bibunitscan')
else:
if bibunits:
fn = ['bu' + str(i) for i in range(1, len(bibunits) + 1)]
if fn:
self.info('calling bibtex on bibunits')
for f in fn:
self.env.env = {'BIBINPUTS': self.texinputs(), 'BSTINPUTS': self.texinputs()}
self.env.SRCFILE = f
self.check_status('error when calling bibtex', self.bibtex_fun())
def makeindex(self):
self.idx_node = self.inputs[0].change_ext('.idx')
try:
idx_path = self.idx_node.abspath()
os.stat(idx_path)
except OSError:
self.info('index file %s absent, not calling makeindex', idx_path)
else:
self.info('calling makeindex')
self.env.SRCFILE = self.idx_node.name
self.env.env = {}
self.check_status('error when calling makeindex %s' % idx_path, self.makeindex_fun())
def bibtopic(self):
p = self.inputs[0].parent.get_bld()
if os.path.exists(os.path.join(p.abspath(), 'btaux.aux')):
self.aux_nodes += p.ant_glob('*[0-9].aux')
def makeglossaries(self):
src_file = self.inputs[0].abspath()
base_file = os.path.basename(src_file)
base, _ = os.path.splitext(base_file)
for aux_node in self.aux_nodes:
try:
ct = aux_node.read()
except EnvironmentError:
Logs.error('Error reading %s: %r', aux_node.abspath())
continue
if g_glossaries_re.findall(ct):
if not self.env.MAKEGLOSSARIES:
raise Errors.WafError("The program 'makeglossaries' is missing!")
Logs.warn('calling makeglossaries')
self.env.SRCFILE = base
self.check_status('error when calling makeglossaries %s' % base, self.makeglossaries_fun())
return
def texinputs(self):
return os.pathsep.join([k.abspath() for k in self.texinputs_nodes]) + os.pathsep
def run(self):
env = self.env
if not env.PROMPT_LATEX:
env.append_value('LATEXFLAGS', '-interaction=batchmode')
env.append_value('PDFLATEXFLAGS', '-interaction=batchmode')
env.append_value('XELATEXFLAGS', '-interaction=batchmode')
self.cwd = self.inputs[0].parent.get_bld()
self.info('first pass on %s', self.__class__.__name__)
cur_hash = self.hash_aux_nodes()
self.call_latex()
self.hash_aux_nodes()
self.bibtopic()
self.bibfile()
self.bibunits()
self.makeindex()
self.makeglossaries()
for i in range(10):
prev_hash = cur_hash
cur_hash = self.hash_aux_nodes()
if not cur_hash:
Logs.error('No aux.h to process')
if cur_hash and cur_hash == prev_hash:
break
self.info('calling %s', self.__class__.__name__)
self.call_latex()
def hash_aux_nodes(self):
try:
self.aux_nodes
except AttributeError:
try:
self.aux_nodes = self.scan_aux(self.inputs[0].change_ext('.aux'))
except IOError:
return None
return Utils.h_list([Utils.h_file(x.abspath()) for x in self.aux_nodes])
def call_latex(self):
self.env.env = {}
self.env.env.update(os.environ)
self.env.env.update({'TEXINPUTS': self.texinputs()})
self.env.SRCFILE = self.inputs[0].abspath()
self.check_status('error when calling latex', self.texfun())
| tex |
python | cython__cython | Cython/Compiler/ExprNodes.py | {
"start": 401589,
"end": 403036
} | class ____(ExprNode):
"""
Set constructor.
"""
subexprs = ['args']
type = set_type
is_set_literal = True
gil_message = "Constructing Python set"
def analyse_types(self, env):
for i in range(len(self.args)):
arg = self.args[i]
arg = arg.analyse_types(env)
self.args[i] = arg.coerce_to_pyobject(env)
self.type = set_type
self.is_temp = 1
return self
def may_be_none(self):
return False
def calculate_constant_result(self):
self.constant_result = {arg.constant_result for arg in self.args}
def compile_time_value(self, denv):
values = [arg.compile_time_value(denv) for arg in self.args]
try:
return set(values)
except Exception as e:
self.compile_time_value_error(e)
def generate_evaluation_code(self, code):
for arg in self.args:
arg.generate_evaluation_code(code)
self.allocate_temp_result(code)
code.putln(
"%s = PySet_New(0); %s" % (
self.result(),
code.error_goto_if_null(self.result(), self.pos)))
self.generate_gotref(code)
for arg in self.args:
code.put_error_if_neg(
self.pos,
"PySet_Add(%s, %s)" % (self.result(), arg.py_result()))
arg.generate_disposal_code(code)
arg.free_temps(code)
| SetNode |
python | pytorch__pytorch | test/distributed/tensor/test_dtensor_export.py | {
"start": 2296,
"end": 2627
} | class ____(torch.nn.Module):
def __init__(self, device):
super().__init__()
self.mlp_0 = MLPModule(device)
self.mlp_1 = MLPModule(device)
def forward(self, input):
with fx_traceback.annotate({"pp_stage": 0}):
x = self.mlp_0(input)
return self.mlp_1(x)
| SimpleModelAnnotated |
python | donnemartin__system-design-primer | solutions/object_oriented_design/online_chat/online_chat.py | {
"start": 1590,
"end": 1788
} | class ____(Chat):
def __init__(self, first_user, second_user):
super(PrivateChat, self).__init__()
self.users.append(first_user)
self.users.append(second_user)
| PrivateChat |
python | streamlit__streamlit | lib/tests/streamlit/components/v2/test_bidi_component.py | {
"start": 39320,
"end": 53286
} | class ____(DeltaGeneratorTestCase):
"""Validate CCv2 identity rules for keyed and unkeyed instances."""
def setUp(self):
super().setUp()
self.manager = BidiComponentManager()
runtime = Runtime.instance()
if runtime is None:
raise RuntimeError("Runtime.instance() returned None in test setup.")
runtime.bidi_component_registry = self.manager
self.manager.register(
BidiComponentDefinition(name="ident", js="console.log('hi');")
)
def _clear_widget_registrations_for_current_run(self) -> None:
"""Allow re-registering the same id within the same run for testing keyed stability."""
ctx = get_script_run_ctx()
assert ctx is not None
ctx.widget_user_keys_this_run.clear()
ctx.widget_ids_this_run.clear()
def _render_and_get_id(self) -> str:
delta = self.get_delta_from_queue()
return delta.new_element.bidi_component.id
def test_unkeyed_id_stable_when_data_is_none(self):
"""Without data, unkeyed components should have stable IDs based on other params."""
st._bidi_component("ident")
id1 = self._render_and_get_id()
self._clear_widget_registrations_for_current_run()
st._bidi_component("ident")
id2 = self._render_and_get_id()
assert id1 == id2
def test_unkeyed_id_differs_between_none_and_empty_data(self):
"""data=None must produce a different ID than data={} (empty dict is still data)."""
st._bidi_component("ident", data=None)
id_none = self._render_and_get_id()
st._bidi_component("ident", data={})
id_empty = self._render_and_get_id()
assert id_none != id_empty
def test_unkeyed_id_changes_when_json_data_changes(self):
"""Without a user key, changing JSON data must change the backend id."""
st._bidi_component("ident", data={"x": 1})
id1 = self._render_and_get_id()
st._bidi_component("ident", data={"x": 2})
id2 = self._render_and_get_id()
assert id1 != id2
def test_unkeyed_id_changes_when_bytes_change(self):
"""Without a user key, changing bytes must change the backend id."""
st._bidi_component("ident", data=b"abc")
id1 = self._render_and_get_id()
st._bidi_component("ident", data=b"abcd")
id2 = self._render_and_get_id()
assert id1 != id2
def test_unkeyed_id_changes_when_arrow_data_changes(self):
"""Without a user key, changing dataframe content must change the backend id."""
st._bidi_component("ident", data=pd.DataFrame({"a": [1, 2]}))
id1 = self._render_and_get_id()
st._bidi_component("ident", data=pd.DataFrame({"a": [1, 3]}))
id2 = self._render_and_get_id()
assert id1 != id2
def test_unkeyed_id_changes_when_mixed_blobs_change(self):
"""Without a user key, MixedData blob fingerprint differences must change id."""
st._bidi_component("ident", data={"df": pd.DataFrame({"x": [1]})})
id1 = self._render_and_get_id()
st._bidi_component("ident", data={"df": pd.DataFrame({"x": [2]})})
id2 = self._render_and_get_id()
assert id1 != id2
def test_keyed_id_stable_when_data_changes_json(self):
"""With a user key, changing JSON data must NOT change the backend id (same run)."""
st._bidi_component("ident", key="K", data={"v": 1})
id1 = self._render_and_get_id()
# Allow re-registering the same id in the same run for test purposes
self._clear_widget_registrations_for_current_run()
st._bidi_component("ident", key="K", data={"v": 2})
id2 = self._render_and_get_id()
assert id1 == id2
def test_keyed_id_stable_when_mixed_data_changes(self):
"""With a user key, changing MixedData (JSON + blobs) must NOT change the backend id (same run)."""
st._bidi_component(
"ident", key="MIX", data={"df": pd.DataFrame({"a": [1]}), "m": {"x": 1}}
)
id1 = self._render_and_get_id()
self._clear_widget_registrations_for_current_run()
st._bidi_component(
"ident", key="MIX", data={"df": pd.DataFrame({"a": [2]}), "m": {"x": 2}}
)
id2 = self._render_and_get_id()
assert id1 == id2
def test_unkeyed_id_stable_when_arrow_data_unchanged(self):
"""Without a user key, unchanged dataframe content must keep the same backend id (no needless churn)."""
df1 = pd.DataFrame({"a": [1, 2, 3]})
st._bidi_component("ident", data=df1)
id1 = self._render_and_get_id()
# Allow re-registering the same id in this run for stability assertion
self._clear_widget_registrations_for_current_run()
# New DataFrame object with identical content
df2 = pd.DataFrame({"a": [1, 2, 3]})
st._bidi_component("ident", data=df2)
id2 = self._render_and_get_id()
assert id1 == id2
def test_unkeyed_id_stable_when_mixed_data_unchanged(self):
"""Without a user key, unchanged MixedData must keep the same backend id (no needless churn)."""
mixed1 = {"df": pd.DataFrame({"x": [1, 2]}), "meta": {"k": "v"}}
st._bidi_component("ident", data=mixed1)
id1 = self._render_and_get_id()
self._clear_widget_registrations_for_current_run()
# New objects but same serialized content and key order
mixed2 = {"df": pd.DataFrame({"x": [1, 2]}), "meta": {"k": "v"}}
st._bidi_component("ident", data=mixed2)
id2 = self._render_and_get_id()
assert id1 == id2
def test_keyed_id_stable_when_data_changes_arrow(self):
"""With a user key, changing Arrow/mixed data must NOT change the backend id (same run)."""
st._bidi_component("ident", key="ARW", data=pd.DataFrame({"a": [1]}))
id1 = self._render_and_get_id()
self._clear_widget_registrations_for_current_run()
st._bidi_component("ident", key="ARW", data=pd.DataFrame({"a": [2]}))
id2 = self._render_and_get_id()
assert id1 == id2
def test_identity_kwargs_raises_on_unhandled_oneof(self):
"""_build_bidi_identity_kwargs should raise if an unknown oneof is encountered."""
mixin = BidiComponentMixin()
class DummyProto:
def WhichOneof(self, _name: str) -> str:
return "new_unhandled_field"
with pytest.raises(
RuntimeError, match=r"Unhandled BidiComponent\.data oneof field"
):
mixin._build_bidi_identity_kwargs(
component_name="cmp",
isolate_styles=True,
width="stretch",
height="content",
proto=DummyProto(), # type: ignore[arg-type]
)
def test_identity_kwargs_mixed_blob_keys_are_sorted(self):
"""When computing identity, mixed arrow blob ref IDs must be sorted for stability."""
mixin = BidiComponentMixin()
proto = BidiComponentProto()
proto.mixed.json = "{}"
# Insert keys in descending order to verify sorting in identity.
proto.mixed.arrow_blobs["b"].data = b"b"
proto.mixed.arrow_blobs["a"].data = b"a"
identity = mixin._build_bidi_identity_kwargs(
component_name="cmp",
isolate_styles=True,
width="stretch",
height="content",
proto=proto,
)
assert identity["mixed_json"] == calc_md5("{}")
assert identity["mixed_arrow_blobs"] == "a,b"
def test_identity_kwargs_json_canonicalizes_order(self):
"""Identity canonicalization should ignore key insertion order for JSON data."""
mixin = BidiComponentMixin()
proto = BidiComponentProto()
proto.json = json.dumps({"b": 2, "a": 1})
identity = mixin._build_bidi_identity_kwargs(
component_name="cmp",
isolate_styles=True,
width="stretch",
height="content",
proto=proto,
)
expected = json.dumps({"a": 1, "b": 2}, sort_keys=True)
assert identity["json"] == calc_md5(expected)
def test_identity_kwargs_mixed_json_canonicalizes_order(self):
"""MixedData identity must canonicalize JSON portion independently of storage order."""
mixin = BidiComponentMixin()
proto = BidiComponentProto()
proto.mixed.json = json.dumps({"b": 2, "a": 1})
identity = mixin._build_bidi_identity_kwargs(
component_name="cmp",
isolate_styles=True,
width="stretch",
height="content",
proto=proto,
)
expected = json.dumps({"a": 1, "b": 2}, sort_keys=True)
assert identity["mixed_json"] == calc_md5(expected)
def test_identity_kwargs_bytes_use_digest(self):
"""Raw byte payloads should contribute content digests, not the full payload."""
mixin = BidiComponentMixin()
proto = BidiComponentProto()
proto.bytes = b"bytes payload"
identity = mixin._build_bidi_identity_kwargs(
component_name="cmp",
isolate_styles=True,
width="stretch",
height="content",
proto=proto,
)
assert identity["bytes"] == calc_md5(b"bytes payload")
def test_identity_kwargs_arrow_data_use_digest(self):
"""Arrow payloads should contribute digests to avoid hashing large blobs repeatedly."""
mixin = BidiComponentMixin()
proto = BidiComponentProto()
proto.arrow_data.data = b"\x00\x01"
identity = mixin._build_bidi_identity_kwargs(
component_name="cmp",
isolate_styles=True,
width="stretch",
height="content",
proto=proto,
)
assert identity["arrow_data"] == calc_md5(b"\x00\x01")
def test_unkeyed_id_stable_when_json_key_order_changes(self):
"""Without a user key, changing the insertion order of keys in a JSON dict should NOT change the backend id."""
data1 = {"a": 1, "b": 2}
data2 = {"b": 2, "a": 1}
st._bidi_component("ident", data=data1)
id1 = self._render_and_get_id()
self._clear_widget_registrations_for_current_run()
st._bidi_component("ident", data=data2)
id2 = self._render_and_get_id()
assert id1 == id2
def test_unkeyed_id_stable_when_mixed_data_json_key_order_changes(self):
"""Without a user key, changing the insertion order of keys in the JSON
part of MixedData should NOT change the backend id."""
# We use different dataframes (same content) to trigger mixed processing but keep blobs same
df1 = pd.DataFrame({"c": [3]})
df2 = pd.DataFrame({"c": [3]})
data1 = {"df": df1, "meta": {"a": 1, "b": 2}}
data2 = {"df": df2, "meta": {"b": 2, "a": 1}}
st._bidi_component("ident", data=data1)
id1 = self._render_and_get_id()
self._clear_widget_registrations_for_current_run()
st._bidi_component("ident", data=data2)
id2 = self._render_and_get_id()
assert id1 == id2
def test_unkeyed_id_stable_with_duplicate_dataframe_content(self):
"""Two different keys with identical DataFrame content should produce stable IDs.
This validates content-addressing deduplication: identical DataFrames under
different keys share the same blob ref ID, so the identity is stable.
"""
data1 = {"df1": pd.DataFrame({"x": [1]}), "df2": pd.DataFrame({"x": [1]})}
st._bidi_component("ident", data=data1)
id1 = self._render_and_get_id()
self._clear_widget_registrations_for_current_run()
# Same structure, new DataFrame objects with identical content
data2 = {"df1": pd.DataFrame({"x": [1]}), "df2": pd.DataFrame({"x": [1]})}
st._bidi_component("ident", data=data2)
id2 = self._render_and_get_id()
assert id1 == id2
def test_identity_kwargs_uses_optimization_when_data_provided(self):
"""When data is provided, identity calculation should skip unnecessary deserialization."""
mixin = BidiComponentMixin()
proto = BidiComponentProto()
data = {"a": 1, "b": 2}
# Pre-populate proto.json to simulate what happens in main
proto.json = json.dumps(data)
# Mock _canonical_json_digest_for_identity to ensure it's NOT called
# when the optimization path is taken.
with patch.object(mixin, "_canonical_json_digest_for_identity") as mock_digest:
identity = mixin._build_bidi_identity_kwargs(
component_name="cmp",
isolate_styles=True,
width="stretch",
height="content",
proto=proto,
data=data,
)
# Verify the result is correct (sorted keys)
expected_canonical = json.dumps(data, sort_keys=True)
assert identity["json"] == calc_md5(expected_canonical)
# Verify the slow path was skipped
mock_digest.assert_not_called()
# Verify behavior WITHOUT data (slow path fallback)
with patch.object(
mixin,
"_canonical_json_digest_for_identity",
wraps=mixin._canonical_json_digest_for_identity,
) as mock_digest:
identity = mixin._build_bidi_identity_kwargs(
component_name="cmp",
isolate_styles=True,
width="stretch",
height="content",
proto=proto,
data=None,
)
# Verify result is still correct
assert identity["json"] == calc_md5(expected_canonical)
# Verify the slow path WAS called
mock_digest.assert_called_once()
| BidiComponentIdentityTest |
python | ansible__ansible | test/units/modules/test_unarchive.py | {
"start": 729,
"end": 3293
} | class ____:
@pytest.mark.parametrize(
'side_effect, expected_reason', (
([ValueError, '/bin/zipinfo'], "Unable to find required 'unzip'"),
(ValueError, "Unable to find required 'unzip' or 'zipinfo'"),
)
)
def test_no_zip_zipinfo_binary(self, mocker, fake_ansible_module, side_effect, expected_reason):
mocker.patch("ansible.modules.unarchive.get_bin_path", side_effect=side_effect)
fake_ansible_module.params = {
"extra_opts": "",
"exclude": "",
"include": "",
"io_buffer_size": 65536,
}
z = ZipArchive(
src="",
b_dest="",
file_args="",
module=fake_ansible_module,
)
can_handle, reason = z.can_handle_archive()
assert can_handle is False
assert expected_reason in reason
assert z.cmd_path is None
@pytest.mark.parametrize(
("test_input", "expected"),
[
pytest.param(
"19800000.000000",
time.mktime(time.struct_time((1980, 0, 0, 0, 0, 0, 0, 0, 0))),
id="invalid-month-1980",
),
pytest.param(
"19791231.000000",
time.mktime(time.struct_time((1980, 1, 1, 0, 0, 0, 0, 0, 0))),
id="invalid-year-1979",
),
pytest.param(
"19810101.000000",
time.mktime(time.struct_time((1981, 1, 1, 0, 0, 0, 0, 0, 0))),
id="valid-datetime",
),
pytest.param(
"21081231.000000",
max_zip_timestamp(),
id="invalid-year-2108",
),
pytest.param(
"INVALID_TIME_DATE",
time.mktime(time.struct_time((1980, 1, 1, 0, 0, 0, 0, 0, 0))),
id="invalid-datetime",
),
],
)
def test_valid_time_stamp(self, mocker, fake_ansible_module, test_input, expected):
mocker.patch(
"ansible.modules.unarchive.get_bin_path",
side_effect=["/bin/unzip", "/bin/zipinfo"],
)
fake_ansible_module.params = {
"extra_opts": "",
"exclude": "",
"include": "",
"io_buffer_size": 65536,
}
z = ZipArchive(
src="",
b_dest="",
file_args="",
module=fake_ansible_module,
)
assert z._valid_time_stamp(test_input) == expected
| TestCaseZipArchive |
python | ethereum__web3.py | web3/exceptions.py | {
"start": 5444,
"end": 5542
} | class ____(Web3Exception):
"""
Raised when the event ABI is invalid.
"""
| InvalidEventABI |
python | apache__airflow | airflow-core/tests/unit/utils/test_scheduler_health.py | {
"start": 1286,
"end": 3469
} | class ____:
def setup_method(self) -> None:
self.mock_server = MockServer()
# This test is to ensure that the server responds correctly to a GET request on the correct endpoint.
@mock.patch.object(BaseHTTPRequestHandler, "send_error")
def test_incorrect_endpoint(self, mock_send_error):
self.mock_server.do_GET("/incorrect")
mock_send_error.assert_called_with(404)
# This test is to ensure that if the scheduler is healthy, it returns 200 status code.
@mock.patch.object(BaseHTTPRequestHandler, "end_headers")
@mock.patch.object(BaseHTTPRequestHandler, "send_response")
@mock.patch("airflow.utils.scheduler_health.create_session")
def test_healthy_scheduler(self, mock_session, mock_send_response, mock_end_headers):
mock_scheduler_job = MagicMock()
mock_scheduler_job.is_alive.return_value = True
mock_session.return_value.__enter__.return_value.query.return_value = mock_scheduler_job
self.mock_server.do_GET("/health")
mock_send_response.assert_called_once_with(200)
mock_end_headers.assert_called_once()
# This test is to ensure that if the scheduler is unhealthy, it returns 503 error code.
@mock.patch.object(BaseHTTPRequestHandler, "send_error")
@mock.patch("airflow.utils.scheduler_health.create_session")
def test_unhealthy_scheduler(self, mock_session, mock_send_error):
mock_scheduler_job = MagicMock()
mock_scheduler_job.is_alive.return_value = False
mock_session.return_value.__enter__.return_value.query.return_value = mock_scheduler_job
self.mock_server.do_GET("/health")
mock_send_error.assert_called_with(503)
# This test is to ensure that if there's no scheduler job running, it returns 503 error code.
@mock.patch.object(BaseHTTPRequestHandler, "send_error")
@mock.patch("airflow.utils.scheduler_health.create_session")
def test_missing_scheduler(self, mock_session, mock_send_error):
mock_session.return_value.__enter__.return_value.query.return_value = None
self.mock_server.do_GET("/health")
mock_send_error.assert_called_with(503)
| TestSchedulerHealthServer |
python | vyperlang__vyper | vyper/exceptions.py | {
"start": 11060,
"end": 11162
} | class ____(VyperException):
"""Some feature is known to be not implemented"""
| UnimplementedException |
python | langchain-ai__langchain | libs/partners/anthropic/tests/integration_tests/test_chat_models.py | {
"start": 35772,
"end": 78957
} | class ____(BaseModel):
"""Get the current weather in a given location."""
location: str = Field(..., description="The city and state, e.g. San Francisco, CA")
@pytest.mark.parametrize("tool_choice", ["GetWeather", "auto", "any"])
def test_anthropic_bind_tools_tool_choice(tool_choice: str) -> None:
chat_model = ChatAnthropic(
model=MODEL_NAME, # type: ignore[call-arg]
)
chat_model_with_tools = chat_model.bind_tools([GetWeather], tool_choice=tool_choice)
response = chat_model_with_tools.invoke("what's the weather in ny and la")
assert isinstance(response, AIMessage)
def test_pdf_document_input() -> None:
url = "https://www.w3.org/WAI/ER/tests/xhtml/testfiles/resources/pdf/dummy.pdf"
data = b64encode(requests.get(url, timeout=10).content).decode()
result = ChatAnthropic(model=MODEL_NAME).invoke( # type: ignore[call-arg]
[
HumanMessage(
[
"summarize this document",
{
"type": "document",
"source": {
"type": "base64",
"data": data,
"media_type": "application/pdf",
},
},
],
),
],
)
assert isinstance(result, AIMessage)
assert isinstance(result.content, str)
assert len(result.content) > 0
@pytest.mark.default_cassette("test_agent_loop.yaml.gz")
@pytest.mark.vcr
@pytest.mark.parametrize("output_version", ["v0", "v1"])
def test_agent_loop(output_version: Literal["v0", "v1"]) -> None:
@tool
def get_weather(location: str) -> str:
"""Get the weather for a location."""
return "It's sunny."
llm = ChatAnthropic(model=MODEL_NAME, output_version=output_version) # type: ignore[call-arg]
llm_with_tools = llm.bind_tools([get_weather])
input_message = HumanMessage("What is the weather in San Francisco, CA?")
tool_call_message = llm_with_tools.invoke([input_message])
assert isinstance(tool_call_message, AIMessage)
tool_calls = tool_call_message.tool_calls
assert len(tool_calls) == 1
tool_call = tool_calls[0]
tool_message = get_weather.invoke(tool_call)
assert isinstance(tool_message, ToolMessage)
response = llm_with_tools.invoke(
[
input_message,
tool_call_message,
tool_message,
]
)
assert isinstance(response, AIMessage)
@pytest.mark.default_cassette("test_agent_loop_streaming.yaml.gz")
@pytest.mark.vcr
@pytest.mark.parametrize("output_version", ["v0", "v1"])
def test_agent_loop_streaming(output_version: Literal["v0", "v1"]) -> None:
@tool
def get_weather(location: str) -> str:
"""Get the weather for a location."""
return "It's sunny."
llm = ChatAnthropic(
model=MODEL_NAME,
streaming=True,
output_version=output_version, # type: ignore[call-arg]
)
llm_with_tools = llm.bind_tools([get_weather])
input_message = HumanMessage("What is the weather in San Francisco, CA?")
tool_call_message = llm_with_tools.invoke([input_message])
assert isinstance(tool_call_message, AIMessage)
tool_calls = tool_call_message.tool_calls
assert len(tool_calls) == 1
tool_call = tool_calls[0]
tool_message = get_weather.invoke(tool_call)
assert isinstance(tool_message, ToolMessage)
response = llm_with_tools.invoke(
[
input_message,
tool_call_message,
tool_message,
]
)
assert isinstance(response, AIMessage)
@pytest.mark.default_cassette("test_citations.yaml.gz")
@pytest.mark.vcr
@pytest.mark.parametrize("output_version", ["v0", "v1"])
def test_citations(output_version: Literal["v0", "v1"]) -> None:
llm = ChatAnthropic(model=MODEL_NAME, output_version=output_version) # type: ignore[call-arg]
messages = [
{
"role": "user",
"content": [
{
"type": "document",
"source": {
"type": "content",
"content": [
{"type": "text", "text": "The grass is green"},
{"type": "text", "text": "The sky is blue"},
],
},
"citations": {"enabled": True},
},
{"type": "text", "text": "What color is the grass and sky?"},
],
},
]
response = llm.invoke(messages)
assert isinstance(response, AIMessage)
assert isinstance(response.content, list)
if output_version == "v1":
assert any("annotations" in block for block in response.content)
else:
assert any("citations" in block for block in response.content)
# Test streaming
full: BaseMessageChunk | None = None
for chunk in llm.stream(messages):
full = cast("BaseMessageChunk", chunk) if full is None else full + chunk
assert isinstance(full, AIMessageChunk)
assert isinstance(full.content, list)
assert not any("citation" in block for block in full.content)
if output_version == "v1":
assert any("annotations" in block for block in full.content)
else:
assert any("citations" in block for block in full.content)
# Test pass back in
next_message = {
"role": "user",
"content": "Can you comment on the citations you just made?",
}
_ = llm.invoke([*messages, full, next_message])
@pytest.mark.vcr
def test_thinking() -> None:
llm = ChatAnthropic(
model="claude-sonnet-4-5-20250929", # type: ignore[call-arg]
max_tokens=5_000, # type: ignore[call-arg]
thinking={"type": "enabled", "budget_tokens": 2_000},
)
input_message = {"role": "user", "content": "Hello"}
response = llm.invoke([input_message])
assert any("thinking" in block for block in response.content)
for block in response.content:
assert isinstance(block, dict)
if block["type"] == "thinking":
assert set(block.keys()) == {"type", "thinking", "signature"}
assert block["thinking"]
assert isinstance(block["thinking"], str)
assert block["signature"]
assert isinstance(block["signature"], str)
# Test streaming
full: BaseMessageChunk | None = None
for chunk in llm.stream([input_message]):
full = cast("BaseMessageChunk", chunk) if full is None else full + chunk
assert isinstance(full, AIMessageChunk)
assert isinstance(full.content, list)
assert any("thinking" in block for block in full.content)
for block in full.content:
assert isinstance(block, dict)
if block["type"] == "thinking":
assert set(block.keys()) == {"type", "thinking", "signature", "index"}
assert block["thinking"]
assert isinstance(block["thinking"], str)
assert block["signature"]
assert isinstance(block["signature"], str)
# Test pass back in
next_message = {"role": "user", "content": "How are you?"}
_ = llm.invoke([input_message, full, next_message])
@pytest.mark.default_cassette("test_thinking.yaml.gz")
@pytest.mark.vcr
def test_thinking_v1() -> None:
llm = ChatAnthropic(
model="claude-sonnet-4-5-20250929", # type: ignore[call-arg]
max_tokens=5_000, # type: ignore[call-arg]
thinking={"type": "enabled", "budget_tokens": 2_000},
output_version="v1",
)
input_message = {"role": "user", "content": "Hello"}
response = llm.invoke([input_message])
assert any("reasoning" in block for block in response.content)
for block in response.content:
assert isinstance(block, dict)
if block["type"] == "reasoning":
assert set(block.keys()) == {"type", "reasoning", "extras"}
assert block["reasoning"]
assert isinstance(block["reasoning"], str)
signature = block["extras"]["signature"]
assert signature
assert isinstance(signature, str)
# Test streaming
full: BaseMessageChunk | None = None
for chunk in llm.stream([input_message]):
full = cast(BaseMessageChunk, chunk) if full is None else full + chunk
assert isinstance(full, AIMessageChunk)
assert isinstance(full.content, list)
assert any("reasoning" in block for block in full.content)
for block in full.content:
assert isinstance(block, dict)
if block["type"] == "reasoning":
assert set(block.keys()) == {"type", "reasoning", "extras", "index"}
assert block["reasoning"]
assert isinstance(block["reasoning"], str)
signature = block["extras"]["signature"]
assert signature
assert isinstance(signature, str)
# Test pass back in
next_message = {"role": "user", "content": "How are you?"}
_ = llm.invoke([input_message, full, next_message])
@pytest.mark.default_cassette("test_redacted_thinking.yaml.gz")
@pytest.mark.vcr
@pytest.mark.parametrize("output_version", ["v0", "v1"])
def test_redacted_thinking(output_version: Literal["v0", "v1"]) -> None:
llm = ChatAnthropic(
# It appears that Sonnet 4.5 either: isn't returning redacted thinking blocks,
# or the magic string is broken? Retry later once 3-7 finally removed
model="claude-3-7-sonnet-latest", # type: ignore[call-arg]
max_tokens=5_000, # type: ignore[call-arg]
thinking={"type": "enabled", "budget_tokens": 2_000},
output_version=output_version,
)
query = "ANTHROPIC_MAGIC_STRING_TRIGGER_REDACTED_THINKING_46C9A13E193C177646C7398A98432ECCCE4C1253D5E2D82641AC0E52CC2876CB" # noqa: E501
input_message = {"role": "user", "content": query}
response = llm.invoke([input_message])
value = None
for block in response.content:
assert isinstance(block, dict)
if block["type"] == "redacted_thinking":
value = block
elif (
block["type"] == "non_standard"
and block["value"]["type"] == "redacted_thinking"
):
value = block["value"]
else:
pass
if value:
assert set(value.keys()) == {"type", "data"}
assert value["data"]
assert isinstance(value["data"], str)
assert value is not None
# Test streaming
full: BaseMessageChunk | None = None
for chunk in llm.stream([input_message]):
full = cast("BaseMessageChunk", chunk) if full is None else full + chunk
assert isinstance(full, AIMessageChunk)
assert isinstance(full.content, list)
value = None
for block in full.content:
assert isinstance(block, dict)
if block["type"] == "redacted_thinking":
value = block
assert set(value.keys()) == {"type", "data", "index"}
assert "index" in block
elif (
block["type"] == "non_standard"
and block["value"]["type"] == "redacted_thinking"
):
value = block["value"]
assert isinstance(value, dict)
assert set(value.keys()) == {"type", "data"}
assert "index" in block
else:
pass
if value:
assert value["data"]
assert isinstance(value["data"], str)
assert value is not None
# Test pass back in
next_message = {"role": "user", "content": "What?"}
_ = llm.invoke([input_message, full, next_message])
def test_structured_output_thinking_enabled() -> None:
llm = ChatAnthropic(
model="claude-sonnet-4-5-20250929", # type: ignore[call-arg]
max_tokens=5_000, # type: ignore[call-arg]
thinking={"type": "enabled", "budget_tokens": 2_000},
)
with pytest.warns(match="structured output"):
structured_llm = llm.with_structured_output(GenerateUsername)
query = "Generate a username for Sally with green hair"
response = structured_llm.invoke(query)
assert isinstance(response, GenerateUsername)
with pytest.raises(OutputParserException):
structured_llm.invoke("Hello")
# Test streaming
for chunk in structured_llm.stream(query):
assert isinstance(chunk, GenerateUsername)
def test_structured_output_thinking_force_tool_use() -> None:
# Structured output currently relies on forced tool use, which is not supported
# when `thinking` is enabled. When this test fails, it means that the feature
# is supported and the workarounds in `with_structured_output` should be removed.
llm = ChatAnthropic(
model="claude-sonnet-4-5-20250929", # type: ignore[call-arg]
max_tokens=5_000, # type: ignore[call-arg]
thinking={"type": "enabled", "budget_tokens": 2_000},
).bind_tools(
[GenerateUsername],
tool_choice="GenerateUsername",
)
with pytest.raises(BadRequestError):
llm.invoke("Generate a username for Sally with green hair")
def test_image_tool_calling() -> None:
"""Test tool calling with image inputs."""
class color_picker(BaseModel): # noqa: N801
"""Input your fav color and get a random fact about it."""
fav_color: str
human_content: list[dict] = [
{
"type": "text",
"text": "what's your favorite color in this image",
},
]
image_url = "https://raw.githubusercontent.com/langchain-ai/docs/4d11d08b6b0e210bd456943f7a22febbd168b543/src/images/agentic-rag-output.png"
image_data = b64encode(httpx.get(image_url).content).decode("utf-8")
human_content.append(
{
"type": "image",
"source": {
"type": "base64",
"media_type": "image/png",
"data": image_data,
},
},
)
messages = [
SystemMessage("you're a good assistant"),
HumanMessage(human_content), # type: ignore[arg-type]
AIMessage(
[
{"type": "text", "text": "Hmm let me think about that"},
{
"type": "tool_use",
"input": {"fav_color": "purple"},
"id": "foo",
"name": "color_picker",
},
],
),
HumanMessage(
[
{
"type": "tool_result",
"tool_use_id": "foo",
"content": [
{
"type": "text",
"text": "purple is a great pick! that's my sister's favorite color", # noqa: E501
},
],
"is_error": False,
},
{"type": "text", "text": "what's my sister's favorite color"},
],
),
]
llm = ChatAnthropic(model=MODEL_NAME) # type: ignore[call-arg]
_ = llm.bind_tools([color_picker]).invoke(messages)
@pytest.mark.default_cassette("test_web_search.yaml.gz")
@pytest.mark.vcr
@pytest.mark.parametrize("output_version", ["v0", "v1"])
def test_web_search(output_version: Literal["v0", "v1"]) -> None:
llm = ChatAnthropic(
model=MODEL_NAME, # type: ignore[call-arg]
max_tokens=1024,
output_version=output_version,
)
tool = {"type": "web_search_20250305", "name": "web_search", "max_uses": 1}
llm_with_tools = llm.bind_tools([tool])
input_message = {
"role": "user",
"content": [
{
"type": "text",
"text": "How do I update a web app to TypeScript 5.5?",
},
],
}
response = llm_with_tools.invoke([input_message])
assert all(isinstance(block, dict) for block in response.content)
block_types = {block["type"] for block in response.content} # type: ignore[index]
if output_version == "v0":
assert block_types == {"text", "server_tool_use", "web_search_tool_result"}
else:
assert block_types == {"text", "server_tool_call", "server_tool_result"}
# Test streaming
full: BaseMessageChunk | None = None
for chunk in llm_with_tools.stream([input_message]):
assert isinstance(chunk, AIMessageChunk)
full = chunk if full is None else full + chunk
assert isinstance(full, AIMessageChunk)
assert isinstance(full.content, list)
block_types = {block["type"] for block in full.content} # type: ignore[index]
if output_version == "v0":
assert block_types == {"text", "server_tool_use", "web_search_tool_result"}
else:
assert block_types == {"text", "server_tool_call", "server_tool_result"}
# Test we can pass back in
next_message = {
"role": "user",
"content": "Please repeat the last search, but focus on sources from 2024.",
}
_ = llm_with_tools.invoke(
[input_message, full, next_message],
)
@pytest.mark.vcr
def test_web_fetch() -> None:
    """Exercise the server-side ``web_fetch`` tool end to end.

    Covers: a basic fetch and result structure, citations, the
    ``max_content_tokens`` parameter, allowed/blocked domain filtering,
    ``max_uses`` enforcement, streaming, passing results back in a follow-up
    turn, invalid-URL error handling, and PDF fetching.

    Note: this is a beta feature.
    TODO: Update to remove beta once it's generally available.
    """
    llm = ChatAnthropic(
        model=MODEL_NAME, # type: ignore[call-arg]
        max_tokens=1024,
        betas=["web-fetch-2025-09-10"],
    )
    tool = {"type": "web_fetch_20250910", "name": "web_fetch", "max_uses": 1}
    llm_with_tools = llm.bind_tools([tool])
    input_message = {
        "role": "user",
        "content": [
            {
                "type": "text",
                "text": "Fetch the content at https://docs.langchain.com and analyze",
            },
        ],
    }
    response = llm_with_tools.invoke([input_message])
    assert all(isinstance(block, dict) for block in response.content)
    block_types = {
        block["type"] for block in response.content if isinstance(block, dict)
    }
    # A successful fetch call should include:
    # 1. text response from the model (e.g. "I'll fetch that for you")
    # 2. server_tool_use block indicating the tool was called (using tool "web_fetch")
    # 3. web_fetch_tool_result block with the results of said fetch
    assert block_types == {"text", "server_tool_use", "web_fetch_tool_result"}
    # Verify web fetch result structure
    web_fetch_results = [
        block
        for block in response.content
        if isinstance(block, dict) and block.get("type") == "web_fetch_tool_result"
    ]
    assert len(web_fetch_results) == 1  # Since max_uses=1
    fetch_result = web_fetch_results[0]
    assert "content" in fetch_result
    assert "url" in fetch_result["content"]
    assert "retrieved_at" in fetch_result["content"]
    # Fetch with citations enabled
    tool_with_citations = tool.copy()
    tool_with_citations["citations"] = {"enabled": True}
    llm_with_citations = llm.bind_tools([tool_with_citations])
    citation_message = {
        "role": "user",
        "content": (
            "Fetch https://docs.langchain.com and provide specific quotes with "
            "citations"
        ),
    }
    citation_response = llm_with_citations.invoke([citation_message])
    citation_results = [
        block
        for block in citation_response.content
        if isinstance(block, dict) and block.get("type") == "web_fetch_tool_result"
    ]
    assert len(citation_results) == 1  # Since max_uses=1
    citation_result = citation_results[0]
    assert citation_result["content"]["content"]["citations"]["enabled"]
    text_blocks = [
        block
        for block in citation_response.content
        if isinstance(block, dict) and block.get("type") == "text"
    ]
    # Check that the response contains actual citations in the content.
    # NOTE: start_char_index is compared against None explicitly — an index of
    # 0 is a valid citation position but is falsy, so a plain truthiness test
    # would miss citations that start at the very beginning of the document.
    has_citations = any(
        citation.get("type") and citation.get("start_char_index") is not None
        for block in text_blocks
        for citation in block.get("citations", [])
    )
    assert has_citations, (
        "Expected inline citation tags in response when citations are enabled for "
        "web fetch"
    )
    # Max content tokens param
    tool_with_limit = tool.copy()
    tool_with_limit["max_content_tokens"] = 1000
    llm_with_limit = llm.bind_tools([tool_with_limit])
    limit_response = llm_with_limit.invoke([input_message])
    # Response should still work even with content limits
    assert any(
        block["type"] == "web_fetch_tool_result"
        for block in limit_response.content
        if isinstance(block, dict)
    )
    # Domains filtering (note: only one can be set at a time)
    tool_with_allowed_domains = tool.copy()
    tool_with_allowed_domains["allowed_domains"] = ["docs.langchain.com"]
    llm_with_allowed = llm.bind_tools([tool_with_allowed_domains])
    allowed_response = llm_with_allowed.invoke([input_message])
    assert any(
        block["type"] == "web_fetch_tool_result"
        for block in allowed_response.content
        if isinstance(block, dict)
    )
    # Test that a disallowed domain doesn't work
    tool_with_disallowed_domains = tool.copy()
    tool_with_disallowed_domains["allowed_domains"] = [
        "example.com"
    ]  # Not docs.langchain.com
    llm_with_disallowed = llm.bind_tools([tool_with_disallowed_domains])
    disallowed_response = llm_with_disallowed.invoke([input_message])
    # We should get an error result since the domain (docs.langchain.com) is not allowed
    disallowed_results = [
        block
        for block in disallowed_response.content
        if isinstance(block, dict) and block.get("type") == "web_fetch_tool_result"
    ]
    if disallowed_results:
        disallowed_result = disallowed_results[0]
        if disallowed_result.get("content", {}).get("type") == "web_fetch_tool_error":
            assert disallowed_result["content"]["error_code"] in [
                "invalid_url",
                "fetch_failed",
            ]
    # Blocked domains filtering
    tool_with_blocked_domains = tool.copy()
    tool_with_blocked_domains["blocked_domains"] = ["example.com"]
    llm_with_blocked = llm.bind_tools([tool_with_blocked_domains])
    blocked_response = llm_with_blocked.invoke([input_message])
    assert any(
        block["type"] == "web_fetch_tool_result"
        for block in blocked_response.content
        if isinstance(block, dict)
    )
    # Test fetching from a blocked domain fails
    blocked_domain_message = {
        "role": "user",
        "content": "Fetch https://example.com and analyze",
    }
    tool_with_blocked_example = tool.copy()
    tool_with_blocked_example["blocked_domains"] = ["example.com"]
    llm_with_blocked_example = llm.bind_tools([tool_with_blocked_example])
    blocked_domain_response = llm_with_blocked_example.invoke([blocked_domain_message])
    # Should get an error when trying to access a blocked domain
    blocked_domain_results = [
        block
        for block in blocked_domain_response.content
        if isinstance(block, dict) and block.get("type") == "web_fetch_tool_result"
    ]
    if blocked_domain_results:
        blocked_result = blocked_domain_results[0]
        if blocked_result.get("content", {}).get("type") == "web_fetch_tool_error":
            assert blocked_result["content"]["error_code"] in [
                "invalid_url",
                "fetch_failed",
            ]
    # Max uses parameter - test exceeding the limit
    multi_fetch_message = {
        "role": "user",
        "content": (
            "Fetch https://docs.langchain.com and then try to fetch "
            "https://langchain.com"
        ),
    }
    max_uses_response = llm_with_tools.invoke([multi_fetch_message])
    # Should contain at least one fetch result and potentially an error for the second
    fetch_results = [
        block
        for block in max_uses_response.content
        if isinstance(block, dict) and block.get("type") == "web_fetch_tool_result"
    ]
    assert len(fetch_results) >= 1
    error_results = [
        r
        for r in fetch_results
        if r.get("content", {}).get("type") == "web_fetch_tool_error"
    ]
    if error_results:
        assert any(
            r["content"]["error_code"] == "max_uses_exceeded" for r in error_results
        )
    # Streaming
    full: BaseMessageChunk | None = None
    for chunk in llm_with_tools.stream([input_message]):
        assert isinstance(chunk, AIMessageChunk)
        full = chunk if full is None else full + chunk
    assert isinstance(full, AIMessageChunk)
    assert isinstance(full.content, list)
    block_types = {block["type"] for block in full.content if isinstance(block, dict)}
    assert block_types == {"text", "server_tool_use", "web_fetch_tool_result"}
    # Test that URLs from context can be used in follow-up
    next_message = {
        "role": "user",
        "content": "What does the site you just fetched say about models?",
    }
    follow_up_response = llm_with_tools.invoke(
        [input_message, full, next_message],
    )
    # Should work without issues since URL was already in context
    assert isinstance(follow_up_response.content, (list, str))
    # Error handling - test with an invalid URL format
    error_message = {
        "role": "user",
        "content": "Try to fetch this invalid URL: not-a-valid-url",
    }
    error_response = llm_with_tools.invoke([error_message])
    # Should handle the error gracefully
    assert isinstance(error_response.content, (list, str))
    # PDF document fetching
    pdf_message = {
        "role": "user",
        # Implicit string concatenation with NO trailing comma — a trailing
        # comma inside these parentheses would silently turn "content" into a
        # 1-tuple of str instead of a str.
        "content": (
            "Fetch this PDF: "
            "https://www.w3.org/WAI/ER/tests/xhtml/testfiles/resources/pdf/dummy.pdf "
            "and summarize its content"
        ),
    }
    pdf_response = llm_with_tools.invoke([pdf_message])
    assert any(
        block["type"] == "web_fetch_tool_result"
        for block in pdf_response.content
        if isinstance(block, dict)
    )
    # Verify PDF content structure (should have base64 data for PDFs)
    pdf_results = [
        block
        for block in pdf_response.content
        if isinstance(block, dict) and block.get("type") == "web_fetch_tool_result"
    ]
    if pdf_results:
        pdf_result = pdf_results[0]
        content = pdf_result.get("content", {})
        if content.get("content", {}).get("source", {}).get("type") == "base64":
            assert content["content"]["source"]["media_type"] == "application/pdf"
            assert "data" in content["content"]["source"]
@pytest.mark.default_cassette("test_web_fetch_v1.yaml.gz")
@pytest.mark.vcr
@pytest.mark.parametrize("output_version", ["v0", "v1"])
def test_web_fetch_v1(output_version: Literal["v0", "v1"]) -> None:
"""Test that http calls are unchanged between v0 and v1."""
llm = ChatAnthropic(
model=MODEL_NAME, # type: ignore[call-arg]
betas=["web-fetch-2025-09-10"],
output_version=output_version,
)
if output_version == "v0":
call_key = "server_tool_use"
result_key = "web_fetch_tool_result"
else:
# v1
call_key = "server_tool_call"
result_key = "server_tool_result"
tool = {
"type": "web_fetch_20250910",
"name": "web_fetch",
"max_uses": 1,
"citations": {"enabled": True},
}
llm_with_tools = llm.bind_tools([tool])
input_message = {
"role": "user",
"content": [
{
"type": "text",
"text": "Fetch the content at https://docs.langchain.com and analyze",
},
],
}
response = llm_with_tools.invoke([input_message])
assert all(isinstance(block, dict) for block in response.content)
block_types = {block["type"] for block in response.content} # type: ignore[index]
assert block_types == {"text", call_key, result_key}
# Test streaming
full: BaseMessageChunk | None = None
for chunk in llm_with_tools.stream([input_message]):
assert isinstance(chunk, AIMessageChunk)
full = chunk if full is None else full + chunk
assert isinstance(full, AIMessageChunk)
assert isinstance(full.content, list)
block_types = {block["type"] for block in full.content} # type: ignore[index]
assert block_types == {"text", call_key, result_key}
# Test we can pass back in
next_message = {
"role": "user",
"content": "What does the site you just fetched say about models?",
}
_ = llm_with_tools.invoke(
[input_message, full, next_message],
)
@pytest.mark.vcr
@pytest.mark.parametrize("output_version", ["v0", "v1"])
def test_code_execution_old(output_version: Literal["v0", "v1"]) -> None:
    """Note: this tests the `code_execution_20250522` tool, which is now legacy.
    See the `test_code_execution` test below to test the current
    `code_execution_20250825` tool.
    Migration guide: https://platform.claude.com/docs/en/agents-and-tools/tool-use/code-execution-tool#upgrade-to-latest-tool-version
    """
    llm = ChatAnthropic(
        model=MODEL_NAME, # type: ignore[call-arg]
        betas=["code-execution-2025-05-22"],
        output_version=output_version,
    )
    # Legacy server-side code-execution tool (code runs on Anthropic's side).
    tool = {"type": "code_execution_20250522", "name": "code_execution"}
    llm_with_tools = llm.bind_tools([tool])
    input_message = {
        "role": "user",
        "content": [
            {
                "type": "text",
                "text": (
                    "Calculate the mean and standard deviation of "
                    "[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]"
                ),
            },
        ],
    }
    response = llm_with_tools.invoke([input_message])
    assert all(isinstance(block, dict) for block in response.content)
    block_types = {block["type"] for block in response.content} # type: ignore[index]
    # v0 surfaces Anthropic-native block names; v1 normalizes them.
    if output_version == "v0":
        assert block_types == {"text", "server_tool_use", "code_execution_tool_result"}
    else:
        assert block_types == {"text", "server_tool_call", "server_tool_result"}
    # Test streaming
    full: BaseMessageChunk | None = None
    for chunk in llm_with_tools.stream([input_message]):
        assert isinstance(chunk, AIMessageChunk)
        full = chunk if full is None else full + chunk
    assert isinstance(full, AIMessageChunk)
    assert isinstance(full.content, list)
    block_types = {block["type"] for block in full.content} # type: ignore[index]
    if output_version == "v0":
        assert block_types == {"text", "server_tool_use", "code_execution_tool_result"}
    else:
        assert block_types == {"text", "server_tool_call", "server_tool_result"}
    # Test we can pass back in
    next_message = {
        "role": "user",
        "content": "Please add more comments to the code.",
    }
    _ = llm_with_tools.invoke(
        [input_message, full, next_message],
    )
@pytest.mark.default_cassette("test_code_execution.yaml.gz")
@pytest.mark.vcr
@pytest.mark.parametrize("output_version", ["v0", "v1"])
def test_code_execution(output_version: Literal["v0", "v1"]) -> None:
"""Note: this is a beta feature.
TODO: Update to remove beta once generally available.
"""
llm = ChatAnthropic(
model=MODEL_NAME, # type: ignore[call-arg]
betas=["code-execution-2025-08-25"],
output_version=output_version,
)
tool = {"type": "code_execution_20250825", "name": "code_execution"}
llm_with_tools = llm.bind_tools([tool])
input_message = {
"role": "user",
"content": [
{
"type": "text",
"text": (
"Calculate the mean and standard deviation of "
"[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]"
),
},
],
}
response = llm_with_tools.invoke([input_message])
assert all(isinstance(block, dict) for block in response.content)
block_types = {block["type"] for block in response.content} # type: ignore[index]
if output_version == "v0":
assert block_types == {
"text",
"server_tool_use",
"text_editor_code_execution_tool_result",
"bash_code_execution_tool_result",
}
else:
assert block_types == {"text", "server_tool_call", "server_tool_result"}
# Test streaming
full: BaseMessageChunk | None = None
for chunk in llm_with_tools.stream([input_message]):
assert isinstance(chunk, AIMessageChunk)
full = chunk if full is None else full + chunk
assert isinstance(full, AIMessageChunk)
assert isinstance(full.content, list)
block_types = {block["type"] for block in full.content} # type: ignore[index]
if output_version == "v0":
assert block_types == {
"text",
"server_tool_use",
"text_editor_code_execution_tool_result",
"bash_code_execution_tool_result",
}
else:
assert block_types == {"text", "server_tool_call", "server_tool_result"}
# Test we can pass back in
next_message = {
"role": "user",
"content": "Please add more comments to the code.",
}
_ = llm_with_tools.invoke(
[input_message, full, next_message],
)
@pytest.mark.default_cassette("test_remote_mcp.yaml.gz")
@pytest.mark.vcr
@pytest.mark.parametrize("output_version", ["v0", "v1"])
def test_remote_mcp(output_version: Literal["v0", "v1"]) -> None:
"""Note: this is a beta feature.
TODO: Update to remove beta once generally available.
"""
mcp_servers = [
{
"type": "url",
"url": "https://mcp.deepwiki.com/mcp",
"name": "deepwiki",
"tool_configuration": {"enabled": True, "allowed_tools": ["ask_question"]},
"authorization_token": "PLACEHOLDER",
},
]
llm = ChatAnthropic(
model="claude-sonnet-4-5-20250929", # type: ignore[call-arg]
betas=["mcp-client-2025-04-04"],
mcp_servers=mcp_servers,
max_tokens=10_000, # type: ignore[call-arg]
output_version=output_version,
)
input_message = {
"role": "user",
"content": [
{
"type": "text",
"text": (
"What transport protocols does the 2025-03-26 version of the MCP "
"spec (modelcontextprotocol/modelcontextprotocol) support?"
),
},
],
}
response = llm.invoke([input_message])
assert all(isinstance(block, dict) for block in response.content)
block_types = {block["type"] for block in response.content} # type: ignore[index]
if output_version == "v0":
assert block_types == {"text", "mcp_tool_use", "mcp_tool_result"}
else:
assert block_types == {"text", "server_tool_call", "server_tool_result"}
# Test streaming
full: BaseMessageChunk | None = None
for chunk in llm.stream([input_message]):
assert isinstance(chunk, AIMessageChunk)
full = chunk if full is None else full + chunk
assert isinstance(full, AIMessageChunk)
assert isinstance(full.content, list)
assert all(isinstance(block, dict) for block in full.content)
block_types = {block["type"] for block in full.content} # type: ignore[index]
if output_version == "v0":
assert block_types == {"text", "mcp_tool_use", "mcp_tool_result"}
else:
assert block_types == {"text", "server_tool_call", "server_tool_result"}
# Test we can pass back in
next_message = {
"role": "user",
"content": "Please query the same tool again, but add 'please' to your query.",
}
_ = llm.invoke(
[input_message, full, next_message],
)
@pytest.mark.parametrize("block_format", ["anthropic", "standard"])
def test_files_api_image(block_format: str) -> None:
"""Note: this is a beta feature.
TODO: Update to remove beta once generally available.
"""
image_file_id = os.getenv("ANTHROPIC_FILES_API_IMAGE_ID")
if not image_file_id:
pytest.skip()
llm = ChatAnthropic(
model=MODEL_NAME, # type: ignore[call-arg]
betas=["files-api-2025-04-14"],
)
if block_format == "anthropic":
block = {
"type": "image",
"source": {
"type": "file",
"file_id": image_file_id,
},
}
else:
# standard block format
block = {
"type": "image",
"file_id": image_file_id,
}
input_message = {
"role": "user",
"content": [
{"type": "text", "text": "Describe this image."},
block,
],
}
_ = llm.invoke([input_message])
@pytest.mark.parametrize("block_format", ["anthropic", "standard"])
def test_files_api_pdf(block_format: str) -> None:
"""Note: this is a beta feature.
TODO: Update to remove beta once generally available.
"""
pdf_file_id = os.getenv("ANTHROPIC_FILES_API_PDF_ID")
if not pdf_file_id:
pytest.skip()
llm = ChatAnthropic(
model=MODEL_NAME, # type: ignore[call-arg]
betas=["files-api-2025-04-14"],
)
if block_format == "anthropic":
block = {"type": "document", "source": {"type": "file", "file_id": pdf_file_id}}
else:
# standard block format
block = {
"type": "file",
"file_id": pdf_file_id,
}
input_message = {
"role": "user",
"content": [
{"type": "text", "text": "Describe this document."},
block,
],
}
_ = llm.invoke([input_message])
@pytest.mark.vcr
def test_search_result_tool_message() -> None:
    """Test that we can pass a search result tool message to the model."""
    llm = ChatAnthropic(
        model=MODEL_NAME, # type: ignore[call-arg]
    )
    @tool
    def retrieval_tool(query: str) -> list[dict]:
        """Retrieve information from a knowledge base."""
        # Canned search_result block with citations enabled, standing in for
        # a real knowledge-base lookup.
        return [
            {
                "type": "search_result",
                "title": "Leave policy",
                "source": "HR Leave Policy 2025",
                "citations": {"enabled": True},
                "content": [
                    {
                        "type": "text",
                        "text": (
                            "To request vacation days, submit a leave request form "
                            "through the HR portal. Approval will be sent by email."
                        ),
                    },
                ],
            },
        ]
    # Hand-built tool call so the resulting ToolMessage carries a known id.
    tool_call = {
        "type": "tool_call",
        "name": "retrieval_tool",
        "args": {"query": "vacation days request process"},
        "id": "toolu_abc123",
    }
    tool_message = retrieval_tool.invoke(tool_call)
    assert isinstance(tool_message, ToolMessage)
    assert isinstance(tool_message.content, list)
    messages = [
        HumanMessage("How do I request vacation days?"),
        AIMessage(
            [{"type": "text", "text": "Let me look that up for you."}],
            tool_calls=[tool_call],
        ),
        tool_message,
    ]
    result = llm.invoke(messages)
    assert isinstance(result, AIMessage)
    assert isinstance(result.content, list)
    # The model is expected to cite the supplied search result.
    assert any("citations" in block for block in result.content)
    # Round-trip: converting v1 content blocks back to Anthropic format must
    # reproduce the raw response content exactly.
    assert (
        _convert_from_v1_to_anthropic(result.content_blocks, [], "anthropic")
        == result.content
    )
def test_search_result_top_level() -> None:
llm = ChatAnthropic(
model=MODEL_NAME, # type: ignore[call-arg]
)
input_message = HumanMessage(
[
{
"type": "search_result",
"title": "Leave policy",
"source": "HR Leave Policy 2025 - page 1",
"citations": {"enabled": True},
"content": [
{
"type": "text",
"text": (
"To request vacation days, submit a leave request form "
"through the HR portal. Approval will be sent by email."
),
},
],
},
{
"type": "search_result",
"title": "Leave policy",
"source": "HR Leave Policy 2025 - page 2",
"citations": {"enabled": True},
"content": [
{
"type": "text",
"text": "Managers have 3 days to approve a request.",
},
],
},
{
"type": "text",
"text": "How do I request vacation days?",
},
],
)
result = llm.invoke([input_message])
assert isinstance(result, AIMessage)
assert isinstance(result.content, list)
assert any("citations" in block for block in result.content)
assert (
_convert_from_v1_to_anthropic(result.content_blocks, [], "anthropic")
== result.content
)
def test_memory_tool() -> None:
llm = ChatAnthropic(
model="claude-sonnet-4-5-20250929", # type: ignore[call-arg]
betas=["context-management-2025-06-27"],
)
llm_with_tools = llm.bind_tools([{"type": "memory_20250818", "name": "memory"}])
response = llm_with_tools.invoke("What are my interests?")
assert isinstance(response, AIMessage)
assert response.tool_calls
assert response.tool_calls[0]["name"] == "memory"
@pytest.mark.vcr
def test_context_management() -> None:
    # TODO: update example to trigger action
    llm = ChatAnthropic(
        model="claude-sonnet-4-5-20250929", # type: ignore[call-arg]
        betas=["context-management-2025-06-27"],
        # Deliberately tiny token thresholds so the clear_tool_uses edit is
        # configured (per the TODO above it may not actually fire).
        context_management={
            "edits": [
                {
                    "type": "clear_tool_uses_20250919",
                    "trigger": {"type": "input_tokens", "value": 10},
                    "clear_at_least": {"type": "input_tokens", "value": 5},
                }
            ]
        },
        max_tokens=1024, # type: ignore[call-arg]
    )
    llm_with_tools = llm.bind_tools(
        [{"type": "web_search_20250305", "name": "web_search"}]
    )
    input_message = {"role": "user", "content": "Search for recent developments in AI"}
    response = llm_with_tools.invoke([input_message])
    # Response metadata should report context-management activity.
    assert response.response_metadata.get("context_management")
    # Test streaming
    full: BaseMessageChunk | None = None
    for chunk in llm_with_tools.stream([input_message]):
        assert isinstance(chunk, AIMessageChunk)
        full = chunk if full is None else full + chunk
    assert isinstance(full, AIMessageChunk)
    assert full.response_metadata.get("context_management")
def test_async_shared_client() -> None:
llm = ChatAnthropic(model=MODEL_NAME) # type: ignore[call-arg]
_ = asyncio.run(llm.ainvoke("Hello"))
_ = asyncio.run(llm.ainvoke("Hello"))
| GetWeather |
python | allegroai__clearml | clearml/backend_api/services/v2_13/events.py | {
"start": 92732,
"end": 96079
} | class ____(Response):
"""
Response of events.get_task_plots endpoint.
:param plots: Plots list
:type plots: Sequence[dict]
:param returned: Number of results returned
:type returned: int
:param total: Total number of results available for this query
:type total: float
:param scroll_id: Scroll ID for getting more results
:type scroll_id: str
"""
_service = "events"
_action = "get_task_plots"
_version = "2.13"
_schema = {
"definitions": {},
"properties": {
"plots": {
"description": "Plots list",
"items": {"type": "object"},
"type": ["array", "null"],
},
"returned": {
"description": "Number of results returned",
"type": ["integer", "null"],
},
"scroll_id": {
"description": "Scroll ID for getting more results",
"type": ["string", "null"],
},
"total": {
"description": "Total number of results available for this query",
"type": ["number", "null"],
},
},
"type": "object",
}
def __init__(
self,
plots: Optional[List[dict]] = None,
returned: Optional[int] = None,
total: Optional[float] = None,
scroll_id: Optional[str] = None,
**kwargs: Any
) -> None:
super(GetTaskPlotsResponse, self).__init__(**kwargs)
self.plots = plots
self.returned = returned
self.total = total
self.scroll_id = scroll_id
@schema_property("plots")
def plots(self) -> Optional[List[dict]]:
return self._property_plots
@plots.setter
def plots(self, value: Optional[List[dict]]) -> None:
if value is None:
self._property_plots = None
return
self.assert_isinstance(value, "plots", (list, tuple))
self.assert_isinstance(value, "plots", (dict,), is_array=True)
self._property_plots = value
@schema_property("returned")
def returned(self) -> Optional[int]:
return self._property_returned
@returned.setter
def returned(self, value: Optional[int]) -> None:
if value is None:
self._property_returned = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "returned", six.integer_types)
self._property_returned = value
@schema_property("total")
def total(self) -> Optional[float]:
return self._property_total
@total.setter
def total(self, value: Optional[float]) -> None:
if value is None:
self._property_total = None
return
self.assert_isinstance(value, "total", six.integer_types + (float,))
self._property_total = value
@schema_property("scroll_id")
def scroll_id(self) -> Optional[str]:
return self._property_scroll_id
@scroll_id.setter
def scroll_id(self, value: Optional[str]) -> None:
if value is None:
self._property_scroll_id = None
return
self.assert_isinstance(value, "scroll_id", six.string_types)
self._property_scroll_id = value
| GetTaskPlotsResponse |
python | optuna__optuna | optuna/terminator/improvement/evaluator.py | {
"start": 3295,
"end": 3561
} | class ____(metaclass=abc.ABCMeta):
"""Base class for improvement evaluators."""
@abc.abstractmethod
def evaluate(self, trials: list[FrozenTrial], study_direction: StudyDirection) -> float:
pass
@experimental_class("3.2.0")
| BaseImprovementEvaluator |
python | run-llama__llama_index | llama-index-core/llama_index/core/indices/common/struct_store/sql.py | {
"start": 459,
"end": 2617
} | class ____(BaseStructDatapointExtractor):
"""Extracts datapoints from a structured document for a SQL db."""
def __init__(
self,
llm: LLM,
schema_extract_prompt: BasePromptTemplate,
output_parser: OUTPUT_PARSER_TYPE,
sql_database: SQLDatabase,
table_name: Optional[str] = None,
table: Optional[Table] = None,
ref_doc_id_column: Optional[str] = None,
) -> None:
"""Initialize params."""
super().__init__(llm, schema_extract_prompt, output_parser)
self._sql_database = sql_database
# currently the user must specify a table info
if table_name is None and table is None:
raise ValueError("table_name must be specified")
self._table_name = table_name or cast(Table, table).name
if table is None:
table_name = cast(str, table_name)
table = self._sql_database.metadata_obj.tables[table_name]
# if ref_doc_id_column is specified, then we need to check that
# it is a valid column in the table
col_names = [c.name for c in table.c]
if ref_doc_id_column is not None and ref_doc_id_column not in col_names:
raise ValueError(
f"ref_doc_id_column {ref_doc_id_column} not in table {table_name}"
)
self.ref_doc_id_column = ref_doc_id_column
# then store python types of each column
self._col_types_map: Dict[str, type] = {
c.name: table.c[c.name].type.python_type for c in table.c
}
def _get_col_types_map(self) -> Dict[str, type]:
"""Get col types map for schema."""
return self._col_types_map
def _get_schema_text(self) -> str:
"""Insert datapoint into index."""
return self._sql_database.get_single_table_info(self._table_name)
def _insert_datapoint(self, datapoint: StructDatapoint) -> None:
"""Insert datapoint into index."""
datapoint_dict = datapoint.to_dict()["fields"]
self._sql_database.insert_into_table(
self._table_name, cast(Dict[Any, Any], datapoint_dict)
)
| SQLStructDatapointExtractor |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 17506,
"end": 17736
} | class ____(sgqlc.types.Enum):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__choices__ = ("BUSINESS", "BUSINESS_PLUS", "FREE", "TIERED_PER_SEAT", "UNLIMITED")
| OrgCreateAuditEntryBillingPlan |
python | HypothesisWorks__hypothesis | hypothesis-python/tests/cover/test_reflection.py | {
"start": 10226,
"end": 11123
} | class ____:
def funcy(self):
pass
def test_can_proxy_functions_with_mixed_args_and_varargs():
def foo(a, *args):
return (a, args)
@proxies(foo)
def bar(*args, **kwargs):
return foo(*args, **kwargs)
assert bar(1, 2) == (1, (2,))
def test_can_delegate_to_a_function_with_no_positional_args():
def foo(a, b):
return (a, b)
@proxies(foo)
def bar(**kwargs):
return foo(**kwargs)
assert bar(2, 1) == (2, 1)
@pytest.mark.parametrize(
    "func,args,expected",
    [
        (lambda: None, (), None),
        (lambda a: a**2, (2,), 4),
        (lambda *a: a, [1, 2, 3], (1, 2, 3)),
    ],
)
def test_can_proxy_lambdas(func, args, expected):
    """Proxying a lambda preserves both its ``__name__`` and its behavior."""
    @proxies(func)
    def wrapped(*args, **kwargs):
        return func(*args, **kwargs)
    assert wrapped.__name__ == "<lambda>"
    assert wrapped(*args) == expected
python | scipy__scipy | scipy/stats/_qmc.py | {
"start": 27527,
"end": 38393
} | class ____(ABC):
"""A generic Quasi-Monte Carlo sampler class meant for subclassing.
QMCEngine is a base class to construct a specific Quasi-Monte Carlo
sampler. It cannot be used directly as a sampler.
Parameters
----------
d : int
Dimension of the parameter space.
optimization : {None, "random-cd", "lloyd"}, optional
Whether to use an optimization scheme to improve the quality after
sampling. Note that this is a post-processing step that does not
guarantee that all properties of the sample will be conserved.
Default is None.
* ``random-cd``: random permutations of coordinates to lower the
centered discrepancy. The best sample based on the centered
discrepancy is constantly updated. Centered discrepancy-based
sampling shows better space-filling robustness toward 2D and 3D
subprojections compared to using other discrepancy measures.
* ``lloyd``: Perturb samples using a modified Lloyd-Max algorithm.
The process converges to equally spaced samples.
.. versionadded:: 1.10.0
rng : `numpy.random.Generator`, optional
Pseudorandom number generator state. When `rng` is None, a new
`numpy.random.Generator` is created using entropy from the
operating system. Types other than `numpy.random.Generator` are
passed to `numpy.random.default_rng` to instantiate a ``Generator``.
.. versionchanged:: 1.15.0
As part of the `SPEC-007 <https://scientific-python.org/specs/spec-0007/>`_
transition from use of `numpy.random.RandomState` to
`numpy.random.Generator`, this keyword was changed from `seed` to
`rng`. For an interim period, both keywords will continue to work, although
only one may be specified at a time. After the interim period, function
calls using the `seed` keyword will emit warnings. Following a
deprecation period, the `seed` keyword will be removed.
Notes
-----
By convention samples are distributed over the half-open interval
``[0, 1)``. Instances of the class can access the attributes: ``d`` for
the dimension; and ``rng`` for the random number generator.
**Subclassing**
When subclassing `QMCEngine` to create a new sampler, ``__init__`` and
``random`` must be redefined.
* ``__init__(d, rng=None)``: at least fix the dimension. If the sampler
does not take advantage of a ``rng`` (deterministic methods like
Halton), this parameter can be omitted.
* ``_random(n, *, workers=1)``: draw ``n`` from the engine. ``workers``
is used for parallelism. See `Halton` for example.
Optionally, two other methods can be overwritten by subclasses:
* ``reset``: Reset the engine to its original state.
* ``fast_forward``: If the sequence is deterministic (like Halton
sequence), then ``fast_forward(n)`` is skipping the ``n`` first draw.
Examples
--------
To create a random sampler based on ``np.random.random``, we would do the
following:
>>> from scipy.stats import qmc
>>> class RandomEngine(qmc.QMCEngine):
... def __init__(self, d, rng=None):
... super().__init__(d=d, rng=rng)
...
...
... def _random(self, n=1, *, workers=1):
... return self.rng.random((n, self.d))
...
...
... def reset(self):
... super().__init__(d=self.d, rng=self.rng_seed)
... return self
...
...
... def fast_forward(self, n):
... self.random(n)
... return self
After subclassing `QMCEngine` to define the sampling strategy we want to
use, we can create an instance to sample from.
>>> engine = RandomEngine(2)
>>> engine.random(5)
array([[0.22733602, 0.31675834], # random
[0.79736546, 0.67625467],
[0.39110955, 0.33281393],
[0.59830875, 0.18673419],
[0.67275604, 0.94180287]])
We can also reset the state of the generator and resample again.
>>> _ = engine.reset()
>>> engine.random(5)
array([[0.22733602, 0.31675834], # random
[0.79736546, 0.67625467],
[0.39110955, 0.33281393],
[0.59830875, 0.18673419],
[0.67275604, 0.94180287]])
"""
@abstractmethod
@_transition_to_rng('seed', replace_doc=False)
def __init__(
self,
d: IntNumber,
*,
optimization: Literal["random-cd", "lloyd"] | None = None,
rng: SeedType = None
) -> None:
self._initialize(d, optimization=optimization, rng=rng)
# During SPEC 7 transition:
# `__init__` has to be wrapped with @_transition_to_rng decorator
# because it is public. Subclasses previously called `__init__`
# directly, but this was problematic because arguments passed to
# subclass `__init__` as `seed` would get passed to superclass
# `__init__` as `rng`, rejecting `RandomState` arguments.
def _initialize(
self,
d: IntNumber,
*,
optimization: Literal["random-cd", "lloyd"] | None = None,
rng: SeedType = None
) -> None:
if not np.issubdtype(type(d), np.integer) or d < 0:
raise ValueError('d must be a non-negative integer value')
self.d = d
if isinstance(rng, np.random.Generator):
# Spawn a Generator that we can own and reset.
self.rng = _rng_spawn(rng, 1)[0]
else:
# Create our instance of Generator, does not need spawning
# Also catch RandomState which cannot be spawned
self.rng = check_random_state(rng)
self.rng_seed = copy.deepcopy(self.rng)
self.num_generated = 0
config = {
# random-cd
"n_nochange": 100,
"n_iters": 10_000,
"rng": self.rng,
# lloyd
"tol": 1e-5,
"maxiter": 10,
"qhull_options": None,
}
self._optimization = optimization
self.optimization_method = _select_optimizer(optimization, config)
@abstractmethod
def _random(
self, n: IntNumber = 1, *, workers: IntNumber = 1
) -> np.ndarray:
...
def random(
self, n: IntNumber = 1, *, workers: IntNumber = 1
) -> np.ndarray:
"""Draw `n` in the half-open interval ``[0, 1)``.
Parameters
----------
n : int, optional
Number of samples to generate in the parameter space.
Default is 1.
workers : int, optional
Only supported with `Halton`.
Number of workers to use for parallel processing. If -1 is
given all CPU threads are used. Default is 1. It becomes faster
than one worker for `n` greater than :math:`10^3`.
Returns
-------
sample : array_like (n, d)
QMC sample.
"""
sample = self._random(n, workers=workers)
if self.optimization_method is not None:
sample = self.optimization_method(sample)
self.num_generated += n
return sample
def integers(
self,
l_bounds: "npt.ArrayLike",
*,
u_bounds: "npt.ArrayLike | None" = None,
n: IntNumber = 1,
endpoint: bool = False,
workers: IntNumber = 1
) -> np.ndarray:
r"""
Draw `n` integers from `l_bounds` (inclusive) to `u_bounds`
(exclusive), or if endpoint=True, `l_bounds` (inclusive) to
`u_bounds` (inclusive).
Parameters
----------
l_bounds : int or array-like of ints
Lowest (signed) integers to be drawn (unless ``u_bounds=None``,
in which case this parameter is 0 and this value is used for
`u_bounds`).
u_bounds : int or array-like of ints, optional
If provided, one above the largest (signed) integer to be drawn
(see above for behavior if ``u_bounds=None``).
If array-like, must contain integer values.
n : int, optional
Number of samples to generate in the parameter space.
Default is 1.
endpoint : bool, optional
If true, sample from the interval ``[l_bounds, u_bounds]`` instead
of the default ``[l_bounds, u_bounds)``. Defaults is False.
workers : int, optional
Number of workers to use for parallel processing. If -1 is
given all CPU threads are used. Only supported when using `Halton`
Default is 1.
Returns
-------
sample : array_like (n, d)
QMC sample.
Notes
-----
It is safe to just use the same ``[0, 1)`` to integer mapping
with QMC that you would use with MC. You still get unbiasedness,
a strong law of large numbers, an asymptotically infinite variance
reduction and a finite sample variance bound.
To convert a sample from :math:`[0, 1)` to :math:`[a, b), b>a`,
with :math:`a` the lower bounds and :math:`b` the upper bounds,
the following transformation is used:
.. math::
\text{floor}((b - a) \cdot \text{sample} + a)
"""
if u_bounds is None:
u_bounds = l_bounds
l_bounds = 0
u_bounds = np.atleast_1d(u_bounds)
l_bounds = np.atleast_1d(l_bounds)
if endpoint:
u_bounds = u_bounds + 1
if (not np.issubdtype(l_bounds.dtype, np.integer) or
not np.issubdtype(u_bounds.dtype, np.integer)):
message = ("'u_bounds' and 'l_bounds' must be integers or"
" array-like of integers")
raise ValueError(message)
if isinstance(self, Halton):
sample = self.random(n=n, workers=workers)
else:
sample = self.random(n=n)
sample = scale(sample, l_bounds=l_bounds, u_bounds=u_bounds)
sample = np.floor(sample).astype(np.int64)
return sample
def reset(self) -> "QMCEngine":
"""Reset the engine to base state.
Returns
-------
engine : QMCEngine
Engine reset to its base state.
"""
rng = copy.deepcopy(self.rng_seed)
self.rng = check_random_state(rng)
self.num_generated = 0
return self
def fast_forward(self, n: IntNumber) -> "QMCEngine":
"""Fast-forward the sequence by `n` positions.
Parameters
----------
n : int
Number of points to skip in the sequence.
Returns
-------
engine : QMCEngine
Engine reset to its base state.
"""
self.random(n=n)
return self
| QMCEngine |
python | dagster-io__dagster | python_modules/libraries/dagster-shared/dagster_shared/check/builder.py | {
"start": 909,
"end": 4698
} | class ____(NamedTuple):
"""Utility class for managing references to global and local namespaces.
These namespaces are passed to ForwardRef._evaluate to resolve the actual
type from a string.
"""
global_ns: dict
local_ns: dict
lazy_imports: dict
@staticmethod
def capture_from_frame(
depth: int,
*,
add_to_local_ns: Optional[Mapping[str, Any]] = None,
) -> "EvalContext":
"""Capture the global and local namespaces via the stack frame.
Args:
depth: which stack frame to reference, with depth 0 being the callsite.
add_to_local_ns: A mapping of additional values to update the local namespace with.
"""
ctx_frame = sys._getframe(depth + 1) # noqa # surprisingly not costly
# copy to not mess up frame data
global_ns = ctx_frame.f_globals.copy()
local_ns = ctx_frame.f_locals.copy()
if add_to_local_ns:
local_ns.update(add_to_local_ns)
return EvalContext(
global_ns=global_ns,
local_ns=local_ns,
lazy_imports={},
)
@staticmethod
@contextmanager
def contextual_namespace(ns: Mapping[str, type]):
token = _contextual_ns.set(ns)
try:
yield
finally:
_contextual_ns.reset(token)
def update_from_frame(self, depth: int):
# Update the global and local namespaces with symbols from the target frame
ctx_frame = sys._getframe(depth + 1) # noqa # surprisingly not costly
self.global_ns.update(ctx_frame.f_globals)
self.local_ns.update(ctx_frame.f_locals)
def register_lazy_import(self, type_name: str, module: str):
invariant(
self.lazy_imports.get(type_name, module) == module,
f"Conflict in lazy imports for type {type_name}, tried to overwrite "
f"{self.lazy_imports.get(type_name)} with {module}.",
)
self.lazy_imports[type_name] = module
def get_merged_ns(self):
return {
**_contextual_ns.get(),
**self.global_ns,
**self.local_ns,
}
def eval_forward_ref(self, ref: ForwardRef) -> Optional[type]:
if ref.__forward_arg__ in self.lazy_imports:
# if we are going to add a lazy import for the type,
# return a placeholder to grab the name from
return type(ref.__forward_arg__, (_LazyImportPlaceholder,), {})
try:
if sys.version_info <= (3, 9):
return ref._evaluate( # noqa # type: ignore
globalns=self.get_merged_ns(),
localns={},
)
elif sys.version_info < (3, 12, 4):
return ref._evaluate( # noqa
globalns=self.get_merged_ns(),
localns={},
recursive_guard=frozenset(),
)
else: # type_params added in 3.12.4
return ref._evaluate( # noqa
globalns=self.get_merged_ns(),
localns={},
recursive_guard=frozenset(),
type_params=(),
)
except NameError as e:
raise CheckError(
f"Unable to resolve {ref}, could not map string name to actual type using captured frames. "
f"Use Annotated['{ref.__forward_arg__}', ImportFrom('module.to.import.from')] to create a lazy import."
) from e
def compile_fn(self, body: str, fn_name: str) -> Callable:
local_ns = {}
exec(
body,
self.get_merged_ns(),
local_ns,
)
return local_ns[fn_name]
T = TypeVar("T")
| EvalContext |
python | gevent__gevent | src/gevent/monkey/_patch_thread_common.py | {
"start": 2982,
"end": 13253
} | class ____:
# Description of the hang:
# There is an incompatibility with patching 'thread' and the 'multiprocessing' module:
# The problem is that multiprocessing.queues.Queue uses a half-duplex multiprocessing.Pipe,
# which is implemented with os.pipe() and _multiprocessing.Connection. os.pipe isn't patched
# by gevent, as it returns just a fileno. _multiprocessing.Connection is an internal implementation
# class implemented in C, which exposes a 'poll(timeout)' method; under the covers, this issues a
# (blocking) select() call: hence the need for a real thread. Except for that method, we could
# almost replace Connection with gevent.fileobject.SocketAdapter, plus a trivial
# patch to os.pipe (below). Sigh, so close. (With a little work, we could replicate that method)
# import os
# import fcntl
# os_pipe = os.pipe
# def _pipe():
# r, w = os_pipe()
# fcntl.fcntl(r, fcntl.F_SETFL, os.O_NONBLOCK)
# fcntl.fcntl(w, fcntl.F_SETFL, os.O_NONBLOCK)
# return r, w
# os.pipe = _pipe
gevent_threading_mod = None
gevent_thread_mod = None
thread_mod = None
threading_mod = None
orig_current_thread = None
main_thread = None
orig_shutdown = None
def __init__(self, threading=True, _threading_local=True, Event=True, logging=True,
existing_locks=True,
_warnings=None):
self.threading = threading
self.threading_local = _threading_local
self.Event = Event
self.logging = logging
self.existing_locks = existing_locks
self.warnings = _warnings
def __call__(self):
# The 'threading' module copies some attributes from the
# thread module the first time it is imported. If we patch 'thread'
# before that happens, then we store the wrong values in 'saved',
# So if we're going to patch threading, we either need to import it
# before we patch thread, or manually clean up the attributes that
# are in trouble. The latter is tricky because of the different names
# on different versions.
self.threading_mod = __import__('threading')
# Capture the *real* current thread object before
# we start returning DummyThread objects, for comparison
# to the main thread.
self.orig_current_thread = self.threading_mod.current_thread()
self.main_thread = self.threading_mod.main_thread()
self.orig_shutdown = self.threading_mod._shutdown
gevent_thread_mod, thread_mod = _patch_module('thread',
_warnings=self.warnings,
_notify_did_subscribers=False)
if self.threading:
self.patch_threading_event_logging_existing_locks()
if self.threading_local:
self.patch__threading_local()
if self.threading:
self.patch_active_threads()
# Issue 18808 changes the nature of Thread.join() to use
# locks. This means that a greenlet spawned in the main thread
# (which is already running) cannot wait for the main thread---it
# hangs forever. We patch around this if possible. See also
# gevent.threading.
already_patched = is_object_patched('threading', '_shutdown')
if self.orig_current_thread == self.threading_mod.main_thread() and not already_patched:
self.patch_threading_shutdown_on_main_thread_not_already_patched()
self.patch_main_thread_cleanup()
elif not already_patched:
self.patch_shutdown_not_on_main_thread()
from gevent import events
_notify_patch(events.GeventDidPatchModuleEvent('thread',
gevent_thread_mod,
thread_mod))
if self.gevent_threading_mod is not None:
_notify_patch(events.GeventDidPatchModuleEvent('threading',
self.gevent_threading_mod,
self.threading_mod))
def patch_threading_event_logging_existing_locks(self):
self.gevent_threading_mod, patched_mod = _patch_module(
'threading',
_warnings=self.warnings,
_notify_did_subscribers=False)
assert patched_mod is self.threading_mod
if self.Event:
self.patch_event()
if self.existing_locks:
_patch_existing_locks(self.threading_mod)
if self.logging and 'logging' in sys.modules:
self.patch_logging()
def patch_event(self):
from gevent.event import Event
from .api import patch_item
patch_item(self.threading_mod, 'Event', Event)
# Python 2 had `Event` as a function returning
# the private class `_Event`. Some code may be relying
# on that.
if hasattr(self.threading_mod, '_Event'):
patch_item(self.threading_mod, '_Event', Event)
def patch_logging(self):
from .api import patch_item
logging = __import__('logging')
patch_item(logging, '_lock', self.threading_mod.RLock())
for wr in logging._handlerList:
# In py26, these are actual handlers, not weakrefs
handler = wr() if callable(wr) else wr
if handler is None:
continue
if not hasattr(handler, 'lock'):
raise TypeError("Unknown/unsupported handler %r" % handler)
handler.lock = self.threading_mod.RLock()
def patch__threading_local(self):
_threading_local = __import__('_threading_local')
from gevent.local import local
from .api import patch_item
patch_item(_threading_local, 'local', local)
def patch_active_threads(self):
raise NotImplementedError
def patch_threading_shutdown_on_main_thread_not_already_patched(self):
raise NotImplementedError
def patch_main_thread_cleanup(self):
# We create a bit of a reference cycle here,
# so main_thread doesn't get to be collected in a timely way.
# Not good. Take it out of dangling so we don't get
# warned about it.
main_thread = self.main_thread
self.threading_mod._dangling.remove(main_thread)
# Patch up the ident of the main thread to match. This
# matters if threading was imported before monkey-patching
# thread
oldid = main_thread.ident
main_thread._ident = self.threading_mod.get_ident()
if oldid in self.threading_mod._active:
self.threading_mod._active[main_thread.ident] = self.threading_mod._active[oldid]
if oldid != main_thread.ident:
del self.threading_mod._active[oldid]
def patch_shutdown_not_on_main_thread(self):
_queue_warning("Monkey-patching not on the main thread; "
"threading.main_thread().join() will hang from a greenlet",
self.warnings)
from .api import patch_item
main_thread = self.main_thread
threading_mod = self.threading_mod
get_ident = self.threading_mod.get_ident
orig_shutdown = self.orig_shutdown
def _shutdown():
# We've patched get_ident but *did not* patch the
# main_thread.ident value. Beginning in Python 3.9.8
# and then later releases (3.10.1, probably), the
# _main_thread object is only _stop() if the ident of
# the current thread (the *real* main thread) matches
# the ident of the _main_thread object. But without doing that,
# the main thread's shutdown lock (threading._shutdown_locks) is never
# removed *or released*, thus hanging the interpreter.
# XXX: There's probably a better way to do this. Probably need to take a
# step back and look at the whole picture.
main_thread._ident = get_ident()
try:
orig_shutdown()
except LoopExit: # pragma: no cover
pass
patch_item(threading_mod, '_shutdown', orig_shutdown)
patch_item(threading_mod, '_shutdown', _shutdown)
@staticmethod # Static to be sure we don't accidentally capture `self` and keep it alive
def _make_existing_non_main_thread_join_func(thread, thread_greenlet, threading_mod):
from time import time
from gevent.hub import sleep
# TODO: This is almost the algorithm that the 3.13 _ThreadHandle class
# employs. UNIFY them.
def join(timeout=None):
end = None
if threading_mod.current_thread() is thread:
raise RuntimeError("Cannot join current thread")
if thread_greenlet is not None and thread_greenlet.dead:
return
# You may ask: Why not call thread_greenlet.join()?
# Well, in the one case we actually have a greenlet, it's the
# low-level greenlet.greenlet object for the main thread, which
# doesn't have a join method.
#
# You may ask: Why not become the main greenlet's *parent*
# so you can get notified when it finishes? Because you can't
# create a greenlet cycle (the current greenlet is a descendent
# of the parent), and nor can you set a greenlet's parent to None,
# so there can only ever be one greenlet with a parent of None: the main
# greenlet, the one we need to watch.
#
# You may ask: why not swizzle out the problematic lock on the main thread
# into a gevent friendly lock? Well, the interpreter actually depends on that
# for the main thread in threading._shutdown; see below.
if not thread.is_alive():
return
if timeout:
end = time() + timeout
while thread.is_alive():
if end is not None and time() > end:
return
sleep(0.01)
return join
| BasePatcher |
python | dagster-io__dagster | python_modules/libraries/dagster-dg-core/dagster_dg_core/config.py | {
"start": 22152,
"end": 22416
} | class ____(_DgConfigErrorRecord):
key: str
expected_type_str: str
@property
def message(self) -> str:
return f"Missing required value for `{self.key}`:\n Expected: {self.expected_type_str}"
@record
| _DgConfigMissingRequiredFieldErrorRecord |
python | ansible__ansible | lib/ansible/modules/hostname.py | {
"start": 5289,
"end": 6161
} | class ____(BaseStrategy):
COMMAND = 'hostname'
def __init__(self, module):
super(CommandStrategy, self).__init__(module)
self.hostname_cmd = self.module.get_bin_path(self.COMMAND, True)
def get_current_hostname(self):
cmd = [self.hostname_cmd]
rc, out, err = self.module.run_command(cmd)
if rc != 0:
self.module.fail_json(msg="Command failed rc=%d, out=%s, err=%s" % (rc, out, err))
return to_native(out).strip()
def set_current_hostname(self, name):
cmd = [self.hostname_cmd, name]
rc, out, err = self.module.run_command(cmd)
if rc != 0:
self.module.fail_json(msg="Command failed rc=%d, out=%s, err=%s" % (rc, out, err))
def get_permanent_hostname(self):
return 'UNKNOWN'
def set_permanent_hostname(self, name):
pass
| CommandStrategy |
python | tensorflow__tensorflow | tensorflow/python/training/input_test.py | {
"start": 41462,
"end": 67886
} | class ____(test_lib.TestCase):
def _testTwoThreadsHelper(self, use_dict):
with ops.Graph().as_default(), self.cached_session():
# Two threads, the first generates (0..69, "a").
num_a = 70
zero64 = constant_op.constant(0, dtype=dtypes.int64)
examples = variables.Variable(zero64)
counter = examples.count_up_to(num_a)
sparse_counter = sparse_tensor.SparseTensor(
indices=array_ops.reshape(zero64, [1, 1]),
values=array_ops_stack.stack(
[math_ops.cast(counter, dtypes.float32)]),
dense_shape=[1])
# The second generates (99, "b") 90 times and then stops.
num_b = 90
ninety_nine = inp.limit_epochs(
constant_op.constant(
99, dtype=dtypes.int64), num_b)
sparse_ninety_nine = sparse_tensor.SparseTensor(
indices=array_ops.reshape(zero64, [1, 1]),
values=array_ops_stack.stack(
[math_ops.cast(ninety_nine, dtypes.float32)]),
dense_shape=[1])
# These get joined together and grouped into batches of 5.
batch_size = 5
if use_dict:
batched = inp.batch_join(
[{
"c": counter,
"s": sparse_counter,
"S": "a"
}, {
"c": ninety_nine,
"s": sparse_ninety_nine,
"S": "b"
}],
batch_size=batch_size)
batched_fetch = [batched["c"], batched["s"], batched["S"]]
else:
batched = inp.batch_join(
[[counter, sparse_counter, "a"],
[ninety_nine, sparse_ninety_nine, "b"]],
batch_size=batch_size)
batched_fetch = batched
# Shapes.
self.assertEqual(3, len(batched_fetch))
self.assertAllEqual((batch_size,), batched_fetch[0].get_shape().as_list())
self.assertAllEqual((None, 2),
batched_fetch[1].indices.get_shape().as_list())
self.assertAllEqual((None,),
batched_fetch[1].values.get_shape().as_list())
self.assertAllEqual((2,),
batched_fetch[1].dense_shape.get_shape().as_list())
self.assertAllEqual((batch_size,), batched_fetch[2].get_shape().as_list())
self.evaluate(variables.global_variables_initializer())
variables.local_variables_initializer().run()
threads = queue_runner_impl.start_queue_runners()
# Should see the "a" and "b" threads mixed together.
all_a = []
seen_b = 0
saw_both = 0
num_batches = (num_a + num_b) // batch_size
for i in range(num_batches):
results = self.evaluate(batched_fetch)
self.assertEqual(3, len(results))
self.assertEqual(batch_size, len(results[0]))
self.assertEqual(batch_size, len(results[2]))
self.assertAllEqual(results[0], results[1].values)
self.assertAllEqual(
results[1].indices,
np.vstack((np.arange(batch_size), np.zeros(batch_size))).T)
self.assertAllEqual(results[1].dense_shape, [batch_size, 1])
which_a = [i for i, s in enumerate(results[2]) if s == b"a"]
which_b = [i for i, s in enumerate(results[2]) if s == b"b"]
self.assertEqual(len(which_a) + len(which_b), batch_size)
if which_a and which_b:
saw_both += 1
all_a.extend(results[0][i] for i in which_a)
seen_b += len(which_b)
self.assertAllEqual([99] * len(which_b),
[results[0][i] for i in which_b])
# We'd like to see some minimum level of mixing of the results of both
# threads, but we can't rely on fair thread scheduling, so we just log.
# self.assertGreater(saw_both, 1)
tf_logging.info("testTwoThreads%s saw both count: %s",
"Dict" if use_dict else "", saw_both)
# Verify the order of results from "a" were preserved.
self.assertAllEqual(all_a, np.arange(num_a))
self.assertEqual(seen_b, num_b)
# Reached the limit.
with self.assertRaises(errors_impl.OutOfRangeError):
self.evaluate(batched_fetch)
for thread in threads:
thread.join()
def testTwoThreads(self):
self._testTwoThreadsHelper(use_dict=False)
def testTwoThreadsDict(self):
self._testTwoThreadsHelper(use_dict=True)
def testMismatchedDictKeys(self):
with ops.Graph().as_default(), self.assertRaisesRegex(
ValueError, "must have the same keys"):
inp.batch_join(
[{
"c": 12,
"s": 123,
"S": "a"
}, {
"cool": -12,
"s": 99,
"S": "b"
}],
batch_size=8)
def testTwoThreadsDynamicPad(self):
with ops.Graph().as_default(), self.cached_session():
# Two threads, the first generates (0..69, ["a"] * 1..70).
num_a = 70
zero64 = constant_op.constant(0, dtype=dtypes.int64)
examples = variables.Variable(zero64)
counter = examples.count_up_to(num_a)
# The second generates (99, ["b"] * 99) 90 times and then stops.
num_b = 90
ninety_nine = inp.limit_epochs(
constant_op.constant(
99, dtype=dtypes.int64), num_b)
# These get joined together and grouped into batches of 5.
batch_size = 5
a = array_ops.tile(
["a"],
math_ops.cast(array_ops_stack.stack([counter + 1]), dtypes.int32))
b = array_ops.tile(
["b"],
math_ops.cast(array_ops_stack.stack([ninety_nine]), dtypes.int32))
batched = inp.batch_join(
[[counter, a], [ninety_nine, b]],
batch_size=batch_size,
dynamic_pad=True)
# Shapes.
self.assertEqual(2, len(batched))
self.assertAllEqual((batch_size,), batched[0].get_shape().as_list())
self.assertAllEqual((batch_size, None), batched[1].get_shape().as_list())
self.evaluate(variables.global_variables_initializer())
variables.local_variables_initializer().run()
threads = queue_runner_impl.start_queue_runners()
# Should see the "a" and "b" threads mixed together.
all_a = []
count_string_a = []
seen_b = 0
saw_both = 0
num_batches = (num_a + num_b) // batch_size
for i in range(num_batches):
results = self.evaluate(batched)
self.assertEqual(2, len(results))
self.assertEqual(len(results[0]), batch_size)
self.assertEqual(len(results[1]), batch_size)
for s in results[1]:
if s[0] == b"b":
self.assertAllEqual(s, [b"b"] * 99)
else:
count_string_a.append(sum(x == b"a" for x in s))
which_a = [i for i, s in enumerate(results[1]) if s[0] == b"a"]
which_b = [i for i, s in enumerate(results[1]) if s[0] == b"b"]
self.assertEqual(len(which_a) + len(which_b), batch_size)
if which_a and which_b:
saw_both += 1
all_a.extend(results[0][i] for i in which_a)
seen_b += len(which_b)
self.assertAllEqual([99] * len(which_b),
[results[0][i] for i in which_b])
# We'd like to see some minimum level of mixing of the results of both
# threads, but we can't rely on fair thread scheduling, so we just log.
# self.assertGreater(saw_both, 1)
tf_logging.info("testTwoThreadsDynamicPad saw both count: %s", saw_both)
# Verify the order of results from "a" were preserved.
self.assertAllEqual( # tiled "a" with counter + 1
count_string_a, np.arange(num_a) + 1)
self.assertAllEqual(all_a, np.arange(num_a))
self.assertEqual(seen_b, num_b)
# Reached the limit.
with self.assertRaises(errors_impl.OutOfRangeError):
self.evaluate(batched)
for thread in threads:
thread.join()
def testTwoThreadsSmallerBatch(self):
with ops.Graph().as_default(), self.cached_session():
extra_elements = 2
# Two threads, the first generates (0..69, "a").
num_a = 70 + extra_elements
zero64 = constant_op.constant(0, dtype=dtypes.int64)
examples = variables.Variable(zero64)
counter = examples.count_up_to(num_a)
sparse_counter = sparse_tensor.SparseTensor(
indices=array_ops.reshape(zero64, [1, 1]),
values=array_ops_stack.stack(
[math_ops.cast(counter, dtypes.float32)]),
dense_shape=[1])
# The second generates (99, "b") 90 times and then stops.
num_b = 90 + extra_elements
ninety_nine = inp.limit_epochs(
constant_op.constant(
99, dtype=dtypes.int64), num_b)
sparse_ninety_nine = sparse_tensor.SparseTensor(
indices=array_ops.reshape(zero64, [1, 1]),
values=array_ops_stack.stack(
[math_ops.cast(ninety_nine, dtypes.float32)]),
dense_shape=[1])
# These get joined together and grouped into batches of 5.
batch_size = 5
batched = inp.batch_join(
[[counter, sparse_counter, "a"],
[ninety_nine, sparse_ninety_nine, "b"]],
batch_size=batch_size,
allow_smaller_final_batch=True)
# Shapes.
self.assertEqual(3, len(batched))
self.assertAllEqual((None,), batched[0].get_shape().as_list())
self.assertAllEqual((None, 2), batched[1].indices.get_shape().as_list())
self.assertAllEqual((None,), batched[1].values.get_shape().as_list())
self.assertAllEqual((2,), batched[1].dense_shape.get_shape().as_list())
self.assertAllEqual((None,), batched[2].get_shape().as_list())
self.evaluate(variables.global_variables_initializer())
variables.local_variables_initializer().run()
threads = queue_runner_impl.start_queue_runners()
# Should see the "a" and "b" threads mixed together.
all_a = []
seen_b = 0
saw_both = 0
num_batches = (num_a + num_b) // batch_size
for i in range(num_batches):
results = self.evaluate(batched)
tf_logging.info("Batch %d: %s", i, results[0])
self.assertEqual(len(results[0]), batch_size)
self.assertEqual(len(results[2]), batch_size)
self.assertAllEqual(results[0], results[1].values)
self.assertAllEqual(
results[1].indices,
np.vstack((np.arange(batch_size), np.zeros(batch_size))).T)
self.assertAllEqual(results[1].dense_shape, [batch_size, 1])
which_a = [i for i, s in enumerate(results[2]) if s == b"a"]
which_b = [i for i, s in enumerate(results[2]) if s == b"b"]
self.assertEqual(len(which_a) + len(which_b), batch_size)
if which_a and which_b:
saw_both += 1
all_a.extend(results[0][i] for i in which_a)
seen_b += len(which_b)
self.assertAllEqual([99] * len(which_b),
[results[0][i] for i in which_b])
# Reached the final batch with 2 * extra_elements.
results = self.evaluate(batched)
tf_logging.info("Last Batch: %s", results[0])
self.assertEqual(len(results[0]), 2 * extra_elements)
self.assertEqual(len(results[2]), 2 * extra_elements)
self.assertAllEqual(results[0], results[1].values)
self.assertAllEqual(results[1].indices,
np.vstack((np.arange(2 * extra_elements),
np.zeros(2 * extra_elements))).T)
self.assertAllEqual(results[1].dense_shape, [2 * extra_elements, 1])
which_a = [i for i, s in enumerate(results[2]) if s == b"a"]
which_b = [i for i, s in enumerate(results[2]) if s == b"b"]
self.assertEqual(len(which_a) + len(which_b), 2 * extra_elements)
if which_a and which_b:
saw_both += 1
all_a.extend(results[0][i] for i in which_a)
seen_b += len(which_b)
# We'd like to see some minimum level of mixing of the results of both
# threads, but we can't rely on fair thread scheduling, so we just log.
# self.assertGreater(saw_both, 1)
tf_logging.info("testTwoThreadsSmallerBatch saw both count: %s", saw_both)
# Verify the order of results from "a" were preserved.
self.assertAllEqual(all_a, np.arange(num_a))
self.assertEqual(seen_b, num_b)
# Reached the limit.
with self.assertRaises(errors_impl.OutOfRangeError):
self.evaluate(batched)
for thread in threads:
thread.join()
def testTwoThreadsDynamicPadSmallerBatch(self):
with ops.Graph().as_default(), self.cached_session():
extra_elements = 2
# Two threads, the first generates (0..69, ["a"] * 1..70).
num_a = 70 + extra_elements
zero64 = constant_op.constant(0, dtype=dtypes.int64)
examples = variables.Variable(zero64)
counter = examples.count_up_to(num_a)
# The second generates (99, ["b"] * 99) 90 times and then stops.
num_b = 90 + extra_elements
ninety_nine = inp.limit_epochs(
constant_op.constant(
99, dtype=dtypes.int64), num_b)
# These get joined together and grouped into batches of 5.
batch_size = 5
a = array_ops.tile(
["a"],
math_ops.cast(array_ops_stack.stack([counter + 1]), dtypes.int32))
b = array_ops.tile(
["b"],
math_ops.cast(array_ops_stack.stack([ninety_nine]), dtypes.int32))
batched = inp.batch_join(
[[counter, a], [ninety_nine, b]],
batch_size=batch_size,
dynamic_pad=True,
allow_smaller_final_batch=True)
# Shapes.
self.assertEqual(2, len(batched))
self.assertAllEqual((None,), batched[0].get_shape().as_list())
self.assertAllEqual((None, None), batched[1].get_shape().as_list())
self.evaluate(variables.global_variables_initializer())
variables.local_variables_initializer().run()
threads = queue_runner_impl.start_queue_runners()
# Should see the "a" and "b" threads mixed together.
all_a = []
count_string_a = []
seen_b = 0
saw_both = 0
num_batches = (num_a + num_b) // batch_size
for i in range(num_batches):
results = self.evaluate(batched)
tf_logging.info("Batch %d: %s", i, results[0])
self.assertEqual(len(results[0]), batch_size)
self.assertEqual(len(results[1]), batch_size)
for s in results[1]:
if s[0] == b"b":
self.assertAllEqual(s, [b"b"] * 99)
else:
count_string_a.append(sum(x == b"a" for x in s))
which_a = [i for i, s in enumerate(results[1]) if s[0] == b"a"]
which_b = [i for i, s in enumerate(results[1]) if s[0] == b"b"]
self.assertEqual(len(which_a) + len(which_b), batch_size)
if which_a and which_b:
saw_both += 1
all_a.extend(results[0][i] for i in which_a)
seen_b += len(which_b)
self.assertAllEqual([99] * len(which_b),
[results[0][i] for i in which_b])
# Reached the final batch with 2 * extra_elements.
results = self.evaluate(batched)
tf_logging.info("Last Batch: %s", results[0])
self.assertEqual(len(results[0]), 2 * extra_elements)
self.assertEqual(len(results[1]), 2 * extra_elements)
for s in results[1]:
if s[0] == b"b":
self.assertAllEqual(s, [b"b"] * 99)
else:
count_string_a.append(sum(x == b"a" for x in s))
which_a = [i for i, s in enumerate(results[1]) if s[0] == b"a"]
which_b = [i for i, s in enumerate(results[1]) if s[0] == b"b"]
self.assertEqual(len(which_a) + len(which_b), 2 * extra_elements)
if which_a and which_b:
saw_both += 1
all_a.extend(results[0][i] for i in which_a)
seen_b += len(which_b)
# We'd like to see some minimum level of mixing of the results of both
# threads, but we can't rely on fair thread scheduling, so we just log.
# self.assertGreater(saw_both, 1)
tf_logging.info("testTwoThreadsDynamicPadSmallerBatch saw both count: %s",
saw_both)
# Verify the order of results from "a" were preserved.
self.assertAllEqual( # tiled "a" with counter + 1
count_string_a, np.arange(num_a) + 1)
self.assertAllEqual(all_a, np.arange(num_a))
self.assertEqual(seen_b, num_b)
# Reached the limit.
with self.assertRaises(errors_impl.OutOfRangeError):
self.evaluate(batched)
for thread in threads:
thread.join()
def testSharedName(self):
with ops.Graph().as_default(), self.cached_session():
batch_size = 10
num_batches = 3
zero64 = constant_op.constant(0, dtype=dtypes.int64)
examples = variables.Variable(zero64)
counter = examples.count_up_to(num_batches * batch_size)
batched = inp.batch_join(
[[counter, "string"]],
batch_size=batch_size,
shared_name="SHARED_NAME_XYZ",
name="Q")
# Shapes.
self.assertEqual(2, len(batched))
self.assertAllEqual((batch_size,), batched[0].get_shape().as_list())
self.assertAllEqual((batch_size,), batched[1].get_shape().as_list())
self.assertProtoEquals(
"s: 'SHARED_NAME_XYZ'",
batched[0].op.inputs[0].op.node_def.attr["shared_name"])
def testCannotInferRankError(self):
with ops.Graph().as_default(), self.cached_session():
x = array_ops.placeholder(dtype=dtypes.int64)
with self.assertRaisesRegex(ValueError, "Cannot infer Tensor's rank"):
inp.batch_join([[x]], batch_size=2)
def testSingleElementDict(self):
with ops.Graph().as_default():
x = inp.batch_join([{"c": [12, 12]}], batch_size=8)
self.assertAllEqual((8, 2), x["c"].get_shape().as_list())
def _testKeepInputHelper(self, num_threads, enqueue_many,
keep_input_vector=False):
with ops.Graph().as_default(), self.cached_session():
batch_size = 5
num_batches = 4
examples = variables.Variable(0)
counter = examples.count_up_to(num_batches * batch_size * 2)
sparse_counter = sparse_tensor.SparseTensor(
indices=array_ops.zeros(
[1, 1], dtype=dtypes.int64),
values=array_ops_stack.stack(
[math_ops.cast(counter, dtypes.float32)]),
dense_shape=[1])
to_batch = [counter, sparse_counter, "string"]
if enqueue_many:
to_batch = inp.batch(to_batch, 4 if keep_input_vector else 1)
keep_input = array_ops.squeeze(
math_ops.equal(0, math_ops.mod(to_batch[0], 2)))
batched = inp.maybe_batch_join(
[to_batch] * num_threads,
keep_input,
batch_size,
enqueue_many=enqueue_many)
variables.initialize_all_variables().run()
variables.initialize_local_variables().run()
threads = queue_runner_impl.start_queue_runners()
for _ in range(num_batches):
results = self.evaluate(batched)
self.assertAllEqual(
[0] * batch_size,
np.mod(results[0], 2),)
self.assertAllEqual(
[0] * batch_size,
np.mod(results[1].values, 2),)
self.assertAllEqual([b"string"] * batch_size, results[2])
# Reached the limit.
with self.assertRaises(errors_impl.OutOfRangeError):
self.evaluate(batched)
for thread in threads:
thread.join()
@test_util.run_v1_only("Input pipelines based on Queues are not supported "
"when eager execution is enabled. TF2 uses tf.data "
"instead.")
def testSingleThreadKeepInput(self):
self._testKeepInputHelper(1, False)
@test_util.run_v1_only("Input pipelines based on Queues are not supported "
"when eager execution is enabled. TF2 uses tf.data "
"instead.")
def testSingleThreadKeepInputEnqueueMany(self):
self._testKeepInputHelper(1, True)
@test_util.run_v1_only("Input pipelines based on Queues are not supported "
"when eager execution is enabled. TF2 uses tf.data "
"instead.")
def testMultipleThreadKeepInput(self):
self._testKeepInputHelper(5, False)
@test_util.run_v1_only("Input pipelines based on Queues are not supported "
"when eager execution is enabled. TF2 uses tf.data "
"instead.")
def testMultipleThreadKeepInputEnqueueMany(self):
self._testKeepInputHelper(5, True)
def testSingleThreadKeepInputPerExample(self):
self._testKeepInputHelper(1, True, keep_input_vector=True)
def testMultipleThreadKeepInputPerExample(self):
self._testKeepInputHelper(5, True, keep_input_vector=True)
def testInvalidKeepInputVector(self):
with ops.Graph().as_default():
# Can't have vector `keep_input` with `enqueue_many=False`.
with self.assertRaisesRegex(ValueError,
"`keep_input` cannot be a vector"):
inp.maybe_batch_join([[array_ops.zeros(5)]],
keep_input=constant_op.constant([True, False]),
batch_size=1,
enqueue_many=False)
# Can't have `keep_input` with more than one dimension.
with self.assertRaisesRegex(ValueError, "must be 0 or 1 dimensions"):
inp.maybe_batch_join([[array_ops.zeros(5)]],
keep_input=constant_op.constant([[True], [False]]),
batch_size=1,
enqueue_many=True)
# `keep_input` must have dimensions determined at graph construction.
with self.assertRaisesRegex(ValueError,
"must be known at graph construction"):
inp.maybe_batch_join([[array_ops.zeros(5)]],
keep_input=array_ops.placeholder(dtypes.bool),
batch_size=1,
enqueue_many=True)
def testMaybeBatchedSparseTensorInferredShape(self):
with ops.Graph().as_default():
sparse = sparse_tensor.SparseTensor(
indices=[[0]], values=[1.0], dense_shape=[1])
self.assertAllEqual((1,), sparse.dense_shape.get_shape().as_list())
batched = inp.maybe_batch_join([[sparse]], keep_input=True, batch_size=2)
self.assertAllEqual((2,), batched.dense_shape.get_shape().as_list())
def testMaybeBatchedSparseTensorInferredShapeEnqueueMany(self):
with ops.Graph().as_default():
sparse = sparse_tensor.SparseTensor(
indices=[[0]], values=[1.0], dense_shape=[1])
self.assertAllEqual((1,), sparse.dense_shape.get_shape().as_list())
batched = inp.maybe_batch_join([[sparse]],
keep_input=True,
batch_size=2,
enqueue_many=True)
self.assertAllEqual((1,), batched.dense_shape.get_shape().as_list())
def testMaybeBatchedSparseTensorInferredShapeEnqueueManyPerExample(self):
with ops.Graph().as_default():
sparse = sparse_tensor.SparseTensor(
indices=[[0], [0]], values=[1.0, 2.0], dense_shape=[2])
self.assertAllEqual((1,), sparse.dense_shape.get_shape().as_list())
batched = inp.maybe_batch_join([[sparse]],
keep_input=[True, False],
batch_size=2,
enqueue_many=True)
self.assertAllEqual((1,), batched.dense_shape.get_shape().as_list())
def testMaybeBatchedSparseTensorInferredShapeUnknownRank(self):
with ops.Graph().as_default():
sparse = sparse_tensor.SparseTensor(
indices=array_ops.placeholder(dtypes.int64),
values=array_ops.placeholder(dtypes.float32),
dense_shape=array_ops.placeholder(dtypes.int64))
self.assertIs(None, sparse.dense_shape.get_shape().num_elements())
batched = inp.maybe_batch_join([[sparse]], keep_input=True, batch_size=2)
self.assertIs(None, batched.dense_shape.get_shape().num_elements())
def testMaybeBatchedSparseTensorInferredShapeUnknownRankEnqueueMany(self):
with ops.Graph().as_default():
sparse = sparse_tensor.SparseTensor(
indices=array_ops.placeholder(dtypes.int64),
values=array_ops.placeholder(dtypes.float32),
dense_shape=array_ops.placeholder(dtypes.int64))
self.assertIs(None, sparse.dense_shape.get_shape().num_elements())
batched = inp.maybe_batch_join([[sparse]],
keep_input=True,
batch_size=2,
enqueue_many=True)
self.assertIs(None, batched.dense_shape.get_shape().num_elements())
def testMaybeBatchedSparseTensorInferredShapeUnknownRankPerExample(self):
with ops.Graph().as_default():
sparse = sparse_tensor.SparseTensor(
indices=array_ops.placeholder(dtypes.int64),
values=array_ops.placeholder(dtypes.float32),
dense_shape=array_ops.placeholder(dtypes.int64))
self.assertIs(None, sparse.dense_shape.get_shape().num_elements())
batched = inp.maybe_batch_join([[sparse]],
keep_input=[True, False],
batch_size=2,
enqueue_many=True)
self.assertIs(None, batched.dense_shape.get_shape().num_elements())
def testMaybeBatchCorrectValues(self):
with ops.Graph().as_default():
sparse = sparse_tensor.SparseTensor(
indices=[[0, 1], [0, 2], [1, 0], [1, 3]],
dense_shape=[2, 4],
values=[5, 4, 7, 2])
keep = constant_op.constant([True, False])
batched = inp.maybe_batch_join([[sparse]],
keep_input=keep,
batch_size=1,
enqueue_many=True)
with self.cached_session():
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(coord=coord)
batched_np = self.evaluate(batched)
coord.request_stop()
for thread in threads:
thread.join()
self.assertAllEqual([[0, 1], [0, 2]], batched_np.indices)
self.assertAllEqual([5, 4], batched_np.values)
self.assertAllEqual([1, 4], batched_np.dense_shape)
| BatchJoinTest |
python | falconry__falcon | falcon/media/multipart.py | {
"start": 1947,
"end": 11216
} | class ____:
"""Represents a body part in a multipart form in a WSGI application.
Note:
:class:`BodyPart` is meant to be instantiated directly only by the
:class:`MultipartFormHandler` parser.
"""
_content_disposition: tuple[str, dict[str, str]] | None = None
_data: bytes | None = None
_filename: UnsetOr[str | None] = _UNSET
_media: UnsetOr[Any] = _UNSET
_name: UnsetOr[str | None] = _UNSET
stream: PyBufferedReader
"""File-like input object for reading the body part of the
multipart form request, if any. This object provides direct access
to the server's data stream and is non-seekable. The stream is
automatically delimited according to the multipart stream boundary.
With the exception of being buffered to keep track of the boundary,
the wrapped body part stream interface and behavior mimic
:attr:`Request.bounded_stream <falcon.Request.bounded_stream>`.
Reading the whole part content:
.. code:: python
data = part.stream.read()
This is also safe:
.. code:: python
doc = yaml.safe_load(part.stream)
"""
def __init__(
self,
stream: PyBufferedReader,
headers: dict[bytes, bytes],
parse_options: MultipartParseOptions,
):
self.stream = stream
self._headers = headers
self._parse_options = parse_options
def get_data(self) -> bytes:
"""Return the body part content bytes.
The maximum number of bytes that may be read is configurable via
:class:`.MultipartParseOptions`, and a :class:`.MultipartParseError` is
raised if the body part is larger that this size.
The size limit guards against reading unexpectedly large amount of data
into memory by referencing :attr:`data` and :attr:`text` properties
that build upon this method.
For large bodies, such as attached files, use the input :attr:`stream`
directly.
Note:
Calling this method the first time will consume the part's input
stream. The result is cached for subsequent access, and follow-up
calls will just retrieve the cached content.
Returns:
bytes: The body part content.
"""
if self._data is None:
max_size = self._parse_options.max_body_part_buffer_size + 1
self._data = self.stream.read(max_size)
if len(self._data) >= max_size:
raise MultipartParseError(description='body part is too large')
return self._data
def get_text(self) -> str | None:
"""Return the body part content decoded as a text string.
Text is decoded from the part content (as returned by
:meth:`~.get_data`) using the charset specified in the `Content-Type`
header, or, if omitted, the
:data:`default charset <MultipartParseOptions.default_charset>`.
The charset must be supported by Python's ``bytes.decode()``
function. The list of standard encodings (charsets) supported by the
Python 3 standard library can be found `here
<https://docs.python.org/3/library/codecs.html#standard-encodings>`__.
If decoding fails due to invalid `data` bytes (for the specified
encoding), or the specified encoding itself is unsupported, a
:class:`.MultipartParseError` will be raised when referencing this
property.
Note:
As this method builds upon :meth:`~.get_data`, it will consume the
part's input stream in the same way.
Returns:
str: The part decoded as a text string provided the part is
encoded as ``text/plain``, ``None`` otherwise.
"""
content_type, options = parse_header(self.content_type)
if content_type != 'text/plain':
return None
charset = options.get('charset', self._parse_options.default_charset)
try:
return self.data.decode(charset)
except (ValueError, LookupError) as err:
raise MultipartParseError(
description='invalid text or charset: {}'.format(charset)
) from err
@property
def content_type(self) -> str:
"""Value of the Content-Type header.
When the header is missing returns the multipart form default ``text/plain``.
"""
# NOTE(vytas): RFC 7578, section 4.4.
# Each part MAY have an (optional) "Content-Type" header field, which
# defaults to "text/plain".
value = self._headers.get(b'content-type', b'text/plain')
return value.decode('ascii')
@property
def filename(self) -> str | None:
"""File name if the body part is an attached file, and ``None`` otherwise."""
if self._filename is _UNSET:
if self._content_disposition is None:
value = self._headers.get(b'content-disposition', b'')
self._content_disposition = parse_header(value.decode())
_, params = self._content_disposition
# NOTE(vytas): Supporting filename* as per RFC 5987, as that has
# been spotted in the wild, even though RFC 7578 forbids it.
match = _FILENAME_STAR_RFC5987.match(params.get('filename*', ''))
if match:
charset, filename_raw = match.groups()
try:
self._filename = unquote_to_bytes(filename_raw).decode(charset)
except (ValueError, LookupError) as err:
raise MultipartParseError(
description='invalid text or charset: {}'.format(charset)
) from err
else:
self._filename = params.get('filename')
return self._filename
@property
def secure_filename(self) -> str:
"""The sanitized version of `filename` using only the most common ASCII
characters for maximum portability and safety wrt using this name as a
filename on a regular file system.
If `filename` is empty or unset when referencing this property, an
instance of :class:`.MultipartParseError` will be raised.
See also: :func:`~.secure_filename`
""" # noqa: D205
try:
return misc.secure_filename(
self.filename or '',
max_length=self._parse_options.max_secure_filename_length,
)
except ValueError as ex:
raise MultipartParseError(description=str(ex)) from ex
@property
def name(self) -> str | None:
"""The name parameter of the Content-Disposition header.
The value of the "name" parameter is the original field name from
the submitted HTML form.
.. note::
According to `RFC 7578, section 4.2
<https://tools.ietf.org/html/rfc7578#section-4.2>`__, each part
MUST include a Content-Disposition header field of type
"form-data", where the name parameter is mandatory.
However, Falcon will not raise any error if this parameter is
missing; the property value will be ``None`` in that case.
"""
if self._name is _UNSET:
if self._content_disposition is None:
value = self._headers.get(b'content-disposition', b'')
self._content_disposition = parse_header(value.decode())
_, params = self._content_disposition
self._name = params.get('name')
return self._name
def get_media(self) -> Any:
"""Return a deserialized form of the multipart body part.
When called, this method will attempt to deserialize the body part
stream using the Content-Type header as well as the media-type handlers
configured via :class:`MultipartParseOptions`.
The result will be cached and returned in subsequent calls::
deserialized_media = part.get_media()
Returns:
object: The deserialized media representation.
"""
if self._media is _UNSET:
handler, _, _ = self._parse_options.media_handlers._resolve(
self.content_type, 'text/plain'
)
try:
self._media = handler.deserialize(self.stream, self.content_type, None)
finally:
if handler.exhaust_stream:
self.stream.exhaust()
return self._media
data: bytes = property(get_data) # type: ignore[assignment]
"""Property that acts as a convenience alias for :meth:`~.get_data`.
.. code:: python
# Equivalent to: content = part.get_data()
content = part.data
"""
media: Any = property(get_media)
"""Property that acts as a convenience alias for :meth:`~.get_media`.
.. code:: python
# Equivalent to: deserialized_media = part.get_media()
deserialized_media = req.media
"""
text: str = property(get_text) # type: ignore[assignment]
"""Property that acts as a convenience alias for :meth:`~.get_text`.
.. code:: python
# Equivalent to: decoded_text = part.get_text()
decoded_text = part.text
"""
| BodyPart |
python | sqlalchemy__sqlalchemy | test/aaa_profiling/test_threading.py | {
"start": 7752,
"end": 8825
} | class ____(_ThreadTest, fixtures.TestBase):
"""test for issue #12302"""
@testing.variation("collection", ["c", "primary_key", "foreign_keys"])
def test_c_collection(self, collection):
dictionary_meta = MetaData()
all_indexes_table = Table(
"all_indexes",
dictionary_meta,
*[Column(f"col{i}", Integer) for i in range(50)],
)
def use_table(results, errors):
for i in range(3):
time.sleep(random.random() * 0.0001)
if collection.c:
all_indexes.c.col35
elif collection.primary_key:
all_indexes.primary_key
elif collection.foreign_keys:
all_indexes.foreign_keys
for j in range(1000):
all_indexes = all_indexes_table.alias("a_indexes")
results, errors = self.run_threaded(
use_table, use_barrier=False, nthreads=5
)
eq_(errors, [])
eq_(len(results), 5)
| FromClauseConcurrencyTest |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/math_ops/bucketize_op_test.py | {
"start": 1074,
"end": 2820
} | class ____(test.TestCase):
def testInt(self):
op = math_ops._bucketize(
constant_op.constant([-5, 0, 2, 3, 5, 8, 10, 11, 12]),
boundaries=[0, 3, 8, 11])
expected_out = [0, 1, 1, 2, 2, 3, 3, 4, 4]
with self.session():
self.assertAllEqual(expected_out, self.evaluate(op))
def testEmptyFloat(self):
op = math_ops._bucketize(
array_ops.zeros([0, 3], dtype=dtypes.float32), boundaries=[])
expected_out = np.zeros([0, 3], dtype=np.float32)
with self.session():
self.assertAllEqual(expected_out, self.evaluate(op))
def testFloat(self):
op = math_ops._bucketize(
constant_op.constant([-5., 0., 2., 3., 5., 8., 10., 11., 12.]),
boundaries=[0., 3., 8., 11.])
expected_out = [0, 1, 1, 2, 2, 3, 3, 4, 4]
with self.session():
self.assertAllEqual(expected_out, self.evaluate(op))
def test2DInput(self):
op = math_ops._bucketize(
constant_op.constant([[-5, 0, 2, 3, 5], [8, 10, 11, 12, 0]]),
boundaries=[0, 3, 8, 11])
expected_out = [[0, 1, 1, 2, 2], [3, 3, 4, 4, 1]]
with self.session():
self.assertAllEqual(expected_out, self.evaluate(op))
@test_util.run_deprecated_v1
def testInvalidBoundariesOrder(self):
op = math_ops._bucketize(
constant_op.constant([-5, 0]), boundaries=[0, 8, 3, 11])
with self.session():
with self.assertRaisesRegex(errors_impl.InvalidArgumentError,
"Expected sorted boundaries"):
self.evaluate(op)
def testBoundariesNotList(self):
with self.assertRaisesRegex(TypeError, "Expected list.*"):
math_ops._bucketize(constant_op.constant([-5, 0]), boundaries=0)
if __name__ == "__main__":
test.main()
| BucketizationOpTest |
python | huggingface__transformers | src/transformers/models/lilt/modeling_lilt.py | {
"start": 13618,
"end": 14840
} | class ____(nn.Module):
def __init__(self, config, layer_idx=None):
super().__init__()
self.self = LiltSelfAttention(config, layer_idx=layer_idx)
self.output = LiltSelfOutput(config)
ori_hidden_size = config.hidden_size
config.hidden_size = config.hidden_size // config.channel_shrink_ratio
self.layout_output = LiltSelfOutput(config)
config.hidden_size = ori_hidden_size
def forward(
self,
hidden_states: torch.Tensor,
layout_inputs: torch.Tensor,
attention_mask: Optional[torch.FloatTensor] = None,
output_attentions: Optional[bool] = False,
) -> tuple[torch.Tensor]:
self_outputs = self.self(
hidden_states,
layout_inputs,
attention_mask,
output_attentions,
)
attention_output = self.output(self_outputs[0][0], hidden_states)
layout_attention_output = self.layout_output(self_outputs[0][1], layout_inputs)
outputs = ((attention_output, layout_attention_output),) + self_outputs[1:] # add attentions if we output them
return outputs
# Copied from transformers.models.bert.modeling_bert.BertIntermediate
| LiltAttention |
python | huggingface__transformers | src/transformers/models/mobilenet_v2/image_processing_mobilenet_v2.py | {
"start": 2026,
"end": 24990
} | class ____(BaseImageProcessor):
r"""
Constructs a MobileNetV2 image processor.
Args:
do_resize (`bool`, *optional*, defaults to `True`):
Whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by
`do_resize` in the `preprocess` method.
size (`dict[str, int]` *optional*, defaults to `{"shortest_edge": 256}`):
Size of the image after resizing. The shortest edge of the image is resized to size["shortest_edge"], with
the longest edge resized to keep the input aspect ratio. Can be overridden by `size` in the `preprocess`
method.
resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BILINEAR`):
Resampling filter to use if resizing the image. Can be overridden by the `resample` parameter in the
`preprocess` method.
do_center_crop (`bool`, *optional*, defaults to `True`):
Whether to center crop the image. If the input size is smaller than `crop_size` along any edge, the image
is padded with 0's and then center cropped. Can be overridden by the `do_center_crop` parameter in the
`preprocess` method.
crop_size (`dict[str, int]`, *optional*, defaults to `{"height": 224, "width": 224}`):
Desired output size when applying center-cropping. Only has an effect if `do_center_crop` is set to `True`.
Can be overridden by the `crop_size` parameter in the `preprocess` method.
do_rescale (`bool`, *optional*, defaults to `True`):
Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the `do_rescale`
parameter in the `preprocess` method.
rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
Scale factor to use if rescaling the image. Can be overridden by the `rescale_factor` parameter in the
`preprocess` method.
do_normalize:
Whether to normalize the image. Can be overridden by the `do_normalize` parameter in the `preprocess`
method.
image_mean (`float` or `list[float]`, *optional*, defaults to `IMAGENET_STANDARD_MEAN`):
Mean to use if normalizing the image. This is a float or list of floats the length of the number of
channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method.
image_std (`float` or `list[float]`, *optional*, defaults to `IMAGENET_STANDARD_STD`):
Standard deviation to use if normalizing the image. This is a float or list of floats the length of the
number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method.
do_reduce_labels (`bool`, *optional*, defaults to `False`):
Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is
used for background, and background itself is not included in all classes of a dataset (e.g. ADE20k). The
background label will be replaced by 255. Can be overridden by the `do_reduce_labels` parameter in the
`preprocess` method.
"""
model_input_names = ["pixel_values"]
valid_kwargs = MobileNetV2ImageProcessorKwargs
def __init__(
self,
do_resize: bool = True,
size: Optional[dict[str, int]] = None,
resample: PILImageResampling = PILImageResampling.BILINEAR,
do_center_crop: bool = True,
crop_size: Optional[dict[str, int]] = None,
do_rescale: bool = True,
rescale_factor: Union[int, float] = 1 / 255,
do_normalize: bool = True,
image_mean: Optional[Union[float, list[float]]] = None,
image_std: Optional[Union[float, list[float]]] = None,
do_reduce_labels: bool = False,
**kwargs,
) -> None:
super().__init__(**kwargs)
size = size if size is not None else {"shortest_edge": 256}
size = get_size_dict(size, default_to_square=False)
crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
crop_size = get_size_dict(crop_size, param_name="crop_size")
self.do_resize = do_resize
self.size = size
self.resample = resample
self.do_center_crop = do_center_crop
self.crop_size = crop_size
self.do_rescale = do_rescale
self.rescale_factor = rescale_factor
self.do_normalize = do_normalize
self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
self.do_reduce_labels = do_reduce_labels
# Copied from transformers.models.mobilenet_v1.image_processing_mobilenet_v1.MobileNetV1ImageProcessor.resize
def resize(
self,
image: np.ndarray,
size: dict[str, int],
resample: PILImageResampling = PILImageResampling.BICUBIC,
data_format: Optional[Union[str, ChannelDimension]] = None,
input_data_format: Optional[Union[str, ChannelDimension]] = None,
**kwargs,
) -> np.ndarray:
"""
Resize an image. The shortest edge of the image is resized to size["shortest_edge"], with the longest edge
resized to keep the input aspect ratio.
Args:
image (`np.ndarray`):
Image to resize.
size (`dict[str, int]`):
Size of the output image.
resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`):
Resampling filter to use when resiizing the image.
data_format (`str` or `ChannelDimension`, *optional*):
The channel dimension format of the image. If not provided, it will be the same as the input image.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format of the input image. If not provided, it will be inferred.
"""
default_to_square = True
if "shortest_edge" in size:
size = size["shortest_edge"]
default_to_square = False
elif "height" in size and "width" in size:
size = (size["height"], size["width"])
else:
raise ValueError("Size must contain either 'shortest_edge' or 'height' and 'width'.")
output_size = get_resize_output_image_size(
image,
size=size,
default_to_square=default_to_square,
input_data_format=input_data_format,
)
return resize(
image,
size=output_size,
resample=resample,
data_format=data_format,
input_data_format=input_data_format,
**kwargs,
)
# Copied from transformers.models.beit.image_processing_beit.BeitImageProcessor.reduce_label
def reduce_label(self, label: ImageInput) -> np.ndarray:
label = to_numpy_array(label)
# Avoid using underflow conversion
label[label == 0] = 255
label = label - 1
label[label == 254] = 255
return label
def __call__(self, images, segmentation_maps=None, **kwargs):
"""
Preprocesses a batch of images and optionally segmentation maps.
Overrides the `__call__` method of the `Preprocessor` class so that both images and segmentation maps can be
passed in as positional arguments.
"""
return super().__call__(images, segmentation_maps=segmentation_maps, **kwargs)
def _preprocess(
self,
image: ImageInput,
do_reduce_labels: bool,
do_resize: bool,
do_rescale: bool,
do_center_crop: bool,
do_normalize: bool,
size: Optional[dict[str, int]] = None,
resample: Optional[PILImageResampling] = None,
rescale_factor: Optional[float] = None,
crop_size: Optional[dict[str, int]] = None,
image_mean: Optional[Union[float, list[float]]] = None,
image_std: Optional[Union[float, list[float]]] = None,
input_data_format: Optional[Union[str, ChannelDimension]] = None,
):
if do_reduce_labels:
image = self.reduce_label(image)
if do_resize:
image = self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format)
if do_center_crop:
image = self.center_crop(image=image, size=crop_size, input_data_format=input_data_format)
if do_rescale:
image = self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format)
if do_normalize:
image = self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format)
return image
def _preprocess_image(
self,
image: ImageInput,
do_resize: Optional[bool] = None,
size: Optional[dict[str, int]] = None,
resample: Optional[PILImageResampling] = None,
do_rescale: Optional[bool] = None,
rescale_factor: Optional[float] = None,
do_center_crop: Optional[bool] = None,
crop_size: Optional[dict[str, int]] = None,
do_normalize: Optional[bool] = None,
image_mean: Optional[Union[float, list[float]]] = None,
image_std: Optional[Union[float, list[float]]] = None,
data_format: Optional[Union[str, ChannelDimension]] = None,
input_data_format: Optional[Union[str, ChannelDimension]] = None,
) -> np.ndarray:
"""Preprocesses a single image."""
# All transformations expect numpy arrays.
image = to_numpy_array(image)
if do_rescale and is_scaled_image(image):
logger.warning_once(
"It looks like you are trying to rescale already rescaled images. If the input"
" images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again."
)
if input_data_format is None:
input_data_format = infer_channel_dimension_format(image)
image = self._preprocess(
image=image,
do_reduce_labels=False,
do_resize=do_resize,
size=size,
resample=resample,
do_rescale=do_rescale,
rescale_factor=rescale_factor,
do_center_crop=do_center_crop,
crop_size=crop_size,
do_normalize=do_normalize,
image_mean=image_mean,
image_std=image_std,
input_data_format=input_data_format,
)
image = to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format)
return image
def _preprocess_mask(
self,
segmentation_map: ImageInput,
do_reduce_labels: Optional[bool] = None,
do_resize: Optional[bool] = None,
size: Optional[dict[str, int]] = None,
do_center_crop: Optional[bool] = None,
crop_size: Optional[dict[str, int]] = None,
input_data_format: Optional[Union[str, ChannelDimension]] = None,
) -> np.ndarray:
"""Preprocesses a single mask."""
segmentation_map = to_numpy_array(segmentation_map)
# Add channel dimension if missing - needed for certain transformations
if segmentation_map.ndim == 2:
added_channel_dim = True
segmentation_map = segmentation_map[None, ...]
input_data_format = ChannelDimension.FIRST
else:
added_channel_dim = False
if input_data_format is None:
input_data_format = infer_channel_dimension_format(segmentation_map, num_channels=1)
segmentation_map = self._preprocess(
image=segmentation_map,
do_reduce_labels=do_reduce_labels,
do_resize=do_resize,
size=size,
resample=PILImageResampling.NEAREST,
do_rescale=False,
do_center_crop=do_center_crop,
crop_size=crop_size,
do_normalize=False,
image_mean=None,
image_std=None,
input_data_format=input_data_format,
)
# Remove extra channel dimension if added for processing
if added_channel_dim:
segmentation_map = segmentation_map.squeeze(0)
segmentation_map = segmentation_map.astype(np.int64)
return segmentation_map
@filter_out_non_signature_kwargs()
def preprocess(
self,
images: ImageInput,
segmentation_maps: Optional[ImageInput] = None,
do_resize: Optional[bool] = None,
size: Optional[dict[str, int]] = None,
resample: Optional[PILImageResampling] = None,
do_center_crop: Optional[bool] = None,
crop_size: Optional[dict[str, int]] = None,
do_rescale: Optional[bool] = None,
rescale_factor: Optional[float] = None,
do_normalize: Optional[bool] = None,
image_mean: Optional[Union[float, list[float]]] = None,
image_std: Optional[Union[float, list[float]]] = None,
do_reduce_labels: Optional[bool] = None,
return_tensors: Optional[Union[str, TensorType]] = None,
data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
input_data_format: Optional[Union[str, ChannelDimension]] = None,
):
"""
Preprocess an image or batch of images.
Args:
images (`ImageInput`):
Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If
passing in images with pixel values between 0 and 1, set `do_rescale=False`.
segmentation_maps (`ImageInput`, *optional*):
Segmentation map to preprocess.
do_resize (`bool`, *optional*, defaults to `self.do_resize`):
Whether to resize the image.
size (`dict[str, int]`, *optional*, defaults to `self.size`):
Size of the image after resizing. Shortest edge of the image is resized to size["shortest_edge"], with
the longest edge resized to keep the input aspect ratio.
resample (`PILImageResampling` filter, *optional*, defaults to `self.resample`):
`PILImageResampling` filter to use if resizing the image e.g. `PILImageResampling.BILINEAR`. Only has
an effect if `do_resize` is set to `True`.
do_center_crop (`bool`, *optional*, defaults to `self.do_center_crop`):
Whether to center crop the image.
crop_size (`dict[str, int]`, *optional*, defaults to `self.crop_size`):
Size of the center crop. Only has an effect if `do_center_crop` is set to `True`.
do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
Whether to rescale the image values between [0 - 1].
rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
Rescale factor to rescale the image by if `do_rescale` is set to `True`.
do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
Whether to normalize the image.
image_mean (`float` or `list[float]`, *optional*, defaults to `self.image_mean`):
Image mean to use if `do_normalize` is set to `True`.
image_std (`float` or `list[float]`, *optional*, defaults to `self.image_std`):
Image standard deviation to use if `do_normalize` is set to `True`.
do_reduce_labels (`bool`, *optional*, defaults to `self.do_reduce_labels`):
Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0
is used for background, and background itself is not included in all classes of a dataset (e.g.
ADE20k). The background label will be replaced by 255.
return_tensors (`str` or `TensorType`, *optional*):
The type of tensors to return. Can be one of:
- Unset: Return a list of `np.ndarray`.
- `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
- `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
The channel dimension format for the output image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- Unset: Use the channel dimension format of the input image.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the input image. If unset, the channel dimension format is inferred
from the input image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
"""
do_resize = do_resize if do_resize is not None else self.do_resize
size = size if size is not None else self.size
size = get_size_dict(size, default_to_square=False)
do_reduce_labels = do_reduce_labels if do_reduce_labels is not None else self.do_reduce_labels
resample = resample if resample is not None else self.resample
do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
crop_size = crop_size if crop_size is not None else self.crop_size
crop_size = get_size_dict(crop_size, param_name="crop_size")
do_rescale = do_rescale if do_rescale is not None else self.do_rescale
rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
do_normalize = do_normalize if do_normalize is not None else self.do_normalize
image_mean = image_mean if image_mean is not None else self.image_mean
image_std = image_std if image_std is not None else self.image_std
images = make_flat_list_of_images(images)
if segmentation_maps is not None:
segmentation_maps = make_flat_list_of_images(segmentation_maps, expected_ndims=2)
if not valid_images(images):
raise ValueError("Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, or torch.Tensor")
if segmentation_maps is not None and not valid_images(segmentation_maps):
raise ValueError(
"Invalid segmentation map type. Must be of type PIL.Image.Image, numpy.ndarray, or torch.Tensor"
)
validate_preprocess_arguments(
do_rescale=do_rescale,
rescale_factor=rescale_factor,
do_normalize=do_normalize,
image_mean=image_mean,
image_std=image_std,
do_center_crop=do_center_crop,
crop_size=crop_size,
do_resize=do_resize,
size=size,
resample=resample,
)
images = [
self._preprocess_image(
image=img,
do_resize=do_resize,
size=size,
resample=resample,
do_rescale=do_rescale,
rescale_factor=rescale_factor,
do_center_crop=do_center_crop,
crop_size=crop_size,
do_normalize=do_normalize,
image_mean=image_mean,
image_std=image_std,
data_format=data_format,
input_data_format=input_data_format,
)
for img in images
]
data = {"pixel_values": images}
if segmentation_maps is not None:
segmentation_maps = [
self._preprocess_mask(
segmentation_map=segmentation_map,
do_reduce_labels=do_reduce_labels,
do_resize=do_resize,
size=size,
do_center_crop=do_center_crop,
crop_size=crop_size,
input_data_format=input_data_format,
)
for segmentation_map in segmentation_maps
]
data["labels"] = segmentation_maps
return BatchFeature(data=data, tensor_type=return_tensors)
# Copied from transformers.models.beit.image_processing_beit.BeitImageProcessor.post_process_semantic_segmentation with Beit->MobileNetV2
def post_process_semantic_segmentation(self, outputs, target_sizes: Optional[list[tuple]] = None):
"""
Converts the output of [`MobileNetV2ForSemanticSegmentation`] into semantic segmentation maps.
Args:
outputs ([`MobileNetV2ForSemanticSegmentation`]):
Raw outputs of the model.
target_sizes (`list[Tuple]` of length `batch_size`, *optional*):
List of tuples corresponding to the requested final size (height, width) of each prediction. If unset,
predictions will not be resized.
Returns:
semantic_segmentation: `list[torch.Tensor]` of length `batch_size`, where each item is a semantic
segmentation map of shape (height, width) corresponding to the target_sizes entry (if `target_sizes` is
specified). Each entry of each `torch.Tensor` correspond to a semantic class id.
"""
logits = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(logits) != len(target_sizes):
raise ValueError(
"Make sure that you pass in as many target sizes as the batch dimension of the logits"
)
if is_torch_tensor(target_sizes):
target_sizes = target_sizes.numpy()
semantic_segmentation = []
for idx in range(len(logits)):
resized_logits = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
)
semantic_map = resized_logits[0].argmax(dim=0)
semantic_segmentation.append(semantic_map)
else:
semantic_segmentation = logits.argmax(dim=1)
semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]
return semantic_segmentation
__all__ = ["MobileNetV2ImageProcessor"]
| MobileNetV2ImageProcessor |
python | pytorch__pytorch | test/onnx/exporter/test_api.py | {
"start": 455,
"end": 592
} | class ____(torch.nn.Module):
def forward(self, x, b):
y = x + b
z = y.relu()
return (y, z)
| SampleModelTwoInputs |
python | vyperlang__vyper | vyper/compiler/input_bundle.py | {
"start": 8743,
"end": 9687
} | class ____(InputBundle):
def __init__(self, archive: "ZipFile"):
assert archive.testzip() is None
self.archive = archive
sp_str = archive.read("MANIFEST/searchpaths").decode("utf-8")
search_paths = [PurePath(p) for p in sp_str.splitlines()]
super().__init__(search_paths)
def _normalize_path(self, path: PurePath) -> PurePath:
return _normpath(path)
def _load_from_path(self, resolved_path: PurePath, original_path: PurePath) -> CompilerInput:
# zipfile.BadZipFile: File is not a zip file
try:
value = self.archive.read(resolved_path.as_posix()).decode("utf-8")
except KeyError:
# zipfile literally raises KeyError if the file is not there
raise _NotFound(resolved_path)
source_id = super()._generate_source_id(resolved_path)
return FileInput(source_id, original_path, resolved_path, value)
| ZipInputBundle |
python | tensorflow__tensorflow | tensorflow/security/fuzzing/py/annotation_types.py | {
"start": 798,
"end": 3069
} | class ____:
pass
def _create_dtype_wrapper(name, underlying_dtype: _dtypes.DType):
return type(name, (DTypeAnnotation,), {"underlying_dtype": underlying_dtype})
BFloat16 = _create_dtype_wrapper("BFloat16", _dtypes.bfloat16)
Bool = _create_dtype_wrapper("Bool", _dtypes.bool)
Complex128 = _create_dtype_wrapper("Complex128", _dtypes.complex128)
Complex64 = _create_dtype_wrapper("Complex64", _dtypes.complex64)
Float8e4m3fn = _create_dtype_wrapper("Float8e4m3fn", _dtypes.float8_e4m3fn)
Float8e5m2 = _create_dtype_wrapper("Float8e5m2", _dtypes.float8_e5m2)
Float8e4m3fnuz = _create_dtype_wrapper(
"Float8e4m3fnuz", _dtypes.float8_e4m3fnuz
)
Float8e4m3b11fnuz = _create_dtype_wrapper(
"Float8e4m3b11fnuz", _dtypes.float8_e4m3b11fnuz
)
Float8e5m2fnuz = _create_dtype_wrapper(
"Float8e5m2fnuz", _dtypes.float8_e5m2fnuz
)
Float4e2m1fn = _create_dtype_wrapper("Float4e2m1fn", _dtypes.float4_e2m1fn)
Float16 = _create_dtype_wrapper("Float16", _dtypes.float16)
Float32 = _create_dtype_wrapper("Float32", _dtypes.float32)
Float64 = _create_dtype_wrapper("Float64", _dtypes.float64)
Half = _create_dtype_wrapper("Half", _dtypes.float16)
Int2 = _create_dtype_wrapper("Int2", _dtypes.int2)
Int4 = _create_dtype_wrapper("Int4", _dtypes.int4)
Int8 = _create_dtype_wrapper("Int8", _dtypes.int8)
Int16 = _create_dtype_wrapper("Int16", _dtypes.int16)
Int32 = _create_dtype_wrapper("Int32", _dtypes.int32)
Int64 = _create_dtype_wrapper("Int64", _dtypes.int64)
UInt2 = _create_dtype_wrapper("UInt2", _dtypes.uint2)
UInt4 = _create_dtype_wrapper("UInt4", _dtypes.uint4)
UInt8 = _create_dtype_wrapper("UInt8", _dtypes.uint8)
UInt16 = _create_dtype_wrapper("UInt16", _dtypes.uint16)
UInt32 = _create_dtype_wrapper("UInt32", _dtypes.uint32)
UInt64 = _create_dtype_wrapper("UInt64", _dtypes.uint64)
QInt8 = _create_dtype_wrapper("QInt8", _dtypes.qint8)
QInt16 = _create_dtype_wrapper("QInt16", _dtypes.qint16)
QInt32 = _create_dtype_wrapper("QInt32", _dtypes.qint32)
QUInt16 = _create_dtype_wrapper("QUInt16", _dtypes.quint16)
QUInt8 = _create_dtype_wrapper("QUInt8", _dtypes.quint8)
Resource = _create_dtype_wrapper("Resource", _dtypes.resource)
String = _create_dtype_wrapper("String", _dtypes.string)
Variant = _create_dtype_wrapper("Variant", _dtypes.variant)
| DTypeAnnotation |
python | pydantic__pydantic | tests/test_json.py | {
"start": 12253,
"end": 19203
} | class ____(BaseModel):
value: int
nested: Optional[Model] = None
"""
)
M = module.Model
assert M(value=1, nested=M(value=2)).model_dump_json(exclude_none=True) == '{"value":1,"nested":{"value":2}}'
def test_resolve_ref_schema_recursive_model():
class Model(BaseModel):
mini_me: Union['Model', None]
@classmethod
def __get_pydantic_json_schema__(
cls, core_schema: CoreSchema, handler: GetJsonSchemaHandler
) -> JsonSchemaValue:
json_schema = super().__get_pydantic_json_schema__(core_schema, handler)
json_schema = handler.resolve_ref_schema(json_schema)
json_schema['examples'] = [{'foo': {'mini_me': None}}]
return json_schema
# insert_assert(Model.model_json_schema())
assert Model.model_json_schema() == {
'$defs': {
'Model': {
'examples': [{'foo': {'mini_me': None}}],
'properties': {'mini_me': {'anyOf': [{'$ref': '#/$defs/Model'}, {'type': 'null'}]}},
'required': ['mini_me'],
'title': 'Model',
'type': 'object',
}
},
'$ref': '#/$defs/Model',
}
def test_custom_json_encoder_config():
class Model(BaseModel):
x: timedelta
y: Decimal
z: date
model_config = ConfigDict(
json_encoders={timedelta: lambda v: f'{v.total_seconds():0.3f}s', Decimal: lambda v: 'a decimal'}
)
assert json.loads(Model(x=123, y=5, z='2032-06-01').model_dump_json()) == {
'x': '123.000s',
'y': 'a decimal',
'z': '2032-06-01',
}
def test_custom_iso_timedelta():
class Model(BaseModel):
x: timedelta
model_config = ConfigDict(json_encoders={timedelta: lambda _: 'P0DT0H2M3.000000S'})
m = Model(x=321)
assert json.loads(m.model_dump_json()) == {'x': 'P0DT0H2M3.000000S'}
def test_json_encoders_config_simple_inheritance():
"""json_encoders is not "inheritable", this is different than v1 but much simpler"""
class Parent(BaseModel):
dt: datetime = datetime.now()
timedt: timedelta = timedelta(hours=100)
model_config = ConfigDict(json_encoders={timedelta: lambda _: 'parent_encoder'})
class Child(Parent):
model_config = ConfigDict(json_encoders={datetime: lambda _: 'child_encoder'})
# insert_assert(Child().model_dump())
assert json.loads(Child().model_dump_json()) == {'dt': 'child_encoder', 'timedt': 'P4DT4H'}
def test_custom_iso_timedelta_annotated():
class Model(BaseModel):
# the json_encoders config applies to the type but the annotation overrides it
y: timedelta
x: Annotated[timedelta, AfterValidator(lambda x: x), PlainSerializer(lambda _: 'P0DT0H1M2.000000S')]
model_config = ConfigDict(json_encoders={timedelta: lambda _: 'P0DT0H2M3.000000S'})
m = Model(x=321, y=456)
assert json.loads(m.model_dump_json()) == {'x': 'P0DT0H1M2.000000S', 'y': 'P0DT0H2M3.000000S'}
def test_json_encoders_on_model() -> None:
"""Make sure that applying json_encoders to a BaseModel
does not edit its schema in place.
"""
class Model(BaseModel):
x: int
class Outer1(BaseModel):
m: Model
model_config = ConfigDict(json_encoders={Model: lambda x: 'encoded!'})
class Outer2(BaseModel):
m: Model
class Outermost(BaseModel):
inner: Union[Outer1, Outer2]
m = Outermost(inner=Outer1(m=Model(x=1)))
# insert_assert(m.model_dump())
assert json.loads(m.model_dump_json()) == {'inner': {'m': 'encoded!'}}
m = Outermost(inner=Outer2(m=Model(x=1)))
# insert_assert(m.model_dump())
assert json.loads(m.model_dump_json()) == {'inner': {'m': {'x': 1}}}
def test_json_encoders_not_used_for_python_dumps() -> None:
class Model(BaseModel):
x: int
model_config = ConfigDict(json_encoders={int: lambda x: 'encoded!'})
m = Model(x=1)
assert m.model_dump() == {'x': 1}
assert m.model_dump_json() == '{"x":"encoded!"}'
def test_json_encoders_types() -> None:
class MyEnum(Enum):
A = 'a'
B = 'b'
class A(BaseModel):
a: MyEnum
b: list[int]
c: Decimal
model_config = ConfigDict(
json_encoders={Enum: lambda val: val.name, list[int]: lambda val: 'list!', Decimal: lambda val: 'decimal!'}
)
m = A(a=MyEnum.A, b=[1, 2, 3], c=Decimal('0'))
assert m.model_dump_json() == '{"a":"A","b":"list!","c":"decimal!"}'
assert m.model_dump() == {'a': MyEnum.A, 'b': [1, 2, 3], 'c': Decimal('0')}
@pytest.mark.parametrize(
'float_value,encoded_str',
[
(float('inf'), 'Infinity'),
(float('-inf'), '-Infinity'),
(float('nan'), 'NaN'),
],
)
def test_json_inf_nan_allow(float_value, encoded_str):
class R(RootModel[float]):
model_config = ConfigDict(ser_json_inf_nan='strings')
r = R(float_value)
r_encoded = f'"{encoded_str}"'
assert r.model_dump_json() == r_encoded
if math.isnan(float_value):
assert math.isnan(R.model_validate_json(r_encoded).root)
else:
assert R.model_validate_json(r_encoded) == r
class M(BaseModel):
f: float
model_config = R.model_config
m = M(f=float_value)
m_encoded = f'{{"f":{r_encoded}}}'
assert m.model_dump_json() == m_encoded
if math.isnan(float_value):
assert math.isnan(M.model_validate_json(m_encoded).f)
else:
assert M.model_validate_json(m_encoded) == m
def test_json_bytes_base64_round_trip():
class R(RootModel[bytes]):
model_config = ConfigDict(ser_json_bytes='base64', val_json_bytes='base64')
r = R(b'hello')
r_encoded = '"aGVsbG8="'
assert r.model_dump_json() == r_encoded
assert R.model_validate_json(r_encoded) == r
class M(BaseModel):
key: bytes
model_config = R.model_config
m = M(key=b'hello')
m_encoded = f'{{"key":{r_encoded}}}'
assert m.model_dump_json() == m_encoded
assert M.model_validate_json(m_encoded) == m
def test_json_bytes_hex_round_trip():
class R(RootModel[bytes]):
model_config = ConfigDict(ser_json_bytes='hex', val_json_bytes='hex')
r = R(b'hello')
r_encoded = '"68656c6c6f"'
assert r.model_dump_json() == r_encoded
assert R.model_validate_json(r_encoded) == r
class M(BaseModel):
key: bytes
model_config = R.model_config
m = M(key=b'hello')
m_encoded = f'{{"key":{r_encoded}}}'
assert m.model_dump_json() == m_encoded
assert M.model_validate_json(m_encoded) == m
# Complete tests exist in pydantic-core:
def test_json_ensure_ascii() -> None:
ta = TypeAdapter(str)
assert ta.dump_json('à', ensure_ascii=True) == b'"\\u00e0"'
class Model(BaseModel):
f: str
assert Model(f='à').model_dump_json(ensure_ascii=True) == '{"f":"\\u00e0"}'
| Model |
python | redis__redis-py | redis/asyncio/lock.py | {
"start": 354,
"end": 12801
} | class ____:
"""
A shared, distributed Lock. Using Redis for locking allows the Lock
to be shared across processes and/or machines.
It's left to the user to resolve deadlock issues and make sure
multiple clients play nicely together.
"""
lua_release = None
lua_extend = None
lua_reacquire = None
# KEYS[1] - lock name
# ARGV[1] - token
# return 1 if the lock was released, otherwise 0
LUA_RELEASE_SCRIPT = """
local token = redis.call('get', KEYS[1])
if not token or token ~= ARGV[1] then
return 0
end
redis.call('del', KEYS[1])
return 1
"""
# KEYS[1] - lock name
# ARGV[1] - token
# ARGV[2] - additional milliseconds
# ARGV[3] - "0" if the additional time should be added to the lock's
# existing ttl or "1" if the existing ttl should be replaced
# return 1 if the locks time was extended, otherwise 0
LUA_EXTEND_SCRIPT = """
local token = redis.call('get', KEYS[1])
if not token or token ~= ARGV[1] then
return 0
end
local expiration = redis.call('pttl', KEYS[1])
if not expiration then
expiration = 0
end
if expiration < 0 then
return 0
end
local newttl = ARGV[2]
if ARGV[3] == "0" then
newttl = ARGV[2] + expiration
end
redis.call('pexpire', KEYS[1], newttl)
return 1
"""
# KEYS[1] - lock name
# ARGV[1] - token
# ARGV[2] - milliseconds
# return 1 if the locks time was reacquired, otherwise 0
LUA_REACQUIRE_SCRIPT = """
local token = redis.call('get', KEYS[1])
if not token or token ~= ARGV[1] then
return 0
end
redis.call('pexpire', KEYS[1], ARGV[2])
return 1
"""
def __init__(
self,
redis: Union["Redis", "RedisCluster"],
name: Union[str, bytes, memoryview],
timeout: Optional[float] = None,
sleep: float = 0.1,
blocking: bool = True,
blocking_timeout: Optional[Number] = None,
thread_local: bool = True,
raise_on_release_error: bool = True,
):
"""
Create a new Lock instance named ``name`` using the Redis client
supplied by ``redis``.
``timeout`` indicates a maximum life for the lock in seconds.
By default, it will remain locked until release() is called.
``timeout`` can be specified as a float or integer, both representing
the number of seconds to wait.
``sleep`` indicates the amount of time to sleep in seconds per loop
iteration when the lock is in blocking mode and another client is
currently holding the lock.
``blocking`` indicates whether calling ``acquire`` should block until
the lock has been acquired or to fail immediately, causing ``acquire``
to return False and the lock not being acquired. Defaults to True.
Note this value can be overridden by passing a ``blocking``
argument to ``acquire``.
``blocking_timeout`` indicates the maximum amount of time in seconds to
spend trying to acquire the lock. A value of ``None`` indicates
continue trying forever. ``blocking_timeout`` can be specified as a
float or integer, both representing the number of seconds to wait.
``thread_local`` indicates whether the lock token is placed in
thread-local storage. By default, the token is placed in thread local
storage so that a thread only sees its token, not a token set by
another thread. Consider the following timeline:
time: 0, thread-1 acquires `my-lock`, with a timeout of 5 seconds.
thread-1 sets the token to "abc"
time: 1, thread-2 blocks trying to acquire `my-lock` using the
Lock instance.
time: 5, thread-1 has not yet completed. redis expires the lock
key.
time: 5, thread-2 acquired `my-lock` now that it's available.
thread-2 sets the token to "xyz"
time: 6, thread-1 finishes its work and calls release(). if the
token is *not* stored in thread local storage, then
thread-1 would see the token value as "xyz" and would be
able to successfully release the thread-2's lock.
``raise_on_release_error`` indicates whether to raise an exception when
the lock is no longer owned when exiting the context manager. By default,
this is True, meaning an exception will be raised. If False, the warning
will be logged and the exception will be suppressed.
In some use cases it's necessary to disable thread local storage. For
example, if you have code where one thread acquires a lock and passes
that lock instance to a worker thread to release later. If thread
local storage isn't disabled in this case, the worker thread won't see
the token set by the thread that acquired the lock. Our assumption
is that these cases aren't common and as such default to using
thread local storage.
"""
self.redis = redis
self.name = name
self.timeout = timeout
self.sleep = sleep
self.blocking = blocking
self.blocking_timeout = blocking_timeout
self.thread_local = bool(thread_local)
self.local = threading.local() if self.thread_local else SimpleNamespace()
self.raise_on_release_error = raise_on_release_error
self.local.token = None
self.register_scripts()
def register_scripts(self):
cls = self.__class__
client = self.redis
if cls.lua_release is None:
cls.lua_release = client.register_script(cls.LUA_RELEASE_SCRIPT)
if cls.lua_extend is None:
cls.lua_extend = client.register_script(cls.LUA_EXTEND_SCRIPT)
if cls.lua_reacquire is None:
cls.lua_reacquire = client.register_script(cls.LUA_REACQUIRE_SCRIPT)
async def __aenter__(self):
if await self.acquire():
return self
raise LockError("Unable to acquire lock within the time specified")
async def __aexit__(self, exc_type, exc_value, traceback):
try:
await self.release()
except LockError:
if self.raise_on_release_error:
raise
logger.warning(
"Lock was unlocked or no longer owned when exiting context manager."
)
async def acquire(
self,
blocking: Optional[bool] = None,
blocking_timeout: Optional[Number] = None,
token: Optional[Union[str, bytes]] = None,
):
"""
Use Redis to hold a shared, distributed lock named ``name``.
Returns True once the lock is acquired.
If ``blocking`` is False, always return immediately. If the lock
was acquired, return True, otherwise return False.
``blocking_timeout`` specifies the maximum number of seconds to
wait trying to acquire the lock.
``token`` specifies the token value to be used. If provided, token
must be a bytes object or a string that can be encoded to a bytes
object with the default encoding. If a token isn't specified, a UUID
will be generated.
"""
sleep = self.sleep
if token is None:
token = uuid.uuid1().hex.encode()
else:
try:
encoder = self.redis.connection_pool.get_encoder()
except AttributeError:
# Cluster
encoder = self.redis.get_encoder()
token = encoder.encode(token)
if blocking is None:
blocking = self.blocking
if blocking_timeout is None:
blocking_timeout = self.blocking_timeout
stop_trying_at = None
if blocking_timeout is not None:
stop_trying_at = asyncio.get_running_loop().time() + blocking_timeout
while True:
if await self.do_acquire(token):
self.local.token = token
return True
if not blocking:
return False
next_try_at = asyncio.get_running_loop().time() + sleep
if stop_trying_at is not None and next_try_at > stop_trying_at:
return False
await asyncio.sleep(sleep)
async def do_acquire(self, token: Union[str, bytes]) -> bool:
if self.timeout:
# convert to milliseconds
timeout = int(self.timeout * 1000)
else:
timeout = None
if await self.redis.set(self.name, token, nx=True, px=timeout):
return True
return False
async def locked(self) -> bool:
"""
Returns True if this key is locked by any process, otherwise False.
"""
return await self.redis.get(self.name) is not None
async def owned(self) -> bool:
"""
Returns True if this key is locked by this lock, otherwise False.
"""
stored_token = await self.redis.get(self.name)
# need to always compare bytes to bytes
# TODO: this can be simplified when the context manager is finished
if stored_token and not isinstance(stored_token, bytes):
try:
encoder = self.redis.connection_pool.get_encoder()
except AttributeError:
# Cluster
encoder = self.redis.get_encoder()
stored_token = encoder.encode(stored_token)
return self.local.token is not None and stored_token == self.local.token
def release(self) -> Awaitable[None]:
"""Releases the already acquired lock"""
expected_token = self.local.token
if expected_token is None:
raise LockError(
"Cannot release a lock that's not owned or is already unlocked.",
lock_name=self.name,
)
self.local.token = None
return self.do_release(expected_token)
async def do_release(self, expected_token: bytes) -> None:
if not bool(
await self.lua_release(
keys=[self.name], args=[expected_token], client=self.redis
)
):
raise LockNotOwnedError("Cannot release a lock that's no longer owned")
def extend(
self, additional_time: Number, replace_ttl: bool = False
) -> Awaitable[bool]:
"""
Adds more time to an already acquired lock.
``additional_time`` can be specified as an integer or a float, both
representing the number of seconds to add.
``replace_ttl`` if False (the default), add `additional_time` to
the lock's existing ttl. If True, replace the lock's ttl with
`additional_time`.
"""
if self.local.token is None:
raise LockError("Cannot extend an unlocked lock")
if self.timeout is None:
raise LockError("Cannot extend a lock with no timeout")
return self.do_extend(additional_time, replace_ttl)
async def do_extend(self, additional_time, replace_ttl) -> bool:
additional_time = int(additional_time * 1000)
if not bool(
await self.lua_extend(
keys=[self.name],
args=[self.local.token, additional_time, replace_ttl and "1" or "0"],
client=self.redis,
)
):
raise LockNotOwnedError("Cannot extend a lock that's no longer owned")
return True
def reacquire(self) -> Awaitable[bool]:
"""
Resets a TTL of an already acquired lock back to a timeout value.
"""
if self.local.token is None:
raise LockError("Cannot reacquire an unlocked lock")
if self.timeout is None:
raise LockError("Cannot reacquire a lock with no timeout")
return self.do_reacquire()
async def do_reacquire(self) -> bool:
timeout = int(self.timeout * 1000)
if not bool(
await self.lua_reacquire(
keys=[self.name], args=[self.local.token, timeout], client=self.redis
)
):
raise LockNotOwnedError("Cannot reacquire a lock that's no longer owned")
return True
| Lock |
python | PyCQA__bandit | tests/functional/test_baseline.py | {
"start": 1023,
"end": 14773
} | class ____(testtools.TestCase):
"""Functional tests for Bandit baseline.
This set of tests is used to verify that the baseline comparison handles
finding and comparing results appropriately. The only comparison is the
number of candidates per file, meaning that any candidates found may
already exist in the baseline. In this case, all candidates are flagged
and a user will need to investigate the candidates related to that file.
"""
def setUp(self):
super().setUp()
self.examples_path = "examples"
self.baseline_commands = ["bandit", "-r"]
self.baseline_report_file = "baseline_report.json"
def _run_bandit_baseline(self, target_directory, baseline_file):
"""A helper method to run bandit baseline
This method will run the bandit baseline test provided an existing
baseline report and the target directory containing the content to be
tested.
:param target_directory: Directory containing content to be compared
:param baseline_file: File containing an existing baseline report
:return The baseline test results and return code
"""
cmds = self.baseline_commands + ["-b", baseline_file, target_directory]
process = subprocess.Popen(
cmds,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
close_fds=True,
)
stdout, stderr = process.communicate()
return (stdout.decode("utf-8"), process.poll())
def _create_baseline(self, baseline_paired_files):
"""A helper method to create a baseline to use during baseline test
This method will run bandit to create an initial baseline that can
then be used during the bandit baseline test. Since the file contents
of the baseline report can be extremely dynamic and difficult to create
ahead of time, we do this at runtime to reduce the risk of missing
something. To do this, we must temporary replace the file contents
with different code which will produce the proper baseline results to
be used during the baseline test.
:param baseline_paired_files A dictionary based set of files for which
to create the baseline report with. For each key file, a value file
is provided, which contains content to use in place of the key file
when the baseline report is created initially.
:return The target directory for the baseline test and the return code
of the bandit run to help determine whether the baseline report was
populated
"""
target_directory = self.useFixture(fixtures.TempDir()).path
baseline_results = os.path.join(
target_directory, self.baseline_report_file
)
for key_file, value_file in baseline_paired_files.items():
shutil.copy(
os.path.join(self.examples_path, value_file),
os.path.join(target_directory, key_file),
)
cmds = self.baseline_commands + [
"-f",
"json",
"-o",
baseline_results,
target_directory,
]
process = subprocess.Popen(
cmds,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
close_fds=True,
)
stdout, stderr = process.communicate()
return_code = process.poll()
for key_file, value_file in baseline_paired_files.items():
shutil.copy(
os.path.join(self.examples_path, key_file),
os.path.join(target_directory, key_file),
)
return (target_directory, return_code)
def test_no_new_candidates(self):
"""Tests when there are no new candidates
Test that bandit returns no issues found, as there are no new
candidates found compared with those found from the baseline.
"""
baseline_report_files = {
"new_candidates-all.py": "new_candidates-all.py"
}
target_directory, baseline_code = self._create_baseline(
baseline_report_files
)
# assert the initial baseline found results
self.assertEqual(1, baseline_code)
baseline_report = os.path.join(
target_directory, self.baseline_report_file
)
return_value, return_code = self._run_bandit_baseline(
target_directory, baseline_report
)
# assert there were no results (no candidates found)
self.assertEqual(0, return_code)
self.assertIn(new_candidates_all_total_lines, return_value)
self.assertIn(new_candidates_skip_nosec_lines, return_value)
self.assertIn(baseline_no_skipped_files, return_value)
self.assertIn(baseline_no_issues_found, return_value)
def test_no_existing_no_new_candidates(self):
"""Tests when there are no new or existing candidates
Test file with no existing candidates from baseline and no new
candidates.
"""
baseline_report_files = {"okay.py": "okay.py"}
target_directory, baseline_code = self._create_baseline(
baseline_report_files
)
# assert the initial baseline found nothing
self.assertEqual(0, baseline_code)
baseline_report = os.path.join(
target_directory, self.baseline_report_file
)
return_value, return_code = self._run_bandit_baseline(
target_directory, baseline_report
)
# assert there were no results (no candidates found)
self.assertEqual(0, return_code)
self.assertIn("Total lines of code: 1", return_value)
self.assertIn(new_candidates_no_nosec_lines, return_value)
self.assertIn(baseline_no_skipped_files, return_value)
self.assertIn(baseline_no_issues_found, return_value)
def test_no_existing_with_new_candidates(self):
"""Tests when there are new candidates and no existing candidates
Test that bandit returns issues found in file that had no existing
candidates from baseline but now contain candidates.
"""
baseline_report_files = {
"new_candidates-all.py": "new_candidates-none.py"
}
target_directory, baseline_code = self._create_baseline(
baseline_report_files
)
# assert the initial baseline found nothing
self.assertEqual(0, baseline_code)
baseline_report = os.path.join(
target_directory, self.baseline_report_file
)
return_value, return_code = self._run_bandit_baseline(
target_directory, baseline_report
)
# assert there were results (candidates found)
self.assertEqual(1, return_code)
self.assertIn(new_candidates_all_total_lines, return_value)
self.assertIn(new_candidates_skip_nosec_lines, return_value)
self.assertIn(baseline_no_skipped_files, return_value)
self.assertIn(xml_sax_issue_id, return_value)
self.assertIn(yaml_load_issue_id, return_value)
self.assertIn(shell_issue_id, return_value)
# candidate #1
self.assertIn(candidate_example_one, return_value)
# candidate #3
self.assertIn(candidate_example_three, return_value)
# candidate #5
self.assertIn(candidate_example_five, return_value)
def test_existing_and_new_candidates(self):
"""Tests when tere are new candidates and existing candidates
Test that bandit returns issues found in file with existing
candidates. The new candidates should be returned in this case.
"""
baseline_report_files = {
"new_candidates-all.py": "new_candidates-some.py"
}
target_directory, baseline_code = self._create_baseline(
baseline_report_files
)
# assert the initial baseline found results
self.assertEqual(1, baseline_code)
baseline_report = os.path.join(
target_directory, self.baseline_report_file
)
return_value, return_code = self._run_bandit_baseline(
target_directory, baseline_report
)
# assert there were results (candidates found)
self.assertEqual(1, return_code)
self.assertIn(new_candidates_all_total_lines, return_value)
self.assertIn(new_candidates_skip_nosec_lines, return_value)
self.assertIn(baseline_no_skipped_files, return_value)
self.assertIn(xml_sax_issue_id, return_value)
self.assertIn(yaml_load_issue_id, return_value)
# candidate #3
self.assertIn(candidate_example_three, return_value)
# candidate #5
self.assertIn(candidate_example_five, return_value)
def test_no_new_candidates_include_nosec(self):
"""Test to check nosec references with no new candidates
Test that nosec references are included during a baseline test, which
would normally be ignored. In this test case, there are no new
candidates even while including the nosec references.
"""
self.baseline_commands.append("--ignore-nosec")
baseline_report_files = {
"new_candidates-all.py": "new_candidates-all.py"
}
target_directory, baseline_code = self._create_baseline(
baseline_report_files
)
# assert the initial baseline found results
self.assertEqual(1, baseline_code)
baseline_report = os.path.join(
target_directory, self.baseline_report_file
)
return_value, return_code = self._run_bandit_baseline(
target_directory, baseline_report
)
# assert there were no results (candidates found)
self.assertEqual(0, return_code)
self.assertIn(new_candidates_all_total_lines, return_value)
self.assertIn(new_candidates_no_nosec_lines, return_value)
self.assertIn(baseline_no_skipped_files, return_value)
self.assertIn(baseline_no_issues_found, return_value)
def test_new_candidates_include_nosec_only_nosecs(self):
"""Test to check nosec references with new only nosec candidates
Test that nosec references are included during a baseline test, which
would normally be ignored. In this test case, there are new candidates
which are specifically nosec references.
"""
self.baseline_commands.append("--ignore-nosec")
baseline_report_files = {
"new_candidates-nosec.py": "new_candidates-none.py"
}
target_directory, baseline_code = self._create_baseline(
baseline_report_files
)
# assert the initial baseline found nothing
self.assertEqual(0, baseline_code)
baseline_report = os.path.join(
target_directory, self.baseline_report_file
)
return_value, return_code = self._run_bandit_baseline(
target_directory, baseline_report
)
# assert there were results (candidates found)
self.assertEqual(1, return_code)
self.assertIn(new_candidates_some_total_lines, return_value)
self.assertIn(new_candidates_no_nosec_lines, return_value)
self.assertIn(baseline_no_skipped_files, return_value)
self.assertIn(xml_sax_issue_id, return_value)
self.assertIn(yaml_load_issue_id, return_value)
self.assertIn(shell_issue_id, return_value)
# candidate #2
self.assertIn(candidate_example_two, return_value)
# candidate #4
self.assertIn(candidate_example_four, return_value)
# candidate #6
self.assertIn(candidate_example_six, return_value)
def test_new_candidates_include_nosec_new_nosecs(self):
"""Test to check nosec references with new candidates, including nosecs
Test that nosec references are included during a baseline test, which
would normally be ignored. In this test case, there are new candidates
that also includes new nosec references as well.
"""
self.baseline_commands.append("--ignore-nosec")
baseline_report_files = {
"new_candidates-all.py": "new_candidates-none.py"
}
target_directory, baseline_code = self._create_baseline(
baseline_report_files
)
# assert the initial baseline found nothing
self.assertEqual(0, baseline_code)
baseline_report = os.path.join(
target_directory, self.baseline_report_file
)
return_value, return_code = self._run_bandit_baseline(
target_directory, baseline_report
)
# assert there were results (candidates found)
self.assertEqual(1, return_code)
self.assertIn(new_candidates_all_total_lines, return_value)
self.assertIn(new_candidates_no_nosec_lines, return_value)
self.assertIn(baseline_no_skipped_files, return_value)
self.assertIn(xml_sax_issue_id, return_value)
self.assertIn(yaml_load_issue_id, return_value)
self.assertIn(shell_issue_id, return_value)
# candidate #1
self.assertIn(candidate_example_one, return_value)
# candidate #2
self.assertIn(candidate_example_two, return_value)
# candidate #3
self.assertIn(candidate_example_three, return_value)
# candidate #4
self.assertIn(candidate_example_four, return_value)
# candidate #5
self.assertIn(candidate_example_five, return_value)
# candidate #6
self.assertIn(candidate_example_six, return_value)
| BaselineFunctionalTests |
python | sqlalchemy__sqlalchemy | test/dialect/mysql/test_for_update.py | {
"start": 770,
"end": 6436
class ____(fixtures.DeclarativeMappedTest):
    """Functional tests that SELECT ... FOR UPDATE acquires real InnoDB row locks.

    Each test issues a query (optionally with joinedload / subqueries) in one
    session, then probes from a second connection whether UPDATEs against
    table ``a`` or ``b`` block, using a 1-second innodb_lock_wait_timeout so
    a held lock surfaces quickly as a timeout error.
    """
    __backend__ = True
    __only_on__ = "mysql", "mariadb"
    __requires__ = ("mysql_for_update",)
    @classmethod
    def setup_classes(cls):
        """Declare the mapped classes: A (parent) and B (child rows of A)."""
        Base = cls.DeclarativeBasic
        class A(Base):
            __tablename__ = "a"
            id = Column(Integer, primary_key=True)
            x = Column(Integer)
            y = Column(Integer)
            bs = relationship("B")
            # InnoDB is required for row-level locking semantics
            __table_args__ = {
                "mysql_engine": "InnoDB",
                "mariadb_engine": "InnoDB",
            }
        class B(Base):
            __tablename__ = "b"
            id = Column(Integer, primary_key=True)
            a_id = Column(ForeignKey("a.id"))
            x = Column(Integer)
            y = Column(Integer)
            __table_args__ = {
                "mysql_engine": "InnoDB",
                "mariadb_engine": "InnoDB",
            }
    @classmethod
    def insert_data(cls, connection):
        """Insert fixture rows; every x/y value is kept below 10 so the
        probe UPDATE (which writes values > 10) is distinguishable."""
        A = cls.classes.A
        B = cls.classes.B
        # all the x/y are < 10
        s = Session(connection)
        s.add_all(
            [
                A(x=5, y=5, bs=[B(x=4, y=4), B(x=2, y=8), B(x=7, y=1)]),
                A(x=7, y=5, bs=[B(x=4, y=4), B(x=5, y=8)]),
            ]
        )
        s.commit()
    @contextlib.contextmanager
    def run_test(self):
        """Yield a Session on a dedicated connection with a 1s lock timeout;
        the transaction is rolled back (releasing any locks) on exit."""
        connection = testing.db.connect()
        connection.exec_driver_sql("set innodb_lock_wait_timeout=1")
        try:
            yield Session(bind=connection)
        finally:
            connection.rollback()
            connection.close()
    def _assert_a_is_locked(self, should_be_locked):
        """From a separate transaction, attempt an UPDATE of every row in
        table ``a``; assert it times out iff *should_be_locked* is true."""
        A = self.classes.A
        with testing.db.begin() as alt_trans:
            alt_trans.exec_driver_sql("set innodb_lock_wait_timeout=1")
            # set x/y > 10
            try:
                alt_trans.execute(update(A).values(x=15, y=19))
            except (
                exc.InternalError,
                exc.OperationalError,
                exc.DatabaseError,
            ) as err:
                # different driver versions raise different wrapper types;
                # the message text is the reliable signal
                assert "Lock wait timeout exceeded" in str(err)
                assert should_be_locked
            else:
                assert not should_be_locked
    def _assert_b_is_locked(self, should_be_locked):
        """Same probe as _assert_a_is_locked, but against table ``b``."""
        B = self.classes.B
        with testing.db.begin() as alt_trans:
            alt_trans.exec_driver_sql("set innodb_lock_wait_timeout=1")
            # set x/y > 10
            try:
                alt_trans.execute(update(B).values(x=15, y=19))
            except (
                exc.InternalError,
                exc.OperationalError,
                exc.DatabaseError,
            ) as err:
                assert "Lock wait timeout exceeded" in str(err)
                assert should_be_locked
            else:
                assert not should_be_locked
    def test_basic_lock(self):
        """Plain with_for_update() locks the selected A rows."""
        A = self.classes.A
        with self.run_test() as s:
            s.query(A).with_for_update().all()
            # test our fixture
            self._assert_a_is_locked(True)
    def test_basic_not_lock(self):
        """A plain SELECT without FOR UPDATE locks nothing."""
        A = self.classes.A
        with self.run_test() as s:
            s.query(A).all()
            # test our fixture
            self._assert_a_is_locked(False)
    def test_joined_lock_subquery(self):
        """joinedload + with_for_update + first() must lock both tables."""
        A = self.classes.A
        with self.run_test() as s:
            s.query(A).options(joinedload(A.bs)).with_for_update().first()
            # test for issue #4246, should be locked
            self._assert_a_is_locked(True)
            self._assert_b_is_locked(True)
    def test_joined_lock_subquery_inner_for_update(self):
        """FOR UPDATE only inside a subquery locks A but not the joined B."""
        A = self.classes.A
        B = self.classes.B
        with self.run_test() as s:
            q = s.query(A).with_for_update().subquery()
            s.query(q).join(B).all()
            # FOR UPDATE is inside the subquery, should be locked
            self._assert_a_is_locked(True)
            # FOR UPDATE is inside the subquery, B is not locked
            self._assert_b_is_locked(False)
    def test_joined_lock_subquery_inner_for_update_outer(self):
        """FOR UPDATE both inside the subquery and on the outer query locks
        A and B."""
        A = self.classes.A
        B = self.classes.B
        with self.run_test() as s:
            q = s.query(A).with_for_update().subquery()
            s.query(q).join(B).with_for_update().all()
            # FOR UPDATE is inside the subquery, should be locked
            self._assert_a_is_locked(True)
            # FOR UPDATE is also outside the subquery, B is locked
            self._assert_b_is_locked(True)
    def test_joined_lock_subquery_order_for_update_outer(self):
        """FOR UPDATE only on the outer query does not lock the rows that
        were read solely through the inner (plain) subquery."""
        A = self.classes.A
        B = self.classes.B
        with self.run_test() as s:
            q = s.query(A).order_by(A.id).subquery()
            s.query(q).join(B).with_for_update().all()
            # FOR UPDATE is inside the subquery, should not be locked
            self._assert_a_is_locked(False)
            self._assert_b_is_locked(True)
    def test_joined_lock_no_subquery(self):
        """joinedload + with_for_update + all() (no subquery) locks both."""
        A = self.classes.A
        with self.run_test() as s:
            s.query(A).options(joinedload(A.bs)).with_for_update().all()
            # no subquery, should be locked
            self._assert_a_is_locked(True)
            self._assert_b_is_locked(True)
    @testing.requires.mysql_for_update_read
    def test_for_update_read(self):
        """LOCK IN SHARE MODE (read=True) still blocks a concurrent UPDATE."""
        A = self.classes.A
        with self.run_test() as s:
            s.query(A).options(joinedload(A.bs)).with_for_update(
                read=True
            ).all()
            # no subquery, should be locked
            self._assert_a_is_locked(True)
            self._assert_b_is_locked(True)
| MySQLForUpdateLockingTest |
python | getsentry__sentry | tests/sentry/integrations/slack/tasks/test_send_notifications_on_activity.py | {
"start": 324,
"end": 2238
class ____(TestCase):
    """Tests for the activity post-save receiver that schedules Slack
    thread notifications."""
    def setUp(self) -> None:
        # stand-in for the celery task; apply_async is the entry point the
        # receiver is expected to invoke
        task_stub = mock.MagicMock()
        task_stub.apply_async = mock.MagicMock()
        self.mock_send_activity_notifications = task_stub
    def test_ignores_uncreated_events(self) -> None:
        """Updates (created=False) must not schedule a notification."""
        with mock.patch(
            "sentry.integrations.slack.tasks.send_notifications_on_activity.send_activity_notifications_to_slack_threads",
            self.mock_send_activity_notifications,
        ):
            stub_activity = mock.MagicMock()
            stub_activity.id = 123
            activity_created_receiver(stub_activity, False)
            self.mock_send_activity_notifications.apply_async.assert_not_called()
    def test_calls_async_function(self) -> None:
        """Newly created activities schedule the task with their id."""
        with mock.patch(
            "sentry.integrations.slack.tasks.send_notifications_on_activity.send_activity_notifications_to_slack_threads",
            self.mock_send_activity_notifications,
        ):
            stub_activity = mock.MagicMock()
            stub_activity.id = 123
            activity_created_receiver(stub_activity, True)
            self.mock_send_activity_notifications.apply_async.assert_called_with(
                kwargs={"activity_id": stub_activity.id}
            )
    def test_receiver_signal(self) -> None:
        """Creating a real Activity row fires the receiver via the signal."""
        with mock.patch(
            "sentry.integrations.slack.tasks.send_notifications_on_activity.send_activity_notifications_to_slack_threads",
            self.mock_send_activity_notifications,
        ):
            created = Activity.objects.create(
                project=self.project,
                group=self.group,
                type=ActivityType.NOTE.value,
                data={},
                user_id=self.user.id,
            )
            self.mock_send_activity_notifications.apply_async.assert_called_with(
                kwargs={"activity_id": created.id}
            )
| TestActivityCreatedReceiver |
python | apache__airflow | devel-common/src/tests_common/test_utils/stream_capture_manager.py | {
"start": 4844,
"end": 5042
class ____(StreamCaptureManager):
    """Capture manager preconfigured to record stdout and leave stderr alone."""
    def __init__(self):
        # same configuration as StreamCaptureManager(capture_stdout=True,
        # capture_stderr=False), packaged for convenience
        super().__init__(capture_stderr=False, capture_stdout=True)
| StdoutCaptureManager |
python | dagster-io__dagster | python_modules/dagster-graphql/dagster_graphql/schema/errors.py | {
"start": 9078,
"end": 9467
class ____(graphene.ObjectType):
    """GraphQL error type returned when no run group exists for a run id."""
    class Meta:
        interfaces = (GrapheneError,)
        name = "RunGroupNotFoundError"
    # id of the run whose group lookup failed; exposed to GraphQL clients
    run_id = graphene.NonNull(graphene.String)
    def __init__(self, run_id):
        """Populate the error payload for the given run id.

        Args:
            run_id (str): id of the run whose group could not be found.
        """
        super().__init__()
        self.run_id = check.str_param(run_id, "run_id")
        self.message = f"Run group of run {run_id} could not be found."
| GrapheneRunGroupNotFoundError |
python | pytorch__pytorch | test/torch_np/numpy_tests/core/test_einsum.py | {
"start": 46617,
"end": 53335
class ____(TestCase):
    """Tests for np.einsum_path contraction-order selection.

    The expected paths are pinned to the behavior of the 'greedy' and
    'optimal' optimizers for fixed operand shapes, so operand construction
    is kept deterministic in shape (values are random but irrelevant to
    path selection).
    """
    def build_operands(self, string, size_dict=global_size_dict):
        # Builds views based off initial operands
        # Returns [subscript_string, arr0, arr1, ...] ready to splat into
        # np.einsum_path(*operands); each term's dims come from size_dict.
        operands = [string]
        terms = string.split("->")[0].split(",")
        for term in terms:
            dims = [size_dict[x] for x in term]
            operands.append(np.random.rand(*dims))
        return operands
    def assert_path_equal(self, comp, benchmark):
        # Checks if list of tuples are equivalent
        # Element 0 is the "einsum_path" marker; only the contraction
        # tuples (positions 1..) are compared for type and value.
        ret = len(comp) == len(benchmark)
        assert_(ret)
        for pos in range(len(comp) - 1):
            ret &= isinstance(comp[pos + 1], tuple)
            ret &= comp[pos + 1] == benchmark[pos + 1]
        assert_(ret)
    def test_memory_contraints(self):
        # Ensure memory constraints are satisfied
        # A 0-size memory limit forces a single all-at-once contraction
        # for both optimizers.
        outer_test = self.build_operands("a,b,c->abc")
        path, path_str = np.einsum_path(*outer_test, optimize=("greedy", 0))
        self.assert_path_equal(path, ["einsum_path", (0, 1, 2)])
        path, path_str = np.einsum_path(*outer_test, optimize=("optimal", 0))
        self.assert_path_equal(path, ["einsum_path", (0, 1, 2)])
        long_test = self.build_operands("acdf,jbje,gihb,hfac")
        path, path_str = np.einsum_path(*long_test, optimize=("greedy", 0))
        self.assert_path_equal(path, ["einsum_path", (0, 1, 2, 3)])
        path, path_str = np.einsum_path(*long_test, optimize=("optimal", 0))
        self.assert_path_equal(path, ["einsum_path", (0, 1, 2, 3)])
    def test_long_paths(self):
        # Long complex cases
        # Long test 1
        long_test1 = self.build_operands("acdf,jbje,gihb,hfac,gfac,gifabc,hfac")
        path, path_str = np.einsum_path(*long_test1, optimize="greedy")
        self.assert_path_equal(
            path, ["einsum_path", (3, 6), (3, 4), (2, 4), (2, 3), (0, 2), (0, 1)]
        )
        path, path_str = np.einsum_path(*long_test1, optimize="optimal")
        self.assert_path_equal(
            path, ["einsum_path", (3, 6), (3, 4), (2, 4), (2, 3), (0, 2), (0, 1)]
        )
        # Long test 2
        long_test2 = self.build_operands("chd,bde,agbc,hiad,bdi,cgh,agdb")
        path, path_str = np.einsum_path(*long_test2, optimize="greedy")
        self.assert_path_equal(
            path, ["einsum_path", (3, 4), (0, 3), (3, 4), (1, 3), (1, 2), (0, 1)]
        )
        path, path_str = np.einsum_path(*long_test2, optimize="optimal")
        self.assert_path_equal(
            path, ["einsum_path", (0, 5), (1, 4), (3, 4), (1, 3), (1, 2), (0, 1)]
        )
    def test_edge_paths(self):
        # Difficult edge cases
        # Edge test1
        edge_test1 = self.build_operands("eb,cb,fb->cef")
        path, path_str = np.einsum_path(*edge_test1, optimize="greedy")
        self.assert_path_equal(path, ["einsum_path", (0, 2), (0, 1)])
        path, path_str = np.einsum_path(*edge_test1, optimize="optimal")
        self.assert_path_equal(path, ["einsum_path", (0, 2), (0, 1)])
        # Edge test2
        edge_test2 = self.build_operands("dd,fb,be,cdb->cef")
        path, path_str = np.einsum_path(*edge_test2, optimize="greedy")
        self.assert_path_equal(path, ["einsum_path", (0, 3), (0, 1), (0, 1)])
        path, path_str = np.einsum_path(*edge_test2, optimize="optimal")
        self.assert_path_equal(path, ["einsum_path", (0, 3), (0, 1), (0, 1)])
        # Edge test3
        edge_test3 = self.build_operands("bca,cdb,dbf,afc->")
        path, path_str = np.einsum_path(*edge_test3, optimize="greedy")
        self.assert_path_equal(path, ["einsum_path", (1, 2), (0, 2), (0, 1)])
        path, path_str = np.einsum_path(*edge_test3, optimize="optimal")
        self.assert_path_equal(path, ["einsum_path", (1, 2), (0, 2), (0, 1)])
        # Edge test4
        edge_test4 = self.build_operands("dcc,fce,ea,dbf->ab")
        path, path_str = np.einsum_path(*edge_test4, optimize="greedy")
        self.assert_path_equal(path, ["einsum_path", (1, 2), (0, 1), (0, 1)])
        path, path_str = np.einsum_path(*edge_test4, optimize="optimal")
        self.assert_path_equal(path, ["einsum_path", (1, 2), (0, 2), (0, 1)])
        # Edge test5
        # NOTE(review): variable name edge_test4 is reused here for the
        # fifth edge case; harmless but easy to misread.
        edge_test4 = self.build_operands(
            "a,ac,ab,ad,cd,bd,bc->", size_dict={"a": 20, "b": 20, "c": 20, "d": 20}
        )
        path, path_str = np.einsum_path(*edge_test4, optimize="greedy")
        self.assert_path_equal(path, ["einsum_path", (0, 1), (0, 1, 2, 3, 4, 5)])
        path, path_str = np.einsum_path(*edge_test4, optimize="optimal")
        self.assert_path_equal(path, ["einsum_path", (0, 1), (0, 1, 2, 3, 4, 5)])
    def test_path_type_input(self):
        # Test explicit path handling
        path_test = self.build_operands("dcc,fce,ea,dbf->ab")
        path, path_str = np.einsum_path(*path_test, optimize=False)
        self.assert_path_equal(path, ["einsum_path", (0, 1, 2, 3)])
        path, path_str = np.einsum_path(*path_test, optimize=True)
        self.assert_path_equal(path, ["einsum_path", (1, 2), (0, 1), (0, 1)])
        exp_path = ["einsum_path", (0, 2), (0, 2), (0, 1)]
        path, path_str = np.einsum_path(*path_test, optimize=exp_path)
        self.assert_path_equal(path, exp_path)
        # Double check einsum works on the input path
        noopt = np.einsum(*path_test, optimize=False)
        opt = np.einsum(*path_test, optimize=exp_path)
        assert_almost_equal(noopt, opt)
    def test_path_type_input_internal_trace(self):
        # gh-20962
        # An explicit path containing a single-operand step (internal trace)
        # must be accepted and produce the same result as no optimization.
        path_test = self.build_operands("cab,cdd->ab")
        exp_path = ["einsum_path", (1,), (0, 1)]
        path, path_str = np.einsum_path(*path_test, optimize=exp_path)
        self.assert_path_equal(path, exp_path)
        # Double check einsum works on the input path
        noopt = np.einsum(*path_test, optimize=False)
        opt = np.einsum(*path_test, optimize=exp_path)
        assert_almost_equal(noopt, opt)
    def test_path_type_input_invalid(self):
        # Malformed explicit paths (wrong step count / indices) must raise
        # rather than silently compute something else.
        path_test = self.build_operands("ab,bc,cd,de->ae")
        exp_path = ["einsum_path", (2, 3), (0, 1)]
        assert_raises(RuntimeError, np.einsum, *path_test, optimize=exp_path)
        assert_raises(RuntimeError, np.einsum_path, *path_test, optimize=exp_path)
        path_test = self.build_operands("a,a,a->a")
        exp_path = ["einsum_path", (1,), (0, 1)]
        assert_raises(RuntimeError, np.einsum, *path_test, optimize=exp_path)
        assert_raises(RuntimeError, np.einsum_path, *path_test, optimize=exp_path)
    def test_spaces(self):
        # gh-10794
        # Whitespace around subscripts and ellipses must be tolerated.
        arr = np.array([[1]])
        for sp in itertools.product(["", " "], repeat=4):
            # no error for any spacing
            np.einsum("{}...a{}->{}...a{}".format(*sp), arr)
| TestEinsumPath |
python | kubernetes-client__python | kubernetes/client/api/apiextensions_v1_api.py | {
"start": 543,
"end": 125123
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def create_custom_resource_definition(self, body, **kwargs): # noqa: E501
"""create_custom_resource_definition # noqa: E501
create a CustomResourceDefinition # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_custom_resource_definition(body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param V1CustomResourceDefinition body: (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1CustomResourceDefinition
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.create_custom_resource_definition_with_http_info(body, **kwargs) # noqa: E501
def create_custom_resource_definition_with_http_info(self, body, **kwargs): # noqa: E501
"""create_custom_resource_definition # noqa: E501
create a CustomResourceDefinition # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_custom_resource_definition_with_http_info(body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param V1CustomResourceDefinition body: (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1CustomResourceDefinition, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'body',
'pretty',
'dry_run',
'field_manager',
'field_validation'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method create_custom_resource_definition" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'body' is set
if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
local_var_params['body'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `body` when calling `create_custom_resource_definition`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/apiextensions.k8s.io/v1/customresourcedefinitions', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1CustomResourceDefinition', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def delete_collection_custom_resource_definition(self, **kwargs): # noqa: E501
"""delete_collection_custom_resource_definition # noqa: E501
delete collection of CustomResourceDefinition # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_collection_custom_resource_definition(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
:param bool ignore_store_read_error_with_cluster_breaking_potential: if set to true, it will trigger an unsafe deletion of the resource in case the normal deletion flow fails with a corrupt object error. A resource is considered corrupt if it can not be retrieved from the underlying storage successfully because of a) its data can not be transformed e.g. decryption failure, or b) it fails to decode into an object. NOTE: unsafe deletion ignores finalizer constraints, skips precondition checks, and removes the object from the storage. WARNING: This may potentially break the cluster if the workload associated with the resource being unsafe-deleted relies on normal deletion flow. Use only if you REALLY know what you are doing. The default value is false, and the user must opt in to enable it
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
:param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
:param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
:param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param V1DeleteOptions body:
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1Status
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.delete_collection_custom_resource_definition_with_http_info(**kwargs) # noqa: E501
def delete_collection_custom_resource_definition_with_http_info(self, **kwargs): # noqa: E501
"""delete_collection_custom_resource_definition # noqa: E501
delete collection of CustomResourceDefinition # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_collection_custom_resource_definition_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
:param bool ignore_store_read_error_with_cluster_breaking_potential: if set to true, it will trigger an unsafe deletion of the resource in case the normal deletion flow fails with a corrupt object error. A resource is considered corrupt if it can not be retrieved from the underlying storage successfully because of a) its data can not be transformed e.g. decryption failure, or b) it fails to decode into an object. NOTE: unsafe deletion ignores finalizer constraints, skips precondition checks, and removes the object from the storage. WARNING: This may potentially break the cluster if the workload associated with the resource being unsafe-deleted relies on normal deletion flow. Use only if you REALLY know what you are doing. The default value is false, and the user must opt in to enable it
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
:param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
:param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
:param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param V1DeleteOptions body:
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1Status, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'pretty',
'_continue',
'dry_run',
'field_selector',
'grace_period_seconds',
'ignore_store_read_error_with_cluster_breaking_potential',
'label_selector',
'limit',
'orphan_dependents',
'propagation_policy',
'resource_version',
'resource_version_match',
'send_initial_events',
'timeout_seconds',
'body'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_collection_custom_resource_definition" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if '_continue' in local_var_params and local_var_params['_continue'] is not None: # noqa: E501
query_params.append(('continue', local_var_params['_continue'])) # noqa: E501
if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None: # noqa: E501
query_params.append(('fieldSelector', local_var_params['field_selector'])) # noqa: E501
if 'grace_period_seconds' in local_var_params and local_var_params['grace_period_seconds'] is not None: # noqa: E501
query_params.append(('gracePeriodSeconds', local_var_params['grace_period_seconds'])) # noqa: E501
if 'ignore_store_read_error_with_cluster_breaking_potential' in local_var_params and local_var_params['ignore_store_read_error_with_cluster_breaking_potential'] is not None: # noqa: E501
query_params.append(('ignoreStoreReadErrorWithClusterBreakingPotential', local_var_params['ignore_store_read_error_with_cluster_breaking_potential'])) # noqa: E501
if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None: # noqa: E501
query_params.append(('labelSelector', local_var_params['label_selector'])) # noqa: E501
if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501
query_params.append(('limit', local_var_params['limit'])) # noqa: E501
if 'orphan_dependents' in local_var_params and local_var_params['orphan_dependents'] is not None: # noqa: E501
query_params.append(('orphanDependents', local_var_params['orphan_dependents'])) # noqa: E501
if 'propagation_policy' in local_var_params and local_var_params['propagation_policy'] is not None: # noqa: E501
query_params.append(('propagationPolicy', local_var_params['propagation_policy'])) # noqa: E501
if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None: # noqa: E501
query_params.append(('resourceVersion', local_var_params['resource_version'])) # noqa: E501
if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None: # noqa: E501
query_params.append(('resourceVersionMatch', local_var_params['resource_version_match'])) # noqa: E501
if 'send_initial_events' in local_var_params and local_var_params['send_initial_events'] is not None: # noqa: E501
query_params.append(('sendInitialEvents', local_var_params['send_initial_events'])) # noqa: E501
if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None: # noqa: E501
query_params.append(('timeoutSeconds', local_var_params['timeout_seconds'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/apiextensions.k8s.io/v1/customresourcedefinitions', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Status', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def delete_custom_resource_definition(self, name, **kwargs): # noqa: E501
"""delete_custom_resource_definition # noqa: E501
delete a CustomResourceDefinition # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_custom_resource_definition(name, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the CustomResourceDefinition (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
:param bool ignore_store_read_error_with_cluster_breaking_potential: if set to true, it will trigger an unsafe deletion of the resource in case the normal deletion flow fails with a corrupt object error. A resource is considered corrupt if it can not be retrieved from the underlying storage successfully because of a) its data can not be transformed e.g. decryption failure, or b) it fails to decode into an object. NOTE: unsafe deletion ignores finalizer constraints, skips precondition checks, and removes the object from the storage. WARNING: This may potentially break the cluster if the workload associated with the resource being unsafe-deleted relies on normal deletion flow. Use only if you REALLY know what you are doing. The default value is false, and the user must opt in to enable it
:param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
:param V1DeleteOptions body:
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1Status
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.delete_custom_resource_definition_with_http_info(name, **kwargs) # noqa: E501
def delete_custom_resource_definition_with_http_info(self, name, **kwargs): # noqa: E501
"""delete_custom_resource_definition # noqa: E501
delete a CustomResourceDefinition # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_custom_resource_definition_with_http_info(name, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the CustomResourceDefinition (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
:param bool ignore_store_read_error_with_cluster_breaking_potential: if set to true, it will trigger an unsafe deletion of the resource in case the normal deletion flow fails with a corrupt object error. A resource is considered corrupt if it can not be retrieved from the underlying storage successfully because of a) its data can not be transformed e.g. decryption failure, or b) it fails to decode into an object. NOTE: unsafe deletion ignores finalizer constraints, skips precondition checks, and removes the object from the storage. WARNING: This may potentially break the cluster if the workload associated with the resource being unsafe-deleted relies on normal deletion flow. Use only if you REALLY know what you are doing. The default value is false, and the user must opt in to enable it
:param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
:param V1DeleteOptions body:
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1Status, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'name',
'pretty',
'dry_run',
'grace_period_seconds',
'ignore_store_read_error_with_cluster_breaking_potential',
'orphan_dependents',
'propagation_policy',
'body'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_custom_resource_definition" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'name' is set
if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
local_var_params['name'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `name` when calling `delete_custom_resource_definition`") # noqa: E501
collection_formats = {}
path_params = {}
if 'name' in local_var_params:
path_params['name'] = local_var_params['name'] # noqa: E501
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
if 'grace_period_seconds' in local_var_params and local_var_params['grace_period_seconds'] is not None: # noqa: E501
query_params.append(('gracePeriodSeconds', local_var_params['grace_period_seconds'])) # noqa: E501
if 'ignore_store_read_error_with_cluster_breaking_potential' in local_var_params and local_var_params['ignore_store_read_error_with_cluster_breaking_potential'] is not None: # noqa: E501
query_params.append(('ignoreStoreReadErrorWithClusterBreakingPotential', local_var_params['ignore_store_read_error_with_cluster_breaking_potential'])) # noqa: E501
if 'orphan_dependents' in local_var_params and local_var_params['orphan_dependents'] is not None: # noqa: E501
query_params.append(('orphanDependents', local_var_params['orphan_dependents'])) # noqa: E501
if 'propagation_policy' in local_var_params and local_var_params['propagation_policy'] is not None: # noqa: E501
query_params.append(('propagationPolicy', local_var_params['propagation_policy'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/apiextensions.k8s.io/v1/customresourcedefinitions/{name}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Status', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def get_api_resources(self, **kwargs): # noqa: E501
"""get_api_resources # noqa: E501
get available resources # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_api_resources(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1APIResourceList
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.get_api_resources_with_http_info(**kwargs) # noqa: E501
def get_api_resources_with_http_info(self, **kwargs): # noqa: E501
"""get_api_resources # noqa: E501
get available resources # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_api_resources_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1APIResourceList, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method get_api_resources" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/apiextensions.k8s.io/v1/', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1APIResourceList', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def list_custom_resource_definition(self, **kwargs): # noqa: E501
"""list_custom_resource_definition # noqa: E501
list or watch objects of kind CustomResourceDefinition # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_custom_resource_definition(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
:param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
:param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1CustomResourceDefinitionList
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.list_custom_resource_definition_with_http_info(**kwargs) # noqa: E501
def list_custom_resource_definition_with_http_info(self, **kwargs): # noqa: E501
"""list_custom_resource_definition # noqa: E501
list or watch objects of kind CustomResourceDefinition # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_custom_resource_definition_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
:param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
:param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1CustomResourceDefinitionList, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'pretty',
'allow_watch_bookmarks',
'_continue',
'field_selector',
'label_selector',
'limit',
'resource_version',
'resource_version_match',
'send_initial_events',
'timeout_seconds',
'watch'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method list_custom_resource_definition" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if 'allow_watch_bookmarks' in local_var_params and local_var_params['allow_watch_bookmarks'] is not None: # noqa: E501
query_params.append(('allowWatchBookmarks', local_var_params['allow_watch_bookmarks'])) # noqa: E501
if '_continue' in local_var_params and local_var_params['_continue'] is not None: # noqa: E501
query_params.append(('continue', local_var_params['_continue'])) # noqa: E501
if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None: # noqa: E501
query_params.append(('fieldSelector', local_var_params['field_selector'])) # noqa: E501
if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None: # noqa: E501
query_params.append(('labelSelector', local_var_params['label_selector'])) # noqa: E501
if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501
query_params.append(('limit', local_var_params['limit'])) # noqa: E501
if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None: # noqa: E501
query_params.append(('resourceVersion', local_var_params['resource_version'])) # noqa: E501
if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None: # noqa: E501
query_params.append(('resourceVersionMatch', local_var_params['resource_version_match'])) # noqa: E501
if 'send_initial_events' in local_var_params and local_var_params['send_initial_events'] is not None: # noqa: E501
query_params.append(('sendInitialEvents', local_var_params['send_initial_events'])) # noqa: E501
if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None: # noqa: E501
query_params.append(('timeoutSeconds', local_var_params['timeout_seconds'])) # noqa: E501
if 'watch' in local_var_params and local_var_params['watch'] is not None: # noqa: E501
query_params.append(('watch', local_var_params['watch'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch', 'application/cbor-seq']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/apiextensions.k8s.io/v1/customresourcedefinitions', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1CustomResourceDefinitionList', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def patch_custom_resource_definition(self, name, body, **kwargs): # noqa: E501
"""patch_custom_resource_definition # noqa: E501
partially update the specified CustomResourceDefinition # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_custom_resource_definition(name, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the CustomResourceDefinition (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
:param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
:param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1CustomResourceDefinition
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.patch_custom_resource_definition_with_http_info(name, body, **kwargs) # noqa: E501
def patch_custom_resource_definition_with_http_info(self, name, body, **kwargs): # noqa: E501
"""patch_custom_resource_definition # noqa: E501
partially update the specified CustomResourceDefinition # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_custom_resource_definition_with_http_info(name, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the CustomResourceDefinition (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
:param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
:param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1CustomResourceDefinition, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'name',
'body',
'pretty',
'dry_run',
'field_manager',
'field_validation',
'force'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method patch_custom_resource_definition" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'name' is set
if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
local_var_params['name'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `name` when calling `patch_custom_resource_definition`") # noqa: E501
# verify the required parameter 'body' is set
if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
local_var_params['body'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `body` when calling `patch_custom_resource_definition`") # noqa: E501
collection_formats = {}
path_params = {}
if 'name' in local_var_params:
path_params['name'] = local_var_params['name'] # noqa: E501
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
if 'force' in local_var_params and local_var_params['force'] is not None: # noqa: E501
query_params.append(('force', local_var_params['force'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json', 'application/apply-patch+yaml', 'application/apply-patch+cbor']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/apiextensions.k8s.io/v1/customresourcedefinitions/{name}', 'PATCH',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1CustomResourceDefinition', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def patch_custom_resource_definition_status(self, name, body, **kwargs): # noqa: E501
"""patch_custom_resource_definition_status # noqa: E501
partially update status of the specified CustomResourceDefinition # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_custom_resource_definition_status(name, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the CustomResourceDefinition (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
:param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
:param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1CustomResourceDefinition
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.patch_custom_resource_definition_status_with_http_info(name, body, **kwargs) # noqa: E501
def patch_custom_resource_definition_status_with_http_info(self, name, body, **kwargs): # noqa: E501
"""patch_custom_resource_definition_status # noqa: E501
partially update status of the specified CustomResourceDefinition # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_custom_resource_definition_status_with_http_info(name, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the CustomResourceDefinition (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
:param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
:param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1CustomResourceDefinition, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'name',
'body',
'pretty',
'dry_run',
'field_manager',
'field_validation',
'force'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method patch_custom_resource_definition_status" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'name' is set
if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
local_var_params['name'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `name` when calling `patch_custom_resource_definition_status`") # noqa: E501
# verify the required parameter 'body' is set
if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
local_var_params['body'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `body` when calling `patch_custom_resource_definition_status`") # noqa: E501
collection_formats = {}
path_params = {}
if 'name' in local_var_params:
path_params['name'] = local_var_params['name'] # noqa: E501
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
if 'force' in local_var_params and local_var_params['force'] is not None: # noqa: E501
query_params.append(('force', local_var_params['force'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json', 'application/apply-patch+yaml', 'application/apply-patch+cbor']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/apiextensions.k8s.io/v1/customresourcedefinitions/{name}/status', 'PATCH',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1CustomResourceDefinition', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def read_custom_resource_definition(self, name, **kwargs): # noqa: E501
"""read_custom_resource_definition # noqa: E501
read the specified CustomResourceDefinition # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_custom_resource_definition(name, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the CustomResourceDefinition (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1CustomResourceDefinition
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.read_custom_resource_definition_with_http_info(name, **kwargs) # noqa: E501
def read_custom_resource_definition_with_http_info(self, name, **kwargs):  # noqa: E501
    """read_custom_resource_definition  # noqa: E501

    read the specified CustomResourceDefinition  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.read_custom_resource_definition_with_http_info(name, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the CustomResourceDefinition (required)
    :param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: tuple(V1CustomResourceDefinition, status_code(int), headers(HTTPHeaderDict))
             If the method is called asynchronously,
             returns the request thread.
    """
    # NOTE: locals() must remain the first statement so it captures exactly
    # the call arguments ('self', 'name', 'kwargs') and no later locals.
    local_var_params = locals()

    # Keyword arguments accepted by this endpoint, plus the generic
    # request-control options shared by every generated method.
    all_params = [
        'name',
        'pretty'
    ]
    all_params.extend(
        [
            'async_req',
            '_return_http_data_only',
            '_preload_content',
            '_request_timeout'
        ]
    )

    # Reject unknown keyword arguments, then flatten the accepted ones into
    # local_var_params so all parameters are looked up uniformly below.
    for key, val in six.iteritems(local_var_params['kwargs']):
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method read_custom_resource_definition" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']
    # verify the required parameter 'name' is set
    if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
                                                   local_var_params['name'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `name` when calling `read_custom_resource_definition`")  # noqa: E501

    collection_formats = {}

    # {name} in the URL template below is filled from path_params.
    path_params = {}
    if 'name' in local_var_params:
        path_params['name'] = local_var_params['name']  # noqa: E501

    query_params = []
    if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
        query_params.append(('pretty', local_var_params['pretty]'[:-1]))  # noqa: E501
    header_params = {}

    form_params = []
    local_var_files = {}

    # GET request: no request body is sent.
    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor'])  # noqa: E501

    # Authentication setting
    auth_settings = ['BearerToken']  # noqa: E501

    return self.api_client.call_api(
        '/apis/apiextensions.k8s.io/v1/customresourcedefinitions/{name}', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='V1CustomResourceDefinition',  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats)
def read_custom_resource_definition_status(self, name, **kwargs):  # noqa: E501
    """read_custom_resource_definition_status  # noqa: E501

    read status of the specified CustomResourceDefinition  # noqa: E501
    Synchronous by default; pass async_req=True for an asynchronous call,
    in which case the returned thread's ``get()`` yields the result.

    >>> thread = api.read_custom_resource_definition_status(name, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the CustomResourceDefinition (required)
    :param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
                             object without reading/decoding the response
                             body. Default is True.
    :param _request_timeout: total request timeout in seconds, or a
                             (connection, read) tuple of timeouts.
    :return: V1CustomResourceDefinition, or the request thread when the
             method is called asynchronously.
    """
    # Delegate to the *_with_http_info variant, requesting only the decoded
    # response object (not the (data, status, headers) tuple).
    kwargs.update(_return_http_data_only=True)
    return self.read_custom_resource_definition_status_with_http_info(name, **kwargs)  # noqa: E501
def read_custom_resource_definition_status_with_http_info(self, name, **kwargs):  # noqa: E501
    """read_custom_resource_definition_status  # noqa: E501

    read status of the specified CustomResourceDefinition  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.read_custom_resource_definition_status_with_http_info(name, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the CustomResourceDefinition (required)
    :param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: tuple(V1CustomResourceDefinition, status_code(int), headers(HTTPHeaderDict))
             If the method is called asynchronously,
             returns the request thread.
    """
    # NOTE: locals() must remain the first statement so it captures exactly
    # the call arguments ('self', 'name', 'kwargs') and no later locals.
    local_var_params = locals()

    # Keyword arguments accepted by this endpoint, plus the generic
    # request-control options shared by every generated method.
    all_params = [
        'name',
        'pretty'
    ]
    all_params.extend(
        [
            'async_req',
            '_return_http_data_only',
            '_preload_content',
            '_request_timeout'
        ]
    )

    # Reject unknown keyword arguments, then flatten the accepted ones into
    # local_var_params so all parameters are looked up uniformly below.
    for key, val in six.iteritems(local_var_params['kwargs']):
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method read_custom_resource_definition_status" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']
    # verify the required parameter 'name' is set
    if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
                                                   local_var_params['name'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `name` when calling `read_custom_resource_definition_status`")  # noqa: E501

    collection_formats = {}

    # {name} in the URL template below is filled from path_params.
    path_params = {}
    if 'name' in local_var_params:
        path_params['name'] = local_var_params['name']  # noqa: E501

    query_params = []
    if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
        query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501

    header_params = {}

    form_params = []
    local_var_files = {}

    # GET request: no request body is sent.
    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor'])  # noqa: E501

    # Authentication setting
    auth_settings = ['BearerToken']  # noqa: E501

    return self.api_client.call_api(
        '/apis/apiextensions.k8s.io/v1/customresourcedefinitions/{name}/status', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='V1CustomResourceDefinition',  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats)
def replace_custom_resource_definition(self, name, body, **kwargs):  # noqa: E501
    """replace_custom_resource_definition  # noqa: E501

    replace the specified CustomResourceDefinition  # noqa: E501
    Synchronous by default; pass async_req=True for an asynchronous call,
    in which case the returned thread's ``get()`` yields the result.

    >>> thread = api.replace_custom_resource_definition(name, body, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the CustomResourceDefinition (required)
    :param V1CustomResourceDefinition body: (required)
    :param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
    :param str dry_run: When present, indicates that modifications should not be persisted. Valid values are: - All: all dry run stages will be processed
    :param str field_manager: name associated with the actor or entity
                              making these changes (printable, fewer than
                              128 characters).
    :param str field_validation: how the server handles unknown or
                                 duplicate fields in the request body:
                                 Ignore, Warn or Strict.
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
                             object without reading/decoding the response
                             body. Default is True.
    :param _request_timeout: total request timeout in seconds, or a
                             (connection, read) tuple of timeouts.
    :return: V1CustomResourceDefinition, or the request thread when the
             method is called asynchronously.
    """
    # Delegate to the *_with_http_info variant, requesting only the decoded
    # response object (not the (data, status, headers) tuple).
    kwargs.update(_return_http_data_only=True)
    return self.replace_custom_resource_definition_with_http_info(name, body, **kwargs)  # noqa: E501
def replace_custom_resource_definition_with_http_info(self, name, body, **kwargs):  # noqa: E501
    """replace_custom_resource_definition  # noqa: E501

    replace the specified CustomResourceDefinition  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.replace_custom_resource_definition_with_http_info(name, body, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the CustomResourceDefinition (required)
    :param V1CustomResourceDefinition body: (required)
    :param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
    :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
    :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
    :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: tuple(V1CustomResourceDefinition, status_code(int), headers(HTTPHeaderDict))
             If the method is called asynchronously,
             returns the request thread.
    """
    # NOTE: locals() must remain the first statement so it captures exactly
    # the call arguments ('self', 'name', 'body', 'kwargs') and no later
    # locals.
    local_var_params = locals()

    # Keyword arguments accepted by this endpoint, plus the generic
    # request-control options shared by every generated method.
    all_params = [
        'name',
        'body',
        'pretty',
        'dry_run',
        'field_manager',
        'field_validation'
    ]
    all_params.extend(
        [
            'async_req',
            '_return_http_data_only',
            '_preload_content',
            '_request_timeout'
        ]
    )

    # Reject unknown keyword arguments, then flatten the accepted ones into
    # local_var_params so all parameters are looked up uniformly below.
    for key, val in six.iteritems(local_var_params['kwargs']):
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method replace_custom_resource_definition" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']
    # verify the required parameter 'name' is set
    if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
                                                   local_var_params['name'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `name` when calling `replace_custom_resource_definition`")  # noqa: E501
    # verify the required parameter 'body' is set
    if self.api_client.client_side_validation and ('body' not in local_var_params or  # noqa: E501
                                                   local_var_params['body'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `body` when calling `replace_custom_resource_definition`")  # noqa: E501

    collection_formats = {}

    # {name} in the URL template below is filled from path_params.
    path_params = {}
    if 'name' in local_var_params:
        path_params['name'] = local_var_params['name']  # noqa: E501

    # Optional query parameters are only sent when explicitly provided.
    query_params = []
    if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
        query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
    if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None:  # noqa: E501
        query_params.append(('dryRun', local_var_params['dry_run']))  # noqa: E501
    if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None:  # noqa: E501
        query_params.append(('fieldManager', local_var_params['field_manager']))  # noqa: E501
    if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None:  # noqa: E501
        query_params.append(('fieldValidation', local_var_params['field_validation']))  # noqa: E501

    header_params = {}

    form_params = []
    local_var_files = {}

    # PUT request: the replacement object is sent as the request body.
    body_params = None
    if 'body' in local_var_params:
        body_params = local_var_params['body']
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor'])  # noqa: E501

    # Authentication setting
    auth_settings = ['BearerToken']  # noqa: E501

    return self.api_client.call_api(
        '/apis/apiextensions.k8s.io/v1/customresourcedefinitions/{name}', 'PUT',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='V1CustomResourceDefinition',  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats)
def replace_custom_resource_definition_status(self, name, body, **kwargs):  # noqa: E501
    """replace_custom_resource_definition_status  # noqa: E501

    replace status of the specified CustomResourceDefinition  # noqa: E501
    Synchronous by default; pass async_req=True for an asynchronous call,
    in which case the returned thread's ``get()`` yields the result.

    >>> thread = api.replace_custom_resource_definition_status(name, body, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the CustomResourceDefinition (required)
    :param V1CustomResourceDefinition body: (required)
    :param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
    :param str dry_run: When present, indicates that modifications should not be persisted. Valid values are: - All: all dry run stages will be processed
    :param str field_manager: name associated with the actor or entity
                              making these changes (printable, fewer than
                              128 characters).
    :param str field_validation: how the server handles unknown or
                                 duplicate fields in the request body:
                                 Ignore, Warn or Strict.
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
                             object without reading/decoding the response
                             body. Default is True.
    :param _request_timeout: total request timeout in seconds, or a
                             (connection, read) tuple of timeouts.
    :return: V1CustomResourceDefinition, or the request thread when the
             method is called asynchronously.
    """
    # Delegate to the *_with_http_info variant, requesting only the decoded
    # response object (not the (data, status, headers) tuple).
    kwargs.update(_return_http_data_only=True)
    return self.replace_custom_resource_definition_status_with_http_info(name, body, **kwargs)  # noqa: E501
def replace_custom_resource_definition_status_with_http_info(self, name, body, **kwargs):  # noqa: E501
    """replace_custom_resource_definition_status  # noqa: E501

    replace status of the specified CustomResourceDefinition  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.replace_custom_resource_definition_status_with_http_info(name, body, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the CustomResourceDefinition (required)
    :param V1CustomResourceDefinition body: (required)
    :param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
    :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
    :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
    :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: tuple(V1CustomResourceDefinition, status_code(int), headers(HTTPHeaderDict))
             If the method is called asynchronously,
             returns the request thread.
    """
    # NOTE: locals() must remain the first statement so it captures exactly
    # the call arguments ('self', 'name', 'body', 'kwargs') and no later
    # locals.
    local_var_params = locals()

    # Keyword arguments accepted by this endpoint, plus the generic
    # request-control options shared by every generated method.
    all_params = [
        'name',
        'body',
        'pretty',
        'dry_run',
        'field_manager',
        'field_validation'
    ]
    all_params.extend(
        [
            'async_req',
            '_return_http_data_only',
            '_preload_content',
            '_request_timeout'
        ]
    )

    # Reject unknown keyword arguments, then flatten the accepted ones into
    # local_var_params so all parameters are looked up uniformly below.
    for key, val in six.iteritems(local_var_params['kwargs']):
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method replace_custom_resource_definition_status" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']
    # verify the required parameter 'name' is set
    if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
                                                   local_var_params['name'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `name` when calling `replace_custom_resource_definition_status`")  # noqa: E501
    # verify the required parameter 'body' is set
    if self.api_client.client_side_validation and ('body' not in local_var_params or  # noqa: E501
                                                   local_var_params['body'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `body` when calling `replace_custom_resource_definition_status`")  # noqa: E501

    collection_formats = {}

    # {name} in the URL template below is filled from path_params.
    path_params = {}
    if 'name' in local_var_params:
        path_params['name'] = local_var_params['name']  # noqa: E501

    # Optional query parameters are only sent when explicitly provided.
    query_params = []
    if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
        query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
    if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None:  # noqa: E501
        query_params.append(('dryRun', local_var_params['dry_run']))  # noqa: E501
    if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None:  # noqa: E501
        query_params.append(('fieldManager', local_var_params['field_manager']))  # noqa: E501
    if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None:  # noqa: E501
        query_params.append(('fieldValidation', local_var_params['field_validation']))  # noqa: E501

    header_params = {}

    form_params = []
    local_var_files = {}

    # PUT request: the replacement status object is sent as the request body.
    body_params = None
    if 'body' in local_var_params:
        body_params = local_var_params['body']
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor'])  # noqa: E501

    # Authentication setting
    auth_settings = ['BearerToken']  # noqa: E501

    return self.api_client.call_api(
        '/apis/apiextensions.k8s.io/v1/customresourcedefinitions/{name}/status', 'PUT',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='V1CustomResourceDefinition',  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats)
| ApiextensionsV1Api |
python | django__django | tests/files/tests.py | {
"start": 15048,
"end": 19215
} | class ____(unittest.TestCase):
def test_file_move_overwrite(self):
handle_a, self.file_a = tempfile.mkstemp()
handle_b, self.file_b = tempfile.mkstemp()
# file_move_safe() raises FileExistsError if the destination file
# exists and allow_overwrite is False.
msg = r"Destination file .* exists and allow_overwrite is False\."
with self.assertRaisesRegex(FileExistsError, msg):
file_move_safe(self.file_a, self.file_b, allow_overwrite=False)
# should allow it and continue on if allow_overwrite is True
self.assertIsNone(
file_move_safe(self.file_a, self.file_b, allow_overwrite=True)
)
os.close(handle_a)
os.close(handle_b)
def test_file_move_permissionerror(self):
"""
file_move_safe() ignores PermissionError thrown by copystat() and
copymode().
For example, PermissionError can be raised when the destination
filesystem is CIFS, or when relabel is disabled by SELinux across
filesystems.
"""
permission_error = PermissionError(errno.EPERM, "msg")
os_error = OSError("msg")
handle_a, self.file_a = tempfile.mkstemp()
handle_b, self.file_b = tempfile.mkstemp()
handle_c, self.file_c = tempfile.mkstemp()
try:
# This exception is required to reach the copystat() call in
# file_safe_move().
with mock.patch("django.core.files.move.os.rename", side_effect=OSError()):
# An error besides PermissionError isn't ignored.
with mock.patch(
"django.core.files.move.copystat", side_effect=os_error
):
with self.assertRaises(OSError):
file_move_safe(self.file_a, self.file_b, allow_overwrite=True)
# When copystat() throws PermissionError, copymode() error
# besides PermissionError isn't ignored.
with mock.patch(
"django.core.files.move.copystat", side_effect=permission_error
):
with mock.patch(
"django.core.files.move.copymode", side_effect=os_error
):
with self.assertRaises(OSError):
file_move_safe(
self.file_a, self.file_b, allow_overwrite=True
)
# PermissionError raised by copystat() is ignored.
with mock.patch(
"django.core.files.move.copystat", side_effect=permission_error
):
self.assertIsNone(
file_move_safe(self.file_a, self.file_b, allow_overwrite=True)
)
# PermissionError raised by copymode() is ignored too.
with mock.patch(
"django.core.files.move.copymode", side_effect=permission_error
):
self.assertIsNone(
file_move_safe(
self.file_c, self.file_b, allow_overwrite=True
)
)
finally:
os.close(handle_a)
os.close(handle_b)
os.close(handle_c)
def test_file_move_ensure_truncation(self):
with tempfile.NamedTemporaryFile(delete=False) as src:
src.write(b"content")
src_name = src.name
self.addCleanup(
lambda: os.remove(src_name) if os.path.exists(src_name) else None
)
with tempfile.NamedTemporaryFile(delete=False) as dest:
dest.write(b"This is a longer content.")
dest_name = dest.name
self.addCleanup(os.remove, dest_name)
with mock.patch("django.core.files.move.os.rename", side_effect=OSError()):
file_move_safe(src_name, dest_name, allow_overwrite=True)
with open(dest_name, "rb") as f:
content = f.read()
self.assertEqual(content, b"content")
| FileMoveSafeTests |
python | PrefectHQ__prefect | src/prefect/concurrency/v1/services.py | {
"start": 583,
"end": 715
} | class ____(Exception):
"""Raised when an error occurs while acquiring concurrency slots."""
| ConcurrencySlotAcquisitionServiceError |
python | numba__numba | numba/core/types/containers.py | {
"start": 18268,
"end": 18885
} | class ____(SimpleIteratorType):
def __init__(self, iterable):
self.parent = iterable.parent
self.iterable = iterable
yield_type = iterable.yield_type
name = "iter[{}->{}]".format(iterable.parent, yield_type)
super(ListTypeIteratorType, self).__init__(name, yield_type)
def _sentry_forbidden_types(key, value):
# Forbids List and Set for now
if isinstance(key, (Set, List)):
raise TypingError("{} as key is forbidden".format(key))
if isinstance(value, (Set, List)):
raise TypingError("{} as value is forbidden".format(value))
| ListTypeIteratorType |
python | more-itertools__more-itertools | tests/test_recipes.py | {
"start": 3746,
"end": 4343
} | class ____(TestCase):
"""Tests for ``nth()``"""
def test_basic(self):
"""Make sure the nth item is returned"""
l = range(10)
for i, v in enumerate(l):
self.assertEqual(mi.nth(l, i), v)
def test_default(self):
"""Ensure a default value is returned when nth item not found"""
l = range(3)
self.assertEqual(mi.nth(l, 100, "zebra"), "zebra")
def test_negative_item_raises(self):
"""Ensure asking for a negative item raises an exception"""
self.assertRaises(ValueError, lambda: mi.nth(range(10), -3))
| NthTests |
python | python-markdown__markdown | markdown/extensions/tables.py | {
"start": 8020,
"end": 8739
} | class ____(Extension):
""" Add tables to Markdown. """
def __init__(self, **kwargs):
self.config = {
'use_align_attribute': [False, 'True to use align attribute instead of style.'],
}
""" Default configuration options. """
super().__init__(**kwargs)
def extendMarkdown(self, md):
""" Add an instance of `TableProcessor` to `BlockParser`. """
if '|' not in md.ESCAPED_CHARS:
md.ESCAPED_CHARS.append('|')
processor = TableProcessor(md.parser, self.getConfigs())
md.parser.blockprocessors.register(processor, 'table', 75)
def makeExtension(**kwargs): # pragma: no cover
return TableExtension(**kwargs)
| TableExtension |
python | google__jax | tests/pallas/mgpu_attention_test.py | {
"start": 1430,
"end": 6236
} | class ____(jtu.JaxTestCase):
def setUp(self):
super().setUp()
if attention_mgpu is None:
self.skipTest("Mosaic GPU not available.")
if (not jtu.test_device_matches(["cuda"]) or
not jtu.is_cuda_compute_capability_equal("9.0")):
self.skipTest("Only works on GPU with capability sm90a")
self.enter_context(pallas_call._PALLAS_USE_MOSAIC_GPU(True))
@parameterized.product(
batch_size=(1, 4),
q_seq_len=(4096,),
kv_seq_len=(4096,),
num_q_and_kv_heads=(
(4, 1), # MQA
(6, 3), # GQA
(4, 4),
), # MHA
head_dim=(64, 128, 256),
blocks=((64, 64), (64, 128), (128, 64)),
attention_impl=(
attention_mgpu.attention,
attention_mgpu.attention_with_pipeline_emitter,
),
save_residuals=(True,),
causal=(True, False,),
)
def test_flash_attention(
self,
batch_size,
q_seq_len,
kv_seq_len,
num_q_and_kv_heads,
head_dim,
blocks,
attention_impl,
save_residuals,
causal,
):
assert cuda_versions is not None
cuda_runtime_version = cuda_versions.cuda_runtime_get_version()
# TODO(pobudzey): Undo when we upgrade to cuda 12.9.1.
if causal and (cuda_runtime_version >= 12080 and cuda_runtime_version < 12091):
self.skipTest("Skipping because of ptxas miscompilation.")
if causal and attention_impl == attention_mgpu.attention_with_pipeline_emitter:
self.skipTest("Pipeline emitter does not support causal attention.")
if head_dim >= 256 and max(blocks) >= 128:
self.skipTest("Head dim too large for block sizes.")
num_q_heads, num_kv_heads = num_q_and_kv_heads
block_q, block_kv = blocks
k1, k2, k3 = jax.random.split(jax.random.key(42), 3)
q = jax.random.normal(k1, (batch_size, q_seq_len, num_q_heads, head_dim), jnp.float16)
k = jax.random.normal(k2, (batch_size, kv_seq_len, num_kv_heads, head_dim), jnp.float16)
v = jax.random.normal(k3, (batch_size, kv_seq_len, num_kv_heads, head_dim), jnp.float16)
out, *res = attention_impl(
q,
k,
v,
attention_mgpu.TuningConfig(
block_q=block_q, block_kv=block_kv, max_concurrent_steps=2, causal=causal
),
save_residuals=save_residuals,
)
out_ref, *res_ref = attention_mgpu.attention_reference(
q, k, v, causal=causal, save_residuals=save_residuals)
np.testing.assert_allclose(out, out_ref, atol=2e-3, rtol=1e-3)
if save_residuals:
(lse,) = res[0]
(lse_ref,) = res_ref[0]
np.testing.assert_allclose(lse, lse_ref, atol=2e-3, rtol=1e-3)
@parameterized.product(
batch_size=(3,),
seq_lens=((512, 512), (3584, 4096)),
num_q_and_kv_heads=(
(4, 4), # MHA
(4, 1), # MQA
(6, 3), # GQA
),
bwd_blocks = (
(64, 64, 64, 64),
(64, 128, 128, 64),
(128, 128, 128, 128),
),
head_dim=(64, 128, 256),
)
def test_bwd_flash_attention(
self,
batch_size,
seq_lens,
num_q_and_kv_heads,
bwd_blocks,
head_dim,
):
num_q_heads, num_kv_heads = num_q_and_kv_heads
kv_seq_len, q_seq_len = seq_lens
block_q_dq, block_kv_dq, block_q_dkv, block_kv_dkv = bwd_blocks
compute_wgs = 2 if head_dim <= 128 else 1
k1, k2, k3 = jax.random.split(jax.random.key(42), 3)
q = jax.random.normal(k1, (batch_size, q_seq_len, num_q_heads, head_dim), jnp.float16)
k = jax.random.normal(k2, (batch_size, kv_seq_len, num_kv_heads, head_dim), jnp.float16)
v = jax.random.normal(k3, (batch_size, kv_seq_len, num_kv_heads, head_dim), jnp.float16)
def f(q, k, v):
return attention_mgpu.attention(
q,
k,
v,
attention_mgpu.TuningConfig(
block_q=block_q_dq, block_kv=block_kv_dq,
max_concurrent_steps=2, compute_wgs_bwd=compute_wgs,
block_q_dkv=block_q_dkv, block_kv_dkv=block_kv_dkv,
block_q_dq=block_q_dq, block_kv_dq=block_kv_dq,
)
).sum()
def f_ref(q, k, v):
return attention_mgpu.attention_reference(q, k, v).sum()
try:
# TODO(pobudzey): Replace with `jtu.check_grads` when it's fixed.
dq, dk, dv = jax.grad(f, argnums=(0, 1, 2))(q, k, v)
dq_ref, dk_ref, dv_ref = jax.grad(f_ref, argnums=(0, 1, 2))(q, k, v)
self.assertAllClose(dq, dq_ref, atol=7e-2)
self.assertAllClose(dk, dk_ref, atol=7e-2)
self.assertAllClose(dv, dv_ref, atol=5e-2)
except ValueError as e:
if "exceeds available shared memory" in e.args[0]:
self.skipTest("Not enough SMEM for this configuration.")
if __name__ == "__main__":
absltest.main(testLoader=jtu.JaxTestLoader())
| FlashAttentionTestCase |
python | huggingface__transformers | src/transformers/models/dinov3_convnext/modeling_dinov3_convnext.py | {
"start": 3671,
"end": 5784
} | class ____(nn.Module):
"""This corresponds to the `Block` class in the original implementation.
There are two equivalent implementations:
1) DwConv, LayerNorm (channels_first), Conv, GELU, Conv (all in (N, C, H, W) format)
2) DwConv, Permute, LayerNorm (channels_last), Linear, GELU, Linear, Permute
The authors used (2) as they find it slightly faster in PyTorch.
Args:
config ([`DINOv3ConvNextConfig`]):
Model config.
channels (`int`):
Number of input (and output) channels.
drop_path (`float`):
Drop path rate. Default: 0.0.
"""
def __init__(self, config: DINOv3ConvNextConfig, channels: int, drop_path: float = 0.0):
super().__init__()
self.depthwise_conv = nn.Conv2d(channels, channels, kernel_size=7, padding=3, groups=channels)
self.layer_norm = DINOv3ConvNextLayerNorm(channels, eps=config.layer_norm_eps)
self.pointwise_conv1 = nn.Linear(channels, 4 * channels) # can be seen as a 1x1 conv
self.activation_fn = ACT2FN[config.hidden_act]
self.pointwise_conv2 = nn.Linear(4 * channels, channels) # can be seen as a 1x1 conv
self.gamma = nn.Parameter(torch.full((channels,), config.layer_scale_init_value), requires_grad=True)
self.drop_path = DINOv3ConvNextDropPath(drop_path) if drop_path > 0.0 else nn.Identity()
def forward(self, features: torch.Tensor) -> torch.Tensor:
"""
Args:
features: Tensor of shape (batch_size, channels, height, width)
"""
residual = features
features = self.depthwise_conv(features)
features = features.permute(0, 2, 3, 1) # to channels last
features = self.layer_norm(features)
features = self.pointwise_conv1(features)
features = self.activation_fn(features)
features = self.pointwise_conv2(features)
features = features * self.gamma
features = features.permute(0, 3, 1, 2) # back to channels first
features = residual + self.drop_path(features)
return features
| DINOv3ConvNextLayer |
python | getsentry__sentry | tests/sentry/integrations/github/test_post_install.py | {
"start": 255,
"end": 6032
} | class ____(IntegrationTestCase):
provider = GitHubIntegrationProvider
def setUp(self):
super().setUp()
self.integration = self.create_integration(
organization=self.organization,
provider="github",
name="test-org",
external_id="123456",
metadata={"account_id": "789"},
)
@patch("sentry.integrations.services.repository.repository_service.get_repositories")
@patch("sentry.integrations.tasks.migrate_repo.migrate_repo.apply_async")
@patch("sentry.integrations.github.tasks.link_all_repos.link_all_repos.apply_async")
@patch("sentry.integrations.github.tasks.codecov_account_link.codecov_account_link.apply_async")
@patch("sentry.options.get")
def test_post_install_triggers_codecov_when_app_ids_match(
self,
mock_options_get,
mock_codecov_task,
mock_link_repos,
mock_migrate_repo,
mock_get_repositories,
):
# Set up options to return the matching app ID
mock_options_get.return_value = "app_1"
provider = GitHubIntegrationProvider()
provider.post_install(
integration=self.integration, organization=self.organization, extra={"app_id": "app_1"}
)
mock_codecov_task.assert_called_once_with(
kwargs={
"integration_id": self.integration.id,
"organization_id": self.organization.id,
}
)
@patch("sentry.integrations.services.repository.repository_service.get_repositories")
@patch("sentry.integrations.tasks.migrate_repo.migrate_repo.apply_async")
@patch("sentry.integrations.github.tasks.link_all_repos.link_all_repos.apply_async")
@patch("sentry.integrations.github.tasks.codecov_account_link.codecov_account_link.apply_async")
@patch("sentry.options.get")
def test_post_install_skips_codecov_when_app_ids_dont_match(
self,
mock_options_get,
mock_codecov_task,
mock_link_repos,
mock_migrate_repo,
mock_get_repositories,
):
# Set up options to return a different app ID
mock_options_get.return_value = "different_app_id"
provider = GitHubIntegrationProvider()
provider.post_install(
integration=self.integration, organization=self.organization, extra={"app_id": "app_1"}
)
mock_codecov_task.assert_not_called()
@patch(
"sentry.integrations.models.organization_integration.OrganizationIntegration.objects.filter"
)
@patch("sentry.integrations.services.repository.repository_service.get_repositories")
@patch("sentry.integrations.tasks.migrate_repo.migrate_repo.apply_async")
@patch("sentry.integrations.github.tasks.link_all_repos.link_all_repos.apply_async")
@patch("sentry.integrations.github.tasks.codecov_account_link.codecov_account_link.apply_async")
@patch("sentry.options.get")
def test_post_install_skips_codecov_when_org_integration_missing(
self,
mock_options_get,
mock_codecov_task,
mock_link_repos,
mock_migrate_repo,
mock_get_repositories,
mock_org_integration_filter,
):
mock_options_get.return_value = "app_1"
mock_queryset = MagicMock()
mock_queryset.first.return_value = None
mock_org_integration_filter.return_value = mock_queryset
provider = GitHubIntegrationProvider()
provider.post_install(
integration=self.integration, organization=self.organization, extra={"app_id": "app_1"}
)
mock_codecov_task.assert_not_called()
mock_org_integration_filter.assert_called_once_with(
integration=self.integration, organization_id=self.organization.id
)
@patch("sentry.integrations.services.repository.repository_service.get_repositories")
@patch("sentry.integrations.tasks.migrate_repo.migrate_repo.apply_async")
@patch("sentry.integrations.github.tasks.link_all_repos.link_all_repos.apply_async")
@patch("sentry.integrations.github.tasks.codecov_account_link.codecov_account_link.apply_async")
@patch("sentry.options.get")
def test_post_install_migrates_existing_repos(
self,
mock_options_get,
mock_codecov_task,
mock_link_repos,
mock_migrate_repo,
mock_get_repositories,
):
mock_repo1 = MagicMock()
mock_repo1.id = 1
mock_repo2 = MagicMock()
mock_repo2.id = 2
mock_get_repositories.return_value = [mock_repo1, mock_repo2]
mock_options_get.return_value = "app_1"
provider = GitHubIntegrationProvider()
provider.post_install(
integration=self.integration, organization=self.organization, extra={"app_id": "app_1"}
)
mock_codecov_task.assert_called_once()
assert mock_migrate_repo.call_count == 2
expected_calls = [
call(
kwargs={
"repo_id": 1,
"integration_id": self.integration.id,
"organization_id": self.organization.id,
}
),
call(
kwargs={
"repo_id": 2,
"integration_id": self.integration.id,
"organization_id": self.organization.id,
}
),
]
mock_migrate_repo.assert_has_calls(expected_calls, any_order=True)
mock_link_repos.assert_called_once_with(
kwargs={
"integration_key": "github",
"integration_id": self.integration.id,
"organization_id": self.organization.id,
}
)
| GitHubIntegrationPostInstallTest |
python | conda__conda | conda/models/records.py | {
"start": 7341,
"end": 7740
} | class ____(PathData):
# TODO: sha256 and size_in_bytes should be required for all PathType.hardlink, but not for softlink and directory
sha256 = StringField(required=False, nullable=True)
size_in_bytes = IntegerField(required=False, nullable=True)
inode_paths = ListField(str, required=False, nullable=True)
sha256_in_prefix = StringField(required=False, nullable=True)
| PathDataV1 |
python | numba__numba | numba/core/typing/builtins.py | {
"start": 13421,
"end": 13541
} | class ____(OrderedCmpOp):
pass
# more specific overloads should be registered first
@infer_global(operator.eq)
| CmpOpGe |
python | ray-project__ray | python/ray/serve/_private/application_state.py | {
"start": 2427,
"end": 2598
} | class ____(Enum):
"""Status of the build application task."""
NO_TASK_IN_PROGRESS = 1
IN_PROGRESS = 2
SUCCEEDED = 3
FAILED = 4
@dataclass
| BuildAppStatus |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 96123,
"end": 96477
} | class ____(sgqlc.types.Enum):
"""The possible digest algorithms used to sign SAML requests for an
identity provider.
Enumeration Choices:
* `SHA1`: SHA1
* `SHA256`: SHA256
* `SHA384`: SHA384
* `SHA512`: SHA512
"""
__schema__ = github_schema
__choices__ = ("SHA1", "SHA256", "SHA384", "SHA512")
| SamlDigestAlgorithm |
python | readthedocs__readthedocs.org | readthedocs/storage/s3_storage.py | {
"start": 854,
"end": 1880
} | class ____(OverrideHostnameMixin, BuildMediaStorageMixin, S3Boto3Storage):
"""An AWS S3 Storage backend for build artifacts."""
bucket_name = getattr(settings, "S3_MEDIA_STORAGE_BUCKET", None)
override_hostname = getattr(settings, "S3_MEDIA_STORAGE_OVERRIDE_HOSTNAME", None)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
if not self.bucket_name:
raise ImproperlyConfigured(
"AWS S3 not configured correctly. Ensure S3_MEDIA_STORAGE_BUCKET is defined.",
)
@cached_property
def _rclone(self):
provider = settings.S3_PROVIDER
return RCloneS3Remote(
bucket_name=self.bucket_name,
access_key_id=self.access_key,
secret_access_key=self.secret_key,
session_token=self.security_token,
region=self.region_name or "",
acl=self.default_acl,
endpoint=self.endpoint_url,
provider=provider,
)
| S3BuildMediaStorage |
python | numba__numba | numba/core/types/iterators.py | {
"start": 2912,
"end": 3534
} | class ____(SimpleIteratorType):
"""
Type class for iterators of array and buffer objects.
"""
def __init__(self, array_type):
self.array_type = array_type
name = "iter(%s)" % (self.array_type,)
nd = array_type.ndim
if nd == 0:
raise TypingError("iteration over a 0-d array")
elif nd == 1:
yield_type = array_type.dtype
else:
# iteration semantics leads to A order layout
yield_type = array_type.copy(ndim=array_type.ndim - 1, layout='A')
super(ArrayIterator, self).__init__(name, yield_type)
| ArrayIterator |
python | pandas-dev__pandas | pandas/io/excel/_xlrd.py | {
"start": 459,
"end": 4562
} | class ____(BaseExcelReader["Book"]):
@doc(storage_options=_shared_docs["storage_options"])
def __init__(
self,
filepath_or_buffer,
storage_options: StorageOptions | None = None,
engine_kwargs: dict | None = None,
) -> None:
"""
Reader using xlrd engine.
Parameters
----------
filepath_or_buffer : str, path object or Workbook
Object to be parsed.
{storage_options}
engine_kwargs : dict, optional
Arbitrary keyword arguments passed to excel engine.
"""
err_msg = "Install xlrd >= 2.0.1 for xls Excel support"
import_optional_dependency("xlrd", extra=err_msg)
super().__init__(
filepath_or_buffer,
storage_options=storage_options,
engine_kwargs=engine_kwargs,
)
@property
def _workbook_class(self) -> type[Book]:
from xlrd import Book
return Book
def load_workbook(self, filepath_or_buffer, engine_kwargs) -> Book:
from xlrd import open_workbook
if hasattr(filepath_or_buffer, "read"):
data = filepath_or_buffer.read()
return open_workbook(file_contents=data, **engine_kwargs)
else:
return open_workbook(filepath_or_buffer, **engine_kwargs)
@property
def sheet_names(self):
return self.book.sheet_names()
def get_sheet_by_name(self, name):
self.raise_if_bad_sheet_by_name(name)
return self.book.sheet_by_name(name)
def get_sheet_by_index(self, index):
self.raise_if_bad_sheet_by_index(index)
return self.book.sheet_by_index(index)
def get_sheet_data(
self, sheet, file_rows_needed: int | None = None
) -> list[list[Scalar]]:
from xlrd import (
XL_CELL_BOOLEAN,
XL_CELL_DATE,
XL_CELL_ERROR,
XL_CELL_NUMBER,
xldate,
)
epoch1904 = self.book.datemode
def _parse_cell(cell_contents, cell_typ):
"""
converts the contents of the cell into a pandas appropriate object
"""
if cell_typ == XL_CELL_DATE:
# Use the newer xlrd datetime handling.
try:
cell_contents = xldate.xldate_as_datetime(cell_contents, epoch1904)
except OverflowError:
return cell_contents
# Excel doesn't distinguish between dates and time,
# so we treat dates on the epoch as times only.
# Also, Excel supports 1900 and 1904 epochs.
year = (cell_contents.timetuple())[0:3]
if (not epoch1904 and year == (1899, 12, 31)) or (
epoch1904 and year == (1904, 1, 1)
):
cell_contents = time(
cell_contents.hour,
cell_contents.minute,
cell_contents.second,
cell_contents.microsecond,
)
elif cell_typ == XL_CELL_ERROR:
cell_contents = np.nan
elif cell_typ == XL_CELL_BOOLEAN:
cell_contents = bool(cell_contents)
elif cell_typ == XL_CELL_NUMBER:
# GH5394 - Excel 'numbers' are always floats
# it's a minimal perf hit and less surprising
if math.isfinite(cell_contents):
# GH54564 - don't attempt to convert NaN/Inf
val = int(cell_contents)
if val == cell_contents:
cell_contents = val
return cell_contents
nrows = sheet.nrows
if file_rows_needed is not None:
nrows = min(nrows, file_rows_needed)
return [
[
_parse_cell(value, typ)
for value, typ in zip(
sheet.row_values(i), sheet.row_types(i), strict=True
)
]
for i in range(nrows)
]
| XlrdReader |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 295728,
"end": 296204
} | class ____(sgqlc.types.Input):
"""Ordering options for security advisory connections"""
__schema__ = github_schema
__field_names__ = ("field", "direction")
field = sgqlc.types.Field(sgqlc.types.non_null(SecurityAdvisoryOrderField), graphql_name="field")
"""The field to order security advisories by."""
direction = sgqlc.types.Field(sgqlc.types.non_null(OrderDirection), graphql_name="direction")
"""The ordering direction."""
| SecurityAdvisoryOrder |
python | kamyu104__LeetCode-Solutions | Python/maximize-cyclic-partition-score.py | {
"start": 38,
"end": 961
} | class ____(object):
def maximumScore(self, nums, k):
"""
:type nums: List[int]
:type k: int
:rtype: int
"""
def best_time_to_buy_and_sell_stock_v(base):
dp = [0]*(len(nums)+1)
result = 0
for i in xrange(k):
x, y = float("-inf"), float("-inf")
new_dp = [float("-inf")]*(len(nums)+1)
for j in xrange(i, len(nums)):
x, y = max(x, dp[j]-nums[(base+j)%len(nums)]), max(y, dp[j]+nums[(base+j)%len(nums)])
new_dp[j+1] = max(new_dp[j], x+nums[(base+j)%len(nums)], y-nums[(base+j)%len(nums)])
dp = new_dp
result = max(result, dp[-1])
return result
i = min(xrange(len(nums)), key=lambda x: nums[x])
return max(best_time_to_buy_and_sell_stock_v(i), best_time_to_buy_and_sell_stock_v(i+1))
| Solution |
python | scikit-learn__scikit-learn | doc/conf.py | {
"start": 21377,
"end": 22392
} | class ____:
"""Sort example gallery by title of subsection.
Assumes README.txt exists for all subsections and uses the subsection with
dashes, '---', as the adornment.
"""
def __init__(self, src_dir):
self.src_dir = src_dir
self.regex = re.compile(r"^([\w ]+)\n-", re.MULTILINE)
def __repr__(self):
return "<%s>" % (self.__class__.__name__,)
def __call__(self, directory):
src_path = os.path.normpath(os.path.join(self.src_dir, directory))
# Forces Release Highlights to the top
if os.path.basename(src_path) == "release_highlights":
return "0"
readme = os.path.join(src_path, "README.txt")
try:
with open(readme, "r") as f:
content = f.read()
except FileNotFoundError:
return directory
title_match = self.regex.search(content)
if title_match is not None:
return title_match.group(1)
return directory
| SubSectionTitleOrder |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-braintree/source_braintree/schemas/transaction.py | {
"start": 495,
"end": 770
} | class ____(BaseModel):
disbursement_date: date
funds_held: bool
settlement_amount: Decimal
settlement_base_currency_exchange_rate: Decimal
settlement_currency_exchange_rate: Decimal
settlement_currency_iso_code: str
success: bool
| DisbursementDetails |
python | falconry__falcon | falcon/errors.py | {
"start": 40300,
"end": 43082
} | class ____(HTTPError):
"""414 URI Too Long.
The server is refusing to service the request because the request-
target is longer than the server is willing to interpret.
This rare condition is only likely to occur when a client has
improperly converted a POST request to a GET request with long query
information, when the client has descended into a "black hole" of
redirection (e.g., a redirected URI prefix that points to a suffix
of itself) or when the server is under attack by a client attempting
to exploit potential security holes.
A 414 response is cacheable by default; i.e., unless otherwise
indicated by the method definition or explicit cache controls.
(See also: RFC 7231, Section 6.5.12)
All the arguments are defined as keyword-only.
Keyword Args:
title (str): Error title (default '414 URI Too Long').
description (str): Human-friendly description of the error, along with
a helpful suggestion or two (default ``None``).
headers (dict or list): A ``dict`` of header names and values
to set, or a ``list`` of (*name*, *value*) tuples. Both *name* and
*value* must be of type ``str`` or ``StringType``, and only
character values 0x00 through 0xFF may be used on platforms that
use wide characters.
Note:
The Content-Type header, if present, will be overridden. If
you wish to return custom error messages, you can create
your own HTTP error class, and install an error handler
to convert it into an appropriate HTTP response for the
client
Note:
Falcon can process a list of ``tuple`` slightly faster
than a ``dict``.
href (str): A URL someone can visit to find out more information
(default ``None``). Unicode characters are percent-encoded.
href_text (str): If href is given, use this as the friendly
title/description for the link (default 'API documentation
for this error').
code (int): An internal code that customers can reference in their
support request or to help them when searching for knowledge
base articles related to this error (default ``None``).
"""
def __init__(
self,
*,
title: str | None = None,
description: str | None = None,
headers: HeaderArg | None = None,
**kwargs: HTTPErrorKeywordArguments,
):
super().__init__(
status.HTTP_414,
title=title,
description=description,
headers=headers,
**kwargs, # type: ignore[arg-type]
)
| HTTPUriTooLong |
python | facebook__pyre-check | source/interprocedural_analyses/taint/test/integration/class_methods.py | {
"start": 2878,
"end": 3175
} | class ____:
def __init__(self, value: str) -> None:
self.value = value
@classmethod
def method(cls): ...
def test_obscure_class_method_tito():
o = ObscureClassMethodTito(_test_source())
# TODO(T113911314): False positive
_test_sink(o.method())
| ObscureClassMethodTito |
python | xlwings__xlwings | tests/test_sheet.py | {
"start": 8299,
"end": 8748
} | class ____(unittest.TestCase):
def test_print_area(self):
sheet = xw.Book().sheets[0]
self.assertIsNone(sheet.page_setup.print_area)
sheet.page_setup.print_area = "A1:B2"
self.assertEqual(sheet.page_setup.print_area, "$A$1:$B$2")
sheet.page_setup.print_area = None
self.assertIsNone(sheet.page_setup.print_area)
sheet.book.close()
if __name__ == "__main__":
unittest.main()
| TestPageSetup |
python | sqlalchemy__sqlalchemy | test/engine/test_pool.py | {
"start": 68888,
"end": 70081
} | class ____(PoolTestBase):
def test_creator_callable_outside_noarg(self):
e = testing_engine()
creator = e.pool._creator
try:
conn = creator()
finally:
conn.close()
def test_creator_callable_outside_witharg(self):
e = testing_engine()
creator = e.pool._creator
try:
conn = creator(Mock())
finally:
conn.close()
def test_creator_patching_arg_to_noarg(self):
e = testing_engine()
creator = e.pool._creator
try:
# the creator is the two-arg form
conn = creator(Mock())
finally:
conn.close()
def mock_create():
return creator()
conn = e.connect()
conn.invalidate()
conn.close()
# test that the 'should_wrap_creator' status
# will dynamically switch if the _creator is monkeypatched.
# patch it with a zero-arg form
with patch.object(e.pool, "_creator", mock_create):
conn = e.connect()
conn.invalidate()
conn.close()
conn = e.connect()
conn.close()
| CreatorCompatibilityTest |
python | dask__dask | dask/array/_array_expr/_creation.py | {
"start": 827,
"end": 2375
} | class ____(ArrayExpr):
_parameters = ["start", "stop", "step", "chunks", "like", "dtype", "kwargs"]
_defaults = {"chunks": "auto", "like": None, "dtype": None, "kwargs": None}
@functools.cached_property
def num_rows(self):
return int(max(np.ceil((self.stop - self.start) / self.step), 0))
@functools.cached_property
def dtype(self):
return (
self.operand("dtype")
or np.arange(
self.start,
self.stop,
self.step * self.num_rows if self.num_rows else self.step,
).dtype
)
@functools.cached_property
def _meta(self):
return meta_from_array(self.like, ndim=1, dtype=self.dtype)
@functools.cached_property
def chunks(self):
return normalize_chunks(
self.operand("chunks"), (self.num_rows,), dtype=self.dtype
)
def _layer(self) -> dict:
dsk = {}
elem_count = 0
start, step = self.start, self.step
like = self.like
func = partial(_arange, like=like)
for i, bs in enumerate(self.chunks[0]):
blockstart = start + (elem_count * step)
blockstop = start + ((elem_count + bs) * step)
task = Task(
(self._name, i),
func,
blockstart,
blockstop,
step,
bs,
self.dtype,
)
dsk[(self._name, i)] = task
elem_count += bs
return dsk
| Arange |
python | pytorch__pytorch | tools/linter/adapters/_linter/file_linter.py | {
"start": 469,
"end": 614
} | class ____:
"""How many lines to display before and after an error"""
WINDOW = 5
BEFORE = 2
AFTER = WINDOW - BEFORE - 1
| ErrorLines |
python | spack__spack | lib/spack/spack/vendor/macholib/mach_o.py | {
"start": 27102,
"end": 27861
} | class ____(Structure):
_fields_ = (
("init_address", p_uint64),
("init_module", p_uint64),
("reserved1", p_uint64),
("reserved2", p_uint64),
("reserved3", p_uint64),
("reserved4", p_uint64),
("reserved5", p_uint64),
("reserved6", p_uint64),
)
def describe(self):
s = {}
s["init_address"] = int(self.init_address)
s["init_module"] = int(self.init_module)
s["reserved1"] = int(self.reserved1)
s["reserved2"] = int(self.reserved2)
s["reserved3"] = int(self.reserved3)
s["reserved4"] = int(self.reserved4)
s["reserved5"] = int(self.reserved5)
s["reserved6"] = int(self.reserved6)
return s
| routines_command_64 |
python | fluentpython__example-code-2e | 21-async/mojifinder/bottle.py | {
"start": 126768,
"end": 126882
} | class ____(HTTPError):
def __init__(self, message):
HTTPError.__init__(self, 500, message)
| TemplateError |
python | Pylons__pyramid | tests/test_config/pkgs/scannable/__init__.py | {
"start": 1778,
"end": 2364
} | class ____(Base):
def __init__(self, context, request):
self.context = context
self.request = request
@view_config(name='method1', renderer=null_renderer)
def method1(self):
return 'method1'
@view_config(name='method2', renderer=null_renderer)
def method2(self):
return 'method2'
@view_config(name='stacked_method2', renderer=null_renderer)
@view_config(name='stacked_method1', renderer=null_renderer)
def stacked(self):
return 'stacked_method'
# ungrokkable
A = 1
B = {}
def stuff():
""" """
| MethodViews |
python | dagster-io__dagster | python_modules/dagster-graphql/dagster_graphql/schema/runs.py | {
"start": 2300,
"end": 2481
} | class ____(graphene.Union):
class Meta:
types = launch_pipeline_run_result_types + pipeline_execution_error_types
name = "LaunchRunResult"
| GrapheneLaunchRunResult |
python | django__django | tests/aggregation/test_filter_argument.py | {
"start": 338,
"end": 8805
} | class ____(TestCase):
@classmethod
def setUpTestData(cls):
cls.a1 = Author.objects.create(name="test", age=40)
cls.a2 = Author.objects.create(name="test2", age=60)
cls.a3 = Author.objects.create(name="test3", age=100)
cls.p1 = Publisher.objects.create(
name="Apress", num_awards=3, duration=datetime.timedelta(days=1)
)
cls.b1 = Book.objects.create(
isbn="159059725",
name="The Definitive Guide to Django: Web Development Done Right",
pages=447,
rating=4.5,
price=Decimal("30.00"),
contact=cls.a1,
publisher=cls.p1,
pubdate=datetime.date(2007, 12, 6),
)
cls.b2 = Book.objects.create(
isbn="067232959",
name="Sams Teach Yourself Django in 24 Hours",
pages=528,
rating=3.0,
price=Decimal("23.09"),
contact=cls.a2,
publisher=cls.p1,
pubdate=datetime.date(2008, 3, 3),
)
cls.b3 = Book.objects.create(
isbn="159059996",
name="Practical Django Projects",
pages=600,
rating=4.5,
price=Decimal("29.69"),
contact=cls.a3,
publisher=cls.p1,
pubdate=datetime.date(2008, 6, 23),
)
cls.a1.friends.add(cls.a2)
cls.a1.friends.add(cls.a3)
cls.b1.authors.add(cls.a1)
cls.b1.authors.add(cls.a3)
cls.b2.authors.add(cls.a2)
cls.b3.authors.add(cls.a3)
def test_filtered_aggregates(self):
agg = Sum("age", filter=Q(name__startswith="test"))
self.assertEqual(Author.objects.aggregate(age=agg)["age"], 200)
def test_filtered_numerical_aggregates(self):
for aggregate, expected_result in (
(Avg, Approximate(66.7, 1)),
(StdDev, Approximate(24.9, 1)),
(Variance, Approximate(622.2, 1)),
):
with self.subTest(aggregate=aggregate.__name__):
agg = aggregate("age", filter=Q(name__startswith="test"))
self.assertEqual(
Author.objects.aggregate(age=agg)["age"], expected_result
)
def test_empty_filtered_aggregates(self):
agg = Count("pk", filter=Q())
self.assertEqual(Author.objects.aggregate(count=agg)["count"], 3)
def test_empty_filtered_aggregates_with_annotation(self):
agg = Count("pk", filter=Q())
self.assertEqual(
Author.objects.annotate(
age_annotation=F("age"),
).aggregate(
count=agg
)["count"],
3,
)
def test_double_filtered_aggregates(self):
agg = Sum("age", filter=Q(Q(name="test2") & ~Q(name="test")))
self.assertEqual(Author.objects.aggregate(age=agg)["age"], 60)
def test_excluded_aggregates(self):
agg = Sum("age", filter=~Q(name="test2"))
self.assertEqual(Author.objects.aggregate(age=agg)["age"], 140)
def test_related_aggregates_m2m(self):
agg = Sum("friends__age", filter=~Q(friends__name="test"))
self.assertEqual(
Author.objects.filter(name="test").aggregate(age=agg)["age"], 160
)
def test_related_aggregates_m2m_and_fk(self):
q = Q(friends__book__publisher__name="Apress") & ~Q(friends__name="test3")
agg = Sum("friends__book__pages", filter=q)
self.assertEqual(
Author.objects.filter(name="test").aggregate(pages=agg)["pages"], 528
)
def test_plain_annotate(self):
agg = Sum("book__pages", filter=Q(book__rating__gt=3))
qs = Author.objects.annotate(pages=agg).order_by("pk")
self.assertSequenceEqual([a.pages for a in qs], [447, None, 1047])
def test_filtered_aggregate_on_annotate(self):
pages_annotate = Sum("book__pages", filter=Q(book__rating__gt=3))
age_agg = Sum("age", filter=Q(total_pages__gte=400))
aggregated = Author.objects.annotate(total_pages=pages_annotate).aggregate(
summed_age=age_agg
)
self.assertEqual(aggregated, {"summed_age": 140})
def test_case_aggregate(self):
agg = Sum(
Case(When(friends__age=40, then=F("friends__age"))),
filter=Q(friends__name__startswith="test"),
)
self.assertEqual(Author.objects.aggregate(age=agg)["age"], 80)
def test_sum_star_exception(self):
msg = "Star cannot be used with filter. Please specify a field."
with self.assertRaisesMessage(ValueError, msg):
Count("*", filter=Q(age=40))
def test_filtered_reused_subquery(self):
qs = Author.objects.annotate(
older_friends_count=Count("friends", filter=Q(friends__age__gt=F("age"))),
).filter(
older_friends_count__gte=2,
)
self.assertEqual(qs.get(pk__in=qs.values("pk")), self.a1)
def test_filtered_aggregate_ref_annotation(self):
aggs = Author.objects.annotate(double_age=F("age") * 2).aggregate(
cnt=Count("pk", filter=Q(double_age__gt=100)),
)
self.assertEqual(aggs["cnt"], 2)
def test_filtered_aggregate_ref_subquery_annotation(self):
aggs = Author.objects.annotate(
earliest_book_year=Subquery(
Book.objects.filter(
contact__pk=OuterRef("pk"),
)
.order_by("pubdate")
.values("pubdate__year")[:1]
),
).aggregate(
cnt=Count("pk", filter=Q(earliest_book_year=2008)),
)
self.assertEqual(aggs["cnt"], 2)
def test_filtered_aggregate_ref_multiple_subquery_annotation(self):
aggregate = (
Book.objects.values("publisher")
.annotate(
has_authors=Exists(
Book.authors.through.objects.filter(book=OuterRef("pk")),
),
authors_have_other_books=Exists(
Book.objects.filter(
authors__in=Author.objects.filter(
book_contact_set=OuterRef(OuterRef("pk")),
)
).exclude(pk=OuterRef("pk")),
),
)
.aggregate(
max_rating=Max(
"rating",
filter=Q(has_authors=True, authors_have_other_books=False),
)
)
)
self.assertEqual(aggregate, {"max_rating": 4.5})
def test_filtered_aggregrate_ref_in_subquery_annotation(self):
aggs = (
Author.objects.annotate(
count=Subquery(
Book.objects.annotate(
weird_count=Count(
"pk",
filter=Q(pages=OuterRef("age")),
)
).values("weird_count")[:1]
),
)
.order_by("pk")
.aggregate(sum=Sum("count"))
)
self.assertEqual(aggs["sum"], 0)
def test_filtered_aggregate_on_exists(self):
aggregate = Book.objects.values("publisher").aggregate(
max_rating=Max(
"rating",
filter=Exists(
Book.authors.through.objects.filter(book=OuterRef("pk")),
),
),
)
self.assertEqual(aggregate, {"max_rating": 4.5})
def test_filtered_aggregate_empty_condition(self):
book = Book.objects.annotate(
authors_count=Count(
"authors",
filter=Q(authors__in=[]),
),
).get(pk=self.b1.pk)
self.assertEqual(book.authors_count, 0)
aggregate = Book.objects.aggregate(
max_rating=Max("rating", filter=Q(rating__in=[]))
)
self.assertEqual(aggregate, {"max_rating": None})
def test_filtered_aggregate_full_condition(self):
book = Book.objects.annotate(
authors_count=Count(
"authors",
filter=~Q(authors__in=[]),
),
).get(pk=self.b1.pk)
self.assertEqual(book.authors_count, 2)
aggregate = Book.objects.aggregate(
max_rating=Max("rating", filter=~Q(rating__in=[]))
)
self.assertEqual(aggregate, {"max_rating": 4.5})
| FilteredAggregateTests |
python | doocs__leetcode | solution/3500-3599/3502.Minimum Cost to Reach Every Position/Solution.py | {
"start": 0,
"end": 243
} | class ____:
def minCosts(self, cost: List[int]) -> List[int]:
n = len(cost)
ans = [0] * n
mi = cost[0]
for i, c in enumerate(cost):
mi = min(mi, c)
ans[i] = mi
return ans
| Solution |
python | walkccc__LeetCode | solutions/894. All Possible Full Binary Trees/894.py | {
"start": 0,
"end": 482
} | class ____:
@functools.lru_cache(None)
def allPossibleFBT(self, n: int) -> list[TreeNode | None]:
if n % 2 == 0:
return []
if n == 1:
return [TreeNode(0)]
ans = []
for leftCount in range(n):
rightCount = n - 1 - leftCount
for left in self.allPossibleFBT(leftCount):
for right in self.allPossibleFBT(rightCount):
ans.append(TreeNode(0))
ans[-1].left = left
ans[-1].right = right
return ans
| Solution |
python | huggingface__transformers | src/transformers/models/gpt_oss/modeling_gpt_oss.py | {
"start": 14371,
"end": 17846
} | class ____(nn.Module):
"""Multi-headed attention from 'Attention Is All You Need' paper"""
def __init__(self, config: GptOssConfig, layer_idx: int):
super().__init__()
self.layer_type = config.layer_types[layer_idx] if hasattr(config, "layer_types") else None
self.config = config
self.layer_idx = layer_idx
self.head_dim = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads)
self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads
self.scaling = self.head_dim**-0.5
self.attention_dropout = config.attention_dropout
self.is_causal = True
self.q_proj = nn.Linear(
config.hidden_size, config.num_attention_heads * self.head_dim, bias=config.attention_bias
)
self.k_proj = nn.Linear(
config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias
)
self.v_proj = nn.Linear(
config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias
)
self.o_proj = nn.Linear(
config.num_attention_heads * self.head_dim, config.hidden_size, bias=config.attention_bias
)
self.rotary_fn = apply_rotary_pos_emb
self.sliding_window = config.sliding_window if self.layer_type == "sliding_attention" else None
self.sinks = nn.Parameter(torch.empty(config.num_attention_heads))
def forward(
self,
hidden_states: torch.Tensor,
position_embeddings: tuple[torch.Tensor, torch.Tensor],
attention_mask: Optional[torch.Tensor],
past_key_values: Optional[Cache] = None,
cache_position: Optional[torch.LongTensor] = None,
position_ids: Optional[torch.LongTensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> tuple[torch.Tensor, torch.Tensor]:
input_shape = hidden_states.shape[:-1]
hidden_shape = (*input_shape, -1, self.head_dim)
query_states = self.q_proj(hidden_states).view(hidden_shape).transpose(1, 2)
key_states = self.k_proj(hidden_states).view(hidden_shape).transpose(1, 2)
value_states = self.v_proj(hidden_states).view(hidden_shape).transpose(1, 2)
cos, sin = position_embeddings
query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
if past_key_values is not None:
cache_kwargs = {"cache_position": cache_position}
key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs)
attention_interface: Callable = eager_attention_forward
if self.config._attn_implementation != "eager":
attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
attn_output, attn_weights = attention_interface(
self,
query_states,
key_states,
value_states,
attention_mask,
dropout=0.0 if not self.training else self.attention_dropout,
scaling=self.scaling,
sliding_window=self.sliding_window,
position_ids=position_ids,
s_aux=self.sinks, # diff with Llama
**kwargs,
)
attn_output = attn_output.reshape(*input_shape, -1).contiguous()
attn_output = self.o_proj(attn_output)
return attn_output, attn_weights
| GptOssAttention |
python | encode__django-rest-framework | tests/schemas/views.py | {
"start": 4992,
"end": 5079
} | class ____(models.Model):
first_name = models.CharField(max_length=30)
| OpenAPIExample |
python | langchain-ai__langchain | libs/core/langchain_core/prompts/chat.py | {
"start": 10220,
"end": 11186
} | class ____(BaseStringMessagePromptTemplate):
"""Chat message prompt template."""
role: str
"""Role of the message."""
def format(self, **kwargs: Any) -> BaseMessage:
"""Format the prompt template.
Args:
**kwargs: Keyword arguments to use for formatting.
Returns:
Formatted message.
"""
text = self.prompt.format(**kwargs)
return ChatMessage(
content=text, role=self.role, additional_kwargs=self.additional_kwargs
)
async def aformat(self, **kwargs: Any) -> BaseMessage:
"""Async format the prompt template.
Args:
**kwargs: Keyword arguments to use for formatting.
Returns:
Formatted message.
"""
text = await self.prompt.aformat(**kwargs)
return ChatMessage(
content=text, role=self.role, additional_kwargs=self.additional_kwargs
)
| ChatMessagePromptTemplate |
python | automl__auto-sklearn | autosklearn/pipeline/components/data_preprocessing/rescaling/quantile_transformer.py | {
"start": 681,
"end": 2766
} | class ____(Rescaling, AutoSklearnPreprocessingAlgorithm):
def __init__(
self,
n_quantiles: int,
output_distribution: str,
random_state: Optional[Union[int, np.random.RandomState]] = None,
) -> None:
from sklearn.preprocessing import QuantileTransformer
self.n_quantiles = n_quantiles
self.output_distribution = output_distribution
self.preprocessor = QuantileTransformer(
n_quantiles=n_quantiles,
output_distribution=output_distribution,
copy=False,
random_state=random_state,
)
@staticmethod
def get_properties(
dataset_properties: Optional[DATASET_PROPERTIES_TYPE] = None,
) -> Dict[str, Optional[Union[str, int, bool, Tuple]]]:
return {
"shortname": "QuantileTransformer",
"name": "QuantileTransformer",
"handles_regression": True,
"handles_classification": True,
"handles_multiclass": True,
"handles_multilabel": True,
"handles_multioutput": True,
"is_deterministic": True,
# TODO find out if this is right!
"handles_sparse": True,
"handles_dense": True,
"input": (SPARSE, DENSE, UNSIGNED_DATA),
"output": (INPUT, SIGNED_DATA),
"preferred_dtype": None,
}
@staticmethod
def get_hyperparameter_search_space(
feat_type: Optional[FEAT_TYPE_TYPE] = None,
dataset_properties: Optional[DATASET_PROPERTIES_TYPE] = None,
) -> ConfigurationSpace:
cs = ConfigurationSpace()
# TODO parametrize like the Random Forest as n_quantiles = n_features^param
n_quantiles = UniformIntegerHyperparameter(
"n_quantiles", lower=10, upper=2000, default_value=1000
)
output_distribution = CategoricalHyperparameter(
"output_distribution", ["normal", "uniform"]
)
cs.add_hyperparameters((n_quantiles, output_distribution))
return cs
| QuantileTransformerComponent |
python | Netflix__metaflow | metaflow/_vendor/packaging/specifiers.py | {
"start": 2923,
"end": 25567
} | class ____(BaseSpecifier):
"""This class abstracts handling of version specifiers.
.. tip::
It is generally not required to instantiate this manually. You should instead
prefer to work with :class:`SpecifierSet` instead, which can parse
comma-separated version specifiers (which is what package metadata contains).
"""
_operator_regex_str = r"""
(?P<operator>(~=|==|!=|<=|>=|<|>|===))
"""
_version_regex_str = r"""
(?P<version>
(?:
# The identity operators allow for an escape hatch that will
# do an exact string match of the version you wish to install.
# This will not be parsed by PEP 440 and we cannot determine
# any semantic meaning from it. This operator is discouraged
# but included entirely as an escape hatch.
(?<====) # Only match for the identity operator
\s*
[^\s;)]* # The arbitrary version can be just about anything,
# we match everything except for whitespace, a
# semi-colon for marker support, and a closing paren
# since versions can be enclosed in them.
)
|
(?:
# The (non)equality operators allow for wild card and local
# versions to be specified so we have to define these two
# operators separately to enable that.
(?<===|!=) # Only match for equals and not equals
\s*
v?
(?:[0-9]+!)? # epoch
[0-9]+(?:\.[0-9]+)* # release
# You cannot use a wild card and a pre-release, post-release, a dev or
# local version together so group them with a | and make them optional.
(?:
\.\* # Wild card syntax of .*
|
(?: # pre release
[-_\.]?
(alpha|beta|preview|pre|a|b|c|rc)
[-_\.]?
[0-9]*
)?
(?: # post release
(?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*)
)?
(?:[-_\.]?dev[-_\.]?[0-9]*)? # dev release
(?:\+[a-z0-9]+(?:[-_\.][a-z0-9]+)*)? # local
)?
)
|
(?:
# The compatible operator requires at least two digits in the
# release segment.
(?<=~=) # Only match for the compatible operator
\s*
v?
(?:[0-9]+!)? # epoch
[0-9]+(?:\.[0-9]+)+ # release (We have a + instead of a *)
(?: # pre release
[-_\.]?
(alpha|beta|preview|pre|a|b|c|rc)
[-_\.]?
[0-9]*
)?
(?: # post release
(?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*)
)?
(?:[-_\.]?dev[-_\.]?[0-9]*)? # dev release
)
|
(?:
# All other operators only allow a sub set of what the
# (non)equality operators do. Specifically they do not allow
# local versions to be specified nor do they allow the prefix
# matching wild cards.
(?<!==|!=|~=) # We have special cases for these
# operators so we want to make sure they
# don't match here.
\s*
v?
(?:[0-9]+!)? # epoch
[0-9]+(?:\.[0-9]+)* # release
(?: # pre release
[-_\.]?
(alpha|beta|preview|pre|a|b|c|rc)
[-_\.]?
[0-9]*
)?
(?: # post release
(?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*)
)?
(?:[-_\.]?dev[-_\.]?[0-9]*)? # dev release
)
)
"""
_regex = re.compile(
r"^\s*" + _operator_regex_str + _version_regex_str + r"\s*$",
re.VERBOSE | re.IGNORECASE,
)
_operators = {
"~=": "compatible",
"==": "equal",
"!=": "not_equal",
"<=": "less_than_equal",
">=": "greater_than_equal",
"<": "less_than",
">": "greater_than",
"===": "arbitrary",
}
def __init__(self, spec: str = "", prereleases: Optional[bool] = None) -> None:
"""Initialize a Specifier instance.
:param spec:
The string representation of a specifier which will be parsed and
normalized before use.
:param prereleases:
This tells the specifier if it should accept prerelease versions if
applicable or not. The default of ``None`` will autodetect it from the
given specifiers.
:raises InvalidSpecifier:
If the given specifier is invalid (i.e. bad syntax).
"""
match = self._regex.search(spec)
if not match:
raise InvalidSpecifier(f"Invalid specifier: '{spec}'")
self._spec: Tuple[str, str] = (
match.group("operator").strip(),
match.group("version").strip(),
)
# Store whether or not this Specifier should accept prereleases
self._prereleases = prereleases
@property
def prereleases(self) -> bool:
# If there is an explicit prereleases set for this, then we'll just
# blindly use that.
if self._prereleases is not None:
return self._prereleases
# Look at all of our specifiers and determine if they are inclusive
# operators, and if they are if they are including an explicit
# prerelease.
operator, version = self._spec
if operator in ["==", ">=", "<=", "~=", "==="]:
# The == specifier can include a trailing .*, if it does we
# want to remove before parsing.
if operator == "==" and version.endswith(".*"):
version = version[:-2]
# Parse the version, and if it is a pre-release than this
# specifier allows pre-releases.
if Version(version).is_prerelease:
return True
return False
@prereleases.setter
def prereleases(self, value: bool) -> None:
self._prereleases = value
@property
def operator(self) -> str:
"""The operator of this specifier.
>>> Specifier("==1.2.3").operator
'=='
"""
return self._spec[0]
@property
def version(self) -> str:
"""The version of this specifier.
>>> Specifier("==1.2.3").version
'1.2.3'
"""
return self._spec[1]
def __repr__(self) -> str:
"""A representation of the Specifier that shows all internal state.
>>> Specifier('>=1.0.0')
<Specifier('>=1.0.0')>
>>> Specifier('>=1.0.0', prereleases=False)
<Specifier('>=1.0.0', prereleases=False)>
>>> Specifier('>=1.0.0', prereleases=True)
<Specifier('>=1.0.0', prereleases=True)>
"""
pre = (
f", prereleases={self.prereleases!r}"
if self._prereleases is not None
else ""
)
return f"<{self.__class__.__name__}({str(self)!r}{pre})>"
def __str__(self) -> str:
"""A string representation of the Specifier that can be round-tripped.
>>> str(Specifier('>=1.0.0'))
'>=1.0.0'
>>> str(Specifier('>=1.0.0', prereleases=False))
'>=1.0.0'
"""
return "{}{}".format(*self._spec)
@property
def _canonical_spec(self) -> Tuple[str, str]:
canonical_version = canonicalize_version(
self._spec[1],
strip_trailing_zero=(self._spec[0] != "~="),
)
return self._spec[0], canonical_version
def __hash__(self) -> int:
return hash(self._canonical_spec)
def __eq__(self, other: object) -> bool:
"""Whether or not the two Specifier-like objects are equal.
:param other: The other object to check against.
The value of :attr:`prereleases` is ignored.
>>> Specifier("==1.2.3") == Specifier("== 1.2.3.0")
True
>>> (Specifier("==1.2.3", prereleases=False) ==
... Specifier("==1.2.3", prereleases=True))
True
>>> Specifier("==1.2.3") == "==1.2.3"
True
>>> Specifier("==1.2.3") == Specifier("==1.2.4")
False
>>> Specifier("==1.2.3") == Specifier("~=1.2.3")
False
"""
if isinstance(other, str):
try:
other = self.__class__(str(other))
except InvalidSpecifier:
return NotImplemented
elif not isinstance(other, self.__class__):
return NotImplemented
return self._canonical_spec == other._canonical_spec
def _get_operator(self, op: str) -> CallableOperator:
operator_callable: CallableOperator = getattr(
self, f"_compare_{self._operators[op]}"
)
return operator_callable
def _compare_compatible(self, prospective: Version, spec: str) -> bool:
# Compatible releases have an equivalent combination of >= and ==. That
# is that ~=2.2 is equivalent to >=2.2,==2.*. This allows us to
# implement this in terms of the other specifiers instead of
# implementing it ourselves. The only thing we need to do is construct
# the other specifiers.
# We want everything but the last item in the version, but we want to
# ignore suffix segments.
prefix = ".".join(
list(itertools.takewhile(_is_not_suffix, _version_split(spec)))[:-1]
)
# Add the prefix notation to the end of our string
prefix += ".*"
return self._get_operator(">=")(prospective, spec) and self._get_operator("==")(
prospective, prefix
)
def _compare_equal(self, prospective: Version, spec: str) -> bool:
# We need special logic to handle prefix matching
if spec.endswith(".*"):
# In the case of prefix matching we want to ignore local segment.
normalized_prospective = canonicalize_version(prospective.public)
# Get the normalized version string ignoring the trailing .*
normalized_spec = canonicalize_version(spec[:-2], strip_trailing_zero=False)
# Split the spec out by dots, and pretend that there is an implicit
# dot in between a release segment and a pre-release segment.
split_spec = _version_split(normalized_spec)
# Split the prospective version out by dots, and pretend that there
# is an implicit dot in between a release segment and a pre-release
# segment.
split_prospective = _version_split(normalized_prospective)
# 0-pad the prospective version before shortening it to get the correct
# shortened version.
padded_prospective, _ = _pad_version(split_prospective, split_spec)
# Shorten the prospective version to be the same length as the spec
# so that we can determine if the specifier is a prefix of the
# prospective version or not.
shortened_prospective = padded_prospective[: len(split_spec)]
return shortened_prospective == split_spec
else:
# Convert our spec string into a Version
spec_version = Version(spec)
# If the specifier does not have a local segment, then we want to
# act as if the prospective version also does not have a local
# segment.
if not spec_version.local:
prospective = Version(prospective.public)
return prospective == spec_version
def _compare_not_equal(self, prospective: Version, spec: str) -> bool:
return not self._compare_equal(prospective, spec)
def _compare_less_than_equal(self, prospective: Version, spec: str) -> bool:
# NB: Local version identifiers are NOT permitted in the version
# specifier, so local version labels can be universally removed from
# the prospective version.
return Version(prospective.public) <= Version(spec)
def _compare_greater_than_equal(self, prospective: Version, spec: str) -> bool:
# NB: Local version identifiers are NOT permitted in the version
# specifier, so local version labels can be universally removed from
# the prospective version.
return Version(prospective.public) >= Version(spec)
def _compare_less_than(self, prospective: Version, spec_str: str) -> bool:
# Convert our spec to a Version instance, since we'll want to work with
# it as a version.
spec = Version(spec_str)
# Check to see if the prospective version is less than the spec
# version. If it's not we can short circuit and just return False now
# instead of doing extra unneeded work.
if not prospective < spec:
return False
# This special case is here so that, unless the specifier itself
# includes is a pre-release version, that we do not accept pre-release
# versions for the version mentioned in the specifier (e.g. <3.1 should
# not match 3.1.dev0, but should match 3.0.dev0).
if not spec.is_prerelease and prospective.is_prerelease:
if Version(prospective.base_version) == Version(spec.base_version):
return False
# If we've gotten to here, it means that prospective version is both
# less than the spec version *and* it's not a pre-release of the same
# version in the spec.
return True
def _compare_greater_than(self, prospective: Version, spec_str: str) -> bool:
# Convert our spec to a Version instance, since we'll want to work with
# it as a version.
spec = Version(spec_str)
# Check to see if the prospective version is greater than the spec
# version. If it's not we can short circuit and just return False now
# instead of doing extra unneeded work.
if not prospective > spec:
return False
# This special case is here so that, unless the specifier itself
# includes is a post-release version, that we do not accept
# post-release versions for the version mentioned in the specifier
# (e.g. >3.1 should not match 3.0.post0, but should match 3.2.post0).
if not spec.is_postrelease and prospective.is_postrelease:
if Version(prospective.base_version) == Version(spec.base_version):
return False
# Ensure that we do not allow a local version of the version mentioned
# in the specifier, which is technically greater than, to match.
if prospective.local is not None:
if Version(prospective.base_version) == Version(spec.base_version):
return False
# If we've gotten to here, it means that prospective version is both
# greater than the spec version *and* it's not a pre-release of the
# same version in the spec.
return True
def _compare_arbitrary(self, prospective: Version, spec: str) -> bool:
return str(prospective).lower() == str(spec).lower()
def __contains__(self, item: Union[str, Version]) -> bool:
"""Return whether or not the item is contained in this specifier.
:param item: The item to check for.
This is used for the ``in`` operator and behaves the same as
:meth:`contains` with no ``prereleases`` argument passed.
>>> "1.2.3" in Specifier(">=1.2.3")
True
>>> Version("1.2.3") in Specifier(">=1.2.3")
True
>>> "1.0.0" in Specifier(">=1.2.3")
False
>>> "1.3.0a1" in Specifier(">=1.2.3")
False
>>> "1.3.0a1" in Specifier(">=1.2.3", prereleases=True)
True
"""
return self.contains(item)
def contains(
self, item: UnparsedVersion, prereleases: Optional[bool] = None
) -> bool:
"""Return whether or not the item is contained in this specifier.
:param item:
The item to check for, which can be a version string or a
:class:`Version` instance.
:param prereleases:
Whether or not to match prereleases with this Specifier. If set to
``None`` (the default), it uses :attr:`prereleases` to determine
whether or not prereleases are allowed.
>>> Specifier(">=1.2.3").contains("1.2.3")
True
>>> Specifier(">=1.2.3").contains(Version("1.2.3"))
True
>>> Specifier(">=1.2.3").contains("1.0.0")
False
>>> Specifier(">=1.2.3").contains("1.3.0a1")
False
>>> Specifier(">=1.2.3", prereleases=True).contains("1.3.0a1")
True
>>> Specifier(">=1.2.3").contains("1.3.0a1", prereleases=True)
True
"""
# Determine if prereleases are to be allowed or not.
if prereleases is None:
prereleases = self.prereleases
# Normalize item to a Version, this allows us to have a shortcut for
# "2.0" in Specifier(">=2")
normalized_item = _coerce_version(item)
# Determine if we should be supporting prereleases in this specifier
# or not, if we do not support prereleases than we can short circuit
# logic if this version is a prereleases.
if normalized_item.is_prerelease and not prereleases:
return False
# Actually do the comparison to determine if this item is contained
# within this Specifier or not.
operator_callable: CallableOperator = self._get_operator(self.operator)
return operator_callable(normalized_item, self.version)
def filter(
self, iterable: Iterable[UnparsedVersionVar], prereleases: Optional[bool] = None
) -> Iterator[UnparsedVersionVar]:
"""Filter items in the given iterable, that match the specifier.
:param iterable:
An iterable that can contain version strings and :class:`Version` instances.
The items in the iterable will be filtered according to the specifier.
:param prereleases:
Whether or not to allow prereleases in the returned iterator. If set to
``None`` (the default), it will be intelligently decide whether to allow
prereleases or not (based on the :attr:`prereleases` attribute, and
whether the only versions matching are prereleases).
This method is smarter than just ``filter(Specifier().contains, [...])``
because it implements the rule from :pep:`440` that a prerelease item
SHOULD be accepted if no other versions match the given specifier.
>>> list(Specifier(">=1.2.3").filter(["1.2", "1.3", "1.5a1"]))
['1.3']
>>> list(Specifier(">=1.2.3").filter(["1.2", "1.2.3", "1.3", Version("1.4")]))
['1.2.3', '1.3', <Version('1.4')>]
>>> list(Specifier(">=1.2.3").filter(["1.2", "1.5a1"]))
['1.5a1']
>>> list(Specifier(">=1.2.3").filter(["1.3", "1.5a1"], prereleases=True))
['1.3', '1.5a1']
>>> list(Specifier(">=1.2.3", prereleases=True).filter(["1.3", "1.5a1"]))
['1.3', '1.5a1']
"""
yielded = False
found_prereleases = []
kw = {"prereleases": prereleases if prereleases is not None else True}
# Attempt to iterate over all the values in the iterable and if any of
# them match, yield them.
for version in iterable:
parsed_version = _coerce_version(version)
if self.contains(parsed_version, **kw):
# If our version is a prerelease, and we were not set to allow
# prereleases, then we'll store it for later in case nothing
# else matches this specifier.
if parsed_version.is_prerelease and not (
prereleases or self.prereleases
):
found_prereleases.append(version)
# Either this is not a prerelease, or we should have been
# accepting prereleases from the beginning.
else:
yielded = True
yield version
# Now that we've iterated over everything, determine if we've yielded
# any values, and if we have not and we have any prereleases stored up
# then we will go ahead and yield the prereleases.
if not yielded and found_prereleases:
for version in found_prereleases:
yield version
_prefix_regex = re.compile(r"^([0-9]+)((?:a|b|c|rc)[0-9]+)$")
def _version_split(version: str) -> List[str]:
result: List[str] = []
for item in version.split("."):
match = _prefix_regex.search(item)
if match:
result.extend(match.groups())
else:
result.append(item)
return result
def _is_not_suffix(segment: str) -> bool:
return not any(
segment.startswith(prefix) for prefix in ("dev", "a", "b", "rc", "post")
)
def _pad_version(left: List[str], right: List[str]) -> Tuple[List[str], List[str]]:
left_split, right_split = [], []
# Get the release segment of our versions
left_split.append(list(itertools.takewhile(lambda x: x.isdigit(), left)))
right_split.append(list(itertools.takewhile(lambda x: x.isdigit(), right)))
# Get the rest of our versions
left_split.append(left[len(left_split[0]) :])
right_split.append(right[len(right_split[0]) :])
# Insert our padding
left_split.insert(1, ["0"] * max(0, len(right_split[0]) - len(left_split[0])))
right_split.insert(1, ["0"] * max(0, len(left_split[0]) - len(right_split[0])))
return (list(itertools.chain(*left_split)), list(itertools.chain(*right_split)))
| Specifier |
python | openai__openai-python | src/openai/resources/responses/input_tokens.py | {
"start": 7228,
"end": 13504
} | class ____(AsyncAPIResource):
@cached_property
def with_raw_response(self) -> AsyncInputTokensWithRawResponse:
"""
This property can be used as a prefix for any HTTP method call to return
the raw response object instead of the parsed content.
For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
"""
return AsyncInputTokensWithRawResponse(self)
@cached_property
def with_streaming_response(self) -> AsyncInputTokensWithStreamingResponse:
"""
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
For more information, see https://www.github.com/openai/openai-python#with_streaming_response
"""
return AsyncInputTokensWithStreamingResponse(self)
async def count(
self,
*,
conversation: Optional[input_token_count_params.Conversation] | Omit = omit,
input: Union[str, Iterable[ResponseInputItemParam], None] | Omit = omit,
instructions: Optional[str] | Omit = omit,
model: Optional[str] | Omit = omit,
parallel_tool_calls: Optional[bool] | Omit = omit,
previous_response_id: Optional[str] | Omit = omit,
reasoning: Optional[Reasoning] | Omit = omit,
text: Optional[input_token_count_params.Text] | Omit = omit,
tool_choice: Optional[input_token_count_params.ToolChoice] | Omit = omit,
tools: Optional[Iterable[ToolParam]] | Omit = omit,
truncation: Literal["auto", "disabled"] | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> InputTokenCountResponse:
"""
Get input token counts
Args:
conversation: The conversation that this response belongs to. Items from this conversation are
prepended to `input_items` for this response request. Input items and output
items from this response are automatically added to this conversation after this
response completes.
input: Text, image, or file inputs to the model, used to generate a response
instructions: A system (or developer) message inserted into the model's context. When used
along with `previous_response_id`, the instructions from a previous response
will not be carried over to the next response. This makes it simple to swap out
system (or developer) messages in new responses.
model: Model ID used to generate the response, like `gpt-4o` or `o3`. OpenAI offers a
wide range of models with different capabilities, performance characteristics,
and price points. Refer to the
[model guide](https://platform.openai.com/docs/models) to browse and compare
available models.
parallel_tool_calls: Whether to allow the model to run tool calls in parallel.
previous_response_id: The unique ID of the previous response to the model. Use this to create
multi-turn conversations. Learn more about
[conversation state](https://platform.openai.com/docs/guides/conversation-state).
Cannot be used in conjunction with `conversation`.
reasoning: **gpt-5 and o-series models only** Configuration options for
[reasoning models](https://platform.openai.com/docs/guides/reasoning).
text: Configuration options for a text response from the model. Can be plain text or
structured JSON data. Learn more:
- [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
- [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs)
tool_choice: How the model should select which tool (or tools) to use when generating a
response. See the `tools` parameter to see how to specify which tools the model
can call.
tools: An array of tools the model may call while generating a response. You can
specify which tool to use by setting the `tool_choice` parameter.
truncation: The truncation strategy to use for the model response. - `auto`: If the input to
this Response exceeds the model's context window size, the model will truncate
the response to fit the context window by dropping items from the beginning of
the conversation. - `disabled` (default): If the input size will exceed the
context window size for a model, the request will fail with a 400 error.
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
return await self._post(
"/responses/input_tokens",
body=await async_maybe_transform(
{
"conversation": conversation,
"input": input,
"instructions": instructions,
"model": model,
"parallel_tool_calls": parallel_tool_calls,
"previous_response_id": previous_response_id,
"reasoning": reasoning,
"text": text,
"tool_choice": tool_choice,
"tools": tools,
"truncation": truncation,
},
input_token_count_params.InputTokenCountParams,
),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=InputTokenCountResponse,
)
| AsyncInputTokens |
python | pytorch__pytorch | torch/testing/_internal/common_distributed.py | {
"start": 55120,
"end": 55644
} | class ____(nn.Module):
def __init__(
self,
forward_inputs: dict[nn.Module, torch.Tensor],
cast_forward_inputs: bool,
) -> None:
super().__init__()
self.l = nn.Linear(100, 100)
self.forward_inputs = forward_inputs
self.cast_forward_inputs = cast_forward_inputs
def forward(self, x: torch.Tensor) -> torch.Tensor:
self.forward_inputs[self] = x
return self.l(x.to(self.l.weight.dtype) if self.cast_forward_inputs else x)
| SaveForwardInputsModule |
python | run-llama__llama_index | llama-index-packs/llama-index-packs-deeplake-multimodal-retrieval/llama_index/packs/deeplake_multimodal_retrieval/base.py | {
"start": 476,
"end": 2992
} | class ____(BaseLlamaPack):
"""DeepLake Multimodal retriever pack."""
def __init__(
self,
dataset_path: str = "llama_index",
token: Optional[str] = None,
read_only: Optional[bool] = False,
overwrite: bool = False,
verbose: bool = True,
nodes: Optional[List[BaseNode]] = None,
top_k: int = 4,
**kwargs: Any,
):
# text vector store
self._text_vectorstore = DeepLakeVectorStore(
dataset_path=dataset_path + "_text",
token=token,
read_only=read_only,
overwrite=overwrite,
verbose=verbose,
)
# image vector store
self._image_vectorstore = DeepLakeVectorStore(
dataset_path=dataset_path + "_image",
token=token,
read_only=read_only,
overwrite=overwrite,
verbose=verbose,
)
if nodes is not None:
self._storage_context = StorageContext.from_defaults(
vector_store=self._text_vectorstore
)
self._index = MultiModalVectorStoreIndex(
nodes,
storage_context=self._storage_context,
image_vector_store=self._image_vectorstore,
)
else:
self._storage_context = StorageContext.from_defaults(
vector_store=self._text_vectorstore
)
self._index = MultiModalVectorStoreIndex.from_vector_store(
self._text_vectorstore,
image_vector_store=self._image_vectorstore,
)
self.retriever = self._index.as_retriever(
similarity_top_k=top_k, vector_store_kwargs={"deep_memory": True}
)
self.query_engine = SimpleMultiModalQueryEngine(self.retriever)
def get_modules(self) -> Dict[str, Any]:
"""Get modules."""
return {
"text_vectorstore": self._text_vectorstore,
"image_vectorstore": self._image_vectorstore,
"storage_context": self._storage_context,
"index": self._index,
"retriever": self.retriever,
"query_engine": self.query_engine,
}
def retrieve(self, query_str: str) -> Any:
"""Retrieve."""
return self.query_engine.retrieve(query_str)
def run(self, *args: Any, **kwargs: Any) -> Any:
"""Run the pipeline."""
return self.query_engine.query(*args, **kwargs)
| DeepLakeMultimodalRetrieverPack |
python | cython__cython | tests/run/py_classbody.py | {
"start": 820,
"end": 1509
} | class ____(object):
"""
>>> ForLoopInPyClass.i # doctest: +ELLIPSIS
Traceback (most recent call last):
AttributeError: ...ForLoopInPyClass... has no attribute ...i...
>>> ForLoopInPyClass.k
0
>>> ForLoopInPyClass.m
1
"""
for i in range(0):
pass
for k in range(1):
pass
for m in range(2):
pass
def del_in_class(x):
"""
>>> del_in_class(True)
no error
>>> del_in_class(False)
NameError
"""
try:
class Test(object):
if x:
attr = 1
del attr
except NameError:
print("NameError")
else:
print("no error")
| ForLoopInPyClass |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_object_position03.py | {
"start": 315,
"end": 875
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("object_position03.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file with image(s)."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
worksheet.insert_image("E9", self.image_dir + "red.png", {"object_position": 3})
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | huggingface__transformers | src/transformers/models/gemma2/modeling_gemma2.py | {
"start": 10288,
"end": 13924
} | class ____(nn.Module):
"""Multi-headed attention from 'Attention Is All You Need' paper"""
def __init__(self, config: Gemma2Config, layer_idx: int):
super().__init__()
self.layer_type = config.layer_types[layer_idx] if hasattr(config, "layer_types") else None
self.config = config
self.layer_idx = layer_idx
self.head_dim = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads)
self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads
self.scaling = config.query_pre_attn_scalar**-0.5
self.attention_dropout = self.config.attention_dropout
self.is_causal = not getattr(config, "use_bidirectional_attention", False)
self.q_proj = nn.Linear(
config.hidden_size, config.num_attention_heads * self.head_dim, bias=config.attention_bias
)
self.k_proj = nn.Linear(
config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias
)
self.v_proj = nn.Linear(
config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias
)
self.o_proj = nn.Linear(
config.num_attention_heads * self.head_dim, config.hidden_size, bias=config.attention_bias
)
self.rotary_fn = apply_rotary_pos_emb
self.attn_logit_softcapping = self.config.attn_logit_softcapping
self.sliding_window = config.sliding_window if self.layer_type == "sliding_attention" else None
def forward(
self,
hidden_states: torch.Tensor,
position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]] = None,
attention_mask: Optional[torch.Tensor] = None,
past_key_values: Optional[Cache] = None,
cache_position: Optional[torch.LongTensor] = None,
**kwargs: Unpack[FlashAttentionKwargs],
) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
input_shape = hidden_states.shape[:-1]
hidden_shape = (*input_shape, -1, self.head_dim)
query_states = self.q_proj(hidden_states).view(hidden_shape).transpose(1, 2)
key_states = self.k_proj(hidden_states).view(hidden_shape).transpose(1, 2)
value_states = self.v_proj(hidden_states).view(hidden_shape).transpose(1, 2)
cos, sin = position_embeddings
query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
if past_key_values is not None:
# sin and cos are specific to RoPE models; cache_position needed for the static cache
cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs)
attention_interface: Callable = eager_attention_forward
if self.config._attn_implementation != "eager":
attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
attn_output, attn_weights = attention_interface(
self,
query_states,
key_states,
value_states,
attention_mask,
dropout=self.attention_dropout if self.training else 0.0,
scaling=self.scaling,
sliding_window=self.sliding_window,
softcap=self.attn_logit_softcapping,
**kwargs,
)
attn_output = attn_output.reshape(*input_shape, -1).contiguous()
attn_output = self.o_proj(attn_output)
return attn_output, attn_weights
| Gemma2Attention |
python | pypa__pip | src/pip/_vendor/rich/table.py | {
"start": 6133,
"end": 40049
} | class ____(JupyterMixin):
"""A console renderable to draw a table.
Args:
*headers (Union[Column, str]): Column headers, either as a string, or :class:`~rich.table.Column` instance.
title (Union[str, Text], optional): The title of the table rendered at the top. Defaults to None.
caption (Union[str, Text], optional): The table caption rendered below. Defaults to None.
width (int, optional): The width in characters of the table, or ``None`` to automatically fit. Defaults to None.
min_width (Optional[int], optional): The minimum width of the table, or ``None`` for no minimum. Defaults to None.
box (box.Box, optional): One of the constants in box.py used to draw the edges (see :ref:`appendix_box`), or ``None`` for no box lines. Defaults to box.HEAVY_HEAD.
safe_box (Optional[bool], optional): Disable box characters that don't display on windows legacy terminal with *raster* fonts. Defaults to True.
padding (PaddingDimensions, optional): Padding for cells (top, right, bottom, left). Defaults to (0, 1).
collapse_padding (bool, optional): Enable collapsing of padding around cells. Defaults to False.
pad_edge (bool, optional): Enable padding of edge cells. Defaults to True.
expand (bool, optional): Expand the table to fit the available space if ``True``, otherwise the table width will be auto-calculated. Defaults to False.
show_header (bool, optional): Show a header row. Defaults to True.
show_footer (bool, optional): Show a footer row. Defaults to False.
show_edge (bool, optional): Draw a box around the outside of the table. Defaults to True.
show_lines (bool, optional): Draw lines between every row. Defaults to False.
leading (int, optional): Number of blank lines between rows (precludes ``show_lines``). Defaults to 0.
style (Union[str, Style], optional): Default style for the table. Defaults to "none".
row_styles (List[Union, str], optional): Optional list of row styles, if more than one style is given then the styles will alternate. Defaults to None.
header_style (Union[str, Style], optional): Style of the header. Defaults to "table.header".
footer_style (Union[str, Style], optional): Style of the footer. Defaults to "table.footer".
border_style (Union[str, Style], optional): Style of the border. Defaults to None.
title_style (Union[str, Style], optional): Style of the title. Defaults to None.
caption_style (Union[str, Style], optional): Style of the caption. Defaults to None.
title_justify (str, optional): Justify method for title. Defaults to "center".
caption_justify (str, optional): Justify method for caption. Defaults to "center".
highlight (bool, optional): Highlight cell contents (if str). Defaults to False.
"""
columns: List[Column]
rows: List[Row]
def __init__(
self,
*headers: Union[Column, str],
title: Optional[TextType] = None,
caption: Optional[TextType] = None,
width: Optional[int] = None,
min_width: Optional[int] = None,
box: Optional[box.Box] = box.HEAVY_HEAD,
safe_box: Optional[bool] = None,
padding: PaddingDimensions = (0, 1),
collapse_padding: bool = False,
pad_edge: bool = True,
expand: bool = False,
show_header: bool = True,
show_footer: bool = False,
show_edge: bool = True,
show_lines: bool = False,
leading: int = 0,
style: StyleType = "none",
row_styles: Optional[Iterable[StyleType]] = None,
header_style: Optional[StyleType] = "table.header",
footer_style: Optional[StyleType] = "table.footer",
border_style: Optional[StyleType] = None,
title_style: Optional[StyleType] = None,
caption_style: Optional[StyleType] = None,
title_justify: "JustifyMethod" = "center",
caption_justify: "JustifyMethod" = "center",
highlight: bool = False,
) -> None:
self.columns: List[Column] = []
self.rows: List[Row] = []
self.title = title
self.caption = caption
self.width = width
self.min_width = min_width
self.box = box
self.safe_box = safe_box
self._padding = Padding.unpack(padding)
self.pad_edge = pad_edge
self._expand = expand
self.show_header = show_header
self.show_footer = show_footer
self.show_edge = show_edge
self.show_lines = show_lines
self.leading = leading
self.collapse_padding = collapse_padding
self.style = style
self.header_style = header_style or ""
self.footer_style = footer_style or ""
self.border_style = border_style
self.title_style = title_style
self.caption_style = caption_style
self.title_justify: "JustifyMethod" = title_justify
self.caption_justify: "JustifyMethod" = caption_justify
self.highlight = highlight
self.row_styles: Sequence[StyleType] = list(row_styles or [])
append_column = self.columns.append
for header in headers:
if isinstance(header, str):
self.add_column(header=header)
else:
header._index = len(self.columns)
append_column(header)
@classmethod
def grid(
cls,
*headers: Union[Column, str],
padding: PaddingDimensions = 0,
collapse_padding: bool = True,
pad_edge: bool = False,
expand: bool = False,
) -> "Table":
"""Get a table with no lines, headers, or footer.
Args:
*headers (Union[Column, str]): Column headers, either as a string, or :class:`~rich.table.Column` instance.
padding (PaddingDimensions, optional): Get padding around cells. Defaults to 0.
collapse_padding (bool, optional): Enable collapsing of padding around cells. Defaults to True.
pad_edge (bool, optional): Enable padding around edges of table. Defaults to False.
expand (bool, optional): Expand the table to fit the available space if ``True``, otherwise the table width will be auto-calculated. Defaults to False.
Returns:
Table: A table instance.
"""
return cls(
*headers,
box=None,
padding=padding,
collapse_padding=collapse_padding,
show_header=False,
show_footer=False,
show_edge=False,
pad_edge=pad_edge,
expand=expand,
)
@property
def expand(self) -> bool:
"""Setting a non-None self.width implies expand."""
return self._expand or self.width is not None
@expand.setter
def expand(self, expand: bool) -> None:
"""Set expand."""
self._expand = expand
@property
def _extra_width(self) -> int:
"""Get extra width to add to cell content."""
width = 0
if self.box and self.show_edge:
width += 2
if self.box:
width += len(self.columns) - 1
return width
@property
def row_count(self) -> int:
"""Get the current number of rows."""
return len(self.rows)
def get_row_style(self, console: "Console", index: int) -> StyleType:
"""Get the current row style."""
style = Style.null()
if self.row_styles:
style += console.get_style(self.row_styles[index % len(self.row_styles)])
row_style = self.rows[index].style
if row_style is not None:
style += console.get_style(row_style)
return style
def __rich_measure__(
self, console: "Console", options: "ConsoleOptions"
) -> Measurement:
max_width = options.max_width
if self.width is not None:
max_width = self.width
if max_width < 0:
return Measurement(0, 0)
extra_width = self._extra_width
max_width = sum(
self._calculate_column_widths(
console, options.update_width(max_width - extra_width)
)
)
_measure_column = self._measure_column
measurements = [
_measure_column(console, options.update_width(max_width), column)
for column in self.columns
]
minimum_width = (
sum(measurement.minimum for measurement in measurements) + extra_width
)
maximum_width = (
sum(measurement.maximum for measurement in measurements) + extra_width
if (self.width is None)
else self.width
)
measurement = Measurement(minimum_width, maximum_width)
measurement = measurement.clamp(self.min_width)
return measurement
@property
def padding(self) -> Tuple[int, int, int, int]:
"""Get cell padding."""
return self._padding
@padding.setter
def padding(self, padding: PaddingDimensions) -> "Table":
"""Set cell padding."""
self._padding = Padding.unpack(padding)
return self
def add_column(
self,
header: "RenderableType" = "",
footer: "RenderableType" = "",
*,
header_style: Optional[StyleType] = None,
highlight: Optional[bool] = None,
footer_style: Optional[StyleType] = None,
style: Optional[StyleType] = None,
justify: "JustifyMethod" = "left",
vertical: "VerticalAlignMethod" = "top",
overflow: "OverflowMethod" = "ellipsis",
width: Optional[int] = None,
min_width: Optional[int] = None,
max_width: Optional[int] = None,
ratio: Optional[int] = None,
no_wrap: bool = False,
) -> None:
"""Add a column to the table.
Args:
header (RenderableType, optional): Text or renderable for the header.
Defaults to "".
footer (RenderableType, optional): Text or renderable for the footer.
Defaults to "".
header_style (Union[str, Style], optional): Style for the header, or None for default. Defaults to None.
highlight (bool, optional): Whether to highlight the text. The default of None uses the value of the table (self) object.
footer_style (Union[str, Style], optional): Style for the footer, or None for default. Defaults to None.
style (Union[str, Style], optional): Style for the column cells, or None for default. Defaults to None.
justify (JustifyMethod, optional): Alignment for cells. Defaults to "left".
vertical (VerticalAlignMethod, optional): Vertical alignment, one of "top", "middle", or "bottom". Defaults to "top".
overflow (OverflowMethod): Overflow method: "crop", "fold", "ellipsis". Defaults to "ellipsis".
width (int, optional): Desired width of column in characters, or None to fit to contents. Defaults to None.
min_width (Optional[int], optional): Minimum width of column, or ``None`` for no minimum. Defaults to None.
max_width (Optional[int], optional): Maximum width of column, or ``None`` for no maximum. Defaults to None.
ratio (int, optional): Flexible ratio for the column (requires ``Table.expand`` or ``Table.width``). Defaults to None.
no_wrap (bool, optional): Set to ``True`` to disable wrapping of this column.
"""
column = Column(
_index=len(self.columns),
header=header,
footer=footer,
header_style=header_style or "",
highlight=highlight if highlight is not None else self.highlight,
footer_style=footer_style or "",
style=style or "",
justify=justify,
vertical=vertical,
overflow=overflow,
width=width,
min_width=min_width,
max_width=max_width,
ratio=ratio,
no_wrap=no_wrap,
)
self.columns.append(column)
def add_row(
self,
*renderables: Optional["RenderableType"],
style: Optional[StyleType] = None,
end_section: bool = False,
) -> None:
"""Add a row of renderables.
Args:
*renderables (None or renderable): Each cell in a row must be a renderable object (including str),
or ``None`` for a blank cell.
style (StyleType, optional): An optional style to apply to the entire row. Defaults to None.
end_section (bool, optional): End a section and draw a line. Defaults to False.
Raises:
errors.NotRenderableError: If you add something that can't be rendered.
"""
def add_cell(column: Column, renderable: "RenderableType") -> None:
column._cells.append(renderable)
cell_renderables: List[Optional["RenderableType"]] = list(renderables)
columns = self.columns
if len(cell_renderables) < len(columns):
cell_renderables = [
*cell_renderables,
*[None] * (len(columns) - len(cell_renderables)),
]
for index, renderable in enumerate(cell_renderables):
if index == len(columns):
column = Column(_index=index, highlight=self.highlight)
for _ in self.rows:
add_cell(column, Text(""))
self.columns.append(column)
else:
column = columns[index]
if renderable is None:
add_cell(column, "")
elif is_renderable(renderable):
add_cell(column, renderable)
else:
raise errors.NotRenderableError(
f"unable to render {type(renderable).__name__}; a string or other renderable object is required"
)
self.rows.append(Row(style=style, end_section=end_section))
def add_section(self) -> None:
"""Add a new section (draw a line after current row)."""
if self.rows:
self.rows[-1].end_section = True
def __rich_console__(
self, console: "Console", options: "ConsoleOptions"
) -> "RenderResult":
if not self.columns:
yield Segment("\n")
return
max_width = options.max_width
if self.width is not None:
max_width = self.width
extra_width = self._extra_width
widths = self._calculate_column_widths(
console, options.update_width(max_width - extra_width)
)
table_width = sum(widths) + extra_width
render_options = options.update(
width=table_width, highlight=self.highlight, height=None
)
def render_annotation(
text: TextType, style: StyleType, justify: "JustifyMethod" = "center"
) -> "RenderResult":
render_text = (
console.render_str(text, style=style, highlight=False)
if isinstance(text, str)
else text
)
return console.render(
render_text, options=render_options.update(justify=justify)
)
if self.title:
yield from render_annotation(
self.title,
style=Style.pick_first(self.title_style, "table.title"),
justify=self.title_justify,
)
yield from self._render(console, render_options, widths)
if self.caption:
yield from render_annotation(
self.caption,
style=Style.pick_first(self.caption_style, "table.caption"),
justify=self.caption_justify,
)
def _calculate_column_widths(
self, console: "Console", options: "ConsoleOptions"
) -> List[int]:
"""Calculate the widths of each column, including padding, not including borders."""
max_width = options.max_width
columns = self.columns
width_ranges = [
self._measure_column(console, options, column) for column in columns
]
widths = [_range.maximum or 1 for _range in width_ranges]
get_padding_width = self._get_padding_width
extra_width = self._extra_width
if self.expand:
ratios = [col.ratio or 0 for col in columns if col.flexible]
if any(ratios):
fixed_widths = [
0 if column.flexible else _range.maximum
for _range, column in zip(width_ranges, columns)
]
flex_minimum = [
(column.width or 1) + get_padding_width(column._index)
for column in columns
if column.flexible
]
flexible_width = max_width - sum(fixed_widths)
flex_widths = ratio_distribute(flexible_width, ratios, flex_minimum)
iter_flex_widths = iter(flex_widths)
for index, column in enumerate(columns):
if column.flexible:
widths[index] = fixed_widths[index] + next(iter_flex_widths)
table_width = sum(widths)
if table_width > max_width:
widths = self._collapse_widths(
widths,
[(column.width is None and not column.no_wrap) for column in columns],
max_width,
)
table_width = sum(widths)
# last resort, reduce columns evenly
if table_width > max_width:
excess_width = table_width - max_width
widths = ratio_reduce(excess_width, [1] * len(widths), widths, widths)
table_width = sum(widths)
width_ranges = [
self._measure_column(console, options.update_width(width), column)
for width, column in zip(widths, columns)
]
widths = [_range.maximum or 0 for _range in width_ranges]
if (table_width < max_width and self.expand) or (
self.min_width is not None and table_width < (self.min_width - extra_width)
):
_max_width = (
max_width
if self.min_width is None
else min(self.min_width - extra_width, max_width)
)
pad_widths = ratio_distribute(_max_width - table_width, widths)
widths = [_width + pad for _width, pad in zip(widths, pad_widths)]
return widths
@classmethod
def _collapse_widths(
cls, widths: List[int], wrapable: List[bool], max_width: int
) -> List[int]:
"""Reduce widths so that the total is under max_width.
Args:
widths (List[int]): List of widths.
wrapable (List[bool]): List of booleans that indicate if a column may shrink.
max_width (int): Maximum width to reduce to.
Returns:
List[int]: A new list of widths.
"""
total_width = sum(widths)
excess_width = total_width - max_width
if any(wrapable):
while total_width and excess_width > 0:
max_column = max(
width for width, allow_wrap in zip(widths, wrapable) if allow_wrap
)
second_max_column = max(
width if allow_wrap and width != max_column else 0
for width, allow_wrap in zip(widths, wrapable)
)
column_difference = max_column - second_max_column
ratios = [
(1 if (width == max_column and allow_wrap) else 0)
for width, allow_wrap in zip(widths, wrapable)
]
if not any(ratios) or not column_difference:
break
max_reduce = [min(excess_width, column_difference)] * len(widths)
widths = ratio_reduce(excess_width, ratios, max_reduce, widths)
total_width = sum(widths)
excess_width = total_width - max_width
return widths
def _get_cells(
self, console: "Console", column_index: int, column: Column
) -> Iterable[_Cell]:
"""Get all the cells with padding and optional header."""
collapse_padding = self.collapse_padding
pad_edge = self.pad_edge
padding = self.padding
any_padding = any(padding)
first_column = column_index == 0
last_column = column_index == len(self.columns) - 1
_padding_cache: Dict[Tuple[bool, bool], Tuple[int, int, int, int]] = {}
def get_padding(first_row: bool, last_row: bool) -> Tuple[int, int, int, int]:
cached = _padding_cache.get((first_row, last_row))
if cached:
return cached
top, right, bottom, left = padding
if collapse_padding:
if not first_column:
left = max(0, left - right)
if not last_row:
bottom = max(0, top - bottom)
if not pad_edge:
if first_column:
left = 0
if last_column:
right = 0
if first_row:
top = 0
if last_row:
bottom = 0
_padding = (top, right, bottom, left)
_padding_cache[(first_row, last_row)] = _padding
return _padding
raw_cells: List[Tuple[StyleType, "RenderableType"]] = []
_append = raw_cells.append
get_style = console.get_style
if self.show_header:
header_style = get_style(self.header_style or "") + get_style(
column.header_style
)
_append((header_style, column.header))
cell_style = get_style(column.style or "")
for cell in column.cells:
_append((cell_style, cell))
if self.show_footer:
footer_style = get_style(self.footer_style or "") + get_style(
column.footer_style
)
_append((footer_style, column.footer))
if any_padding:
_Padding = Padding
for first, last, (style, renderable) in loop_first_last(raw_cells):
yield _Cell(
style,
_Padding(renderable, get_padding(first, last)),
getattr(renderable, "vertical", None) or column.vertical,
)
else:
for style, renderable in raw_cells:
yield _Cell(
style,
renderable,
getattr(renderable, "vertical", None) or column.vertical,
)
def _get_padding_width(self, column_index: int) -> int:
"""Get extra width from padding."""
_, pad_right, _, pad_left = self.padding
if self.collapse_padding:
if column_index > 0:
pad_left = max(0, pad_left - pad_right)
return pad_left + pad_right
def _measure_column(
self,
console: "Console",
options: "ConsoleOptions",
column: Column,
) -> Measurement:
"""Get the minimum and maximum width of the column."""
max_width = options.max_width
if max_width < 1:
return Measurement(0, 0)
padding_width = self._get_padding_width(column._index)
if column.width is not None:
# Fixed width column
return Measurement(
column.width + padding_width, column.width + padding_width
).with_maximum(max_width)
# Flexible column, we need to measure contents
min_widths: List[int] = []
max_widths: List[int] = []
append_min = min_widths.append
append_max = max_widths.append
get_render_width = Measurement.get
for cell in self._get_cells(console, column._index, column):
_min, _max = get_render_width(console, options, cell.renderable)
append_min(_min)
append_max(_max)
measurement = Measurement(
max(min_widths) if min_widths else 1,
max(max_widths) if max_widths else max_width,
).with_maximum(max_width)
measurement = measurement.clamp(
None if column.min_width is None else column.min_width + padding_width,
None if column.max_width is None else column.max_width + padding_width,
)
return measurement
def _render(
self, console: "Console", options: "ConsoleOptions", widths: List[int]
) -> "RenderResult":
table_style = console.get_style(self.style or "")
border_style = table_style + console.get_style(self.border_style or "")
_column_cells = (
self._get_cells(console, column_index, column)
for column_index, column in enumerate(self.columns)
)
row_cells: List[Tuple[_Cell, ...]] = list(zip(*_column_cells))
_box = (
self.box.substitute(
options, safe=pick_bool(self.safe_box, console.safe_box)
)
if self.box
else None
)
_box = _box.get_plain_headed_box() if _box and not self.show_header else _box
new_line = Segment.line()
columns = self.columns
show_header = self.show_header
show_footer = self.show_footer
show_edge = self.show_edge
show_lines = self.show_lines
leading = self.leading
_Segment = Segment
if _box:
box_segments = [
(
_Segment(_box.head_left, border_style),
_Segment(_box.head_right, border_style),
_Segment(_box.head_vertical, border_style),
),
(
_Segment(_box.mid_left, border_style),
_Segment(_box.mid_right, border_style),
_Segment(_box.mid_vertical, border_style),
),
(
_Segment(_box.foot_left, border_style),
_Segment(_box.foot_right, border_style),
_Segment(_box.foot_vertical, border_style),
),
]
if show_edge:
yield _Segment(_box.get_top(widths), border_style)
yield new_line
else:
box_segments = []
get_row_style = self.get_row_style
get_style = console.get_style
for index, (first, last, row_cell) in enumerate(loop_first_last(row_cells)):
header_row = first and show_header
footer_row = last and show_footer
row = (
self.rows[index - show_header]
if (not header_row and not footer_row)
else None
)
max_height = 1
cells: List[List[List[Segment]]] = []
if header_row or footer_row:
row_style = Style.null()
else:
row_style = get_style(
get_row_style(console, index - 1 if show_header else index)
)
for width, cell, column in zip(widths, row_cell, columns):
render_options = options.update(
width=width,
justify=column.justify,
no_wrap=column.no_wrap,
overflow=column.overflow,
height=None,
highlight=column.highlight,
)
lines = console.render_lines(
cell.renderable,
render_options,
style=get_style(cell.style) + row_style,
)
max_height = max(max_height, len(lines))
cells.append(lines)
row_height = max(len(cell) for cell in cells)
def align_cell(
cell: List[List[Segment]],
vertical: "VerticalAlignMethod",
width: int,
style: Style,
) -> List[List[Segment]]:
if header_row:
vertical = "bottom"
elif footer_row:
vertical = "top"
if vertical == "top":
return _Segment.align_top(cell, width, row_height, style)
elif vertical == "middle":
return _Segment.align_middle(cell, width, row_height, style)
return _Segment.align_bottom(cell, width, row_height, style)
cells[:] = [
_Segment.set_shape(
align_cell(
cell,
_cell.vertical,
width,
get_style(_cell.style) + row_style,
),
width,
max_height,
)
for width, _cell, cell, column in zip(widths, row_cell, cells, columns)
]
if _box:
if last and show_footer:
yield _Segment(
_box.get_row(widths, "foot", edge=show_edge), border_style
)
yield new_line
left, right, _divider = box_segments[0 if first else (2 if last else 1)]
# If the column divider is whitespace also style it with the row background
divider = (
_divider
if _divider.text.strip()
else _Segment(
_divider.text, row_style.background_style + _divider.style
)
)
for line_no in range(max_height):
if show_edge:
yield left
for last_cell, rendered_cell in loop_last(cells):
yield from rendered_cell[line_no]
if not last_cell:
yield divider
if show_edge:
yield right
yield new_line
else:
for line_no in range(max_height):
for rendered_cell in cells:
yield from rendered_cell[line_no]
yield new_line
if _box and first and show_header:
yield _Segment(
_box.get_row(widths, "head", edge=show_edge), border_style
)
yield new_line
end_section = row and row.end_section
if _box and (show_lines or leading or end_section):
if (
not last
and not (show_footer and index >= len(row_cells) - 2)
and not (show_header and header_row)
):
if leading:
yield _Segment(
_box.get_row(widths, "mid", edge=show_edge) * leading,
border_style,
)
else:
yield _Segment(
_box.get_row(widths, "row", edge=show_edge), border_style
)
yield new_line
if _box and show_edge:
yield _Segment(_box.get_bottom(widths), border_style)
yield new_line
if __name__ == "__main__": # pragma: no cover
from pip._vendor.rich.console import Console
from pip._vendor.rich.highlighter import ReprHighlighter
from ._timer import timer
with timer("Table render"):
table = Table(
title="Star Wars Movies",
caption="Rich example table",
caption_justify="right",
)
table.add_column(
"Released", header_style="bright_cyan", style="cyan", no_wrap=True
)
table.add_column("Title", style="magenta")
table.add_column("Box Office", justify="right", style="green")
table.add_row(
"Dec 20, 2019",
"Star Wars: The Rise of Skywalker",
"$952,110,690",
)
table.add_row("May 25, 2018", "Solo: A Star Wars Story", "$393,151,347")
table.add_row(
"Dec 15, 2017",
"Star Wars Ep. V111: The Last Jedi",
"$1,332,539,889",
style="on black",
end_section=True,
)
table.add_row(
"Dec 16, 2016",
"Rogue One: A Star Wars Story",
"$1,332,439,889",
)
def header(text: str) -> None:
console.print()
console.rule(highlight(text))
console.print()
console = Console()
highlight = ReprHighlighter()
header("Example Table")
console.print(table, justify="center")
table.expand = True
header("expand=True")
console.print(table)
table.width = 50
header("width=50")
console.print(table, justify="center")
table.width = None
table.expand = False
table.row_styles = ["dim", "none"]
header("row_styles=['dim', 'none']")
console.print(table, justify="center")
table.width = None
table.expand = False
table.row_styles = ["dim", "none"]
table.leading = 1
header("leading=1, row_styles=['dim', 'none']")
console.print(table, justify="center")
table.width = None
table.expand = False
table.row_styles = ["dim", "none"]
table.show_lines = True
table.leading = 0
header("show_lines=True, row_styles=['dim', 'none']")
console.print(table, justify="center")
| Table |
python | openai__openai-python | src/openai/types/responses/response_input_item.py | {
"start": 9983,
"end": 10661
} | class ____(BaseModel):
call_id: str
"""The unique ID of the apply patch tool call generated by the model."""
status: Literal["completed", "failed"]
"""The status of the apply patch tool call output. One of `completed` or `failed`."""
type: Literal["apply_patch_call_output"]
"""The type of the item. Always `apply_patch_call_output`."""
id: Optional[str] = None
"""The unique ID of the apply patch tool call output.
Populated when this item is returned via API.
"""
output: Optional[str] = None
"""
Optional human-readable log text from the apply patch tool (e.g., patch results
or errors).
"""
| ApplyPatchCallOutput |
python | pytorch__pytorch | torch/_dynamo/variables/lists.py | {
"start": 58652,
"end": 61353
} | class ____(VariableTracker):
def __init__(
self,
items: Sequence[VariableTracker],
tx: Optional["InstructionTranslator"] = None,
**kwargs: Any,
) -> None:
items_to_map = items
start, stop, step = [variables.ConstantVariable.create(None)] * 3
if len(items_to_map) == 1:
(stop,) = items_to_map
elif len(items_to_map) == 2:
start, stop = items_to_map
elif len(items_to_map) == 3:
start, stop, step = items_to_map
else:
raise AssertionError
# Convert TensorVariable to SymIntVariable by calling .item()
# This decomposes a[:t] to u=t.item(); a[:u] at the dynamo level
if isinstance(start, variables.TensorVariable):
assert tx is not None, (
"tx is required when slice indices are TensorVariables"
)
start = start.call_method(tx, "item", [], {})
if isinstance(stop, variables.TensorVariable):
assert tx is not None, (
"tx is required when slice indices are TensorVariables"
)
stop = stop.call_method(tx, "item", [], {})
if isinstance(step, variables.TensorVariable):
assert tx is not None, (
"tx is required when slice indices are TensorVariables"
)
step = step.call_method(tx, "item", [], {})
self.items = (start, stop, step)
super().__init__(**kwargs)
def debug_repr(self) -> str:
return "slice(" + ", ".join(i.debug_repr() for i in self.items) + ")"
def as_proxy(self) -> slice:
return slice(*[x.as_proxy() for x in self.items])
def python_type(self) -> type:
return slice
def as_python_constant(self) -> slice:
return slice(*[guard_if_dyn(x) for x in self.items])
def reconstruct(self, codegen: "PyCodegen") -> None:
codegen.foreach(self.items)
codegen.append_output(create_instruction("BUILD_SLICE", arg=len(self.items)))
def var_getattr(self, tx: "InstructionTranslator", name: str) -> VariableTracker:
if name in cmp_name_to_op_mapping:
return variables.GetAttrVariable(self, name)
fields = ["start", "stop", "step"]
if name not in fields:
unimplemented(
gb_type="Unsupported attribute for slice() object",
context=f"var_getattr {self} {name}",
explanation=f"Expected attribute to be one of {','.join(fields)} "
f"but got {name}",
hints=[*graph_break_hints.USER_ERROR],
)
return self.items[fields.index(name)]
| SliceVariable |
python | tensorflow__tensorflow | tensorflow/python/keras/engine/keras_tensor.py | {
"start": 21675,
"end": 22463
} | class ____(KerasTensor):
"""KerasTensor that represents legacy register_symbolic_tensor_type."""
def __init__(self, user_registered_symbolic_object):
x = user_registered_symbolic_object
self._user_registered_symbolic_object = x
type_spec = UserRegisteredSpec(x.shape, x.dtype)
name = getattr(x, 'name', None)
super(UserRegisteredTypeKerasTensor, self).__init__(type_spec, name)
@classmethod
def from_tensor(cls, tensor):
return cls(tensor)
@classmethod
def from_type_spec(cls, type_spec, name=None):
raise NotImplementedError('You cannot instantiate a KerasTensor '
'directly from TypeSpec: %s' % type_spec)
def _to_placeholder(self):
return self._user_registered_symbolic_object
| UserRegisteredTypeKerasTensor |
python | anthropics__anthropic-sdk-python | src/anthropic/types/beta/messages/beta_message_batch_expired_result.py | {
"start": 207,
"end": 284
} | class ____(BaseModel):
type: Literal["expired"]
| BetaMessageBatchExpiredResult |
python | wandb__wandb | wandb/vendor/pygments/lexers/configs.py | {
"start": 4729,
"end": 7539
} | class ____(RegexLexer):
"""
For Linux-style Kconfig files.
.. versionadded:: 1.6
"""
name = 'Kconfig'
aliases = ['kconfig', 'menuconfig', 'linux-config', 'kernel-config']
# Adjust this if new kconfig file names appear in your environment
filenames = ['Kconfig', '*Config.in*', 'external.in*',
'standard-modules.in']
mimetypes = ['text/x-kconfig']
# No re.MULTILINE, indentation-aware help text needs line-by-line handling
flags = 0
def call_indent(level):
# If indentation >= {level} is detected, enter state 'indent{level}'
return (_rx_indent(level), String.Doc, 'indent%s' % level)
def do_indent(level):
# Print paragraphs of indentation level >= {level} as String.Doc,
# ignoring blank lines. Then return to 'root' state.
return [
(_rx_indent(level), String.Doc),
(r'\s*\n', Text),
default('#pop:2')
]
tokens = {
'root': [
(r'\s+', Text),
(r'#.*?\n', Comment.Single),
(words((
'mainmenu', 'config', 'menuconfig', 'choice', 'endchoice',
'comment', 'menu', 'endmenu', 'visible if', 'if', 'endif',
'source', 'prompt', 'select', 'depends on', 'default',
'range', 'option'), suffix=r'\b'),
Keyword),
(r'(---help---|help)[\t ]*\n', Keyword, 'help'),
(r'(bool|tristate|string|hex|int|defconfig_list|modules|env)\b',
Name.Builtin),
(r'[!=&|]', Operator),
(r'[()]', Punctuation),
(r'[0-9]+', Number.Integer),
(r"'(''|[^'])*'", String.Single),
(r'"(""|[^"])*"', String.Double),
(r'\S+', Text),
],
# Help text is indented, multi-line and ends when a lower indentation
# level is detected.
'help': [
# Skip blank lines after help token, if any
(r'\s*\n', Text),
# Determine the first help line's indentation level heuristically(!).
# Attention: this is not perfect, but works for 99% of "normal"
# indentation schemes up to a max. indentation level of 7.
call_indent(7),
call_indent(6),
call_indent(5),
call_indent(4),
call_indent(3),
call_indent(2),
call_indent(1),
default('#pop'), # for incomplete help sections without text
],
# Handle text for indentation levels 7 to 1
'indent7': do_indent(7),
'indent6': do_indent(6),
'indent5': do_indent(5),
'indent4': do_indent(4),
'indent3': do_indent(3),
'indent2': do_indent(2),
'indent1': do_indent(1),
}
| KconfigLexer |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.