language
stringclasses 1
value | repo
stringclasses 346
values | path
stringlengths 6
201
| class_span
dict | source
stringlengths 21
2.38M
| target
stringlengths 1
96
|
|---|---|---|---|---|---|
python
|
kamyu104__LeetCode-Solutions
|
Python/first-unique-number.py
|
{
"start": 106,
"end": 810
}
|
class ____(object):
def __init__(self, nums):
"""
:type nums: List[int]
"""
self.__q = collections.OrderedDict()
self.__dup = set()
for num in nums:
self.add(num)
def showFirstUnique(self):
"""
:rtype: int
"""
if self.__q:
return next(iter(self.__q))
return -1
def add(self, value):
"""
:type value: int
:rtype: None
"""
if value not in self.__dup and value not in self.__q:
self.__q[value] = None
return
if value in self.__q:
self.__q.pop(value)
self.__dup.add(value)
|
FirstUnique
|
python
|
huggingface__transformers
|
src/transformers/models/canine/modeling_canine.py
|
{
"start": 9907,
"end": 12236
}
|
class ____(nn.Module):
"""
Project representations from hidden_size*2 back to hidden_size across a window of w = config.upsampling_kernel_size
characters.
"""
def __init__(self, config):
super().__init__()
self.config = config
self.conv = nn.Conv1d(
in_channels=config.hidden_size * 2,
out_channels=config.hidden_size,
kernel_size=config.upsampling_kernel_size,
stride=1,
)
self.activation = ACT2FN[config.hidden_act]
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(
self,
inputs: torch.Tensor,
final_seq_char_positions: Optional[torch.Tensor] = None,
) -> torch.Tensor:
# inputs has shape [batch, mol_seq, molecule_hidden_size+char_hidden_final]
# we transpose it to be [batch, molecule_hidden_size+char_hidden_final, mol_seq]
inputs = torch.transpose(inputs, 1, 2)
# PyTorch < 1.9 does not support padding="same" (which is used in the original implementation),
# so we pad the tensor manually before passing it to the conv layer
# based on https://github.com/google-research/big_transfer/blob/49afe42338b62af9fbe18f0258197a33ee578a6b/bit_tf2/models.py#L36-L38
pad_total = self.config.upsampling_kernel_size - 1
pad_beg = pad_total // 2
pad_end = pad_total - pad_beg
pad = nn.ConstantPad1d((pad_beg, pad_end), 0)
# `result`: shape (batch_size, char_seq_len, hidden_size)
result = self.conv(pad(inputs))
result = torch.transpose(result, 1, 2)
result = self.activation(result)
result = self.LayerNorm(result)
result = self.dropout(result)
final_char_seq = result
if final_seq_char_positions is not None:
# Limit transformer query seq and attention mask to these character
# positions to greatly reduce the compute cost. Typically, this is just
# done for the MLM training task.
# TODO add support for MLM
raise NotImplementedError("CanineForMaskedLM is currently not supported")
else:
query_seq = final_char_seq
return query_seq
|
ConvProjection
|
python
|
weaviate__weaviate-python-client
|
weaviate/collections/batch/base.py
|
{
"start": 32005,
"end": 32923
}
|
class ____:
def __init__(self, send: threading.Thread, recv: threading.Thread):
self.send = send
self.recv = recv
self.__started_recv = False
self.__started_send = False
def start_recv(self) -> None:
if not self.__started_recv:
self.recv.start()
self.__started_recv = True
def start_send(self) -> None:
if not self.__started_send:
self.send.start()
self.__started_send = True
def is_alive(self) -> bool:
"""Check if the background threads are still alive."""
return self.send_alive() or self.recv_alive()
def send_alive(self) -> bool:
"""Check if the send background thread is still alive."""
return self.send.is_alive()
def recv_alive(self) -> bool:
"""Check if the recv background thread is still alive."""
return self.recv.is_alive()
|
_BgThreads
|
python
|
django__django
|
tests/middleware_exceptions/tests.py
|
{
"start": 6347,
"end": 6673
}
|
class ____:
def __init__(self, get_response):
raise MiddlewareNotUsed("spam eggs")
def process_request(self, request):
pass
@override_settings(
DEBUG=True,
ROOT_URLCONF="middleware_exceptions.urls",
MIDDLEWARE=["django.middleware.common.CommonMiddleware"],
)
|
MyMiddlewareWithExceptionMessage
|
python
|
TheAlgorithms__Python
|
data_structures/binary_tree/segment_tree.py
|
{
"start": 14,
"end": 3276
}
|
class ____:
def __init__(self, a):
self.A = a
self.N = len(self.A)
self.st = [0] * (
4 * self.N
) # approximate the overall size of segment tree with array N
if self.N:
self.build(1, 0, self.N - 1)
def left(self, idx):
"""
Returns the left child index for a given index in a binary tree.
>>> s = SegmentTree([1, 2, 3])
>>> s.left(1)
2
>>> s.left(2)
4
"""
return idx * 2
def right(self, idx):
"""
Returns the right child index for a given index in a binary tree.
>>> s = SegmentTree([1, 2, 3])
>>> s.right(1)
3
>>> s.right(2)
5
"""
return idx * 2 + 1
def build(self, idx, left, right):
if left == right:
self.st[idx] = self.A[left]
else:
mid = (left + right) // 2
self.build(self.left(idx), left, mid)
self.build(self.right(idx), mid + 1, right)
self.st[idx] = max(self.st[self.left(idx)], self.st[self.right(idx)])
def update(self, a, b, val):
"""
Update the values in the segment tree in the range [a,b] with the given value.
>>> s = SegmentTree([1, 2, 3, 4, 5])
>>> s.update(2, 4, 10)
True
>>> s.query(1, 5)
10
"""
return self.update_recursive(1, 0, self.N - 1, a - 1, b - 1, val)
def update_recursive(self, idx, left, right, a, b, val):
"""
update(1, 1, N, a, b, v) for update val v to [a,b]
"""
if right < a or left > b:
return True
if left == right:
self.st[idx] = val
return True
mid = (left + right) // 2
self.update_recursive(self.left(idx), left, mid, a, b, val)
self.update_recursive(self.right(idx), mid + 1, right, a, b, val)
self.st[idx] = max(self.st[self.left(idx)], self.st[self.right(idx)])
return True
def query(self, a, b):
"""
Query the maximum value in the range [a,b].
>>> s = SegmentTree([1, 2, 3, 4, 5])
>>> s.query(1, 3)
3
>>> s.query(1, 5)
5
"""
return self.query_recursive(1, 0, self.N - 1, a - 1, b - 1)
def query_recursive(self, idx, left, right, a, b):
"""
query(1, 1, N, a, b) for query max of [a,b]
"""
if right < a or left > b:
return -math.inf
if left >= a and right <= b:
return self.st[idx]
mid = (left + right) // 2
q1 = self.query_recursive(self.left(idx), left, mid, a, b)
q2 = self.query_recursive(self.right(idx), mid + 1, right, a, b)
return max(q1, q2)
def show_data(self):
show_list = []
for i in range(1, self.N + 1):
show_list += [self.query(i, i)]
print(show_list)
if __name__ == "__main__":
A = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8]
N = 15
segt = SegmentTree(A)
print(segt.query(4, 6))
print(segt.query(7, 11))
print(segt.query(7, 12))
segt.update(1, 3, 111)
print(segt.query(1, 15))
segt.update(7, 8, 235)
segt.show_data()
|
SegmentTree
|
python
|
ray-project__ray
|
python/ray/util/serialization.py
|
{
"start": 1247,
"end": 2009
}
|
class ____:
# NOTE(simon): Used for registering custom serializers. We cannot directly
# use the SerializationContext because it requires Ray workers. Please
# make sure to keep the API consistent.
def _register_cloudpickle_reducer(self, cls, reducer):
pickle.CloudPickler.dispatch[cls] = reducer
def _unregister_cloudpickle_reducer(self, cls):
pickle.CloudPickler.dispatch.pop(cls, None)
def _register_cloudpickle_serializer(
self, cls, custom_serializer, custom_deserializer
):
def _CloudPicklerReducer(obj):
return custom_deserializer, (custom_serializer(obj),)
# construct a reducer
pickle.CloudPickler.dispatch[cls] = _CloudPicklerReducer
|
StandaloneSerializationContext
|
python
|
numba__llvmlite
|
llvmlite/binding/value.py
|
{
"start": 1685,
"end": 11701
}
|
class ____(ffi.ObjectRef):
"""A weak reference to a LLVM value.
"""
def __init__(self, ptr, kind, parents):
self._kind = kind
self._parents = parents
ffi.ObjectRef.__init__(self, ptr)
def __str__(self):
with ffi.OutputString() as outstr:
ffi.lib.LLVMPY_PrintValueToString(self, outstr)
return str(outstr)
@property
def module(self):
"""
The module this function or global variable value was obtained from.
"""
return self._parents.get('module')
@property
def function(self):
"""
The function this argument or basic block value was obtained from.
"""
return self._parents.get('function')
@property
def block(self):
"""
The block this instruction value was obtained from.
"""
return self._parents.get('block')
@property
def instruction(self):
"""
The instruction this operand value was obtained from.
"""
return self._parents.get('instruction')
@property
def is_global(self):
return self._kind == 'global'
@property
def is_function(self):
return self._kind == 'function'
@property
def is_block(self):
return self._kind == 'block'
@property
def is_argument(self):
return self._kind == 'argument'
@property
def is_instruction(self):
return self._kind == 'instruction'
@property
def is_operand(self):
return self._kind == 'operand'
@property
def is_constant(self):
return bool(ffi.lib.LLVMPY_IsConstant(self))
@property
def value_kind(self):
return ValueKind(ffi.lib.LLVMPY_GetValueKind(self))
@property
def name(self):
return _decode_string(ffi.lib.LLVMPY_GetValueName(self))
@name.setter
def name(self, val):
ffi.lib.LLVMPY_SetValueName(self, _encode_string(val))
@property
def linkage(self):
return Linkage(ffi.lib.LLVMPY_GetLinkage(self))
@linkage.setter
def linkage(self, value):
if not isinstance(value, Linkage):
value = Linkage[value]
ffi.lib.LLVMPY_SetLinkage(self, value)
@property
def visibility(self):
return Visibility(ffi.lib.LLVMPY_GetVisibility(self))
@visibility.setter
def visibility(self, value):
if not isinstance(value, Visibility):
value = Visibility[value]
ffi.lib.LLVMPY_SetVisibility(self, value)
@property
def storage_class(self):
return StorageClass(ffi.lib.LLVMPY_GetDLLStorageClass(self))
@storage_class.setter
def storage_class(self, value):
if not isinstance(value, StorageClass):
value = StorageClass[value]
ffi.lib.LLVMPY_SetDLLStorageClass(self, value)
def add_function_attribute(self, attr):
"""Only works on function value
Parameters
-----------
attr : str
attribute name
"""
if not self.is_function:
raise ValueError('expected function value, got %s' % (self._kind,))
attrname = str(attr)
attrval = ffi.lib.LLVMPY_GetEnumAttributeKindForName(
_encode_string(attrname), len(attrname))
if attrval == 0:
raise ValueError('no such attribute {!r}'.format(attrname))
ffi.lib.LLVMPY_AddFunctionAttr(self, attrval)
@property
def type(self):
"""
This value's LLVM type.
"""
# XXX what does this return?
return TypeRef(ffi.lib.LLVMPY_TypeOf(self))
@property
def global_value_type(self):
"""
Uses ``LLVMGlobalGetValueType()``.
Needed for opaque pointers in globals.
> For globals, use getValueType().
See https://llvm.org/docs/OpaquePointers.html#migration-instructions
"""
assert self.is_global or self.is_function
return TypeRef(ffi.lib.LLVMPY_GlobalGetValueType(self))
@property
def is_declaration(self):
"""
Whether this value (presumably global) is defined in the current
module.
"""
if not (self.is_global or self.is_function):
raise ValueError('expected global or function value, got %s'
% (self._kind,))
return ffi.lib.LLVMPY_IsDeclaration(self)
@property
def attributes(self):
"""
Return an iterator over this value's attributes.
The iterator will yield a string for each attribute.
"""
itr = iter(())
if self.is_function:
it = ffi.lib.LLVMPY_FunctionAttributesIter(self)
itr = _AttributeListIterator(it)
elif self.is_instruction:
if self.opcode == 'call':
it = ffi.lib.LLVMPY_CallInstAttributesIter(self)
itr = _AttributeListIterator(it)
elif self.opcode == 'invoke':
it = ffi.lib.LLVMPY_InvokeInstAttributesIter(self)
itr = _AttributeListIterator(it)
elif self.is_global:
it = ffi.lib.LLVMPY_GlobalAttributesIter(self)
itr = _AttributeSetIterator(it)
elif self.is_argument:
it = ffi.lib.LLVMPY_ArgumentAttributesIter(self)
itr = _AttributeSetIterator(it)
return itr
@property
def blocks(self):
"""
Return an iterator over this function's blocks.
The iterator will yield a ValueRef for each block.
"""
if not self.is_function:
raise ValueError('expected function value, got %s' % (self._kind,))
it = ffi.lib.LLVMPY_FunctionBlocksIter(self)
parents = self._parents.copy()
parents.update(function=self)
return _BlocksIterator(it, parents)
@property
def arguments(self):
"""
Return an iterator over this function's arguments.
The iterator will yield a ValueRef for each argument.
"""
if not self.is_function:
raise ValueError('expected function value, got %s' % (self._kind,))
it = ffi.lib.LLVMPY_FunctionArgumentsIter(self)
parents = self._parents.copy()
parents.update(function=self)
return _ArgumentsIterator(it, parents)
@property
def instructions(self):
"""
Return an iterator over this block's instructions.
The iterator will yield a ValueRef for each instruction.
"""
if not self.is_block:
raise ValueError('expected block value, got %s' % (self._kind,))
it = ffi.lib.LLVMPY_BlockInstructionsIter(self)
parents = self._parents.copy()
parents.update(block=self)
return _InstructionsIterator(it, parents)
@property
def operands(self):
"""
Return an iterator over this instruction's operands.
The iterator will yield a ValueRef for each operand.
"""
if not self.is_instruction:
raise ValueError('expected instruction value, got %s'
% (self._kind,))
it = ffi.lib.LLVMPY_InstructionOperandsIter(self)
parents = self._parents.copy()
parents.update(instruction=self)
return _OperandsIterator(it, parents)
@property
def opcode(self):
if not self.is_instruction:
raise ValueError('expected instruction value, got %s'
% (self._kind,))
return ffi.ret_string(ffi.lib.LLVMPY_GetOpcodeName(self))
@property
def incoming_blocks(self):
"""
Return an iterator over this phi instruction's incoming blocks.
The iterator will yield a ValueRef for each block.
"""
if not self.is_instruction or self.opcode != 'phi':
raise ValueError('expected phi instruction value, got %s'
% (self._kind,))
it = ffi.lib.LLVMPY_PhiIncomingBlocksIter(self)
parents = self._parents.copy()
parents.update(instruction=self)
return _IncomingBlocksIterator(it, parents)
def get_constant_value(self, signed_int=False, round_fp=False):
"""
Return the constant value, either as a literal (when supported)
or as a string.
Parameters
-----------
signed_int : bool
if True and the constant is an integer, returns a signed version
round_fp : bool
if True and the constant is a floating point value, rounds the
result upon accuracy loss (e.g., when querying an fp128 value).
By default, raises an exception on accuracy loss
"""
if not self.is_constant:
raise ValueError('expected constant value, got %s'
% (self._kind,))
if self.value_kind == ValueKind.constant_int:
# Python integers are also arbitrary-precision
little_endian = c_bool(False)
words = ffi.lib.LLVMPY_GetConstantIntNumWords(self)
ptr = ffi.lib.LLVMPY_GetConstantIntRawValue(
self, byref(little_endian))
asbytes = bytes(cast(ptr, POINTER(c_uint64 * words)).contents)
return int.from_bytes(
asbytes,
('little' if little_endian.value else 'big'),
signed=signed_int,
)
elif self.value_kind == ValueKind.constant_fp:
# Convert floating-point values to double-precision (Python float)
accuracy_loss = c_bool(False)
value = ffi.lib.LLVMPY_GetConstantFPValue(self,
byref(accuracy_loss))
if accuracy_loss.value and not round_fp:
raise ValueError(
'Accuracy loss encountered in conversion of constant '
f'value {str(self)}')
return value
# Otherwise, return the IR string
return str(self)
|
ValueRef
|
python
|
pypa__pip
|
src/pip/_internal/index/collector.py
|
{
"start": 6005,
"end": 6345
}
|
class ____:
def __init__(self, page: IndexContent) -> None:
assert page.cache_link_parsing
self.page = page
def __eq__(self, other: object) -> bool:
return isinstance(other, type(self)) and self.page.url == other.page.url
def __hash__(self) -> int:
return hash(self.page.url)
|
CacheablePageContent
|
python
|
pytorch__pytorch
|
torch/_dynamo/testing.py
|
{
"start": 8982,
"end": 9277
}
|
class ____:
def __init__(self) -> None:
self.graphs: list[torch.fx.GraphModule] = []
def __call__(
self, gm: torch.fx.GraphModule, example_inputs: list[torch.Tensor]
) -> Callable[..., Any]:
self.graphs.append(gm)
return gm.forward
|
EagerAndRecordGraphs
|
python
|
wandb__wandb
|
wandb/vendor/pygments/lexers/configs.py
|
{
"start": 1612,
"end": 3033
}
|
class ____(RegexLexer):
"""
Lexer for `Windows Registry
<http://en.wikipedia.org/wiki/Windows_Registry#.REG_files>`_ files produced
by regedit.
.. versionadded:: 1.6
"""
name = 'reg'
aliases = ['registry']
filenames = ['*.reg']
mimetypes = ['text/x-windows-registry']
tokens = {
'root': [
(r'Windows Registry Editor.*', Text),
(r'\s+', Text),
(r'[;#].*', Comment.Single),
(r'(\[)(-?)(HKEY_[A-Z_]+)(.*?\])$',
bygroups(Keyword, Operator, Name.Builtin, Keyword)),
# String keys, which obey somewhat normal escaping
(r'("(?:\\"|\\\\|[^"])+")([ \t]*)(=)([ \t]*)',
bygroups(Name.Attribute, Text, Operator, Text),
'value'),
# Bare keys (includes @)
(r'(.*?)([ \t]*)(=)([ \t]*)',
bygroups(Name.Attribute, Text, Operator, Text),
'value'),
],
'value': [
(r'-', Operator, '#pop'), # delete value
(r'(dword|hex(?:\([0-9a-fA-F]\))?)(:)([0-9a-fA-F,]+)',
bygroups(Name.Variable, Punctuation, Number), '#pop'),
# As far as I know, .reg files do not support line continuation.
(r'.+', String, '#pop'),
default('#pop'),
]
}
def analyse_text(text):
return text.startswith('Windows Registry Editor')
|
RegeditLexer
|
python
|
astropy__astropy
|
astropy/coordinates/angles/errors.py
|
{
"start": 527,
"end": 642
}
|
class ____(RangeError):
"""
Raised when an angle is outside of its user-specified bounds.
"""
|
BoundsError
|
python
|
PrefectHQ__prefect
|
src/prefect/concurrency/_asyncio.py
|
{
"start": 948,
"end": 1079
}
|
class ____(Exception):
"""Raised when an unhandlable occurs while acquiring concurrency slots."""
|
ConcurrencySlotAcquisitionError
|
python
|
apache__airflow
|
providers/google/src/airflow/providers/google/cloud/operators/dataproc_metastore.py
|
{
"start": 26074,
"end": 31999
}
|
class ____(GoogleCloudBaseOperator):
"""
Export metadata from a service.
:param destination_gcs_folder: A Cloud Storage URI of a folder, in the format
``gs://<bucket_name>/<path_inside_bucket>``. A sub-folder
``<export_folder>`` containing exported files will be
created below it.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param region: Required. The ID of the Google Cloud region that the service belongs to.
:param service_id: Required. The ID of the metastore service, which is used as the final component of
the metastore service's name. This value must be between 2 and 63 characters long inclusive, begin
with a letter, end with a letter or number, and consist of alphanumeric ASCII characters or
hyphens.
This corresponds to the ``service_id`` field on the ``request`` instance; if ``request`` is
provided, this should not be set.
:param request_id: Optional. A unique id used to identify the request.
:param retry: Optional. Designation of what errors, if any, should be retried.
:param timeout: Optional. The timeout for this request.
:param metadata: Optional. Strings which should be sent along with the request as metadata.
:param gcp_conn_id: The connection ID to use connecting to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
"project_id",
"impersonation_chain",
)
operator_extra_links = (DataprocMetastoreLink(), StorageLink())
def __init__(
self,
*,
destination_gcs_folder: str,
project_id: str,
region: str,
service_id: str,
request_id: str | None = None,
database_dump_type: DatabaseDumpSpec | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.destination_gcs_folder = destination_gcs_folder
self.project_id = project_id
self.region = region
self.service_id = service_id
self.request_id = request_id
self.database_dump_type = database_dump_type
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
@property
def extra_links_params(self) -> dict[str, Any]:
return {
"region": self.region,
"service_id": self.service_id,
"project_id": self.project_id,
}
def execute(self, context: Context):
hook = DataprocMetastoreHook(
gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain
)
self.log.info("Exporting metadata from Dataproc Metastore service: %s", self.service_id)
hook.export_metadata(
destination_gcs_folder=self.destination_gcs_folder,
project_id=self.project_id,
region=self.region,
service_id=self.service_id,
request_id=self.request_id,
database_dump_type=self.database_dump_type,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
metadata_export = self._wait_for_export_metadata(hook)
self.log.info("Metadata from service %s exported successfully", self.service_id)
DataprocMetastoreLink.persist(context=context, url=METASTORE_EXPORT_LINK)
uri = self._get_uri_from_destination(MetadataExport.to_dict(metadata_export)["destination_gcs_uri"])
StorageLink.persist(context=context, uri=uri, project_id=self.project_id)
return MetadataExport.to_dict(metadata_export)
def _get_uri_from_destination(self, destination_uri: str):
return destination_uri[5:] if destination_uri.startswith("gs://") else destination_uri
def _wait_for_export_metadata(self, hook: DataprocMetastoreHook):
"""
Check that export was created successfully.
This is a workaround to an issue parsing result to MetadataExport inside
the SDK.
"""
for time_to_wait in exponential_sleep_generator(initial=10, maximum=120):
time.sleep(time_to_wait)
service = hook.get_service(
region=self.region,
project_id=self.project_id,
service_id=self.service_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
activities: MetadataManagementActivity = service.metadata_management_activity
metadata_export: MetadataExport = activities.metadata_exports[0]
if metadata_export.state == MetadataExport.State.SUCCEEDED:
return metadata_export
if metadata_export.state == MetadataExport.State.FAILED:
raise AirflowException(
f"Exporting metadata from Dataproc Metastore {metadata_export.name} FAILED"
)
|
DataprocMetastoreExportMetadataOperator
|
python
|
apache__airflow
|
providers/google/src/airflow/providers/google/common/hooks/base_google.py
|
{
"start": 29048,
"end": 32008
}
|
class ____(Token):
"""
A token implementation which makes Google credentials objects accessible to [gcloud-aio](https://talkiq.github.io/gcloud-aio/) clients.
This class allows us to create token instances from credentials objects and thus supports a variety of use cases for Google
credentials in Airflow (i.e. impersonation chain). By relying on a existing credentials object we leverage functionality provided by the GoogleBaseHook
for generating credentials objects.
"""
def __init__(
self,
credentials: Credentials,
*,
project: str | None = None,
session: ClientSession | None = None,
scopes: Sequence[str] | None = None,
) -> None:
_scopes: list[str] | None = list(scopes) if scopes else None
super().__init__(session=cast("Session", session), scopes=_scopes)
self.credentials = credentials
self.project = project
@classmethod
async def from_hook(
cls,
hook: GoogleBaseHook,
*,
session: ClientSession | None = None,
) -> _CredentialsToken:
credentials, project = hook.get_credentials_and_project_id()
return cls(
credentials=credentials,
project=project,
session=session,
scopes=hook.scopes,
)
async def get_project(self) -> str | None:
return self.project
async def refresh(self, *, timeout: int) -> TokenResponse:
await sync_to_async(self.credentials.refresh)(google.auth.transport.requests.Request())
self.access_token = cast("str", self.credentials.token)
self.access_token_duration = 3600
self.access_token_acquired_at = self._now()
return TokenResponse(value=self.access_token, expires_in=self.access_token_duration)
async def acquire_access_token(self, timeout: int = 10) -> None:
await self.refresh(timeout=timeout)
self.acquiring = None
async def ensure_token(self) -> None:
if self.acquiring and not self.acquiring.done():
await self.acquiring
return
if self.access_token:
delta = (self._now() - self.access_token_acquired_at).total_seconds()
if delta <= self.access_token_duration / 2:
return
self.acquiring = asyncio.ensure_future( # pylint: disable=used-before-assignment
self.acquire_access_token()
)
await self.acquiring
@staticmethod
def _now():
# access_token_acquired_at is specific to gcloud-aio's Token.
# On subsequent calls of `get` it will be used with `datetime.datetime.utcnow()`.
# Therefore we have to use an offset-naive datetime.
# https://github.com/talkiq/gcloud-aio/blob/f1132b005ba35d8059229a9ca88b90f31f77456d/auth/gcloud/aio/auth/token.py#L204
return datetime.datetime.now(tz=datetime.timezone.utc).replace(tzinfo=None)
|
_CredentialsToken
|
python
|
pytest-dev__pytest
|
testing/test_debugging.py
|
{
"start": 2672,
"end": 32744
}
|
class ____:
@pytest.fixture
def pdblist(self, request):
monkeypatch = request.getfixturevalue("monkeypatch")
pdblist = []
def mypdb(*args):
pdblist.append(args)
plugin = request.config.pluginmanager.getplugin("debugging")
monkeypatch.setattr(plugin, "post_mortem", mypdb)
return pdblist
def test_pdb_on_fail(self, pytester: Pytester, pdblist) -> None:
rep = runpdb_and_get_report(
pytester,
"""
def test_func():
assert 0
""",
)
assert rep.failed
assert len(pdblist) == 1
if sys.version_info < (3, 13):
tb = _pytest._code.Traceback(pdblist[0][0])
else:
tb = _pytest._code.Traceback(pdblist[0][0].__traceback__)
assert tb[-1].name == "test_func"
def test_pdb_on_xfail(self, pytester: Pytester, pdblist) -> None:
rep = runpdb_and_get_report(
pytester,
"""
import pytest
@pytest.mark.xfail
def test_func():
assert 0
""",
)
assert "xfail" in rep.keywords
assert not pdblist
def test_pdb_on_skip(self, pytester, pdblist) -> None:
rep = runpdb_and_get_report(
pytester,
"""
import pytest
def test_func():
pytest.skip("hello")
""",
)
assert rep.skipped
assert len(pdblist) == 0
def test_pdb_on_top_level_raise_skiptest(self, pytester, pdblist) -> None:
stdout = runpdb_and_get_stdout(
pytester,
"""
import unittest
raise unittest.SkipTest("This is a common way to skip an entire file.")
""",
)
assert "entering PDB" not in stdout, stdout
def test_pdb_on_BdbQuit(self, pytester, pdblist) -> None:
rep = runpdb_and_get_report(
pytester,
"""
import bdb
def test_func():
raise bdb.BdbQuit
""",
)
assert rep.failed
assert len(pdblist) == 0
def test_pdb_on_KeyboardInterrupt(self, pytester, pdblist) -> None:
rep = runpdb_and_get_report(
pytester,
"""
def test_func():
raise KeyboardInterrupt
""",
)
assert rep.failed
assert len(pdblist) == 1
@staticmethod
def flush(child):
if child.isalive():
# Read if the test has not (e.g. test_pdb_unittest_skip).
child.read()
child.wait()
assert not child.isalive()
def test_pdb_unittest_postmortem(self, pytester: Pytester) -> None:
p1 = pytester.makepyfile(
"""
import unittest
class Blub(unittest.TestCase):
def tearDown(self):
self.filename = None
def test_false(self):
self.filename = 'debug' + '.me'
assert 0
"""
)
child = pytester.spawn_pytest(f"--pdb {p1}")
child.expect("Pdb")
child.sendline("p self.filename")
child.sendeof()
rest = child.read().decode("utf8")
assert "debug.me" in rest
self.flush(child)
def test_pdb_unittest_skip(self, pytester: Pytester) -> None:
"""Test for issue #2137"""
p1 = pytester.makepyfile(
"""
import unittest
@unittest.skipIf(True, 'Skipping also with pdb active')
class MyTestCase(unittest.TestCase):
def test_one(self):
assert 0
"""
)
child = pytester.spawn_pytest(f"-rs --pdb {p1}")
child.expect("Skipping also with pdb active")
child.expect_exact("= 1 skipped in")
child.sendeof()
self.flush(child)
def test_pdb_print_captured_stdout_and_stderr(self, pytester: Pytester) -> None:
p1 = pytester.makepyfile(
"""
def test_1():
import sys
sys.stderr.write("get\\x20rekt")
print("get\\x20rekt")
assert False
def test_not_called_due_to_quit():
pass
"""
)
child = pytester.spawn_pytest(f"--pdb {p1}")
child.expect("captured stdout")
child.expect("get rekt")
child.expect("captured stderr")
child.expect("get rekt")
child.expect("traceback")
child.expect("def test_1")
child.expect("Pdb")
child.sendeof()
rest = child.read().decode("utf8")
assert "Exit: Quitting debugger" in rest
assert "= 1 failed in" in rest
assert "def test_1" not in rest
assert "get rekt" not in rest
self.flush(child)
def test_pdb_dont_print_empty_captured_stdout_and_stderr(
self, pytester: Pytester
) -> None:
p1 = pytester.makepyfile(
"""
def test_1():
assert False
"""
)
child = pytester.spawn_pytest(f"--pdb {p1}")
child.expect("Pdb")
output = child.before.decode("utf8")
child.sendeof()
assert "captured stdout" not in output
assert "captured stderr" not in output
self.flush(child)
@pytest.mark.parametrize("showcapture", ["all", "no", "log"])
def test_pdb_print_captured_logs(self, pytester, showcapture: str) -> None:
p1 = pytester.makepyfile(
"""
def test_1():
import logging
logging.warning("get " + "rekt")
assert False
"""
)
child = pytester.spawn_pytest(f"--show-capture={showcapture} --pdb {p1}")
if showcapture in ("all", "log"):
child.expect("captured log")
child.expect("get rekt")
child.expect("Pdb")
child.sendeof()
rest = child.read().decode("utf8")
assert "1 failed" in rest
self.flush(child)
def test_pdb_print_captured_logs_nologging(self, pytester: Pytester) -> None:
p1 = pytester.makepyfile(
"""
def test_1():
import logging
logging.warning("get " + "rekt")
assert False
"""
)
child = pytester.spawn_pytest(f"--show-capture=all --pdb -p no:logging {p1}")
child.expect("get rekt")
output = child.before.decode("utf8")
assert "captured log" not in output
child.expect("Pdb")
child.sendeof()
rest = child.read().decode("utf8")
assert "1 failed" in rest
self.flush(child)
def test_pdb_interaction_exception(self, pytester: Pytester) -> None:
p1 = pytester.makepyfile(
"""
import pytest
def globalfunc():
pass
def test_1():
pytest.raises(ValueError, globalfunc)
"""
)
child = pytester.spawn_pytest(f"--pdb {p1}")
child.expect(".*def test_1")
child.expect(".*pytest.raises.*globalfunc")
child.expect("Pdb")
child.sendline("globalfunc")
child.expect(".*function")
child.sendeof()
child.expect("1 failed")
self.flush(child)
def test_pdb_interaction_on_collection_issue181(self, pytester: Pytester) -> None:
p1 = pytester.makepyfile(
"""
import pytest
xxx
"""
)
child = pytester.spawn_pytest(f"--pdb {p1}")
# child.expect(".*import pytest.*")
child.expect("Pdb")
child.sendline("c")
child.expect("1 error")
self.flush(child)
def test_pdb_interaction_on_internal_error(self, pytester: Pytester) -> None:
pytester.makeconftest(
"""
def pytest_runtest_protocol():
0/0
"""
)
p1 = pytester.makepyfile("def test_func(): pass")
child = pytester.spawn_pytest(f"--pdb {p1}")
child.expect("Pdb")
# INTERNALERROR is only displayed once via terminal reporter.
assert (
len(
[
x
for x in child.before.decode().splitlines()
if x.startswith("INTERNALERROR> Traceback")
]
)
== 1
)
child.sendeof()
self.flush(child)
def test_pdb_prevent_ConftestImportFailure_hiding_exception(
self, pytester: Pytester
) -> None:
pytester.makepyfile("def test_func(): pass")
sub_dir = pytester.path.joinpath("ns")
sub_dir.mkdir()
sub_dir.joinpath("conftest").with_suffix(".py").write_text(
"import unknown", "utf-8"
)
sub_dir.joinpath("test_file").with_suffix(".py").write_text(
"def test_func(): pass", "utf-8"
)
result = pytester.runpytest_subprocess("--pdb", ".")
result.stdout.fnmatch_lines(["-> import unknown"])
@pytest.mark.xfail(reason="#10042", strict=False)
def test_pdb_interaction_capturing_simple(self, pytester: Pytester) -> None:
p1 = pytester.makepyfile(
"""
import pytest
def test_1():
i = 0
print("hello17")
pytest.set_trace()
i == 1
assert 0
"""
)
child = pytester.spawn_pytest(str(p1))
child.expect(r"test_1\(\)")
child.expect("i == 1")
child.expect("Pdb")
child.sendline("c")
rest = child.read().decode("utf-8")
assert "AssertionError" in rest
assert "1 failed" in rest
assert "def test_1" in rest
assert "hello17" in rest # out is captured
self.flush(child)
def test_pdb_set_trace_kwargs(self, pytester: Pytester) -> None:
p1 = pytester.makepyfile(
"""
import pytest
def test_1():
i = 0
print("hello17")
pytest.set_trace(header="== my_header ==")
x = 3
assert 0
"""
)
child = pytester.spawn_pytest(str(p1))
child.expect("== my_header ==")
assert "PDB set_trace" not in child.before.decode()
child.expect("Pdb")
child.sendline("c")
rest = child.read().decode("utf-8")
assert "1 failed" in rest
assert "def test_1" in rest
assert "hello17" in rest # out is captured
self.flush(child)
def test_pdb_set_trace_interception(self, pytester: Pytester) -> None:
p1 = pytester.makepyfile(
"""
import pdb
def test_1():
pdb.set_trace()
"""
)
child = pytester.spawn_pytest(str(p1))
child.expect("test_1")
child.expect("Pdb")
child.sendline("q")
rest = child.read().decode("utf8")
assert "no tests ran" in rest
assert "reading from stdin while output" not in rest
assert "BdbQuit" not in rest
self.flush(child)
def test_pdb_and_capsys(self, pytester: Pytester) -> None:
p1 = pytester.makepyfile(
"""
import pytest
def test_1(capsys):
print("hello1")
pytest.set_trace()
"""
)
child = pytester.spawn_pytest(str(p1))
child.expect("test_1")
child.send("capsys.readouterr()\n")
child.expect("hello1")
child.sendeof()
child.read()
self.flush(child)
def test_pdb_with_caplog_on_pdb_invocation(self, pytester: Pytester) -> None:
p1 = pytester.makepyfile(
"""
def test_1(capsys, caplog):
import logging
logging.getLogger(__name__).warning("some_warning")
assert 0
"""
)
child = pytester.spawn_pytest(f"--pdb {p1!s}")
child.send("caplog.record_tuples\n")
child.expect_exact(
"[('test_pdb_with_caplog_on_pdb_invocation', 30, 'some_warning')]"
)
child.sendeof()
child.read()
self.flush(child)
def test_set_trace_capturing_afterwards(self, pytester: Pytester) -> None:
p1 = pytester.makepyfile(
"""
import pdb
def test_1():
pdb.set_trace()
def test_2():
print("hello")
assert 0
"""
)
child = pytester.spawn_pytest(str(p1))
child.expect("test_1")
child.send("c\n")
child.expect("test_2")
child.expect("Captured")
child.expect("hello")
child.sendeof()
child.read()
self.flush(child)
def test_pdb_interaction_doctest(self, pytester: Pytester) -> None:
p1 = pytester.makepyfile(
"""
def function_1():
'''
>>> i = 0
>>> assert i == 1
'''
"""
)
child = pytester.spawn_pytest(f"--doctest-modules --pdb {p1}")
child.expect("Pdb")
assert "UNEXPECTED EXCEPTION: AssertionError()" in child.before.decode("utf8")
child.sendline("'i=%i.' % i")
child.expect("Pdb")
assert "\r\n'i=0.'\r\n" in child.before.decode("utf8")
child.sendeof()
rest = child.read().decode("utf8")
assert "! _pytest.outcomes.Exit: Quitting debugger !" in rest
assert "BdbQuit" not in rest
assert "1 failed" in rest
self.flush(child)
def test_doctest_set_trace_quit(self, pytester: Pytester) -> None:
p1 = pytester.makepyfile(
"""
def function_1():
'''
>>> __import__('pdb').set_trace()
'''
"""
)
# NOTE: does not use pytest.set_trace, but Python's patched pdb,
# therefore "-s" is required.
child = pytester.spawn_pytest(f"--doctest-modules --pdb -s {p1}")
child.expect("Pdb")
child.sendline("q")
rest = child.read().decode("utf8")
assert "! _pytest.outcomes.Exit: Quitting debugger !" in rest
assert "= no tests ran in" in rest
assert "BdbQuit" not in rest
assert "UNEXPECTED EXCEPTION" not in rest
@pytest.mark.xfail(reason="#10042", strict=False)
def test_pdb_interaction_capturing_twice(self, pytester: Pytester) -> None:
p1 = pytester.makepyfile(
"""
import pytest
def test_1():
i = 0
print("hello17")
pytest.set_trace()
x = 3
print("hello18")
pytest.set_trace()
x = 4
assert 0
"""
)
child = pytester.spawn_pytest(str(p1))
child.expect(r"PDB set_trace \(IO-capturing turned off\)")
child.expect("test_1")
child.expect("x = 3")
child.expect("Pdb")
child.sendline("c")
child.expect(r"PDB continue \(IO-capturing resumed\)")
child.expect(r"PDB set_trace \(IO-capturing turned off\)")
child.expect("x = 4")
child.expect("Pdb")
child.sendline("c")
child.expect("_ test_1 _")
child.expect("def test_1")
rest = child.read().decode("utf8")
assert "Captured stdout call" in rest
assert "hello17" in rest # out is captured
assert "hello18" in rest # out is captured
assert "1 failed" in rest
self.flush(child)
@pytest.mark.xfail(reason="#10042", strict=False)
def test_pdb_with_injected_do_debug(self, pytester: Pytester) -> None:
"""Simulates pdbpp, which injects Pdb into do_debug, and uses
self.__class__ in do_continue.
"""
p1 = pytester.makepyfile(
mytest="""
import pdb
import pytest
count_continue = 0
class CustomPdb(pdb.Pdb, object):
def do_debug(self, arg):
import sys
import types
do_debug_func = pdb.Pdb.do_debug
newglobals = do_debug_func.__globals__.copy()
newglobals['Pdb'] = self.__class__
orig_do_debug = types.FunctionType(
do_debug_func.__code__, newglobals,
do_debug_func.__name__, do_debug_func.__defaults__,
)
return orig_do_debug(self, arg)
do_debug.__doc__ = pdb.Pdb.do_debug.__doc__
def do_continue(self, *args, **kwargs):
global count_continue
count_continue += 1
return super(CustomPdb, self).do_continue(*args, **kwargs)
def foo():
print("print_from_foo")
def test_1():
i = 0
print("hello17")
pytest.set_trace()
x = 3
print("hello18")
assert count_continue == 2, "unexpected_failure: %d != 2" % count_continue
pytest.fail("expected_failure")
"""
)
child = pytester.spawn_pytest(f"--pdbcls=mytest:CustomPdb {p1!s}")
child.expect(r"PDB set_trace \(IO-capturing turned off\)")
child.expect(r"\n\(Pdb")
child.sendline("debug foo()")
child.expect("ENTERING RECURSIVE DEBUGGER")
child.expect(r"\n\(\(Pdb")
child.sendline("c")
child.expect("LEAVING RECURSIVE DEBUGGER")
assert b"PDB continue" not in child.before
# No extra newline.
assert child.before.endswith(b"c\r\nprint_from_foo\r\n")
# set_debug should not raise outcomes. Exit, if used recursively.
child.sendline("debug 42")
child.sendline("q")
child.expect("LEAVING RECURSIVE DEBUGGER")
assert b"ENTERING RECURSIVE DEBUGGER" in child.before
assert b"Quitting debugger" not in child.before
child.sendline("c")
child.expect(r"PDB continue \(IO-capturing resumed\)")
rest = child.read().decode("utf8")
assert "hello17" in rest # out is captured
assert "hello18" in rest # out is captured
assert "1 failed" in rest
assert "Failed: expected_failure" in rest
assert "AssertionError: unexpected_failure" not in rest
self.flush(child)
def test_pdb_without_capture(self, pytester: Pytester) -> None:
p1 = pytester.makepyfile(
"""
import pytest
def test_1():
pytest.set_trace()
"""
)
child = pytester.spawn_pytest(f"-s {p1}")
child.expect(r">>> PDB set_trace >>>")
child.expect("Pdb")
child.sendline("c")
child.expect(r">>> PDB continue >>>")
child.expect("1 passed")
self.flush(child)
@pytest.mark.parametrize("capture_arg", ("", "-s", "-p no:capture"))
def test_pdb_continue_with_recursive_debug(
self, capture_arg, pytester: Pytester
) -> None:
"""Full coverage for do_debug without capturing.
This is very similar to test_pdb_interaction_continue_recursive in general,
but mocks out ``pdb.set_trace`` for providing more coverage.
"""
p1 = pytester.makepyfile(
"""
try:
input = raw_input
except NameError:
pass
def set_trace():
__import__('pdb').set_trace()
def test_1(monkeypatch):
import _pytest.debugging
class pytestPDBTest(_pytest.debugging.pytestPDB):
@classmethod
def set_trace(cls, *args, **kwargs):
# Init PytestPdbWrapper to handle capturing.
_pdb = cls._init_pdb("set_trace", *args, **kwargs)
# Mock out pdb.Pdb.do_continue.
import pdb
pdb.Pdb.do_continue = lambda self, arg: None
print("===" + " SET_TRACE ===")
assert input() == "debug set_trace()"
# Simulate PytestPdbWrapper.do_debug
cls._recursive_debug += 1
print("ENTERING RECURSIVE DEBUGGER")
print("===" + " SET_TRACE_2 ===")
assert input() == "c"
_pdb.do_continue("")
print("===" + " SET_TRACE_3 ===")
# Simulate PytestPdbWrapper.do_debug
print("LEAVING RECURSIVE DEBUGGER")
cls._recursive_debug -= 1
print("===" + " SET_TRACE_4 ===")
assert input() == "c"
_pdb.do_continue("")
def do_continue(self, arg):
print("=== do_continue")
monkeypatch.setattr(_pytest.debugging, "pytestPDB", pytestPDBTest)
import pdb
monkeypatch.setattr(pdb, "set_trace", pytestPDBTest.set_trace)
set_trace()
"""
)
child = pytester.spawn_pytest(f"--tb=short {p1} {capture_arg}")
child.expect("=== SET_TRACE ===")
before = child.before.decode("utf8")
if not capture_arg:
assert ">>> PDB set_trace (IO-capturing turned off) >>>" in before
else:
assert ">>> PDB set_trace >>>" in before
child.sendline("debug set_trace()")
child.expect("=== SET_TRACE_2 ===")
before = child.before.decode("utf8")
assert "\r\nENTERING RECURSIVE DEBUGGER\r\n" in before
child.sendline("c")
child.expect("=== SET_TRACE_3 ===")
# No continue message with recursive debugging.
before = child.before.decode("utf8")
assert ">>> PDB continue " not in before
child.sendline("c")
child.expect("=== SET_TRACE_4 ===")
before = child.before.decode("utf8")
assert "\r\nLEAVING RECURSIVE DEBUGGER\r\n" in before
child.sendline("c")
rest = child.read().decode("utf8")
if not capture_arg:
assert "> PDB continue (IO-capturing resumed) >" in rest
else:
assert "> PDB continue >" in rest
assert "= 1 passed in" in rest
def test_pdb_used_outside_test(self, pytester: Pytester) -> None:
p1 = pytester.makepyfile(
"""
import pytest
pytest.set_trace()
x = 5
"""
)
if sys.version_info[:2] >= (3, 13):
break_line = "pytest.set_trace()"
else:
break_line = "x = 5"
child = pytester.spawn(f"{sys.executable} {p1}")
child.expect_exact(break_line)
child.expect_exact("Pdb")
child.sendeof()
self.flush(child)
def test_pdb_used_in_generate_tests(self, pytester: Pytester) -> None:
p1 = pytester.makepyfile(
"""
import pytest
def pytest_generate_tests(metafunc):
pytest.set_trace()
x = 5
def test_foo(a):
pass
"""
)
if sys.version_info[:2] >= (3, 13):
break_line = "pytest.set_trace()"
else:
break_line = "x = 5"
child = pytester.spawn_pytest(str(p1))
child.expect_exact(break_line)
child.expect_exact("Pdb")
child.sendeof()
self.flush(child)
def test_pdb_collection_failure_is_shown(self, pytester: Pytester) -> None:
p1 = pytester.makepyfile("xxx")
result = pytester.runpytest_subprocess("--pdb", p1)
result.stdout.fnmatch_lines(
["E NameError: *xxx*", "*! *Exit: Quitting debugger !*"] # due to EOF
)
@pytest.mark.parametrize("post_mortem", (False, True))
def test_enter_leave_pdb_hooks_are_called(
self, post_mortem, pytester: Pytester
) -> None:
pytester.makeconftest(
"""
mypdb = None
def pytest_configure(config):
config.testing_verification = 'configured'
def pytest_enter_pdb(config, pdb):
assert config.testing_verification == 'configured'
print('enter_pdb_hook')
global mypdb
mypdb = pdb
mypdb.set_attribute = "bar"
def pytest_leave_pdb(config, pdb):
assert config.testing_verification == 'configured'
print('leave_pdb_hook')
global mypdb
assert mypdb is pdb
assert mypdb.set_attribute == "bar"
"""
)
p1 = pytester.makepyfile(
"""
import pytest
def test_set_trace():
pytest.set_trace()
assert 0
def test_post_mortem():
assert 0
"""
)
if post_mortem:
child = pytester.spawn_pytest(str(p1) + " --pdb -s -k test_post_mortem")
else:
child = pytester.spawn_pytest(str(p1) + " -k test_set_trace")
child.expect("enter_pdb_hook")
child.sendline("c")
if post_mortem:
child.expect(r"PDB continue")
else:
child.expect(r"PDB continue \(IO-capturing resumed\)")
child.expect("Captured stdout call")
rest = child.read().decode("utf8")
assert "leave_pdb_hook" in rest
assert "1 failed" in rest
self.flush(child)
def test_pdb_custom_cls(
self, pytester: Pytester, custom_pdb_calls: list[str]
) -> None:
p1 = pytester.makepyfile("""xxx """)
result = pytester.runpytest_inprocess(
"--pdb", "--pdbcls=_pytest:_CustomPdb", p1
)
result.stdout.fnmatch_lines(["*NameError*xxx*", "*1 error*"])
assert custom_pdb_calls == ["init", "reset", "interaction"]
def test_pdb_custom_cls_invalid(self, pytester: Pytester) -> None:
result = pytester.runpytest_inprocess("--pdbcls=invalid")
result.stderr.fnmatch_lines(
[
"*: error: argument --pdbcls: 'invalid' is not in the format 'modname:classname'"
]
)
def test_pdb_validate_usepdb_cls(self):
assert _validate_usepdb_cls("os.path:dirname.__name__") == (
"os.path",
"dirname.__name__",
)
assert _validate_usepdb_cls("pdb:DoesNotExist") == ("pdb", "DoesNotExist")
def test_pdb_custom_cls_without_pdb(
self, pytester: Pytester, custom_pdb_calls: list[str]
) -> None:
p1 = pytester.makepyfile("""xxx """)
result = pytester.runpytest_inprocess("--pdbcls=_pytest:_CustomPdb", p1)
result.stdout.fnmatch_lines(["*NameError*xxx*", "*1 error*"])
assert custom_pdb_calls == []
def test_pdb_custom_cls_with_set_trace(
self,
pytester: Pytester,
monkeypatch: MonkeyPatch,
) -> None:
pytester.makepyfile(
custom_pdb="""
class CustomPdb(object):
def __init__(self, *args, **kwargs):
skip = kwargs.pop("skip")
assert skip == ["foo.*"]
print("__init__")
super(CustomPdb, self).__init__(*args, **kwargs)
def set_trace(*args, **kwargs):
print('custom set_trace>')
"""
)
p1 = pytester.makepyfile(
"""
import pytest
def test_foo():
pytest.set_trace(skip=['foo.*'])
"""
)
monkeypatch.setenv("PYTHONPATH", str(pytester.path))
child = pytester.spawn_pytest(f"--pdbcls=custom_pdb:CustomPdb {p1!s}")
child.expect("__init__")
child.expect("custom set_trace>")
self.flush(child)
@pytest.mark.skipif(
sys.version_info < (3, 13),
reason="Navigating exception chains was introduced in 3.13",
)
def test_pdb_exception_chain_navigation(self, pytester: Pytester) -> None:
p1 = pytester.makepyfile(
"""
def inner_raise():
is_inner = True
raise RuntimeError("Woops")
def outer_raise():
is_inner = False
try:
inner_raise()
except RuntimeError:
raise RuntimeError("Woopsie")
def test_1():
outer_raise()
assert True
"""
)
child = pytester.spawn_pytest(f"--pdb {p1}")
child.expect("Pdb")
child.sendline("is_inner")
child.expect_exact("False")
child.sendline("exceptions 0")
child.sendline("is_inner")
child.expect_exact("True")
child.sendeof()
self.flush(child)
def test_pdb_wrapped_commands_docstrings(self, pytester: Pytester) -> None:
p1 = pytester.makepyfile(
"""
def test_1():
assert False
"""
)
child = pytester.spawn_pytest(f"--pdb {p1}")
child.expect("Pdb")
# Verify no undocumented commands
child.sendline("help")
child.expect("Documented commands")
assert "Undocumented commands" not in child.before.decode()
child.sendline("help continue")
child.expect("Continue execution")
child.expect("Pdb")
child.sendline("help debug")
child.expect("Enter a recursive debugger")
child.expect("Pdb")
child.sendline("c")
child.sendeof()
self.flush(child)
|
TestPDB
|
python
|
run-llama__llama_index
|
llama-index-integrations/readers/llama-index-readers-web/llama_index/readers/web/scrapfly_web/base.py
|
{
"start": 307,
"end": 2898
}
|
class ____(BasePydanticReader):
"""
Turn a url to llm accessible markdown with `Scrapfly.io`.
Args:
api_key: The Scrapfly API key.
scrape_config: The Scrapfly ScrapeConfig object.
ignore_scrape_failures: Whether to continue on failures.
urls: List of urls to scrape.
scrape_format: Scrape result format (markdown or text)
For further details, visit: https://scrapfly.io/docs/sdk/python
"""
api_key: str
ignore_scrape_failures: bool = True
scrapfly: "ScrapflyClient"
def __init__(self, api_key: str, ignore_scrape_failures: bool = True) -> None:
"""Initialize client."""
try:
from scrapfly import ScrapflyClient
except ImportError:
raise ImportError(
"`scrapfly` package not found, please run `pip install scrapfly-sdk`"
)
scrapfly = ScrapflyClient(key=api_key)
super().__init__(
api_key=api_key,
ignore_scrape_failures=ignore_scrape_failures,
scrapfly=scrapfly,
)
@classmethod
def class_name(cls) -> str:
return "Scrapfly_reader"
def load_data(
self,
urls: List[str],
scrape_format: Literal["markdown", "text"] = "markdown",
scrape_config: Optional[dict] = None,
) -> List[Document]:
"""
Load data from the urls.
Args:
urls: List[str]): List of URLs to scrape.
scrape_config: Optional[dict]: Dictionary of ScrapFly scrape config object.
Returns:
List[Document]: List of documents.
Raises:
ValueError: If URLs aren't provided.
"""
from scrapfly import ScrapeApiResponse, ScrapeConfig
if urls is None:
raise ValueError("URLs must be provided.")
scrape_config = scrape_config if scrape_config is not None else {}
documents = []
for url in urls:
try:
response: ScrapeApiResponse = self.scrapfly.scrape(
ScrapeConfig(url, format=scrape_format, **scrape_config)
)
documents.append(
Document(
text=response.scrape_result["content"], extra_info={"url": url}
)
)
except Exception as e:
if self.ignore_scrape_failures:
logger.error(f"Error fetching data from {url}, exception: {e}")
else:
raise e # noqa: TRY201
return documents
|
ScrapflyReader
|
python
|
vyperlang__vyper
|
vyper/builtins/functions.py
|
{
"start": 73262,
"end": 73592
}
|
class ____(BuiltinFunctionT):
_id = "sqrt"
_inputs = [("d", DecimalT())]
_return_type = DecimalT()
def fetch_call_return(self, node):
message = "The `sqrt` builtin was removed. Instead import module "
message += "`math` and use `math.sqrt()`"
raise UnimplementedException(message, node)
|
Sqrt
|
python
|
joke2k__faker
|
tests/providers/test_bank.py
|
{
"start": 13651,
"end": 14117
}
|
class ____:
"""Test th_TH bank provider"""
def test_bban(self, faker, num_samples):
for _ in range(num_samples):
assert re.fullmatch(r"\d{10}", faker.bban())
def test_iban(self, faker, num_samples):
for _ in range(num_samples):
iban = faker.iban()
assert is_valid_iban(iban)
assert iban[:2] == ThThBankProvider.country_code
assert re.fullmatch(r"\d{2}\d{10}", iban[2:])
|
TestThTh
|
python
|
PrefectHQ__prefect
|
tests/server/utilities/test_text_search_parser.py
|
{
"start": 4023,
"end": 4941
}
|
class ____:
"""Test required/AND syntax with + prefix (future feature)"""
def test_plus_prefix_single_term(self):
result = parse_text_search_query("+required")
assert result == TextSearchQuery(include=[], exclude=[], required=["required"])
def test_plus_only(self):
result = parse_text_search_query("+")
assert result == TextSearchQuery(include=[], exclude=[], required=[])
def test_mixed_required_with_other_terms(self):
result = parse_text_search_query("include +required -excluded")
assert result == TextSearchQuery(
include=["include"], exclude=["excluded"], required=["required"]
)
def test_multiple_required_terms(self):
result = parse_text_search_query("+error +connection")
assert result == TextSearchQuery(
include=[], exclude=[], required=["error", "connection"]
)
|
TestRequiredTerms
|
python
|
huggingface__transformers
|
tests/models/bert/test_tokenization_bert.py
|
{
"start": 885,
"end": 2733
}
|
class ____(TokenizerTesterMixin, unittest.TestCase):
from_pretrained_id = ["google-bert/bert-base-uncased"]
tokenizer_class = BertTokenizer
integration_expected_tokens = ['[UNK]', 'is', 'a', 'test', '[UNK]', '[UNK]', 'was', 'born', 'in', '92', '##00', '##0', ',', 'and', 'this', 'is', '[UNK]', '.', '生', '[UNK]', '的', '真', '[UNK]', '[UNK]', '[UNK]', '[UNK]', '[UNK]', '[UNK]', '[UNK]', '<', 's', '>', 'hi', '<', 's', '>', 'there', '[UNK]', 'following', 'string', 'should', 'be', 'properly', 'encoded', ':', '[UNK]', '.', '[UNK]', 'ir', '##d', 'and', '[UNK]', 'ir', '##d', '[UNK]', '[UNK]', 'how', 'are', 'you', 'doing'] # fmt: skip
integration_expected_token_ids = [100, 2003, 1037, 3231, 100, 100, 2001, 2141, 1999, 6227, 8889, 2692, 1010, 1998, 2023, 2003, 100, 1012, 1910, 100, 1916, 1921, 100, 100, 100, 100, 100, 100, 100, 1026, 1055, 1028, 7632, 1026, 1055, 1028, 2045, 100, 2206, 5164, 2323, 2022, 7919, 12359, 1024, 100, 1012, 100, 20868, 2094, 1998, 100, 20868, 2094, 100, 100, 2129, 2024, 2017, 2725] # fmt: skip
expected_tokens_from_ids = ['[UNK]', 'is', 'a', 'test', '[UNK]', '[UNK]', 'was', 'born', 'in', '92', '##00', '##0', ',', 'and', 'this', 'is', '[UNK]', '.', '生', '[UNK]', '的', '真', '[UNK]', '[UNK]', '[UNK]', '[UNK]', '[UNK]', '[UNK]', '[UNK]', '<', 's', '>', 'hi', '<', 's', '>', 'there', '[UNK]', 'following', 'string', 'should', 'be', 'properly', 'encoded', ':', '[UNK]', '.', '[UNK]', 'ir', '##d', 'and', '[UNK]', 'ir', '##d', '[UNK]', '[UNK]', 'how', 'are', 'you', 'doing'] # fmt: skip
integration_expected_decoded_text = "[UNK] is a test [UNK] [UNK] was born in 92000, and this is [UNK]. 生 [UNK] 的 真 [UNK] [UNK] [UNK] [UNK] [UNK] [UNK] [UNK] < s > hi < s > there [UNK] following string should be properly encoded : [UNK]. [UNK] ird and [UNK] ird [UNK] [UNK] how are you doing"
|
BertTokenizationTest
|
python
|
apache__airflow
|
providers/amazon/tests/unit/amazon/aws/sensors/test_sagemaker_endpoint.py
|
{
"start": 1662,
"end": 3219
}
|
class ____:
@mock.patch.object(SageMakerHook, "get_conn")
@mock.patch.object(SageMakerHook, "describe_endpoint")
def test_sensor_with_failure(self, mock_describe, mock_get_conn):
mock_describe.side_effect = [DESCRIBE_ENDPOINT_FAILED_RESPONSE]
sensor = SageMakerEndpointSensor(
task_id="test_task", poke_interval=1, aws_conn_id="aws_test", endpoint_name="test_job_name"
)
with pytest.raises(AirflowException):
sensor.execute(None)
mock_describe.assert_called_once_with("test_job_name")
@mock.patch.object(SageMakerHook, "get_conn")
@mock.patch.object(SageMakerHook, "__init__")
@mock.patch.object(SageMakerHook, "describe_endpoint")
def test_sensor(self, mock_describe, hook_init, mock_get_conn):
hook_init.return_value = None
mock_describe.side_effect = [
DESCRIBE_ENDPOINT_CREATING_RESPONSE,
DESCRIBE_ENDPOINT_UPDATING_RESPONSE,
DESCRIBE_ENDPOINT_INSERVICE_RESPONSE,
]
sensor = SageMakerEndpointSensor(
task_id="test_task", poke_interval=0, aws_conn_id="aws_test", endpoint_name="test_job_name"
)
sensor.execute(None)
# make sure we called 3 times(terminated when its completed)
assert mock_describe.call_count == 3
# make sure the hook was initialized with the specific params
calls = [mock.call(aws_conn_id="aws_test", config=None, verify=None, region_name=None)]
hook_init.assert_has_calls(calls)
|
TestSageMakerEndpointSensor
|
python
|
doocs__leetcode
|
solution/0100-0199/0156.Binary Tree Upside Down/Solution.py
|
{
"start": 192,
"end": 560
}
|
class ____:
def upsideDownBinaryTree(self, root: Optional[TreeNode]) -> Optional[TreeNode]:
if root is None or root.left is None:
return root
new_root = self.upsideDownBinaryTree(root.left)
root.left.right = root
root.left.left = root.right
root.left = None
root.right = None
return new_root
|
Solution
|
python
|
walkccc__LeetCode
|
solutions/1289. Minimum Falling Path Sum II/1289.py
|
{
"start": 0,
"end": 414
}
|
class ____:
def minFallingPathSum(self, grid: list[list[int]]) -> int:
n = len(grid)
for i in range(1, n):
(firstMinNum, firstMinIndex), (secondMinNum, _) = sorted(
{(a, i) for i, a in enumerate(grid[i - 1])})[:2]
for j in range(n):
if j == firstMinIndex:
grid[i][j] += secondMinNum
else:
grid[i][j] += firstMinNum
return min(grid[-1])
|
Solution
|
python
|
tensorflow__tensorflow
|
tensorflow/python/keras/utils/generic_utils.py
|
{
"start": 40600,
"end": 41603
}
|
class ____(python_types.ModuleType):
"""Lazily import a module, mainly to avoid pulling in large dependencies."""
def __init__(self, local_name, parent_module_globals, name):
self._local_name = local_name
self._parent_module_globals = parent_module_globals
super(LazyLoader, self).__init__(name)
def _load(self):
"""Load the module and insert it into the parent's globals."""
# Import the target module and insert it into the parent's namespace
module = importlib.import_module(self.__name__)
self._parent_module_globals[self._local_name] = module
# Update this object's dict so that if someone keeps a reference to the
# LazyLoader, lookups are efficient (__getattr__ is only called on lookups
# that fail).
self.__dict__.update(module.__dict__)
return module
def __getattr__(self, item):
module = self._load()
return getattr(module, item)
# Aliases
custom_object_scope = CustomObjectScope # pylint: disable=invalid-name
|
LazyLoader
|
python
|
getsentry__sentry
|
src/sentry/middleware/reporting_endpoint.py
|
{
"start": 162,
"end": 1017
}
|
class ____:
"""
Add ReportingEndpoint header for Sentry staff users only.
"""
def __init__(self, get_response: Callable[[HttpRequest], HttpResponseBase]):
self.get_response = get_response
def __call__(self, request: HttpRequest) -> HttpResponseBase:
response = self.get_response(request)
try:
enabled = options.get("issues.browser_reporting.reporting_endpoints_header_enabled")
if enabled:
# This will enable crashes, intervention and deprecation warnings
# They always report to the default endpoint
response["Reporting-Endpoints"] = (
"default=https://sentry.my.sentry.io/api/0/reporting-api-experiment/"
)
except Exception:
pass
return response
|
ReportingEndpointMiddleware
|
python
|
getsentry__sentry-python
|
sentry_sdk/tracing.py
|
{
"start": 6389,
"end": 7267
}
|
class ____:
"""Limits the number of spans recorded in a transaction."""
__slots__ = ("maxlen", "spans", "dropped_spans")
def __init__(self, maxlen):
# type: (int) -> None
# FIXME: this is `maxlen - 1` only to preserve historical behavior
# enforced by tests.
# Either this should be changed to `maxlen` or the JS SDK implementation
# should be changed to match a consistent interpretation of what maxlen
# limits: either transaction+spans or only child spans.
self.maxlen = maxlen - 1
self.spans = [] # type: List[Span]
self.dropped_spans = 0 # type: int
def add(self, span):
# type: (Span) -> None
if len(self.spans) > self.maxlen:
span._span_recorder = None
self.dropped_spans += 1
else:
self.spans.append(span)
|
_SpanRecorder
|
python
|
ray-project__ray
|
rllib/execution/minibatch_buffer.py
|
{
"start": 111,
"end": 1952
}
|
class ____:
"""Ring buffer of recent data batches for minibatch SGD.
This is for use with AsyncSamplesOptimizer.
"""
def __init__(
self,
inqueue: queue.Queue,
size: int,
timeout: float,
num_passes: int,
init_num_passes: int = 1,
):
"""Initialize a minibatch buffer.
Args:
inqueue (queue.Queue): Queue to populate the internal ring buffer
from.
size: Max number of data items to buffer.
timeout: Queue timeout
num_passes: Max num times each data item should be emitted.
init_num_passes: Initial passes for each data item.
Maxiumum number of passes per item are increased to num_passes over
time.
"""
self.inqueue = inqueue
self.size = size
self.timeout = timeout
self.max_initial_ttl = num_passes
self.cur_initial_ttl = init_num_passes
self.buffers = [None] * size
self.ttl = [0] * size
self.idx = 0
def get(self) -> Tuple[Any, bool]:
"""Get a new batch from the internal ring buffer.
Returns:
buf: Data item saved from inqueue.
released: True if the item is now removed from the ring buffer.
"""
if self.ttl[self.idx] <= 0:
self.buffers[self.idx] = self.inqueue.get(timeout=self.timeout)
self.ttl[self.idx] = self.cur_initial_ttl
if self.cur_initial_ttl < self.max_initial_ttl:
self.cur_initial_ttl += 1
buf = self.buffers[self.idx]
self.ttl[self.idx] -= 1
released = self.ttl[self.idx] <= 0
if released:
self.buffers[self.idx] = None
self.idx = (self.idx + 1) % len(self.buffers)
return buf, released
|
MinibatchBuffer
|
python
|
scikit-learn__scikit-learn
|
asv_benchmarks/benchmarks/model_selection.py
|
{
"start": 1190,
"end": 2371
}
|
class ____(Predictor, Estimator, Benchmark):
"""
Benchmarks for GridSearch.
"""
timeout = 20000
param_names = ["n_jobs"]
params = (Benchmark.n_jobs_vals,)
def setup_cache(self):
super().setup_cache()
def make_data(self, params):
data = _synth_classification_dataset(n_samples=10000, n_features=100)
return data
def make_estimator(self, params):
(n_jobs,) = params
clf = RandomForestClassifier(random_state=0)
if Benchmark.data_size == "large":
n_estimators_list = [10, 25, 50, 100, 500]
max_depth_list = [5, 10, None]
max_features_list = [0.1, 0.4, 0.8, 1.0]
else:
n_estimators_list = [10, 25, 50]
max_depth_list = [5, 10]
max_features_list = [0.1, 0.4, 0.8]
param_grid = {
"n_estimators": n_estimators_list,
"max_depth": max_depth_list,
"max_features": max_features_list,
}
estimator = GridSearchCV(clf, param_grid, n_jobs=n_jobs, cv=4)
return estimator
def make_scorers(self):
make_gen_classif_scorers(self)
|
GridSearchBenchmark
|
python
|
cloudpipe__cloudpickle
|
tests/cloudpickle_test.py
|
{
"start": 112176,
"end": 115432
}
|
class ____(CloudPickleTest):
protocol = 2
def test_lookup_module_and_qualname_dynamic_typevar():
T = typing.TypeVar("T")
module_and_name = _lookup_module_and_qualname(T, name=T.__name__)
assert module_and_name is None
def test_lookup_module_and_qualname_importable_typevar():
_cloudpickle_testpkg = pytest.importorskip("_cloudpickle_testpkg")
T = _cloudpickle_testpkg.T
module_and_name = _lookup_module_and_qualname(T, name=T.__name__)
assert module_and_name is not None
module, name = module_and_name
assert module is _cloudpickle_testpkg
assert name == "T"
def test_lookup_module_and_qualname_stdlib_typevar():
module_and_name = _lookup_module_and_qualname(
typing.AnyStr, name=typing.AnyStr.__name__
)
assert module_and_name is not None
module, name = module_and_name
assert module is typing
assert name == "AnyStr"
def test_register_pickle_by_value():
pkg = pytest.importorskip("_cloudpickle_testpkg")
mod = pytest.importorskip("_cloudpickle_testpkg.mod")
assert list_registry_pickle_by_value() == set()
register_pickle_by_value(pkg)
assert list_registry_pickle_by_value() == {pkg.__name__}
register_pickle_by_value(mod)
assert list_registry_pickle_by_value() == {pkg.__name__, mod.__name__}
unregister_pickle_by_value(mod)
assert list_registry_pickle_by_value() == {pkg.__name__}
msg = f"Input should be a module object, got {pkg.__name__} instead"
with pytest.raises(ValueError, match=msg):
unregister_pickle_by_value(pkg.__name__)
unregister_pickle_by_value(pkg)
assert list_registry_pickle_by_value() == set()
msg = f"{pkg} is not registered for pickle by value"
with pytest.raises(ValueError, match=re.escape(msg)):
unregister_pickle_by_value(pkg)
msg = f"Input should be a module object, got {pkg.__name__} instead"
with pytest.raises(ValueError, match=msg):
register_pickle_by_value(pkg.__name__)
dynamic_mod = types.ModuleType("dynamic_mod")
msg = (
f"{dynamic_mod} was not imported correctly, have you used an "
"`import` statement to access it?"
)
with pytest.raises(ValueError, match=re.escape(msg)):
register_pickle_by_value(dynamic_mod)
def _all_types_to_test():
T = typing.TypeVar("T")
class C(typing.Generic[T]):
pass
types_to_test = [
C,
C[int],
T,
typing.Any,
typing.Optional,
typing.Generic,
typing.Union,
typing.Optional[int],
typing.Generic[T],
typing.Callable[[int], typing.Any],
typing.Callable[..., typing.Any],
typing.Callable[[], typing.Any],
typing.Tuple[int, ...],
typing.Tuple[int, C[int]],
typing.List[int],
typing.Dict[int, str],
typing.ClassVar,
typing.ClassVar[C[int]],
typing.NoReturn,
]
return types_to_test
def test_module_level_pickler():
# #366: cloudpickle should expose its pickle.Pickler subclass as
# cloudpickle.Pickler
assert hasattr(cloudpickle, "Pickler")
assert cloudpickle.Pickler is cloudpickle.CloudPickler
if __name__ == "__main__":
unittest.main()
|
Protocol2CloudPickleTest
|
python
|
airbytehq__airbyte
|
airbyte-ci/connectors/pipelines/pipelines/airbyte_ci/connectors/migrate_to_manifest_only/declarative_component_schema.py
|
{
"start": 56660,
"end": 57488
}
|
class ____(BaseModel):
class Config:
extra = Extra.forbid
type: Literal["DeclarativeSource"]
check: CheckStream
streams: List[DeclarativeStream]
version: str = Field(
...,
description="The version of the Airbyte CDK used to build and test the source.",
)
schemas: Optional[Schemas] = None
definitions: Optional[Dict[str, Any]] = None
spec: Optional[Spec] = None
metadata: Optional[Dict[str, Any]] = Field(
None,
description="For internal Airbyte use only - DO NOT modify manually. Used by consumers of declarative manifests for storing related metadata.",
)
description: Optional[str] = Field(
None,
description="A description of the connector. It will be presented on the Source documentation page.",
)
|
DeclarativeSource
|
python
|
kamyu104__LeetCode-Solutions
|
Python/count-nodes-with-the-highest-score.py
|
{
"start": 1240,
"end": 1985
}
|
class ____(object):
def countHighestScoreNodes(self, parents):
"""
:type parents: List[int]
:rtype: int
"""
def dfs(adj, i, result):
cnts = [dfs(adj, child, result) for child in adj[i]]
total = sum(cnts)+1
score = max((len(adj)-total), 1)*reduce(lambda x, y: x*y, cnts, 1)
if score > result[0]:
result[:] = [score, 1]
elif score == result[0]:
result[1] += 1
return total
adj = [[] for _ in xrange(len(parents))] # Space: O(n)
for i in xrange(1, len(parents)):
adj[parents[i]].append(i)
result = [0]*2
dfs(adj, 0, result)
return result[1]
|
Solution2
|
python
|
matplotlib__matplotlib
|
lib/matplotlib/projections/polar.py
|
{
"start": 5229,
"end": 6970
}
|
class ____(mtransforms.Affine2DBase):
r"""
The affine part of the polar projection.
Scales the output so that maximum radius rests on the edge of the Axes
circle and the origin is mapped to (0.5, 0.5). The transform applied is
the same to x and y components and given by:
.. math::
x_{1} = 0.5 \left [ \frac{x_{0}}{(r_{\max} - r_{\min})} + 1 \right ]
:math:`r_{\min}, r_{\max}` are the minimum and maximum radial limits after
any scaling (e.g. log scaling) has been removed.
"""
def __init__(self, scale_transform, limits):
"""
Parameters
----------
scale_transform : `~matplotlib.transforms.Transform`
Scaling transform for the data. This is used to remove any scaling
from the radial view limits.
limits : `~matplotlib.transforms.BboxBase`
View limits of the data. The only part of its bounds that is used
is the y limits (for the radius limits).
"""
super().__init__()
self._scale_transform = scale_transform
self._limits = limits
self.set_children(scale_transform, limits)
self._mtx = None
__str__ = mtransforms._make_str_method("_scale_transform", "_limits")
def get_matrix(self):
# docstring inherited
if self._invalid:
limits_scaled = self._limits.transformed(self._scale_transform)
yscale = limits_scaled.ymax - limits_scaled.ymin
affine = mtransforms.Affine2D() \
.scale(0.5 / yscale) \
.translate(0.5, 0.5)
self._mtx = affine.get_matrix()
self._inverted = None
self._invalid = 0
return self._mtx
|
PolarAffine
|
python
|
ray-project__ray
|
release/llm_tests/benchmark/load_test.py
|
{
"start": 9824,
"end": 10131
}
|
class ____(OpenAIProvider):
def format_payload(self, prompt, max_tokens, images):
data = super().format_payload(prompt, max_tokens, images)
data["min_tokens"] = max_tokens
data["prompt_cache_max_len"] = self.parsed_options.prompt_cache_max_len
return data
|
FireworksProvider
|
python
|
pypa__setuptools
|
setuptools/tests/config/test_apply_pyprojecttoml.py
|
{
"start": 12274,
"end": 17660
}
|
class ____:
def base_pyproject(
self,
tmp_path,
additional_text="",
license_toml='license = {file = "LICENSE.txt"}\n',
):
text = PEP639_LICENSE_EXPRESSION
# Sanity-check
assert 'license = "mit or apache-2.0"' in text
assert 'license-files' not in text
assert "[tool.setuptools]" not in text
text = re.sub(
r"(license = .*)\n",
license_toml,
text,
count=1,
)
assert license_toml in text # sanity check
text = f"{text}\n{additional_text}\n"
pyproject = _pep621_example_project(tmp_path, "README", pyproject_text=text)
return pyproject
def base_pyproject_license_pep639(self, tmp_path, additional_text=""):
return self.base_pyproject(
tmp_path,
additional_text=additional_text,
license_toml='license = "licenseref-Proprietary"'
'\nlicense-files = ["_FILE*"]\n',
)
def test_both_license_and_license_files_defined(self, tmp_path):
setuptools_config = '[tool.setuptools]\nlicense-files = ["_FILE*"]'
pyproject = self.base_pyproject(tmp_path, setuptools_config)
(tmp_path / "_FILE.txt").touch()
(tmp_path / "_FILE.rst").touch()
# Would normally match the `license_files` patterns, but we want to exclude it
# by being explicit. On the other hand, contents should be added to `license`
license = tmp_path / "LICENSE.txt"
license.write_text("LicenseRef-Proprietary\n", encoding="utf-8")
msg1 = "'tool.setuptools.license-files' is deprecated in favor of 'project.license-files'"
msg2 = ".project.license. as a TOML table is deprecated"
with (
pytest.warns(SetuptoolsDeprecationWarning, match=msg1),
pytest.warns(SetuptoolsDeprecationWarning, match=msg2),
):
dist = pyprojecttoml.apply_configuration(makedist(tmp_path), pyproject)
assert set(dist.metadata.license_files) == {"_FILE.rst", "_FILE.txt"}
assert dist.metadata.license == "LicenseRef-Proprietary\n"
def test_both_license_and_license_files_defined_pep639(self, tmp_path):
# Set license and license-files
pyproject = self.base_pyproject_license_pep639(tmp_path)
(tmp_path / "_FILE.txt").touch()
(tmp_path / "_FILE.rst").touch()
msg = "Normalizing.*LicenseRef"
with pytest.warns(InformationOnly, match=msg):
dist = pyprojecttoml.apply_configuration(makedist(tmp_path), pyproject)
assert set(dist.metadata.license_files) == {"_FILE.rst", "_FILE.txt"}
assert dist.metadata.license is None
assert dist.metadata.license_expression == "LicenseRef-Proprietary"
def test_license_files_defined_twice(self, tmp_path):
# Set project.license-files and tools.setuptools.license-files
setuptools_config = '[tool.setuptools]\nlicense-files = ["_FILE*"]'
pyproject = self.base_pyproject_license_pep639(tmp_path, setuptools_config)
msg = "'project.license-files' is defined already. Remove 'tool.setuptools.license-files'"
with pytest.raises(InvalidConfigError, match=msg):
pyprojecttoml.apply_configuration(makedist(tmp_path), pyproject)
def test_default_patterns(self, tmp_path):
setuptools_config = '[tool.setuptools]\nzip-safe = false'
# ^ used just to trigger section validation
pyproject = self.base_pyproject(tmp_path, setuptools_config, license_toml="")
license_files = "LICENCE-a.html COPYING-abc.txt AUTHORS-xyz NOTICE,def".split()
for fname in license_files:
(tmp_path / fname).write_text(f"{fname}\n", encoding="utf-8")
dist = pyprojecttoml.apply_configuration(makedist(tmp_path), pyproject)
assert (tmp_path / "LICENSE.txt").exists() # from base example
assert set(dist.metadata.license_files) == {*license_files, "LICENSE.txt"}
def test_missing_patterns(self, tmp_path):
pyproject = self.base_pyproject_license_pep639(tmp_path)
assert list(tmp_path.glob("_FILE*")) == [] # sanity check
msg1 = "Cannot find any files for the given pattern.*"
msg2 = "Normalizing 'licenseref-Proprietary' to 'LicenseRef-Proprietary'"
with (
pytest.warns(SetuptoolsDeprecationWarning, match=msg1),
pytest.warns(InformationOnly, match=msg2),
):
pyprojecttoml.apply_configuration(makedist(tmp_path), pyproject)
def test_deprecated_file_expands_to_text(self, tmp_path):
"""Make sure the old example with ``license = {text = ...}`` works"""
assert 'license-files = ["LICENSE.txt"]' in PEP621_EXAMPLE # sanity check
text = PEP621_EXAMPLE.replace(
'license-files = ["LICENSE.txt"]',
'license = {file = "LICENSE.txt"}',
)
pyproject = _pep621_example_project(tmp_path, pyproject_text=text)
msg = ".project.license. as a TOML table is deprecated"
with pytest.warns(SetuptoolsDeprecationWarning, match=msg):
dist = pyprojecttoml.apply_configuration(makedist(tmp_path), pyproject)
assert dist.metadata.license == "--- LICENSE stub ---"
assert set(dist.metadata.license_files) == {"LICENSE.txt"} # auto-filled
|
TestLicenseFiles
|
python
|
sqlalchemy__sqlalchemy
|
test/sql/test_resultset.py
|
{
"start": 75459,
"end": 96452
}
|
class ____(fixtures.TablesTest):
run_inserts = "once"
run_deletes = None
__sparse_driver_backend__ = True
@classmethod
def define_tables(cls, metadata):
Table(
"keyed1",
metadata,
Column("a", CHAR(2), key="b"),
Column("c", CHAR(2), key="q"),
)
Table("keyed2", metadata, Column("a", CHAR(2)), Column("b", CHAR(2)))
Table("keyed3", metadata, Column("a", CHAR(2)), Column("d", CHAR(2)))
Table("keyed4", metadata, Column("b", CHAR(2)), Column("q", CHAR(2)))
Table("content", metadata, Column("t", String(30), key="type"))
Table("bar", metadata, Column("ctype", String(30), key="content_type"))
if testing.requires.schemas.enabled:
Table(
"wschema",
metadata,
Column("a", CHAR(2), key="b"),
Column("c", CHAR(2), key="q"),
schema=testing.config.test_schema,
)
Table(
"users",
metadata,
Column("id", Integer, primary_key=True),
Column("team_id", metadata, ForeignKey("teams.id")),
)
Table(
"teams",
metadata,
Column("id", Integer, primary_key=True),
)
@classmethod
def insert_data(cls, connection):
conn = connection
conn.execute(cls.tables.keyed1.insert(), dict(b="a1", q="c1"))
conn.execute(cls.tables.keyed2.insert(), dict(a="a2", b="b2"))
conn.execute(cls.tables.keyed3.insert(), dict(a="a3", d="d3"))
conn.execute(cls.tables.keyed4.insert(), dict(b="b4", q="q4"))
conn.execute(cls.tables.content.insert(), dict(type="t1"))
conn.execute(cls.tables.teams.insert(), dict(id=1))
conn.execute(cls.tables.users.insert(), dict(id=1, team_id=1))
if testing.requires.schemas.enabled:
conn.execute(
cls.tables["%s.wschema" % testing.config.test_schema].insert(),
dict(b="a1", q="c1"),
)
@testing.requires.schemas
def test_keyed_accessor_wschema(self, connection):
keyed1 = self.tables["%s.wschema" % testing.config.test_schema]
row = connection.execute(keyed1.select()).first()
eq_(row.b, "a1")
eq_(row.q, "c1")
eq_(row.a, "a1")
eq_(row.c, "c1")
def test_keyed_accessor_single(self, connection):
keyed1 = self.tables.keyed1
row = connection.execute(keyed1.select()).first()
eq_(row.b, "a1")
eq_(row.q, "c1")
eq_(row.a, "a1")
eq_(row.c, "c1")
def test_keyed_accessor_single_labeled(self, connection):
keyed1 = self.tables.keyed1
row = connection.execute(
keyed1.select().set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL)
).first()
eq_(row.keyed1_b, "a1")
eq_(row.keyed1_q, "c1")
eq_(row.keyed1_a, "a1")
eq_(row.keyed1_c, "c1")
def _test_keyed_targeting_no_label_at_all(self, expression, conn):
lt = literal_column("2")
stmt = select(literal_column("1"), expression, lt).select_from(
self.tables.keyed1
)
row = conn.execute(stmt).first()
eq_(row._mapping[expression], "a1")
eq_(row._mapping[lt], 2)
# Postgresql for example has the key as "?column?", which dupes
# easily. we get around that because we know that "2" is unique
eq_(row._mapping["2"], 2)
def test_keyed_targeting_no_label_at_all_one(self, connection):
class not_named_max(expression.ColumnElement):
name = "not_named_max"
inherit_cache = True
@compiles(not_named_max)
def visit_max(element, compiler, **kw):
# explicit add
if "add_to_result_map" in kw:
kw["add_to_result_map"](None, None, (element,), NULLTYPE)
return "max(a)"
# assert that there is no "AS max_" or any label of any kind.
eq_(str(select(not_named_max())), "SELECT max(a)")
nnm = not_named_max()
self._test_keyed_targeting_no_label_at_all(nnm, connection)
def test_keyed_targeting_no_label_at_all_two(self, connection):
class not_named_max(expression.ColumnElement):
name = "not_named_max"
inherit_cache = True
@compiles(not_named_max)
def visit_max(element, compiler, **kw):
# we don't add to keymap here; compiler should be doing it
return "max(a)"
# assert that there is no "AS max_" or any label of any kind.
eq_(str(select(not_named_max())), "SELECT max(a)")
nnm = not_named_max()
self._test_keyed_targeting_no_label_at_all(nnm, connection)
def test_keyed_targeting_no_label_at_all_text(self, connection):
t1 = text("max(a)")
t2 = text("min(a)")
stmt = select(t1, t2).select_from(self.tables.keyed1)
row = connection.execute(stmt).first()
eq_(row._mapping[t1], "a1")
eq_(row._mapping[t2], "a1")
@testing.requires.duplicate_names_in_cursor_description
def test_keyed_accessor_composite_conflict_2(self, connection):
keyed1 = self.tables.keyed1
keyed2 = self.tables.keyed2
row = connection.execute(
select(keyed1, keyed2)
.select_from(keyed1.join(keyed2, true()))
.set_label_style(LABEL_STYLE_NONE)
).first()
# column access is unambiguous
eq_(row._mapping[self.tables.keyed2.c.b], "b2")
# row.a is ambiguous
assert_raises_message(
exc.InvalidRequestError, "Ambig", getattr, row, "a"
)
# for "b" we have kind of a choice. the name "b" is not ambiguous in
# cursor.description in this case. It is however ambiguous as far as
# the objects we have queried against, because keyed1.c.a has key="b"
# and keyed1.c.b is "b". historically this was allowed as
# non-ambiguous, however the column it targets changes based on
# whether or not the dupe is present so it's ambiguous
# eq_(row.b, "b2")
assert_raises_message(
exc.InvalidRequestError, "Ambig", getattr, row, "b"
)
# illustrate why row.b above is ambiguous, and not "b2"; because
# if we didn't have keyed2, now it matches row.a. a new column
# shouldn't be able to grab the value from a previous column.
row = connection.execute(select(keyed1)).first()
eq_(row.b, "a1")
def test_keyed_accessor_composite_conflict_2_fix_w_uselabels(
self, connection
):
keyed1 = self.tables.keyed1
keyed2 = self.tables.keyed2
row = connection.execute(
select(keyed1, keyed2)
.select_from(keyed1.join(keyed2, true()))
.set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL)
).first()
# column access is unambiguous
eq_(row._mapping[self.tables.keyed2.c.b], "b2")
eq_(row._mapping["keyed2_b"], "b2")
eq_(row._mapping["keyed1_a"], "a1")
def test_keyed_accessor_composite_names_precedent(self, connection):
keyed1 = self.tables.keyed1
keyed4 = self.tables.keyed4
row = connection.execute(
select(keyed1, keyed4).select_from(keyed1.join(keyed4, true()))
).first()
eq_(row.b, "b4")
eq_(row.q, "q4")
eq_(row.a, "a1")
eq_(row.c, "c1")
@testing.requires.duplicate_names_in_cursor_description
def test_keyed_accessor_composite_keys_precedent(self, connection):
keyed1 = self.tables.keyed1
keyed3 = self.tables.keyed3
row = connection.execute(
select(keyed1, keyed3)
.select_from(keyed1.join(keyed3, true()))
.set_label_style(LABEL_STYLE_NONE)
).first()
eq_(row.q, "c1")
# prior to 1.4 #4887, this raised an "ambiguous column name 'a'""
# message, because "b" is linked to "a" which is a dupe. but we know
# where "b" is in the row by position.
eq_(row.b, "a1")
# "a" is of course ambiguous
assert_raises_message(
exc.InvalidRequestError,
"Ambiguous column name 'a'",
getattr,
row,
"a",
)
eq_(row.d, "d3")
def test_keyed_accessor_composite_labeled(self, connection):
keyed1 = self.tables.keyed1
keyed2 = self.tables.keyed2
row = connection.execute(
select(keyed1, keyed2)
.select_from(keyed1.join(keyed2, true()))
.set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL)
).first()
eq_(row.keyed1_b, "a1")
eq_(row.keyed1_a, "a1")
eq_(row.keyed1_q, "c1")
eq_(row.keyed1_c, "c1")
eq_(row.keyed2_a, "a2")
eq_(row.keyed2_b, "b2")
assert_raises(KeyError, lambda: row._mapping["keyed2_c"])
assert_raises(KeyError, lambda: row._mapping["keyed2_q"])
def test_keyed_accessor_column_is_repeated_multiple_times(
self, connection
):
# test new logic added as a result of the combination of #4892 and
# #4887. We allow duplicate columns, but we also have special logic
# to disambiguate for the same column repeated, and as #4887 adds
# stricter ambiguous result column logic, the compiler has to know to
# not add these dupe columns to the result map, else they register as
# ambiguous.
keyed2 = self.tables.keyed2
keyed3 = self.tables.keyed3
stmt = (
select(
keyed2.c.a,
keyed3.c.a,
keyed2.c.a,
keyed2.c.a,
keyed3.c.a,
keyed3.c.a,
keyed3.c.d,
keyed3.c.d,
)
.select_from(keyed2.join(keyed3, true()))
.set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL)
)
result = connection.execute(stmt)
# ensure the result map is the same number of cols so we can
# use positional targeting
eq_(
[rec[0] for rec in result.context.compiled._result_columns],
[
"keyed2_a",
"keyed3_a",
"keyed2_a__1",
"keyed2_a__2",
"keyed3_a__1",
"keyed3_a__2",
"keyed3_d",
"keyed3_d__1",
],
)
row = result.first()
# keyed access will ignore the dupe cols
eq_(row._mapping[keyed2.c.a], "a2")
eq_(row._mapping[keyed3.c.a], "a3")
eq_(result._getter(keyed3.c.a)(row), "a3")
eq_(row._mapping[keyed3.c.d], "d3")
# however we can get everything positionally
eq_(row, ("a2", "a3", "a2", "a2", "a3", "a3", "d3", "d3"))
eq_(row[0], "a2")
eq_(row[1], "a3")
eq_(row[2], "a2")
eq_(row[3], "a2")
eq_(row[4], "a3")
eq_(row[5], "a3")
eq_(row[6], "d3")
eq_(row[7], "d3")
@testing.requires.duplicate_names_in_cursor_description
@testing.combinations((None,), (0,), (1,), (2,), argnames="pos")
@testing.variation("texttype", ["literal", "text"])
def test_dupe_col_targeting(self, connection, pos, texttype):
"""test #11306"""
keyed2 = self.tables.keyed2
col = keyed2.c.b
data_value = "b2"
cols = [col, col, col]
expected = [data_value, data_value, data_value]
if pos is not None:
if texttype.literal:
cols[pos] = literal_column("10")
elif texttype.text:
cols[pos] = text("10")
else:
texttype.fail()
expected[pos] = 10
stmt = select(*cols)
result = connection.execute(stmt)
if texttype.text and pos is not None:
# when using text(), the name of the col is taken from
# cursor.description directly since we don't know what's
# inside a text()
key_for_text_col = result.cursor.description[pos][0]
elif texttype.literal and pos is not None:
# for literal_column(), we use the text
key_for_text_col = "10"
eq_(result.all(), [tuple(expected)])
result = connection.execute(stmt).mappings()
if pos is None:
eq_(set(result.keys()), {"b", "b__1", "b__2"})
eq_(
result.all(),
[{"b": data_value, "b__1": data_value, "b__2": data_value}],
)
else:
eq_(set(result.keys()), {"b", "b__1", key_for_text_col})
eq_(
result.all(),
[{"b": data_value, "b__1": data_value, key_for_text_col: 10}],
)
def test_columnclause_schema_column_one(self, connection):
# originally addressed by [ticket:2932], however liberalized
# Column-targeting rules are deprecated
a, b = sql.column("a"), sql.column("b")
stmt = select(a, b).select_from(table("keyed2"))
row = connection.execute(stmt).first()
in_(a, row._mapping)
in_(b, row._mapping)
keyed2 = self.tables.keyed2
not_in(keyed2.c.a, row._mapping)
not_in(keyed2.c.b, row._mapping)
def test_columnclause_schema_column_two(self, connection):
keyed2 = self.tables.keyed2
stmt = select(keyed2.c.a, keyed2.c.b)
row = connection.execute(stmt).first()
in_(keyed2.c.a, row._mapping)
in_(keyed2.c.b, row._mapping)
# in 1.x, would warn for string match, but return a result
a, b = sql.column("a"), sql.column("b")
not_in(a, row._mapping)
not_in(b, row._mapping)
def test_columnclause_schema_column_three(self, connection):
# this is also addressed by [ticket:2932]
stmt = text("select a, b from keyed2").columns(a=CHAR, b=CHAR)
row = connection.execute(stmt).first()
in_(stmt.selected_columns.a, row._mapping)
in_(stmt.selected_columns.b, row._mapping)
keyed2 = self.tables.keyed2
a, b = sql.column("a"), sql.column("b")
# in 1.x, would warn for string match, but return a result
not_in(keyed2.c.a, row._mapping)
not_in(keyed2.c.b, row._mapping)
not_in(a, row._mapping)
not_in(b, row._mapping)
not_in(stmt.subquery().c.a, row._mapping)
not_in(stmt.subquery().c.b, row._mapping)
def test_columnclause_schema_column_four(self, connection):
# originally addressed by [ticket:2932], however liberalized
# Column-targeting rules are deprecated
a, b = sql.column("keyed2_a"), sql.column("keyed2_b")
stmt = text("select a AS keyed2_a, b AS keyed2_b from keyed2").columns(
a, b
)
row = connection.execute(stmt).first()
in_(a, row._mapping)
in_(b, row._mapping)
in_(stmt.selected_columns.keyed2_a, row._mapping)
in_(stmt.selected_columns.keyed2_b, row._mapping)
keyed2 = self.tables.keyed2
# in 1.x, would warn for string match, but return a result
not_in(keyed2.c.a, row._mapping)
not_in(keyed2.c.b, row._mapping)
not_in(stmt.subquery().c.keyed2_a, row._mapping)
not_in(stmt.subquery().c.keyed2_b, row._mapping)
def test_columnclause_schema_column_five(self, connection):
# this is also addressed by [ticket:2932]
stmt = text("select a AS keyed2_a, b AS keyed2_b from keyed2").columns(
keyed2_a=CHAR, keyed2_b=CHAR
)
row = connection.execute(stmt).first()
in_(stmt.selected_columns.keyed2_a, row._mapping)
in_(stmt.selected_columns.keyed2_b, row._mapping)
keyed2 = self.tables.keyed2
# in 1.x, would warn for string match, but return a result
not_in(keyed2.c.a, row._mapping)
not_in(keyed2.c.b, row._mapping)
not_in(stmt.subquery().c.keyed2_a, row._mapping)
not_in(stmt.subquery().c.keyed2_b, row._mapping)
def _adapt_result_columns_fixture_one(self):
keyed1 = self.tables.keyed1
stmt = (
select(keyed1.c.b, keyed1.c.q.label("foo"))
.set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL)
.subquery()
)
return select(stmt.c.keyed1_b, stmt.c.foo)
def _adapt_result_columns_fixture_two(self):
return text("select a AS keyed2_a, b AS keyed2_b from keyed2").columns(
column("keyed2_a", CHAR), column("keyed2_b", CHAR)
)
def _adapt_result_columns_fixture_three(self):
keyed1 = self.tables.keyed1
stmt = select(keyed1.c.b, keyed1.c.q.label("foo")).subquery()
return select(stmt.c.b, stmt.c.foo)
def _adapt_result_columns_fixture_four(self):
keyed1 = self.tables.keyed1
stmt1 = select(keyed1).set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL)
a1 = keyed1.alias()
stmt2 = ClauseAdapter(a1).traverse(stmt1)
return stmt2
def _adapt_result_columns_fixture_five(self):
users, teams = self.tables("users", "teams")
return select(users.c.id, teams.c.id).select_from(
users.outerjoin(teams)
)
def _adapt_result_columns_fixture_six(self):
# this has _result_columns structure that is not ordered
# the same as the cursor.description.
return text("select a AS keyed2_a, b AS keyed2_b from keyed2").columns(
keyed2_b=CHAR,
keyed2_a=CHAR,
)
def _adapt_result_columns_fixture_seven(self):
# this has _result_columns structure that is not ordered
# the same as the cursor.description.
return text("select a AS keyed2_a, b AS keyed2_b from keyed2").columns(
keyed2_b=CHAR, bogus_col=CHAR
)
@testing.combinations(
_adapt_result_columns_fixture_one,
_adapt_result_columns_fixture_two,
_adapt_result_columns_fixture_three,
_adapt_result_columns_fixture_four,
_adapt_result_columns_fixture_five,
_adapt_result_columns_fixture_six,
_adapt_result_columns_fixture_seven,
argnames="stmt_fn",
)
def test_adapt_result_columns(self, connection, stmt_fn):
"""test adaptation of a CursorResultMetadata to another one.
This copies the _keymap from one to the other in terms of the
selected columns of a target selectable.
This is used by the statement caching process to re-use the
CursorResultMetadata from the cached statement against the same
statement sent separately.
"""
stmt1 = stmt_fn(self)
stmt2 = stmt_fn(self)
eq_(stmt1._generate_cache_key(), stmt2._generate_cache_key())
column_linkage = dict(
zip(stmt1.selected_columns, stmt2.selected_columns)
)
for i in range(2):
try:
result = connection.execute(stmt1)
mock_context = Mock(
compiled=result.context.compiled, invoked_statement=stmt2
)
existing_metadata = result._metadata
adapted_metadata = existing_metadata._adapt_to_context(
mock_context
)
eq_(existing_metadata.keys, adapted_metadata.keys)
for k in existing_metadata._keymap:
if isinstance(k, ColumnElement) and k in column_linkage:
other_k = column_linkage[k]
else:
other_k = k
is_(
existing_metadata._keymap[k],
adapted_metadata._keymap[other_k],
)
finally:
result.close()
@testing.combinations(
_adapt_result_columns_fixture_one,
_adapt_result_columns_fixture_two,
_adapt_result_columns_fixture_three,
_adapt_result_columns_fixture_four,
_adapt_result_columns_fixture_five,
_adapt_result_columns_fixture_six,
_adapt_result_columns_fixture_seven,
argnames="stmt_fn",
)
def test_adapt_result_columns_from_cache(self, connection, stmt_fn):
stmt1 = stmt_fn(self)
stmt2 = stmt_fn(self)
cache = {}
result = connection.execute(
stmt1,
execution_options={"compiled_cache": cache},
)
result.close()
assert cache
result = connection.execute(
stmt2,
execution_options={"compiled_cache": cache},
)
row = result.first()
for col in stmt2.selected_columns:
if "bogus" in col.name:
assert col not in row._mapping
else:
assert col in row._mapping
|
KeyTargetingTest
|
python
|
ray-project__ray
|
python/ray/data/_internal/execution/operators/limit_operator.py
|
{
"start": 537,
"end": 5337
}
|
class ____(OneToOneOperator):
"""Physical operator for limit."""
def __init__(
self,
limit: int,
input_op: PhysicalOperator,
data_context: DataContext,
):
self._limit = limit
self._consumed_rows = 0
self._buffer: Deque[RefBundle] = deque()
self._name = f"limit={limit}"
self._output_blocks_stats: List[BlockStats] = []
self._cur_output_bundles = 0
super().__init__(self._name, input_op, data_context)
if self._limit <= 0:
self.mark_execution_finished()
def _limit_reached(self) -> bool:
return self._consumed_rows >= self._limit
def _add_input_inner(self, refs: RefBundle, input_index: int) -> None:
assert not self.completed()
assert input_index == 0, input_index
if self._limit_reached():
return
out_blocks: List[ObjectRef[Block]] = []
out_metadata: List[BlockMetadata] = []
for block, metadata in refs.blocks:
num_rows = metadata.num_rows
assert num_rows is not None
if self._consumed_rows + num_rows <= self._limit:
out_blocks.append(block)
out_metadata.append(metadata)
self._output_blocks_stats.append(metadata.to_stats())
self._consumed_rows += num_rows
else:
# Slice the last block.
def slice_fn(block, metadata, num_rows) -> Tuple[Block, BlockMetadata]:
block = BlockAccessor.for_block(block).slice(
0, num_rows, copy=False
)
metadata = copy.deepcopy(metadata)
metadata.num_rows = num_rows
metadata.size_bytes = BlockAccessor.for_block(block).size_bytes()
return block, metadata
block, metadata_ref = cached_remote_fn(
slice_fn, num_cpus=0, num_returns=2
).remote(
block,
metadata,
self._limit - self._consumed_rows,
)
out_blocks.append(block)
metadata = ray.get(metadata_ref)
out_metadata.append(metadata)
self._output_blocks_stats.append(metadata.to_stats())
self._consumed_rows = self._limit
break
self._cur_output_bundles += 1
out_refs = RefBundle(
list(zip(out_blocks, out_metadata)),
owns_blocks=refs.owns_blocks,
schema=refs.schema,
)
self._buffer.append(out_refs)
self._metrics.on_output_queued(out_refs)
if self._limit_reached():
self.mark_execution_finished()
# We cannot estimate if we have only consumed empty blocks,
# or if the input dependency's total number of output bundles is unknown.
num_inputs = self.input_dependencies[0].num_outputs_total()
if self._consumed_rows > 0 and num_inputs is not None:
# Estimate number of output bundles
# Check the case where _limit > # of input rows
estimated_total_output_rows = min(
self._limit, self._consumed_rows / self._cur_output_bundles * num_inputs
)
# _consumed_rows / _limit is roughly equal to
# _cur_output_bundles / total output blocks
self._estimated_num_output_bundles = round(
estimated_total_output_rows
/ self._consumed_rows
* self._cur_output_bundles
)
def has_next(self) -> bool:
return len(self._buffer) > 0
def _get_next_inner(self) -> RefBundle:
output = self._buffer.popleft()
self._metrics.on_output_dequeued(output)
return output
def get_stats(self) -> StatsDict:
return {self._name: self._output_blocks_stats}
def num_outputs_total(self) -> Optional[int]:
# Before execution is completed, we don't know how many output
# bundles we will have. We estimate based off the consumption so far.
if self.has_execution_finished():
return self._cur_output_bundles
return self._estimated_num_output_bundles
def num_output_rows_total(self) -> Optional[int]:
# The total number of rows is simply the limit or the number
# of input rows, whichever is smaller
input_num_rows = self.input_dependencies[0].num_output_rows_total()
if input_num_rows is None:
return None
return min(self._limit, input_num_rows)
def throttling_disabled(self) -> bool:
return True
def implements_accurate_memory_accounting(self) -> bool:
return True
|
LimitOperator
|
python
|
ray-project__ray
|
release/ray_release/cluster_manager/full.py
|
{
"start": 389,
"end": 4833
}
|
class ____(MinimalClusterManager):
"""Full manager.
Builds app config and compute template and starts/terminated session
using SDK.
"""
def start_cluster(self, timeout: float = 600.0):
logger.info(f"Creating cluster {self.cluster_name}")
logger.info(f"Autosuspend time: {self.autosuspend_minutes} minutes")
logger.info(f"Auto terminate after: {self.maximum_uptime_minutes} minutes")
try:
result = self.sdk.create_cluster(
dict(
name=self.cluster_name,
project_id=self.project_id,
cluster_environment_build_id=self.cluster_env_build_id,
cluster_compute_id=self.cluster_compute_id,
idle_timeout_minutes=self.autosuspend_minutes,
)
)
self.cluster_id = result.result.id
except Exception as e:
raise ClusterCreationError(f"Error creating cluster: {e}") from e
# Trigger session start
logger.info(f"Starting cluster {self.cluster_name} ({self.cluster_id})")
cluster_url = anyscale_cluster_url(
project_id=self.project_id, cluster_id=self.cluster_id
)
logger.info(f"Link to cluster: {format_link(cluster_url)}")
try:
result = self.sdk.start_cluster(self.cluster_id, start_cluster_options={})
cop_id = result.result.id
completed = result.result.completed
except Exception as e:
raise ClusterStartupError(
f"Error starting cluster with name "
f"{self.cluster_name} and {self.cluster_id} ({cluster_url}): "
f"{e}"
) from e
# Wait for session
logger.info(f"Waiting for cluster {self.cluster_name}...")
start_time = time.monotonic()
timeout_at = start_time + timeout
next_status = start_time + 30
while not completed:
now = time.monotonic()
if now >= timeout_at:
raise ClusterStartupTimeout(
f"Time out when creating cluster {self.cluster_name}"
)
if now >= next_status:
logger.info(
f"... still waiting for cluster {self.cluster_name} "
f"({int(now - start_time)} seconds) ..."
)
next_status += 30
# Sleep 1 sec before next check.
time.sleep(1)
result = exponential_backoff_retry(
lambda: self.sdk.get_cluster_operation(cop_id, _request_timeout=30),
retry_exceptions=Exception,
initial_retry_delay_s=2,
max_retries=3,
)
completed = result.result.completed
result = self.sdk.get_cluster(self.cluster_id)
if result.result.state != "Running":
raise ClusterStartupFailed(
f"Cluster did not come up - most likely the nodes are currently "
f"not available. Please check the cluster startup logs: "
f"{cluster_url} (cluster state: {result.result.state})"
)
def terminate_cluster_ex(self, wait: bool = False):
if self.cluster_id:
logger.info(f"Terminating cluster with ID {self.cluster_id}")
# Just trigger a request. No need to wait until session shutdown.
result = self.sdk.terminate_cluster(
cluster_id=self.cluster_id, terminate_cluster_options={}
)
logger.info(f"Terminate request for cluster with ID {self.cluster_id} sent")
if not wait:
return
# Only do this when waiting
cop_id = result.result.id
completed = result.result.completed
while not completed:
# Sleep 1 sec before next check.
time.sleep(1)
cluster_operation_response = self.sdk.get_cluster_operation(
cop_id, _request_timeout=30
)
cluster_operation = cluster_operation_response.result
completed = cluster_operation.completed
result = self.sdk.get_cluster(self.cluster_id)
while result.result.state != "Terminated":
time.sleep(1)
result = self.sdk.get_cluster(self.cluster_id)
|
FullClusterManager
|
python
|
dagster-io__dagster
|
python_modules/dagster-graphql/dagster_graphql/schema/roots/mutation.py
|
{
"start": 7212,
"end": 7777
}
|
class ____(graphene.Mutation):
"""Deletes a run from storage."""
Output = graphene.NonNull(GrapheneDeletePipelineRunResult)
class Arguments:
runId = graphene.NonNull(graphene.String)
class Meta:
name = "DeleteRunMutation"
@capture_error
@require_permission_check(Permissions.DELETE_PIPELINE_RUN)
def mutate(
self, graphene_info: ResolveInfo, runId: str
) -> Union[GrapheneRunNotFoundError, GrapheneDeletePipelineRunSuccess]:
return delete_pipeline_run(graphene_info, runId)
|
GrapheneDeleteRunMutation
|
python
|
walkccc__LeetCode
|
solutions/1879. Minimum XOR Sum of Two Arrays/1879.py
|
{
"start": 0,
"end": 359
}
|
class ____:
def minimumXORSum(self, nums1: list[int], nums2: list[int]) -> int:
@functools.lru_cache(None)
def dp(mask: int) -> int:
i = mask.bit_count()
if i == len(nums1):
return 0
return min((nums1[i] ^ nums2[j]) + dp(mask | 1 << j)
for j in range(len(nums2)) if not mask >> j & 1)
return dp(0)
|
Solution
|
python
|
PyCQA__flake8
|
src/flake8/formatting/base.py
|
{
"start": 286,
"end": 7357
}
|
class ____:
"""Class defining the formatter interface.
.. attribute:: options
The options parsed from both configuration files and the command-line.
.. attribute:: filename
If specified by the user, the path to store the results of the run.
.. attribute:: output_fd
Initialized when the :meth:`start` is called. This will be a file
object opened for writing.
.. attribute:: newline
The string to add to the end of a line. This is only used when the
output filename has been specified.
"""
def __init__(self, options: argparse.Namespace) -> None:
"""Initialize with the options parsed from config and cli.
This also calls a hook, :meth:`after_init`, so subclasses do not need
to call super to call this method.
:param options:
User specified configuration parsed from both configuration files
and the command-line interface.
"""
self.options = options
self.filename = options.output_file
self.output_fd: IO[str] | None = None
self.newline = "\n"
self.color = options.color == "always" or (
options.color == "auto"
and sys.stdout.isatty()
and _windows_color.terminal_supports_color
)
self.after_init()
def after_init(self) -> None:
"""Initialize the formatter further."""
def beginning(self, filename: str) -> None:
"""Notify the formatter that we're starting to process a file.
:param filename:
The name of the file that Flake8 is beginning to report results
from.
"""
def finished(self, filename: str) -> None:
"""Notify the formatter that we've finished processing a file.
:param filename:
The name of the file that Flake8 has finished reporting results
from.
"""
def start(self) -> None:
"""Prepare the formatter to receive input.
This defaults to initializing :attr:`output_fd` if :attr:`filename`
"""
if self.filename:
dirname = os.path.dirname(os.path.abspath(self.filename))
os.makedirs(dirname, exist_ok=True)
self.output_fd = open(self.filename, "a")
def handle(self, error: Violation) -> None:
"""Handle an error reported by Flake8.
This defaults to calling :meth:`format`, :meth:`show_source`, and
then :meth:`write`. To extend how errors are handled, override this
method.
:param error:
This will be an instance of
:class:`~flake8.violation.Violation`.
"""
line = self.format(error)
source = self.show_source(error)
self.write(line, source)
def format(self, error: Violation) -> str | None:
"""Format an error reported by Flake8.
This method **must** be implemented by subclasses.
:param error:
This will be an instance of
:class:`~flake8.violation.Violation`.
:returns:
The formatted error string.
"""
raise NotImplementedError(
"Subclass of BaseFormatter did not implement" " format.",
)
def show_statistics(self, statistics: Statistics) -> None:
"""Format and print the statistics."""
for error_code in statistics.error_codes():
stats_for_error_code = statistics.statistics_for(error_code)
statistic = next(stats_for_error_code)
count = statistic.count
count += sum(stat.count for stat in stats_for_error_code)
self._write(f"{count:<5} {error_code} {statistic.message}")
def show_benchmarks(self, benchmarks: list[tuple[str, float]]) -> None:
"""Format and print the benchmarks."""
# NOTE(sigmavirus24): The format strings are a little confusing, even
# to me, so here's a quick explanation:
# We specify the named value first followed by a ':' to indicate we're
# formatting the value.
# Next we use '<' to indicate we want the value left aligned.
# Then '10' is the width of the area.
# For floats, finally, we only want only want at most 3 digits after
# the decimal point to be displayed. This is the precision and it
# can not be specified for integers which is why we need two separate
# format strings.
float_format = "{value:<10.3} {statistic}".format
int_format = "{value:<10} {statistic}".format
for statistic, value in benchmarks:
if isinstance(value, int):
benchmark = int_format(statistic=statistic, value=value)
else:
benchmark = float_format(statistic=statistic, value=value)
self._write(benchmark)
def show_source(self, error: Violation) -> str | None:
"""Show the physical line generating the error.
This also adds an indicator for the particular part of the line that
is reported as generating the problem.
:param error:
This will be an instance of
:class:`~flake8.violation.Violation`.
:returns:
The formatted error string if the user wants to show the source.
If the user does not want to show the source, this will return
``None``.
"""
if not self.options.show_source or error.physical_line is None:
return ""
# Because column numbers are 1-indexed, we need to remove one to get
# the proper number of space characters.
indent = "".join(
c if c.isspace() else " "
for c in error.physical_line[: error.column_number - 1]
)
# Physical lines have a newline at the end, no need to add an extra
# one
return f"{error.physical_line}{indent}^"
def _write(self, output: str) -> None:
"""Handle logic of whether to use an output file or print()."""
if self.output_fd is not None:
self.output_fd.write(output + self.newline)
if self.output_fd is None or self.options.tee:
sys.stdout.buffer.write(output.encode() + self.newline.encode())
def write(self, line: str | None, source: str | None) -> None:
"""Write the line either to the output file or stdout.
This handles deciding whether to write to a file or print to standard
out for subclasses. Override this if you want behaviour that differs
from the default.
:param line:
The formatted string to print or write.
:param source:
The source code that has been formatted and associated with the
line of output.
"""
if line:
self._write(line)
if source:
self._write(source)
def stop(self) -> None:
"""Clean up after reporting is finished."""
if self.output_fd is not None:
self.output_fd.close()
self.output_fd = None
|
BaseFormatter
|
python
|
pytorch__pytorch
|
torch/utils/data/datapipes/dataframe/datapipes.py
|
{
"start": 3737,
"end": 4626
}
|
class ____(DFIterDataPipe):
def __init__(self, source_datapipe, dataframe_size=10, columns=None) -> None:
self.source_datapipe = source_datapipe
self.columns = columns
self.dataframe_size = dataframe_size
def _as_list(self, item):
try:
return list(item)
except (
Exception
): # TODO(VitalyFedyunin): Replace with better iterable exception
return [item]
def __iter__(self):
aggregate = []
for item in self.source_datapipe:
aggregate.append(self._as_list(item))
if len(aggregate) == self.dataframe_size:
yield df_wrapper.create_dataframe(aggregate, columns=self.columns)
aggregate = []
if len(aggregate) > 0:
yield df_wrapper.create_dataframe(aggregate, columns=self.columns)
|
ExampleAggregateAsDataFrames
|
python
|
falconry__falcon
|
tests/asgi/test_response_media_asgi.py
|
{
"start": 6132,
"end": 8173
}
|
class ____:
def test_text(self):
async def test(resp):
resp.text = 'body'
resp.data = b'data'
resp.media = ['media']
assert await resp.render_body() == b'body'
run_test(test)
def test_data(self):
async def test(resp):
resp.data = b'data'
resp.media = ['media']
assert await resp.render_body() == b'data'
run_test(test)
def test_data_masquerading_as_text(self):
async def test(resp):
resp.text = b'data'
resp.media = ['media']
assert await resp.render_body() == b'data'
run_test(test)
def test_media(self):
async def test(resp):
resp.media = ['media']
assert json.loads((await resp.render_body()).decode('utf-8')) == ['media']
run_test(test)
def test_media_rendered_cached():
async def test(resp):
resp.media = {'foo': 'bar'}
first = await resp.render_body()
assert first is await resp.render_body()
assert first is resp._media_rendered
resp.media = 123
assert first is not await resp.render_body()
run_test(test)
def test_custom_render_body():
class CustomResponse(falcon.asgi.Response):
async def render_body(self):
body = await super().render_body()
if not self.content_type.startswith('text/plain'):
return body
if not body.endswith(b'\n'):
# Be a good Unix netizen
return body + b'\n'
return body
class HelloResource:
async def on_get(self, req, resp):
resp.content_type = falcon.MEDIA_TEXT
resp.text = 'Hello, World!'
app = falcon.asgi.App(response_type=CustomResponse)
app.add_route('/', HelloResource())
resp = testing.simulate_get(app, '/')
assert resp.headers['Content-Type'] == 'text/plain; charset=utf-8'
assert resp.text == 'Hello, World!\n'
|
TestRenderBodyPrecedence
|
python
|
RobertCraigie__pyright-python
|
src/pyright/errors.py
|
{
"start": 258,
"end": 499
}
|
class ____(NodeError):
def __init__(self, target: Target, path: Path) -> None:
super().__init__(f'Expected {target} binary to exist at {path} but was not found.')
self.path = path
self.target = target
|
BinaryNotFound
|
python
|
numba__numba
|
numba/core/types/misc.py
|
{
"start": 4655,
"end": 5107
}
|
class ____(Type):
"""
Similar to EphemeralPointer, but pointing to an array of elements,
rather than a single one. The array size must be known at compile-time.
"""
def __init__(self, dtype, count):
self.dtype = dtype
self.count = count
name = "*%s[%d]" % (dtype, count)
super(EphemeralArray, self).__init__(name)
@property
def key(self):
return self.dtype, self.count
|
EphemeralArray
|
python
|
charliermarsh__ruff
|
crates/ruff_linter/resources/test/fixtures/pylint/super_without_brackets.py
|
{
"start": 431,
"end": 698
}
|
class ____(Animal):
@staticmethod
def speak():
super = "super"
original_speak = super.speak() # OK
return f"{original_speak} But as a dog, it barks!"
def super_without_class() -> None:
super.blah() # OK
super.blah() # OK
|
FineDog
|
python
|
pyqtgraph__pyqtgraph
|
pyqtgraph/opengl/items/GLScatterPlotItem.py
|
{
"start": 459,
"end": 11539
}
|
class ____(GLGraphicsItem):
"""Draws points at a list of 3D positions."""
_shaderProgram = None
def __init__(self, parentItem=None, **kwds):
super().__init__()
glopts = kwds.pop('glOptions', 'additive')
self.setGLOptions(glopts)
self.pos = None
self.size = 10
self.color = [1.0,1.0,1.0,0.5]
self.pxMode = True
self.m_vbo_position = QtOpenGL.QOpenGLBuffer(QtOpenGL.QOpenGLBuffer.Type.VertexBuffer)
self.m_vbo_color = QtOpenGL.QOpenGLBuffer(QtOpenGL.QOpenGLBuffer.Type.VertexBuffer)
self.m_vbo_size = QtOpenGL.QOpenGLBuffer(QtOpenGL.QOpenGLBuffer.Type.VertexBuffer)
self.dirty_bits = DirtyFlag(0)
self.setParentItem(parentItem)
self.setData(**kwds)
def setData(self, **kwds):
"""
Update the data displayed by this item. All arguments are optional;
for example it is allowed to update spot positions while leaving
colors unchanged, etc.
==================== ==================================================
**Arguments:**
pos (N,3) array of floats specifying point locations.
color (N,4) array of floats (0.0-1.0) specifying
spot colors OR a tuple of floats specifying
a single color for all spots.
size (N,) array of floats specifying spot sizes or
a single value to apply to all spots.
pxMode If True, spot sizes are expressed in pixels.
Otherwise, they are expressed in item coordinates.
==================== ==================================================
"""
args = ['pos', 'color', 'size', 'pxMode']
for k in kwds.keys():
if k not in args:
raise Exception('Invalid keyword argument: %s (allowed arguments are %s)' % (k, str(args)))
if 'pos' in kwds:
pos = kwds.pop('pos')
self.pos = np.ascontiguousarray(pos, dtype=np.float32)
self.dirty_bits |= DirtyFlag.POSITION
if 'color' in kwds:
color = kwds.pop('color')
if isinstance(color, np.ndarray):
color = np.ascontiguousarray(color, dtype=np.float32)
self.dirty_bits |= DirtyFlag.COLOR
if isinstance(color, QtGui.QColor):
color = color.getRgbF()
self.color = color
if 'size' in kwds:
size = kwds.pop('size')
if isinstance(size, np.ndarray):
size = np.ascontiguousarray(size, dtype=np.float32)
self.dirty_bits |= DirtyFlag.SIZE
self.size = size
self.pxMode = kwds.get('pxMode', self.pxMode)
self.update()
def upload_vbo(self, vbo, arr):
if arr is None:
vbo.destroy()
return
if not vbo.isCreated():
vbo.create()
vbo.bind()
if vbo.size() != arr.nbytes:
vbo.allocate(arr, arr.nbytes)
else:
vbo.write(0, arr, arr.nbytes)
vbo.release()
@staticmethod
def getShaderProgram():
klass = GLScatterPlotItem
if klass._shaderProgram is not None:
return klass._shaderProgram
ctx = QtGui.QOpenGLContext.currentContext()
fmt = ctx.format()
if ctx.isOpenGLES():
if fmt.version() >= (3, 0):
glsl_version = "#version 300 es\n"
sources = SHADER_CORE
else:
glsl_version = "#version 100\n"
sources = SHADER_LEGACY
else:
if fmt.version() >= (3, 1):
glsl_version = "#version 140\n"
sources = SHADER_CORE
else:
glsl_version = "#version 120\n"
sources = SHADER_LEGACY
compiled = [shaders.compileShader([glsl_version, v], k) for k, v in sources.items()]
program = shaders.compileProgram(*compiled)
# bind generic vertex attrib 0 to "a_position" so that
# vertex attrib 0 definitely gets enabled later.
GL.glBindAttribLocation(program, 0, "a_position")
GL.glBindAttribLocation(program, 1, "a_color")
GL.glBindAttribLocation(program, 2, "a_size")
GL.glLinkProgram(program)
klass._shaderProgram = program
return program
def paint(self):
if self.pos is None:
return
self.setupGLState()
mat_mvp = self.mvpMatrix()
mat_mvp = np.array(mat_mvp.data(), dtype=np.float32)
mat_modelview = self.modelViewMatrix()
mat_modelview = np.array(mat_modelview.data(), dtype=np.float32)
view = self.view()
if self.pxMode:
scale = 0
else:
scale = 2.0 * math.tan(math.radians(0.5 * view.opts["fov"])) / view.width()
context = QtGui.QOpenGLContext.currentContext()
if DirtyFlag.POSITION in self.dirty_bits:
self.upload_vbo(self.m_vbo_position, self.pos)
if DirtyFlag.COLOR in self.dirty_bits:
self.upload_vbo(self.m_vbo_color, self.color)
if DirtyFlag.SIZE in self.dirty_bits:
self.upload_vbo(self.m_vbo_size, self.size)
self.dirty_bits = DirtyFlag(0)
if not context.isOpenGLES():
if _is_compatibility_profile(context):
GL.glEnable(GL.GL_POINT_SPRITE)
GL.glEnable(GL.GL_PROGRAM_POINT_SIZE)
program = self.getShaderProgram()
enabled_locs = []
loc = 0
self.m_vbo_position.bind()
GL.glVertexAttribPointer(loc, 3, GL.GL_FLOAT, False, 0, None)
self.m_vbo_position.release()
enabled_locs.append(loc)
loc = 1
if isinstance(self.color, np.ndarray):
self.m_vbo_color.bind()
GL.glVertexAttribPointer(loc, 4, GL.GL_FLOAT, False, 0, None)
self.m_vbo_color.release()
enabled_locs.append(loc)
else:
GL.glVertexAttrib4f(loc, *self.color)
loc = 2
if isinstance(self.size, np.ndarray):
self.m_vbo_size.bind()
GL.glVertexAttribPointer(loc, 1, GL.GL_FLOAT, False, 0, None)
self.m_vbo_size.release()
enabled_locs.append(loc)
else:
GL.glVertexAttrib1f(loc, self.size)
for loc in enabled_locs:
GL.glEnableVertexAttribArray(loc)
with program:
loc = GL.glGetUniformLocation(program, "u_mvp")
GL.glUniformMatrix4fv(loc, 1, False, mat_mvp)
loc = GL.glGetUniformLocation(program, "u_modelview")
GL.glUniformMatrix4fv(loc, 1, False, mat_modelview)
loc = GL.glGetUniformLocation(program, "u_scale")
GL.glUniform1f(loc, scale)
GL.glDrawArrays(GL.GL_POINTS, 0, len(self.pos))
for loc in enabled_locs:
GL.glDisableVertexAttribArray(loc)
def _is_compatibility_profile(context):
# https://stackoverflow.com/questions/73745603/detect-the-opengl-context-profile-before-version-3-2
sformat = context.format()
profile = sformat.profile()
# >= 3.2 has {Compatibility,Core}Profile
# <= 3.1 is NoProfile
if profile == sformat.OpenGLContextProfile.CompatibilityProfile:
compat = True
elif profile == sformat.OpenGLContextProfile.CoreProfile:
compat = False
else:
compat = False
version = sformat.version()
if version <= (2, 1):
compat = True
elif version == (3, 0):
if sformat.testOption(sformat.FormatOption.DeprecatedFunctions):
compat = True
elif version == (3, 1):
if context.hasExtension(b'GL_ARB_compatibility'):
compat = True
return compat
## See:
##
## http://stackoverflow.com/questions/9609423/applying-part-of-a-texture-sprite-sheet-texture-map-to-a-point-sprite-in-ios
## http://stackoverflow.com/questions/3497068/textured-points-in-opengl-es-2-0
##
##
SHADER_LEGACY = {
GL.GL_VERTEX_SHADER : """
uniform float u_scale;
uniform mat4 u_modelview;
uniform mat4 u_mvp;
attribute vec4 a_position;
attribute vec4 a_color;
attribute float a_size;
varying vec4 v_color;
void main() {
gl_Position = u_mvp * a_position;
v_color = a_color;
gl_PointSize = a_size;
if (u_scale != 0.0) {
// pxMode=False
// the modelview matrix transforms the vertex to
// camera space, where the camera is at (0, 0, 0).
vec4 cpos = u_modelview * a_position;
float dist = length(cpos.xyz);
// equations:
// xDist = dist * 2.0 * tan(0.5 * fov)
// pxSize = xDist / view_width
// let:
// u_scale = 2.0 * tan(0.5 * fov) / view_width
// then:
// pxSize = dist * u_scale
float pxSize = dist * u_scale;
gl_PointSize /= pxSize;
}
}
""",
GL.GL_FRAGMENT_SHADER : """
#ifdef GL_ES
precision mediump float;
#endif
varying vec4 v_color;
void main()
{
vec2 xy = (gl_PointCoord - 0.5) * 2.0;
if (dot(xy, xy) <= 1.0) gl_FragColor = v_color;
else discard;
}
"""
}
SHADER_CORE = {
GL.GL_VERTEX_SHADER : """
uniform float u_scale;
uniform mat4 u_modelview;
uniform mat4 u_mvp;
in vec4 a_position;
in vec4 a_color;
in float a_size;
out vec4 v_color;
void main() {
gl_Position = u_mvp * a_position;
v_color = a_color;
gl_PointSize = a_size;
if (u_scale != 0.0) {
// pxMode=False
// the modelview matrix transforms the vertex to
// camera space, where the camera is at (0, 0, 0).
vec4 cpos = u_modelview * a_position;
float dist = length(cpos.xyz);
// equations:
// xDist = dist * 2.0 * tan(0.5 * fov)
// pxSize = xDist / view_width
// let:
// u_scale = 2.0 * tan(0.5 * fov) / view_width
// then:
// pxSize = dist * u_scale
float pxSize = dist * u_scale;
gl_PointSize /= pxSize;
}
}
""",
GL.GL_FRAGMENT_SHADER : """
#ifdef GL_ES
precision mediump float;
#endif
in vec4 v_color;
out vec4 fragColor;
void main()
{
vec2 xy = (gl_PointCoord - 0.5) * 2.0;
if (dot(xy, xy) <= 1.0) fragColor = v_color;
else discard;
}
"""
}
|
GLScatterPlotItem
|
python
|
numpy__numpy
|
numpy/_core/tests/test_finfo.py
|
{
"start": 113,
"end": 2488
}
|
class ____:
"""Minimal class to simulate machine arithmetic parameters."""
def __init__(self, dtype, machep, negep, minexp, maxexp, nmant, iexp):
self.dtype = dtype
self.machep = machep
self.negep = negep
self.minexp = minexp
self.maxexp = maxexp
self.nmant = nmant
self.iexp = iexp
self.eps = exp2(dtype(-nmant))
self.epsneg = exp2(dtype(negep))
self.precision = int(-log10(self.eps))
self.resolution = dtype(10) ** (-self.precision)
@pytest.fixture
def float16_ma():
"""Machine arithmetic parameters for float16."""
f16 = ntypes.float16
return MachArLike(f16,
machep=-10,
negep=-11,
minexp=-14,
maxexp=16,
nmant=10,
iexp=5)
@pytest.fixture
def float32_ma():
"""Machine arithmetic parameters for float32."""
f32 = ntypes.float32
return MachArLike(f32,
machep=-23,
negep=-24,
minexp=-126,
maxexp=128,
nmant=23,
iexp=8)
@pytest.fixture
def float64_ma():
"""Machine arithmetic parameters for float64."""
f64 = ntypes.float64
return MachArLike(f64,
machep=-52,
negep=-53,
minexp=-1022,
maxexp=1024,
nmant=52,
iexp=11)
@pytest.mark.parametrize("dtype,ma_fixture", [
(np.half, "float16_ma"),
(np.float32, "float32_ma"),
(np.float64, "float64_ma"),
])
@pytest.mark.parametrize("prop", [
'machep', 'negep', 'minexp', 'maxexp', 'nmant', 'iexp',
'eps', 'epsneg', 'precision', 'resolution'
])
@pytest.mark.thread_unsafe(
reason="complex fixture setup is thread-unsafe (pytest-dev/pytest#13768.)"
)
def test_finfo_properties(dtype, ma_fixture, prop, request):
"""Test that finfo properties match expected machine arithmetic values."""
ma = request.getfixturevalue(ma_fixture)
finfo = np.finfo(dtype)
actual = getattr(finfo, prop)
expected = getattr(ma, prop)
assert actual == expected, (
f"finfo({dtype}) property '{prop}' mismatch: "
f"expected {expected}, got {actual}"
)
|
MachArLike
|
python
|
huggingface__transformers
|
src/transformers/models/perceiver/modeling_perceiver.py
|
{
"start": 13279,
"end": 15308
}
|
class ____(nn.Module):
def __init__(
self,
config,
is_cross_attention=False,
qk_channels=None,
v_channels=None,
num_heads=1,
q_dim=None,
kv_dim=None,
widening_factor=4,
use_query_residual=True,
):
super().__init__()
self.chunk_size_feed_forward = config.chunk_size_feed_forward
self.seq_len_dim = 1
self.attention = PerceiverAttention(
config,
is_cross_attention=is_cross_attention,
qk_channels=qk_channels,
v_channels=v_channels,
num_heads=num_heads,
q_dim=q_dim,
kv_dim=kv_dim,
use_query_residual=use_query_residual,
)
self.layernorm = nn.LayerNorm(q_dim)
self.mlp = PerceiverMLP(config, input_size=q_dim, widening_factor=widening_factor)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.FloatTensor] = None,
inputs: Optional[torch.FloatTensor] = None,
inputs_mask: Optional[torch.FloatTensor] = None,
output_attentions: Optional[bool] = False,
) -> tuple[torch.Tensor]:
attention_outputs = self.attention(
hidden_states,
attention_mask,
inputs,
inputs_mask,
output_attentions,
)
attention_output = attention_outputs[0]
outputs = attention_outputs[1:] # add attentions if we output attention weights
layer_output = apply_chunking_to_forward(
self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output
)
layer_output = layer_output + attention_output # residual connection
outputs = (layer_output,) + outputs
return outputs
def feed_forward_chunk(self, attention_output):
layer_output = self.layernorm(attention_output)
layer_output = self.mlp(layer_output)
return layer_output
|
PerceiverLayer
|
python
|
great-expectations__great_expectations
|
great_expectations/datasource/fluent/sql_datasource.py
|
{
"start": 13171,
"end": 14356
}
|
class ____(FluentBaseModel):
column_names: List[str]
sort_ascending: bool = True
method_name: Literal["partition_on_multi_column_values"] = "partition_on_multi_column_values"
@property
def columns(self):
return self.column_names
@property
def param_names(self) -> List[str]:
return self.column_names
def partitioner_method_kwargs(self) -> Dict[str, Any]:
return {"column_names": self.column_names}
def batch_parameters_to_batch_spec_kwarg_identifiers(
self, options: BatchParameters
) -> Dict[str, Any]:
if not (set(self.column_names) <= set(options.keys())):
raise ValueError( # noqa: TRY003 # FIXME CoP
f"All column names, {self.column_names}, must be specified in the batch parameters. " # noqa: E501 # FIXME CoP
f" The options provided were f{options}."
)
return {col: options[col] for col in self.column_names}
def param_defaults(self, sql_asset: _SQLAsset) -> list[dict]:
return _partitioner_and_sql_asset_to_batch_identifier_data(
partitioner=self, asset=sql_asset
)
|
SqlPartitionerMultiColumnValue
|
python
|
getsentry__sentry
|
tests/sentry/core/endpoints/test_organization_member_index.py
|
{
"start": 24452,
"end": 34581
}
|
class ____(OrganizationMemberListTestBase, HybridCloudTestMixin):
method = "post"
def invite_all_helper(self, role):
invite_roles = ["owner", "manager", "member"]
user = self.create_user("user@localhost")
member = self.create_member(user=user, organization=self.organization, role=role)
self.login_as(user=user)
self.organization.flags.disable_member_invite = True
self.organization.save()
allowed_roles = member.get_allowed_org_roles_to_invite()
for invite_role in invite_roles:
data = {
"email": f"{invite_role}_1@localhost",
"role": invite_role,
"teamRoles": [
{"teamSlug": self.team.slug, "role": "contributor"},
],
}
if role == "member" or role == "admin":
self.get_error_response(self.organization.slug, **data, status_code=403)
elif any(invite_role == allowed_role.id for allowed_role in allowed_roles):
self.get_success_response(self.organization.slug, **data, status_code=201)
else:
self.get_error_response(self.organization.slug, **data, status_code=400)
self.organization.flags.disable_member_invite = False
self.organization.save()
for invite_role in invite_roles:
data = {
"email": f"{invite_role}_2@localhost",
"role": invite_role,
"teamRoles": [
{"teamSlug": self.team.slug, "role": "contributor"},
],
}
if any(invite_role == allowed_role.id for allowed_role in allowed_roles):
self.get_success_response(self.organization.slug, **data, status_code=201)
else:
self.get_error_response(self.organization.slug, **data, status_code=400)
def invite_to_other_team_helper(self, role):
user = self.create_user("inviter@localhost")
self.create_member(user=user, organization=self.organization, role=role, teams=[self.team])
self.login_as(user=user)
other_team = self.create_team(organization=self.organization, name="Moo Deng's Team")
def get_data(email: str, other_team_invite: bool = False, use_team_roles: bool = True):
team_slug = other_team.slug if other_team_invite else self.team.slug
data: dict[str, str | list] = {
"email": f"{email}@localhost",
"role": "member",
}
if use_team_roles:
data["teamRoles"] = [{"teamSlug": team_slug, "role": "contributor"}]
else:
data["teams"] = [team_slug]
return data
# members can never invite members if disable_member_invite = True
self.organization.flags.allow_joinleave = True
self.organization.flags.disable_member_invite = True
self.organization.save()
response = self.get_error_response(
self.organization.slug, **get_data("foo1"), status_code=403
)
assert response.data.get("detail") == "You do not have permission to perform this action."
self.organization.flags.allow_joinleave = False
self.organization.flags.disable_member_invite = True
self.organization.save()
response = self.get_error_response(
self.organization.slug, **get_data("foo2"), status_code=403
)
assert response.data.get("detail") == "You do not have permission to perform this action."
# members can only invite members to teams they are in if allow_joinleave = False
self.organization.flags.allow_joinleave = False
self.organization.flags.disable_member_invite = False
self.organization.save()
self.get_success_response(self.organization.slug, **get_data("foo3"), status_code=201)
response = self.get_error_response(
self.organization.slug, **get_data("foo4", True), status_code=400
)
assert (
response.data.get("detail")
== "You cannot assign members to teams you are not a member of."
)
# also test with teams instead of teamRoles
self.get_success_response(
self.organization.slug, **get_data("foo5", use_team_roles=False), status_code=201
)
response = self.get_error_response(
self.organization.slug,
**get_data("foo6", other_team_invite=True, use_team_roles=False),
status_code=400,
)
assert (
response.data.get("detail")
== "You cannot assign members to teams you are not a member of."
)
# members can invite member to any team if allow_joinleave = True
self.organization.flags.allow_joinleave = True
self.organization.flags.disable_member_invite = False
self.organization.save()
self.get_success_response(self.organization.slug, **get_data("foo7"), status_code=201)
self.get_success_response(self.organization.slug, **get_data("foo8", True), status_code=201)
# also test with teams instead of teamRoles
self.get_success_response(
self.organization.slug, **get_data("foo9", use_team_roles=False), status_code=201
)
self.get_success_response(
self.organization.slug,
**get_data("foo10", other_team_invite=True, use_team_roles=False),
status_code=201,
)
def test_owner_invites(self) -> None:
self.invite_all_helper("owner")
def test_manager_invites(self) -> None:
self.invite_all_helper("manager")
def test_admin_invites(self) -> None:
self.invite_all_helper("admin")
self.invite_to_other_team_helper("admin")
def test_member_invites(self) -> None:
self.invite_all_helper("member")
self.invite_to_other_team_helper("member")
def test_respects_feature_flag(self) -> None:
user = self.create_user("baz@example.com")
with Feature({"organizations:invite-members": False}):
data = {"email": user.email, "role": "member", "teams": [self.team.slug]}
self.get_error_response(self.organization.slug, **data, status_code=403)
def test_no_team_invites(self) -> None:
data = {"email": "eric@localhost", "role": "owner", "teams": []}
response = self.get_success_response(self.organization.slug, **data)
assert response.data["email"] == "eric@localhost"
def test_can_invite_member_with_pending_invite_request(self) -> None:
email = "test@gmail.com"
invite_request = OrganizationMember.objects.create(
email=email,
organization=self.organization,
invite_status=InviteStatus.REQUESTED_TO_BE_INVITED.value,
)
data = {"email": email, "role": "member", "teams": [self.team.slug]}
with self.settings(SENTRY_ENABLE_INVITES=True), self.tasks():
self.get_success_response(self.organization.slug, **data)
assert not OrganizationMember.objects.filter(id=invite_request.id).exists()
org_member = OrganizationMember.objects.filter(
organization=self.organization, email=email
).get()
self.assert_org_member_mapping(org_member=org_member)
assert len(mail.outbox) == 1
def test_can_invite_member_with_pending_join_request(self) -> None:
email = "test@gmail.com"
join_request = self.create_member(
email=email,
organization=self.organization,
invite_status=InviteStatus.REQUESTED_TO_JOIN.value,
)
self.assert_org_member_mapping(org_member=join_request)
data = {"email": email, "role": "member", "teams": [self.team.slug]}
with self.settings(SENTRY_ENABLE_INVITES=True), self.tasks(), outbox_runner():
self.get_success_response(self.organization.slug, **data)
assert not OrganizationMember.objects.filter(id=join_request.id).exists()
self.assert_org_member_mapping_not_exists(org_member=join_request)
org_member = OrganizationMember.objects.filter(
organization=self.organization, email=email
).get()
self.assert_org_member_mapping(org_member=org_member)
assert len(mail.outbox) == 1
def test_user_has_external_user_association(self) -> None:
response = self.get_success_response(
self.organization.slug, method="get", qs_params={"expand": "externalUsers"}
)
assert len(response.data) == 2
organization_member = next(
filter(lambda x: x["user"]["id"] == str(self.user2.id), response.data)
)
assert organization_member
assert len(organization_member["externalUsers"]) == 1
assert organization_member["externalUsers"][0]["id"] == str(self.external_user.id)
assert (
organization_member["externalUsers"][0]["userId"] == organization_member["user"]["id"]
)
def test_user_has_external_user_associations_across_multiple_orgs(self) -> None:
organization = self.create_organization(owner=self.user2)
integration = self.create_integration(
organization=self.organization, external_id="github:2", name="GitHub", provider="github"
)
self.create_external_user(self.user2, organization, integration=integration)
response = self.get_success_response(
self.organization.slug, method="get", qs_params={"expand": "externalUsers"}
)
assert len(response.data) == 2
organization_member = next(
filter(lambda x: x["user"]["id"] == str(self.user2.id), response.data)
)
assert organization_member
assert len(organization_member["externalUsers"]) == 1
assert organization_member["externalUsers"][0]["id"] == str(self.external_user.id)
assert (
organization_member["externalUsers"][0]["userId"] == organization_member["user"]["id"]
)
|
OrganizationMemberPermissionRoleTest
|
python
|
kubernetes-client__python
|
kubernetes/client/models/v1_pod_status.py
|
{
"start": 383,
"end": 29201
}
|
class ____(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Maps python attribute name -> OpenAPI type string (consumed by to_dict
    # and the client's (de)serialization machinery).
    openapi_types = {
        'conditions': 'list[V1PodCondition]',
        'container_statuses': 'list[V1ContainerStatus]',
        'ephemeral_container_statuses': 'list[V1ContainerStatus]',
        'extended_resource_claim_status': 'V1PodExtendedResourceClaimStatus',
        'host_ip': 'str',
        'host_i_ps': 'list[V1HostIP]',
        'init_container_statuses': 'list[V1ContainerStatus]',
        'message': 'str',
        'nominated_node_name': 'str',
        'observed_generation': 'int',
        'phase': 'str',
        'pod_ip': 'str',
        'pod_i_ps': 'list[V1PodIP]',
        'qos_class': 'str',
        'reason': 'str',
        'resize': 'str',
        'resource_claim_statuses': 'list[V1PodResourceClaimStatus]',
        'start_time': 'datetime'
    }

    # Maps python attribute name -> JSON key in the Kubernetes API definition.
    attribute_map = {
        'conditions': 'conditions',
        'container_statuses': 'containerStatuses',
        'ephemeral_container_statuses': 'ephemeralContainerStatuses',
        'extended_resource_claim_status': 'extendedResourceClaimStatus',
        'host_ip': 'hostIP',
        'host_i_ps': 'hostIPs',
        'init_container_statuses': 'initContainerStatuses',
        'message': 'message',
        'nominated_node_name': 'nominatedNodeName',
        'observed_generation': 'observedGeneration',
        'phase': 'phase',
        'pod_ip': 'podIP',
        'pod_i_ps': 'podIPs',
        'qos_class': 'qosClass',
        'reason': 'reason',
        'resize': 'resize',
        'resource_claim_statuses': 'resourceClaimStatuses',
        'start_time': 'startTime'
    }

    def __init__(self, conditions=None, container_statuses=None, ephemeral_container_statuses=None, extended_resource_claim_status=None, host_ip=None, host_i_ps=None, init_container_statuses=None, message=None, nominated_node_name=None, observed_generation=None, phase=None, pod_ip=None, pod_i_ps=None, qos_class=None, reason=None, resize=None, resource_claim_statuses=None, start_time=None, local_vars_configuration=None):  # noqa: E501
        """V1PodStatus - a model defined in OpenAPI"""  # noqa: E501
        # Fall back to a default client Configuration when none is supplied.
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        self._conditions = None
        self._container_statuses = None
        self._ephemeral_container_statuses = None
        self._extended_resource_claim_status = None
        self._host_ip = None
        self._host_i_ps = None
        self._init_container_statuses = None
        self._message = None
        self._nominated_node_name = None
        self._observed_generation = None
        self._phase = None
        self._pod_ip = None
        self._pod_i_ps = None
        self._qos_class = None
        self._reason = None
        self._resize = None
        self._resource_claim_statuses = None
        self._start_time = None
        self.discriminator = None

        # Only assign attributes that were explicitly provided, so that the
        # property setters (and any validation they carry) run for real values
        # while omitted fields stay None.
        if conditions is not None:
            self.conditions = conditions
        if container_statuses is not None:
            self.container_statuses = container_statuses
        if ephemeral_container_statuses is not None:
            self.ephemeral_container_statuses = ephemeral_container_statuses
        if extended_resource_claim_status is not None:
            self.extended_resource_claim_status = extended_resource_claim_status
        if host_ip is not None:
            self.host_ip = host_ip
        if host_i_ps is not None:
            self.host_i_ps = host_i_ps
        if init_container_statuses is not None:
            self.init_container_statuses = init_container_statuses
        if message is not None:
            self.message = message
        if nominated_node_name is not None:
            self.nominated_node_name = nominated_node_name
        if observed_generation is not None:
            self.observed_generation = observed_generation
        if phase is not None:
            self.phase = phase
        if pod_ip is not None:
            self.pod_ip = pod_ip
        if pod_i_ps is not None:
            self.pod_i_ps = pod_i_ps
        if qos_class is not None:
            self.qos_class = qos_class
        if reason is not None:
            self.reason = reason
        if resize is not None:
            self.resize = resize
        if resource_claim_statuses is not None:
            self.resource_claim_statuses = resource_claim_statuses
        if start_time is not None:
            self.start_time = start_time

    @property
    def conditions(self):
        """Gets the conditions of this V1PodStatus.  # noqa: E501

        Current service state of pod. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-conditions  # noqa: E501

        :return: The conditions of this V1PodStatus.  # noqa: E501
        :rtype: list[V1PodCondition]
        """
        return self._conditions

    @conditions.setter
    def conditions(self, conditions):
        """Sets the conditions of this V1PodStatus.

        Current service state of pod. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-conditions  # noqa: E501

        :param conditions: The conditions of this V1PodStatus.  # noqa: E501
        :type: list[V1PodCondition]
        """
        self._conditions = conditions

    @property
    def container_statuses(self):
        """Gets the container_statuses of this V1PodStatus.  # noqa: E501

        Statuses of containers in this pod. Each container in the pod should have at most one status in this list, and all statuses should be for containers in the pod. However this is not enforced. If a status for a non-existent container is present in the list, or the list has duplicate names, the behavior of various Kubernetes components is not defined and those statuses might be ignored. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status  # noqa: E501

        :return: The container_statuses of this V1PodStatus.  # noqa: E501
        :rtype: list[V1ContainerStatus]
        """
        return self._container_statuses

    @container_statuses.setter
    def container_statuses(self, container_statuses):
        """Sets the container_statuses of this V1PodStatus.

        Statuses of containers in this pod. Each container in the pod should have at most one status in this list, and all statuses should be for containers in the pod. However this is not enforced. If a status for a non-existent container is present in the list, or the list has duplicate names, the behavior of various Kubernetes components is not defined and those statuses might be ignored. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status  # noqa: E501

        :param container_statuses: The container_statuses of this V1PodStatus.  # noqa: E501
        :type: list[V1ContainerStatus]
        """
        self._container_statuses = container_statuses

    @property
    def ephemeral_container_statuses(self):
        """Gets the ephemeral_container_statuses of this V1PodStatus.  # noqa: E501

        Statuses for any ephemeral containers that have run in this pod. Each ephemeral container in the pod should have at most one status in this list, and all statuses should be for containers in the pod. However this is not enforced. If a status for a non-existent container is present in the list, or the list has duplicate names, the behavior of various Kubernetes components is not defined and those statuses might be ignored. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status  # noqa: E501

        :return: The ephemeral_container_statuses of this V1PodStatus.  # noqa: E501
        :rtype: list[V1ContainerStatus]
        """
        return self._ephemeral_container_statuses

    @ephemeral_container_statuses.setter
    def ephemeral_container_statuses(self, ephemeral_container_statuses):
        """Sets the ephemeral_container_statuses of this V1PodStatus.

        Statuses for any ephemeral containers that have run in this pod. Each ephemeral container in the pod should have at most one status in this list, and all statuses should be for containers in the pod. However this is not enforced. If a status for a non-existent container is present in the list, or the list has duplicate names, the behavior of various Kubernetes components is not defined and those statuses might be ignored. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status  # noqa: E501

        :param ephemeral_container_statuses: The ephemeral_container_statuses of this V1PodStatus.  # noqa: E501
        :type: list[V1ContainerStatus]
        """
        self._ephemeral_container_statuses = ephemeral_container_statuses

    @property
    def extended_resource_claim_status(self):
        """Gets the extended_resource_claim_status of this V1PodStatus.  # noqa: E501


        :return: The extended_resource_claim_status of this V1PodStatus.  # noqa: E501
        :rtype: V1PodExtendedResourceClaimStatus
        """
        return self._extended_resource_claim_status

    @extended_resource_claim_status.setter
    def extended_resource_claim_status(self, extended_resource_claim_status):
        """Sets the extended_resource_claim_status of this V1PodStatus.


        :param extended_resource_claim_status: The extended_resource_claim_status of this V1PodStatus.  # noqa: E501
        :type: V1PodExtendedResourceClaimStatus
        """
        self._extended_resource_claim_status = extended_resource_claim_status

    @property
    def host_ip(self):
        """Gets the host_ip of this V1PodStatus.  # noqa: E501

        hostIP holds the IP address of the host to which the pod is assigned. Empty if the pod has not started yet. A pod can be assigned to a node that has a problem in kubelet which in turns mean that HostIP will not be updated even if there is a node is assigned to pod  # noqa: E501

        :return: The host_ip of this V1PodStatus.  # noqa: E501
        :rtype: str
        """
        return self._host_ip

    @host_ip.setter
    def host_ip(self, host_ip):
        """Sets the host_ip of this V1PodStatus.

        hostIP holds the IP address of the host to which the pod is assigned. Empty if the pod has not started yet. A pod can be assigned to a node that has a problem in kubelet which in turns mean that HostIP will not be updated even if there is a node is assigned to pod  # noqa: E501

        :param host_ip: The host_ip of this V1PodStatus.  # noqa: E501
        :type: str
        """
        self._host_ip = host_ip

    @property
    def host_i_ps(self):
        """Gets the host_i_ps of this V1PodStatus.  # noqa: E501

        hostIPs holds the IP addresses allocated to the host. If this field is specified, the first entry must match the hostIP field. This list is empty if the pod has not started yet. A pod can be assigned to a node that has a problem in kubelet which in turns means that HostIPs will not be updated even if there is a node is assigned to this pod.  # noqa: E501

        :return: The host_i_ps of this V1PodStatus.  # noqa: E501
        :rtype: list[V1HostIP]
        """
        return self._host_i_ps

    @host_i_ps.setter
    def host_i_ps(self, host_i_ps):
        """Sets the host_i_ps of this V1PodStatus.

        hostIPs holds the IP addresses allocated to the host. If this field is specified, the first entry must match the hostIP field. This list is empty if the pod has not started yet. A pod can be assigned to a node that has a problem in kubelet which in turns means that HostIPs will not be updated even if there is a node is assigned to this pod.  # noqa: E501

        :param host_i_ps: The host_i_ps of this V1PodStatus.  # noqa: E501
        :type: list[V1HostIP]
        """
        self._host_i_ps = host_i_ps

    @property
    def init_container_statuses(self):
        """Gets the init_container_statuses of this V1PodStatus.  # noqa: E501

        Statuses of init containers in this pod. The most recent successful non-restartable init container will have ready = true, the most recently started container will have startTime set. Each init container in the pod should have at most one status in this list, and all statuses should be for containers in the pod. However this is not enforced. If a status for a non-existent container is present in the list, or the list has duplicate names, the behavior of various Kubernetes components is not defined and those statuses might be ignored. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#pod-and-container-status  # noqa: E501

        :return: The init_container_statuses of this V1PodStatus.  # noqa: E501
        :rtype: list[V1ContainerStatus]
        """
        return self._init_container_statuses

    @init_container_statuses.setter
    def init_container_statuses(self, init_container_statuses):
        """Sets the init_container_statuses of this V1PodStatus.

        Statuses of init containers in this pod. The most recent successful non-restartable init container will have ready = true, the most recently started container will have startTime set. Each init container in the pod should have at most one status in this list, and all statuses should be for containers in the pod. However this is not enforced. If a status for a non-existent container is present in the list, or the list has duplicate names, the behavior of various Kubernetes components is not defined and those statuses might be ignored. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#pod-and-container-status  # noqa: E501

        :param init_container_statuses: The init_container_statuses of this V1PodStatus.  # noqa: E501
        :type: list[V1ContainerStatus]
        """
        self._init_container_statuses = init_container_statuses

    @property
    def message(self):
        """Gets the message of this V1PodStatus.  # noqa: E501

        A human readable message indicating details about why the pod is in this condition.  # noqa: E501

        :return: The message of this V1PodStatus.  # noqa: E501
        :rtype: str
        """
        return self._message

    @message.setter
    def message(self, message):
        """Sets the message of this V1PodStatus.

        A human readable message indicating details about why the pod is in this condition.  # noqa: E501

        :param message: The message of this V1PodStatus.  # noqa: E501
        :type: str
        """
        self._message = message

    @property
    def nominated_node_name(self):
        """Gets the nominated_node_name of this V1PodStatus.  # noqa: E501

        nominatedNodeName is set only when this pod preempts other pods on the node, but it cannot be scheduled right away as preemption victims receive their graceful termination periods. This field does not guarantee that the pod will be scheduled on this node. Scheduler may decide to place the pod elsewhere if other nodes become available sooner. Scheduler may also decide to give the resources on this node to a higher priority pod that is created after preemption. As a result, this field may be different than PodSpec.nodeName when the pod is scheduled.  # noqa: E501

        :return: The nominated_node_name of this V1PodStatus.  # noqa: E501
        :rtype: str
        """
        return self._nominated_node_name

    @nominated_node_name.setter
    def nominated_node_name(self, nominated_node_name):
        """Sets the nominated_node_name of this V1PodStatus.

        nominatedNodeName is set only when this pod preempts other pods on the node, but it cannot be scheduled right away as preemption victims receive their graceful termination periods. This field does not guarantee that the pod will be scheduled on this node. Scheduler may decide to place the pod elsewhere if other nodes become available sooner. Scheduler may also decide to give the resources on this node to a higher priority pod that is created after preemption. As a result, this field may be different than PodSpec.nodeName when the pod is scheduled.  # noqa: E501

        :param nominated_node_name: The nominated_node_name of this V1PodStatus.  # noqa: E501
        :type: str
        """
        self._nominated_node_name = nominated_node_name

    @property
    def observed_generation(self):
        """Gets the observed_generation of this V1PodStatus.  # noqa: E501

        If set, this represents the .metadata.generation that the pod status was set based upon. This is an alpha field. Enable PodObservedGenerationTracking to be able to use this field.  # noqa: E501

        :return: The observed_generation of this V1PodStatus.  # noqa: E501
        :rtype: int
        """
        return self._observed_generation

    @observed_generation.setter
    def observed_generation(self, observed_generation):
        """Sets the observed_generation of this V1PodStatus.

        If set, this represents the .metadata.generation that the pod status was set based upon. This is an alpha field. Enable PodObservedGenerationTracking to be able to use this field.  # noqa: E501

        :param observed_generation: The observed_generation of this V1PodStatus.  # noqa: E501
        :type: int
        """
        self._observed_generation = observed_generation

    @property
    def phase(self):
        """Gets the phase of this V1PodStatus.  # noqa: E501

        The phase of a Pod is a simple, high-level summary of where the Pod is in its lifecycle. The conditions array, the reason and message fields, and the individual container status arrays contain more detail about the pod's status. There are five possible phase values:  Pending: The pod has been accepted by the Kubernetes system, but one or more of the container images has not been created. This includes time before being scheduled as well as time spent downloading images over the network, which could take a while. Running: The pod has been bound to a node, and all of the containers have been created. At least one container is still running, or is in the process of starting or restarting. Succeeded: All containers in the pod have terminated in success, and will not be restarted. Failed: All containers in the pod have terminated, and at least one container has terminated in failure. The container either exited with non-zero status or was terminated by the system. Unknown: For some reason the state of the pod could not be obtained, typically due to an error in communicating with the host of the pod.  More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-phase  # noqa: E501

        :return: The phase of this V1PodStatus.  # noqa: E501
        :rtype: str
        """
        return self._phase

    @phase.setter
    def phase(self, phase):
        """Sets the phase of this V1PodStatus.

        The phase of a Pod is a simple, high-level summary of where the Pod is in its lifecycle. The conditions array, the reason and message fields, and the individual container status arrays contain more detail about the pod's status. There are five possible phase values:  Pending: The pod has been accepted by the Kubernetes system, but one or more of the container images has not been created. This includes time before being scheduled as well as time spent downloading images over the network, which could take a while. Running: The pod has been bound to a node, and all of the containers have been created. At least one container is still running, or is in the process of starting or restarting. Succeeded: All containers in the pod have terminated in success, and will not be restarted. Failed: All containers in the pod have terminated, and at least one container has terminated in failure. The container either exited with non-zero status or was terminated by the system. Unknown: For some reason the state of the pod could not be obtained, typically due to an error in communicating with the host of the pod.  More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-phase  # noqa: E501

        :param phase: The phase of this V1PodStatus.  # noqa: E501
        :type: str
        """
        self._phase = phase

    @property
    def pod_ip(self):
        """Gets the pod_ip of this V1PodStatus.  # noqa: E501

        podIP address allocated to the pod. Routable at least within the cluster. Empty if not yet allocated.  # noqa: E501

        :return: The pod_ip of this V1PodStatus.  # noqa: E501
        :rtype: str
        """
        return self._pod_ip

    @pod_ip.setter
    def pod_ip(self, pod_ip):
        """Sets the pod_ip of this V1PodStatus.

        podIP address allocated to the pod. Routable at least within the cluster. Empty if not yet allocated.  # noqa: E501

        :param pod_ip: The pod_ip of this V1PodStatus.  # noqa: E501
        :type: str
        """
        self._pod_ip = pod_ip

    @property
    def pod_i_ps(self):
        """Gets the pod_i_ps of this V1PodStatus.  # noqa: E501

        podIPs holds the IP addresses allocated to the pod. If this field is specified, the 0th entry must match the podIP field. Pods may be allocated at most 1 value for each of IPv4 and IPv6. This list is empty if no IPs have been allocated yet.  # noqa: E501

        :return: The pod_i_ps of this V1PodStatus.  # noqa: E501
        :rtype: list[V1PodIP]
        """
        return self._pod_i_ps

    @pod_i_ps.setter
    def pod_i_ps(self, pod_i_ps):
        """Sets the pod_i_ps of this V1PodStatus.

        podIPs holds the IP addresses allocated to the pod. If this field is specified, the 0th entry must match the podIP field. Pods may be allocated at most 1 value for each of IPv4 and IPv6. This list is empty if no IPs have been allocated yet.  # noqa: E501

        :param pod_i_ps: The pod_i_ps of this V1PodStatus.  # noqa: E501
        :type: list[V1PodIP]
        """
        self._pod_i_ps = pod_i_ps

    @property
    def qos_class(self):
        """Gets the qos_class of this V1PodStatus.  # noqa: E501

        The Quality of Service (QOS) classification assigned to the pod based on resource requirements See PodQOSClass type for available QOS classes More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-qos/#quality-of-service-classes  # noqa: E501

        :return: The qos_class of this V1PodStatus.  # noqa: E501
        :rtype: str
        """
        return self._qos_class

    @qos_class.setter
    def qos_class(self, qos_class):
        """Sets the qos_class of this V1PodStatus.

        The Quality of Service (QOS) classification assigned to the pod based on resource requirements See PodQOSClass type for available QOS classes More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-qos/#quality-of-service-classes  # noqa: E501

        :param qos_class: The qos_class of this V1PodStatus.  # noqa: E501
        :type: str
        """
        self._qos_class = qos_class

    @property
    def reason(self):
        """Gets the reason of this V1PodStatus.  # noqa: E501

        A brief CamelCase message indicating details about why the pod is in this state. e.g. 'Evicted'  # noqa: E501

        :return: The reason of this V1PodStatus.  # noqa: E501
        :rtype: str
        """
        return self._reason

    @reason.setter
    def reason(self, reason):
        """Sets the reason of this V1PodStatus.

        A brief CamelCase message indicating details about why the pod is in this state. e.g. 'Evicted'  # noqa: E501

        :param reason: The reason of this V1PodStatus.  # noqa: E501
        :type: str
        """
        self._reason = reason

    @property
    def resize(self):
        """Gets the resize of this V1PodStatus.  # noqa: E501

        Status of resources resize desired for pod's containers. It is empty if no resources resize is pending. Any changes to container resources will automatically set this to \"Proposed\" Deprecated: Resize status is moved to two pod conditions PodResizePending and PodResizeInProgress. PodResizePending will track states where the spec has been resized, but the Kubelet has not yet allocated the resources. PodResizeInProgress will track in-progress resizes, and should be present whenever allocated resources != acknowledged resources.  # noqa: E501

        :return: The resize of this V1PodStatus.  # noqa: E501
        :rtype: str
        """
        return self._resize

    @resize.setter
    def resize(self, resize):
        """Sets the resize of this V1PodStatus.

        Status of resources resize desired for pod's containers. It is empty if no resources resize is pending. Any changes to container resources will automatically set this to \"Proposed\" Deprecated: Resize status is moved to two pod conditions PodResizePending and PodResizeInProgress. PodResizePending will track states where the spec has been resized, but the Kubelet has not yet allocated the resources. PodResizeInProgress will track in-progress resizes, and should be present whenever allocated resources != acknowledged resources.  # noqa: E501

        :param resize: The resize of this V1PodStatus.  # noqa: E501
        :type: str
        """
        self._resize = resize

    @property
    def resource_claim_statuses(self):
        """Gets the resource_claim_statuses of this V1PodStatus.  # noqa: E501

        Status of resource claims.  # noqa: E501

        :return: The resource_claim_statuses of this V1PodStatus.  # noqa: E501
        :rtype: list[V1PodResourceClaimStatus]
        """
        return self._resource_claim_statuses

    @resource_claim_statuses.setter
    def resource_claim_statuses(self, resource_claim_statuses):
        """Sets the resource_claim_statuses of this V1PodStatus.

        Status of resource claims.  # noqa: E501

        :param resource_claim_statuses: The resource_claim_statuses of this V1PodStatus.  # noqa: E501
        :type: list[V1PodResourceClaimStatus]
        """
        self._resource_claim_statuses = resource_claim_statuses

    @property
    def start_time(self):
        """Gets the start_time of this V1PodStatus.  # noqa: E501

        RFC 3339 date and time at which the object was acknowledged by the Kubelet. This is before the Kubelet pulled the container image(s) for the pod.  # noqa: E501

        :return: The start_time of this V1PodStatus.  # noqa: E501
        :rtype: datetime
        """
        return self._start_time

    @start_time.setter
    def start_time(self, start_time):
        """Sets the start_time of this V1PodStatus.

        RFC 3339 date and time at which the object was acknowledged by the Kubelet. This is before the Kubelet pulled the container image(s) for the pod.  # noqa: E501

        :param start_time: The start_time of this V1PodStatus.  # noqa: E501
        :type: datetime
        """
        self._start_time = start_time

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        # Recursively serialize nested models (anything with to_dict),
        # including models contained in lists and dict values.
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        # Equality is structural: compare the fully serialized dicts.
        if not isinstance(other, V1PodStatus):
            return False

        return self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, V1PodStatus):
            return True

        return self.to_dict() != other.to_dict()
|
V1PodStatus
|
python
|
huggingface__transformers
|
src/transformers/models/oneformer/modeling_oneformer.py
|
{
"start": 96795,
"end": 102665
}
|
class ____(nn.Module):
    """
    Transformer decoder
    """

    def __init__(self, in_channels: int, config: OneFormerConfig):
        super().__init__()
        self.config = config

        self.dropout = config.dropout
        self.num_heads = config.num_attention_heads
        self.is_training = config.is_training
        self.use_task_norm = config.use_task_norm
        self.use_auxiliary_loss = config.use_auxiliary_loss

        # Auxiliary query transformer that refines the learnable queries
        # against the projected mask features before the main decoder layers.
        self.query_transformer = OneFormerTransformerDecoderQueryTransformer(
            d_model=config.hidden_dim,
            dropout=config.dropout,
            nhead=config.num_attention_heads,
            dim_feedforward=config.dim_feedforward,
            num_decoder_layers=config.query_dec_layers,
            normalize_before=config.pre_norm,
            return_intermediate_dec=False,
            layer_norm_eps=config.layer_norm_eps,
        )

        self.decoder_norm = nn.LayerNorm(config.hidden_dim, eps=config.layer_norm_eps)

        # Decoder layers cycle through this many feature-pyramid levels.
        self.num_feature_levels = 3

        self.layers = nn.ModuleList(
            [OneFormerTransformerDecoderLayer(config) for _ in range(config.decoder_layers - 1)]
        )

        self.query_input_projection = nn.Conv2d(in_channels, config.hidden_dim, kernel_size=1)

        # Prediction heads: class logits (+1 for the "no object" class) and
        # per-query mask embeddings.
        self.class_embed = nn.Linear(config.hidden_dim, config.num_labels + 1)
        self.mask_embed = OneFormerMLPPredictionHead(
            config.hidden_dim,
            config.hidden_dim,
            config.mask_dim,
            3,
        )

    def forward(
        self,
        task_token=None,
        multi_stage_features=None,
        multi_stage_positional_embeddings=None,
        mask_features=None,
        query_features=None,
        query_embeddings=None,
        query_embedder=None,
        size_list=None,
        output_attentions=None,
    ):
        """Run the decoder: refine queries layer by layer, producing class and
        mask predictions after every layer; the attention mask fed to the next
        layer is derived from the current mask prediction."""
        if self.use_task_norm:
            task_token = self.decoder_norm(task_token)

        # Last row of the query embedder weight is excluded here; the task
        # token takes its place below.
        object_queries = self.query_transformer(
            query_features,
            None,
            query_embedder.weight[:-1],
            self.query_input_projection(mask_features),
            task_token if self.use_task_norm else None,
        )

        object_queries = object_queries[0].permute(1, 0, 2)

        # Append the task token as an extra query.
        # NOTE(review): layout appears to be (num_queries, batch, hidden)
        # after the permute — confirm against the query transformer's output.
        queries = torch.cat([object_queries, task_token], dim=0)

        output = queries.clone()

        intermediate_class_predictions = []
        intermediate_mask_predictions = []

        # prediction heads on learnable query features
        outputs_class, outputs_mask, attention_mask = self.forward_prediction_heads(
            output, mask_features, attention_mask_target_size=size_list[0]
        )
        intermediate_class_predictions.append(outputs_class)
        intermediate_mask_predictions.append(outputs_mask)

        attentions = ()

        for index, layer in enumerate(self.layers):
            layer_outputs = layer(
                index=index,
                output=output,
                multi_stage_features=multi_stage_features,
                multi_stage_positional_embeddings=multi_stage_positional_embeddings,
                attention_mask=attention_mask,
                query_embeddings=query_embeddings,
                output_attentions=output_attentions,
            )

            output = layer_outputs[0]
            attentions += (layer_outputs[1:],)

            # Re-run the prediction heads; the resulting attention mask gates
            # the next layer. Feature levels are cycled round-robin.
            outputs_class, outputs_mask, attention_mask = self.forward_prediction_heads(
                output, mask_features, attention_mask_target_size=size_list[(index + 1) % self.num_feature_levels]
            )

            intermediate_class_predictions.append(outputs_class)
            intermediate_mask_predictions.append(outputs_mask)

        if not len(intermediate_mask_predictions) == len(self.layers) + 1:
            raise ValueError(
                "Intermediate predictions in the transformer decoder must have the same number of elements as number"
                " of layers"
            )

        object_queries = layer_outputs[0].permute(1, 0, 2)

        contrastive_logits = queries.permute(1, 0, 2)

        return OneFormerTransformerDecoderOutput(
            object_queries=object_queries,
            contrastive_logits=contrastive_logits,
            prediction_masks=intermediate_mask_predictions[-1],
            prediction_class=intermediate_class_predictions[-1],
            auxiliary_predictions=self._get_aux_predictions(
                intermediate_class_predictions, intermediate_mask_predictions
            )
            if self.use_auxiliary_loss
            else None,
            attentions=attentions,
        )

    def forward_prediction_heads(self, output, mask_features, attention_mask_target_size):
        """Project decoder output into class logits, mask logits, and the
        boolean attention mask for the next decoder layer."""
        decoder_output = self.decoder_norm(output)
        decoder_output = decoder_output.transpose(0, 1)
        outputs_class = self.class_embed(decoder_output)
        mask_embed = self.mask_embed(decoder_output)
        # Per-query mask logits over the mask-feature map.
        outputs_mask = torch.einsum("bqc,bchw->bqhw", mask_embed, mask_features)

        attention_mask = nn.functional.interpolate(
            outputs_mask, size=attention_mask_target_size, mode="bilinear", align_corners=False
        )

        # must use bool type
        # If a BoolTensor is provided, positions with ``True`` are not allowed to attend while ``False`` values will be unchanged.
        attention_mask = (
            attention_mask.sigmoid().flatten(2).unsqueeze(1).repeat(1, self.num_heads, 1, 1).flatten(0, 1) < 0.5
        ).bool()
        # Detach: mask predictions should not receive gradients through the
        # attention masking of later layers.
        attention_mask = attention_mask.detach()

        return outputs_class, outputs_mask, attention_mask

    def _get_aux_predictions(self, outputs_class, outputs_seg_masks):
        # Pair up all but the final prediction as auxiliary (deep-supervision)
        # outputs.
        aux_list = [
            {"class_queries_logits": a, "masks_queries_logits": b}
            for a, b in zip(outputs_class[:-1], outputs_seg_masks[:-1])
        ]
        return tuple(aux_list)
|
OneFormerTransformerDecoder
|
python
|
getsentry__sentry
|
src/sentry/models/dashboard_widget.py
|
{
"start": 4496,
"end": 5021
}
|
class ____(TypesClass):
    """Enumerates the display styles a dashboard widget can render as."""

    LINE_CHART = 0
    AREA_CHART = 1
    STACKED_AREA_CHART = 2
    BAR_CHART = 3
    TABLE = 4
    BIG_NUMBER = 6
    TOP_N = 7
    DETAILS = 8

    # (value, slug) pairs; the slug is what the API serializes.
    TYPES = [
        (LINE_CHART, "line"),
        (AREA_CHART, "area"),
        (STACKED_AREA_CHART, "stacked_area"),
        (BAR_CHART, "bar"),
        (TABLE, "table"),
        (BIG_NUMBER, "big_number"),
        (TOP_N, "top_n"),
        (DETAILS, "details"),
    ]
    TYPE_NAMES = [label for _, label in TYPES]
@region_silo_model
|
DashboardWidgetDisplayTypes
|
python
|
aimacode__aima-python
|
search.py
|
{
"start": 43428,
"end": 44027
}
|
class ____(GraphProblem):
    """
    A version of GraphProblem where an action can lead to
    nondeterministic output i.e. multiple possible states.

    Define the graph as dict(A = dict(Action = [[<Result 1>, <Result 2>, ...], <cost>], ...), ...)
    As the dictionary format is different, make sure the graph is created as a directed graph.
    """

    def result(self, state, action):
        """Return the list of possible states reached by *action* from *state*."""
        return self.graph.get(state, action)

    def path_cost(self, c=None, state1=None, action=None, state2=None):
        """Path cost is not well defined for stochastic graphs; subclasses must override.

        Fix: accepts the same arguments as ``Problem.path_cost`` (with defaults
        for backward compatibility), so framework code that calls
        ``path_cost(c, state1, action, state2)`` raises NotImplementedError as
        intended instead of an accidental TypeError.
        """
        raise NotImplementedError
# ______________________________________________________________________________
|
GraphProblemStochastic
|
python
|
django__django
|
tests/serializers/models/data.py
|
{
"start": 7456,
"end": 7571
}
|
class ____(models.Model):
data = models.IntegerField()
def __len__(self):
return self.data
|
LengthModel
|
python
|
keras-team__keras
|
keras/src/layers/rnn/conv_lstm2d_test.py
|
{
"start": 160,
"end": 3194
}
|
class ____(testing.TestCase):
@pytest.mark.requires_trainable_backend
def test_basics(self):
channels_last = backend.config.image_data_format() == "channels_last"
self.run_layer_test(
layers.ConvLSTM2D,
init_kwargs={"filters": 5, "kernel_size": 3, "padding": "same"},
input_shape=(3, 2, 4, 4, 3) if channels_last else (3, 2, 3, 4, 4),
expected_output_shape=(
(3, 4, 4, 5) if channels_last else (3, 5, 4, 4)
),
expected_num_trainable_weights=3,
expected_num_non_trainable_weights=0,
supports_masking=True,
)
self.run_layer_test(
layers.ConvLSTM2D,
init_kwargs={
"filters": 5,
"kernel_size": 3,
"padding": "valid",
"recurrent_dropout": 0.5,
},
input_shape=(3, 2, 8, 8, 3) if channels_last else (3, 2, 3, 8, 8),
call_kwargs={"training": True},
expected_output_shape=(
(3, 6, 6, 5) if channels_last else (3, 5, 6, 6)
),
expected_num_trainable_weights=3,
expected_num_non_trainable_weights=0,
supports_masking=True,
)
self.run_layer_test(
layers.ConvLSTM2D,
init_kwargs={
"filters": 5,
"kernel_size": 3,
"padding": "valid",
"return_sequences": True,
},
input_shape=(3, 2, 8, 8, 3) if channels_last else (3, 2, 3, 8, 8),
expected_output_shape=(
(3, 2, 6, 6, 5) if channels_last else (3, 2, 5, 6, 6)
),
expected_num_trainable_weights=3,
expected_num_non_trainable_weights=0,
supports_masking=True,
)
def test_correctness(self):
sequence = (
np.arange(480).reshape((2, 3, 4, 4, 5)).astype("float32") / 100
)
expected_output = np.array(
[
[
[[0.48694518, 0.48694518], [0.50237733, 0.50237733]],
[[0.5461202, 0.5461202], [0.5598283, 0.5598283]],
],
[
[[0.8661607, 0.8661607], [0.86909103, 0.86909103]],
[[0.8774414, 0.8774414], [0.8800861, 0.8800861]],
],
]
)
if backend.config.image_data_format() == "channels_first":
sequence = sequence.transpose((0, 1, 4, 2, 3))
expected_output = expected_output.transpose((0, 3, 1, 2))
layer = layers.ConvLSTM2D(
filters=2,
kernel_size=3,
kernel_initializer=initializers.Constant(0.01),
recurrent_initializer=initializers.Constant(0.02),
bias_initializer=initializers.Constant(0.03),
)
output = layer(sequence)
self.assertAllClose(
expected_output,
output,
)
|
ConvLSTM2DTest
|
python
|
python__mypy
|
mypyc/ir/ops.py
|
{
"start": 42294,
"end": 43942
}
|
class ____(RegisterOp):
"""Binary arithmetic or bitwise op on integer operands (e.g., r1 = r2 + r3).
These ops are low-level and are similar to the corresponding C
operations.
The left and right values must have low-level integer types with
compatible representations. Fixed-width integers, short_int_rprimitive,
bool_rprimitive and bit_rprimitive are supported.
For tagged (arbitrary-precision) integer ops look at mypyc.primitives.int_ops.
"""
error_kind = ERR_NEVER
# Arithmetic ops
ADD: Final = 0
SUB: Final = 1
MUL: Final = 2
DIV: Final = 3
MOD: Final = 4
# Bitwise ops
AND: Final = 200
OR: Final = 201
XOR: Final = 202
LEFT_SHIFT: Final = 203
RIGHT_SHIFT: Final = 204
op_str: Final = {
ADD: "+",
SUB: "-",
MUL: "*",
DIV: "/",
MOD: "%",
AND: "&",
OR: "|",
XOR: "^",
LEFT_SHIFT: "<<",
RIGHT_SHIFT: ">>",
}
def __init__(self, type: RType, lhs: Value, rhs: Value, op: int, line: int = -1) -> None:
super().__init__(line)
self.type = type
self.lhs = lhs
self.rhs = rhs
self.op = op
def sources(self) -> list[Value]:
return [self.lhs, self.rhs]
def set_sources(self, new: list[Value]) -> None:
self.lhs, self.rhs = new
def accept(self, visitor: OpVisitor[T]) -> T:
return visitor.visit_int_op(self)
# We can't have this in the IntOp class body, because of
# https://github.com/mypyc/mypyc/issues/932.
int_op_to_id: Final = {op: op_id for op_id, op in IntOp.op_str.items()}
@final
|
IntOp
|
python
|
apache__airflow
|
airflow-ctl/src/airflowctl/api/datamodels/generated.py
|
{
"start": 1743,
"end": 1899
}
|
class ____(str, Enum):
"""
Bulk Action to be taken if the entity does not exist.
"""
FAIL = "fail"
SKIP = "skip"
|
BulkActionNotOnExistence
|
python
|
pytorch__pytorch
|
test/torch_np/test_basic.py
|
{
"start": 10874,
"end": 11132
}
|
class ____(TestCase):
"""Smoke_test (sequence of scalars) -> (array)"""
@parametrize("func, args", funcs_and_args)
def test_argstoarray_simple(self, func, args):
a = func(*args)
assert isinstance(a, w.ndarray)
|
TestPythonArgsToArray
|
python
|
kamyu104__LeetCode-Solutions
|
Python/clone-graph.py
|
{
"start": 29,
"end": 143
}
|
class ____(object):
def __init__(self, x):
self.label = x
self.neighbors = []
|
UndirectedGraphNode
|
python
|
dagster-io__dagster
|
python_modules/libraries/dagster-docker/dagster_docker/pipes.py
|
{
"start": 1919,
"end": 8242
}
|
class ____(PipesClient, TreatAsResourceParam):
"""A pipes client that runs external processes in docker containers.
By default context is injected via environment variables and messages are parsed out of the
log stream, with other logs forwarded to stdout of the orchestration process.
Args:
env (Optional[Mapping[str, str]]): An optional dict of environment variables to pass to the
container.
register (Optional[Mapping[str, str]]): An optional dict of registry credentials to login to
the docker client.
context_injector (Optional[PipesContextInjector]): A context injector to use to inject
context into the docker container process. Defaults to :py:class:`PipesEnvContextInjector`.
message_reader (Optional[PipesMessageReader]): A message reader to use to read messages
from the docker container process. Defaults to :py:class:`DockerLogsMessageReader`.
"""
def __init__(
self,
env: Optional[Mapping[str, str]] = None,
registry: Optional[Mapping[str, str]] = None,
context_injector: Optional[PipesContextInjector] = None,
message_reader: Optional[PipesMessageReader] = None,
):
self.env = check.opt_mapping_param(env, "env", key_type=str, value_type=str)
self.registry = check.opt_mapping_param(registry, "registry", key_type=str, value_type=str)
self.context_injector = (
check.opt_inst_param(
context_injector,
"context_injector",
PipesContextInjector,
)
or PipesEnvContextInjector()
)
self.message_reader = (
check.opt_inst_param(message_reader, "message_reader", PipesMessageReader)
or PipesDockerLogsMessageReader()
)
@classmethod
def _is_dagster_maintained(cls) -> bool:
return True
def run( # pyright: ignore[reportIncompatibleMethodOverride]
self,
*,
context: Union[OpExecutionContext, AssetExecutionContext],
image: str,
extras: Optional[PipesExtras] = None,
command: Optional[Union[str, Sequence[str]]] = None,
env: Optional[Mapping[str, str]] = None,
registry: Optional[Mapping[str, str]] = None,
container_kwargs: Optional[Mapping[str, Any]] = None,
) -> PipesClientCompletedInvocation:
"""Create a docker container and run it to completion, enriched with the pipes protocol.
Args:
image (str):
The image for the container to use.
command (Optional[Union[str, Sequence[str]]]):
The command for the container use.
env (Optional[Mapping[str,str]]):
A mapping of environment variable names to values to set on the first
container in the pod spec, on top of those configured on resource.
registry (Optional[Mapping[str, str]]:
A mapping containing url, username, and password to be used
with docker client login.
container_kwargs (Optional[Mapping[str, Any]]:
Arguments to be forwarded to docker client containers.create.
extras (Optional[PipesExtras]):
Extra values to pass along as part of the ext protocol.
context_injector (Optional[PipesContextInjector]):
Override the default ext protocol context injection.
message_reader (Optional[PipesMessageReader]):
Override the default ext protocol message reader.
Returns:
PipesClientCompletedInvocation: Wrapper containing results reported by the external
process.
"""
with open_pipes_session(
context=context,
context_injector=self.context_injector,
message_reader=self.message_reader,
extras=extras,
) as pipes_session:
client = docker.client.from_env()
registry = registry or self.registry
if registry:
client.login(
registry=registry["url"],
username=registry["username"],
password=registry["password"],
)
try:
container = self._create_container(
client=client,
image=image,
command=command,
env=env,
open_pipes_session_env=pipes_session.get_bootstrap_env_vars(),
container_kwargs=container_kwargs,
)
except docker.errors.ImageNotFound:
client.images.pull(image)
container = self._create_container(
client=client,
image=image,
command=command,
env=env,
open_pipes_session_env=pipes_session.get_bootstrap_env_vars(),
container_kwargs=container_kwargs,
)
result = container.start()
try:
if isinstance(self.message_reader, PipesDockerLogsMessageReader):
self.message_reader.consume_docker_logs(container)
result = container.wait()
if result["StatusCode"] != 0:
raise DagsterPipesError(f"Container exited with non-zero status code: {result}")
finally:
container.stop()
return PipesClientCompletedInvocation(pipes_session)
def _create_container(
self,
client,
image: str,
command: Optional[Union[str, Sequence[str]]],
env: Optional[Mapping[str, str]],
container_kwargs: Optional[Mapping[str, Any]],
open_pipes_session_env: Mapping[str, str],
):
kwargs = dict(container_kwargs or {})
kwargs_env = kwargs.pop("environment", {})
return client.containers.create(
image=image,
command=command,
detach=True,
environment={
**open_pipes_session_env,
**(self.env or {}),
**(env or {}),
**kwargs_env,
},
**kwargs,
)
|
PipesDockerClient
|
python
|
tensorflow__tensorflow
|
tensorflow/python/kernel_tests/v1_compat_tests/session_ops_test.py
|
{
"start": 1193,
"end": 11100
}
|
class ____(test.TestCase):
@test_util.run_deprecated_v1
def testHandleBasic(self):
with self.cached_session() as sess:
# Return a handle.
a = constant_op.constant(10)
b = constant_op.constant(5)
c = math_ops.multiply(a, b)
h = session_ops.get_session_handle(c)
h = self.evaluate(h)
# Feed a tensor handle.
f, x = session_ops.get_session_tensor(h.handle, dtypes.int32)
y = math_ops.multiply(x, 10)
self.assertEqual(500, sess.run(y, feed_dict={f: h.handle}))
@test_util.run_deprecated_v1
def testHandleEval(self):
with self.cached_session() as sess:
# Return a handle.
a = constant_op.constant(10)
b = constant_op.constant(5)
c = math_ops.multiply(a, b)
h = session_ops.get_session_handle(c)
h = self.evaluate(h)
# Get the tensor from its handle.
self.assertEqual(50, h.eval())
@test_util.run_deprecated_v1
def testHandleAndValue(self):
with self.cached_session() as sess:
# Return a handle and a value.
a = constant_op.constant(10)
b = constant_op.constant(5)
c = math_ops.multiply(a, b)
h = session_ops.get_session_handle(c)
v = math_ops.multiply(a, c)
h, v = self.evaluate([h, v])
self.assertEqual(50, h.eval())
self.assertEqual(500, v)
@test_util.run_deprecated_v1
def testHandleCond(self):
with self.cached_session() as sess:
# Return a handle and a value
a = constant_op.constant(10)
b = constant_op.constant(5)
p = math_ops.less(a, b)
c = math_ops.multiply(a, b)
h = session_ops.get_session_handle(c)
p, h = self.evaluate([p, h])
# Run by feeding a tensor handle.
f, x = session_ops.get_session_tensor(h.handle, dtypes.int32)
if p:
y = math_ops.multiply(x, 10)
else:
y = math_ops.multiply(x, 100)
result = sess.run(y, feed_dict={f: h.handle})
self.assertEqual(5000, result)
@test_util.run_deprecated_v1
def testHandleForLoop(self):
with self.cached_session() as sess:
# Initialize a handle.
a = constant_op.constant(0)
h = session_ops.get_session_handle(a)
h = self.evaluate(h)
# Do some computation.
f, x = session_ops.get_session_tensor(h.handle, dtypes.int32)
# Must define the loop body outside the loop.
h_x = session_ops.get_session_handle(math_ops.add(x, 1))
for _ in range(100):
# This exercises garbage collection.
h = sess.run(h_x, feed_dict={f: h.handle})
self.assertEqual(100, h.eval())
@test_util.run_deprecated_v1
def testHandleWhileLoop(self):
with self.cached_session() as sess:
# Initialize a handle.
a = constant_op.constant(0)
h = session_ops.get_session_handle(a)
h = self.evaluate(h)
# Do some computation.
f, x = session_ops.get_session_tensor(h.handle, dtypes.int32)
b = constant_op.constant(100)
p = math_ops.less(x, b)
# Must define the loop body outside the loop.
h_x = session_ops.get_session_handle(math_ops.add(x, 1))
while True:
rp, h = sess.run([p, h_x], feed_dict={f: h.handle})
if not rp:
break
self.assertEqual(101, h.eval())
@test_util.run_deprecated_v1
def testHandleMover(self):
with self.cached_session() as sess:
# Return a handle.
a = constant_op.constant(10)
b = constant_op.constant(5)
c = math_ops.multiply(a, b)
h = session_ops.get_session_handle(c)
h = self.evaluate(h)
# Feed a tensor handle.
f, x = session_ops.get_session_tensor(h.handle, dtypes.int32)
y = math_ops.multiply(x, 10)
self.assertEqual(500, sess.run(y, feed_dict={f: h.handle}))
# Feed another tensor handle.
with ops.device(test.gpu_device_name()):
a = constant_op.constant(10)
h = session_ops.get_session_handle(a)
h = self.evaluate(h)
self.assertEqual(100, sess.run(y, feed_dict={f: h.handle}))
@test_util.run_deprecated_v1
def testHandleDelete(self):
with self.cached_session() as sess:
# Return a handle.
a = constant_op.constant(10)
b = constant_op.constant(5)
c = math_ops.multiply(a, b)
h = session_ops.get_session_handle(c)
self.evaluate(h).delete()
@test_util.run_deprecated_v1
def testHandleDeleteRaw(self):
with self.cached_session() as sess:
# Return a handle.
a = constant_op.constant(10)
b = constant_op.constant(5)
c = math_ops.multiply(a, b)
h = session_ops.get_session_handle(c)
h = self.evaluate(h)
# Delete using a raw tensor handle.
raw_h = h.get_raw_handle()
f, x = session_ops.delete_session_tensor(raw_h)
sess.run(x, feed_dict={f: raw_h})
@test_util.run_deprecated_v1
def testMultiDevices(self):
with self.cached_session() as sess:
with ops.device(test.gpu_device_name()):
a = constant_op.constant(1.0)
a_handle = self.evaluate(session_ops.get_session_handle(a))
with ops.device("/cpu:0"):
b = constant_op.constant(2.0)
b_handle = self.evaluate(session_ops.get_session_handle(b))
a_p, a_t = session_ops.get_session_tensor(a_handle.handle, dtypes.float32)
b_p, b_t = session_ops.get_session_tensor(b_handle.handle, dtypes.float32)
c = math_ops.add(a_t, b_t)
c_handle = sess.run(
session_ops.get_session_handle(c),
feed_dict={a_p: a_handle.handle,
b_p: b_handle.handle})
self.assertEqual(3.0, c_handle.eval())
@test_util.run_deprecated_v1
def testHandleGC(self):
with self.cached_session() as sess:
# initial values live on CPU
with ops.device("/cpu:0"):
one = constant_op.constant(1, dtype=dtypes.float32)
one_handle = self.evaluate(session_ops.get_session_handle(one))
x_handle = self.evaluate(session_ops.get_session_handle(one))
# addition lives on GPU
with ops.device(test.gpu_device_name()):
add_h1, add_t1 = session_ops.get_session_tensor(one_handle.handle,
dtypes.float32)
add_h2, add_t2 = session_ops.get_session_tensor(x_handle.handle,
dtypes.float32)
add_op = math_ops.add(add_t1, add_t2)
add_output = session_ops.get_session_handle(add_op)
# add 1 to tensor 20 times
for _ in range(20):
x_handle = sess.run(
add_output,
feed_dict={add_h1: one_handle.handle,
add_h2: x_handle.handle})
@test_util.run_deprecated_v1
def testHandlePlacement(self):
with self.cached_session() as sess:
a = constant_op.constant(1.0)
a_handle_op = session_ops.get_session_handle(a)
b = constant_op.constant(2.0)
b_handle_op = session_ops.get_session_handle(b)
a_handle = self.evaluate(a_handle_op)
b_handle = self.evaluate(b_handle_op)
a_p, a_t = session_ops.get_session_tensor(a_handle.handle, dtypes.float32)
b_p, b_t = session_ops.get_session_tensor(b_handle.handle, dtypes.float32)
c = math_ops.add(a_t, b_t)
c_handle = sess.run(
session_ops.get_session_handle(c),
feed_dict={a_p: a_handle.handle,
b_p: b_handle.handle})
self.assertEqual(3.0, c_handle.eval())
@test_util.run_deprecated_v1
def testFeedOneHandleDirectly(self):
with self.cached_session() as sess:
a = constant_op.constant(10.0)
b = constant_op.constant(5.0)
c = math_ops.multiply(a, b)
d = math_ops.multiply(c, c)
h_c = self.evaluate(session_ops.get_session_handle(c))
self.assertAllClose(2500.0, sess.run(d, feed_dict={c: h_c}))
@test_util.run_deprecated_v1
def testDirectHandleFeedOverlappingWithFetches(self):
with self.cached_session() as sess:
a = constant_op.constant(10.0)
b = constant_op.constant(5.0)
c = math_ops.multiply(a, b)
h_c = self.evaluate(session_ops.get_session_handle(c))
d = array_ops.identity(c)
c_val = sess.run(c, feed_dict={c: h_c})
self.assertAllClose(50.0, c_val)
d_val = sess.run(d, feed_dict={c: h_c})
self.assertAllClose(50.0, d_val)
c_val, d_val = sess.run([c, d], feed_dict={c: h_c, d: 60.0})
self.assertAllClose(50.0, c_val)
self.assertAllClose(60.0, d_val)
c_val, d_val = sess.run([c, d], feed_dict={c: 60.0, d: h_c})
self.assertAllClose(60.0, c_val)
self.assertAllClose(50.0, d_val)
c_val, d_val = sess.run([c, d], feed_dict={c: h_c, d: h_c})
self.assertAllClose(50.0, c_val)
self.assertAllClose(50.0, d_val)
@test_util.run_deprecated_v1
def testFeedTwoHandlesDirectly(self):
with self.cached_session() as sess:
a = constant_op.constant(10.0)
b = constant_op.constant(5.0)
c = math_ops.multiply(a, b)
d = math_ops.div(a, b)
e = math_ops.subtract(c, d)
h_c = self.evaluate(session_ops.get_session_handle(c))
h_d = self.evaluate(session_ops.get_session_handle(d))
self.assertAllClose(48.0, sess.run(e, feed_dict={c: h_c, d: h_d}))
self.assertAllClose(-48.0, sess.run(e, feed_dict={c: h_d, d: h_c}))
@test_util.run_deprecated_v1
def testFeedHandleToVariableDirectly(self):
with self.cached_session() as sess:
a = variables.Variable(12.0)
inc_a = state_ops.assign_add(a, 2.0)
b = math_ops.add(a, 5.0)
self.evaluate(a.initializer)
h_a_read = sess.run(session_ops.get_session_handle(a.read_value()))
self.assertAllClose(12.0, self.evaluate(a))
self.assertAllClose(17.0, sess.run(b, feed_dict={a: h_a_read}))
self.evaluate(inc_a)
self.assertAllClose(19.0, sess.run(b, feed_dict={a: h_a_read}))
if __name__ == "__main__":
test.main()
|
SessionOpsTest
|
python
|
pypa__pip
|
src/pip/_vendor/rich/traceback.py
|
{
"start": 7948,
"end": 8278
}
|
class ____:
exc_type: str
exc_value: str
syntax_error: Optional[_SyntaxError] = None
is_cause: bool = False
frames: List[Frame] = field(default_factory=list)
notes: List[str] = field(default_factory=list)
is_group: bool = False
exceptions: List["Trace"] = field(default_factory=list)
@dataclass
|
Stack
|
python
|
dask__dask
|
dask/dataframe/dask_expr/_categorical.py
|
{
"start": 4716,
"end": 4905
}
|
class ____(Elemwise):
_parameters = ["frame"]
operation = M.copy
@functools.cached_property
def _meta(self):
return clear_known_categories(self.frame._meta)
|
AsUnknown
|
python
|
run-llama__llama_index
|
llama-index-core/llama_index/core/data_structs/data_structs.py
|
{
"start": 3483,
"end": 4433
}
|
class ____(IndexStruct):
"""A table of keywords mapping keywords to text chunks."""
table: Dict[str, Set[str]] = field(default_factory=dict)
def add_node(self, keywords: List[str], node: BaseNode) -> None:
"""Add text to table."""
for keyword in keywords:
if keyword not in self.table:
self.table[keyword] = set()
self.table[keyword].add(node.node_id)
@property
def node_ids(self) -> Set[str]:
"""Get all node ids."""
return set.union(*self.table.values())
@property
def keywords(self) -> Set[str]:
"""Get all keywords in the table."""
return set(self.table.keys())
@property
def size(self) -> int:
"""Get the size of the table."""
return len(self.table)
@classmethod
def get_type(cls) -> IndexStructType:
"""Get type."""
return IndexStructType.KEYWORD_TABLE
@dataclass
|
KeywordTable
|
python
|
pytorch__pytorch
|
torch/optim/swa_utils.py
|
{
"start": 15467,
"end": 21885
}
|
class ____(LRScheduler):
r"""Anneals the learning rate in each parameter group to a fixed value.
This learning rate scheduler is meant to be used with Stochastic Weight
Averaging (SWA) method (see `torch.optim.swa_utils.AveragedModel`).
Args:
optimizer (torch.optim.Optimizer): wrapped optimizer
swa_lrs (float or list): the learning rate value for all param groups
together or separately for each group.
annealing_epochs (int): number of epochs in the annealing phase
(default: 10)
annealing_strategy (str): "cos" or "linear"; specifies the annealing
strategy: "cos" for cosine annealing, "linear" for linear annealing
(default: "cos")
last_epoch (int): the index of the last epoch (default: -1)
The :class:`SWALR` scheduler can be used together with other
schedulers to switch to a constant learning rate late in the training
as in the example below.
Example:
>>> # xdoctest: +SKIP("Undefined variables")
>>> loader, optimizer, model = ...
>>> lr_lambda = lambda epoch: 0.9
>>> scheduler = torch.optim.lr_scheduler.MultiplicativeLR(optimizer,
>>> lr_lambda=lr_lambda)
>>> swa_scheduler = torch.optim.swa_utils.SWALR(optimizer,
>>> anneal_strategy="linear", anneal_epochs=20, swa_lr=0.05)
>>> swa_start = 160
>>> for i in range(300):
>>> for input, target in loader:
>>> optimizer.zero_grad()
>>> loss_fn(model(input), target).backward()
>>> optimizer.step()
>>> if i > swa_start:
>>> swa_scheduler.step()
>>> else:
>>> scheduler.step()
.. _Averaging Weights Leads to Wider Optima and Better Generalization:
https://arxiv.org/abs/1803.05407
"""
def __init__(
self,
optimizer: Optimizer,
swa_lr: float,
anneal_epochs=10,
anneal_strategy: Literal["cos", "linear"] = "cos",
last_epoch=-1,
) -> None: # noqa: D107
swa_lrs = _format_param("swa_lr", optimizer, swa_lr)
for swa_lr, group in zip(swa_lrs, optimizer.param_groups, strict=True):
group["swa_lr"] = swa_lr
if anneal_strategy not in ["cos", "linear"]:
raise ValueError(
"anneal_strategy must by one of 'cos' or 'linear', "
f"instead got {anneal_strategy}"
)
self._set_anneal_func(anneal_strategy)
if not isinstance(anneal_epochs, int) or anneal_epochs < 0:
raise ValueError(
f"anneal_epochs must be equal or greater than 0, got {anneal_epochs}"
)
self.anneal_epochs = anneal_epochs
super().__init__(optimizer, last_epoch)
@staticmethod
def _linear_anneal(t):
return t
@staticmethod
def _cosine_anneal(t):
return (1 - math.cos(math.pi * t)) / 2
@staticmethod
def _get_initial_lr(lr, swa_lr, alpha):
if alpha == 1:
return swa_lr
return (lr - alpha * swa_lr) / (1 - alpha)
@override
def get_lr(self):
r"""Compute the next learning rate for each of the optimizer's
:attr:`~torch.optim.Optimizer.param_groups`.
Uses :attr:`anneal_func` to interpolate between each group's
``group["lr"]`` and ``group["swa_lr"]`` over :attr:`anneal_epochs`
epochs. Once :attr:`anneal_epochs` is reached, keeps the learning rate
fixed at ``group["swa_lr"]``.
Returns:
list[float | Tensor]: A :class:`list` of learning rates for each of
the optimizer's :attr:`~torch.optim.Optimizer.param_groups` with the
same types as their current ``group["lr"]``\s.
.. note::
If you're trying to inspect the most recent learning rate, use
:meth:`get_last_lr()` instead.
.. note::
The returned :class:`~torch.Tensor`\s are copies, and never alias
the optimizer's ``group["lr"]``\s.
"""
# `_get_lr_called_within_step` is only available `_enable_get_lr_call`,
# so we ignore the type error here. See `LRScheduler.step()` for more details.
if not self._get_lr_called_within_step:
warnings.warn(
"To get the last learning rate computed by the scheduler, "
"please use `get_last_lr()`.",
UserWarning,
stacklevel=2,
)
# Set in `LRScheduler._initial_step()`
step = self._step_count - 1
if self.anneal_epochs == 0:
step = max(1, step)
# pyrefly: ignore [no-matching-overload]
prev_t = max(0, min(1, (step - 1) / max(1, self.anneal_epochs)))
prev_alpha = self.anneal_func(prev_t)
prev_lrs = [
self._get_initial_lr(group["lr"], group["swa_lr"], prev_alpha)
for group in self.optimizer.param_groups
]
# pyrefly: ignore [no-matching-overload]
t = max(0, min(1, step / max(1, self.anneal_epochs)))
alpha = self.anneal_func(t)
return [
group["swa_lr"] * alpha + lr * (1 - alpha)
for group, lr in zip(self.optimizer.param_groups, prev_lrs, strict=True)
]
def _set_anneal_func(self, anneal_strategy: Literal["cos", "linear"]) -> None:
self._anneal_strategy = anneal_strategy
if anneal_strategy == "cos":
self.anneal_func = self._cosine_anneal
else:
self.anneal_func = self._linear_anneal
@override
def state_dict(self) -> dict[str, Any]:
"""Return the state of the scheduler as a :class:`dict`.
It contains an entry for every variable in self.__dict__ which
is not the optimizer or anneal_func.
"""
return {
key: value
for key, value in self.__dict__.items()
if key not in ("optimizer", "anneal_func")
}
@override
def load_state_dict(self, state_dict: dict[str, Any]) -> None:
"""Load the scheduler's state.
Args:
state_dict (dict): scheduler state. Should be an object returned
from a call to :meth:`state_dict`.
"""
self.__dict__.update(state_dict)
self._set_anneal_func(self._anneal_strategy)
|
SWALR
|
python
|
huggingface__transformers
|
examples/modular-transformers/modeling_super.py
|
{
"start": 3737,
"end": 7742
}
|
class ____(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.hidden_size = config.hidden_size
self.intermediate_size = config.intermediate_size
self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=config.mlp_bias)
self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=config.mlp_bias)
self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=config.mlp_bias)
self.act_fn = ACT2FN[config.hidden_act]
def forward(self, x):
down_proj = self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))
return down_proj
def rotate_half(x):
"""Rotates half the hidden dims of the input."""
x1 = x[..., : x.shape[-1] // 2]
x2 = x[..., x.shape[-1] // 2 :]
return torch.cat((-x2, x1), dim=-1)
def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
"""Applies Rotary Position Embedding to the query and key tensors.
Args:
q (`torch.Tensor`): The query tensor.
k (`torch.Tensor`): The key tensor.
cos (`torch.Tensor`): The cosine part of the rotary embedding.
sin (`torch.Tensor`): The sine part of the rotary embedding.
position_ids (`torch.Tensor`, *optional*):
Deprecated and unused.
unsqueeze_dim (`int`, *optional*, defaults to 1):
The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
Returns:
`tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
"""
cos = cos.unsqueeze(unsqueeze_dim)
sin = sin.unsqueeze(unsqueeze_dim)
q_embed = (q * cos) + (rotate_half(q) * sin)
k_embed = (k * cos) + (rotate_half(k) * sin)
return q_embed, k_embed
def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
"""
This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
"""
batch, num_key_value_heads, slen, head_dim = hidden_states.shape
if n_rep == 1:
return hidden_states
hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
def eager_attention_forward(
module: nn.Module,
query: torch.Tensor,
key: torch.Tensor,
value: torch.Tensor,
attention_mask: Optional[torch.Tensor],
scaling: float,
dropout: float = 0.0,
**kwargs: Unpack[TransformersKwargs],
):
key_states = repeat_kv(key, module.num_key_value_groups)
value_states = repeat_kv(value, module.num_key_value_groups)
attn_weights = torch.matmul(query, key_states.transpose(2, 3)) * scaling
if attention_mask is not None:
causal_mask = attention_mask[:, :, :, : key_states.shape[-2]]
attn_weights = attn_weights + causal_mask
attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype)
attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)
attn_output = torch.matmul(attn_weights, value_states)
attn_output = attn_output.transpose(1, 2).contiguous()
return attn_output, attn_weights
|
SuperMLP
|
python
|
tiangolo__fastapi
|
tests/test_response_class_no_mediatype.py
|
{
"start": 328,
"end": 3415
}
|
class ____(BaseModel):
errors: typing.List[Error]
@app.get(
"/a",
response_class=Response,
responses={500: {"description": "Error", "model": JsonApiError}},
)
async def a():
pass # pragma: no cover
@app.get("/b", responses={500: {"description": "Error", "model": Error}})
async def b():
pass # pragma: no cover
client = TestClient(app)
def test_openapi_schema():
response = client.get("/openapi.json")
assert response.status_code == 200, response.text
assert response.json() == {
"openapi": "3.1.0",
"info": {"title": "FastAPI", "version": "0.1.0"},
"paths": {
"/a": {
"get": {
"responses": {
"500": {
"description": "Error",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/JsonApiError"
}
}
},
},
"200": {"description": "Successful Response"},
},
"summary": "A",
"operationId": "a_a_get",
}
},
"/b": {
"get": {
"responses": {
"500": {
"description": "Error",
"content": {
"application/json": {
"schema": {"$ref": "#/components/schemas/Error"}
}
},
},
"200": {
"description": "Successful Response",
"content": {"application/json": {"schema": {}}},
},
},
"summary": "B",
"operationId": "b_b_get",
}
},
},
"components": {
"schemas": {
"Error": {
"title": "Error",
"required": ["status", "title"],
"type": "object",
"properties": {
"status": {"title": "Status", "type": "string"},
"title": {"title": "Title", "type": "string"},
},
},
"JsonApiError": {
"title": "JsonApiError",
"required": ["errors"],
"type": "object",
"properties": {
"errors": {
"title": "Errors",
"type": "array",
"items": {"$ref": "#/components/schemas/Error"},
}
},
},
}
},
}
|
JsonApiError
|
python
|
joke2k__faker
|
faker/providers/address/pl_PL/__init__.py
|
{
"start": 45,
"end": 14745
}
|
class ____(AddressProvider):
cities = (
"Warszawa",
"Kraków",
"Łódź",
"Wrocław",
"Poznań",
"Gdańsk",
"Szczecin",
"Bydgoszcz",
"Lublin",
"Katowice",
"Białystok",
"Gdynia",
"Częstochowa",
"Radom",
"Sosnowiec",
"Toruń",
"Kielce",
"Gliwice",
"Rzeszów",
"Zabrze",
"Bytom",
"Olsztyn",
"Bielsko-Biała",
"Ruda Śląska",
"Rybnik",
"Tychy",
"Dąbrowa Górnicza",
"Gorzów Wielkopolski",
"Elbląg",
"Płock",
"Opole",
"Wałbrzych",
"Zielona Góra",
"Włocławek",
"Tarnów",
"Chorzów",
"Koszalin",
"Kalisz",
"Legnica",
"Grudziądz",
"Słupsk",
"Jaworzno",
"Jastrzębie-Zdrój",
"Nowy Sącz",
"Jelenia Góra",
"Konin",
"Piotrków Trybunalski",
"Siedlce",
"Inowrocław",
"Mysłowice",
"Piła",
"Lubin",
"Ostrów Wielkopolski",
"Ostrowiec Świętokrzyski",
"Gniezno",
"Stargard Szczeciński",
"Siemianowice Śląskie",
"Suwałki",
"Głogów",
"Pabianice",
"Chełm",
"Zamość",
"Tomaszów Mazowiecki",
"Leszno",
"Przemyśl",
"Stalowa Wola",
"Kędzierzyn-Koźle",
"Łomża",
"Żory",
"Mielec",
"Tarnowskie Góry",
"Tczew",
"Bełchatów",
"Świdnica",
"Ełk",
"Pruszków",
"Będzin",
"Biała Podlaska",
"Zgierz",
"Piekary Śląskie",
"Racibórz",
"Legionowo",
"Ostrołęka",
"Świętochłowice",
"Starachowice",
"Zawiercie",
"Wejherowo",
"Puławy",
"Wodzisław Śląski",
"Starogard Gdański",
"Skierniewice",
"Tarnobrzeg",
"Skarżysko-Kamienna",
"Radomsko",
"Krosno",
"Rumia",
"Dębica",
"Kołobrzeg",
"Kutno",
"Nysa",
"Ciechanów",
"Otwock",
"Piaseczno",
"Zduńska Wola",
"Sieradz",
"Świnoujście",
"Żyrardów",
"Szczecinek",
"Świdnik",
"Chojnice",
"Nowa Sól",
"Oświęcim",
"Bolesławiec",
"Mińsk Mazowiecki",
"Mikołów",
"Jarosław",
"Sanok",
"Knurów",
"Malbork",
"Żary",
"Kwidzyn",
"Chrzanów",
"Sopot",
"Sochaczew",
"Wołomin",
"Oleśnica",
"Brzeg",
"Olkusz",
"Jasło",
"Cieszyn",
"Kraśnik",
"Lębork",
"Czechowice-Dziedzice",
"Dzierżoniów",
"Ostróda",
"Police",
"Nowy Targ",
"Iława",
"Czeladź",
"Myszków",
"Żywiec",
"Zgorzelec",
"Oława",
"Bielawa",
"Swarzędz",
"Mława",
"Ząbki",
"Łuków",
"Augustów",
"Śrem",
"Bochnia",
"Luboń",
"Giżycko",
"Grodzisk Mazowiecki",
"Łowicz",
"Krotoszyn",
"Września",
"Turek",
"Pruszcz Gdański",
"Brodnica",
"Gorlice",
"Czerwionka-Leszczyny",
"Kłodzko",
"Marki",
"Nowy Dwór Mazowiecki",
"Kętrzyn",
"Zakopane",
"Wyszków",
"Biłgoraj",
"Żagań",
"Bielsk Podlaski",
"Świecie",
"Wałcz",
"Jarocin",
"Pszczyna",
"Wągrowiec",
"Szczytno",
"Białogard",
"Sandomierz",
"Bartoszyce",
"Kluczbork",
"Lubliniec",
"Skawina",
"Jawor",
"Kościan",
"Wieluń",
"Kościerzyna",
"Nowa Ruda",
"Świebodzice",
"Koło",
"Piastów",
"Goleniów",
"Ostrów Mazowiecka",
"Polkowice",
"Lubartów",
"Zambrów",
"Płońsk",
"Reda",
"Łaziska Górne",
"Środa Wielkopolska",
)
street_prefixes = (
"ulica",
"aleja",
"plac",
)
streets = (
"Polna",
"Leśna",
"Słoneczna",
"Krótka",
"Szkolna",
"Ogrodowa",
"Lipowa",
"Brzozowa",
"Łąkowa",
"Kwiatowa",
"Sosnowa",
"Kościelna",
"Akacjowa",
"Parkowa",
"Zielona",
"Kolejowa",
"Sportowa",
"Dębowa",
"Kościuszki",
"Maja",
"Mickiewicza",
"Cicha",
"Spokojna",
"Klonowa",
"Spacerowa",
"Swierkowa",
"Kasztanowa",
"Nowa",
"Piaskowa",
"Sienkiewicza",
"Rózana",
"Topolowa",
"Wiśniowa",
"Dworcowa",
"Wiejska",
"Graniczna",
"Słowackiego",
"Długa",
"Wrzosowa",
"Konopnickiej",
"Boczna",
"Wąska",
"Wierzbowa",
"Jaśminowa",
"Wspólna",
"Modrzewiowa",
"Kopernika",
"Jana Pawła II",
"Poprzeczna",
"Wesoła",
"Pogodna",
"Żeromskiego",
"Rynek",
"Bukowa",
"Wojska Polskiego",
"Sadowa",
"Górna",
"Jodłowa",
"Wolności",
"Glówna",
"Młyńska",
"Strażacka",
"Prusa",
"Jesionowa",
"Przemysłowa",
"Osiedlowa",
"Wiosenna",
"Sikorskiego",
"Chopina",
"Południowa",
"Malinowa",
"Stawowa",
"Reymonta",
"Piłsudskiego",
"Zacisze",
"Cmentarna",
"Okrężna",
"Kochanowskiego",
"Armii Krajowej",
"Miła",
"Jasna",
"Wodna",
"Zamkowa",
"Witosa",
"Reja",
"Warszawska",
"Miodowa",
"Partyzantów",
"Krzywa",
"Kilińskiego",
"Dolna",
"Podgórna",
"Kreta",
"Jarzębinowa",
"Moniuszki",
"Targowa",
"Prosta",
"Orzeszkowej",
"Spółdzielcza",
"Jagodowa",
"Działkowa",
"Staszica",
"Orzechowa",
"Rzemieślnicza",
"Rzeczna",
"Bolesława Chrobrego",
"Fabryczna",
"Tęczowa",
"Chabrowa",
"Poziomkowa",
"Konwaliowa",
"Wyszyńskiego",
"Kalinowa",
"Północna",
"Matejki",
"Grunwaldzka",
"Cisowa",
"Nadrzeczna",
"Pocztowa",
"Zachodnia",
"Dąbrowskiego",
"Grabowa",
"Norwida",
"Źródlana",
"Asnyka",
"Gajowa",
"Paderewskiego",
"Listopada",
"Wyspiańskiego",
"Mostowa",
"Broniewskiego",
"Tuwima",
"Wschodnia",
"Jaworowa",
"Poznańska",
"Makowa",
"Bema",
"Jeziorna",
"Piękna",
"Czereśniowa",
"Mała",
"Krakowska",
"Radosna",
"Leszczynowa",
"Traugutta",
"Jadwigi",
"Rolna",
"Wyzwolenia",
"Piastowska",
"Grzybowa",
"Krasickiego",
"Podleśna",
"Żytnia",
"Złota",
"Bursztynowa",
"Żwirowa",
"Stycznia",
"Widokowa",
"Kazimierza Wielkiego",
"Kamienna",
"Jałowcowa",
"Morelowa",
"Mieszka I",
"Myśliwska",
"Łączna",
"Szpitalna",
"Wczasowa",
"Żurawia",
"Fiołkowa",
"Głowackiego",
"Rolnicza",
"Tulipanowa",
"Władysława Jagiełły",
"Dworska",
"Letnia",
"Liliowa",
"Owocowa",
"Pułaskiego",
"Stefana Batorego",
"Harcerska",
"Kołłątaja",
"Strzelecka",
"Kraszewskiego",
"Władysława Łokietka",
"Żwirki i Wigury",
"Wrocławska",
"Gdańska",
"Turystyczna",
"Niepodległości",
"Poniatowskiego",
"Korczaka",
"Rybacka",
"Narutowicza",
"Okrzei",
"Krucza",
"Jagiellońska",
"Świerczewskiego",
"Kasprowicza",
"Szeroka",
"Jana III Sobieskiego",
"Młynarska",
"Olchowa",
"Powstańców Śląskich",
"Rumiankowa",
"Stroma",
"Starowiejska",
"Mazowiecka",
"Lawendowa",
"Robotnicza",
"Zbożowa",
"Mokra",
"Powstańców Wielkopolskich",
"Towarowa",
"Dobra",
"Środkowa",
"Willowa",
"Zielna",
"Zdrojowa",
"Opolska",
"Agrestowa",
"Księżycowa",
"Zwycięstwa",
"Fredry",
"Letniskowa",
"Andersa",
"Baczynskiego",
"Batalionów Chłopskich",
"Dąbrowskiej",
"Orla",
"Skłodowskiej-Curie",
"Błękitna",
"Rubinowa",
"Brzoskwiniowa",
"Urocza",
"Gałczynskiego",
"Krasińskiego",
"Pomorska",
"Szymanowskiego",
"Jeżynowa",
"Czarnieckiego",
"Nałkowskiej",
"Zaciszna",
"Porzeczkowa",
"Krańcowa",
"Jesienna",
"Klasztorna",
"Irysowa",
"Niecała",
"Wybickiego",
"Nadbrzeżna",
"Szarych Szeregów",
"Wałowa",
"Słowicza",
"Strumykowa",
"Drzymały",
"Gołębia",
"Torowa",
"Cegielniana",
"Cyprysowa",
"Słowianska",
"Diamentowa",
"Waryńskiego",
"Częstochowska",
"Dojazdowa",
"Przechodnia",
"Hallera",
"Lubelska",
"Plater",
"Popiełuszki",
"Borówkowa",
"Chełmońskiego",
"Daszyńskiego",
"Plażowa",
"Tartaczna",
"Jabłoniowa",
"Kossaka",
"Skargi",
"Ludowa",
"Sokola",
"Azaliowa",
"Szmaragdowa",
"Lipca",
"Staffa",
"Tysiąclecia",
"Brzechwy",
"Jastrzębia",
"Kusocińskiego",
"Storczykowa",
"Wilcza",
"Górnicza",
"Szafirowa",
"Długosza",
"Handlowa",
"Krokusowa",
"Składowa",
"Widok",
"Perłowa",
"Skośna",
"Wypoczynkowa",
"Chmielna",
"Jaskółcza",
"Nowowiejska",
"Piwna",
"Śląska",
"Zaułek",
"Głogowa",
"Górska",
"Truskawkowa",
"Kaszubska",
"Kosynierów",
"Mazurska",
"Srebrna",
"Bociania",
"Ptasia",
"Cedrowa",
"Rycerska",
"Wieniawskiego",
"Żabia",
"Toruńska",
"Podmiejska",
"Słonecznikowa",
"Sowia",
"Stolarska",
"Powstańców",
"Sucharskiego",
"Bolesława Krzywoustego",
"Konarskiego",
"Szczęśliwa",
"Lazurowa",
"Miarki",
"Narcyzowa",
"Browarna",
"Konstytucji 3 Maja",
"Majowa",
"Miłosza",
"Malczewskiego",
"Orkana",
"Skrajna",
"Bankowa",
"Bydgoska",
"Piekarska",
"Żeglarska",
"Jana",
"Turkusowa",
"Tylna",
"Wysoka",
"Zakątek",
"Maczka",
"Morska",
"Rataja",
"Szewska",
"Podwale",
"Pałacowa",
"Magnoliowa",
"Ceglana",
"Sawickiej",
"Ściegiennego",
"Wiklinowa",
"Zakole",
"Borowa",
"Kolorowa",
"Lisia",
"Lotnicza",
"Sarnia",
"Wiązowa",
"Grottgera",
"Kolonia",
"Królewska",
"Promienna",
"Daleka",
"Jana Sobieskiego",
"Rejtana",
"Wiatraczna",
"Kaliska",
"Łanowa",
"Średnia",
"Wiślana",
"Wróblewskiego",
"Koralowa",
"Kruczkowskiego",
"Lelewela",
"Makuszyńskiego",
"Sybiraków",
"Kowalska",
"Morcinka",
"Odrzańska",
"Okulickiego",
"Solidarnosci",
"Zapolskiej",
"Łabędzia",
"Wojciecha",
"Bałtycka",
"Lwowska",
"Rajska",
"Korfantego",
"Pszenna",
"Ciasna",
"Floriana",
"Hutnicza",
"Kielecka",
)
regions = (
"Dolnośląskie",
"Kujawsko - pomorskie",
"Lubelskie",
"Lubuskie",
"Łódzkie",
"Małopolskie",
"Mazowieckie",
"Opolskie",
"Podkarpackie",
"Podlaskie",
"Pomorskie",
"Śląskie",
"Świętokrzyskie",
"Warmińsko - mazurskie",
"Wielkopolskie",
"Zachodniopomorskie",
)
building_number_formats = ("##", "###", "##/##")
postcode_formats = ("##-###",)
street_address_formats = (
"{{street_prefix}} {{street_name}} {{building_number}}",
"{{street_prefix_short}} {{street_name}} {{building_number}}",
)
address_formats = ("{{street_address}}\n{{postcode}} {{city}}",)
def street_prefix(self) -> str:
"""
Randomly returns a street prefix
:example: 'aleja'
"""
return self.random_element(self.street_prefixes)
def street_prefix_short(self) -> str:
"""
Randomly returns an abbreviation of the street prefix.
:example: 'al.'
"""
return self.random_element(self.street_prefixes)[:2] + "." # type: ignore
def street_name(self) -> str:
"""
Randomly returns a street name
:example: 'Wróblewskiego'
"""
return self.random_element(self.streets)
def city(self) -> str:
"""
Randomly returns a street name
:example: 'Konin'
"""
return self.random_element(self.cities)
def administrative_unit(self) -> str:
"""
:example: 'Wielkopolskie'
"""
return self.random_element(self.regions)
def postcode(self) -> str:
"""
:example: '62-200'
"""
return "%02d-%03d" % (self.generator.random.randint(1, 99), self.generator.random.randint(1, 999))
def zipcode(self) -> str:
"""
:example: '62-200'
"""
return self.postcode()
def postalcode(self) -> str:
"""
:example: '62-200'
"""
return self.postcode()
region = administrative_unit
|
Provider
|
python
|
automl__auto-sklearn
|
test/test_pipeline/components/data_preprocessing/test_variance_threshold.py
|
{
"start": 251,
"end": 1407
}
|
class ____(PreprocessingTestCase):
def test_default_configuration(self):
transformations = []
for i in range(2):
transformation, original = _test_preprocessing(VarianceThreshold)
self.assertEqual(transformation.shape, original.shape)
self.assertTrue((transformation == original).all())
transformations.append(transformation)
if len(transformations) > 1:
self.assertTrue((transformations[-1] == transformations[-2]).all())
def test_default_configuration_sparse_data(self):
transformations = []
transformation, original = _test_preprocessing(
VarianceThreshold, make_sparse=True
)
self.assertEqual(transformation.shape, (100, 3))
self.assertTrue((transformation.toarray() == original.toarray()[:, 1:]).all())
self.assertIsInstance(transformation, sparse.csr_matrix)
transformations.append(transformation)
def test_preprocessing_dtype(self):
super(VarianceThresholdTest, self)._test_preprocessing_dtype(
VarianceThreshold, add_NaNs=False
)
|
VarianceThresholdTest
|
python
|
doocs__leetcode
|
solution/2000-2099/2031.Count Subarrays With More Ones Than Zeros/Solution.py
|
{
"start": 390,
"end": 806
}
|
class ____:
def subarraysWithMoreZerosThanOnes(self, nums: List[int]) -> int:
n = len(nums)
base = n + 1
tree = BinaryIndexedTree(n + base)
tree.update(base, 1)
mod = 10**9 + 7
ans = s = 0
for x in nums:
s += x or -1
ans += tree.query(s - 1 + base)
ans %= mod
tree.update(s + base, 1)
return ans
|
Solution
|
python
|
keras-team__keras
|
keras/src/layers/convolutional/base_depthwise_conv.py
|
{
"start": 571,
"end": 11608
}
|
class ____(Layer):
"""Abstract N-D depthwise convolution layer.
Depthwise convolution is a type of convolution in which each input channel
is convolved with a different kernel (called a depthwise kernel). You can
understand depthwise convolution as the first step in a depthwise separable
convolution.
It is implemented via the following steps:
- Split the input into individual channels.
- Convolve each channel with an individual depthwise kernel with
`depth_multiplier` output channels.
- Concatenate the convolved outputs along the channels axis.
Unlike a regular convolution, depthwise convolution does not mix information
across different input channels.
The `depth_multiplier` argument determines how many filter are applied to
one input channel. As such, it controls the amount of output channels that
are generated per input channel in the depthwise step.
Args:
rank: int, the rank of the convolution, e.g. 2 for 2D convolution.
depth_multiplier: The number of depthwise convolution output channels
for each input channel. The total number of depthwise convolution
output channels will be equal to `input_channel * depth_multiplier`.
kernel_size: int or tuple/list of `rank` integers, specifying the size
of the depthwise convolution window.
strides: int or tuple/list of `rank` integers, specifying the stride
length of the depthwise convolution. If only one int is specified,
the same stride size will be used for all dimensions.
`strides > 1` is incompatible with `dilation_rate > 1`.
padding: string, either `"valid"` or `"same"` (case-insensitive).
`"valid"` means no padding. `"same"` results in padding evenly to
the left/right or up/down of the input. When `padding="same"` and
`strides=1`, the output has the same size as the input.
data_format: string, either `"channels_last"` or `"channels_first"`.
The ordering of the dimensions in the inputs. `"channels_last"`
corresponds to inputs with shape `(batch, steps, features)`
while `"channels_first"` corresponds to inputs with shape
`(batch, features, steps)`. It defaults to the `image_data_format`
value found in your Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be `"channels_last"`.
dilation_rate: int or tuple/list of `rank` integers, specifying the
dilation rate to use for dilated convolution. If only one int is
specified, the same dilation rate will be used for all dimensions.
activation: Activation function. If `None`, no activation is applied.
use_bias: bool, if `True`, bias will be added to the output.
depthwise_initializer: Initializer for the depthwsie convolution
kernel. If `None`, the default initializer (`"glorot_uniform"`)
will be used.
bias_initializer: Initializer for the bias vector. If `None`, the
default initializer (`"zeros"`) will be used.
depthwise_regularizer: Optional regularizer for the convolution kernel.
bias_regularizer: Optional regularizer for the bias vector.
activity_regularizer: Optional regularizer function for the output.
depthwise_constraint: Optional projection function to be applied to the
kernel after being updated by an `Optimizer` (e.g. used to implement
norm constraints or value constraints for layer weights). The
function must take as input the unprojected variable and must return
the projected variable (which must have the same shape). Constraints
are not safe to use when doing asynchronous distributed training.
bias_constraint: Optional projection function to be applied to the
bias after being updated by an `Optimizer`.
"""
def __init__(
self,
rank,
depth_multiplier,
kernel_size,
strides=1,
padding="valid",
data_format=None,
dilation_rate=1,
activation=None,
use_bias=True,
depthwise_initializer="glorot_uniform",
bias_initializer="zeros",
depthwise_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
depthwise_constraint=None,
bias_constraint=None,
trainable=True,
name=None,
**kwargs,
):
super().__init__(
trainable=trainable,
name=name,
activity_regularizer=regularizers.get(activity_regularizer),
**kwargs,
)
self.rank = rank
self.depth_multiplier = depth_multiplier
self.kernel_size = standardize_tuple(kernel_size, rank, "kernel_size")
self.strides = standardize_tuple(strides, rank, "strides")
self.dilation_rate = standardize_tuple(
dilation_rate, rank, "dilation_rate"
)
self.padding = standardize_padding(padding)
self.data_format = standardize_data_format(data_format)
self.activation = activations.get(activation)
self.use_bias = use_bias
self.depthwise_initializer = initializers.get(depthwise_initializer)
self.bias_initializer = initializers.get(bias_initializer)
self.depthwise_regularizer = regularizers.get(depthwise_regularizer)
self.bias_regularizer = regularizers.get(bias_regularizer)
self.depthwise_constraint = constraints.get(depthwise_constraint)
self.bias_constraint = constraints.get(bias_constraint)
self.input_spec = InputSpec(min_ndim=self.rank + 2)
self.data_format = self.data_format
if self.depth_multiplier is not None and self.depth_multiplier <= 0:
raise ValueError(
"Invalid value for argument `depth_multiplier`. Expected a "
"strictly positive value. Received "
f"depth_multiplier={self.depth_multiplier}."
)
if not all(self.kernel_size):
raise ValueError(
"The argument `kernel_size` cannot contain 0. Received "
f"kernel_size={self.kernel_size}."
)
if not all(self.strides):
raise ValueError(
"The argument `strides` cannot contains 0. Received "
f"strides={self.strides}"
)
if max(self.strides) > 1 and max(self.dilation_rate) > 1:
raise ValueError(
"`strides > 1` not supported in conjunction with "
f"`dilation_rate > 1`. Received: strides={self.strides} and "
f"dilation_rate={self.dilation_rate}"
)
def build(self, input_shape):
if self.data_format == "channels_last":
channel_axis = -1
input_channel = input_shape[-1]
else:
channel_axis = 1
input_channel = input_shape[1]
self.input_spec = InputSpec(
min_ndim=self.rank + 2, axes={channel_axis: input_channel}
)
depthwise_shape = self.kernel_size + (
input_channel,
self.depth_multiplier,
)
self.kernel = self.add_weight(
name="kernel",
shape=depthwise_shape,
initializer=self.depthwise_initializer,
regularizer=self.depthwise_regularizer,
constraint=self.depthwise_constraint,
trainable=True,
dtype=self.dtype,
)
if self.use_bias:
self.bias = self.add_weight(
name="bias",
shape=(self.depth_multiplier * input_channel,),
initializer=self.bias_initializer,
regularizer=self.bias_regularizer,
constraint=self.bias_constraint,
trainable=True,
dtype=self.dtype,
)
else:
self.bias = None
def _get_input_channel(self, input_shape):
if self.data_format == "channels_last":
input_channel = input_shape[-1]
else:
input_channel = input_shape[1]
return input_channel
def call(self, inputs):
input_channel = self._get_input_channel(inputs.shape)
outputs = ops.depthwise_conv(
inputs,
self.kernel,
strides=self.strides,
padding=self.padding,
dilation_rate=self.dilation_rate,
data_format=self.data_format,
)
if self.use_bias:
if self.data_format == "channels_last":
bias_shape = (1,) * (self.rank + 1) + (
self.depth_multiplier * input_channel,
)
else:
bias_shape = (1, self.depth_multiplier * input_channel) + (
1,
) * self.rank
bias = ops.reshape(self.bias, bias_shape)
outputs = ops.add(outputs, bias)
if self.activation is not None:
return self.activation(outputs)
return outputs
def compute_output_shape(self, input_shape):
input_channel = self._get_input_channel(input_shape)
return compute_conv_output_shape(
input_shape,
self.depth_multiplier * input_channel,
self.kernel_size,
strides=self.strides,
padding=self.padding,
data_format=self.data_format,
dilation_rate=self.dilation_rate,
)
def get_config(self):
config = super().get_config()
config.update(
{
"depth_multiplier": self.depth_multiplier,
"kernel_size": self.kernel_size,
"strides": self.strides,
"padding": self.padding,
"data_format": self.data_format,
"dilation_rate": self.dilation_rate,
"activation": activations.serialize(self.activation),
"use_bias": self.use_bias,
"depthwise_initializer": initializers.serialize(
self.depthwise_initializer
),
"bias_initializer": initializers.serialize(
self.bias_initializer
),
"depthwise_regularizer": regularizers.serialize(
self.depthwise_regularizer
),
"bias_regularizer": regularizers.serialize(
self.bias_regularizer
),
"activity_regularizer": regularizers.serialize(
self.activity_regularizer
),
"depthwise_constraint": constraints.serialize(
self.depthwise_constraint
),
"bias_constraint": constraints.serialize(self.bias_constraint),
}
)
return config
|
BaseDepthwiseConv
|
python
|
huggingface__transformers
|
src/transformers/models/siglip2/modular_siglip2.py
|
{
"start": 1459,
"end": 4866
}
|
class ____(SiglipVisionConfig):
r"""
This is the configuration class to store the configuration of a [`Siglip2VisionModel`]. It is used to instantiate a
Siglip2 vision encoder according to the specified arguments, defining the model architecture. Instantiating a
configuration with the defaults will yield a similar configuration to that of the vision encoder of the Siglip2
[google/siglip2-base-patch16-naflex](https://huggingface.co/google/siglip2-base-patch16-naflex) architecture.
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
hidden_size (`int`, *optional*, defaults to 768):
Dimensionality of the encoder layers and the pooler layer.
intermediate_size (`int`, *optional*, defaults to 3072):
Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
num_hidden_layers (`int`, *optional*, defaults to 12):
Number of hidden layers in the Transformer encoder.
num_attention_heads (`int`, *optional*, defaults to 12):
Number of attention heads for each attention layer in the Transformer encoder.
num_channels (`int`, *optional*, defaults to 3):
Number of channels in the input images.
num_patches (`int`, *optional*, defaults to 256):
The number of patches in the image with the size of (`patch_size`, `patch_size`).
The image is resized to fill maximum of this number of patches, and to preserve
the aspect ratio. In case the resulted number of patches is lower, the image is
padded in "patch" dimension.
patch_size (`int`, *optional*, defaults to 16):
The size (resolution) of each patch.
hidden_act (`str` or `function`, *optional*, defaults to `"gelu_pytorch_tanh"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"selu"` and `"gelu_new"` `"quick_gelu"` are supported.
layer_norm_eps (`float`, *optional*, defaults to 1e-06):
The epsilon used by the layer normalization layers.
attention_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
Example:
```python
>>> from transformers import Siglip2VisionConfig, Siglip2VisionModel
>>> # Initializing a Siglip2VisionConfig with google/siglip2-base-patch16-naflex style configuration
>>> configuration = Siglip2VisionConfig()
>>> # Initializing a Siglip2VisionModel (with random weights) from the google/siglip2-base-patch16-naflex style configuration
>>> model = Siglip2VisionModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
def __init__(
self,
hidden_size=768,
intermediate_size=3072,
num_hidden_layers=12,
num_attention_heads=12,
num_channels=3,
num_patches=256,
patch_size=16,
hidden_act="gelu_pytorch_tanh",
layer_norm_eps=1e-6,
attention_dropout=0.0,
**kwargs,
):
super().__init__(**kwargs)
self.num_patches = num_patches
del self.image_size
|
Siglip2VisionConfig
|
python
|
PyCQA__pylint
|
tests/functional/i/init_not_called.py
|
{
"start": 1254,
"end": 1583
}
|
class ____(Parent):
@overload
def __init__(self, num: int):
...
@overload
def __init__(self, num: float):
...
def __init__(self, num):
super().__init__(round(num))
# https://github.com/pylint-dev/pylint/issues/7742
# Crash when parent class has a class attribute named `__init__`
|
Child
|
python
|
django__django
|
tests/migrations/test_migrations_conflict/0002_second.py
|
{
"start": 43,
"end": 648
}
|
class ____(migrations.Migration):
dependencies = [("migrations", "0001_initial")]
operations = [
migrations.DeleteModel("Tribble"),
migrations.RemoveField("Author", "silly_field"),
migrations.AddField("Author", "rating", models.IntegerField(default=0)),
migrations.CreateModel(
"Book",
[
("id", models.AutoField(primary_key=True)),
(
"author",
models.ForeignKey("migrations.Author", models.SET_NULL, null=True),
),
],
),
]
|
Migration
|
python
|
cython__cython
|
Cython/Compiler/Nodes.py
|
{
"start": 296292,
"end": 298666
}
|
class ____(StatNode):
# print statement
#
# arg_tuple TupleNode
# stream ExprNode or None (stdout)
# append_newline boolean
child_attrs = ["arg_tuple", "stream"]
def analyse_expressions(self, env):
if self.stream:
stream = self.stream.analyse_expressions(env)
self.stream = stream.coerce_to_pyobject(env)
arg_tuple = self.arg_tuple.analyse_expressions(env)
self.arg_tuple = arg_tuple.coerce_to_pyobject(env)
env.use_utility_code(printing_utility_code)
if len(self.arg_tuple.args) == 1 and self.append_newline:
env.use_utility_code(printing_one_utility_code)
return self
nogil_check = Node.gil_error
gil_message = "Python print statement"
def generate_execution_code(self, code):
code.mark_pos(self.pos)
if self.stream:
self.stream.generate_evaluation_code(code)
stream_result = self.stream.py_result()
else:
stream_result = '0'
if len(self.arg_tuple.args) == 1 and self.append_newline:
arg = self.arg_tuple.args[0]
arg.generate_evaluation_code(code)
code.putln(
"if (__Pyx_PrintOne(%s, %s) < 0) %s" % (
stream_result,
arg.py_result(),
code.error_goto(self.pos)))
arg.generate_disposal_code(code)
arg.free_temps(code)
else:
self.arg_tuple.generate_evaluation_code(code)
code.putln(
"if (__Pyx_Print(%s, %s, %d) < 0) %s" % (
stream_result,
self.arg_tuple.py_result(),
self.append_newline,
code.error_goto(self.pos)))
self.arg_tuple.generate_disposal_code(code)
self.arg_tuple.free_temps(code)
if self.stream:
self.stream.generate_disposal_code(code)
self.stream.free_temps(code)
def generate_function_definitions(self, env, code):
if self.stream:
self.stream.generate_function_definitions(env, code)
self.arg_tuple.generate_function_definitions(env, code)
def annotate(self, code):
if self.stream:
self.stream.annotate(code)
self.arg_tuple.annotate(code)
|
PrintStatNode
|
python
|
tensorflow__tensorflow
|
tensorflow/python/client/session.py
|
{
"start": 11808,
"end": 14695
}
|
class ____(_FetchMapper):
"""Fetch mapper for singleton tensors and ops."""
def __init__(self, fetches, contraction_fn):
"""Creates an _ElementFetchMapper.
This is the fetch mapper used for leaves in the fetch struct. Because of
the expansions mechanism, a leaf can actually fetch more than one tensor.
Also note that the fetches here can be just strings (tensor or op names) or
any other object that the graph knows how to convert to a tensor, such as a
Variable. So we have to run each fetch through `as_graph_element()` to get
the corresponding tensor or op.
Args:
fetches: List of objects, as returned by a fetch_fn defined in
_REGISTERED_EXPANSIONS.
contraction_fn: Callable as returned by a fetch_fn.
"""
self._unique_fetches = []
for fetch in fetches:
try:
self._unique_fetches.append(ops.get_default_graph().as_graph_element(
fetch, allow_tensor=True, allow_operation=True))
except TypeError as e:
raise TypeError(f'Argument `fetch` = {fetch} has invalid type '
f'"{type(fetch).__name__}" must be a string or Tensor. '
f'({str(e)})')
except ValueError as e:
raise ValueError(f'Argument `fetch` = {fetch} cannot be interpreted as '
f'a Tensor. ({str(e)})')
except KeyError as e:
raise ValueError(f'Argument `fetch` = {fetch} cannot be interpreted as '
f'a Tensor. ({str(e)})')
self._contraction_fn = contraction_fn
def unique_fetches(self):
return self._unique_fetches
def build_results(self, values):
if not values:
# 'Operation' case
return None
else:
return self._contraction_fn(values)
def _uniquify_fetches(fetch_mappers):
"""Uniquifies fetches from a list of fetch_mappers.
This is a utility function used by _ListFetchMapper and _DictFetchMapper. It
gathers all the unique fetches from a list of mappers and builds a list
containing all of them but without duplicates (unique_fetches).
It also returns a 2-D list of integers (values_indices) indicating at which
index in unique_fetches the fetches of the mappers are located.
This list is as follows:
values_indices[mapper_index][mapper_fetch_index] = unique_fetches_index
Args:
fetch_mappers: list of fetch mappers.
Returns:
A list of fetches.
A 2-D list of integers.
"""
unique_fetches = []
value_indices = []
seen_fetches = {}
for m in fetch_mappers:
m_value_indices = []
for f in m.unique_fetches():
j = seen_fetches.get(id(f))
if j is None:
j = len(seen_fetches)
seen_fetches[id(f)] = j
unique_fetches.append(f)
m_value_indices.append(j)
value_indices.append(m_value_indices)
return unique_fetches, value_indices
|
_ElementFetchMapper
|
python
|
dagster-io__dagster
|
python_modules/dagster/dagster_tests/logging_tests/test_logging.py
|
{
"start": 6866,
"end": 21928
}
|
class ____(logging.Filter):
def __init__(self, filter_level):
super().__init__()
self.filter_level = filter_level
def filter(self, record):
record.msg = f"{record.msg} default logger is {DAGSTER_DEFAULT_LOGGER}"
return record.levelno == self.filter_level
def test_capture_handler_log_records():
capture_handler = CaptureHandler()
dl = DagsterLogManager.create(
loggers=[],
handlers=[capture_handler],
dagster_run=dg.DagsterRun(run_id="123456", job_name="pipeline"),
).with_tags(step_key="some_step")
dl.info("info")
dl.critical("critical error", extra={"foo": "bar"})
assert len(capture_handler.captured) == 2
captured_info_record = capture_handler.captured[0]
assert captured_info_record.name == "dagster"
assert captured_info_record.msg == "pipeline - 123456 - some_step - info"
assert captured_info_record.levelno == logging.INFO
captured_critical_record = capture_handler.captured[1]
assert captured_critical_record.name == "dagster"
assert captured_critical_record.msg == "pipeline - 123456 - some_step - critical error"
assert captured_critical_record.levelno == logging.CRITICAL
assert captured_critical_record.foo == "bar"
def test_default_context_logging():
called = {}
@dg.op
def default_context_op(context):
called["yes"] = True
for logger in context.log._dagster_handler._loggers: # noqa: SLF001
assert logger.level == logging.DEBUG
wrap_op_in_graph_and_execute(default_context_op)
assert called["yes"]
def test_colored_console_logger_with_integer_log_level():
@dg.job
def pipe():
pass
colored_console_logger.logger_fn(
dg.InitLoggerContext(
{"name": "dagster", "log_level": 4},
dg.colored_console_logger,
job_def=pipe,
)
)
def test_json_console_logger(capsys):
@dg.op
def hello_world(context):
context.log.info("Hello, world!")
wrap_op_in_graph_and_execute(
hello_world,
logger_defs={"json": dg.json_console_logger},
run_config={"loggers": {"json": {"config": {}}}},
)
captured = capsys.readouterr()
found_msg = False
for line in captured.err.split("\n"):
if line:
parsed = json.loads(line)
assert "dagster_event" not in parsed
if parsed[LOG_RECORD_METADATA_ATTR]["orig_message"] == "Hello, world!":
found_msg = True
assert found_msg
def test_json_console_logger_run_failure(capsys):
@dg.op
def failing_op(context):
context.log.info("Hello, world!")
assert False
wrap_op_in_graph_and_execute(
failing_op,
logger_defs={"json": dg.json_console_logger},
run_config={"loggers": {"json": {"config": {}}}},
raise_on_error=False,
)
captured = capsys.readouterr()
found_msg = False
for line in captured.err.split("\n"):
if line:
parsed = json.loads(line)
assert "dagster_event" not in parsed
if parsed[LOG_RECORD_METADATA_ATTR]["orig_message"] == "Hello, world!":
found_msg = True
assert found_msg
def test_job_logging(capsys):
@dg.op
def foo(context):
context.log.info("bar")
return 0
@dg.op
def foo2(context, _in1):
context.log.info("baz")
@dg.job
def pipe():
foo2(foo())
pipe.execute_in_process()
captured = capsys.readouterr()
expected_log_regexes = [
r"dagster - INFO - pipe - [a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-"
r"[a-f0-9]{12} - foo - bar",
r"dagster - INFO - pipe - [a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-"
r"[a-f0-9]{12} - foo2 - baz",
]
for expected_log_regex in expected_log_regexes:
assert re.search(expected_log_regex, captured.err, re.MULTILINE)
def test_resource_logging(capsys):
@dg.resource
def foo_resource(init_context):
def fn():
init_context.log.info("test logging from foo resource")
return fn
@dg.resource
def bar_resource(init_context):
def fn():
init_context.log.info("test logging from bar resource")
return fn
@dg.op(required_resource_keys={"foo", "bar"})
def process(context):
context.resources.foo()
context.resources.bar()
wrap_op_in_graph_and_execute(
process,
resources={"foo": foo_resource, "bar": bar_resource},
)
captured = capsys.readouterr()
expected_log_regexes = [
r"dagster - INFO - resource:foo - [a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-"
r"[a-f0-9]{12} - process - test logging from foo resource",
r"dagster - INFO - resource:bar - [a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-"
r"[a-f0-9]{12} - process - test logging from bar resource",
]
for expected_log_regex in expected_log_regexes:
assert re.search(expected_log_regex, captured.err, re.MULTILINE)
def test_io_context_logging(capsys):
@dg.op
def logged_op(context):
context.get_step_execution_context().get_output_context(
StepOutputHandle("logged_op", "result")
).log.debug("test OUTPUT debug logging from logged_op.")
context.get_step_execution_context().for_input_manager(
"logged_op", {}, {}, None, source_handle=None
).log.debug("test INPUT debug logging from logged_op.")
result = wrap_op_in_graph_and_execute(logged_op)
assert result.success
captured = capsys.readouterr()
assert re.search("test OUTPUT debug logging from logged_op.", captured.err, re.MULTILINE)
assert re.search("test INPUT debug logging from logged_op.", captured.err, re.MULTILINE)
@dg.op
def log_op(context):
context.log.info("Hello world")
context.log.error("My test error")
@dg.job
def log_job():
log_op()
def test_conf_file_logging(capsys):
config_settings = {
"python_logs": {
"dagster_handler_config": {
"handlers": {
"handlerOne": {
"class": "logging.StreamHandler",
# "class": "logging.FileHandler",
"level": "INFO",
# "filename": "/Users/smackesey/stm/desktop/mydaglog.log",
# "mode": "a",
"stream": "ext://sys.stdout",
},
"handlerTwo": {
"class": "logging.StreamHandler",
"level": "ERROR",
"stream": "ext://sys.stdout",
},
},
}
}
}
with dg.instance_for_test(overrides=config_settings) as instance:
log_job.execute_in_process(instance=instance)
out, _ = capsys.readouterr()
# currently the format of dict-inputted handlers is undetermined, so
# we only check for the expected message
assert re.search(r"Hello world", out)
assert len(re.findall(r"My test error", out)) == 2
def test_custom_class_handler(capsys):
output_msg = "Record handled: "
config_settings = {
"python_logs": {
"dagster_handler_config": {
"handlers": {
"handlerOne": {
"()": "dagster_tests.logging_tests.test_logging.CaptureHandler",
"level": "INFO",
"output": output_msg,
}
},
},
}
}
with dg.instance_for_test(overrides=config_settings) as instance:
log_job.execute_in_process(instance=instance)
out, _ = capsys.readouterr()
assert re.search(r".*Record handled: .*Hello world.*", out)
def test_error_when_logger_defined_yaml():
config_settings = {
"python_logs": {
"dagster_handler_config": {
"loggers": {
"my_logger": {"level": "WARNING", "propagate": False},
},
},
}
}
with pytest.raises(dg.DagsterInvalidConfigError):
with dg.instance_for_test(overrides=config_settings) as instance:
log_job.execute_in_process(instance=instance)
def test_conf_log_formatter(capsys):
config_settings = {
"python_logs": {
"dagster_handler_config": {
"handlers": {
"handlerOne": {
"class": "logging.StreamHandler",
"level": "INFO",
"stream": "ext://sys.stdout",
"formatter": "myFormatter",
},
},
"formatters": {
"myFormatter": {
"format": "My formatted message: %(message)s",
}
},
}
}
}
with dg.instance_for_test(overrides=config_settings) as instance:
log_job.execute_in_process(instance=instance)
out, _ = capsys.readouterr()
# currently the format of dict-inputted handlers is undetermined, so
# we only check for the expected message
assert re.search(r"My formatted message: ", out)
def test_conf_log_formatter_custom(capsys):
config_settings = {
"python_logs": {
"dagster_handler_config": {
"handlers": {
"handlerOne": {
"class": "logging.StreamHandler",
"level": "INFO",
"stream": "ext://sys.stdout",
"formatter": "myFormatter",
},
},
"formatters": {
"myFormatter": {
"()": "dagster_tests.logging_tests.test_logging.CustomFormatter",
}
},
}
}
}
with dg.instance_for_test(overrides=config_settings) as instance:
log_job.execute_in_process(instance=instance)
out, _ = capsys.readouterr()
assert re.search(r"I was formatted", out)
def test_conf_log_filter(capsys):
config_settings = {
"python_logs": {
"dagster_handler_config": {
"handlers": {
"handlerOne": {
"class": "logging.StreamHandler",
"level": "INFO",
"stream": "ext://sys.stderr",
"formatter": "myFormatter",
"filters": ["myFilter"],
},
},
"formatters": {
"myFormatter": {
"format": "Filter me out: %(message)s",
}
},
"filters": {"myFilter": {"name": "none"}},
}
}
}
with dg.instance_for_test(overrides=config_settings) as instance:
log_job.execute_in_process(instance=instance)
_, err = capsys.readouterr()
assert not re.search(r"Filter me out", err)
def test_conf_log_filter_custom_with_context(capsys):
config_settings = {
"python_logs": {
"dagster_handler_config": {
"handlers": {
"handlerOne": {
"class": "logging.StreamHandler",
"level": "INFO",
"stream": "ext://sys.stdout",
"filters": ["myFilter"],
},
},
"filters": {
"myFilter": {
"()": "dagster_tests.logging_tests.test_logging.CustomLevelFilter",
"filter_level": logging.ERROR,
}
},
}
}
}
with dg.instance_for_test(overrides=config_settings) as instance:
log_job.execute_in_process(instance=instance)
out, _ = capsys.readouterr()
assert not re.search(r"Hello world", out)
assert re.search(rf"My test error default logger is {DAGSTER_DEFAULT_LOGGER}", out)
def test_python_multithread_context_logging():
def logging_background_thread(thread_name, context):
for i in range(1, 4):
context.log.info(f"Background thread: {thread_name}, message #: {i}")
@dg.op
def logged_op(context):
threads = []
for thread_name in range(1, 5):
thread = threading.Thread(
target=partial(logging_background_thread, thread_name, context),
)
thread.start()
threads.append(thread)
for thread in threads:
thread.join()
@dg.job
def foo_job():
logged_op()
with dg.instance_for_test() as instance:
result = foo_job.execute_in_process(instance=instance)
logs = instance.event_log_storage.get_logs_for_run(result.run_id)
relevant_logs = [log for log in logs if "Background thread: " in log.user_message]
# We would expect 3 log messages per our 4 threads
assert len(relevant_logs) == 3 * 4
def test_python_log_level_context_logging():
@dg.op
def logged_op(context):
context.log.error("some error")
@dg.job
def foo_job():
logged_op()
with dg.instance_for_test() as instance:
result = foo_job.execute_in_process(instance=instance)
logs_default = instance.event_log_storage.get_logs_for_run(result.run_id)
with dg.instance_for_test(
overrides={"python_logs": {"python_log_level": "CRITICAL"}}
) as instance:
result = foo_job.execute_in_process(instance=instance)
logs_critical = instance.event_log_storage.get_logs_for_run(result.run_id)
assert len(logs_critical) > 0 # DagsterEvents should still be logged
assert len(logs_default) == len(logs_critical) + 1
def test_system_logging():
with dg.instance_for_test(
overrides={"python_logs": {"python_log_level": "CRITICAL"}}
) as instance:
assert dg.default_system_loggers(instance) == [
(dg.colored_console_logger, {"name": "dagster", "log_level": "CRITICAL"})
]
assert dg.default_system_loggers(None) == [
(dg.colored_console_logger, {"name": "dagster", "log_level": "DEBUG"})
]
@dg.op
def logger_op():
pass
@dg.job
def logger_job():
logger_op()
def test_system_logger_output(capfd):
with dg.instance_for_test() as instance:
dg.execute_job(dg.reconstructable(logger_job), instance)
captured = capfd.readouterr()
# System logs in stderr at default log level
assert "STEP_WORKER_STARTING" in captured.err
with dg.instance_for_test(overrides={"python_logs": {"python_log_level": "INFO"}}) as instance:
dg.execute_job(dg.reconstructable(logger_job), instance)
captured = capfd.readouterr()
# but not at raised log level above DEBUG
assert "STEP_WORKER_STARTING" not in captured.err
|
CustomLevelFilter
|
python
|
has2k1__plotnine
|
plotnine/_mpl/layout_manager/_layout_items.py
|
{
"start": 1678,
"end": 4958
}
|
class ____:
"""
Calculate space taken up by an artist
"""
# fig: Figure
# renderer: RendererBase
plot: ggplot
def __post_init__(self):
self.figure = self.plot.figure
self.renderer = cast("RendererBase", self.plot.figure._get_renderer()) # pyright: ignore
def bbox(self, artist: Artist) -> Bbox:
"""
Bounding box of artist in figure coordinates
"""
return bbox_in_figure_space(artist, self.figure, self.renderer)
def tight_bbox(self, artist: Artist) -> Bbox:
"""
Bounding box of artist and its children in figure coordinates
"""
return tight_bbox_in_figure_space(artist, self.figure, self.renderer)
def width(self, artist: Artist) -> float:
"""
Width of artist in figure space
"""
return self.bbox(artist).width
def tight_width(self, artist: Artist) -> float:
"""
Width of artist and its children in figure space
"""
return self.tight_bbox(artist).width
def height(self, artist: Artist) -> float:
"""
Height of artist in figure space
"""
return self.bbox(artist).height
def tight_height(self, artist: Artist) -> float:
"""
Height of artist and its children in figure space
"""
return self.tight_bbox(artist).height
def size(self, artist: Artist) -> tuple[float, float]:
"""
(width, height) of artist in figure space
"""
bbox = self.bbox(artist)
return (bbox.width, bbox.height)
def tight_size(self, artist: Artist) -> tuple[float, float]:
"""
(width, height) of artist and its children in figure space
"""
bbox = self.tight_bbox(artist)
return (bbox.width, bbox.height)
def left_x(self, artist: Artist) -> float:
"""
x value of the left edge of the artist
---
x |
---
"""
return self.bbox(artist).min[0]
def right_x(self, artist: Artist) -> float:
"""
x value of the left edge of the artist
---
| x
---
"""
return self.bbox(artist).max[0]
def top_y(self, artist: Artist) -> float:
"""
y value of the top edge of the artist
-y-
| |
---
"""
return self.bbox(artist).max[1]
def bottom_y(self, artist: Artist) -> float:
"""
y value of the bottom edge of the artist
---
| |
-y-
"""
return self.bbox(artist).min[1]
def max_width(self, artists: Sequence[Artist]) -> float:
"""
Return the maximum width of list of artists
"""
widths = [
bbox_in_figure_space(a, self.figure, self.renderer).width
for a in artists
]
return max(widths) if len(widths) else 0
def max_height(self, artists: Sequence[Artist]) -> float:
"""
Return the maximum height of list of artists
"""
heights = [
bbox_in_figure_space(a, self.figure, self.renderer).height
for a in artists
]
return max(heights) if len(heights) else 0
@dataclass
|
Calc
|
python
|
django__django
|
tests/file_storage/test_inmemory_storage.py
|
{
"start": 5755,
"end": 9446
}
|
class ____(unittest.TestCase):
def setUp(self):
self.storage = InMemoryStorage()
def test_file_modified_time(self):
"""
File modified time should change after file changing
"""
self.storage.save("file.txt", ContentFile("test"))
modified_time = self.storage.get_modified_time("file.txt")
time.sleep(0.1)
with self.storage.open("file.txt", "w") as fd:
fd.write("new content")
new_modified_time = self.storage.get_modified_time("file.txt")
self.assertTrue(new_modified_time > modified_time)
def test_file_accessed_time(self):
"""File accessed time should change after consecutive opening."""
self.storage.save("file.txt", ContentFile("test"))
accessed_time = self.storage.get_accessed_time("file.txt")
time.sleep(0.1)
self.storage.open("file.txt", "r")
new_accessed_time = self.storage.get_accessed_time("file.txt")
self.assertGreater(new_accessed_time, accessed_time)
def test_file_created_time(self):
"""File creation time should not change after I/O operations."""
self.storage.save("file.txt", ContentFile("test"))
created_time = self.storage.get_created_time("file.txt")
time.sleep(0.1)
# File opening doesn't change creation time.
file = self.storage.open("file.txt", "r")
after_open_created_time = self.storage.get_created_time("file.txt")
self.assertEqual(after_open_created_time, created_time)
# Writing to a file doesn't change its creation time.
file.write("New test")
self.storage.save("file.txt", file)
after_write_created_time = self.storage.get_created_time("file.txt")
self.assertEqual(after_write_created_time, created_time)
def test_directory_times_changing_after_file_creation(self):
"""
Directory modified and accessed time should change when a new file is
created inside.
"""
self.storage.save("dir/file1.txt", ContentFile("test"))
created_time = self.storage.get_created_time("dir")
modified_time = self.storage.get_modified_time("dir")
accessed_time = self.storage.get_accessed_time("dir")
time.sleep(0.1)
self.storage.save("dir/file2.txt", ContentFile("test"))
new_modified_time = self.storage.get_modified_time("dir")
new_accessed_time = self.storage.get_accessed_time("dir")
after_file_creation_created_time = self.storage.get_created_time("dir")
self.assertGreater(new_modified_time, modified_time)
self.assertGreater(new_accessed_time, accessed_time)
self.assertEqual(created_time, after_file_creation_created_time)
def test_directory_times_changing_after_file_deletion(self):
"""
Directory modified and accessed time should change when a new file is
deleted inside.
"""
self.storage.save("dir/file.txt", ContentFile("test"))
created_time = self.storage.get_created_time("dir")
modified_time = self.storage.get_modified_time("dir")
accessed_time = self.storage.get_accessed_time("dir")
time.sleep(0.1)
self.storage.delete("dir/file.txt")
new_modified_time = self.storage.get_modified_time("dir")
new_accessed_time = self.storage.get_accessed_time("dir")
after_file_deletion_created_time = self.storage.get_created_time("dir")
self.assertGreater(new_modified_time, modified_time)
self.assertGreater(new_accessed_time, accessed_time)
self.assertEqual(created_time, after_file_deletion_created_time)
|
MemoryStorageTimesTests
|
python
|
pytorch__pytorch
|
torch/ao/pruning/_experimental/pruner/lstm_saliency_pruner.py
|
{
"start": 184,
"end": 2197
}
|
class ____(BaseStructuredSparsifier):
"""
Prune packed LSTM weights based on saliency.
For each layer {k} inside a LSTM, we have two packed weight matrices
- weight_ih_l{k}
- weight_hh_l{k}
These tensors pack the weights for the 4 linear layers together for efficiency.
[W_ii | W_if | W_ig | W_io]
Pruning this tensor directly will lead to weights being misassigned when unpacked.
To ensure that each packed linear layer is pruned the same amount:
1. We split the packed weight into the 4 constituent linear parts
2. Update the mask for each individual piece using saliency individually
This applies to both weight_ih_l{k} and weight_hh_l{k}.
"""
def update_mask(self, module: nn.Module, tensor_name: str, **kwargs: Any) -> None:
weights = getattr(module, tensor_name)
for p in getattr(module.parametrizations, tensor_name):
if isinstance(p, FakeStructuredSparsity):
mask = cast(torch.Tensor, p.mask)
# select weights based on magnitude
if weights.dim() <= 1:
raise Exception( # noqa: TRY002
"Structured pruning can only be applied to a 2+dim weight tensor!"
)
# take norm over all but first dim
dims = tuple(range(1, weights.dim()))
saliency = weights.norm(dim=dims, p=1)
# handle weights in 4 groups
split_size = len(mask) // 4
masks = torch.split(mask, split_size)
saliencies = torch.split(saliency, split_size)
for keep_mask, sal in zip(masks, saliencies):
# mask smallest k values to be removed
k = int(len(keep_mask) * kwargs["sparsity_level"])
prune = sal.topk(k, largest=False, sorted=False).indices
keep_mask.data[prune] = False # modifies underlying p.mask directly
|
LSTMSaliencyPruner
|
python
|
django__django
|
tests/cache/tests.py
|
{
"start": 1929,
"end": 2817
}
|
class ____:
def __getstate__(self):
raise pickle.PickleError()
def empty_response(request):
return HttpResponse()
KEY_ERRORS_WITH_MEMCACHED_MSG = (
"Cache key contains characters that will cause errors if used with memcached: %r"
)
def retry(retries=3, delay=1):
def decorator(func):
@wraps(func)
def wrapper(*args, **kwargs):
attempts = 0
while attempts < retries:
try:
return func(*args, **kwargs)
except AssertionError:
attempts += 1
if attempts >= retries:
raise
time.sleep(delay)
return wrapper
return decorator
@override_settings(
CACHES={
"default": {
"BACKEND": "django.core.cache.backends.dummy.DummyCache",
}
}
)
|
Unpicklable
|
python
|
kubernetes-client__python
|
kubernetes/client/api/openid_api.py
|
{
"start": 543,
"end": 5464
}
|
class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def get_service_account_issuer_open_id_keyset(self, **kwargs): # noqa: E501
"""get_service_account_issuer_open_id_keyset # noqa: E501
get service account issuer OpenID JSON Web Key Set (contains public token verification keys) # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_service_account_issuer_open_id_keyset(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: str
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.get_service_account_issuer_open_id_keyset_with_http_info(**kwargs) # noqa: E501
def get_service_account_issuer_open_id_keyset_with_http_info(self, **kwargs): # noqa: E501
"""get_service_account_issuer_open_id_keyset # noqa: E501
get service account issuer OpenID JSON Web Key Set (contains public token verification keys) # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_service_account_issuer_open_id_keyset_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(str, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method get_service_account_issuer_open_id_keyset" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/jwk-set+json']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/openid/v1/jwks', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='str', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
|
OpenidApi
|
python
|
gevent__gevent
|
src/greentest/3.10/test_httplib.py
|
{
"start": 3797,
"end": 14126
}
|
class ____(TestCase):
def test_auto_headers(self):
# Some headers are added automatically, but should not be added by
# .request() if they are explicitly set.
class HeaderCountingBuffer(list):
def __init__(self):
self.count = {}
def append(self, item):
kv = item.split(b':')
if len(kv) > 1:
# item is a 'Key: Value' header string
lcKey = kv[0].decode('ascii').lower()
self.count.setdefault(lcKey, 0)
self.count[lcKey] += 1
list.append(self, item)
for explicit_header in True, False:
for header in 'Content-length', 'Host', 'Accept-encoding':
conn = client.HTTPConnection('example.com')
conn.sock = FakeSocket('blahblahblah')
conn._buffer = HeaderCountingBuffer()
body = 'spamspamspam'
headers = {}
if explicit_header:
headers[header] = str(len(body))
conn.request('POST', '/', body, headers)
self.assertEqual(conn._buffer.count[header.lower()], 1)
def test_content_length_0(self):
class ContentLengthChecker(list):
def __init__(self):
list.__init__(self)
self.content_length = None
def append(self, item):
kv = item.split(b':', 1)
if len(kv) > 1 and kv[0].lower() == b'content-length':
self.content_length = kv[1].strip()
list.append(self, item)
# Here, we're testing that methods expecting a body get a
# content-length set to zero if the body is empty (either None or '')
bodies = (None, '')
methods_with_body = ('PUT', 'POST', 'PATCH')
for method, body in itertools.product(methods_with_body, bodies):
conn = client.HTTPConnection('example.com')
conn.sock = FakeSocket(None)
conn._buffer = ContentLengthChecker()
conn.request(method, '/', body)
self.assertEqual(
conn._buffer.content_length, b'0',
'Header Content-Length incorrect on {}'.format(method)
)
# For these methods, we make sure that content-length is not set when
# the body is None because it might cause unexpected behaviour on the
# server.
methods_without_body = (
'GET', 'CONNECT', 'DELETE', 'HEAD', 'OPTIONS', 'TRACE',
)
for method in methods_without_body:
conn = client.HTTPConnection('example.com')
conn.sock = FakeSocket(None)
conn._buffer = ContentLengthChecker()
conn.request(method, '/', None)
self.assertEqual(
conn._buffer.content_length, None,
'Header Content-Length set for empty body on {}'.format(method)
)
# If the body is set to '', that's considered to be "present but
# empty" rather than "missing", so content length would be set, even
# for methods that don't expect a body.
for method in methods_without_body:
conn = client.HTTPConnection('example.com')
conn.sock = FakeSocket(None)
conn._buffer = ContentLengthChecker()
conn.request(method, '/', '')
self.assertEqual(
conn._buffer.content_length, b'0',
'Header Content-Length incorrect on {}'.format(method)
)
# If the body is set, make sure Content-Length is set.
for method in itertools.chain(methods_without_body, methods_with_body):
conn = client.HTTPConnection('example.com')
conn.sock = FakeSocket(None)
conn._buffer = ContentLengthChecker()
conn.request(method, '/', ' ')
self.assertEqual(
conn._buffer.content_length, b'1',
'Header Content-Length incorrect on {}'.format(method)
)
def test_putheader(self):
conn = client.HTTPConnection('example.com')
conn.sock = FakeSocket(None)
conn.putrequest('GET','/')
conn.putheader('Content-length', 42)
self.assertIn(b'Content-length: 42', conn._buffer)
conn.putheader('Foo', ' bar ')
self.assertIn(b'Foo: bar ', conn._buffer)
conn.putheader('Bar', '\tbaz\t')
self.assertIn(b'Bar: \tbaz\t', conn._buffer)
conn.putheader('Authorization', 'Bearer mytoken')
self.assertIn(b'Authorization: Bearer mytoken', conn._buffer)
conn.putheader('IterHeader', 'IterA', 'IterB')
self.assertIn(b'IterHeader: IterA\r\n\tIterB', conn._buffer)
conn.putheader('LatinHeader', b'\xFF')
self.assertIn(b'LatinHeader: \xFF', conn._buffer)
conn.putheader('Utf8Header', b'\xc3\x80')
self.assertIn(b'Utf8Header: \xc3\x80', conn._buffer)
conn.putheader('C1-Control', b'next\x85line')
self.assertIn(b'C1-Control: next\x85line', conn._buffer)
conn.putheader('Embedded-Fold-Space', 'is\r\n allowed')
self.assertIn(b'Embedded-Fold-Space: is\r\n allowed', conn._buffer)
conn.putheader('Embedded-Fold-Tab', 'is\r\n\tallowed')
self.assertIn(b'Embedded-Fold-Tab: is\r\n\tallowed', conn._buffer)
conn.putheader('Key Space', 'value')
self.assertIn(b'Key Space: value', conn._buffer)
conn.putheader('KeySpace ', 'value')
self.assertIn(b'KeySpace : value', conn._buffer)
conn.putheader(b'Nonbreak\xa0Space', 'value')
self.assertIn(b'Nonbreak\xa0Space: value', conn._buffer)
conn.putheader(b'\xa0NonbreakSpace', 'value')
self.assertIn(b'\xa0NonbreakSpace: value', conn._buffer)
def test_ipv6host_header(self):
# Default host header on IPv6 transaction should be wrapped by [] if
# it is an IPv6 address
expected = b'GET /foo HTTP/1.1\r\nHost: [2001::]:81\r\n' \
b'Accept-Encoding: identity\r\n\r\n'
conn = client.HTTPConnection('[2001::]:81')
sock = FakeSocket('')
conn.sock = sock
conn.request('GET', '/foo')
self.assertTrue(sock.data.startswith(expected))
expected = b'GET /foo HTTP/1.1\r\nHost: [2001:102A::]\r\n' \
b'Accept-Encoding: identity\r\n\r\n'
conn = client.HTTPConnection('[2001:102A::]')
sock = FakeSocket('')
conn.sock = sock
conn.request('GET', '/foo')
self.assertTrue(sock.data.startswith(expected))
def test_malformed_headers_coped_with(self):
# Issue 19996
body = "HTTP/1.1 200 OK\r\nFirst: val\r\n: nval\r\nSecond: val\r\n\r\n"
sock = FakeSocket(body)
resp = client.HTTPResponse(sock)
resp.begin()
self.assertEqual(resp.getheader('First'), 'val')
self.assertEqual(resp.getheader('Second'), 'val')
def test_parse_all_octets(self):
# Ensure no valid header field octet breaks the parser
body = (
b'HTTP/1.1 200 OK\r\n'
b"!#$%&'*+-.^_`|~: value\r\n" # Special token characters
b'VCHAR: ' + bytes(range(0x21, 0x7E + 1)) + b'\r\n'
b'obs-text: ' + bytes(range(0x80, 0xFF + 1)) + b'\r\n'
b'obs-fold: text\r\n'
b' folded with space\r\n'
b'\tfolded with tab\r\n'
b'Content-Length: 0\r\n'
b'\r\n'
)
sock = FakeSocket(body)
resp = client.HTTPResponse(sock)
resp.begin()
self.assertEqual(resp.getheader('Content-Length'), '0')
self.assertEqual(resp.msg['Content-Length'], '0')
self.assertEqual(resp.getheader("!#$%&'*+-.^_`|~"), 'value')
self.assertEqual(resp.msg["!#$%&'*+-.^_`|~"], 'value')
vchar = ''.join(map(chr, range(0x21, 0x7E + 1)))
self.assertEqual(resp.getheader('VCHAR'), vchar)
self.assertEqual(resp.msg['VCHAR'], vchar)
self.assertIsNotNone(resp.getheader('obs-text'))
self.assertIn('obs-text', resp.msg)
for folded in (resp.getheader('obs-fold'), resp.msg['obs-fold']):
self.assertTrue(folded.startswith('text'))
self.assertIn(' folded with space', folded)
self.assertTrue(folded.endswith('folded with tab'))
def test_invalid_headers(self):
conn = client.HTTPConnection('example.com')
conn.sock = FakeSocket('')
conn.putrequest('GET', '/')
# http://tools.ietf.org/html/rfc7230#section-3.2.4, whitespace is no
# longer allowed in header names
cases = (
(b'Invalid\r\nName', b'ValidValue'),
(b'Invalid\rName', b'ValidValue'),
(b'Invalid\nName', b'ValidValue'),
(b'\r\nInvalidName', b'ValidValue'),
(b'\rInvalidName', b'ValidValue'),
(b'\nInvalidName', b'ValidValue'),
(b' InvalidName', b'ValidValue'),
(b'\tInvalidName', b'ValidValue'),
(b'Invalid:Name', b'ValidValue'),
(b':InvalidName', b'ValidValue'),
(b'ValidName', b'Invalid\r\nValue'),
(b'ValidName', b'Invalid\rValue'),
(b'ValidName', b'Invalid\nValue'),
(b'ValidName', b'InvalidValue\r\n'),
(b'ValidName', b'InvalidValue\r'),
(b'ValidName', b'InvalidValue\n'),
)
for name, value in cases:
with self.subTest((name, value)):
with self.assertRaisesRegex(ValueError, 'Invalid header'):
conn.putheader(name, value)
def test_headers_debuglevel(self):
body = (
b'HTTP/1.1 200 OK\r\n'
b'First: val\r\n'
b'Second: val1\r\n'
b'Second: val2\r\n'
)
sock = FakeSocket(body)
resp = client.HTTPResponse(sock, debuglevel=1)
with support.captured_stdout() as output:
resp.begin()
lines = output.getvalue().splitlines()
self.assertEqual(lines[0], "reply: 'HTTP/1.1 200 OK\\r\\n'")
self.assertEqual(lines[1], "header: First: val")
self.assertEqual(lines[2], "header: Second: val1")
self.assertEqual(lines[3], "header: Second: val2")
|
HeaderTests
|
python
|
pypa__pip
|
src/pip/_vendor/rich/_ratio.py
|
{
"start": 115,
"end": 5325
}
|
class ____(Protocol):
"""Any object that defines an edge (such as Layout)."""
size: Optional[int] = None
ratio: int = 1
minimum_size: int = 1
def ratio_resolve(total: int, edges: Sequence[Edge]) -> List[int]:
"""Divide total space to satisfy size, ratio, and minimum_size, constraints.
The returned list of integers should add up to total in most cases, unless it is
impossible to satisfy all the constraints. For instance, if there are two edges
with a minimum size of 20 each and `total` is 30 then the returned list will be
greater than total. In practice, this would mean that a Layout object would
clip the rows that would overflow the screen height.
Args:
total (int): Total number of characters.
edges (List[Edge]): Edges within total space.
Returns:
List[int]: Number of characters for each edge.
"""
# Size of edge or None for yet to be determined
sizes = [(edge.size or None) for edge in edges]
_Fraction = Fraction
# While any edges haven't been calculated
while None in sizes:
# Get flexible edges and index to map these back on to sizes list
flexible_edges = [
(index, edge)
for index, (size, edge) in enumerate(zip(sizes, edges))
if size is None
]
# Remaining space in total
remaining = total - sum(size or 0 for size in sizes)
if remaining <= 0:
# No room for flexible edges
return [
((edge.minimum_size or 1) if size is None else size)
for size, edge in zip(sizes, edges)
]
# Calculate number of characters in a ratio portion
portion = _Fraction(
remaining, sum((edge.ratio or 1) for _, edge in flexible_edges)
)
# If any edges will be less than their minimum, replace size with the minimum
for index, edge in flexible_edges:
if portion * edge.ratio <= edge.minimum_size:
sizes[index] = edge.minimum_size
# New fixed size will invalidate calculations, so we need to repeat the process
break
else:
# Distribute flexible space and compensate for rounding error
# Since edge sizes can only be integers we need to add the remainder
# to the following line
remainder = _Fraction(0)
for index, edge in flexible_edges:
size, remainder = divmod(portion * edge.ratio + remainder, 1)
sizes[index] = size
break
# Sizes now contains integers only
return cast(List[int], sizes)
def ratio_reduce(
total: int, ratios: List[int], maximums: List[int], values: List[int]
) -> List[int]:
"""Divide an integer total in to parts based on ratios.
Args:
total (int): The total to divide.
ratios (List[int]): A list of integer ratios.
maximums (List[int]): List of maximums values for each slot.
values (List[int]): List of values
Returns:
List[int]: A list of integers guaranteed to sum to total.
"""
ratios = [ratio if _max else 0 for ratio, _max in zip(ratios, maximums)]
total_ratio = sum(ratios)
if not total_ratio:
return values[:]
total_remaining = total
result: List[int] = []
append = result.append
for ratio, maximum, value in zip(ratios, maximums, values):
if ratio and total_ratio > 0:
distributed = min(maximum, round(ratio * total_remaining / total_ratio))
append(value - distributed)
total_remaining -= distributed
total_ratio -= ratio
else:
append(value)
return result
def ratio_distribute(
total: int, ratios: List[int], minimums: Optional[List[int]] = None
) -> List[int]:
"""Distribute an integer total in to parts based on ratios.
Args:
total (int): The total to divide.
ratios (List[int]): A list of integer ratios.
minimums (List[int]): List of minimum values for each slot.
Returns:
List[int]: A list of integers guaranteed to sum to total.
"""
if minimums:
ratios = [ratio if _min else 0 for ratio, _min in zip(ratios, minimums)]
total_ratio = sum(ratios)
assert total_ratio > 0, "Sum of ratios must be > 0"
total_remaining = total
distributed_total: List[int] = []
append = distributed_total.append
if minimums is None:
_minimums = [0] * len(ratios)
else:
_minimums = minimums
for ratio, minimum in zip(ratios, _minimums):
if total_ratio > 0:
distributed = max(minimum, ceil(ratio * total_remaining / total_ratio))
else:
distributed = total_remaining
append(distributed)
total_ratio -= ratio
total_remaining -= distributed
return distributed_total
if __name__ == "__main__":
from dataclasses import dataclass
@dataclass
class E:
size: Optional[int] = None
ratio: int = 1
minimum_size: int = 1
resolved = ratio_resolve(110, [E(None, 1, 1), E(None, 1, 1), E(None, 1, 1)])
print(sum(resolved))
|
Edge
|
python
|
tiangolo__fastapi
|
docs_src/separate_openapi_schemas/tutorial002.py
|
{
"start": 93,
"end": 524
}
|
class ____(BaseModel):
name: str
description: Union[str, None] = None
app = FastAPI(separate_input_output_schemas=False)
@app.post("/items/")
def create_item(item: Item):
return item
@app.get("/items/")
def read_items() -> List[Item]:
return [
Item(
name="Portal Gun",
description="Device to travel through the multi-rick-verse",
),
Item(name="Plumbus"),
]
|
Item
|
python
|
PrefectHQ__prefect
|
src/prefect/concurrency/context.py
|
{
"start": 229,
"end": 922
}
|
class ____(ContextModel):
__var__: ClassVar[ContextVar[Self]] = ContextVar("concurrency")
# Track the leases that have been acquired but were not able to be released
# due to cancellation or some other error. These leases are revoked when
# the context manager exits.
cleanup_lease_ids: list[UUID] = Field(default_factory=lambda: [])
def __exit__(self, *exc_info: Any) -> None:
if self.cleanup_lease_ids:
with get_client(sync_client=True) as client:
for lease_id in self.cleanup_lease_ids:
client.release_concurrency_slots_with_lease(lease_id=lease_id)
return super().__exit__(*exc_info)
|
ConcurrencyContext
|
python
|
huggingface__transformers
|
src/transformers/data/data_collator.py
|
{
"start": 10109,
"end": 17931
}
|
class ____(DataCollatorMixin):
"""
Data collator that will dynamically pad the inputs received, as well as the labels.
Args:
tokenizer ([`PreTrainedTokenizer`] or [`PreTrainedTokenizerFast`]):
The tokenizer used for encoding the data.
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `True`):
Select a strategy to pad the returned sequences (according to the model's padding side and padding index)
among:
- `True` or `'longest'` (default): Pad to the longest sequence in the batch (or no padding if only a single
sequence is provided).
- `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `'do_not_pad'`: No padding (i.e., can output a batch with sequences of different lengths).
max_length (`int`, *optional*):
Maximum length of the returned list and optionally padding length (see above).
pad_to_multiple_of (`int`, *optional*):
If set will pad the sequence to a multiple of the provided value.
This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability >=
7.0 (Volta).
label_pad_token_id (`int`, *optional*, defaults to -100):
The id to use when padding the labels (-100 will be automatically ignore by PyTorch loss functions).
return_tensors (`str`, *optional*, defaults to `"pt"`):
The type of Tensor to return. Allowable values are "np", or "pt".
"""
tokenizer: PreTrainedTokenizerBase
padding: bool | str | PaddingStrategy = True
max_length: int | None = None
pad_to_multiple_of: int | None = None
label_pad_token_id: int = -100
return_tensors: str = "pt"
def torch_call(self, features):
import torch
label_name = "label" if "label" in features[0] else "labels"
labels = [feature[label_name] for feature in features] if label_name in features[0] else None
no_labels_features = [{k: v for k, v in feature.items() if k != label_name} for feature in features]
batch = pad_without_fast_tokenizer_warning(
self.tokenizer,
no_labels_features,
padding=self.padding,
max_length=self.max_length,
pad_to_multiple_of=self.pad_to_multiple_of,
return_tensors="pt",
)
if labels is None:
return batch
sequence_length = batch["input_ids"].shape[1]
padding_side = self.tokenizer.padding_side
def to_list(tensor_or_iterable):
if isinstance(tensor_or_iterable, torch.Tensor):
return tensor_or_iterable.tolist()
return list(tensor_or_iterable)
if padding_side == "right":
batch[label_name] = [
to_list(label) + [self.label_pad_token_id] * (sequence_length - len(label)) for label in labels
]
else:
batch[label_name] = [
[self.label_pad_token_id] * (sequence_length - len(label)) + to_list(label) for label in labels
]
batch[label_name] = torch.tensor(batch[label_name], dtype=torch.int64)
return batch
def numpy_call(self, features):
label_name = "label" if "label" in features[0] else "labels"
labels = [feature[label_name] for feature in features] if label_name in features[0] else None
batch = pad_without_fast_tokenizer_warning(
self.tokenizer,
features,
padding=self.padding,
max_length=self.max_length,
pad_to_multiple_of=self.pad_to_multiple_of,
# Conversion to tensors will fail if we have labels as they are not of the same length yet.
return_tensors="np" if labels is None else None,
)
if labels is None:
return batch
sequence_length = np.array(batch["input_ids"]).shape[1]
padding_side = self.tokenizer.padding_side
if padding_side == "right":
batch["labels"] = [
list(label) + [self.label_pad_token_id] * (sequence_length - len(label)) for label in labels
]
else:
batch["labels"] = [
[self.label_pad_token_id] * (sequence_length - len(label)) + list(label) for label in labels
]
batch = {k: np.array(v, dtype=np.int64) for k, v in batch.items()}
return batch
def _torch_collate_batch(examples, tokenizer, pad_to_multiple_of: int | None = None):
"""Collate `examples` into a batch, using the information in `tokenizer` for padding if necessary."""
import torch
# Tensorize if necessary.
if isinstance(examples[0], (list, tuple, np.ndarray)):
examples = [torch.tensor(e, dtype=torch.long) for e in examples]
length_of_first = examples[0].size(0)
# Check if padding is necessary.
are_tensors_same_length = all(x.size(0) == length_of_first for x in examples)
if are_tensors_same_length and (pad_to_multiple_of is None or length_of_first % pad_to_multiple_of == 0):
if not isinstance(examples, torch.Tensor):
return torch.stack(examples, dim=0)
# If yes, check if we have a `pad_token`.
if tokenizer.pad_token is None:
raise ValueError(
"You are attempting to pad samples but the tokenizer you are using"
f" ({tokenizer.__class__.__name__}) does not have a pad token."
)
# Creating the full tensor and filling it with our data.
max_length = max(x.size(0) for x in examples)
if pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
result = examples[0].new_full([len(examples), max_length], tokenizer.pad_token_id)
for i, example in enumerate(examples):
if tokenizer.padding_side == "right":
result[i, : example.shape[0]] = example
else:
result[i, -example.shape[0] :] = example
return result
def _numpy_collate_batch(examples, tokenizer, pad_to_multiple_of: int | None = None):
"""Collate `examples` into a batch, using the information in `tokenizer` for padding if necessary."""
# Tensorize if necessary.
if isinstance(examples[0], (list, tuple)):
examples = [np.array(e, dtype=np.int64) for e in examples]
# Check if padding is necessary.
length_of_first = len(examples[0])
are_tensors_same_length = all(len(x) == length_of_first for x in examples)
if are_tensors_same_length and (pad_to_multiple_of is None or length_of_first % pad_to_multiple_of == 0):
return np.stack(examples, axis=0)
# If yes, check if we have a `pad_token`.
if tokenizer.pad_token is None:
raise ValueError(
"You are attempting to pad samples but the tokenizer you are using"
f" ({tokenizer.__class__.__name__}) does not have a pad token."
)
# Creating the full tensor and filling it with our data.
max_length = max(len(x) for x in examples)
if pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
result = np.full(shape=(len(examples), max_length), fill_value=tokenizer.pad_token_id, dtype=examples[0].dtype)
for i, example in enumerate(examples):
if tokenizer.padding_side == "right":
result[i, : example.shape[0]] = example
else:
result[i, -example.shape[0] :] = example
return result
@dataclass
|
DataCollatorForTokenClassification
|
python
|
pytorch__pytorch
|
test/dynamo/test_subclasses.py
|
{
"start": 102490,
"end": 103278
}
|
class ____(torch.nn.Module):
def forward(self, primals_1: "f32[3, 4]", primals_2: "f32[3, 4]", primals_3: "Sym(3)", primals_4: "Sym(4)", primals_5: "Sym(3)", primals_6: "Sym(4)"):
clone: "f32[3, 4]" = torch.ops.aten.clone.default(primals_1); primals_1 = None
clone_1: "f32[3, 4]" = torch.ops.aten.clone.default(primals_2); primals_2 = None
mul: "Sym(12)" = primals_5 * primals_6
view: "f32[12]" = torch.ops.aten.view.default(clone, [mul])
view_1: "f32[12]" = torch.ops.aten.view.default(clone_1, [mul]); clone_1 = None
return [clone, view, view_1, mul, primals_5, primals_6]
""", # noqa: B950
)
self.assertExpectedInline(
normalize_gm(bw[0].print_readable(print_output=False)),
"""\
|
GraphModule
|
python
|
pytorch__pytorch
|
test/torch_np/numpy_tests/linalg/test_linalg.py
|
{
"start": 11613,
"end": 11891
}
|
class ____(LinalgTestCase):
def test_herm_cases(self):
self.check_cases(require={"hermitian"}, exclude={"generalized", "size-0"})
def test_empty_herm_cases(self):
self.check_cases(require={"hermitian", "size-0"}, exclude={"generalized"})
|
HermitianTestCase
|
python
|
readthedocs__readthedocs.org
|
readthedocs/projects/migrations/0147_addons_filetreediff_enabled_by_default.py
|
{
"start": 349,
"end": 936
}
|
class ____(migrations.Migration):
safe = Safe.after_deploy()
dependencies = [
("projects", "0146_addons_filetreediff_ignored_files"),
]
operations = [
migrations.RunPython(migrate),
migrations.AlterField(
model_name="addonsconfig",
name="filetreediff_enabled",
field=models.BooleanField(default=True),
),
migrations.AlterField(
model_name="historicaladdonsconfig",
name="filetreediff_enabled",
field=models.BooleanField(default=True),
),
]
|
Migration
|
python
|
pytorch__pytorch
|
torch/testing/_internal/distributed/nn/api/remote_module_test.py
|
{
"start": 1759,
"end": 2006
}
|
class ____:
def forward(
self, tensor: Tensor, number: int, word: str = "default"
) -> tuple[str, int, Tensor]:
# pyre-ignore[7]: Pyre and torch.jit.interface don't mix well
pass
@torch.jit.interface
|
MyModuleInterface
|
python
|
kamyu104__LeetCode-Solutions
|
Python/count-cells-in-overlapping-horizontal-and-vertical-substrings.py
|
{
"start": 50,
"end": 1651
}
|
class ____(object):
def countCells(self, grid, pattern):
"""
:type grid: List[List[str]]
:type pattern: str
:rtype: int
"""
# Template: https://cp-algorithms.com/string/z-function.html
def z_function(s): # Time: O(n), Space: O(n)
z = [0]*len(s)
l, r = 0, 0
for i in xrange(1, len(z)):
if i <= r:
z[i] = min(r-i+1, z[i-l])
while i+z[i] < len(z) and s[z[i]] == s[i+z[i]]:
z[i] += 1
if i+z[i]-1 > r:
l, r = i, i+z[i]-1
return z
def check(is_horizontal):
n, m = len(grid), len(grid[0])
if not is_horizontal:
n, m = m, n
p = len(pattern)
s = list(pattern)
if is_horizontal:
s.extend(grid[i][j] for i in xrange(n) for j in xrange(m))
else:
s.extend(grid[j][i] for i in xrange(n) for j in xrange(m))
lookup = [[False]*m for _ in xrange(n)]
z = z_function(s)
curr = 0
for i in xrange(p, len(s)):
if z[i] < p:
continue
curr = max(curr, i-p)
while curr <= (i-p)+p-1:
lookup[curr//m][curr%m] = True
curr += 1
return lookup
lookup1 = check(True)
lookup2 = check(False)
return sum(lookup1[i][j] and lookup2[j][i] for i in xrange(len(grid)) for j in xrange(len(grid[0])))
|
Solution
|
python
|
sympy__sympy
|
sympy/core/operations.py
|
{
"start": 17827,
"end": 20777
}
|
class ____(AssocOp):
"""
Join/meet operations of an algebraic lattice[1].
Explanation
===========
These binary operations are associative (op(op(a, b), c) = op(a, op(b, c))),
commutative (op(a, b) = op(b, a)) and idempotent (op(a, a) = op(a) = a).
Common examples are AND, OR, Union, Intersection, max or min. They have an
identity element (op(identity, a) = a) and an absorbing element
conventionally called zero (op(zero, a) = zero).
This is an abstract base class, concrete derived classes must declare
attributes zero and identity. All defining properties are then respected.
Examples
========
>>> from sympy import Integer
>>> from sympy.core.operations import LatticeOp
>>> class my_join(LatticeOp):
... zero = Integer(0)
... identity = Integer(1)
>>> my_join(2, 3) == my_join(3, 2)
True
>>> my_join(2, my_join(3, 4)) == my_join(2, 3, 4)
True
>>> my_join(0, 1, 4, 2, 3, 4)
0
>>> my_join(1, 2)
2
References
==========
.. [1] https://en.wikipedia.org/wiki/Lattice_%28order%29
"""
is_commutative = True
def __new__(cls, *args, evaluate=None, **options):
args = (_sympify_(arg) for arg in args)
if evaluate is None:
evaluate = global_parameters.evaluate
if not evaluate:
obj = super().__new__(cls, *args, evaluate=False, **options)
obj._argset = frozenset(args)
return obj
try:
# /!\ args is a generator and _new_args_filter
# must be careful to handle as such; this
# is done so short-circuiting can be done
# without having to sympify all values
_args = frozenset(cls._new_args_filter(args))
except ShortCircuit:
return sympify(cls.zero)
if not _args:
return sympify(cls.identity)
elif len(_args) == 1:
return set(_args).pop()
else:
# XXX in almost every other case for __new__, *_args is
# passed along, but the expectation here is for _args
obj = super(AssocOp, cls).__new__(cls, *ordered(_args))
obj._argset = _args
return obj
@classmethod
def _new_args_filter(cls, arg_sequence, call_cls=None):
"""Generator filtering args"""
ncls = call_cls or cls
for arg in arg_sequence:
if arg == ncls.zero:
raise ShortCircuit(arg)
elif arg == ncls.identity:
continue
elif arg.func == ncls:
yield from arg.args
else:
yield arg
@classmethod
def make_args(cls, expr):
"""
Return a set of args such that cls(*arg_set) == expr.
"""
if isinstance(expr, cls):
return expr._argset
else:
return frozenset([sympify(expr)])
|
LatticeOp
|
python
|
astropy__astropy
|
astropy/modeling/tests/test_models.py
|
{
"start": 22369,
"end": 35671
}
|
class ____(Fittable2DModelTester):
pass
def test_ShiftModel():
# Shift by a scalar
m = models.Shift(42)
assert m(0) == 42
assert_equal(m([1, 2]), [43, 44])
# Shift by a list
m = models.Shift([42, 43], n_models=2)
assert_equal(m(0), [42, 43])
assert_equal(m([1, 2], model_set_axis=False), [[43, 44], [44, 45]])
def test_ScaleModel():
# Scale by a scalar
m = models.Scale(42)
assert m(0) == 0
assert_equal(m([1, 2]), [42, 84])
# Scale by a list
m = models.Scale([42, 43], n_models=2)
assert_equal(m(0), [0, 0])
assert_equal(m([1, 2], model_set_axis=False), [[42, 84], [43, 86]])
@pytest.mark.filterwarnings(r"ignore:humlicek2 has been deprecated since .*")
def test_voigt_model():
"""
Currently just tests that the model peaks at its origin.
Regression test for https://github.com/astropy/astropy/issues/3942
"""
m = models.Voigt1D(x_0=5, amplitude_L=10, fwhm_L=0.5, fwhm_G=0.9)
x = np.arange(0, 10, 0.01)
y = m(x)
assert y[500] == y.max() # y[500] is right at the center
def test_model_instance_repr():
m = models.Gaussian1D(1.5, 2.5, 3.5)
assert repr(m) == "<Gaussian1D(amplitude=1.5, mean=2.5, stddev=3.5)>"
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
def test_tabular_interp_1d():
"""
Test Tabular1D model.
"""
points = np.arange(0, 5)
values = [1.0, 10, 2, 45, -3]
LookupTable = models.tabular_model(1)
model = LookupTable(points=points, lookup_table=values)
xnew = [0.0, 0.7, 1.4, 2.1, 3.9]
ans1 = [1.0, 7.3, 6.8, 6.3, 1.8]
assert_allclose(model(xnew), ans1)
# Test evaluate without passing `points`.
model = LookupTable(lookup_table=values)
assert_allclose(model(xnew), ans1)
# Test bounds error.
xextrap = [0.0, 0.7, 1.4, 2.1, 3.9, 4.1]
MESSAGE = r"One of the requested xi is out of bounds in dimension 0"
with pytest.raises(ValueError, match=MESSAGE):
model(xextrap)
# test extrapolation and fill value
model = LookupTable(lookup_table=values, bounds_error=False, fill_value=None)
assert_allclose(model(xextrap), [1.0, 7.3, 6.8, 6.3, 1.8, -7.8])
# Test unit support
xnew = xnew * u.nm
ans1 = ans1 * u.nJy
model = LookupTable(points=points * u.nm, lookup_table=values * u.nJy)
assert_quantity_allclose(model(xnew), ans1)
assert_quantity_allclose(model(xnew.to(u.nm)), ans1)
assert model.bounding_box == (0 * u.nm, 4 * u.nm)
# Test with no units on points.
model = LookupTable(points=points, lookup_table=values * u.nJy)
assert_quantity_allclose(model(xnew), ans1)
assert_quantity_allclose(model(xnew.to(u.nm)), ans1)
assert model.bounding_box == (0, 4)
model = LookupTable(points=points, lookup_table=values * u.nJy, method="nearest")
assert_quantity_allclose(model(xnew), np.array([1, 10, 10, 2, -3]) * u.nJy)
assert_quantity_allclose(model(xnew.to(u.nm)), np.array([1, 10, 10, 2, -3]) * u.nJy)
assert model.bounding_box == (0, 4)
# Test fill value unit conversion and unitless input on table with unit
model = LookupTable(
[1, 2, 3],
[10, 20, 30] * u.nJy,
bounds_error=False,
fill_value=1e-33 * (u.W / (u.m * u.m * u.Hz)),
)
assert_quantity_allclose(model(np.arange(5)), [100, 10, 20, 30, 100] * u.nJy)
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
def test_tabular_interp_2d():
table = np.array(
[
[-0.04614432, -0.02512547, -0.00619557, 0.0144165, 0.0297525],
[-0.04510594, -0.03183369, -0.01118008, 0.01201388, 0.02496205],
[-0.05464094, -0.02804499, -0.00960086, 0.01134333, 0.02284104],
[-0.04879338, -0.02539565, -0.00440462, 0.01795145, 0.02122417],
[-0.03637372, -0.01630025, -0.00157902, 0.01649774, 0.01952131],
]
)
points = np.arange(0, 5)
points = (points, points)
xnew = np.array([0.0, 0.7, 1.4, 2.1, 3.9])
LookupTable = models.tabular_model(2)
model = LookupTable(points, table)
znew = model(xnew, xnew)
result = np.array([-0.04614432, -0.03450009, -0.02241028, -0.0069727, 0.01938675])
assert_allclose(znew, result, atol=1e-7)
# test 2D arrays as input
a = np.arange(12).reshape((3, 4))
y, x = np.mgrid[:3, :4]
t = models.Tabular2D(lookup_table=a)
r = t(y, x)
assert_allclose(a, r)
MESSAGE = r"Only n_models=1 is supported"
with pytest.raises(NotImplementedError, match=MESSAGE):
model = LookupTable(n_models=2)
MESSAGE = r"Must provide a lookup table"
with pytest.raises(ValueError, match=MESSAGE):
model = LookupTable(points=([1.2, 2.3], [1.2, 6.7], [3, 4]))
MESSAGE = r"lookup_table should be an array with 2 dimensions"
with pytest.raises(ValueError, match=MESSAGE):
model = LookupTable(lookup_table=[1, 2, 3])
MESSAGE = r"lookup_table should be an array with 2 dimensions"
with pytest.raises(ValueError, match=MESSAGE):
model = LookupTable(([1, 2], [3, 4]), [5, 6])
MESSAGE = r"points must all have the same unit"
with pytest.raises(ValueError, match=MESSAGE):
model = LookupTable(([1, 2] * u.m, [3, 4]), [[5, 6], [7, 8]])
MESSAGE = r"fill value is in Jy but expected to be unitless"
with pytest.raises(ValueError, match=MESSAGE):
model = LookupTable(points, table, bounds_error=False, fill_value=1 * u.Jy)
# Test unit support
points = points[0] * u.nm
points = (points, points)
xnew = xnew * u.nm
model = LookupTable(points, table * u.nJy)
result = result * u.nJy
assert_quantity_allclose(model(xnew, xnew), result, atol=1e-7 * u.nJy)
xnew = xnew.to(u.m)
assert_quantity_allclose(model(xnew, xnew), result, atol=1e-7 * u.nJy)
bbox = (0 * u.nm, 4 * u.nm)
bbox = (bbox, bbox)
assert model.bounding_box == bbox
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
def test_tabular_nd():
a = np.arange(24).reshape((2, 3, 4))
x, y, z = np.mgrid[:2, :3, :4]
tab = models.tabular_model(3)
t = tab(lookup_table=a)
result = t(x, y, z)
assert_allclose(a, result)
MESSAGE = r"Lookup table must have at least one dimension"
with pytest.raises(ValueError, match=MESSAGE):
models.tabular_model(0)
def test_with_bounding_box():
"""
Test the option to evaluate a model respecting
its bunding_box.
"""
p = models.Polynomial2D(2) & models.Polynomial2D(2)
m = models.Mapping((0, 1, 0, 1)) | p
with NumpyRNGContext(1234567):
m.parameters = np.random.rand(12)
m.bounding_box = ((3, 9), (1, 8))
x, y = np.mgrid[:10, :10]
a, b = m(x, y)
aw, bw = m(x, y, with_bounding_box=True)
ind = (~np.isnan(aw)).nonzero()
assert_allclose(a[ind], aw[ind])
assert_allclose(b[ind], bw[ind])
aw, bw = m(x, y, with_bounding_box=True, fill_value=1000)
ind = (aw != 1000).nonzero()
assert_allclose(a[ind], aw[ind])
assert_allclose(b[ind], bw[ind])
# test the order of bbox is not reversed for 1D models
p = models.Polynomial1D(1, c0=12, c1=2.3)
p.bounding_box = (0, 5)
assert p(1) == p(1, with_bounding_box=True)
t3 = models.Shift(10) & models.Scale(2) & models.Shift(-1)
t3.bounding_box = ((4.3, 6.9), (6, 15), (-1, 10))
assert_allclose(
t3([1, 1], [7, 7], [3, 5], with_bounding_box=True),
[[np.nan, 11], [np.nan, 14], [np.nan, 4]],
)
trans3 = models.Shift(10) & models.Scale(2) & models.Shift(-1)
trans3.bounding_box = ((4.3, 6.9), (6, 15), (-1, 10))
assert_allclose(trans3(1, 7, 5, with_bounding_box=True), [11, 14, 4])
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
def test_tabular_with_bounding_box():
points = np.arange(5)
values = np.array([1.5, 3.4, 6.7, 7, 32])
t = models.Tabular1D(points, values)
result = t(1, with_bounding_box=True)
assert result == 3.4
assert t.inverse(result, with_bounding_box=True) == 1.0
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
def test_tabular_bounding_box_with_units():
points = np.arange(5) * u.pix
lt = np.arange(5) * u.AA
t = models.Tabular1D(points, lt)
result = t(1 * u.pix, with_bounding_box=True)
assert result == 1.0 * u.AA
assert t.inverse(result, with_bounding_box=True) == 1 * u.pix
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
def test_tabular1d_inverse():
"""Test that the Tabular1D inverse is defined"""
points = np.arange(5)
values = np.array([1.5, 3.4, 6.7, 7, 32])
t = models.Tabular1D(points, values)
result = t.inverse((3.4, 6.7))
assert_allclose(result, np.array((1.0, 2.0)))
# Check that it works for descending values in lookup_table
t2 = models.Tabular1D(points, values[::-1])
assert_allclose(t2.inverse.points[0], t2.lookup_table[::-1])
result2 = t2.inverse((7, 6.7))
assert_allclose(result2, np.array((1.0, 2.0)))
# Check that it errors on double-valued lookup_table
points = np.arange(5)
values = np.array([1.5, 3.4, 3.4, 32, 25])
t = models.Tabular1D(points, values)
with pytest.raises(NotImplementedError, match=r"^$"):
t.inverse((3.4, 7.0))
# Check that Tabular2D.inverse raises an error
table = np.arange(5 * 5).reshape(5, 5)
points = np.arange(0, 5)
points = (points, points)
t3 = models.Tabular2D(points=points, lookup_table=table)
with pytest.raises(
NotImplementedError,
match=r"An analytical inverse transform has not been implemented for this model\.",
):
t3.inverse((3, 3))
# Check that it uses the same kwargs as the original model
points = np.arange(5)
values = np.array([1.5, 3.4, 6.7, 7, 32])
t = models.Tabular1D(points, values)
MESSAGE = r"One of the requested xi is out of bounds in dimension 0"
with pytest.raises(ValueError, match=MESSAGE):
t.inverse(100)
t = models.Tabular1D(points, values, bounds_error=False, fill_value=None)
result = t.inverse(100)
assert_allclose(t(result), 100)
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
def test_tabular_grid_shape_mismatch_error():
points = np.arange(5)
lt = np.mgrid[0:5, 0:5][0]
MESSAGE = r"Expected grid points in 2 directions, got 5."
with pytest.raises(ValueError, match=MESSAGE):
models.Tabular2D(points, lt)
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
def test_tabular_repr():
points = np.arange(5)
lt = np.arange(5)
t = models.Tabular1D(points, lt)
assert (
repr(t)
== "<Tabular1D(points=(array([0, 1, 2, 3, 4]),), lookup_table=[0 1 2 3 4])>"
)
table = np.arange(5 * 5).reshape(5, 5)
points = np.arange(0, 5)
points = (points, points)
t = models.Tabular2D(points=points, lookup_table=table)
assert (
repr(t)
== "<Tabular2D(points=(array([0, 1, 2, 3, 4]), array([0, 1, 2, 3, 4])), "
"lookup_table=[[ 0 1 2 3 4]\n"
" [ 5 6 7 8 9]\n"
" [10 11 12 13 14]\n"
" [15 16 17 18 19]\n"
" [20 21 22 23 24]])>"
)
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
def test_tabular_str():
points = np.arange(5)
lt = np.arange(5)
t = models.Tabular1D(points, lt)
assert (
str(t) == "Model: Tabular1D\n"
"N_inputs: 1\n"
"N_outputs: 1\n"
"Parameters: \n"
" points: (array([0, 1, 2, 3, 4]),)\n"
" lookup_table: [0 1 2 3 4]\n"
" method: linear\n"
" fill_value: nan\n"
" bounds_error: True"
)
table = np.arange(5 * 5).reshape(5, 5)
points = np.arange(0, 5)
points = (points, points)
t = models.Tabular2D(points=points, lookup_table=table)
assert (
str(t) == "Model: Tabular2D\n"
"N_inputs: 2\n"
"N_outputs: 1\n"
"Parameters: \n"
" points: (array([0, 1, 2, 3, 4]), array([0, 1, 2, 3, 4]))\n"
" lookup_table: [[ 0 1 2 3 4]\n"
" [ 5 6 7 8 9]\n"
" [10 11 12 13 14]\n"
" [15 16 17 18 19]\n"
" [20 21 22 23 24]]\n"
" method: linear\n"
" fill_value: nan\n"
" bounds_error: True"
)
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
def test_tabular_evaluate():
import scipy.interpolate as scipy_interpolate
points = np.arange(5)
lt = np.arange(5)[::-1]
t = models.Tabular1D(points, lt)
assert (t.evaluate([1, 2, 3]) == [3, 2, 1]).all()
assert (t.evaluate(np.array([1, 2, 3]) * u.m) == [3, 2, 1]).all()
t.n_outputs = 2
value = [np.array([3, 2, 1]), np.array([1, 2, 3])]
with mk.patch.object(
scipy_interpolate, "interpn", autospec=True, return_value=value
) as mkInterpn:
outputs = t.evaluate([1, 2, 3])
for index, output in enumerate(outputs):
assert np.all(value[index] == output)
assert mkInterpn.call_count == 1
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
def test_tabular_module_name():
"""
The module name must be set manually because
these classes are created dynamically.
"""
for model in [models.Tabular1D, models.Tabular2D]:
assert model.__module__ == "astropy.modeling.tabular"
|
TestFittable2DModels
|
python
|
huggingface__transformers
|
src/transformers/quantizers/quantizer_auto_round.py
|
{
"start": 967,
"end": 3068
}
|
class ____(HfQuantizer):
"""
Quantizer of the AutoRound method. (https://huggingface.co/papers/2309.05516)
"""
# AutoRound requires data calibration - we support only inference
requires_calibration = True
required_packages = ["auto_round"]
def __init__(self, quantization_config: QuantizationConfigMixin, **kwargs):
super().__init__(quantization_config, **kwargs)
def validate_environment(self, *args, **kwargs):
self.device_map = kwargs.get("device_map")
if not is_auto_round_available():
raise ImportError(
"Loading an AutoRound quantized model requires auto-round library (`pip install 'auto-round>=0.5'`)"
)
def update_dtype(self, dtype: "torch.dtype") -> "torch.dtype":
if dtype is None:
dtype = torch.bfloat16
logger.info("Loading the model in `torch.bfloat16`. To overwrite it, set `dtype` manually.")
return dtype
def _process_model_before_weight_loading(self, model: "PreTrainedModel", **kwargs):
if model.__class__.main_input_name != "input_ids":
logger.warning("AutoRound offers only limited support for models that are not strictly text-based.")
from auto_round.inference.convert_model import convert_hf_model, infer_target_device
if self.pre_quantized:
target_device = infer_target_device(self.device_map)
model, used_backends = convert_hf_model(model, target_device)
self.used_backends = used_backends
def _process_model_after_weight_loading(self, model: "PreTrainedModel", **kwargs):
if self.pre_quantized:
from auto_round.inference.convert_model import post_init
post_init(model, self.used_backends)
else:
raise ValueError("AutoRound only sports pre-quantized models.")
@property
def is_trainable(self) -> bool:
return False
def is_serializable(self, safe_serialization=None):
## for gptq/awq models, the quantization config will be changed
return True
|
AutoRoundQuantizer
|
python
|
django__django
|
tests/postgres_tests/models.py
|
{
"start": 262,
"end": 436
}
|
class ____:
def __init__(self, tag_id):
self.tag_id = tag_id
def __eq__(self, other):
return isinstance(other, Tag) and self.tag_id == other.tag_id
|
Tag
|
python
|
dagster-io__dagster
|
python_modules/libraries/dagster-airlift/dagster_airlift/core/airflow_defs_data.py
|
{
"start": 1361,
"end": 6980
}
|
class ____:
"""A class that holds data about the assets that are mapped to Airflow dags and tasks, and
provides methods for retrieving information about the mappings.
The user should not instantiate this class directly. It is provided when customizing the events
that are generated by the Airflow sensor using the `event_transformer_fn` argument of
:py:func:`build_defs_from_airflow_instance`.
"""
airflow_instance: AirflowInstance
resolved_repository: RepositoryDefinition
@property
def airflow_mapped_asset_specs(self) -> Mapping[AssetKey, AssetSpec]:
"""The assets that are mapped to Airflow tasks and dags."""
return {
spec.key: spec
for spec in spec_iterator(self.resolved_repository.assets_defs_by_key.values())
if _is_mapped_asset_spec(spec)
}
@property
def airflow_mapped_jobs(self) -> Sequence[JobDefinition]:
"""Jobs mapping to Airflow dags."""
return [
job for job in self.resolved_repository.get_all_jobs() if is_airflow_mapped_job(job)
]
@property
def airflow_mapped_jobs_by_dag_handle(
self,
) -> Mapping[DagHandle, JobDefinition]:
"""Jobs mapping to Airflow dags by dag_id."""
return {dag_handle_from_job(job): job for job in self.airflow_mapped_jobs}
@property
def assets_per_job(self) -> Mapping[str, AbstractSet[AssetKey]]:
"""Assets per job mapping to Airflow dags."""
return {
job.name: self.assets_produced_by_dags[dag_handle.dag_id]
for dag_handle, job in self.airflow_mapped_jobs_by_dag_handle.items()
}
@property
def assets_produced_by_dags(self) -> Mapping[str, AbstractSet[AssetKey]]:
"""Assets produced by Airflow dags."""
result = defaultdict(set)
for spec in self.airflow_mapped_asset_specs.values():
for dag_id in get_producing_dag_ids(spec):
result[dag_id].add(spec.key)
return result
@public
@property
def instance_name(self) -> str:
"""The name of the Airflow instance."""
return self.airflow_instance.name
@cached_property
def mapping_info(self) -> AirliftMetadataMappingInfo:
return build_airlift_metadata_mapping_info(self.airflow_mapped_asset_specs.values())
@public
def task_ids_in_dag(self, dag_id: str) -> set[str]:
"""Returns the task ids within the given dag_id.
Args:
dag_id (str): The dag id.
"""
return self.mapping_info.task_id_map[dag_id]
@property
def dag_ids_with_mapped_asset_keys(self) -> AbstractSet[str]:
"""All dag_ids that have asset keys explicitly mapped to them. This include peered dag assets."""
# dag ids that have asset keys explicitly mapped to them, or tasks within them
explicitly_mapped_dag_ids = self.mapping_info.dag_ids
# dag ids that have a "peered" dag asset
peered_dag_ids = {
handle.dag_id for handle in self.peered_dag_asset_keys_by_dag_handle.keys()
}
return explicitly_mapped_dag_ids.union(peered_dag_ids)
@cached_property
def mapped_asset_keys_by_task_handle(self) -> Mapping[TaskHandle, AbstractSet[AssetKey]]:
asset_keys_per_handle = defaultdict(set)
for spec in self.airflow_mapped_asset_specs.values():
if is_task_mapped_asset_spec(spec):
task_handles = task_handles_for_spec(spec)
for task_handle in task_handles:
asset_keys_per_handle[task_handle].add(spec.key)
return asset_keys_per_handle
# these dag handle properties are ripe for consolidation
@cached_property
def mapped_asset_keys_by_dag_handle(self) -> Mapping[DagHandle, AbstractSet[AssetKey]]:
"""Assets specifically mapped to each dag."""
asset_keys_per_handle = defaultdict(set)
for spec in self.airflow_mapped_asset_specs.values():
if is_dag_mapped_asset_spec(spec):
dag_handles = dag_handles_for_spec(spec)
for dag_handle in dag_handles:
asset_keys_per_handle[dag_handle].add(spec.key)
return asset_keys_per_handle
@cached_property
def peered_dag_asset_keys_by_dag_handle(self) -> Mapping[DagHandle, AbstractSet[AssetKey]]:
"""Autogenerated "peered" dag assets."""
asset_keys_per_handle = defaultdict(set)
for spec in self.airflow_mapped_asset_specs.values():
if is_peered_dag_asset_spec(spec):
dag_handles = peered_dag_handles_for_spec(spec)
for dag_handle in dag_handles:
asset_keys_per_handle[dag_handle].add(spec.key)
return asset_keys_per_handle
@cached_property
def all_asset_keys_by_dag_handle(self) -> Mapping[DagHandle, AbstractSet[AssetKey]]:
"""All asset keys mapped to each dag."""
res = defaultdict(set)
for handle, keys in self.mapped_asset_keys_by_dag_handle.items():
res[handle].update(keys)
for handle, keys in self.peered_dag_asset_keys_by_dag_handle.items():
res[handle].update(keys)
return res
@public
def asset_keys_in_task(self, dag_id: str, task_id: str) -> AbstractSet[AssetKey]:
    """Returns the asset keys that are mapped to the given task.

    Args:
        dag_id (str): The dag id.
        task_id (str): The task id.
    """
    handle = TaskHandle(dag_id=dag_id, task_id=task_id)
    return self.mapped_asset_keys_by_task_handle[handle]
|
AirflowDefinitionsData
|
python
|
conda__conda
|
conda/models/channel.py
|
{
"start": 13682,
"end": 24359
}
|
class ____(Channel):
    """A named composite of several channels (e.g. ``defaults``).

    A multichannel has no scheme/location/URL of its own; it exists only as
    a name that fans out to its member channels.
    """

    def __init__(
        self,
        name: str,
        channels: Iterable[Channel],
        platform: str | None = None,
    ):
        self.name = name
        self.location = None

        # assume all channels are Channels (not MultiChannels)
        if platform:
            # Re-create each member channel pinned to the requested platform.
            channels = (
                Channel(**{**channel.dump(), "platform": platform})
                for channel in channels
            )
        self._channels = tuple(channels)

        self.scheme = None
        self.auth = None
        self.token = None
        self.platform = platform
        self.package_filename = None

    @property
    def canonical_name(self) -> str:
        """The multichannel's own name, never a member channel's."""
        return self.name

    def urls(
        self,
        with_credentials: bool = False,
        subdirs: Iterable[str] | None = None,
    ) -> list[str]:
        """All member-channel URLs, in member order."""
        collected: list[str] = []
        for channel in self.channels:
            collected.extend(channel.urls(with_credentials, subdirs))
        return collected

    @property
    def base_url(self) -> None:
        # No single base URL exists for a composite channel.
        return None

    @property
    def base_urls(self) -> tuple[str | None, ...]:
        return tuple(channel.base_url for channel in self.channels)

    def url(self, with_credentials: bool = False) -> None:
        # No single URL exists for a composite channel.
        return None

    @property
    def channels(self) -> tuple[Channel, ...]:
        return self._channels

    def dump(self) -> dict[str, Any]:
        return {
            "name": self.name,
            "channels": tuple(channel.dump() for channel in self.channels),
        }
def tokenized_startswith(
    test_iterable: Iterable[Any], startswith_iterable: Iterable[Any]
) -> bool:
    """Return True if the leading tokens of *test_iterable* equal *startswith_iterable*.

    Tokens are compared pairwise; comparison stops at the shorter iterable
    (so a test sequence shorter than the prefix still matches, mirroring the
    original ``zip``-based behavior).
    """
    for token, prefix_token in zip(test_iterable, startswith_iterable):
        if token != prefix_token:
            return False
    return True
def tokenized_conda_url_startswith(
    test_url: Iterable[str], startswith_url: Iterable[str]
) -> bool:
    """Return True if *test_url* lives at or below *startswith_url*.

    Host and port must match exactly; paths are compared on whole path
    segments (not raw string prefixes).
    """
    parsed_test, parsed_base = urlparse(test_url), urlparse(startswith_url)
    if parsed_test.hostname != parsed_base.hostname:
        return False
    if parsed_test.port != parsed_base.port:
        return False

    def norm_path(parsed) -> str:
        # Normalize "", "/" and trailing slashes to a canonical form.
        return parsed.path.strip("/") or "/"

    return tokenized_startswith(
        norm_path(parsed_test).split("/"), norm_path(parsed_base).split("/")
    )
def _get_channel_for_name(channel_name: str) -> Channel:
    """Resolve *channel_name* to a Channel via custom channels or the channel alias."""

    def _lookup_custom(name: str) -> Channel | None:
        # Walk up the name, dropping trailing path segments, until a
        # custom-channel entry matches or nothing is left to strip.
        while True:
            if name in context.custom_channels:
                return context.custom_channels[name]
            parent = name.rsplit("/", 1)[0]
            if parent == name:
                return None
            name = parent

    stripped, platform = split_platform(context.known_subdirs, channel_name)
    channel = _lookup_custom(stripped)
    if channel is None:
        # No custom channel matched; attach the name to the channel alias.
        ca = context.channel_alias
        return Channel(
            scheme=ca.scheme,
            auth=ca.auth,
            location=ca.location,
            token=ca.token,
            name=stripped,
            platform=platform,
        )
    # stripping off path threw information away from channel_name (i.e. any
    # potential subname); channel.name *should still be* channel_name
    channel = copy(channel)
    channel.name = stripped
    if platform:
        channel.platform = platform
    return channel
def _read_channel_configuration(
    scheme: str | None, host: str | None, port: str | None, path: str | None
) -> tuple[str | None, str | None, str | None, str | None, str | None]:
    """Resolve a parsed URL against the configured channel settings.

    Tries a sequence of matching strategies in strict priority order
    (steps 1-7 below); the first match wins. The step order is
    load-bearing — more specific / migrated configuration must shadow the
    generic channel-alias and fall-through cases.

    Returns:
        Tuple of ``(location, name, scheme, auth, token)``; any element
        may be ``None``.
    """
    # return location, name, scheme, auth, token
    path = path and path.rstrip("/")
    test_url = str(Url(hostname=host, port=port, path=path))

    # Step 1. No path given; channel name is None
    if not path:
        return (
            str(Url(hostname=host, port=port)).rstrip("/"),
            None,
            scheme or None,
            None,
            None,
        )

    # Step 2. migrated_custom_channels matches
    # Longest names first so the most specific mapping wins.
    for name, location in sorted(
        context.migrated_custom_channels.items(), reverse=True, key=lambda x: len(x[0])
    ):
        location, _scheme, _auth, _token = split_scheme_auth_token(location)
        if tokenized_conda_url_startswith(test_url, join_url(location, name)):
            # translate location to new location, with new credentials
            subname = test_url.replace(join_url(location, name), "", 1).strip("/")
            channel_name = join_url(name, subname)
            channel = _get_channel_for_name(channel_name)
            return (
                channel.location,
                channel_name,
                channel.scheme,
                channel.auth,
                channel.token,
            )

    # Step 3. migrated_channel_aliases matches
    for migrated_alias in context.migrated_channel_aliases:
        if test_url.startswith(migrated_alias.location):
            name = test_url.replace(migrated_alias.location, "", 1).strip("/")
            ca = context.channel_alias
            return ca.location, name, ca.scheme, ca.auth, ca.token

    # Step 4. custom_channels matches
    # Again longest names first, so nested custom channels beat their parents.
    for name, channel in sorted(
        context.custom_channels.items(), reverse=True, key=lambda x: len(x[0])
    ):
        that_test_url = join_url(channel.location, channel.name)
        if tokenized_startswith(test_url.split("/"), that_test_url.split("/")):
            subname = test_url.replace(that_test_url, "", 1).strip("/")
            return (
                channel.location,
                join_url(channel.name, subname),
                scheme,
                channel.auth,
                channel.token,
            )

    # Step 5. channel_alias match
    ca = context.channel_alias
    if ca.location and tokenized_startswith(
        test_url.split("/"), ca.location.split("/")
    ):
        name = test_url.replace(ca.location, "", 1).strip("/") or None
        return ca.location, name, scheme, ca.auth, ca.token

    # Step 6. not-otherwise-specified file://-type urls
    if host is None:
        # this should probably only happen with a file:// type url
        if port is not None:
            raise ValueError("Port should not be set if host is not set either.")
        # Last path segment becomes the channel name; the rest is the location.
        location, name = test_url.rsplit("/", 1)
        if not location:
            location = "/"
        _scheme, _auth, _token = "file", None, None
        return location, name, _scheme, _auth, _token

    # Step 7. fall through to host:port as channel_location and path as channel_name
    # but bump the first token of paths starting with /conda for compatibility with
    # Anaconda Enterprise Repository software.
    bump = None
    path_parts = path.strip("/").split("/")
    if path_parts and path_parts[0] == "conda":
        bump, path = "conda", "/".join(path_parts[1:])
    return (
        str(Url(hostname=host, port=port, path=bump)).rstrip("/"),
        path.strip("/") or None,
        scheme or None,
        None,
        None,
    )
def parse_conda_channel_url(url: str) -> Channel:
    """Build a Channel from a conda URL, merging in configured credentials.

    Pieces present in the URL itself (auth, token, scheme) take precedence
    over the values derived from the channel configuration.
    """
    (
        scheme,
        auth,
        token,
        platform,
        package_filename,
        host,
        port,
        path,
        query,
    ) = split_conda_url_easy_parts(context.known_subdirs, url)

    # recombine host, port, path to get a channel_name and channel_location
    (
        channel_location,
        channel_name,
        configured_scheme,
        configured_auth,
        configured_token,
    ) = _read_channel_configuration(scheme, host, port, path)

    # if we came out with no channel_location or channel_name, we need to figure
    # it out from host, port, path
    if channel_location is None and channel_name is None:
        raise ValueError("channel_location and channel_name cannot both be None")

    # These two fields might have URL-encodable characters that we should decode.
    # We don't decode the full URL because some %XX values might be part of some
    # auth values.
    channel_name = percent_decode(channel_name) if channel_name else channel_name
    package_filename = (
        percent_decode(package_filename) if package_filename else package_filename
    )

    return Channel(
        configured_scheme or "https",
        auth or configured_auth,
        channel_location,
        token or configured_token,
        channel_name,
        platform,
        package_filename,
    )
# backward compatibility for conda-build
def get_conda_build_local_url() -> tuple[PathType]:
    """Return the local build root as a 1-tuple (conda-build compatibility shim)."""
    return (context.local_build_root,)
def prioritize_channels(
    channels: Iterable[Channel | str],
    with_credentials: bool = True,
    subdirs: Iterable[str] | None = None,
) -> dict[str, tuple[str, int]]:
    """Make a dictionary of channel priorities.

    Maps each channel URL to ``(canonical_name, priority)``. Earlier
    channels get numerically lower (stronger) priorities, and the first
    occurrence of a URL wins. MultiChannels are flattened into their
    member channels first, e.g.:

    .. code-block:: pycon

        >>> prioritize_channels(["conda-canary", "defaults", "conda-forge"])
        {
            'https://conda.anaconda.org/conda-canary/osx-arm64': ('conda-canary', 0),
            'https://conda.anaconda.org/conda-canary/noarch': ('conda-canary', 0),
            'https://repo.anaconda.com/pkgs/main/osx-arm64': ('defaults', 1),
            'https://repo.anaconda.com/pkgs/main/noarch': ('defaults', 1),
            'https://repo.anaconda.com/pkgs/r/osx-arm64': ('defaults', 2),
            'https://repo.anaconda.com/pkgs/r/noarch': ('defaults', 2),
            'https://conda.anaconda.org/conda-forge/osx-arm64': ('conda-forge', 3),
            'https://conda.anaconda.org/conda-forge/noarch': ('conda-forge', 3),
        }

    Compare with ``conda.resolve.Resolve._make_channel_priorities``.
    """
    # Flatten any MultiChannels into their member channels.
    flattened = (channel for name in channels for channel in Channel(name).channels)
    prioritized: dict[str, tuple[str, int]] = {}
    for priority, channel in enumerate(flattened):
        for url in channel.urls(with_credentials, subdirs):
            if url not in prioritized:
                prioritized[url] = (
                    channel.canonical_name,
                    # Clamp so the priority never exceeds the allowed maximum.
                    min(priority, MAX_CHANNEL_PRIORITY - 1),
                )
    return prioritized
def all_channel_urls(
    channels: Iterable[str | Channel],
    subdirs: Iterable[str] | None = None,
    with_credentials: bool = True,
) -> IndexedSet:
    """Return the ordered, de-duplicated set of URLs for *channels*."""
    urls = IndexedSet()
    for channel in map(Channel, channels):
        urls.update(channel.urls(with_credentials, subdirs))
    return urls
def offline_keep(url: Any) -> bool:
    """Return True if *url* is usable while offline (non-URLs and file:// URLs)."""
    if not context.offline:
        # Online mode: everything is usable.
        return True
    return not is_url(url) or url.startswith("file:/")
def get_channel_objs(ctx: Context) -> tuple[Channel, ...]:
    """Return current channels as Channel objects"""
    return tuple(map(Channel, ctx.channels))
# Re-derive Channel's cached state whenever the global context is reset.
# NOTE(review): "callaback" is the spelling of the method actually invoked
# here — do not "fix" it locally without also renaming the Context API.
context.register_reset_callaback(Channel._reset_state)
|
MultiChannel
|
python
|
getsentry__sentry
|
src/sentry/ingest/transaction_clusterer/__init__.py
|
{
"start": 744,
"end": 1071
}
|
class ____(Enum):
    """Namespaces the transaction clusterer can operate on.

    Each member bundles the storage/option identifiers for one namespace
    via ``NamespaceOption`` (defined elsewhere in this package).
    """

    # NOTE(review): field meanings below are inferred from their names
    # (data/rules look like storage keys, persistent_storage/meta_store look
    # like project-option names, tracker like a metrics key) — confirm
    # against NamespaceOption's definition before relying on this.
    TRANSACTIONS = NamespaceOption(
        name="transactions",
        data="txnames2",
        rules="txrules",
        persistent_storage="sentry:transaction_name_cluster_rules",
        tracker="txcluster.rules_per_project",
        meta_store="sentry:transaction_name_cluster_meta",
    )
ClustererNamespace
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.