language
stringclasses 1
value | repo
stringclasses 346
values | path
stringlengths 6
201
| class_span
dict | source
stringlengths 21
2.38M
| target
stringlengths 1
96
|
|---|---|---|---|---|---|
python
|
dask__distributed
|
distributed/diagnostics/graph_layout.py
|
{
"start": 156,
"end": 5201
}
|
class ____(SchedulerPlugin):
    """Dynamic graph layout during computation

    This assigns (x, y) locations to all tasks quickly and dynamically as new
    tasks are added.  This scales to a few thousand nodes.

    It is commonly used with distributed/dashboard/components/scheduler.py::TaskGraph, which
    is rendered at /graph on the diagnostic dashboard.
    """

    def __init__(self, scheduler):
        # Unique plugin name so several layout plugins can coexist.
        self.name = f"graph-layout-{uuid.uuid4()}"
        # Per-task coordinates, keyed by task key.
        self.x = {}
        self.y = {}
        # Maps an occupied (x, y) grid cell to the coordinate actually in use
        # there, so later tasks hitting the same cell get nudged (see below).
        self.collision = {}
        self.scheduler = scheduler
        # Stable integer ids for tasks and edges, consumed by the dashboard.
        self.index = {}
        self.index_edge = {}
        self.next_y = 0
        self.next_index = 0
        self.next_edge_index = 0
        # Accumulated deltas that the TaskGraph component drains periodically.
        self.new = []
        self.new_edges = []
        self.state_updates = []
        self.visible_updates = []
        self.visible_edge_updates = []

        # If tasks already exist (plugin attached mid-computation), lay them
        # out immediately.
        if self.scheduler.tasks:
            dependencies = {
                k: [ds.key for ds in ts.dependencies]
                for k, ts in scheduler.tasks.items()
            }
            priority = {k: ts.priority for k, ts in scheduler.tasks.items()}
            self.update_graph(
                self.scheduler,
                tasks=self.scheduler.tasks,
                dependencies=dependencies,
                priority=priority,
            )

    def update_graph(self, scheduler, *, priority=None, tasks=None, **kwargs):
        """Assign (x, y) positions to any not-yet-placed tasks in ``tasks``.

        Tasks are processed in priority order; a task whose dependencies are
        not yet placed is pushed back behind those dependencies.
        """
        stack = sorted(
            tasks, key=lambda k: TupleComparable(priority.get(k, 0)), reverse=True
        )
        while stack:
            key = stack.pop()
            # Skip tasks already placed, or ones the scheduler has forgotten.
            if key in self.x or key not in scheduler.tasks:
                continue
            deps = [ts.key for ts in scheduler.tasks[key].dependencies]
            if deps:
                if not all(dep in self.y for dep in deps):
                    # Defer this task until all its dependencies have
                    # coordinates; re-push it beneath them.
                    stack.append(key)
                    stack.extend(
                        sorted(
                            deps,
                            key=lambda k: TupleComparable(priority.get(k, 0)),
                            reverse=True,
                        )
                    )
                    continue
                else:
                    # y: mean of dependency y-positions weighted by how many
                    # dependents each dependency has.
                    # x: one column to the right of the right-most dependency.
                    total_deps = sum(
                        len(scheduler.tasks[dep].dependents) for dep in deps
                    )
                    y = sum(
                        self.y[dep] * len(scheduler.tasks[dep].dependents) / total_deps
                        for dep in deps
                    )
                    x = max(self.x[dep] for dep in deps) + 1
            else:
                # Root task (no dependencies): a fresh row in column 0.
                x = 0
                y = self.next_y
                self.next_y += 1

            if (x, y) in self.collision:
                # Cell occupied: place slightly below the previous occupant
                # and record the new position for the next collision here.
                old_x, old_y = x, y
                x, y = self.collision[(x, y)]
                y += 0.1
                self.collision[old_x, old_y] = (x, y)
            else:
                self.collision[(x, y)] = (x, y)

            self.x[key] = x
            self.y[key] = y
            self.index[key] = self.next_index
            self.next_index = self.next_index + 1
            self.new.append(key)
            for dep in deps:
                edge = (dep, key)
                self.index_edge[edge] = self.next_edge_index
                self.next_edge_index += 1
                self.new_edges.append(edge)

    def transition(self, key, start, finish, *args, **kwargs):
        """Record a task state change; "forgotten" hides the task and its edges."""
        if finish != "forgotten":
            self.state_updates.append((self.index[key], finish))
        else:
            self.visible_updates.append((self.index[key], "False"))
            task = self.scheduler.tasks[key]
            # Hide edges in both directions (to dependents and from
            # dependencies), dropping their index entries.
            for dep in task.dependents:
                edge = (key, dep.key)
                self.visible_edge_updates.append((self.index_edge.pop(edge), "False"))
            for dep in task.dependencies:
                self.visible_edge_updates.append(
                    (self.index_edge.pop((dep.key, key)), "False")
                )

            # Free the task's grid cell; the cell may already be gone if it
            # was re-assigned through collision handling.
            try:
                del self.collision[(self.x[key], self.y[key])]
            except KeyError:
                pass

            for collection in [self.x, self.y, self.index]:
                del collection[key]

    def reset_index(self):
        """Reset the index and refill new and new_edges

        From time to time TaskGraph wants to remove invisible nodes and reset
        all of its indices.  This helps.
        """
        self.new = []
        self.new_edges = []
        self.visible_updates = []
        self.state_updates = []
        self.visible_edge_updates = []

        self.index = {}
        self.next_index = 0
        self.index_edge = {}
        self.next_edge_index = 0

        # Re-enumerate every still-placed task and its incoming edges so the
        # dashboard can rebuild its view from scratch.
        for key in self.x:
            self.index[key] = self.next_index
            self.next_index += 1
            self.new.append(key)
            for dep in self.scheduler.tasks[key].dependencies:
                edge = (dep.key, key)
                self.index_edge[edge] = self.next_edge_index
                self.next_edge_index += 1
                self.new_edges.append(edge)
|
GraphLayout
|
python
|
huggingface__transformers
|
src/transformers/models/sam2/modeling_sam2.py
|
{
"start": 26633,
"end": 28417
}
|
class ____(Sam2PreTrainedModel):
    """Vision encoder: a backbone (built via AutoModel from the config) whose
    intermediate feature maps are refined by an FPN-style neck into multi-scale
    features plus matching position encodings."""

    config_class = Sam2VisionConfig
    main_input_name = "pixel_values"
    # Module classes whose outputs are recorded as hidden_states/attentions —
    # presumably consumed by the @check_model_inputs decorator (TODO confirm).
    _can_record_outputs = {
        "hidden_states": Sam2MultiScaleBlock,
        "attentions": Sam2MultiScaleAttention,
    }

    def __init__(self, config: Sam2VisionConfig):
        super().__init__(config)
        self.config = config
        # Backbone is instantiated dynamically from the nested backbone config.
        self.backbone = AutoModel.from_config(config.backbone_config)
        self.neck = Sam2VisionNeck(config)
        # How many FPN levels to expose to downstream consumers.
        self.num_feature_levels = config.num_feature_levels
        self.post_init()

    def get_input_embeddings(self):
        # Delegate to the wrapped backbone.
        return self.backbone.get_input_embeddings()

    @check_model_inputs()
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> Union[tuple, Sam2VisionEncoderOutput]:
        """Encode ``pixel_values`` into multi-scale FPN features.

        Raises ValueError when ``pixel_values`` is missing.
        """
        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")

        # Forward through backbone
        backbone_output = self.backbone(pixel_values, **kwargs)
        hidden_states = backbone_output.last_hidden_state
        intermediate_hidden_states = backbone_output.intermediate_hidden_states

        fpn_hidden_states, fpn_position_encoding = self.neck(intermediate_hidden_states)

        # Select last `num_feature_levels` feature levels from FPN and reverse order to get features from high to low resolution
        fpn_hidden_states = fpn_hidden_states[-self.num_feature_levels :][::-1]
        fpn_position_encoding = fpn_position_encoding[-self.num_feature_levels :][::-1]

        return Sam2VisionEncoderOutput(
            last_hidden_state=hidden_states,
            fpn_hidden_states=fpn_hidden_states,
            fpn_position_encoding=fpn_position_encoding,
        )
|
Sam2VisionModel
|
python
|
allegroai__clearml
|
clearml/backend_api/services/v2_20/auth.py
|
{
"start": 18944,
"end": 20455
}
|
class ____(Request):
    """
    Get a token based on supplied credentials (key/secret).
    Intended for use by users with key/secret credentials that wish to obtain a token
    for use with other services.

    :param expiration_sec: Requested token expiration time in seconds. Not
        guaranteed, might be overridden by the service
    :type expiration_sec: int
    """

    # Routing metadata used by the API client to address this request.
    _service = "auth"
    _action = "login"
    _version = "2.20"
    # JSON schema for the request payload (appears auto-generated from the
    # service definition).
    _schema = {
        "definitions": {},
        "properties": {
            "expiration_sec": {
                "description": "Requested token expiration time in seconds. Not guaranteed, might be overridden by the service",
                "type": ["integer", "null"],
            }
        },
        "type": "object",
    }

    def __init__(self, expiration_sec: Optional[int] = None, **kwargs: Any) -> None:
        super(LoginRequest, self).__init__(**kwargs)
        self.expiration_sec = expiration_sec

    @schema_property("expiration_sec")
    def expiration_sec(self) -> Optional[int]:
        return self._property_expiration_sec

    @expiration_sec.setter
    def expiration_sec(self, value: Optional[int]) -> None:
        # None clears the property.  Integral floats (e.g. 3600.0) are coerced
        # to int before the strict integer type check.
        if value is None:
            self._property_expiration_sec = None
            return
        if isinstance(value, float) and value.is_integer():
            value = int(value)
        self.assert_isinstance(value, "expiration_sec", six.integer_types)
        self._property_expiration_sec = value
|
LoginRequest
|
python
|
pytorch__pytorch
|
torch/testing/_internal/distributed/rpc/dist_autograd_test.py
|
{
"start": 99613,
"end": 101859
}
|
class ____(RpcAgentTestFixture):
    """Distributed-autograd tests run against the faulty RPC agent, which
    deliberately fails retryable messages (see faulty_rpc_agent_test_fixture.py)."""

    # Reusing a simplified helper function from DistAutogradTest to ensure
    # autograd context is successfully cleaned up even when RPCs are failing.
    def context_cleanup_test_helper(self, rpc_args, func):
        initialize_pg(self.file_init_method, self.rank, self.world_size)
        # test that in dist autograd, in the case that tensors communicated over RPC do
        # NOT require grad, we still cleanup the dist autograd contexts created
        # on other nodes. This is because the autograd context is still
        # communicated over RPC even if tensor arguments do not require grad, as
        # it is possible that the response could.
        dst_ranks = {rank for rank in range(self.world_size) if rank != self.rank}
        with dist_autograd.context() as context_id:
            for dst_rank in dst_ranks:
                rpc.rpc_sync(worker_name(dst_rank), func, args=rpc_args)
                rpc.rpc_sync(worker_name(dst_rank), _set_rpc_done, args=(context_id, 1))
        # the thread's context id should be cleaned up
        with self.assertRaises(RuntimeError):
            dist_autograd._retrieve_context(context_id)
        # Ensure all peers have finished mutating the
        # `known_context_ids` set.
        dist.barrier()
        # check that all contexts have been cleaned up.
        success = _all_contexts_cleaned_up()
        self.assertTrue(success)

    # no faulty_messages defined so this fails all retryable messages - see
    # faulty_rpc_agent_test_fixture.py for the list of retryable messages.
    @dist_init
    def test_context_cleanup_tensor_with_grad(self):
        t1 = torch.ones(3, 3, requires_grad=True)
        t2 = torch.zeros(3, 3, requires_grad=True)
        self.context_cleanup_test_helper(rpc_args=(t1, t2), func=torch.add)

    @dist_init
    def test_verify_backend_options(self):
        # Sanity-check that the fixture wired up the faulty TensorPipe backend
        # with the expected option values.
        self.assertEqual(
            self.rpc_backend, rpc.backend_registry.BackendType.FAULTY_TENSORPIPE
        )
        self.assertEqual(self.rpc_backend_options.num_worker_threads, 8)
        self.assertEqual(self.rpc_backend_options.num_fail_sends, 3)
        self.assertEqual(len(self.rpc_backend_options.messages_to_fail), 4)
|
FaultyAgentDistAutogradTest
|
python
|
astropy__astropy
|
astropy/visualization/scripts/tests/test_fits2bitmap.py
|
{
"start": 395,
"end": 2300
}
|
class ____:
    """Tests for fits2bitmap (Python API and command-line entry point)."""

    def setup_class(self):
        # Shared fixture: a 128x128 integer ramp written to a FITS file in
        # each test's tmp_path.
        self.filename = "test.fits"
        self.array = np.arange(16384).reshape((128, 128))

    def test_function(self, tmp_path):
        # Direct call through the Python API.
        filename = tmp_path / self.filename
        fits.writeto(filename, self.array)
        fits2bitmap(filename)

    def test_script(self, tmp_path):
        # CLI entry point; "-e 0" selects the primary HDU.
        filename = str(tmp_path / self.filename)
        fits.writeto(filename, self.array)
        main([filename, "-e", "0"])

    def test_exten_num(self, tmp_path):
        # Select an image extension by number.
        filename = str(tmp_path / self.filename)
        hdu1 = fits.PrimaryHDU()
        hdu2 = fits.ImageHDU(self.array)
        hdulist = fits.HDUList([hdu1, hdu2])
        hdulist.writeto(filename)
        main([filename, "-e", "1"])

    def test_exten_name(self, tmp_path):
        # Select an image extension by its EXTNAME keyword.
        filename = str(tmp_path / self.filename)
        hdu1 = fits.PrimaryHDU()
        extname = "SCI"
        hdu2 = fits.ImageHDU(self.array)
        hdu2.header["EXTNAME"] = extname
        hdulist = fits.HDUList([hdu1, hdu2])
        hdulist.writeto(filename)
        main([filename, "-e", extname])

    @pytest.mark.parametrize("file_exten", [".gz", ".bz2"])
    def test_compressed_fits(self, tmp_path, file_exten):
        # Compressed FITS files should be handled transparently.
        filename = str(tmp_path / f"test.fits{file_exten}")
        fits.writeto(filename, self.array)
        main([filename, "-e", "0"])

    def test_orientation(self, tmp_path):
        """
        Regression test to check the image vertical orientation/origin.
        """
        filename = str(tmp_path / self.filename)
        out_filename = "fits2bitmap_test.png"
        out_filename = str(tmp_path / out_filename)
        data = np.zeros((32, 32))
        data[0:16, :] = 1.0
        fits.writeto(filename, data)
        main([filename, "-e", "0", "-o", out_filename])
        img = mpimg.imread(out_filename)
        # FITS row 0 is the image bottom: the bright lower half must land at
        # the bottom of the PNG, leaving the top-left pixel dark.
        assert img[0, 0, 0] == 0
        assert img[31, 31, 0] == 1
|
TestFits2Bitmap
|
python
|
walkccc__LeetCode
|
solutions/3296. Minimum Number of Seconds to Make Mountain Height Zero/3296.py
|
{
"start": 0,
"end": 811
}
|
class ____:
    def minNumberOfSeconds(
        self,
        mountainHeight: int,
        workerTimes: list[int]
    ) -> int:
        """Return the minimum number of seconds needed for the workers,
        working in parallel, to reduce the mountain height to zero.

        Binary-searches the answer: the total height reducible in `m` seconds
        is monotonically non-decreasing in `m`.
        """
        def getReducedHeight(m: int) -> int:
            """Returns the total height reduced by all workers in `m` seconds."""
            # The height `x` that a worker with working time `w` reduces in
            # `m` seconds satisfies:
            #   w * (1 + 2 + ... + x) <= m
            #   (1 + x) * x / 2 <= m / w
            #   x <= (-1 + sqrt(1 + 8 * m / w)) / 2
            # Flooring 8*m/w inside the radical is exact because x is an
            # integer, and math.isqrt keeps the arithmetic in exact integers.
            # BUGFIX: math.sqrt loses precision once 1 + 8*m//w exceeds 2**53
            # (reachable at the problem's upper constraints), which could
            # over/under-count a worker's contribution by one.
            return sum((math.isqrt(1 + 8 * m // workerTime) - 1) // 2
                       for workerTime in workerTimes)

        l = 1
        # Upper bound: the fastest worker alone clears the whole mountain in
        # w_min * H * (H + 1) / 2 seconds.
        r = min(workerTimes) * mountainHeight * (mountainHeight + 1) // 2
        return bisect.bisect_left(range(l, r), mountainHeight,
                                  key=getReducedHeight) + l
|
Solution
|
python
|
getsentry__sentry
|
src/sentry/testutils/cases.py
|
{
"start": 90063,
"end": 91600
}
|
class ____(APITestCase):
    """Base test case for creating repositories through the organization
    repositories API; subclasses supply provider-specific HTTP stubs."""

    def setUp(self):
        super().setUp()
        self.login_as(self.user)

    @pytest.fixture(autouse=True)
    def responses_context(self):
        # Run every test inside a fresh `responses` mock so registered HTTP
        # stubs cannot leak between tests.
        with responses.mock:
            yield

    def add_create_repository_responses(self, repository_config):
        # Subclasses must register the provider-specific mocked responses.
        raise NotImplementedError(f"implement for {type(self).__module__}.{type(self).__name__}")

    @assume_test_silo_mode(SiloMode.REGION)
    def create_repository(
        self,
        repository_config,
        integration_id,
        organization_slug=None,
        add_responses=True,
    ):
        """POST a repository-creation request and return the raw response.

        `integration_id` is optional; when falsy the "installation" field is
        omitted from the payload.
        """
        if add_responses:
            self.add_create_repository_responses(repository_config)
        if not integration_id:
            data = {
                "provider": self.provider_name,
                "identifier": repository_config["id"],
            }
        else:
            data = {
                "provider": self.provider_name,
                "installation": integration_id,
                "identifier": repository_config["id"],
            }

        response = self.client.post(
            path=reverse(
                "sentry-api-0-organization-repositories",
                args=[organization_slug or self.organization.slug],
            ),
            data=data,
        )
        return response

    def assert_error_message(self, response, error_type, error_message):
        # Expects the endpoint's error payload shape:
        # {"error_type": ..., "errors": {"__all__": [...]}}.
        assert response.data["error_type"] == error_type
        assert error_message in response.data["errors"]["__all__"]
|
IntegrationRepositoryTestCase
|
python
|
tornadoweb__tornado
|
tornado/test/util_test.py
|
{
"start": 1787,
"end": 1875
}
|
class ____(TestConfig3):
    # Concrete variant of TestConfig3; initialize() simply records its single
    # option on the instance (presumably for Configurable-selection tests).
    def initialize(self, a=None):
        self.a = a
|
TestConfig3A
|
python
|
langchain-ai__langchain
|
libs/core/langchain_core/structured_query.py
|
{
"start": 2668,
"end": 2784
}
|
class ____(str, Enum):
    """Enumerator of the operations."""

    # str mixin: members compare equal to, and serialize as, their raw values.
    AND = "and"
    OR = "or"
    NOT = "not"
|
Operator
|
python
|
streamlit__streamlit
|
lib/streamlit/testing/v1/element_tree.py
|
{
"start": 19251,
"end": 19442
}
|
class ____(Markdown):
    # Markdown subclass whose only difference is tagging itself with the
    # "caption" element type.
    def __init__(self, proto: MarkdownProto, root: ElementTree) -> None:
        super().__init__(proto, root)
        self.type = "caption"
@dataclass(repr=False)
|
Caption
|
python
|
django-compressor__django-compressor
|
compressor/tests/test_offline.py
|
{
"start": 16111,
"end": 16261
}
|
class ____(OfflineTestCaseMixin, TestCase):
    # Attribute-driven offline-compression test: the mixin runs compression
    # against the templates directory below.
    templates_dir = "test_templatetag"
    # NOTE(review): presumably the content hash the mixin expects for these
    # templates -- must be regenerated if the templates change.
    expected_hash = "2bb88185b4f5"
|
OfflineCompressTemplateTagTestCase
|
python
|
django-extensions__django-extensions
|
django_extensions/collision_resolvers.py
|
{
"start": 5282,
"end": 5661
}
|
class ____(AppNameCR):
    """
    Collision resolver which transform pair (app name, model_name) to alias "{app_name}_{model_name}".
    Model from last application in alphabetical order is selected.
    Result is different than FullPathCR, when model has app_label other than current app.
    """  # noqa: E501

    # Format template presumably consumed by the AppNameCR base class when
    # building the alias (TODO confirm against AppNameCR).
    MODIFICATION_STRING = "{app_name}_{model_name}"
|
AppNamePrefixCR
|
python
|
matplotlib__matplotlib
|
lib/matplotlib/animation.py
|
{
"start": 3122,
"end": 6836
}
|
class ____(abc.ABC):
    """
    Abstract base class for writing movies, providing a way to grab frames by
    calling `~AbstractMovieWriter.grab_frame`.

    `setup` is called to start the process and `finish` is called afterwards.
    `saving` is provided as a context manager to facilitate this process as ::

        with moviewriter.saving(fig, outfile='myfile.mp4', dpi=100):
            # Iterate over frames
            moviewriter.grab_frame(**savefig_kwargs)

    The use of the context manager ensures that `setup` and `finish` are
    performed as necessary.

    An instance of a concrete subclass of this class can be given as the
    ``writer`` argument of `Animation.save()`.
    """

    def __init__(self, fps=5, metadata=None, codec=None, bitrate=None):
        # fps: frames per second of the output movie.
        self.fps = fps
        self.metadata = metadata if metadata is not None else {}
        # Fall back to rcParams when codec/bitrate are not given explicitly.
        self.codec = mpl._val_or_rc(codec, 'animation.codec')
        self.bitrate = mpl._val_or_rc(bitrate, 'animation.bitrate')

    @abc.abstractmethod
    def setup(self, fig, outfile, dpi=None):
        """
        Setup for writing the movie file.

        Parameters
        ----------
        fig : `~matplotlib.figure.Figure`
            The figure object that contains the information for frames.
        outfile : str
            The filename of the resulting movie file.
        dpi : float, default: ``fig.dpi``
            The DPI (or resolution) for the file.  This controls the size
            in pixels of the resulting movie file.
        """
        # Check that path is valid (raises if the parent directory is missing).
        Path(outfile).parent.resolve(strict=True)
        self.outfile = outfile
        self.fig = fig
        if dpi is None:
            dpi = self.fig.dpi
        self.dpi = dpi

    @property
    def frame_size(self):
        """A tuple ``(width, height)`` in pixels of a movie frame."""
        w, h = self.fig.get_size_inches()
        return int(w * self.dpi), int(h * self.dpi)

    def _supports_transparency(self):
        """
        Whether this writer supports transparency.

        Writers may consult output file type and codec to determine this at runtime.
        """
        return False

    @abc.abstractmethod
    def grab_frame(self, **savefig_kwargs):
        """
        Grab the image information from the figure and save as a movie frame.

        All keyword arguments in *savefig_kwargs* are passed on to the
        `~.Figure.savefig` call that saves the figure.  However, several
        keyword arguments that are supported by `~.Figure.savefig` may not be
        passed as they are controlled by the MovieWriter:

        - *dpi*, *bbox_inches*: These may not be passed because each frame of the
          animation much be exactly the same size in pixels.
        - *format*: This is controlled by the MovieWriter.
        """

    @abc.abstractmethod
    def finish(self):
        """Finish any processing for writing the movie."""

    @contextlib.contextmanager
    def saving(self, fig, outfile, dpi, *args, **kwargs):
        """
        Context manager to facilitate writing the movie file.

        ``*args, **kw`` are any parameters that should be passed to `setup`.
        """
        # A tight bbox would make frame sizes vary, which movie encoders
        # cannot handle; force it off for the duration of the save.
        if mpl.rcParams['savefig.bbox'] == 'tight':
            _log.info("Disabling savefig.bbox = 'tight', as it may cause "
                      "frame size to vary, which is inappropriate for "
                      "animation.")

        # This particular sequence is what contextlib.contextmanager wants
        self.setup(fig, outfile, dpi, *args, **kwargs)
        with mpl.rc_context({'savefig.bbox': None}):
            try:
                yield self
            finally:
                # Always finalize the movie, even if frame grabbing failed.
                self.finish()
|
AbstractMovieWriter
|
python
|
hyperopt__hyperopt
|
hyperopt/algobase.py
|
{
"start": 294,
"end": 8559
}
|
class ____:
    """Iterative (stack-based) evaluator for a pyll expression graph.

    Nodes are evaluated dependencies-first without Python-level recursion,
    with optional defensive deep-copying of node inputs (`deepcopy_inputs`)
    and optional garbage collection of intermediate memo entries (`memo_gc`).
    """

    def __init__(self, expr, deepcopy_inputs=False, max_program_len=None, memo_gc=True):
        """
        Parameters
        ----------
        expr - pyll Apply instance to be evaluated

        deepcopy_inputs - deepcopy inputs to every node prior to calling that
            node's function on those inputs. If this leads to a different
            return value, then some function (XXX add more complete DebugMode
            functionality) in your graph is modifying its inputs and causing
            mis-calculation. XXX: This is not a fully-functional DebugMode
            because if the offender happens on account of the toposort order
            to be the last user of said input, then it will not be detected as
            a potential problem.

        max_program_len : int (default pyll.base.DEFAULT_MAX_PROGRAM_LEN)
            If more than this many nodes are evaluated in the course of
            evaluating `expr`, then evaluation is aborted under the assumption
            that an infinite recursion is underway.

        memo_gc : bool
            If True, values computed for apply nodes within `expr` may be
            cleared during computation. The bookkeeping required to do this
            takes a bit of extra time, but usually no big deal.
        """
        self.expr = pyll.as_apply(expr)
        if deepcopy_inputs not in (0, 1, False, True):
            # -- I've been calling rec_eval(expr, memo) by accident a few times
            # this error would have been appreciated.
            #
            # TODO: Good candidate for Py3K keyword-only argument
            raise ValueError("deepcopy_inputs should be bool", deepcopy_inputs)
        self.deepcopy_inputs = deepcopy_inputs
        if max_program_len is None:
            self.max_program_len = pyll.base.DEFAULT_MAX_PROGRAM_LEN
        else:
            self.max_program_len = max_program_len
        self.memo_gc = memo_gc

    def eval_nodes(self, memo=None):
        """Evaluate self.expr and return the completed memo dict.

        `memo` maps Apply nodes to computed values; a caller-supplied memo
        pre-seeds the computation and is copied, not mutated.
        """
        if memo is None:
            memo = {}
        else:
            memo = dict(memo)

        # TODO: optimize dfs to not recurse past the items in memo
        #       this is especially important for evaluating Lambdas
        #       which cause rec_eval to recurse
        #
        # N.B. that Lambdas may expand the graph during the evaluation
        #      so that this iteration may be an incomplete
        if self.memo_gc:
            # clients[n] = set of nodes that consume n's value; used by
            # set_in_memo to know when a value can be dropped.
            clients = self.clients = {}
            for aa in pyll.dfs(self.expr):
                clients.setdefault(aa, set())
                for ii in aa.inputs():
                    clients.setdefault(ii, set()).add(aa)

        todo = deque([self.expr])
        while todo:
            if len(todo) > self.max_program_len:
                raise RuntimeError("Probably infinite loop in document")
            node = todo.pop()
            if node in memo:
                # -- we've already computed this, move on.
                continue

            # -- different kinds of nodes are treated differently:
            if node.name == "switch":
                waiting_on = self.on_switch(memo, node)
                if waiting_on is None:
                    continue
            elif isinstance(node, pyll.Literal):
                # -- constants go straight into the memo
                self.set_in_memo(memo, node, node.obj)
                continue
            else:
                # -- normal instruction-type nodes have inputs
                waiting_on = [v for v in node.inputs() if v not in memo]

            if waiting_on:
                # -- Necessary inputs have yet to be evaluated.
                #    push the node back in the queue, along with the
                #    inputs it still needs
                todo.append(node)
                todo.extend(waiting_on)
            else:
                rval = self.on_node(memo, node)
                if isinstance(rval, pyll.Apply):
                    # -- if an instruction returns a Pyll apply node
                    # it means evaluate that too. Lambdas do this.
                    #
                    # XXX: consider if it is desirable, efficient, buggy
                    #      etc. to keep using the same memo dictionary.
                    #      I think it is OK because by using the same
                    #      dictionary all of the nodes are stored in the memo
                    #      so all keys are preserved until the entire outer
                    #      function returns
                    #
                    # BUGFIX: was `self.deep_copy_inputs`, an attribute that
                    # is never set (__init__ stores `deepcopy_inputs`), so
                    # this path raised AttributeError whenever a node
                    # returned an Apply (e.g. Lambda expansion).
                    evaluator = self.__class__(
                        rval, self.deepcopy_inputs, self.max_program_len, self.memo_gc
                    )
                    # NOTE(review): relies on the instance being callable;
                    # __call__ is not defined in this class -- confirm it is
                    # provided elsewhere (or by a subclass).
                    foo = evaluator(memo)
                    self.set_in_memo(memo, node, foo)
                else:
                    self.set_in_memo(memo, node, rval)
        return memo

    def set_in_memo(self, memo, k, v):
        """Assign memo[k] = v

        This is implementation optionally drops references to the arguments
        "clients" required to compute apply-node `k`, which allows those
        objects to be garbage-collected. This feature is enabled by
        `self.memo_gc`.
        """
        if self.memo_gc:
            assert v is not pyll.base.GarbageCollected
            memo[k] = v
            for ii in k.inputs():
                # -- if all clients of ii are already in the memo
                #    then we can free memo[ii] by replacing it
                #    with a dummy symbol
                if all(iic in memo for iic in self.clients[ii]):
                    memo[ii] = pyll.base.GarbageCollected
        else:
            memo[k] = v

    def on_switch(self, memo, node):
        # -- pyll.base.switch is a control-flow expression.
        #
        #    It's signature is
        #       int, option0, option1, option2, ..., optionN
        #
        #    The semantics of a switch node are to only evaluate the option
        #    corresponding to the value of the leading integer. (Think of
        #    a switch block in the C language.)
        #
        #    This is a helper-function to self.eval_nodes.  It returns None,
        #    or a list of apply-nodes required to evaluate the given switch
        #    node.
        #
        #    When it returns None, the memo has been updated so that
        #    memo[`node`] has been assigned the computed value for the given
        #    switch node.
        #
        switch_i_var = node.pos_args[0]
        if switch_i_var in memo:
            switch_i = memo[switch_i_var]
            try:
                int(switch_i)
            except Exception:
                # Narrowed from a bare `except:` so KeyboardInterrupt and
                # SystemExit are no longer converted into TypeError.
                raise TypeError("switch argument was", switch_i)
            if switch_i != int(switch_i) or switch_i < 0:
                raise ValueError("switch pos must be positive int", switch_i)
            rval_var = node.pos_args[switch_i + 1]
            if rval_var in memo:
                self.set_in_memo(memo, node, memo[rval_var])
                return
            else:
                return [rval_var]
        else:
            return [switch_i_var]

    def on_node(self, memo, node):
        """Apply the node's implementation to its (already computed) args."""
        # -- Retrieve computed arguments of apply node
        args = _args = [memo[v] for v in node.pos_args]
        kwargs = _kwargs = {k: memo[v] for (k, v) in node.named_args}

        if self.memo_gc:
            # -- Ensure no computed argument has been (accidentally) freed for
            #    garbage-collection.
            for aa in args + list(kwargs.values()):
                assert aa is not pyll.base.GarbageCollected

        if self.deepcopy_inputs:
            # -- I think this is supposed to be skipped if node.pure == True
            #    because that attribute is supposed to mark the node as having
            #    no side-effects that affect expression-evaluation.
            #
            #    HOWEVER That has not been tested in a while, and it's hard to
            #    verify (with e.g. unit tests) that a node marked "pure" isn't
            #    lying. So we hereby ignore the `pure` attribute and copy
            #    everything to be on the safe side.
            args = copy.deepcopy(_args)
            kwargs = copy.deepcopy(_kwargs)

        return pyll.scope._impls[node.name](*args, **kwargs)
|
ExprEvaluator
|
python
|
walkccc__LeetCode
|
solutions/2148. Count Elements With Strictly Smaller and Greater Elements/2148.py
|
{
"start": 0,
"end": 151
}
|
class ____:
    def countElements(self, nums: list[int]) -> int:
        """Count the values of `nums` lying strictly between its min and max."""
        lo = min(nums)
        hi = max(nums)
        count = 0
        for value in nums:
            if lo < value < hi:
                count += 1
        return count
|
Solution
|
python
|
pypa__pip
|
src/pip/_internal/network/session.py
|
{
"start": 8510,
"end": 9950
}
|
class ____:
    """Mixin to add the ``ssl_context`` constructor argument to HTTP adapters.

    The additional argument is forwarded directly to the pool manager. This allows us
    to dynamically decide what SSL store to use at runtime, which is used to implement
    the optional ``truststore`` backend.
    """

    def __init__(
        self,
        *,
        ssl_context: SSLContext | None = None,
        **kwargs: Any,
    ) -> None:
        # Store the context before chaining to the adapter's __init__, since
        # that presumably triggers init_poolmanager() below -- confirm against
        # the concrete adapter base class.
        self._ssl_context = ssl_context
        super().__init__(**kwargs)

    def init_poolmanager(
        self,
        connections: int,
        maxsize: int,
        block: bool = DEFAULT_POOLBLOCK,
        **pool_kwargs: Any,
    ) -> PoolManager:
        # setdefault: an ssl_context passed explicitly by the caller wins.
        if self._ssl_context is not None:
            pool_kwargs.setdefault("ssl_context", self._ssl_context)
        return super().init_poolmanager(  # type: ignore[misc]
            connections=connections,
            maxsize=maxsize,
            block=block,
            **pool_kwargs,
        )

    def proxy_manager_for(self, proxy: str, **proxy_kwargs: Any) -> ProxyManager:
        # Proxy manager replaces the pool manager, so inject our SSL
        # context here too. https://github.com/pypa/pip/issues/13288
        if self._ssl_context is not None:
            proxy_kwargs.setdefault("ssl_context", self._ssl_context)
        return super().proxy_manager_for(proxy, **proxy_kwargs)  # type: ignore[misc]
|
_SSLContextAdapterMixin
|
python
|
ray-project__ray
|
rllib/utils/exploration/tests/test_explorations.py
|
{
"start": 1966,
"end": 3784
}
|
class ____(unittest.TestCase):
    """
    Tests all Exploration components and the deterministic flag for
    compute_action calls.
    """

    @classmethod
    def setUpClass(cls):
        # One Ray runtime shared by all tests in this class.
        ray.init()

    @classmethod
    def tearDownClass(cls):
        ray.shutdown()

    def test_impala(self):
        # Old API stack (both new-stack flags disabled), single local worker.
        config = (
            impala.IMPALAConfig()
            .api_stack(
                enable_rl_module_and_learner=False,
                enable_env_runner_and_connector_v2=False,
            )
            .environment("CartPole-v1")
            .env_runners(num_env_runners=0)
            .resources(num_gpus=0)
        )
        do_test_explorations(
            config,
            np.array([0.0, 0.1, 0.0, 0.0]),
            prev_a=np.array(0),
        )

    def test_ppo_discr(self):
        # PPO with a discrete action space.
        config = (
            ppo.PPOConfig()
            .api_stack(
                enable_env_runner_and_connector_v2=False,
                enable_rl_module_and_learner=False,
            )
            .environment("CartPole-v1")
            .env_runners(num_env_runners=0)
        )
        do_test_explorations(
            config,
            np.array([0.0, 0.1, 0.0, 0.0]),
            prev_a=np.array(0),
        )

    def test_ppo_cont(self):
        # PPO with a continuous action space; also checks the mean action.
        config = (
            ppo.PPOConfig()
            .api_stack(
                enable_env_runner_and_connector_v2=False,
                enable_rl_module_and_learner=False,
            )
            .environment("Pendulum-v1")
            .env_runners(num_env_runners=0)
        )
        do_test_explorations(
            config,
            np.array([0.0, 0.1, 0.0]),
            prev_a=np.array([0.0]),
            expected_mean_action=0.0,
        )
# Allow running this test module directly by delegating to pytest's CLI.
if __name__ == "__main__":
    import pytest

    sys.exit(pytest.main(["-v", __file__]))
|
TestExplorations
|
python
|
pypa__pip
|
tests/lib/requests_mocks.py
|
{
"start": 617,
"end": 1028
}
|
class ____:
    """Test double mimicking the subset of an HTTP response object that the
    tests consume (content, status, headers, history)."""

    # Assigned by test helpers after construction, not by __init__.
    request: MockRequest
    connection: MockConnection
    url: str

    def __init__(self, contents: bytes) -> None:
        # Raw stream and eagerly-available body hold the same bytes.
        self.raw = FakeStream(contents)
        self.content = contents
        # Defaults model a plain successful response.
        self.reason = "OK"
        self.status_code = 200
        self.headers = {"Content-Length": str(len(contents))}
        self.history: list[MockResponse] = []
        self.from_cache = False
|
MockResponse
|
python
|
nedbat__coveragepy
|
coverage/html.py
|
{
"start": 7277,
"end": 8499
}
|
class ____:
    """A file we're considering reporting."""

    def __init__(self, fr: FileReporter, analysis: Analysis) -> None:
        self.fr = fr                # reporter for the source file
        self.analysis = analysis    # coverage analysis results for it
        # Flattened relative path used as the base of the HTML page name.
        self.rootname = flat_rootname(fr.relative_filename())
        self.html_filename = self.rootname + ".html"
        # Prev/next page links; filled in once the page ordering is known.
        self.prev_html = self.next_html = ""
# Alphabet of characters that are safe to embed directly in HTML.
HTML_SAFE = string.ascii_letters + string.digits + "!#$%'()*+,-./:;=?@[]^_`{|}~"


@functools.cache
def encode_int(n: int) -> str:
    """Render non-negative integer `n` as a compact base-len(HTML_SAFE)
    string, least-significant digit first."""
    base = len(HTML_SAFE)
    if not n:
        return HTML_SAFE[0]
    chars = []
    while n:
        chars.append(HTML_SAFE[n % base])
        n //= base
    return "".join(chars)
def copy_with_cache_bust(src: str, dest_dir: str) -> str:
    """Copy `src` to `dest_dir`, adding a hash to the name.

    Returns the updated destination file name with hash.
    """
    with open(src, "rb") as f:
        text = f.read()
    # The first 8 hex digits of the content hash become the cache-busting
    # token, inserted before the extension (foo.css -> foo_cb_<hash>.css).
    h = Hasher()
    h.update(text)
    cache_bust = h.hexdigest()[:8]
    src_base = os.path.basename(src)
    # NOTE(review): replaces every "." in the name, so a multi-dot filename
    # would get the token inserted more than once -- confirm inputs are
    # single-extension names.
    dest = src_base.replace(".", f"_cb_{cache_bust}.")
    with open(os.path.join(dest_dir, dest), "wb") as f:
        f.write(text)
    return dest
|
FileToReport
|
python
|
jazzband__django-pipeline
|
pipeline/compressors/cssmin.py
|
{
"start": 91,
"end": 290
}
|
class ____(SubProcessCompressor):
    """Compress CSS by piping it through the external cssmin binary."""

    def compress_css(self, css):
        # Binary and arguments come from pipeline settings; execution is
        # delegated to the SubProcessCompressor base class.
        command = (settings.CSSMIN_BINARY, settings.CSSMIN_ARGUMENTS)
        return self.execute_command(command, css)
|
CSSMinCompressor
|
python
|
huggingface__transformers
|
src/transformers/models/lfm2_moe/modeling_lfm2_moe.py
|
{
"start": 19383,
"end": 22979
}
|
class ____(nn.Module):
    """Multi-headed attention from 'Attention Is All You Need' paper"""

    def __init__(self, config: Lfm2MoeConfig, layer_idx: int):
        super().__init__()
        self.config = config
        self.layer_idx = layer_idx
        # Fall back to hidden_size // num_heads when the config has no
        # explicit head_dim.
        self.head_dim = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads)
        # Query heads per key/value head (grouped-query attention ratio).
        self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads
        self.scaling = self.head_dim**-0.5
        self.is_causal = True
        self.q_proj = nn.Linear(config.hidden_size, config.num_attention_heads * self.head_dim, bias=False)
        self.k_proj = nn.Linear(config.hidden_size, config.num_key_value_heads * self.head_dim, bias=False)
        self.v_proj = nn.Linear(config.hidden_size, config.num_key_value_heads * self.head_dim, bias=False)
        # NOTE(review): rotary_fn is stored but forward() below calls
        # apply_rotary_pos_emb directly -- confirm whether this attribute is
        # consumed elsewhere.
        self.rotary_fn = apply_rotary_pos_emb
        self.out_proj = nn.Linear(config.num_attention_heads * self.head_dim, config.hidden_size, bias=False)
        # Per-head RMSNorm applied to queries and keys before rotary embedding.
        self.q_layernorm = Lfm2MoeRMSNorm(self.head_dim, eps=config.norm_eps)
        self.k_layernorm = Lfm2MoeRMSNorm(self.head_dim, eps=config.norm_eps)

    def forward(
        self,
        hidden_states: torch.Tensor,
        position_embeddings: tuple[torch.Tensor, torch.Tensor],
        attention_mask: Optional[torch.Tensor],
        past_key_values: Optional[Lfm2MoeHybridConvCache] = None,
        cache_position: Optional[torch.LongTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        **kwargs,
    ) -> tuple[torch.Tensor, Optional[torch.Tensor]]:
        """Return (attention output, attention weights-or-None)."""
        input_shape = hidden_states.shape[:-1]
        # Split the projection into heads: (..., num_heads, head_dim).
        hidden_shape = (*input_shape, -1, self.head_dim)

        query_states = self.q_layernorm(self.q_proj(hidden_states).view(*hidden_shape)).transpose(1, 2)
        key_states = self.k_layernorm(self.k_proj(hidden_states).view(*hidden_shape)).transpose(1, 2)
        value_states = self.v_proj(hidden_states).view(*hidden_shape).transpose(1, 2)

        cos, sin = position_embeddings
        query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)

        if past_key_values is not None:
            # Append this step's keys/values to the layer's cache.
            cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
            key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs)

        # Dispatch to the configured attention backend (eager by default).
        attention_interface: Callable = eager_attention_forward
        if self.config._attn_implementation != "eager":
            attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]

        attn_output, attn_weights = attention_interface(
            self,
            query_states,
            key_states,
            value_states,
            attention_mask,
            dropout=0.0,
            scaling=self.scaling,
            **kwargs,
        )

        # Merge heads back and project to the model dimension.
        attn_output = attn_output.reshape(*input_shape, -1).contiguous()
        output = self.out_proj(attn_output)
        return output, attn_weights
def apply_mask_to_padding_states(hidden_states, attention_mask):
    """
    Tunes out the hidden states for padding tokens, see https://github.com/state-spaces/mamba/issues/66
    """
    # NOTE: attention mask is a 2D boolean tensor
    if attention_mask is None:
        return hidden_states
    # Only mask when there is a real batch and sequence dimension to mask.
    if attention_mask.shape[0] > 1 and attention_mask.shape[1] > 1:
        original_dtype = hidden_states.dtype
        hidden_states = (hidden_states * attention_mask[:, :, None]).to(original_dtype)
    return hidden_states
# Fast-path availability: both causal-conv1d kernel entry points must have
# imported successfully (i.e. be non-None / truthy) for the fused path.
kernel_modules = (causal_conv1d_fn, causal_conv1d_update)
is_fast_path_available = all(kernel_modules)
|
Lfm2MoeAttention
|
python
|
simonw__datasette
|
datasette/filters.py
|
{
"start": 9064,
"end": 9528
}
|
class ____(InFilter):
    """Negated variant of InFilter: matches rows whose column value is NOT in
    the supplied list (values parsed by the inherited split_value)."""

    key = "notin"
    display = "not in"

    def where_clause(self, table, column, value, param_counter):
        # One numbered placeholder per value, continuing from param_counter.
        values = self.split_value(value)
        params = [f":p{param_counter + i}" for i in range(len(values))]
        sql = f"{escape_sqlite(column)} not in ({', '.join(params)})"
        return sql, values

    def human_clause(self, column, value):
        return f"{column} not in {json.dumps(self.split_value(value))}"
|
NotInFilter
|
python
|
scipy__scipy
|
scipy/stats/_continuous_distns.py
|
{
"start": 51910,
"end": 54155
}
|
class ____(rv_continuous):
r"""A double gamma continuous random variable.
The double gamma distribution is also known as the reflected gamma
distribution [1]_.
%(before_notes)s
Notes
-----
The probability density function for `dgamma` is:
.. math::
f(x, a) = \frac{1}{2\Gamma(a)} |x|^{a-1} \exp(-|x|)
for a real number :math:`x` and :math:`a > 0`. :math:`\Gamma` is the
gamma function (`scipy.special.gamma`).
`dgamma` takes ``a`` as a shape parameter for :math:`a`.
%(after_notes)s
References
----------
.. [1] Johnson, Kotz, and Balakrishnan, "Continuous Univariate
Distributions, Volume 1", Second Edition, John Wiley and Sons
(1994).
%(example)s
"""
def _shape_info(self):
return [_ShapeInfo("a", False, (0, np.inf), (False, False))]
def _rvs(self, a, size=None, random_state=None):
u = random_state.uniform(size=size)
gm = gamma.rvs(a, size=size, random_state=random_state)
return gm * np.where(u >= 0.5, 1, -1)
def _pdf(self, x, a):
# dgamma.pdf(x, a) = 1 / (2*gamma(a)) * abs(x)**(a-1) * exp(-abs(x))
ax = abs(x)
return 1.0/(2*sc.gamma(a))*ax**(a-1.0) * np.exp(-ax)
def _logpdf(self, x, a):
ax = abs(x)
return sc.xlogy(a - 1.0, ax) - ax - np.log(2) - sc.gammaln(a)
def _cdf(self, x, a):
return np.where(x > 0,
0.5 + 0.5*sc.gammainc(a, x),
0.5*sc.gammaincc(a, -x))
def _sf(self, x, a):
return np.where(x > 0,
0.5*sc.gammaincc(a, x),
0.5 + 0.5*sc.gammainc(a, -x))
def _entropy(self, a):
return stats.gamma._entropy(a) - np.log(0.5)
def _ppf(self, q, a):
return np.where(q > 0.5,
sc.gammaincinv(a, 2*q - 1),
-sc.gammainccinv(a, 2*q))
def _isf(self, q, a):
return np.where(q > 0.5,
-sc.gammaincinv(a, 2*q - 1),
sc.gammainccinv(a, 2*q))
def _stats(self, a):
mu2 = a*(a+1.0)
return 0.0, mu2, 0.0, (a+2.0)*(a+3.0)/mu2-3.0
dgamma = dgamma_gen(name='dgamma')
|
dgamma_gen
|
python
|
encode__httpx
|
httpx/_models.py
|
{
"start": 17449,
"end": 37994
}
|
class ____:
def __init__(
self,
status_code: int,
*,
headers: HeaderTypes | None = None,
content: ResponseContent | None = None,
text: str | None = None,
html: str | None = None,
json: typing.Any = None,
stream: SyncByteStream | AsyncByteStream | None = None,
request: Request | None = None,
extensions: ResponseExtensions | None = None,
history: list[Response] | None = None,
default_encoding: str | typing.Callable[[bytes], str] = "utf-8",
) -> None:
self.status_code = status_code
self.headers = Headers(headers)
self._request: Request | None = request
# When follow_redirects=False and a redirect is received,
# the client will set `response.next_request`.
self.next_request: Request | None = None
self.extensions = {} if extensions is None else dict(extensions)
self.history = [] if history is None else list(history)
self.is_closed = False
self.is_stream_consumed = False
self.default_encoding = default_encoding
if stream is None:
headers, stream = encode_response(content, text, html, json)
self._prepare(headers)
self.stream = stream
if isinstance(stream, ByteStream):
# Load the response body, except for streaming content.
self.read()
else:
# There's an important distinction between `Response(content=...)`,
# and `Response(stream=...)`.
#
# Using `content=...` implies automatically populated content headers,
# of either `Content-Length: ...` or `Transfer-Encoding: chunked`.
#
# Using `stream=...` will not automatically include any content headers.
#
# As an end-user you don't really need `stream=...`. It's only
# useful when creating response instances having received a stream
# from the transport API.
self.stream = stream
self._num_bytes_downloaded = 0
def _prepare(self, default_headers: dict[str, str]) -> None:
for key, value in default_headers.items():
# Ignore Transfer-Encoding if the Content-Length has been set explicitly.
if key.lower() == "transfer-encoding" and "content-length" in self.headers:
continue
self.headers.setdefault(key, value)
@property
def elapsed(self) -> datetime.timedelta:
"""
Returns the time taken for the complete request/response
cycle to complete.
"""
if not hasattr(self, "_elapsed"):
raise RuntimeError(
"'.elapsed' may only be accessed after the response "
"has been read or closed."
)
return self._elapsed
@elapsed.setter
def elapsed(self, elapsed: datetime.timedelta) -> None:
self._elapsed = elapsed
@property
def request(self) -> Request:
"""
Returns the request instance associated to the current response.
"""
if self._request is None:
raise RuntimeError(
"The request instance has not been set on this response."
)
return self._request
@request.setter
def request(self, value: Request) -> None:
self._request = value
@property
def http_version(self) -> str:
try:
http_version: bytes = self.extensions["http_version"]
except KeyError:
return "HTTP/1.1"
else:
return http_version.decode("ascii", errors="ignore")
@property
def reason_phrase(self) -> str:
try:
reason_phrase: bytes = self.extensions["reason_phrase"]
except KeyError:
return codes.get_reason_phrase(self.status_code)
else:
return reason_phrase.decode("ascii", errors="ignore")
@property
def url(self) -> URL:
"""
Returns the URL for which the request was made.
"""
return self.request.url
@property
def content(self) -> bytes:
if not hasattr(self, "_content"):
raise ResponseNotRead()
return self._content
@property
def text(self) -> str:
if not hasattr(self, "_text"):
content = self.content
if not content:
self._text = ""
else:
decoder = TextDecoder(encoding=self.encoding or "utf-8")
self._text = "".join([decoder.decode(self.content), decoder.flush()])
return self._text
@property
def encoding(self) -> str | None:
"""
Return an encoding to use for decoding the byte content into text.
The priority for determining this is given by...
* `.encoding = <>` has been set explicitly.
* The encoding as specified by the charset parameter in the Content-Type header.
* The encoding as determined by `default_encoding`, which may either be
a string like "utf-8" indicating the encoding to use, or may be a callable
which enables charset autodetection.
"""
if not hasattr(self, "_encoding"):
encoding = self.charset_encoding
if encoding is None or not _is_known_encoding(encoding):
if isinstance(self.default_encoding, str):
encoding = self.default_encoding
elif hasattr(self, "_content"):
encoding = self.default_encoding(self._content)
self._encoding = encoding or "utf-8"
return self._encoding
@encoding.setter
def encoding(self, value: str) -> None:
"""
Set the encoding to use for decoding the byte content into text.
If the `text` attribute has been accessed, attempting to set the
encoding will throw a ValueError.
"""
if hasattr(self, "_text"):
raise ValueError(
"Setting encoding after `text` has been accessed is not allowed."
)
self._encoding = value
@property
def charset_encoding(self) -> str | None:
"""
Return the encoding, as specified by the Content-Type header.
"""
content_type = self.headers.get("Content-Type")
if content_type is None:
return None
return _parse_content_type_charset(content_type)
def _get_content_decoder(self) -> ContentDecoder:
"""
Returns a decoder instance which can be used to decode the raw byte
content, depending on the Content-Encoding used in the response.
"""
if not hasattr(self, "_decoder"):
decoders: list[ContentDecoder] = []
values = self.headers.get_list("content-encoding", split_commas=True)
for value in values:
value = value.strip().lower()
try:
decoder_cls = SUPPORTED_DECODERS[value]
decoders.append(decoder_cls())
except KeyError:
continue
if len(decoders) == 1:
self._decoder = decoders[0]
elif len(decoders) > 1:
self._decoder = MultiDecoder(children=decoders)
else:
self._decoder = IdentityDecoder()
return self._decoder
@property
def is_informational(self) -> bool:
"""
A property which is `True` for 1xx status codes, `False` otherwise.
"""
return codes.is_informational(self.status_code)
@property
def is_success(self) -> bool:
"""
A property which is `True` for 2xx status codes, `False` otherwise.
"""
return codes.is_success(self.status_code)
@property
def is_redirect(self) -> bool:
"""
A property which is `True` for 3xx status codes, `False` otherwise.
Note that not all responses with a 3xx status code indicate a URL redirect.
Use `response.has_redirect_location` to determine responses with a properly
formed URL redirection.
"""
return codes.is_redirect(self.status_code)
@property
def is_client_error(self) -> bool:
"""
A property which is `True` for 4xx status codes, `False` otherwise.
"""
return codes.is_client_error(self.status_code)
@property
def is_server_error(self) -> bool:
"""
A property which is `True` for 5xx status codes, `False` otherwise.
"""
return codes.is_server_error(self.status_code)
@property
def is_error(self) -> bool:
"""
A property which is `True` for 4xx and 5xx status codes, `False` otherwise.
"""
return codes.is_error(self.status_code)
@property
def has_redirect_location(self) -> bool:
"""
Returns True for 3xx responses with a properly formed URL redirection,
`False` otherwise.
"""
return (
self.status_code
in (
# 301 (Cacheable redirect. Method may change to GET.)
codes.MOVED_PERMANENTLY,
# 302 (Uncacheable redirect. Method may change to GET.)
codes.FOUND,
# 303 (Client should make a GET or HEAD request.)
codes.SEE_OTHER,
# 307 (Equiv. 302, but retain method)
codes.TEMPORARY_REDIRECT,
# 308 (Equiv. 301, but retain method)
codes.PERMANENT_REDIRECT,
)
and "Location" in self.headers
)
def raise_for_status(self) -> Response:
"""
Raise the `HTTPStatusError` if one occurred.
"""
request = self._request
if request is None:
raise RuntimeError(
"Cannot call `raise_for_status` as the request "
"instance has not been set on this response."
)
if self.is_success:
return self
if self.has_redirect_location:
message = (
"{error_type} '{0.status_code} {0.reason_phrase}' for url '{0.url}'\n"
"Redirect location: '{0.headers[location]}'\n"
"For more information check: https://developer.mozilla.org/en-US/docs/Web/HTTP/Status/{0.status_code}"
)
else:
message = (
"{error_type} '{0.status_code} {0.reason_phrase}' for url '{0.url}'\n"
"For more information check: https://developer.mozilla.org/en-US/docs/Web/HTTP/Status/{0.status_code}"
)
status_class = self.status_code // 100
error_types = {
1: "Informational response",
3: "Redirect response",
4: "Client error",
5: "Server error",
}
error_type = error_types.get(status_class, "Invalid status code")
message = message.format(self, error_type=error_type)
raise HTTPStatusError(message, request=request, response=self)
def json(self, **kwargs: typing.Any) -> typing.Any:
return jsonlib.loads(self.content, **kwargs)
@property
def cookies(self) -> Cookies:
if not hasattr(self, "_cookies"):
self._cookies = Cookies()
self._cookies.extract_cookies(self)
return self._cookies
@property
def links(self) -> dict[str | None, dict[str, str]]:
"""
Returns the parsed header links of the response, if any
"""
header = self.headers.get("link")
if header is None:
return {}
return {
(link.get("rel") or link.get("url")): link
for link in _parse_header_links(header)
}
@property
def num_bytes_downloaded(self) -> int:
return self._num_bytes_downloaded
def __repr__(self) -> str:
return f"<Response [{self.status_code} {self.reason_phrase}]>"
def __getstate__(self) -> dict[str, typing.Any]:
return {
name: value
for name, value in self.__dict__.items()
if name not in ["extensions", "stream", "is_closed", "_decoder"]
}
def __setstate__(self, state: dict[str, typing.Any]) -> None:
for name, value in state.items():
setattr(self, name, value)
self.is_closed = True
self.extensions = {}
self.stream = UnattachedStream()
def read(self) -> bytes:
"""
Read and return the response content.
"""
if not hasattr(self, "_content"):
self._content = b"".join(self.iter_bytes())
return self._content
def iter_bytes(self, chunk_size: int | None = None) -> typing.Iterator[bytes]:
"""
A byte-iterator over the decoded response content.
This allows us to handle gzip, deflate, brotli, and zstd encoded responses.
"""
if hasattr(self, "_content"):
chunk_size = len(self._content) if chunk_size is None else chunk_size
for i in range(0, len(self._content), max(chunk_size, 1)):
yield self._content[i : i + chunk_size]
else:
decoder = self._get_content_decoder()
chunker = ByteChunker(chunk_size=chunk_size)
with request_context(request=self._request):
for raw_bytes in self.iter_raw():
decoded = decoder.decode(raw_bytes)
for chunk in chunker.decode(decoded):
yield chunk
decoded = decoder.flush()
for chunk in chunker.decode(decoded):
yield chunk # pragma: no cover
for chunk in chunker.flush():
yield chunk
def iter_text(self, chunk_size: int | None = None) -> typing.Iterator[str]:
"""
A str-iterator over the decoded response content
that handles both gzip, deflate, etc but also detects the content's
string encoding.
"""
decoder = TextDecoder(encoding=self.encoding or "utf-8")
chunker = TextChunker(chunk_size=chunk_size)
with request_context(request=self._request):
for byte_content in self.iter_bytes():
text_content = decoder.decode(byte_content)
for chunk in chunker.decode(text_content):
yield chunk
text_content = decoder.flush()
for chunk in chunker.decode(text_content):
yield chunk # pragma: no cover
for chunk in chunker.flush():
yield chunk
def iter_lines(self) -> typing.Iterator[str]:
decoder = LineDecoder()
with request_context(request=self._request):
for text in self.iter_text():
for line in decoder.decode(text):
yield line
for line in decoder.flush():
yield line
def iter_raw(self, chunk_size: int | None = None) -> typing.Iterator[bytes]:
"""
A byte-iterator over the raw response content.
"""
if self.is_stream_consumed:
raise StreamConsumed()
if self.is_closed:
raise StreamClosed()
if not isinstance(self.stream, SyncByteStream):
raise RuntimeError("Attempted to call a sync iterator on an async stream.")
self.is_stream_consumed = True
self._num_bytes_downloaded = 0
chunker = ByteChunker(chunk_size=chunk_size)
with request_context(request=self._request):
for raw_stream_bytes in self.stream:
self._num_bytes_downloaded += len(raw_stream_bytes)
for chunk in chunker.decode(raw_stream_bytes):
yield chunk
for chunk in chunker.flush():
yield chunk
self.close()
def close(self) -> None:
"""
Close the response and release the connection.
Automatically called if the response body is read to completion.
"""
if not isinstance(self.stream, SyncByteStream):
raise RuntimeError("Attempted to call a sync close on an async stream.")
if not self.is_closed:
self.is_closed = True
with request_context(request=self._request):
self.stream.close()
async def aread(self) -> bytes:
"""
Read and return the response content.
"""
if not hasattr(self, "_content"):
self._content = b"".join([part async for part in self.aiter_bytes()])
return self._content
async def aiter_bytes(
self, chunk_size: int | None = None
) -> typing.AsyncIterator[bytes]:
"""
A byte-iterator over the decoded response content.
This allows us to handle gzip, deflate, brotli, and zstd encoded responses.
"""
if hasattr(self, "_content"):
chunk_size = len(self._content) if chunk_size is None else chunk_size
for i in range(0, len(self._content), max(chunk_size, 1)):
yield self._content[i : i + chunk_size]
else:
decoder = self._get_content_decoder()
chunker = ByteChunker(chunk_size=chunk_size)
with request_context(request=self._request):
async for raw_bytes in self.aiter_raw():
decoded = decoder.decode(raw_bytes)
for chunk in chunker.decode(decoded):
yield chunk
decoded = decoder.flush()
for chunk in chunker.decode(decoded):
yield chunk # pragma: no cover
for chunk in chunker.flush():
yield chunk
async def aiter_text(
self, chunk_size: int | None = None
) -> typing.AsyncIterator[str]:
"""
A str-iterator over the decoded response content
that handles both gzip, deflate, etc but also detects the content's
string encoding.
"""
decoder = TextDecoder(encoding=self.encoding or "utf-8")
chunker = TextChunker(chunk_size=chunk_size)
with request_context(request=self._request):
async for byte_content in self.aiter_bytes():
text_content = decoder.decode(byte_content)
for chunk in chunker.decode(text_content):
yield chunk
text_content = decoder.flush()
for chunk in chunker.decode(text_content):
yield chunk # pragma: no cover
for chunk in chunker.flush():
yield chunk
async def aiter_lines(self) -> typing.AsyncIterator[str]:
decoder = LineDecoder()
with request_context(request=self._request):
async for text in self.aiter_text():
for line in decoder.decode(text):
yield line
for line in decoder.flush():
yield line
async def aiter_raw(
self, chunk_size: int | None = None
) -> typing.AsyncIterator[bytes]:
"""
A byte-iterator over the raw response content.
"""
if self.is_stream_consumed:
raise StreamConsumed()
if self.is_closed:
raise StreamClosed()
if not isinstance(self.stream, AsyncByteStream):
raise RuntimeError("Attempted to call an async iterator on a sync stream.")
self.is_stream_consumed = True
self._num_bytes_downloaded = 0
chunker = ByteChunker(chunk_size=chunk_size)
with request_context(request=self._request):
async for raw_stream_bytes in self.stream:
self._num_bytes_downloaded += len(raw_stream_bytes)
for chunk in chunker.decode(raw_stream_bytes):
yield chunk
for chunk in chunker.flush():
yield chunk
await self.aclose()
async def aclose(self) -> None:
"""
Close the response and release the connection.
Automatically called if the response body is read to completion.
"""
if not isinstance(self.stream, AsyncByteStream):
raise RuntimeError("Attempted to call an async close on a sync stream.")
if not self.is_closed:
self.is_closed = True
with request_context(request=self._request):
await self.stream.aclose()
|
Response
|
python
|
realpython__materials
|
python-del-statement/sample.py
|
{
"start": 0,
"end": 173
}
|
class ____:
class_attribute = 0
def __init__(self, arg):
self.instance_attribute = arg
def method(self):
print(self.instance_attribute)
|
SampleClass
|
python
|
keras-team__keras
|
keras/src/layers/reshaping/up_sampling3d.py
|
{
"start": 286,
"end": 4910
}
|
class ____(Layer):
"""Upsampling layer for 3D inputs.
Repeats the 1st, 2nd and 3rd dimensions
of the data by `size[0]`, `size[1]` and `size[2]` respectively.
Example:
>>> input_shape = (2, 1, 2, 1, 3)
>>> x = np.ones(input_shape)
>>> y = keras.layers.UpSampling3D(size=(2, 2, 2))(x)
>>> y.shape
(2, 2, 4, 2, 3)
Args:
size: Int, or tuple of 3 integers.
The upsampling factors for dim1, dim2 and dim3.
data_format: A string,
one of `"channels_last"` (default) or `"channels_first"`.
The ordering of the dimensions in the inputs.
`"channels_last"` corresponds to inputs with shape
`(batch_size, spatial_dim1, spatial_dim2, spatial_dim3, channels)`
while `"channels_first"` corresponds to inputs with shape
`(batch_size, channels, spatial_dim1, spatial_dim2, spatial_dim3)`.
When unspecified, uses
`image_data_format` value found in your Keras config file at
`~/.keras/keras.json` (if exists) else `"channels_last"`.
Defaults to `"channels_last"`.
Input shape:
5D tensor with shape:
- If `data_format` is `"channels_last"`:
`(batch_size, dim1, dim2, dim3, channels)`
- If `data_format` is `"channels_first"`:
`(batch_size, channels, dim1, dim2, dim3)`
Output shape:
5D tensor with shape:
- If `data_format` is `"channels_last"`:
`(batch_size, upsampled_dim1, upsampled_dim2, upsampled_dim3,
channels)`
- If `data_format` is `"channels_first"`:
`(batch_size, channels, upsampled_dim1, upsampled_dim2,
upsampled_dim3)`
"""
def __init__(self, size=(2, 2, 2), data_format=None, **kwargs):
super().__init__(**kwargs)
self.data_format = backend.standardize_data_format(data_format)
self.size = argument_validation.standardize_tuple(size, 3, "size")
self.input_spec = InputSpec(ndim=5)
def compute_output_shape(self, input_shape):
if self.data_format == "channels_first":
dim1 = (
self.size[0] * input_shape[2]
if input_shape[2] is not None
else None
)
dim2 = (
self.size[1] * input_shape[3]
if input_shape[3] is not None
else None
)
dim3 = (
self.size[2] * input_shape[4]
if input_shape[4] is not None
else None
)
return (input_shape[0], input_shape[1], dim1, dim2, dim3)
else:
dim1 = (
self.size[0] * input_shape[1]
if input_shape[1] is not None
else None
)
dim2 = (
self.size[1] * input_shape[2]
if input_shape[2] is not None
else None
)
dim3 = (
self.size[2] * input_shape[3]
if input_shape[3] is not None
else None
)
return (input_shape[0], dim1, dim2, dim3, input_shape[4])
def call(self, inputs):
return self._resize_volumes(
inputs, self.size[0], self.size[1], self.size[2], self.data_format
)
def get_config(self):
config = {"size": self.size, "data_format": self.data_format}
base_config = super().get_config()
return {**base_config, **config}
def _resize_volumes(
self, x, depth_factor, height_factor, width_factor, data_format
):
"""Resizes the volume contained in a 5D tensor.
Args:
x: Tensor or variable to resize.
depth_factor: Positive integer.
height_factor: Positive integer.
width_factor: Positive integer.
data_format: One of `"channels_first"`, `"channels_last"`.
Returns:
Resized tensor.
"""
if data_format == "channels_first":
output = ops.repeat(x, depth_factor, axis=2)
output = ops.repeat(output, height_factor, axis=3)
output = ops.repeat(output, width_factor, axis=4)
return output
elif data_format == "channels_last":
output = ops.repeat(x, depth_factor, axis=1)
output = ops.repeat(output, height_factor, axis=2)
output = ops.repeat(output, width_factor, axis=3)
return output
else:
raise ValueError(f"Invalid data_format: {data_format}")
|
UpSampling3D
|
python
|
jackfrued__Python-100-Days
|
Day31-35/code/example12.py
|
{
"start": 690,
"end": 901
}
|
class ____(Employee):
"""销售员"""
def __init__(self, name, sales=0.0):
self.sales = sales
super().__init__(name)
def get_salary(self):
return 1800.0 + self.sales * 0.05
|
Salesman
|
python
|
mlflow__mlflow
|
dev/clint/tests/rules/test_os_chdir_in_test.py
|
{
"start": 2559,
"end": 2895
}
|
class ____:
@staticmethod
def chdir(path):
pass
fake_os = FakeOs()
def test_func():
fake_os.chdir("/tmp") # Should not trigger since it's not os.chdir
"""
config = Config(select={OsChdirInTest.name})
violations = lint_file(Path("test_file.py"), code, config, index_path)
assert len(violations) == 0
|
FakeOs
|
python
|
getsentry__sentry
|
src/sentry/web/frontend/debug/debug_generic_issue.py
|
{
"start": 557,
"end": 2230
}
|
class ____(View):
def get(self, request):
org = Organization(id=1, slug="example", name="Example")
project = Project(id=1, slug="example", name="Example", organization=org)
event = make_generic_event(project)
group = event.group
rule = Rule(id=1, label="An example rule")
generic_issue_data_html = get_generic_data(event)
section_header = "Issue Data" if generic_issue_data_html else ""
return MailPreview(
html_template="sentry/emails/generic.html",
text_template="sentry/emails/generic.txt",
context={
"rule": rule,
"rules": get_rules([rule], org, project, group.type),
"group": group,
"event": event,
"timezone": zoneinfo.ZoneInfo("Europe/Vienna"),
# http://testserver/organizations/example/issues/<issue-id>/?referrer=alert_email
# &alert_type=email&alert_timestamp=<ts>&alert_rule_id=1
"link": get_group_settings_link(
group,
None,
get_rules([rule], org, project, group.type),
),
"generic_issue_data": [(section_header, mark_safe(generic_issue_data_html), None)],
"tags": event.tags,
"project_label": project.slug,
"commits": json.loads(COMMIT_EXAMPLE),
"issue_title": event.occurrence.issue_title,
"subtitle": event.occurrence.subtitle,
"culprit": event.occurrence.culprit,
},
).render(request)
|
DebugGenericIssueEmailView
|
python
|
walkccc__LeetCode
|
solutions/1653. Minimum Deletions to Make String Balanced/1653.py
|
{
"start": 0,
"end": 401
}
|
class ____:
# Same as 926. Flip String to Monotone Increasing
def minimumDeletions(self, s: str) -> int:
dp = 0 # the number of characters to be deleted to make subso far balanced
countB = 0
for c in s:
if c == 'a':
# 1. Delete 'a'.
# 2. Keep 'a' and delete the previous 'b's.
dp = min(dp + 1, countB)
else:
countB += 1
return dp
|
Solution
|
python
|
huggingface__transformers
|
tests/models/unispeech/test_modeling_unispeech.py
|
{
"start": 1431,
"end": 11513
}
|
class ____:
def __init__(
self,
parent,
batch_size=13,
seq_length=1024, # speech is longer
is_training=False,
hidden_size=16,
feat_extract_norm="group",
feat_extract_dropout=0.0,
feat_extract_activation="gelu",
conv_dim=(32, 32, 32),
conv_stride=(4, 4, 4),
conv_kernel=(8, 8, 8),
conv_bias=False,
num_conv_pos_embeddings=16,
num_conv_pos_embedding_groups=2,
num_hidden_layers=2,
num_attention_heads=2,
hidden_dropout_prob=0.1, # this is most likely not correctly set yet
intermediate_size=20,
layer_norm_eps=1e-5,
hidden_act="gelu",
initializer_range=0.02,
vocab_size=32,
do_stable_layer_norm=False,
scope=None,
):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.hidden_size = hidden_size
self.feat_extract_norm = feat_extract_norm
self.feat_extract_dropout = feat_extract_dropout
self.feat_extract_activation = feat_extract_activation
self.conv_dim = conv_dim
self.conv_stride = conv_stride
self.conv_kernel = conv_kernel
self.conv_bias = conv_bias
self.num_conv_pos_embeddings = num_conv_pos_embeddings
self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.hidden_dropout_prob = hidden_dropout_prob
self.intermediate_size = intermediate_size
self.layer_norm_eps = layer_norm_eps
self.hidden_act = hidden_act
self.initializer_range = initializer_range
self.vocab_size = vocab_size
self.do_stable_layer_norm = do_stable_layer_norm
self.scope = scope
output_seq_length = self.seq_length
for kernel, stride in zip(self.conv_kernel, self.conv_stride):
output_seq_length = (output_seq_length - (kernel - 1)) / stride
self.output_seq_length = int(math.ceil(output_seq_length))
self.encoder_seq_length = self.output_seq_length
def prepare_config_and_inputs(self):
input_values = floats_tensor([self.batch_size, self.seq_length], scale=1.0)
attention_mask = random_attention_mask([self.batch_size, self.seq_length])
config = self.get_config()
return config, input_values, attention_mask
def get_config(self):
return UniSpeechConfig(
hidden_size=self.hidden_size,
feat_extract_norm=self.feat_extract_norm,
feat_extract_dropout=self.feat_extract_dropout,
feat_extract_activation=self.feat_extract_activation,
conv_dim=self.conv_dim,
conv_stride=self.conv_stride,
conv_kernel=self.conv_kernel,
conv_bias=self.conv_bias,
num_conv_pos_embeddings=self.num_conv_pos_embeddings,
num_conv_pos_embedding_groups=self.num_conv_pos_embedding_groups,
num_hidden_layers=self.num_hidden_layers,
num_attention_heads=self.num_attention_heads,
hidden_dropout_prob=self.hidden_dropout_prob,
intermediate_size=self.intermediate_size,
layer_norm_eps=self.layer_norm_eps,
hidden_act=self.hidden_act,
initializer_range=self.initializer_range,
vocab_size=self.vocab_size,
)
def create_and_check_model(self, config, input_values, attention_mask):
model = UniSpeechModel(config=config)
model.to(torch_device)
model.eval()
result = model(input_values, attention_mask=attention_mask)
self.parent.assertEqual(
result.last_hidden_state.shape, (self.batch_size, self.output_seq_length, self.hidden_size)
)
def create_and_check_batch_inference(self, config, input_values, *args):
# test does not pass for models making use of `group_norm`
# check: https://github.com/pytorch/fairseq/issues/3227
model = UniSpeechModel(config=config)
model.to(torch_device)
model.eval()
input_values = input_values[:3]
attention_mask = torch.ones(input_values.shape, device=torch_device, dtype=torch.bool)
input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]]
# pad input
for i in range(len(input_lengths)):
input_values[i, input_lengths[i] :] = 0.0
attention_mask[i, input_lengths[i] :] = 0.0
batch_outputs = model(input_values, attention_mask=attention_mask).last_hidden_state
for i in range(input_values.shape[0]):
input_slice = input_values[i : i + 1, : input_lengths[i]]
output = model(input_slice).last_hidden_state
batch_output = batch_outputs[i : i + 1, : output.shape[1]]
self.parent.assertTrue(torch.allclose(output, batch_output, atol=1e-3))
def check_ctc_loss(self, config, input_values, *args):
model = UniSpeechForCTC(config=config)
model.to(torch_device)
# make sure that dropout is disabled
model.eval()
input_values = input_values[:3]
attention_mask = torch.ones(input_values.shape, device=torch_device, dtype=torch.long)
input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]]
max_length_labels = model._get_feat_extract_output_lengths(torch.tensor(input_lengths))
labels = ids_tensor((input_values.shape[0], min(max_length_labels) - 1), model.config.vocab_size)
# pad input
for i in range(len(input_lengths)):
input_values[i, input_lengths[i] :] = 0.0
attention_mask[i, input_lengths[i] :] = 0
model.config.ctc_loss_reduction = "sum"
sum_loss = model(input_values, attention_mask=attention_mask, labels=labels).loss.item()
model.config.ctc_loss_reduction = "mean"
mean_loss = model(input_values, attention_mask=attention_mask, labels=labels).loss.item()
self.parent.assertTrue(isinstance(sum_loss, float))
self.parent.assertTrue(isinstance(mean_loss, float))
def check_seq_classifier_loss(self, config, input_values, *args):
model = UniSpeechForSequenceClassification(config=config)
model.to(torch_device)
# make sure that dropout is disabled
model.eval()
input_values = input_values[:3]
attention_mask = torch.ones(input_values.shape, device=torch_device, dtype=torch.long)
input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]]
labels = ids_tensor((input_values.shape[0], 1), len(model.config.id2label))
# pad input
for i in range(len(input_lengths)):
input_values[i, input_lengths[i] :] = 0.0
attention_mask[i, input_lengths[i] :] = 0
masked_loss = model(input_values, attention_mask=attention_mask, labels=labels).loss.item()
unmasked_loss = model(input_values, labels=labels).loss.item()
self.parent.assertTrue(isinstance(masked_loss, float))
self.parent.assertTrue(isinstance(unmasked_loss, float))
self.parent.assertTrue(masked_loss != unmasked_loss)
def check_ctc_training(self, config, input_values, *args):
config.ctc_zero_infinity = True
model = UniSpeechForCTC(config=config)
model.to(torch_device)
model.train()
# freeze feature encoder
model.freeze_feature_encoder()
input_values = input_values[:3]
input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]]
max_length_labels = model._get_feat_extract_output_lengths(torch.tensor(input_lengths))
labels = ids_tensor((input_values.shape[0], max(max_length_labels) - 2), model.config.vocab_size)
# pad input
for i in range(len(input_lengths)):
input_values[i, input_lengths[i] :] = 0.0
if max_length_labels[i] < labels.shape[-1]:
# it's important that we make sure that target lengths are at least
# one shorter than logit lengths to prevent -inf
labels[i, max_length_labels[i] - 1 :] = -100
loss = model(input_values, labels=labels).loss
self.parent.assertFalse(torch.isinf(loss).item())
loss.backward()
def check_seq_classifier_training(self, config, input_values, *args):
config.ctc_zero_infinity = True
model = UniSpeechForSequenceClassification(config=config)
model.to(torch_device)
model.train()
# freeze everything but the classification head
model.freeze_base_model()
input_values = input_values[:3]
input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]]
labels = ids_tensor((input_values.shape[0], 1), len(model.config.id2label))
# pad input
for i in range(len(input_lengths)):
input_values[i, input_lengths[i] :] = 0.0
loss = model(input_values, labels=labels).loss
self.parent.assertFalse(torch.isinf(loss).item())
loss.backward()
def check_labels_out_of_vocab(self, config, input_values, *args):
model = UniSpeechForCTC(config)
model.to(torch_device)
model.train()
input_values = input_values[:3]
input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]]
max_length_labels = model._get_feat_extract_output_lengths(torch.tensor(input_lengths))
labels = ids_tensor((input_values.shape[0], max(max_length_labels) - 2), model.config.vocab_size + 100)
with pytest.raises(ValueError):
model(input_values, labels=labels)
def prepare_config_and_inputs_for_common(self):
    """Return ``(config, inputs_dict)`` in the shape the common model tests expect."""
    config, input_values, attention_mask = self.prepare_config_and_inputs()
    return config, {"input_values": input_values, "attention_mask": attention_mask}
@require_torch
|
UniSpeechModelTester
|
python
|
pypa__warehouse
|
warehouse/oidc/forms/_core.py
|
{
"start": 4402,
"end": 4725
}
|
class DeletePublisherForm(wtforms.Form):
    """Form for deleting an OIDC publisher, identified by its UUID."""

    __params__ = ["publisher_id"]

    # Must be present and a well-formed UUID string.
    publisher_id = wtforms.StringField(
        validators=[
            wtforms.validators.InputRequired(message=_("Specify a publisher ID")),
            wtforms.validators.UUID(message=_("Publisher must be specified by ID")),
        ]
    )
|
DeletePublisherForm
|
python
|
huggingface__transformers
|
tests/models/kosmos2_5/test_modeling_kosmos2_5.py
|
{
"start": 19637,
"end": 29293
}
|
class Kosmos2_5ModelIntegrationTest(unittest.TestCase):
    """Slow integration tests: run real Kosmos-2.5 generation (OCR and markdown
    prompts) under eager, SDPA and FlashAttention-2 attention implementations
    and compare against hardware-specific golden outputs."""

    # This variable is used to determine which CUDA device are we using for our runners (A10 or T4)
    # Depending on the hardware we get different logits / generations
    cuda_compute_capability_major_version = None

    @classmethod
    def setUpClass(cls):
        if is_torch_available() and torch.cuda.is_available():
            # 8 is for A100 / A10 and 7 for T4
            cls.cuda_compute_capability_major_version = torch.cuda.get_device_capability()[0]

    def run_example(self, prompt, image, model, processor):
        """Run greedy generation for one (prompt, image) pair; return (ids, decoded text)."""
        inputs = processor(text=prompt, images=image, return_tensors="pt")
        inputs = {k: v.to(torch_device) if v is not None else None for k, v in inputs.items()}
        # Patches must match the model's compute dtype (e.g. bfloat16).
        inputs["flattened_patches"] = inputs["flattened_patches"].to(model.dtype)

        generation_outputs = model.generate(
            **inputs,
            max_new_tokens=1024,
        )

        generated_ids = generation_outputs
        generated_text = processor.batch_decode(generated_ids, skip_special_tokens=True)

        return generated_ids, generated_text

    def test_eager(self):
        url = "https://huggingface.co/microsoft/kosmos-2.5/resolve/main/receipt_00008.png"
        image = Image.open(requests.get(url, stream=True).raw)

        dtype = torch.bfloat16
        repo = "microsoft/kosmos-2.5"
        model = Kosmos2_5ForConditionalGeneration.from_pretrained(
            repo, device_map=torch_device, dtype=dtype, attn_implementation="eager"
        )
        processor = AutoProcessor.from_pretrained(repo)

        # Golden outputs keyed by CUDA compute capability (7 = T4, 8 = A100/A10).
        prompt = "<ocr>"
        generated_ids, generated_text = self.run_example(prompt, image, model, processor)
        EXPECTED_TEXT = {
            7: [
                "<bbox><x_53><y_573><x_69><y_606></bbox>1\n<bbox><x_79><y_573><x_464><y_611></bbox>[REG] BLACK SAKURA\n<bbox><x_690><y_569><x_810><y_606></bbox>45,455\n<bbox><x_53><y_614><x_69><y_648></bbox>1\n<bbox><x_79><y_614><x_468><y_651></bbox>COOKIE DOH SAUCES\n<bbox><x_788><y_609><x_812><y_642></bbox>0\n<bbox><x_50><y_658><x_69><y_693></bbox>1\n<bbox><x_79><y_658><x_358><y_693></bbox>NATA DE COCO\n<bbox><x_790><y_652><x_814><y_683></bbox>0\n<bbox><x_31><y_742><x_820><y_781></bbox>Sub Total 45,455\n<bbox><x_27><y_781><x_822><y_827></bbox>PB1 (10%) 4,545\n<bbox><x_27><y_826><x_824><y_872></bbox>Rounding 0\n<bbox><x_24><y_872><x_827><y_921></bbox>Total 50,000\n<bbox><x_17><y_1056><x_836><y_1108></bbox>Card Payment 50,000\n"
            ],
            8: [
                "<bbox><x_53><y_573><x_69><y_606></bbox>1\n<bbox><x_79><y_573><x_464><y_611></bbox>[REG] BLACK SAKURA\n<bbox><x_690><y_569><x_810><y_606></bbox>45,455\n<bbox><x_53><y_614><x_69><y_648></bbox>1\n<bbox><x_79><y_614><x_468><y_650></bbox>COOKIE DOH SAUCES\n<bbox><x_788><y_609><x_812><y_644></bbox>0\n<bbox><x_50><y_658><x_69><y_693></bbox>1\n<bbox><x_79><y_658><x_358><y_693></bbox>NATA DE COCO\n<bbox><x_790><y_652><x_814><y_687></bbox>0\n<bbox><x_31><y_742><x_820><y_781></bbox>Sub Total 45,455\n<bbox><x_27><y_781><x_822><y_827></bbox>PB1 (10%) 4,545\n<bbox><x_27><y_826><x_824><y_872></bbox>Rounding 0\n<bbox><x_24><y_872><x_827><y_921></bbox>Total 50,000\n<bbox><x_17><y_1056><x_836><y_1108></bbox>Card Payment 50,000\n"
            ],
        }
        self.assertListEqual(generated_text, EXPECTED_TEXT[self.cuda_compute_capability_major_version])

        prompt = "<md>"
        generated_ids, generated_text = self.run_example(prompt, image, model, processor)
        EXPECTED_TEXT = {
            7: [
                "- **1 \\[REG\\] BLACK SAKURA** 45,455\n- **1 COOKIE DOH SAUCES** 0\n- **1 NATA DE COCO** 0\n- **Sub Total** 45,455\n- **PB1 (10%)** 4,545\n- **Rounding** 0\n- **Total** **50,000**\n\nCard Payment 50,000"
            ],
            8: [
                "- **1 \\[REG\\] BLACK SAKURA** 45,455\n- **1 COOKIE DOH SAUCES** 0\n- **1 NATA DE COCO** 0\n- **Sub Total** 45,455\n- **PB1 (10%)** 4,545\n- **Rounding** 0\n- **Total** **50,000**\n\nCard Payment 50,000"
            ],
        }
        self.assertListEqual(generated_text, EXPECTED_TEXT[self.cuda_compute_capability_major_version])

    def test_sdpa(self):
        url = "https://huggingface.co/microsoft/kosmos-2.5/resolve/main/receipt_00008.png"
        image = Image.open(requests.get(url, stream=True).raw)

        dtype = torch.bfloat16
        repo = "microsoft/kosmos-2.5"
        model = Kosmos2_5ForConditionalGeneration.from_pretrained(
            repo, device_map=torch_device, dtype=dtype, attn_implementation="sdpa"
        )
        processor = AutoProcessor.from_pretrained(repo)

        prompt = "<ocr>"
        generated_ids, generated_text = self.run_example(prompt, image, model, processor)
        EXPECTED_TEXT = {
            7: [
                "<bbox><x_53><y_573><x_69><y_606></bbox>1\n<bbox><x_79><y_573><x_464><y_611></bbox>[REG] BLACK SAKURA\n<bbox><x_690><y_569><x_810><y_606></bbox>45,455\n<bbox><x_53><y_614><x_69><y_648></bbox>1\n<bbox><x_79><y_614><x_468><y_651></bbox>COOKIE DOH SAUCES\n<bbox><x_788><y_609><x_812><y_642></bbox>0\n<bbox><x_50><y_658><x_69><y_693></bbox>1\n<bbox><x_79><y_658><x_358><y_693></bbox>NATA DE COCO\n<bbox><x_790><y_652><x_814><y_683></bbox>0\n<bbox><x_31><y_742><x_820><y_781></bbox>Sub Total 45,455\n<bbox><x_27><y_781><x_822><y_827></bbox>PB1 (10%) 4,545\n<bbox><x_27><y_826><x_824><y_872></bbox>Rounding 0\n<bbox><x_24><y_872><x_827><y_921></bbox>Total 50,000\n<bbox><x_17><y_1056><x_836><y_1108></bbox>Card Payment 50,000\n",
            ],
            8: [
                "<bbox><x_53><y_573><x_69><y_606></bbox>1\n<bbox><x_79><y_573><x_464><y_611></bbox>[REG] BLACK SAKURA\n<bbox><x_690><y_569><x_810><y_606></bbox>45,455\n<bbox><x_53><y_614><x_69><y_648></bbox>1\n<bbox><x_79><y_614><x_468><y_651></bbox>COOKIE DOH SAUCES\n<bbox><x_788><y_609><x_812><y_642></bbox>0\n<bbox><x_50><y_658><x_69><y_693></bbox>1\n<bbox><x_79><y_658><x_358><y_693></bbox>NATA DE COCO\n<bbox><x_790><y_652><x_814><y_683></bbox>0\n<bbox><x_31><y_742><x_820><y_781></bbox>Sub Total 45,455\n<bbox><x_27><y_781><x_822><y_827></bbox>PB1 (10%) 4,545\n<bbox><x_27><y_826><x_824><y_872></bbox>Rounding 0\n<bbox><x_24><y_872><x_827><y_921></bbox>Total 50,000\n<bbox><x_17><y_1056><x_836><y_1108></bbox>Card Payment 50,000\n"
            ],
        }
        self.assertListEqual(generated_text, EXPECTED_TEXT[self.cuda_compute_capability_major_version])

        prompt = "<md>"
        generated_ids, generated_text = self.run_example(prompt, image, model, processor)
        EXPECTED_TEXT = {
            7: [
                "- **1 \\[REG\\] BLACK SAKURA** 45,455\n- **1 COOKIE DOH SAUCES** 0\n- **1 NATA DE COCO** 0\n- **Sub Total** 45,455\n- **PB1 (10%)** 4,545\n- **Rounding** 0\n- **Total** **50,000**\n\nCard Payment 50,000"
            ],
            8: [
                "- **1 \\[REG\\] BLACK SAKURA** 45,455\n- **1 COOKIE DOH SAUCES** 0\n- **1 NATA DE COCO** 0\n- **Sub Total** 45,455\n- **PB1 (10%)** 4,545\n- **Rounding** 0\n- **Total** **50,000**\n\nCard Payment 50,000"
            ],
        }
        self.assertListEqual(generated_text, EXPECTED_TEXT[self.cuda_compute_capability_major_version])

    @require_flash_attn
    @require_torch_accelerator
    @pytest.mark.flash_attn_test
    @slow
    def test_FA2(self):
        url = "https://huggingface.co/microsoft/kosmos-2.5/resolve/main/receipt_00008.png"
        image = Image.open(requests.get(url, stream=True).raw)

        dtype = torch.bfloat16
        repo = "microsoft/kosmos-2.5"
        model = Kosmos2_5ForConditionalGeneration.from_pretrained(
            repo,
            device_map=torch_device,
            dtype=dtype,
            attn_implementation="flash_attention_2",
        )
        processor = AutoProcessor.from_pretrained(repo)

        prompt = "<ocr>"
        generated_ids, generated_text = self.run_example(prompt, image, model, processor)
        EXPECTED_TEXT = [
            "<bbox><x_53><y_573><x_69><y_606></bbox>1\n<bbox><x_79><y_573><x_464><y_612></bbox>[REG] BLACK SAKURA\n<bbox><x_690><y_569><x_812><y_606></bbox>45,455\n<bbox><x_53><y_614><x_69><y_650></bbox>1\n<bbox><x_79><y_614><x_468><y_650></bbox>COOKIE DOH SAUCES\n<bbox><x_788><y_610><x_813><y_644></bbox>0\n<bbox><x_50><y_658><x_65><y_693></bbox>1\n<bbox><x_76><y_658><x_358><y_693></bbox>NATA DE COCO\n<bbox><x_790><y_652><x_815><y_687></bbox>0\n<bbox><x_31><y_742><x_822><y_781></bbox>Sub Total 45,455\n<bbox><x_27><y_780><x_822><y_827></bbox>PB1 (10%) 4,545\n<bbox><x_27><y_826><x_824><y_874></bbox>Rounding 0\n<bbox><x_24><y_872><x_827><y_921></bbox>Total 50,000\n<bbox><x_17><y_1056><x_835><y_1108></bbox>Card Payment 50,000\n"
        ]
        self.assertListEqual(generated_text, EXPECTED_TEXT)

        prompt = "<md>"
        generated_ids, generated_text = self.run_example(prompt, image, model, processor)
        # A10 gives the 1st one, but A100 gives the 2nd one
        EXPECTED_TEXT = [
            "- **1 \\[REG\\] BLACK SAKURA** 45,455\n- **1 COOKIE DOH SAUCES** 0\n- **1 NATA DE COCO** 0\n\n<table>\n<thead>\n<tr>\n<th>\nSub Total\n</th>\n<th>\n45,455\n</th>\n</tr>\n</thead>\n<tbody>\n<tr>\n<td>\nPB1 (10%)\n</td>\n<td>\n4,545\n</td>\n</tr>\n<tr>\n<td>\nRounding\n</td>\n<td>\n0\n</td>\n</tr>\n<tr>\n<td>\n<strong>\nTotal\n</strong>\n</td>\n<td>\n<strong>\n50,000\n</strong>\n</td>\n</tr>\n</tbody>\n</table>\n\nCard Payment 50,000",
            "- **1 \\[REG\\] BLACK SAKURA** 45,455\n- **1 COOKIE DOH SAUCES** 0\n- **1 NATA DE COCO** 0\n- **Sub Total** 45,455\n- **PB1 (10%)** 4,545\n- **Rounding** 0\n- **Total** **50,000**\n",
        ]
        self.assertIn(generated_text[0], EXPECTED_TEXT)
|
Kosmos2_5ModelIntegrationTest
|
python
|
huggingface__transformers
|
src/transformers/models/squeezebert/configuration_squeezebert.py
|
{
"start": 795,
"end": 6561
}
|
class SqueezeBertConfig(PreTrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`SqueezeBertModel`]. It is used to instantiate a
    SqueezeBERT model according to the specified arguments, defining the model architecture. Instantiating a
    configuration with the defaults will yield a similar configuration to that of the SqueezeBERT
    [squeezebert/squeezebert-uncased](https://huggingface.co/squeezebert/squeezebert-uncased) architecture.

    Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PreTrainedConfig`] for more information.

    Args:
        vocab_size (`int`, *optional*, defaults to 30522):
            Vocabulary size of the SqueezeBERT model. Defines the number of different tokens that can be represented by
            the `inputs_ids` passed when calling [`SqueezeBertModel`].
        hidden_size (`int`, *optional*, defaults to 768):
            Dimensionality of the encoder layers and the pooler layer.
        num_hidden_layers (`int`, *optional*, defaults to 12):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 12):
            Number of attention heads for each attention layer in the Transformer encoder.
        intermediate_size (`int`, *optional*, defaults to 3072):
            Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder.
        hidden_act (`str` or `Callable`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"silu"` and `"gelu_new"` are supported.
        hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
        attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
            The dropout ratio for the attention probabilities.
        max_position_embeddings (`int`, *optional*, defaults to 512):
            The maximum sequence length that this model might ever be used with. Typically set this to something large
            just in case (e.g., 512 or 1024 or 2048).
        type_vocab_size (`int`, *optional*, defaults to 2):
            The vocabulary size of the `token_type_ids` passed when calling [`BertModel`].
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        layer_norm_eps (`float`, *optional*, defaults to 1e-12):
            The epsilon used by the layer normalization layers.
        pad_token_id (`int`, *optional*, defaults to 0):
            The ID of the token in the word embedding to use as padding.
        embedding_size (`int`, *optional*, defaults to 768):
            The dimension of the word embedding vectors.
        q_groups (`int`, *optional*, defaults to 4):
            The number of groups in Q layer.
        k_groups (`int`, *optional*, defaults to 4):
            The number of groups in K layer.
        v_groups (`int`, *optional*, defaults to 4):
            The number of groups in V layer.
        post_attention_groups (`int`, *optional*, defaults to 1):
            The number of groups in the first feed forward network layer.
        intermediate_groups (`int`, *optional*, defaults to 4):
            The number of groups in the second feed forward network layer.
        output_groups (`int`, *optional*, defaults to 4):
            The number of groups in the third feed forward network layer.

    Examples:

    ```python
    >>> from transformers import SqueezeBertConfig, SqueezeBertModel

    >>> # Initializing a SqueezeBERT configuration
    >>> configuration = SqueezeBertConfig()

    >>> # Initializing a model (with random weights) from the configuration above
    >>> model = SqueezeBertModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```
    """

    model_type = "squeezebert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        embedding_size=768,
        q_groups=4,
        k_groups=4,
        v_groups=4,
        post_attention_groups=1,
        intermediate_groups=4,
        output_groups=4,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.embedding_size = embedding_size
        self.q_groups = q_groups
        self.k_groups = k_groups
        self.v_groups = v_groups
        self.post_attention_groups = post_attention_groups
        self.intermediate_groups = intermediate_groups
        self.output_groups = output_groups


__all__ = ["SqueezeBertConfig"]
|
SqueezeBertConfig
|
python
|
pypa__warehouse
|
tests/unit/packaging/test_models.py
|
{
"start": 20469,
"end": 43064
}
|
class TestRelease:
    """Unit tests for the ``Release`` model: file lookup, metadata flags,
    project-URL normalization/verification, ACLs, trusted publishing and
    cascade deletion of the description."""

    def test_getattr(self, db_session):
        # A release is indexable by the filename of one of its files.
        project = DBProjectFactory.create()
        release = DBReleaseFactory.create(project=project)
        file = DBFileFactory.create(
            release=release,
            filename=f"{release.project.name}-{release.version}.tar.gz",
            python_version="source",
        )
        assert release[file.filename] == file

    def test_getattr_invalid_file(self, db_session):
        project = DBProjectFactory.create()
        release = DBReleaseFactory.create(project=project)
        with pytest.raises(KeyError):
            # Well-formed filename, but the File doesn't actually exist.
            release[f"{release.project.name}-{release.version}.tar.gz"]

    def test_getattr_wrong_file_for_release(self, db_session):
        project = DBProjectFactory.create()
        release1 = DBReleaseFactory.create(project=project)
        release2 = DBReleaseFactory.create(project=project)
        file = DBFileFactory.create(
            release=release1,
            filename=f"{release1.project.name}-{release1.version}.tar.gz",
            python_version="source",
        )
        assert release1[file.filename] == file
        # Accessing a file through a different release does not work.
        with pytest.raises(KeyError):
            release2[file.filename]

    def test_has_meta_true_with_keywords(self, db_session):
        release = DBReleaseFactory.create(keywords="foo, bar")
        assert release.has_meta

    def test_has_meta_true_with_author(self, db_session):
        release = DBReleaseFactory.create(author="Batman")
        assert release.has_meta

        release = DBReleaseFactory.create(author_email="wayne@gotham.ny")
        assert release.has_meta

    def test_has_meta_true_with_maintainer(self, db_session):
        release = DBReleaseFactory.create(maintainer="Spiderman")
        assert release.has_meta

        release = DBReleaseFactory.create(maintainer_email="peter@parker.mrvl")
        assert release.has_meta

    def test_has_meta_false(self, db_session):
        release = DBReleaseFactory.create()
        assert not release.has_meta

    @pytest.mark.parametrize(
        ("home_page", "download_url", "project_urls", "expected"),
        [
            (None, None, [], OrderedDict()),
            (
                "https://example.com/home/",
                None,
                [],
                OrderedDict([("Homepage", "https://example.com/home/")]),
            ),
            (
                None,
                "https://example.com/download/",
                [],
                OrderedDict([("Download", "https://example.com/download/")]),
            ),
            (
                "https://example.com/home/",
                "https://example.com/download/",
                [],
                OrderedDict(
                    [
                        ("Homepage", "https://example.com/home/"),
                        ("Download", "https://example.com/download/"),
                    ]
                ),
            ),
            (
                None,
                None,
                ["Source Code,https://example.com/source-code/"],
                OrderedDict([("Source Code", "https://example.com/source-code/")]),
            ),
            (
                None,
                None,
                ["Source Code, https://example.com/source-code/"],
                OrderedDict([("Source Code", "https://example.com/source-code/")]),
            ),
            (
                "https://example.com/home/",
                "https://example.com/download/",
                ["Source Code,https://example.com/source-code/"],
                OrderedDict(
                    [
                        ("Homepage", "https://example.com/home/"),
                        ("Source Code", "https://example.com/source-code/"),
                        ("Download", "https://example.com/download/"),
                    ]
                ),
            ),
            (
                "https://example.com/home/",
                "https://example.com/download/",
                [
                    "Homepage,https://example.com/home2/",
                    "Source Code,https://example.com/source-code/",
                ],
                OrderedDict(
                    [
                        ("Homepage", "https://example.com/home2/"),
                        ("Source Code", "https://example.com/source-code/"),
                        ("Download", "https://example.com/download/"),
                    ]
                ),
            ),
            (
                "https://example.com/home/",
                "https://example.com/download/",
                [
                    "Source Code,https://example.com/source-code/",
                    "Download,https://example.com/download2/",
                ],
                OrderedDict(
                    [
                        ("Homepage", "https://example.com/home/"),
                        ("Source Code", "https://example.com/source-code/"),
                        ("Download", "https://example.com/download2/"),
                    ]
                ),
            ),
            # project_urls has more priority than home_page and download_url
            (
                "https://example.com/home/",
                "https://example.com/download/",
                [
                    "Homepage,https://example.com/home2/",
                    "Source Code,https://example.com/source-code/",
                    "Download,https://example.com/download2/",
                ],
                OrderedDict(
                    [
                        ("Homepage", "https://example.com/home2/"),
                        ("Source Code", "https://example.com/source-code/"),
                        ("Download", "https://example.com/download2/"),
                    ]
                ),
            ),
            # similar spellings of homepage/download label doesn't duplicate urls
            (
                "https://example.com/home/",
                "https://example.com/download/",
                [
                    "homepage, https://example.com/home/",
                    "download-URL ,https://example.com/download/",
                ],
                OrderedDict(
                    [
                        ("Homepage", "https://example.com/home/"),
                        ("Download", "https://example.com/download/"),
                    ]
                ),
            ),
            # the duplicate removal only happens if the urls are equal too!
            (
                "https://example.com/home1/",
                None,
                [
                    "homepage, https://example.com/home2/",
                ],
                OrderedDict(
                    [
                        ("Homepage", "https://example.com/home1/"),
                        ("homepage", "https://example.com/home2/"),
                    ]
                ),
            ),
        ],
    )
    def test_urls(self, db_session, home_page, download_url, project_urls, expected):
        release = DBReleaseFactory.create(
            home_page=home_page, download_url=download_url
        )

        # Each urlspec is "label,url"; split on the first comma only.
        for urlspec in project_urls:
            label, _, url = urlspec.partition(",")
            db_session.add(
                ReleaseURL(
                    release=release,
                    name=label.strip(),
                    url=url.strip(),
                )
            )

        # TODO: It'd be nice to test for the actual ordering here.
        assert dict(release.urls) == dict(expected)

    @pytest.mark.parametrize(
        "release_urls",
        [
            [
                ("Issues", "https://github.com/org/user/issues", True),
                ("Source", "https://github.com/org/user", True),
                ("Homepage", "https://example.com/", False),
                ("Download", "https://example.com/", False),
            ],
            [
                ("Issues", "https://github.com/org/user/issues", True),
                ("Source", "https://github.com/org/user", True),
                ("Homepage", "https://homepage.com/", False),
                ("Download", "https://download.com/", False),
            ],
            [
                ("Issues", "https://github.com/org/user/issues", True),
                ("Source", "https://github.com/org/user", True),
                ("Homepage", "https://homepage.com/", True),
                ("Download", "https://download.com/", True),
            ],
        ],
    )
    def test_urls_by_verify_status(self, db_session, release_urls):
        release = DBReleaseFactory.create(
            home_page="https://homepage.com", download_url="https://download.com"
        )
        for label, url, verified in release_urls:
            db_session.add(
                ReleaseURL(
                    release=release,
                    name=label,
                    url=url,
                    verified=verified,
                )
            )

        # Every URL returned for a given verified flag must carry that flag.
        for verified_status in [True, False]:
            for label, url in release.urls_by_verify_status(
                verified=verified_status
            ).items():
                assert (label, url, verified_status) in release_urls

    @pytest.mark.parametrize(
        (
            "homepage_metadata_url",
            "download_metadata_url",
            "extra_url",
            "extra_url_verified",
        ),
        [
            (
                "https://homepage.com",
                "https://download.com",
                "https://example.com",
                True,
            ),
            (
                "https://homepage.com",
                "https://download.com",
                "https://homepage.com",
                True,
            ),
            (
                "https://homepage.com",
                "https://download.com",
                "https://homepage.com",
                False,
            ),
            (
                "https://homepage.com",
                "https://download.com",
                "https://download.com",
                True,
            ),
            (
                "https://homepage.com",
                "https://download.com",
                "https://download.com",
                False,
            ),
        ],
    )
    def test_urls_by_verify_status_with_metadata_urls(
        self,
        db_session,
        homepage_metadata_url,
        download_metadata_url,
        extra_url,
        extra_url_verified,
    ):
        release = DBReleaseFactory.create(
            home_page=homepage_metadata_url, download_url=download_metadata_url
        )
        db_session.add(
            ReleaseURL(
                release=release,
                name="extra_url",
                url=extra_url,
                verified=extra_url_verified,
            )
        )

        verified_urls = release.urls_by_verify_status(verified=True).values()
        unverified_urls = release.urls_by_verify_status(verified=False).values()

        # Homepage and Download URLs stored separately from the project URLs
        # are considered unverified, unless they are equal to URLs present in
        # `project_urls` that are verified.
        if extra_url_verified:
            assert extra_url in verified_urls
            if homepage_metadata_url != extra_url:
                assert homepage_metadata_url in unverified_urls
            if download_metadata_url != extra_url:
                assert download_metadata_url in unverified_urls
        else:
            assert extra_url in unverified_urls
            assert homepage_metadata_url in unverified_urls
            assert download_metadata_url in unverified_urls

    def test_acl(self, db_session):
        project = DBProjectFactory.create()
        owner1 = DBRoleFactory.create(project=project)
        owner2 = DBRoleFactory.create(project=project)
        maintainer1 = DBRoleFactory.create(project=project, role_name="Maintainer")
        maintainer2 = DBRoleFactory.create(project=project, role_name="Maintainer")
        release = DBReleaseFactory.create(project=project)

        # Collect the ACLs from the release's whole resource lineage.
        acls = []
        for location in lineage(release):
            try:
                acl = location.__acl__
            except AttributeError:
                continue
            acls.extend(acl())

        assert acls == [
            (
                Allow,
                "group:admins",
                (
                    Permissions.AdminDashboardSidebarRead,
                    Permissions.AdminObservationsRead,
                    Permissions.AdminObservationsWrite,
                    Permissions.AdminProhibitedProjectsWrite,
                    Permissions.AdminProhibitedUsernameWrite,
                    Permissions.AdminProjectsDelete,
                    Permissions.AdminProjectsRead,
                    Permissions.AdminProjectsSetLimit,
                    Permissions.AdminProjectsWrite,
                    Permissions.AdminRoleAdd,
                    Permissions.AdminRoleDelete,
                ),
            ),
            (
                Allow,
                "group:moderators",
                (
                    Permissions.AdminDashboardSidebarRead,
                    Permissions.AdminObservationsRead,
                    Permissions.AdminObservationsWrite,
                    Permissions.AdminProjectsRead,
                    Permissions.AdminProjectsSetLimit,
                    Permissions.AdminRoleAdd,
                    Permissions.AdminRoleDelete,
                ),
            ),
            (
                Allow,
                "group:observers",
                Permissions.APIObservationsAdd,
            ),
            (
                Allow,
                Authenticated,
                Permissions.SubmitMalwareObservation,
            ),
        ] + sorted(
            [
                (
                    Allow,
                    f"user:{owner1.user.id}",
                    [
                        Permissions.ProjectsRead,
                        Permissions.ProjectsUpload,
                        Permissions.ProjectsWrite,
                    ],
                ),
                (
                    Allow,
                    f"user:{owner2.user.id}",
                    [
                        Permissions.ProjectsRead,
                        Permissions.ProjectsUpload,
                        Permissions.ProjectsWrite,
                    ],
                ),
            ],
            key=lambda x: x[1],
        ) + sorted(
            [
                (Allow, f"user:{maintainer1.user.id}", [Permissions.ProjectsUpload]),
                (Allow, f"user:{maintainer2.user.id}", [Permissions.ProjectsUpload]),
            ],
            key=lambda x: x[1],
        )

    @pytest.mark.parametrize(
        ("url", "expected"),
        [
            (
                "https://github.com/pypi/warehouse",
                "https://api.github.com/repos/pypi/warehouse",
            ),
            (
                "https://github.com/pypi/warehouse/",
                "https://api.github.com/repos/pypi/warehouse",
            ),
            (
                "https://github.com/pypi/warehouse/tree/main",
                "https://api.github.com/repos/pypi/warehouse",
            ),
            (
                "https://www.github.com/pypi/warehouse",
                "https://api.github.com/repos/pypi/warehouse",
            ),
            ("https://github.com/pypa/", None),
            ("https://github.com/sponsors/pypa/", None),
            ("https://google.com/pypi/warehouse/tree/main", None),
            ("https://google.com", None),
            ("incorrect url", None),
            (
                "https://www.github.com/pypi/warehouse.git",
                "https://api.github.com/repos/pypi/warehouse",
            ),
            (
                "https://www.github.com/pypi/warehouse.git/",
                "https://api.github.com/repos/pypi/warehouse",
            ),
            ("git@bitbucket.org:definex/dsgnutils.git", None),
        ],
    )
    def test_verified_github_repo_info_url(self, db_session, url, expected):
        release = DBReleaseFactory.create()
        release.project_urls["Homepage"] = {"url": url, "verified": True}
        assert release.verified_github_repo_info_url == expected

    def test_verified_github_repo_info_url_is_none_without_verified_url(
        self,
        db_session,
    ):
        release = DBReleaseFactory.create()
        assert release.verified_github_repo_info_url is None

    @pytest.mark.parametrize(
        ("url", "expected"),
        [
            (
                "https://github.com/pypi/warehouse",
                "https://api.github.com/search/issues?q=repo:pypi/warehouse"
                "+type:issue+state:open&per_page=1",
            ),
            (
                "https://github.com/pypi/warehouse/",
                "https://api.github.com/search/issues?q=repo:pypi/warehouse+"
                "type:issue+state:open&per_page=1",
            ),
            (
                "https://github.com/pypi/warehouse/tree/main",
                "https://api.github.com/search/issues?q=repo:pypi/warehouse"
                "+type:issue+state:open&per_page=1",
            ),
            (
                "https://www.github.com/pypi/warehouse",
                "https://api.github.com/search/issues?q=repo:pypi/warehouse"
                "+type:issue+state:open&per_page=1",
            ),
            ("https://github.com/pypa/", None),
            ("https://github.com/sponsors/pypa/", None),
            ("https://google.com/pypi/warehouse/tree/main", None),
            ("https://google.com", None),
            ("incorrect url", None),
            (
                "https://www.github.com/pypi/warehouse.git",
                "https://api.github.com/search/issues?q=repo:pypi/warehouse"
                "+type:issue+state:open&per_page=1",
            ),
            (
                "https://www.github.com/pypi/warehouse.git/",
                "https://api.github.com/search/issues?q=repo:pypi/warehouse"
                "+type:issue+state:open&per_page=1",
            ),
        ],
    )
    def test_verified_github_open_issue_info_url(self, db_session, url, expected):
        release = DBReleaseFactory.create()
        release.project_urls["Homepage"] = {"url": url, "verified": True}
        assert release.verified_github_open_issue_info_url == expected

    def test_verified_github_open_issueo_info_url_is_none_without_verified_url(
        self,
        db_session,
    ):
        release = DBReleaseFactory.create()
        assert release.verified_github_open_issue_info_url is None

    @pytest.mark.parametrize(
        ("url", "expected"),
        [
            (
                "https://gitlab.com/someuser/someproject",
                "someuser/someproject",
            ),
            (
                "https://gitlab.com/someuser/someproject/",
                "someuser/someproject",
            ),
            (
                "https://gitlab.com/someuser/someproject/-/tree/stable-9",
                "someuser/someproject",
            ),
            (
                "https://www.gitlab.com/someuser/someproject",
                "someuser/someproject",
            ),
            ("https://gitlab.com/someuser/", None),
            ("https://google.com/pypi/warehouse/tree/main", None),
            ("https://google.com", None),
            ("incorrect url", None),
            (
                "https://gitlab.com/someuser/someproject.git",
                "someuser/someproject",
            ),
            (
                "https://www.gitlab.com/someuser/someproject.git/",
                "someuser/someproject",
            ),
            ("git@bitbucket.org:definex/dsgnutils.git", None),
        ],
    )
    def test_verified_gitlab_repository(self, db_session, url, expected):
        release = DBReleaseFactory.create()
        release.project_urls["Homepage"] = {"url": url, "verified": True}
        assert release.verified_gitlab_repository == expected

    def test_verified_gitlab_repository_is_none_without_verified_url(
        self,
        db_session,
    ):
        release = DBReleaseFactory.create()
        assert release.verified_gitlab_repository is None

    def test_trusted_published_none(self, db_session):
        release = DBReleaseFactory.create()
        assert not release.trusted_published

    def test_trusted_published_all(self, db_session):
        release = DBReleaseFactory.create()
        release_file = DBFileFactory.create(
            release=release,
            filename=f"{release.project.name}-{release.version}.tar.gz",
            python_version="source",
        )
        DBFileEventFactory.create(
            source=release_file,
            tag="fake:event",
        )

        # Without a `publisher_url` value, not considered trusted published
        assert not release.trusted_published

        DBFileEventFactory.create(
            source=release_file,
            tag="fake:event",
            additional={"publisher_url": "https://fake/url"},
        )

        assert release.trusted_published

    def test_trusted_published_mixed(self, db_session):
        # All files must be trusted-published for the release to count.
        release = DBReleaseFactory.create()
        rfile_1 = DBFileFactory.create(
            release=release,
            filename=f"{release.project.name}-{release.version}.tar.gz",
            python_version="source",
            packagetype="sdist",
        )
        rfile_2 = DBFileFactory.create(
            release=release,
            filename=f"{release.project.name}-{release.version}.whl",
            python_version="bdist_wheel",
            packagetype="bdist_wheel",
        )
        DBFileEventFactory.create(
            source=rfile_1,
            tag="fake:event",
        )
        DBFileEventFactory.create(
            source=rfile_2,
            tag="fake:event",
            additional={"publisher_url": "https://fake/url"},
        )

        assert not release.trusted_published

    def test_description_relationship(self, db_request):
        """When a Release is deleted, the Description is also deleted."""
        release = DBReleaseFactory.create()  # also creates a Description
        description = release.description

        db_request.db.delete(release)

        assert release in db_request.db.deleted
        assert description in db_request.db.deleted
|
TestRelease
|
python
|
has2k1__plotnine
|
plotnine/scales/limits.py
|
{
"start": 316,
"end": 2908
}
|
class _lim:
    """Base class for aesthetic limit helpers (e.g. ``xlim``/``ylim``).

    Subclasses set :attr:`aesthetic`; adding an instance to a plot
    (via ``__radd__``) creates and appends the matching scale.
    """

    # Name of the aesthetic (e.g. "x" or "y"), set by subclasses.
    aesthetic = None

    def __init__(self, *limits):
        if not limits:
            msg = "{}lim(), is missing limits"
            raise PlotnineError(msg.format(self.aesthetic))
        elif len(limits) == 1:
            # A single argument is the (min, max) pair itself.
            limits = limits[0]
        series = pd.Series(limits)

        # Type of transform
        # Descending limits (with no None) imply a reversed axis.
        if not any(x is None for x in limits) and limits[0] > limits[1]:
            self.trans = "reverse"
        elif array_kind.continuous(series):
            self.trans = "identity"
        elif array_kind.discrete(series):
            self.trans = None
        elif array_kind.datetime(series):
            self.trans = "datetime"
        elif array_kind.timedelta(series):
            self.trans = "timedelta"
        else:
            msg = f"Unknown type {type(limits[0])} of limits"
            raise TypeError(msg)

        self.limits = limits
        self.limits_series = series

    def get_scale(self, plot):
        """
        Create a scale
        """
        # This method does some introspection to save users from
        # scale mismatch error. This could happen when the
        # aesthetic is mapped to a categorical but the limits
        # are not provided in categorical form. We only handle
        # the case where the mapping uses an expression to
        # convert to categorical e.g `aes(color="factor(cyl)")`.
        # However if `"cyl"` column is a categorical and the
        # mapping is `aes(color="cyl")`, that will result in
        # an error. If later case proves common enough then we
        # could inspect the data and be clever based on that too!!
        ae = self.aesthetic
        series = self.limits_series
        ae_values = []

        # Look through all the mappings for this aesthetic,
        # if we detect any factor stuff then we convert the
        # limits data to categorical so that the right scale
        # can be chosen. This should take care of the most
        # common use cases.
        for layer in plot.layers:
            with suppress(KeyError):
                value = layer.mapping[ae]
                if isinstance(value, str):
                    ae_values.append(value)

        for value in ae_values:
            if "factor(" in value or "Categorical(" in value:
                series = pd.Categorical(self.limits_series)
                break

        return make_scale(
            self.aesthetic, series, limits=self.limits, trans=self.trans
        )

    def __radd__(self, other):
        """Add the limits to ``other`` (a plot) and return it."""
        scale = self.get_scale(other)
        other.scales.append(scale)
        return other
|
_lim
|
python
|
ipython__ipython
|
tests/test_formatters.py
|
{
"start": 620,
"end": 672
}
|
class BadPretty(object):
    """Fixture whose ``_repr_pretty_`` attribute is not callable."""

    # Not callable: exercises the formatter's handling of broken repr hooks.
    _repr_pretty_ = None
|
BadPretty
|
python
|
doocs__leetcode
|
solution/1600-1699/1600.Throne Inheritance/Solution.py
|
{
"start": 0,
"end": 771
}
|
class ThroneInheritance:
    """Maintains a royal family tree and computes the order of inheritance.

    The inheritance order is a preorder traversal of the family tree rooted
    at the king; members marked dead are skipped, but their descendants are
    still visited.
    """

    def __init__(self, kingName: str):
        self.king = kingName
        self.dead = set()  # names marked dead (nodes stay in the tree)
        self.g = defaultdict(list)  # parent -> children, in birth order

    def birth(self, parentName: str, childName: str) -> None:
        """Record that ``parentName`` has a new youngest child ``childName``."""
        self.g[parentName].append(childName)

    def death(self, name: str) -> None:
        """Mark ``name`` as dead (excluded from the order, tree unchanged)."""
        self.dead.add(name)

    def getInheritanceOrder(self) -> List[str]:
        """Return the current inheritance order, excluding dead members."""
        # Iterative preorder traversal: avoids Python's recursion limit on
        # very deep family trees and replaces the original
        # `cond and list.append(...)` side-effect idiom with an explicit if.
        ans = []
        stack = [self.king]
        while stack:
            name = stack.pop()
            if name not in self.dead:
                ans.append(name)
            # Push children in reverse so the eldest is visited first.
            stack.extend(reversed(self.g[name]))
        return ans
# Your ThroneInheritance object will be instantiated and called as such:
# obj = ThroneInheritance(kingName)
# obj.birth(parentName,childName)
# obj.death(name)
# param_3 = obj.getInheritanceOrder()
|
ThroneInheritance
|
python
|
google__pytype
|
pytype/tools/xref/indexer_test.py
|
{
"start": 9370,
"end": 9772
}
|
class ____(test_base.BaseTest, IndexerTestMixin):
def test_type_annotations(self):
ix = self.index_code("""
def f(x: int) -> int:
return x
""".lstrip("\n"))
self.assertDef(ix, "module.f", "f", "FunctionDef")
self.assertDef(ix, "module.f.x", "x", "Param")
self.assertDefLocs(ix, "module.f", [(1, 0)])
self.assertDefLocs(ix, "module.f.x", [(1, 6)])
|
IndexerTestPy3
|
python
|
pydantic__pydantic
|
pydantic/_internal/_repr.py
|
{
"start": 1075,
"end": 5172
}
|
class ____:
# Mixin to provide `__str__`, `__repr__`, and `__pretty__` and `__rich_repr__` methods.
# `__pretty__` is used by [devtools](https://python-devtools.helpmanual.io/).
# `__rich_repr__` is used by [rich](https://rich.readthedocs.io/en/stable/pretty.html).
# (this is not a docstring to avoid adding a docstring to classes which inherit from Representation)
__slots__ = ()
def __repr_args__(self) -> ReprArgs:
"""Returns the attributes to show in __str__, __repr__, and __pretty__ this is generally overridden.
Can either return:
* name - value pairs, e.g.: `[('foo_name', 'foo'), ('bar_name', ['b', 'a', 'r'])]`
* or, just values, e.g.: `[(None, 'foo'), (None, ['b', 'a', 'r'])]`
"""
attrs_names = cast(Collection[str], self.__slots__)
if not attrs_names and hasattr(self, '__dict__'):
attrs_names = self.__dict__.keys()
attrs = ((s, getattr(self, s)) for s in attrs_names)
return [(a, v if v is not self else self.__repr_recursion__(v)) for a, v in attrs if v is not None]
def __repr_name__(self) -> str:
"""Name of the instance's class, used in __repr__."""
return self.__class__.__name__
def __repr_recursion__(self, object: Any) -> str:
"""Returns the string representation of a recursive object."""
# This is copied over from the stdlib `pprint` module:
return f'<Recursion on {type(object).__name__} with id={id(object)}>'
def __repr_str__(self, join_str: str) -> str:
return join_str.join(repr(v) if a is None else f'{a}={v!r}' for a, v in self.__repr_args__())
def __pretty__(self, fmt: Callable[[Any], Any], **kwargs: Any) -> Generator[Any]:
"""Used by devtools (https://python-devtools.helpmanual.io/) to pretty print objects."""
yield self.__repr_name__() + '('
yield 1
for name, value in self.__repr_args__():
if name is not None:
yield name + '='
yield fmt(value)
yield ','
yield 0
yield -1
yield ')'
def __rich_repr__(self) -> RichReprResult:
"""Used by Rich (https://rich.readthedocs.io/en/stable/pretty.html) to pretty print objects."""
for name, field_repr in self.__repr_args__():
if name is None:
yield field_repr
else:
yield name, field_repr
def __str__(self) -> str:
return self.__repr_str__(' ')
def __repr__(self) -> str:
return f'{self.__repr_name__()}({self.__repr_str__(", ")})'
def display_as_type(obj: Any) -> str:
"""Pretty representation of a type, should be as close as possible to the original type definition string.
Takes some logic from `typing._type_repr`.
"""
if isinstance(obj, (types.FunctionType, types.BuiltinFunctionType)):
return obj.__name__
elif obj is ...:
return '...'
elif isinstance(obj, Representation):
return repr(obj)
elif isinstance(obj, ForwardRef) or typing_objects.is_typealiastype(obj):
return str(obj)
if not isinstance(obj, (_typing_extra.typing_base, _typing_extra.WithArgsTypes, type)):
obj = obj.__class__
if is_union_origin(typing_extensions.get_origin(obj)):
args = ', '.join(map(display_as_type, typing_extensions.get_args(obj)))
return f'Union[{args}]'
elif isinstance(obj, _typing_extra.WithArgsTypes):
if typing_objects.is_literal(typing_extensions.get_origin(obj)):
args = ', '.join(map(repr, typing_extensions.get_args(obj)))
else:
args = ', '.join(map(display_as_type, typing_extensions.get_args(obj)))
try:
return f'{obj.__qualname__}[{args}]'
except AttributeError:
return str(obj).replace('typing.', '').replace('typing_extensions.', '') # handles TypeAliasType in 3.12
elif isinstance(obj, type):
return obj.__qualname__
else:
return repr(obj).replace('typing.', '').replace('typing_extensions.', '')
|
Representation
|
python
|
wandb__wandb
|
wandb/vendor/pygments/lexers/markup.py
|
{
"start": 1750,
"end": 3381
}
|
class ____(RegexLexer):
"""
For MoinMoin (and Trac) Wiki markup.
.. versionadded:: 0.7
"""
name = 'MoinMoin/Trac Wiki markup'
aliases = ['trac-wiki', 'moin']
filenames = []
mimetypes = ['text/x-trac-wiki']
flags = re.MULTILINE | re.IGNORECASE
tokens = {
'root': [
(r'^#.*$', Comment),
(r'(!)(\S+)', bygroups(Keyword, Text)), # Ignore-next
# Titles
(r'^(=+)([^=]+)(=+)(\s*#.+)?$',
bygroups(Generic.Heading, using(this), Generic.Heading, String)),
# Literal code blocks, with optional shebang
(r'(\{\{\{)(\n#!.+)?', bygroups(Name.Builtin, Name.Namespace), 'codeblock'),
(r'(\'\'\'?|\|\||`|__|~~|\^|,,|::)', Comment), # Formatting
# Lists
(r'^( +)([.*-])( )', bygroups(Text, Name.Builtin, Text)),
(r'^( +)([a-z]{1,5}\.)( )', bygroups(Text, Name.Builtin, Text)),
# Other Formatting
(r'\[\[\w+.*?\]\]', Keyword), # Macro
(r'(\[[^\s\]]+)(\s+[^\]]+?)?(\])',
bygroups(Keyword, String, Keyword)), # Link
(r'^----+$', Keyword), # Horizontal rules
(r'[^\n\'\[{!_~^,|]+', Text),
(r'\n', Text),
(r'.', Text),
],
'codeblock': [
(r'\}\}\}', Name.Builtin, '#pop'),
# these blocks are allowed to be nested in Trac, but not MoinMoin
(r'\{\{\{', Text, '#push'),
(r'[^{}]+', Comment.Preproc), # slurp boring text
(r'.', Comment.Preproc), # allow loose { or }
],
}
|
MoinWikiLexer
|
python
|
pytorch__pytorch
|
torch/export/dynamic_shapes.py
|
{
"start": 16462,
"end": 17143
}
|
class ____(_ConstraintTarget):
"""
This represents a dim marked with Dim.AUTO/DYNAMIC (i.e. mark_dynamic() or maybe_mark_dynamic()),
which leaves relations & min/max ranges for inference, instead of requiring explicit specification.
The intention is for constraint violations to not be raised if produce_guards() finds equalities or
relations between a _RelaxedConstraint and another type of _Constraint.
"""
@property
def serializable_spec(self):
return {
"t_id": self.t_id,
"dim": self.dim,
}
Constraint = Union[_Constraint, _DerivedConstraint, _RelaxedConstraint]
@dataclasses.dataclass
|
_RelaxedConstraint
|
python
|
google__pytype
|
pytype/errors/error_types.py
|
{
"start": 4087,
"end": 4438
}
|
class ____(InvalidParameters):
"""E.g. a function requires parameter 'x' but 'x' isn't passed."""
def __init__(self, sig, passed_args, ctx, missing_parameter):
super().__init__(sig, passed_args, ctx)
self.missing_parameter = missing_parameter
# --------------------------------------------------------
# Typed dict errors
|
MissingParameter
|
python
|
dagster-io__dagster
|
python_modules/dagster/dagster/_core/instance/methods/asset_methods.py
|
{
"start": 2133,
"end": 30227
}
|
class ____:
"""Mixin class containing asset-related functionality for DagsterInstance.
This class consolidates asset operations from both AssetDomain and AssetMixin,
providing methods for asset management, materialization tracking, and health monitoring.
All methods are implemented as instance methods that DagsterInstance inherits.
"""
# These methods/properties are provided by DagsterInstance
# (no abstract declarations needed since DagsterInstance already implements them)
@property
def _instance(self) -> "DagsterInstance":
"""Cast self to DagsterInstance for type-safe access to instance methods and properties."""
from dagster._core.instance.instance import DagsterInstance
return check.inst(self, DagsterInstance)
# Private member access wrapper with consolidated type: ignore
@property
def _event_storage_impl(self):
"""Access to event storage."""
return self._instance._event_storage # noqa: SLF001
def can_read_asset_status_cache(self) -> bool:
"""Check if asset status cache can be read - moved from AssetDomain.can_read_asset_status_cache()."""
return self._event_storage_impl.can_read_asset_status_cache()
def update_asset_cached_status_data(
self, asset_key: "AssetKey", cache_values: "AssetStatusCacheValue"
) -> None:
"""Update asset cached status data - moved from AssetDomain.update_asset_cached_status_data()."""
self._event_storage_impl.update_asset_cached_status_data(asset_key, cache_values)
def wipe_asset_cached_status(self, asset_keys: Sequence["AssetKey"]) -> None:
"""Wipe asset cached status - moved from AssetDomain.wipe_asset_cached_status()."""
check.list_param(asset_keys, "asset_keys", of_type=AssetKey)
for asset_key in asset_keys:
self._event_storage_impl.wipe_asset_cached_status(asset_key)
def all_asset_keys(self) -> Sequence["AssetKey"]:
"""Get all asset keys - moved from AssetDomain.all_asset_keys()."""
return self._event_storage_impl.all_asset_keys()
@traced
def get_latest_materialization_events(
self, asset_keys: Iterable["AssetKey"]
) -> Mapping["AssetKey", Optional["EventLogEntry"]]:
"""Get latest materialization events - moved from AssetDomain.get_latest_materialization_events()."""
return self._event_storage_impl.get_latest_materialization_events(asset_keys)
@traced
def get_latest_asset_check_evaluation_record(
self, asset_check_key: "AssetCheckKey"
) -> Optional["AssetCheckExecutionRecord"]:
"""Get latest asset check evaluation record - moved from AssetDomain.get_latest_asset_check_evaluation_record()."""
return self._event_storage_impl.get_latest_asset_check_execution_by_key(
[asset_check_key]
).get(asset_check_key)
def fetch_failed_materializations(
self,
records_filter: Union["AssetKey", "AssetRecordsFilter"],
limit: int,
cursor: Optional[str] = None,
ascending: bool = False,
) -> "EventRecordsResult":
"""Return a list of AssetFailedToMaterialization records stored in the event log storage.
Moved from AssetDomain.fetch_failed_materializations().
Args:
records_filter (Union[AssetKey, AssetRecordsFilter]): the filter by which to
filter event records.
limit (int): Number of results to get.
cursor (Optional[str]): Cursor to use for pagination. Defaults to None.
ascending (Optional[bool]): Sort the result in ascending order if True, descending
otherwise. Defaults to descending.
Returns:
EventRecordsResult: Object containing a list of event log records and a cursor string.
"""
return self._event_storage_impl.fetch_failed_materializations(
records_filter, limit, cursor, ascending
)
def wipe_asset_partitions(
self,
asset_key: "AssetKey",
partition_keys: Sequence[str],
) -> None:
"""Wipes asset event history from the event log for the given asset key and partition keys.
Moved from AssetDomain.wipe_asset_partitions().
Args:
asset_key (AssetKey): Asset key to wipe.
partition_keys (Sequence[str]): Partition keys to wipe.
"""
from dagster._core.events import AssetWipedData, DagsterEvent, DagsterEventType
from dagster._core.instance.utils import RUNLESS_JOB_NAME, RUNLESS_RUN_ID
self._event_storage_impl.wipe_asset_partitions(asset_key, partition_keys)
self.report_dagster_event( # type: ignore[attr-defined]
DagsterEvent(
event_type_value=DagsterEventType.ASSET_WIPED.value,
event_specific_data=AssetWipedData(
asset_key=asset_key, partition_keys=partition_keys
),
job_name=RUNLESS_JOB_NAME,
),
run_id=RUNLESS_RUN_ID,
)
def get_event_tags_for_asset(
self,
asset_key: "AssetKey",
filter_tags: Optional[Mapping[str, str]] = None,
filter_event_id: Optional[int] = None,
) -> Sequence[Mapping[str, str]]:
"""Fetches asset event tags for the given asset key.
Moved from AssetDomain.get_event_tags_for_asset().
If filter_tags is provided, searches for events containing all of the filter tags. Then,
returns all tags for those events. This enables searching for multipartitioned asset
partition tags with a fixed dimension value, e.g. all of the tags for events where
"country" == "US".
If filter_event_id is provided, searches for the event with the provided event_id.
Returns a list of dicts, where each dict is a mapping of tag key to tag value for a
single event.
"""
return self._event_storage_impl.get_event_tags_for_asset(
asset_key, filter_tags, filter_event_id
)
@traced
def get_latest_planned_materialization_info(
self,
asset_key: "AssetKey",
partition: Optional[str] = None,
) -> Optional["PlannedMaterializationInfo"]:
"""Get latest planned materialization info.
Moved from AssetDomain.get_latest_planned_materialization_info().
"""
return self._event_storage_impl.get_latest_planned_materialization_info(
asset_key, partition
)
@traced
def get_materialized_partitions(
self,
asset_key: "AssetKey",
before_cursor: Optional[int] = None,
after_cursor: Optional[int] = None,
) -> set[str]:
"""Get materialized partitions for an asset - moved from AssetDomain.get_materialized_partitions()."""
return self._event_storage_impl.get_materialized_partitions(
asset_key, before_cursor=before_cursor, after_cursor=after_cursor
)
@traced
def get_latest_storage_id_by_partition(
self,
asset_key: "AssetKey",
event_type: "DagsterEventType",
partitions: Optional[set[str]] = None,
) -> Mapping[str, int]:
"""Fetch the latest materialization storage id for each partition for a given asset key.
Moved from AssetDomain.get_latest_storage_id_by_partition().
Returns a mapping of partition to storage id.
"""
return self._event_storage_impl.get_latest_storage_id_by_partition(
asset_key, event_type, partitions
)
@deprecated(breaking_version="2.0")
def fetch_planned_materializations(
self,
records_filter: Union["AssetKey", "AssetRecordsFilter"],
limit: int,
cursor: Optional[str] = None,
ascending: bool = False,
) -> "EventRecordsResult":
"""Return a list of planned materialization records stored in the event log storage.
Moved from AssetDomain.fetch_planned_materializations().
Args:
records_filter (Optional[Union[AssetKey, AssetRecordsFilter]]): the filter by which to
filter event records.
limit (int): Number of results to get.
cursor (Optional[str]): Cursor to use for pagination. Defaults to None.
ascending (Optional[bool]): Sort the result in ascending order if True, descending
otherwise. Defaults to descending.
Returns:
EventRecordsResult: Object containing a list of event log records and a cursor string
"""
from dagster._core.event_api import EventLogCursor
from dagster._core.events import DagsterEventType
from dagster._core.storage.event_log.base import EventRecordsFilter, EventRecordsResult
event_records_filter = (
EventRecordsFilter(DagsterEventType.ASSET_MATERIALIZATION_PLANNED, records_filter)
if isinstance(records_filter, AssetKey)
else records_filter.to_event_records_filter(
DagsterEventType.ASSET_MATERIALIZATION_PLANNED,
cursor=cursor,
ascending=ascending,
)
)
records = self._event_storage_impl.get_event_records(
event_records_filter, limit=limit, ascending=ascending
)
if records:
new_cursor = EventLogCursor.from_storage_id(records[-1].storage_id).to_string()
elif cursor:
new_cursor = cursor
else:
new_cursor = EventLogCursor.from_storage_id(-1).to_string()
has_more = len(records) == limit
return EventRecordsResult(records, cursor=new_cursor, has_more=has_more)
def _report_runless_asset_event(
self,
asset_event: Union[
"AssetMaterialization",
"AssetObservation",
"AssetCheckEvaluation",
"FreshnessStateEvaluation",
"FreshnessStateChange",
],
):
"""Use this directly over report_runless_asset_event to emit internal events.
Moved from AssetDomain._report_runless_asset_event().
"""
from dagster._core.events import (
AssetMaterialization,
AssetObservationData,
DagsterEvent,
DagsterEventType,
StepMaterializationData,
)
from dagster._core.instance.utils import RUNLESS_JOB_NAME, RUNLESS_RUN_ID
if isinstance(asset_event, AssetMaterialization):
event_type_value = DagsterEventType.ASSET_MATERIALIZATION.value
data_payload = StepMaterializationData(asset_event)
elif isinstance(asset_event, AssetCheckEvaluation):
event_type_value = DagsterEventType.ASSET_CHECK_EVALUATION.value
data_payload = asset_event
elif isinstance(asset_event, AssetObservation):
event_type_value = DagsterEventType.ASSET_OBSERVATION.value
data_payload = AssetObservationData(asset_event)
elif isinstance(asset_event, FreshnessStateEvaluation):
event_type_value = DagsterEventType.FRESHNESS_STATE_EVALUATION.value
data_payload = asset_event
elif isinstance(asset_event, FreshnessStateChange):
event_type_value = DagsterEventType.FRESHNESS_STATE_CHANGE.value
data_payload = asset_event
else:
from dagster._core.errors import DagsterInvariantViolationError
raise DagsterInvariantViolationError(
f"Received unexpected asset event type {asset_event}, expected"
" AssetMaterialization, AssetObservation, AssetCheckEvaluation, FreshnessStateEvaluation or FreshnessStateChange"
)
return self.report_dagster_event( # type: ignore[attr-defined]
run_id=RUNLESS_RUN_ID,
dagster_event=DagsterEvent(
event_type_value=event_type_value,
event_specific_data=data_payload,
job_name=RUNLESS_JOB_NAME,
),
)
def get_asset_check_health_state_for_assets(
self, asset_keys: Sequence["AssetKey"]
) -> Mapping["AssetKey", Optional["AssetCheckHealthState"]]:
"""Get asset check health state for assets.
Moved from AssetDomain.get_asset_check_health_state_for_assets().
"""
return {asset_key: None for asset_key in asset_keys}
def get_asset_freshness_health_state_for_assets(
self, asset_keys: Sequence["AssetKey"]
) -> Mapping["AssetKey", Optional["AssetFreshnessHealthState"]]:
"""Get asset freshness health state for assets.
Moved from AssetDomain.get_asset_freshness_health_state_for_assets().
"""
return {asset_key: None for asset_key in asset_keys}
def get_asset_materialization_health_state_for_assets(
self, asset_keys: Sequence["AssetKey"]
) -> Mapping["AssetKey", Optional["AssetMaterializationHealthState"]]:
"""Get asset materialization health state for assets.
Moved from AssetDomain.get_asset_materialization_health_state_for_assets().
"""
return {asset_key: None for asset_key in asset_keys}
def get_minimal_asset_materialization_health_state_for_assets(
self, asset_keys: Sequence["AssetKey"]
) -> Mapping["AssetKey", Optional["MinimalAssetMaterializationHealthState"]]:
"""Get minimal asset materialization health state for assets.
Moved from AssetDomain.get_minimal_asset_materialization_health_state_for_assets().
"""
return {asset_key: None for asset_key in asset_keys}
def get_latest_data_version_record(
self,
key: AssetKey,
is_source: Optional[bool] = None,
partition_key: Optional[str] = None,
before_cursor: Optional[int] = None,
after_cursor: Optional[int] = None,
) -> Optional["EventLogRecord"]:
"""Get the latest data version record for an asset.
Moved from AssetDomain.get_latest_data_version_record().
Args:
key (AssetKey): The asset key to get the latest data version record for.
is_source (Optional[bool]): Whether the asset is a source asset. If True, fetches
observations. If False, fetches materializations. If None, fetches materializations
first, then observations if no materialization found.
partition_key (Optional[str]): The partition key to filter by.
before_cursor (Optional[int]): Only return records with storage ID less than this.
after_cursor (Optional[int]): Only return records with storage ID greater than this.
Returns:
Optional[EventLogRecord]: The latest data version record, or None if not found.
"""
from dagster._core.storage.event_log.base import AssetRecordsFilter
records_filter = AssetRecordsFilter(
asset_key=key,
asset_partitions=[partition_key] if partition_key else None,
before_storage_id=before_cursor,
after_storage_id=after_cursor,
)
if is_source is True:
# this is a source asset, fetch latest observation record
return next(
iter(self.fetch_observations(records_filter, limit=1).records), # type: ignore[attr-defined]
None,
)
elif is_source is False:
# this is not a source asset, fetch latest materialization record
return next(
iter(self.fetch_materializations(records_filter, limit=1).records),
None,
)
else:
assert is_source is None
# if is_source is None, the requested key could correspond to either a source asset or
# materializable asset. If there is a non-null materialization, we are dealing with a
# materializable asset and should just return that. If not, we should check for any
# observation records that may match.
materialization = next(
iter(self.fetch_materializations(records_filter, limit=1).records),
None,
)
if materialization:
return materialization
return next(
iter(self.fetch_observations(records_filter, limit=1).records), # type: ignore[attr-defined]
None,
)
# Additional methods from AssetMixin that don't duplicate AssetDomain methods
def get_freshness_state_records(
self, keys: Sequence["AssetKey"]
) -> Mapping["AssetKey", "FreshnessStateRecord"]:
"""Get freshness state records - moved from AssetMixin.get_freshness_state_records()."""
return self._event_storage_impl.get_freshness_state_records(keys)
def get_asset_check_support(self) -> "AssetCheckInstanceSupport":
"""Get asset check support - moved from AssetMixin.get_asset_check_support()."""
from dagster._core.storage.asset_check_execution_record import AssetCheckInstanceSupport
return (
AssetCheckInstanceSupport.SUPPORTED
if self._event_storage_impl.supports_asset_checks
else AssetCheckInstanceSupport.NEEDS_MIGRATION
)
def dagster_asset_health_queries_supported(self) -> bool:
"""Check if asset health queries are supported - moved from AssetMixin.dagster_asset_health_queries_supported()."""
return False
def can_read_failure_events_for_asset(self, _asset_record: "AssetRecord") -> bool:
"""Check if failure events can be read for asset - moved from AssetMixin.can_read_failure_events_for_asset()."""
return False
def can_read_asset_failure_events(self) -> bool:
"""Check if asset failure events can be read - moved from AssetMixin.can_read_asset_failure_events()."""
return False
def internal_asset_freshness_enabled(self) -> bool:
"""Check if internal asset freshness is enabled - moved from AssetMixin.internal_asset_freshness_enabled()."""
return os.getenv("DAGSTER_ASSET_FRESHNESS_ENABLED", "").lower() != "false"
def streamline_read_asset_health_supported(self, streamline_name: StreamlineName) -> bool:
"""Check if streamline read asset health is supported - moved from AssetMixin.streamline_read_asset_health_supported()."""
return False
def streamline_read_asset_health_required(self, streamline_name: StreamlineName) -> bool:
"""Check if streamline read asset health is required - moved from AssetMixin.streamline_read_asset_health_required()."""
return False
# Implementation methods for public API methods (called from DagsterInstance public methods)
def fetch_materializations(
self,
records_filter: Union["AssetKey", "AssetRecordsFilter"],
limit: int,
cursor: Optional[str] = None,
ascending: bool = False,
) -> "EventRecordsResult":
"""Return a list of materialization records stored in the event log storage.
Moved from AssetDomain.fetch_materializations().
Args:
records_filter (Union[AssetKey, AssetRecordsFilter]): the filter by which to
filter event records.
limit (int): Number of results to get.
cursor (Optional[str]): Cursor to use for pagination. Defaults to None.
ascending (Optional[bool]): Sort the result in ascending order if True, descending
otherwise. Defaults to descending.
Returns:
EventRecordsResult: Object containing a list of event log records and a cursor string.
"""
return self._event_storage_impl.fetch_materializations(
records_filter, limit, cursor, ascending
)
def get_asset_keys(
self,
prefix: Optional[Sequence[str]] = None,
limit: Optional[int] = None,
cursor: Optional[str] = None,
) -> Sequence["AssetKey"]:
"""Return a filtered subset of asset keys managed by this instance.
Moved from AssetDomain.get_asset_keys().
Args:
prefix (Optional[Sequence[str]]): Return only assets having this key prefix.
limit (Optional[int]): Maximum number of keys to return.
cursor (Optional[str]): Cursor to use for pagination.
Returns:
Sequence[AssetKey]: List of asset keys.
"""
return self._event_storage_impl.get_asset_keys(prefix=prefix, limit=limit, cursor=cursor)
def get_asset_records(
self, asset_keys: Optional[Sequence["AssetKey"]] = None
) -> Sequence["AssetRecord"]:
"""Return an `AssetRecord` for each of the given asset keys.
Moved from AssetDomain.get_asset_records().
Args:
asset_keys (Optional[Sequence[AssetKey]]): List of asset keys to retrieve records for.
Returns:
Sequence[AssetRecord]: List of asset records.
"""
return self._event_storage_impl.get_asset_records(asset_keys)
def get_latest_materialization_code_versions(
self, asset_keys: Iterable["AssetKey"]
) -> Mapping["AssetKey", Optional[str]]:
"""Returns the code version used for the latest materialization of each of the provided
assets. Moved from AssetDomain.get_latest_materialization_code_versions().
Args:
asset_keys (Iterable[AssetKey]): The asset keys to find latest materialization code
versions for.
Returns:
Mapping[AssetKey, Optional[str]]: A dictionary with a key for each of the provided asset
keys. The values will be None if the asset has no materializations. If an asset does
not have a code version explicitly assigned to its definitions, but was
materialized, Dagster assigns the run ID as its code version.
"""
from dagster._core.definitions.data_version import extract_data_provenance_from_entry
result: dict[AssetKey, Optional[str]] = {}
latest_materialization_events = self.get_latest_materialization_events(asset_keys)
for asset_key in asset_keys:
event_log_entry = latest_materialization_events.get(asset_key)
if event_log_entry is None:
result[asset_key] = None
else:
data_provenance = extract_data_provenance_from_entry(event_log_entry)
result[asset_key] = data_provenance.code_version if data_provenance else None
return result
def get_latest_materialization_event(self, asset_key: "AssetKey") -> Optional["EventLogEntry"]:
"""Fetch the latest materialization event for the given asset key.
Moved from AssetDomain.get_latest_materialization_event().
Args:
asset_key (AssetKey): Asset key to return materialization for.
Returns:
Optional[EventLogEntry]: The latest materialization event for the given asset
key, or `None` if the asset has not been materialized.
"""
return self._event_storage_impl.get_latest_materialization_events([asset_key]).get(
asset_key
)
def get_status_by_partition(
self,
asset_key: AssetKey,
partition_keys: Sequence[str],
partitions_def: "PartitionsDefinition",
) -> Optional[Mapping[str, "AssetPartitionStatus"]]:
"""Get the current status of provided partition_keys for the provided asset.
Moved from AssetDomain.get_status_by_partition().
Args:
asset_key (AssetKey): The asset to get per-partition status for.
partition_keys (Sequence[str]): The partitions to get status for.
partitions_def (PartitionsDefinition): The PartitionsDefinition of the asset to get
per-partition status for.
Returns:
Optional[Mapping[str, AssetPartitionStatus]]: status for each partition key
"""
from typing import cast
from dagster._core.storage.partition_status_cache import (
AssetPartitionStatus,
AssetStatusCacheValue,
get_and_update_asset_status_cache_value,
)
# Cast is safe since this mixin is only used by DagsterInstance
cached_value = get_and_update_asset_status_cache_value(
cast("DagsterInstance", self), asset_key, partitions_def
)
if isinstance(cached_value, AssetStatusCacheValue):
materialized_partitions = cached_value.deserialize_materialized_partition_subsets(
partitions_def
)
failed_partitions = cached_value.deserialize_failed_partition_subsets(partitions_def)
in_progress_partitions = cached_value.deserialize_in_progress_partition_subsets(
partitions_def
)
status_by_partition = {}
for partition_key in partition_keys:
if partition_key in in_progress_partitions:
status_by_partition[partition_key] = AssetPartitionStatus.IN_PROGRESS
elif partition_key in failed_partitions:
status_by_partition[partition_key] = AssetPartitionStatus.FAILED
elif partition_key in materialized_partitions:
status_by_partition[partition_key] = AssetPartitionStatus.MATERIALIZED
else:
status_by_partition[partition_key] = None
return status_by_partition
def has_asset_key(self, asset_key: "AssetKey") -> bool:
"""Return true if this instance manages the given asset key.
Moved from AssetDomain.has_asset_key().
Args:
asset_key (AssetKey): Asset key to check.
"""
return self._event_storage_impl.has_asset_key(asset_key)
def report_runless_asset_event(
self,
asset_event: Union[
"AssetMaterialization",
"AssetObservation",
"AssetCheckEvaluation",
"FreshnessStateEvaluation",
"FreshnessStateChange",
],
):
"""Record an event log entry related to assets that does not belong to a Dagster run.
Moved from AssetDomain.report_runless_asset_event().
"""
from dagster._core.events import AssetMaterialization
if not isinstance(
asset_event,
(
AssetMaterialization,
AssetObservation,
AssetCheckEvaluation,
FreshnessStateEvaluation,
FreshnessStateChange,
),
):
from dagster._core.errors import DagsterInvariantViolationError
raise DagsterInvariantViolationError(
f"Received unexpected asset event type {asset_event}, expected"
" AssetMaterialization, AssetObservation, AssetCheckEvaluation, FreshnessStateEvaluation or FreshnessStateChange"
)
return self._report_runless_asset_event(asset_event)
def wipe_assets(self, asset_keys: Sequence["AssetKey"]) -> None:
"""Wipes asset event history from the event log for the given asset keys.
Moved from AssetDomain.wipe_assets().
Args:
asset_keys (Sequence[AssetKey]): Asset keys to wipe.
"""
from dagster._core.events import AssetWipedData, DagsterEvent, DagsterEventType
from dagster._core.instance.utils import RUNLESS_JOB_NAME, RUNLESS_RUN_ID
check.list_param(asset_keys, "asset_keys", of_type=AssetKey)
for asset_key in asset_keys:
self._event_storage_impl.wipe_asset(asset_key)
self.report_dagster_event( # type: ignore[attr-defined]
DagsterEvent(
event_type_value=DagsterEventType.ASSET_WIPED.value,
event_specific_data=AssetWipedData(asset_key=asset_key, partition_keys=None),
job_name=RUNLESS_JOB_NAME,
),
run_id=RUNLESS_RUN_ID,
)
|
AssetMethods
|
python
|
walkccc__LeetCode
|
solutions/73. Set Matrix Zeroes/73.py
|
{
"start": 0,
"end": 833
}
|
class ____:
def setZeroes(self, matrix: list[list[int]]) -> None:
m = len(matrix)
n = len(matrix[0])
shouldFillFirstRow = 0 in matrix[0]
shouldFillFirstCol = 0 in list(zip(*matrix))[0]
# Store the information in the first row and the first column.
for i in range(1, m):
for j in range(1, n):
if matrix[i][j] == 0:
matrix[i][0] = 0
matrix[0][j] = 0
# Fill 0s for the matrix except the first row and the first column.
for i in range(1, m):
for j in range(1, n):
if matrix[i][0] == 0 or matrix[0][j] == 0:
matrix[i][j] = 0
# Fill 0s for the first row if needed.
if shouldFillFirstRow:
matrix[0] = [0] * n
# Fill 0s for the first column if needed.
if shouldFillFirstCol:
for row in matrix:
row[0] = 0
|
Solution
|
python
|
python__mypy
|
mypy/stubtest.py
|
{
"start": 1186,
"end": 1445
}
|
class ____:
"""Marker object for things that are missing (from a stub or the runtime)."""
def __repr__(self) -> str:
return "MISSING"
MISSING: Final = Missing()
T = TypeVar("T")
MaybeMissing: typing_extensions.TypeAlias = T | Missing
|
Missing
|
python
|
automl__auto-sklearn
|
autosklearn/metalearning/metafeatures/metafeatures.py
|
{
"start": 10617,
"end": 10930
}
|
class ____(MetaFeature):
def _calculate(self, X, y, logger, feat_type):
return float(metafeatures["NumberOfFeatures"](X, y, logger).value) / float(
metafeatures["NumberOfInstances"](X, y, logger).value
)
@metafeatures.define("LogDatasetRatio", dependency="DatasetRatio")
|
DatasetRatio
|
python
|
great-expectations__great_expectations
|
great_expectations/expectations/expectation_configuration.py
|
{
"start": 3593,
"end": 21536
}
|
class ____(SerializableDictDot):
"""Defines the parameters and name of a specific Expectation.
Args:
type: The name of the expectation class to use in snake case, e.g. `expect_column_values_to_not_be_null`.
kwargs: The keyword arguments to pass to the expectation class.
meta: A dictionary of metadata to attach to the expectation.
notes: Notes about this expectation.
description: The description of the expectation. This will be rendered instead of the default template.
severity: The severity of the expectation failure.
success_on_last_run: Whether the expectation succeeded on the last run.
id: The corresponding GX Cloud ID for the expectation.
expectation_context: The context for the expectation.
rendered_content: Rendered content for the expectation.
Raises:
InvalidExpectationConfigurationError: If `expectation_type` arg is not a str.
InvalidExpectationConfigurationError: If `kwargs` arg is not a dict.
InvalidExpectationKwargsError: If domain kwargs are missing.
ValueError: If a `domain_type` cannot be determined.
""" # noqa: E501 # FIXME CoP
runtime_kwargs: ClassVar[tuple[str, ...]] = (
"result_format",
"catch_exceptions",
)
def __init__( # noqa: PLR0913 # FIXME CoP
self,
type: str,
kwargs: dict,
meta: Optional[dict] = None,
notes: str | list[str] | None = None,
description: str | None = None,
severity: FailureSeverity = FailureSeverity.CRITICAL,
success_on_last_run: Optional[bool] = None,
id: Optional[str] = None,
expectation_context: Optional[ExpectationContext] = None,
rendered_content: Optional[List[RenderedAtomicContent]] = None,
) -> None:
if not isinstance(type, str):
raise InvalidExpectationConfigurationError("expectation_type must be a string") # noqa: TRY003 # FIXME CoP
self._type = type
if not isinstance(kwargs, dict):
raise InvalidExpectationConfigurationError( # noqa: TRY003 # FIXME CoP
"expectation configuration kwargs must be a dict."
)
self._kwargs = kwargs
# the kwargs before suite parameters are evaluated
self._raw_kwargs: dict[str, Any] | None = None
if meta is None:
meta = {}
# We require meta information to be serializable, but do not convert until necessary
ensure_json_serializable(meta)
self.meta = meta
self.notes = notes
self.description = description
self.severity = severity
self.success_on_last_run = success_on_last_run
self._id = id
self._expectation_context = expectation_context
self._rendered_content = rendered_content
def process_suite_parameters(
self,
suite_parameters,
interactive_evaluation: bool = True,
data_context: Optional[AbstractDataContext] = None,
) -> None:
if not self._raw_kwargs:
suite_args, _ = build_suite_parameters(
expectation_args=self._kwargs,
suite_parameters=suite_parameters,
interactive_evaluation=interactive_evaluation,
data_context=data_context,
)
self._raw_kwargs = self._kwargs
self._kwargs = suite_args
else:
logger.debug("suite_parameters have already been built on this expectation")
def get_raw_configuration(self) -> ExpectationConfiguration:
# return configuration without substituted suite parameters
raw_config = deepcopy(self)
if raw_config._raw_kwargs is not None:
raw_config._kwargs = raw_config._raw_kwargs
raw_config._raw_kwargs = None
return raw_config
@property
def id(self) -> Optional[str]:
return self._id
@id.setter
def id(self, value: str) -> None:
self._id = value
@property
def expectation_context(self) -> Optional[ExpectationContext]:
return self._expectation_context
@expectation_context.setter
def expectation_context(self, value: ExpectationContext) -> None:
self._expectation_context = value
@property
def type(self) -> str:
return self._type
@property
def kwargs(self) -> dict:
return self._kwargs
@kwargs.setter
def kwargs(self, value: dict) -> None:
self._kwargs = value
@property
def rendered_content(self) -> Optional[List[RenderedAtomicContent]]:
return self._rendered_content
@rendered_content.setter
def rendered_content(self, value: Optional[List[RenderedAtomicContent]]) -> None:
self._rendered_content = value
@property
def severity(self) -> FailureSeverity:
return self._severity
@severity.setter
def severity(self, value: Union[FailureSeverity, str]) -> None:
# Convert string severity to enum and validate
if isinstance(value, str):
try:
self._severity = FailureSeverity(value)
except ValueError:
valid_values = ", ".join([member.value for member in FailureSeverity])
raise InvalidExpectationConfigurationError( # noqa: TRY003
f"Invalid severity '{value}'. Must be one of: {valid_values}"
)
else:
# Validate that it's a valid FailureSeverity enum
if not isinstance(value, FailureSeverity):
raise InvalidExpectationConfigurationError( # noqa: TRY003
f"Severity must be string or enum, got {type(value).__name__}"
)
self._severity = value
def _get_default_custom_kwargs(self) -> KWargDetailsDict:
# NOTE: this is a holdover until class-first expectations control their
# defaults, and so defaults are inherited.
if self.type.startswith("expect_column_pair"):
return {
"domain_kwargs": (
"column_A",
"column_B",
"row_condition",
"condition_parser",
),
# NOTE: this is almost certainly incomplete; subclasses should override
"success_kwargs": (),
"default_kwarg_values": {
"column_A": None,
"column_B": None,
"row_condition": None,
"condition_parser": None,
},
}
elif self.type.startswith("expect_column"):
return {
"domain_kwargs": ("column", "row_condition", "condition_parser"),
# NOTE: this is almost certainly incomplete; subclasses should override
"success_kwargs": (),
"default_kwarg_values": {
"column": None,
"row_condition": None,
"condition_parser": None,
},
}
logger.warning("Requested kwargs for an unrecognized expectation.")
return {
"domain_kwargs": (),
# NOTE: this is almost certainly incomplete; subclasses should override
"success_kwargs": (),
"default_kwarg_values": {},
}
def get_domain_kwargs(self) -> dict:
default_kwarg_values: dict[str, Any] = {}
try:
impl = self._get_expectation_impl()
except ExpectationNotFoundError:
expectation_kwargs_dict = self._get_default_custom_kwargs()
domain_keys = expectation_kwargs_dict["domain_kwargs"]
else:
domain_keys = impl.domain_keys
default_kwarg_values = self._get_expectation_class_defaults()
domain_kwargs = {
key: self.kwargs.get(key, default_kwarg_values.get(key)) for key in domain_keys
}
missing_kwargs = set(domain_keys) - set(domain_kwargs.keys())
if missing_kwargs:
raise InvalidExpectationKwargsError(f"Missing domain kwargs: {list(missing_kwargs)}") # noqa: TRY003 # FIXME CoP
return domain_kwargs
def get_success_kwargs(self) -> dict:
"""Gets the success and domain kwargs for this ExpectationConfiguration.
Raises:
ExpectationNotFoundError: If the expectation implementation is not found.
Returns:
A dictionary with the success and domain kwargs of an expectation.
"""
default_kwarg_values: Mapping[str, str | bool | float | object | None]
try:
impl = self._get_expectation_impl()
except ExpectationNotFoundError:
expectation_kwargs_dict = self._get_default_custom_kwargs()
default_kwarg_values = expectation_kwargs_dict.get("default_kwarg_values", {})
success_keys = expectation_kwargs_dict["success_kwargs"]
else:
success_keys = impl.success_keys
default_kwarg_values = self._get_expectation_class_defaults()
domain_kwargs = self.get_domain_kwargs()
success_kwargs = {
key: self.kwargs.get(key, default_kwarg_values.get(key)) for key in success_keys
}
success_kwargs.update(domain_kwargs)
return success_kwargs
def get_runtime_kwargs(self, runtime_configuration: Optional[dict] = None) -> dict:
runtime_keys: tuple[str, ...]
default_kwarg_values: Mapping[str, str | bool | float | object | None]
try:
impl = self._get_expectation_impl()
except ExpectationNotFoundError:
expectation_kwargs_dict = self._get_default_custom_kwargs()
default_kwarg_values = expectation_kwargs_dict.get("default_kwarg_values", {})
runtime_keys = self.runtime_kwargs
else:
runtime_keys = impl.runtime_keys
default_kwarg_values = self._get_expectation_class_defaults()
success_kwargs = self.get_success_kwargs()
lookup_kwargs = deepcopy(self.kwargs)
if runtime_configuration:
lookup_kwargs.update(runtime_configuration)
runtime_kwargs = {
key: lookup_kwargs.get(key, default_kwarg_values.get(key)) for key in runtime_keys
}
result_format = runtime_kwargs["result_format"]
if result_format is not None:
runtime_kwargs["result_format"] = parse_result_format(result_format)
runtime_kwargs.update(success_kwargs)
return runtime_kwargs
def applies_to_same_domain(
self, other_expectation_configuration: ExpectationConfiguration
) -> bool:
if self.type != other_expectation_configuration.type:
return False
return self.get_domain_kwargs() == other_expectation_configuration.get_domain_kwargs()
# noinspection PyPep8Naming
def isEquivalentTo(
self,
other: Union[dict, ExpectationConfiguration],
match_type: str = "success",
) -> bool:
"""ExpectationConfiguration equivalence does not include meta, and relies on *equivalence* of kwargs.""" # noqa: E501 # FIXME CoP
if not isinstance(other, self.__class__):
if isinstance(other, dict):
try:
# noinspection PyNoneFunctionAssignment
other = expectationConfigurationSchema.load(other)
except ValidationError:
logger.debug(
"Unable to evaluate equivalence of ExpectationConfiguration object with dict because " # noqa: E501 # FIXME CoP
"dict other could not be instantiated as an ExpectationConfiguration"
)
return NotImplemented
else:
# Delegate comparison to the other instance
return NotImplemented
if match_type == "domain":
return all(
(
self.type == other.type, # type: ignore[union-attr] # could be dict
self.get_domain_kwargs() == other.get_domain_kwargs(), # type: ignore[union-attr] # could be dict
)
)
if match_type == "success":
return all(
(
self.type == other.type, # type: ignore[union-attr] # could be dict
self.get_success_kwargs() == other.get_success_kwargs(), # type: ignore[union-attr] # could be dict
)
)
if match_type == "runtime":
return all(
(
self.type == other.type, # type: ignore[union-attr] # could be dict
self.kwargs == other.kwargs, # type: ignore[union-attr] # could be dict
)
)
return False
@override
def __eq__(self, other):
"""ExpectationConfiguration equality does include meta, but ignores instance identity."""
if not isinstance(other, self.__class__):
# Delegate comparison to the other instance's __eq__.
return NotImplemented
this_kwargs: dict = convert_to_json_serializable(self.kwargs)
other_kwargs: dict = convert_to_json_serializable(other.kwargs)
this_meta: dict = convert_to_json_serializable(self.meta)
other_meta: dict = convert_to_json_serializable(other.meta)
return all(
(
self.type == other.type,
this_kwargs == other_kwargs,
this_meta == other_meta,
)
)
@override
def __hash__(self) -> int:
this_kwargs: dict = convert_to_json_serializable(self.kwargs)
this_meta: dict = convert_to_json_serializable(self.meta)
def make_hashable(obj):
"""Convert unhashable types to hashable ones recursively."""
if isinstance(obj, (str, int, float, bool, type(None))):
return obj
elif isinstance(obj, list):
return tuple(make_hashable(item) for item in obj)
elif isinstance(obj, dict):
return tuple(sorted((k, make_hashable(v)) for k, v in obj.items()))
else:
return str(obj)
return hash(
(
self.type,
make_hashable(this_kwargs),
make_hashable(this_meta),
)
)
def __ne__(self, other): # type: ignore[explicit-override] # FIXME
# By using the == operator, the returned NotImplemented is handled correctly.
return not self == other
def __repr__(self): # type: ignore[explicit-override] # FIXME
return json.dumps(self.to_json_dict())
@override
def __str__(self):
return json.dumps(self.to_json_dict(), indent=2)
@override
def to_json_dict(self) -> Dict[str, JSONValues]:
"""Returns a JSON-serializable dict representation of this ExpectationConfiguration.
Returns:
A JSON-serializable dict representation of this ExpectationConfiguration.
"""
myself = expectationConfigurationSchema.dump(self)
# NOTE - JPC - 20191031: migrate to expectation-specific schemas that subclass result with properly-typed # noqa: E501 # FIXME CoP
# schemas to get serialization all-the-way down via dump
myself["kwargs"] = convert_to_json_serializable(myself["kwargs"])
# Post dump hook removes this value if null so we need to ensure applicability before conversion # noqa: E501 # FIXME CoP
if "expectation_context" in myself:
myself["expectation_context"] = convert_to_json_serializable(
myself["expectation_context"]
)
if "rendered_content" in myself:
myself["rendered_content"] = convert_to_json_serializable(myself["rendered_content"])
return myself
def _get_expectation_impl(self) -> Type[Expectation]:
return get_expectation_impl(self.type)
def to_domain_obj(self) -> Expectation:
expectation_impl = self._get_expectation_impl()
kwargs: dict[Any, Any] = {
"id": self.id,
"meta": self.meta,
"notes": self.notes,
"rendered_content": self.rendered_content,
"severity": self.severity,
}
# it's possible description could be subclassed as a class variable,
# because we have documented it that way in the past.
# if that is the case, passing a self.description of any type would raise an error
# we can't check for the presence of expectation_impl.description
# because _get_expectation_impl() only returns registered expectations
if self.description:
kwargs.update({"description": self.description})
kwargs.update(self.kwargs)
return expectation_impl(**kwargs)
def get_domain_type(self) -> MetricDomainTypes:
"""Return "domain_type" of this expectation."""
if self.type.startswith("expect_table_"):
return MetricDomainTypes.TABLE
if "column" in self.kwargs:
return MetricDomainTypes.COLUMN
if "column_A" in self.kwargs and "column_B" in self.kwargs:
return MetricDomainTypes.COLUMN_PAIR
if "column_list" in self.kwargs:
return MetricDomainTypes.MULTICOLUMN
raise ValueError( # noqa: TRY003 # FIXME CoP
'Unable to determine "domain_type" of this "ExpectationConfiguration" object from "kwargs" and heuristics.' # noqa: E501 # FIXME CoP
)
def _get_expectation_class_defaults(self) -> dict[str, Any]:
cls = self._get_expectation_impl()
return {
name: field.default if not field.required else None
for name, field in cls.__fields__.items()
}
|
ExpectationConfiguration
|
python
|
aio-libs__aiohttp
|
aiohttp/worker.py
|
{
"start": 7472,
"end": 7813
}
|
class ____(GunicornWebWorker):
def init_process(self) -> None:
import uvloop
# Setup uvloop policy, so that every
# asyncio.get_event_loop() will create an instance
# of uvloop event loop.
asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
super().init_process()
|
GunicornUVLoopWebWorker
|
python
|
facebookresearch__faiss
|
tests/test_index.py
|
{
"start": 535,
"end": 734
}
|
class ____(unittest.TestCase):
def test_version_attribute(self):
assert hasattr(faiss, '__version__')
assert re.match('^\\d+\\.\\d+\\.\\d+$', faiss.__version__)
|
TestModuleInterface
|
python
|
ray-project__ray
|
python/ray/train/lightgbm/_lightgbm_utils.py
|
{
"start": 381,
"end": 4744
}
|
class ____:
CHECKPOINT_NAME = "model.txt"
def __init__(
self,
metrics: Optional[Union[str, List[str], Dict[str, str]]] = None,
filename: str = CHECKPOINT_NAME,
frequency: int = 0,
checkpoint_at_end: bool = True,
results_postprocessing_fn: Optional[
Callable[[Dict[str, Union[float, List[float]]]], Dict[str, float]]
] = None,
):
if isinstance(metrics, str):
metrics = [metrics]
self._metrics = metrics
self._filename = filename
self._frequency = frequency
self._checkpoint_at_end = checkpoint_at_end
self._results_postprocessing_fn = results_postprocessing_fn
@classmethod
def get_model(
cls, checkpoint: Checkpoint, filename: str = CHECKPOINT_NAME
) -> Booster:
"""Retrieve the model stored in a checkpoint reported by this callback.
Args:
checkpoint: The checkpoint object returned by a training run.
The checkpoint should be saved by an instance of this callback.
filename: The filename to load the model from, which should match
the filename used when creating the callback.
Returns:
The model loaded from the checkpoint.
"""
with checkpoint.as_directory() as checkpoint_path:
return Booster(model_file=Path(checkpoint_path, filename).as_posix())
def _get_report_dict(self, evals_log: Dict[str, Dict[str, list]]) -> dict:
result_dict = flatten_dict(evals_log, delimiter="-")
if not self._metrics:
report_dict = result_dict
else:
report_dict = {}
for key in self._metrics:
if isinstance(self._metrics, dict):
metric = self._metrics[key]
else:
metric = key
report_dict[key] = result_dict[metric]
if self._results_postprocessing_fn:
report_dict = self._results_postprocessing_fn(report_dict)
return report_dict
def _get_eval_result(self, env: CallbackEnv) -> dict:
eval_result = {}
for entry in env.evaluation_result_list:
data_name, eval_name, result = entry[0:3]
if len(entry) > 4:
stdv = entry[4]
suffix = "-mean"
else:
stdv = None
suffix = ""
if data_name not in eval_result:
eval_result[data_name] = {}
eval_result[data_name][eval_name + suffix] = result
if stdv is not None:
eval_result[data_name][eval_name + "-stdv"] = stdv
return eval_result
@abstractmethod
def _get_checkpoint(self, model: Booster) -> Optional[Checkpoint]:
"""Get checkpoint from model.
This method needs to be implemented by subclasses.
"""
raise NotImplementedError
@abstractmethod
def _save_and_report_checkpoint(self, report_dict: Dict, model: Booster):
"""Save checkpoint and report metrics corresonding to this checkpoint.
This method needs to be implemented by subclasses.
"""
raise NotImplementedError
@abstractmethod
def _report_metrics(self, report_dict: Dict):
"""Report Metrics.
This method needs to be implemented by subclasses.
"""
raise NotImplementedError
def __call__(self, env: CallbackEnv) -> None:
eval_result = self._get_eval_result(env)
report_dict = self._get_report_dict(eval_result)
# Ex: if frequency=2, checkpoint_at_end=True and num_boost_rounds=11,
# you will checkpoint at iterations 1, 3, 5, ..., 9, and 10 (checkpoint_at_end)
# (iterations count from 0)
on_last_iter = env.iteration == env.end_iteration - 1
should_checkpoint_at_end = on_last_iter and self._checkpoint_at_end
should_checkpoint_with_frequency = (
self._frequency != 0 and (env.iteration + 1) % self._frequency == 0
)
should_checkpoint = should_checkpoint_at_end or should_checkpoint_with_frequency
if should_checkpoint:
self._save_and_report_checkpoint(report_dict, env.model)
else:
self._report_metrics(report_dict)
@PublicAPI(stability="beta")
|
RayReportCallback
|
python
|
pytransitions__transitions
|
transitions/extensions/diagrams_mermaid.py
|
{
"start": 9593,
"end": 11085
}
|
class ____:
def __init__(self, source):
self.source = source
# pylint: disable=redefined-builtin,unused-argument
def draw(self, filename, format=None, prog="dot", args=""):
"""
Generates and saves an image of the state machine using graphviz. Note that `prog` and `args` are only part
of the signature to mimic `Agraph.draw` and thus allow to easily switch between graph backends.
Args:
filename (str or file descriptor or stream or None): path and name of image output, file descriptor,
stream object or None
format (str): ignored
prog (str): ignored
args (str): ignored
Returns:
None or str: Returns a binary string of the graph when the first parameter (`filename`) is set to None.
"""
if filename is None:
return self.source
if isinstance(filename, str):
with open(filename, "w") as f:
f.write(self.source)
else:
filename.write(self.source.encode())
return None
invalid = {"style", "shape", "peripheries", "strict", "directed"}
convertible = {"fillcolor": "fill", "rankdir": "direction"}
def _to_mermaid(style_attrs, sep):
params = []
for k, v in style_attrs.items():
if k in invalid:
continue
if k in convertible:
k = convertible[k]
params.append("{}{}{}".format(k, sep, v))
return params
|
DigraphMock
|
python
|
google__pytype
|
pytype/tools/xref/testdata/nested_class.py
|
{
"start": 101,
"end": 691
}
|
class ____:
#- @B defines/binding ClassB
#- ClassB.node/kind record
#- ClassB.subkind class
class B:
#- @foo defines/binding FnFoo
#- @self defines/binding ArgBSelf
#- FnFoo.node/kind function
#- FnFoo param.0 ArgBSelf
def foo(self):
pass
#- @bar defines/binding FnBar
#- @self defines/binding ArgASelf
#- FnBar.node/kind function
#- FnBar param.0 ArgASelf
def bar(self):
#- @B ref ClassB
return self.B()
#- @A ref ClassA
#- @B ref ClassB
#- @foo ref FnFoo
A.B().foo()
#- @A ref ClassA
#- @bar ref FnBar
#- @foo ref FnFoo
A().bar().foo()
|
A
|
python
|
boto__boto3
|
tests/unit/dynamodb/test_transform.py
|
{
"start": 18618,
"end": 21057
}
|
class ____(unittest.TestCase):
def setUp(self):
self.events = mock.Mock()
self.client = mock.Mock()
self.client.meta.events = self.events
self.meta = ResourceMeta('dynamodb')
def test_instantiation(self):
# Instantiate the class.
dynamodb_class = type(
'dynamodb',
(DynamoDBHighLevelResource, ServiceResource),
{'meta': self.meta},
)
with mock.patch(
'boto3.dynamodb.transform.TransformationInjector'
) as mock_injector:
with mock.patch(
'boto3.dynamodb.transform.DocumentModifiedShape.'
'replace_documentation_for_matching_shape'
) as mock_modify_documentation_method:
dynamodb_class(client=self.client)
# It should have fired the following events upon instantiation.
event_call_args = self.events.register.call_args_list
assert event_call_args == [
mock.call(
'provide-client-params.dynamodb',
copy_dynamodb_params,
unique_id='dynamodb-create-params-copy',
),
mock.call(
'before-parameter-build.dynamodb',
mock_injector.return_value.inject_condition_expressions,
unique_id='dynamodb-condition-expression',
),
mock.call(
'before-parameter-build.dynamodb',
mock_injector.return_value.inject_attribute_value_input,
unique_id='dynamodb-attr-value-input',
),
mock.call(
'after-call.dynamodb',
mock_injector.return_value.inject_attribute_value_output,
unique_id='dynamodb-attr-value-output',
),
mock.call(
'docs.*.dynamodb.*.complete-section',
mock_modify_documentation_method,
unique_id='dynamodb-attr-value-docs',
),
mock.call(
'docs.*.dynamodb.*.complete-section',
mock_modify_documentation_method,
unique_id='dynamodb-key-expression-docs',
),
mock.call(
'docs.*.dynamodb.*.complete-section',
mock_modify_documentation_method,
unique_id='dynamodb-cond-expression-docs',
),
]
|
TestDynamoDBHighLevelResource
|
python
|
apache__airflow
|
airflow-core/tests/unit/models/test_callback.py
|
{
"start": 2014,
"end": 3771
}
|
class ____:
@pytest.mark.parametrize(
("callback_def", "expected_cb_instance"),
[
pytest.param(
TEST_ASYNC_CALLBACK, TriggererCallback(callback_def=TEST_ASYNC_CALLBACK), id="triggerer"
),
pytest.param(
TEST_SYNC_CALLBACK,
ExecutorCallback(
callback_def=TEST_SYNC_CALLBACK, fetch_method=CallbackFetchMethod.IMPORT_PATH
),
id="executor",
),
],
)
def test_create_from_sdk_def(self, callback_def, expected_cb_instance):
returned_cb = Callback.create_from_sdk_def(callback_def)
assert isinstance(returned_cb, type(expected_cb_instance))
assert returned_cb.data == expected_cb_instance.data
def test_create_from_sdk_def_unknown_type(self):
"""Test that unknown callback type raises ValueError"""
class UnknownCallback:
pass
unknown_callback = UnknownCallback()
with pytest.raises(ValueError, match="Cannot handle Callback of type"):
Callback.create_from_sdk_def(unknown_callback)
def test_get_metric_info(self):
callback = TriggererCallback(TEST_ASYNC_CALLBACK, prefix="deadline_alerts", dag_id=TEST_DAG_ID)
callback.data["kwargs"] = {"context": {"dag_id": TEST_DAG_ID}, "email": "test@example.com"}
metric_info = callback.get_metric_info(CallbackState.SUCCESS, "0")
assert metric_info["stat"] == "deadline_alerts.callback_success"
assert metric_info["tags"] == {
"result": "0",
"path": TEST_ASYNC_CALLBACK.path,
"kwargs": {"email": "test@example.com"},
"dag_id": TEST_DAG_ID,
}
|
TestCallback
|
python
|
simonw__datasette
|
datasette/views/database.py
|
{
"start": 34469,
"end": 36010
}
|
class ____(dict):
def __init__(self, sql, data, request, datasette):
super().__init__(data)
self._sql = sql
self._request = request
self._magics = dict(
itertools.chain.from_iterable(
pm.hook.register_magic_parameters(datasette=datasette)
)
)
self._prepared = {}
async def execute_params(self):
for key in derive_named_parameters(self._sql):
if key.startswith("_") and key.count("_") >= 2:
prefix, suffix = key[1:].split("_", 1)
if prefix in self._magics:
result = await await_me_maybe(
self._magics[prefix](suffix, self._request)
)
self._prepared[key] = result
def __len__(self):
# Workaround for 'Incorrect number of bindings' error
# https://github.com/simonw/datasette/issues/967#issuecomment-692951144
return super().__len__() or 1
def __getitem__(self, key):
if key.startswith("_") and key.count("_") >= 2:
if key in self._prepared:
return self._prepared[key]
# Try the other route
prefix, suffix = key[1:].split("_", 1)
if prefix in self._magics:
try:
return self._magics[prefix](suffix, self._request)
except KeyError:
return super().__getitem__(key)
else:
return super().__getitem__(key)
|
MagicParameters
|
python
|
jupyterlab__jupyterlab
|
examples/cell/main.py
|
{
"start": 1667,
"end": 2542
}
|
class ____(LabServerApp):
extension_url = "/example"
default_url = "/example"
app_url = "/example"
name = __name__
load_other_extensions = False
app_name = "JupyterLab Example Cell"
static_dir = os.path.join(HERE, "build")
templates_dir = os.path.join(HERE, "templates")
app_version = version
app_settings_dir = os.path.join(HERE, "build", "application_settings")
schemas_dir = os.path.join(HERE, "build", "schemas")
themes_dir = os.path.join(HERE, "build", "themes")
user_settings_dir = os.path.join(HERE, "build", "user_settings")
workspaces_dir = os.path.join(HERE, "build", "workspaces")
def initialize_handlers(self):
"""Add example handler to Lab Server's handler list."""
self.handlers.append(("/example", ExampleHandler))
if __name__ == "__main__":
ExampleApp.launch_instance()
|
ExampleApp
|
python
|
has2k1__plotnine
|
plotnine/scales/scale_color.py
|
{
"start": 9202,
"end": 11211
}
|
class ____(_scale_color_continuous):
"""
Sequential and diverging continuous color scales
This is a convenience scale around
[](`~plotnine.scales.scale_color_gradientn`) with colors from
[colorbrewer.org](http://colorbrewer2.org). It smoothly
interpolates 7 colors from a brewer palette to create a
continuous palette.
"""
type: InitVar[
Literal[
"diverging",
"qualitative",
"sequential",
"div",
"qual",
"seq",
]
] = "seq"
"""
Type of data
"""
palette: InitVar[int | str] = 1
"""
If a string, will use that named palette. If a number, will index
into the list of palettes of appropriate type.
"""
values: InitVar[Sequence[float] | None] = None
"""
List of points in the range [0, 1] at which to place each color.
Must be the same size as `colors`. Default to evenly space the colors
"""
direction: InitVar[Literal[1, -1]] = 1
"""
Sets the order of colors in the scale. If 1, colors are as output
[](`~mizani.palettes.brewer_pal`). If -1, the order of colors is
reversed.
"""
def __post_init__(self, type, palette, values, direction):
"""
Create colormap that will be used by the palette
"""
from mizani.palettes import brewer_pal, gradient_n_pal
if type.lower() in ("qual", "qualitative"):
warn(
"Using a discrete color palette in a continuous scale."
"Consider using type = 'seq' or type = 'div' instead",
PlotnineWarning,
)
# Grab 7 colors from brewer and create a gradient palette
# An odd number matches the midpoint of the palette to that
# of the data
super().__post_init__()
colors = brewer_pal(type, palette, direction=direction)(7)
self.palette = gradient_n_pal(colors, values) # type: ignore
@dataclass
|
scale_color_distiller
|
python
|
getsentry__sentry-python
|
tests/integrations/grpc/test_grpc.py
|
{
"start": 6857,
"end": 10919
}
|
class ____(grpc.UnaryUnaryClientInterceptor):
call_counter = 0
def intercept_unary_unary(self, continuation, client_call_details, request):
self.__class__.call_counter += 1
return continuation(client_call_details, request)
@pytest.mark.forked
def test_grpc_client_other_interceptor(sentry_init, capture_events_forksafe):
"""Ensure compatibility with additional client interceptors."""
sentry_init(traces_sample_rate=1.0, integrations=[GRPCIntegration()])
events = capture_events_forksafe()
server, channel = _set_up()
# Intercept the channel
channel = grpc.intercept_channel(channel, MockClientInterceptor())
stub = gRPCTestServiceStub(channel)
with start_transaction():
stub.TestServe(gRPCTestMessage(text="test"))
_tear_down(server=server)
assert MockClientInterceptor.call_counter == 1
events.write_file.close()
events.read_event()
local_transaction = events.read_event()
span = local_transaction["spans"][0]
assert len(local_transaction["spans"]) == 1
assert span["op"] == OP.GRPC_CLIENT
assert (
span["description"]
== "unary unary call to /grpc_test_server.gRPCTestService/TestServe"
)
assert span["data"] == ApproxDict(
{
"type": "unary unary",
"method": "/grpc_test_server.gRPCTestService/TestServe",
"code": "OK",
}
)
@pytest.mark.forked
def test_grpc_client_and_servers_interceptors_integration(
sentry_init, capture_events_forksafe
):
sentry_init(traces_sample_rate=1.0, integrations=[GRPCIntegration()])
events = capture_events_forksafe()
server, channel = _set_up()
# Use the provided channel
stub = gRPCTestServiceStub(channel)
with start_transaction():
stub.TestServe(gRPCTestMessage(text="test"))
_tear_down(server=server)
events.write_file.close()
server_transaction = events.read_event()
local_transaction = events.read_event()
assert (
server_transaction["contexts"]["trace"]["trace_id"]
== local_transaction["contexts"]["trace"]["trace_id"]
)
@pytest.mark.forked
def test_stream_stream(sentry_init):
sentry_init(traces_sample_rate=1.0, integrations=[GRPCIntegration()])
server, channel = _set_up()
# Use the provided channel
stub = gRPCTestServiceStub(channel)
response_iterator = stub.TestStreamStream(iter((gRPCTestMessage(text="test"),)))
for response in response_iterator:
assert response.text == "test"
_tear_down(server=server)
@pytest.mark.forked
def test_stream_unary(sentry_init):
"""
Test to verify stream-stream works.
Tracing not supported for it yet.
"""
sentry_init(traces_sample_rate=1.0, integrations=[GRPCIntegration()])
server, channel = _set_up()
# Use the provided channel
stub = gRPCTestServiceStub(channel)
response = stub.TestStreamUnary(iter((gRPCTestMessage(text="test"),)))
assert response.text == "test"
_tear_down(server=server)
@pytest.mark.forked
def test_span_origin(sentry_init, capture_events_forksafe):
sentry_init(traces_sample_rate=1.0, integrations=[GRPCIntegration()])
events = capture_events_forksafe()
server, channel = _set_up()
# Use the provided channel
stub = gRPCTestServiceStub(channel)
with start_transaction(name="custom_transaction"):
stub.TestServe(gRPCTestMessage(text="test"))
_tear_down(server=server)
events.write_file.close()
transaction_from_integration = events.read_event()
custom_transaction = events.read_event()
assert (
transaction_from_integration["contexts"]["trace"]["origin"] == "auto.grpc.grpc"
)
assert (
transaction_from_integration["spans"][0]["origin"]
== "auto.grpc.grpc.TestService"
) # manually created in TestService, not the instrumentation
assert custom_transaction["contexts"]["trace"]["origin"] == "manual"
assert custom_transaction["spans"][0]["origin"] == "auto.grpc.grpc"
|
MockClientInterceptor
|
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/source-github/source_github/github_schema.py
|
{
"start": 1478503,
"end": 1484857
}
|
class ____(sgqlc.types.Type, Node):
"""A GitHub Security Advisory"""
__schema__ = github_schema
__field_names__ = (
"classification",
"cvss",
"cwes",
"database_id",
"description",
"ghsa_id",
"identifiers",
"notifications_permalink",
"origin",
"permalink",
"published_at",
"references",
"severity",
"summary",
"updated_at",
"vulnerabilities",
"withdrawn_at",
)
classification = sgqlc.types.Field(sgqlc.types.non_null(SecurityAdvisoryClassification), graphql_name="classification")
"""The classification of the advisory"""
cvss = sgqlc.types.Field(sgqlc.types.non_null(CVSS), graphql_name="cvss")
"""The CVSS associated with this advisory"""
cwes = sgqlc.types.Field(
sgqlc.types.non_null(CWEConnection),
graphql_name="cwes",
args=sgqlc.types.ArgDict(
(
("after", sgqlc.types.Arg(String, graphql_name="after", default=None)),
("before", sgqlc.types.Arg(String, graphql_name="before", default=None)),
("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)),
("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)),
)
),
)
"""CWEs associated with this Advisory
Arguments:
* `after` (`String`): Returns the elements in the list that come
after the specified cursor.
* `before` (`String`): Returns the elements in the list that come
before the specified cursor.
* `first` (`Int`): Returns the first _n_ elements from the list.
* `last` (`Int`): Returns the last _n_ elements from the list.
"""
database_id = sgqlc.types.Field(Int, graphql_name="databaseId")
"""Identifies the primary key from the database."""
description = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="description")
"""This is a long plaintext description of the advisory"""
ghsa_id = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="ghsaId")
"""The GitHub Security Advisory ID"""
identifiers = sgqlc.types.Field(
sgqlc.types.non_null(sgqlc.types.list_of(sgqlc.types.non_null(SecurityAdvisoryIdentifier))), graphql_name="identifiers"
)
"""A list of identifiers for this advisory"""
notifications_permalink = sgqlc.types.Field(URI, graphql_name="notificationsPermalink")
"""The permalink for the advisory's dependabot alerts page"""
origin = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="origin")
"""The organization that originated the advisory"""
permalink = sgqlc.types.Field(URI, graphql_name="permalink")
"""The permalink for the advisory"""
published_at = sgqlc.types.Field(sgqlc.types.non_null(DateTime), graphql_name="publishedAt")
"""When the advisory was published"""
references = sgqlc.types.Field(
sgqlc.types.non_null(sgqlc.types.list_of(sgqlc.types.non_null(SecurityAdvisoryReference))), graphql_name="references"
)
"""A list of references for this advisory"""
severity = sgqlc.types.Field(sgqlc.types.non_null(SecurityAdvisorySeverity), graphql_name="severity")
"""The severity of the advisory"""
summary = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="summary")
"""A short plaintext summary of the advisory"""
updated_at = sgqlc.types.Field(sgqlc.types.non_null(DateTime), graphql_name="updatedAt")
"""When the advisory was last updated"""
vulnerabilities = sgqlc.types.Field(
sgqlc.types.non_null(SecurityVulnerabilityConnection),
graphql_name="vulnerabilities",
args=sgqlc.types.ArgDict(
(
(
"order_by",
sgqlc.types.Arg(
SecurityVulnerabilityOrder, graphql_name="orderBy", default={"field": "UPDATED_AT", "direction": "DESC"}
),
),
("ecosystem", sgqlc.types.Arg(SecurityAdvisoryEcosystem, graphql_name="ecosystem", default=None)),
("package", sgqlc.types.Arg(String, graphql_name="package", default=None)),
(
"severities",
sgqlc.types.Arg(
sgqlc.types.list_of(sgqlc.types.non_null(SecurityAdvisorySeverity)), graphql_name="severities", default=None
),
),
(
"classifications",
sgqlc.types.Arg(
sgqlc.types.list_of(sgqlc.types.non_null(SecurityAdvisoryClassification)),
graphql_name="classifications",
default=None,
),
),
("after", sgqlc.types.Arg(String, graphql_name="after", default=None)),
("before", sgqlc.types.Arg(String, graphql_name="before", default=None)),
("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)),
("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)),
)
),
)
"""Vulnerabilities associated with this Advisory
Arguments:
* `order_by` (`SecurityVulnerabilityOrder`): Ordering options for
the returned topics. (default: `{field: UPDATED_AT, direction:
DESC}`)
* `ecosystem` (`SecurityAdvisoryEcosystem`): An ecosystem to
filter vulnerabilities by.
* `package` (`String`): A package name to filter vulnerabilities
by.
* `severities` (`[SecurityAdvisorySeverity!]`): A list of
severities to filter vulnerabilities by.
* `classifications` (`[SecurityAdvisoryClassification!]`): A list
of advisory classifications to filter vulnerabilities by.
* `after` (`String`): Returns the elements in the list that come
after the specified cursor.
* `before` (`String`): Returns the elements in the list that come
before the specified cursor.
* `first` (`Int`): Returns the first _n_ elements from the list.
* `last` (`Int`): Returns the last _n_ elements from the list.
"""
withdrawn_at = sgqlc.types.Field(DateTime, graphql_name="withdrawnAt")
"""When the advisory was withdrawn, if it has been withdrawn"""
|
SecurityAdvisory
|
python
|
pytorch__pytorch
|
test/functorch/dim/test_split.py
|
{
"start": 219,
"end": 16820
}
|
class ____(TestCase):
"""Comprehensive tests for first-class dimension split operations."""
def setUp(self):
"""Set up common test fixtures."""
self.batch, self.height, self.width = dims(3)
def test_dim_object_split_all_bound(self):
"""Test split with all Dim objects bound to specific sizes."""
tensor = torch.randn(3, 12, 5)
x, y, z = dims(3)
t = tensor[x, y, z]
# Create bound Dim objects
d1 = Dim("d1", 3)
d2 = Dim("d2", 4)
d3 = Dim("d3", 5)
result = t.split([d1, d2, d3], dim=y)
self.assertEqual(len(result), 3)
# For FCD tensors, check the ordered version to verify shapes
self.assertEqual(result[0].order(x, d1, z).shape, (3, 3, 5))
self.assertEqual(result[1].order(x, d2, z).shape, (3, 4, 5))
self.assertEqual(result[2].order(x, d3, z).shape, (3, 5, 5))
# Verify dimensions are bound correctly
self.assertEqual(d1.size, 3)
self.assertEqual(d2.size, 4)
self.assertEqual(d3.size, 5)
def test_dim_object_split_unbound(self):
"""Test split with unbound Dim objects."""
tensor = torch.randn(3, 12, 5)
x, y, z = dims(3)
t = tensor[x, y, z]
# Create unbound Dim objects
d1 = Dim("d1")
d2 = Dim("d2")
d3 = Dim("d3")
result = t.split([d1, d2, d3], dim=y)
self.assertEqual(len(result), 3)
# Should split evenly: 12 / 3 = 4 each
# Check via ordered tensors since FCD tensors have ndim=0
for i, part in enumerate(result):
if i == 0:
self.assertEqual(part.order(x, d1, z).shape, (3, 4, 5))
elif i == 1:
self.assertEqual(part.order(x, d2, z).shape, (3, 4, 5))
else:
self.assertEqual(part.order(x, d3, z).shape, (3, 4, 5))
# Verify dimensions are bound to chunk size
self.assertEqual(d1.size, 4)
self.assertEqual(d2.size, 4)
self.assertEqual(d3.size, 4)
def test_dim_object_split_mixed_bound_unbound(self):
"""Test split with mix of bound and unbound Dim objects."""
tensor = torch.randn(3, 12, 5)
x, y, z = dims(3)
t = tensor[x, y, z]
# Create mix of bound and unbound
d1 = Dim("d1", 3) # bound
d2 = Dim("d2") # unbound
d3 = Dim("d3", 2) # bound
result = t.split([d1, d2, d3], dim=y)
self.assertEqual(len(result), 3)
self.assertEqual(result[0].order(x, d1, z).shape, (3, 3, 5))
self.assertEqual(result[1].order(x, d2, z).shape, (3, 7, 5)) # 12 - 3 - 2 = 7
self.assertEqual(result[2].order(x, d3, z).shape, (3, 2, 5))
# Verify unbound dimension was bound to remaining size
self.assertEqual(d2.size, 7)
def test_dim_object_split_multiple_unbound(self):
"""Test split with multiple unbound Dim objects."""
tensor = torch.randn(3, 15, 5)
x, y, z = dims(3)
t = tensor[x, y, z]
# Create multiple unbound dimensions
d1 = Dim("d1", 3) # bound
d2 = Dim("d2") # unbound
d3 = Dim("d3") # unbound
result = t.split([d1, d2, d3], dim=y)
self.assertEqual(len(result), 3)
self.assertEqual(result[0].order(x, d1, z).shape, (3, 3, 5))
# Remaining 12 should be split evenly between d2 and d3: 6 each
self.assertEqual(result[1].order(x, d2, z).shape, (3, 6, 5))
self.assertEqual(result[2].order(x, d3, z).shape, (3, 6, 5))
self.assertEqual(d2.size, 6)
self.assertEqual(d3.size, 6)
def test_dim_object_split_uneven_remainder(self):
"""Test split with unbound dimensions that don't divide evenly."""
tensor = torch.randn(3, 14, 5) # 14 doesn't divide evenly by 3
x, y, z = dims(3)
t = tensor[x, y, z]
d1 = Dim("d1", 3)
d2 = Dim("d2") # Should get ceil((14-3)/2) = 6
d3 = Dim("d3") # Should get remaining = 5
result = t.split([d1, d2, d3], dim=y)
self.assertEqual(len(result), 3)
self.assertEqual(result[0].order(x, d1, z).shape, (3, 3, 5))
self.assertEqual(result[1].order(x, d2, z).shape, (3, 6, 5))
self.assertEqual(result[2].order(x, d3, z).shape, (3, 5, 5))
self.assertEqual(d2.size, 6)
self.assertEqual(d3.size, 5)
def test_split_with_dim_object_parameter(self):
"""Test split when dim parameter is a Dim object."""
tensor = torch.randn(3, 12, 5)
x, y, z = dims(3)
t = tensor[x, y, z]
# Use Dim object as the dim parameter
d1 = Dim("d1", 3)
d2 = Dim("d2", 4)
d3 = Dim("d3", 5)
result = t.split([d1, d2, d3], dim=y)
self.assertEqual(len(result), 3)
def test_error_mixed_types(self):
"""Test error when mixing integers and Dim objects in split sizes."""
tensor = torch.randn(3, 12, 5)
x, y, z = dims(3)
t = tensor[x, y, z]
d1 = Dim("d1", 3)
# Should raise TypeError for mixed types
with self.assertRaises(TypeError):
t.split([d1, 4, 5], dim=y)
with self.assertRaises(TypeError):
t.split([3, d1, 5], dim=y)
def test_error_dim_parameter_with_int_sizes(self):
"""Test error when dim parameter is Dim but sizes are integers."""
tensor = torch.randn(3, 12, 5)
x, y, z = dims(3)
t = tensor[x, y, z]
# Should raise TypeError when dim is Dim object but sizes are ints
with self.assertRaises(
TypeError,
msg="when dim is specified as a Dim object, split sizes must also be dimensions.",
):
t.split(3, dim=y)
with self.assertRaises(
TypeError,
msg="when dim is specified as a Dim object, split sizes must also be dimensions.",
):
t.split([3, 4, 5], dim=y)
def test_error_size_mismatch(self):
"""Test error when bound sizes don't match tensor dimension."""
tensor = torch.randn(3, 12, 5)
x, y, z = dims(3)
t = tensor[x, y, z]
# Bound dimensions that sum to wrong total
d1 = Dim("d1", 3)
d2 = Dim("d2", 4)
d3 = Dim("d3", 6) # 3 + 4 + 6 = 13, but tensor has 12
with self.assertRaises(TypeError):
t.split([d1, d2, d3], dim=y)
def test_error_bound_sizes_exceed_tensor(self):
"""Test error when bound sizes exceed tensor dimension."""
tensor = torch.randn(3, 12, 5)
x, y, z = dims(3)
t = tensor[x, y, z]
# Bound dimensions with one unbound, but bound sizes too large
d1 = Dim("d1", 8)
d2 = Dim("d2", 6) # 8 + 6 = 14 > 12
d3 = Dim("d3")
with self.assertRaises(TypeError):
t.split([d1, d2, d3], dim=y)
def test_error_nonexistent_dimension(self):
"""Test error when splitting on non-existent dimension."""
tensor = torch.randn(3, 12, 5)
x, y, z = dims(3)
t = tensor[x, y, z]
w = Dim("w") # Not in tensor
with self.assertRaises(TypeError):
t.split([Dim("d1"), Dim("d2")], dim=w)
def test_split_different_dims(self):
"""Test splitting along different dimensions."""
tensor = torch.randn(6, 8, 10)
x, y, z = dims(3)
t = tensor[x, y, z]
# Split along first dimension
a, b = Dim("a", 2), Dim("b", 4)
result1 = t.split([a, b], dim=x)
self.assertEqual(len(result1), 2)
self.assertEqual(result1[0].order(a, y, z).shape, (2, 8, 10))
self.assertEqual(result1[1].order(b, y, z).shape, (4, 8, 10))
# Split along last dimension
c, d = Dim("c", 3), Dim("d", 7)
result2 = t.split([c, d], dim=z)
self.assertEqual(len(result2), 2)
self.assertEqual(result2[0].order(x, y, c).shape, (6, 8, 3))
self.assertEqual(result2[1].order(x, y, d).shape, (6, 8, 7))
def test_split_single_dim_object(self):
"""Test split with single Dim object that matches tensor dimension size."""
tensor = torch.randn(3, 12, 5)
x, y, z = dims(3)
t = tensor[x, y, z]
# Use a single Dim object with size matching the dimension
d1 = Dim("d1", 12) # Must match the full size of y dimension
# Single Dim object in list should work when size matches
result = t.split([d1], dim=y)
self.assertEqual(len(result), 1) # Single chunk containing entire dimension
self.assertEqual(result[0].order(x, d1, z).shape, (3, 12, 5))
@unittest.skipIf(
TEST_WITH_TORCHDYNAMO,
"TorchDynamo doesn't preserve side effects during tracing",
)
def test_dimension_binding_consistency(self):
"""Test that split properly binds dimensions and they remain consistent."""
tensor = torch.randn(3, 15, 5)
x, y, z = dims(3)
t = tensor[x, y, z]
d1 = Dim("d1")
d2 = Dim("d2")
d3 = Dim("d3")
# Split should bind dimensions
t.split([d1, d2, d3], dim=y)
# Use the bound dimensions in another operation
self.assertTrue(d1.is_bound)
self.assertTrue(d2.is_bound)
self.assertTrue(d3.is_bound)
# Dimensions should remain bound with same values
original_sizes = (d1.size, d2.size, d3.size)
# Try to use bound dimension again - should maintain same size
another_tensor = torch.randn(original_sizes[0], 4)
a = Dim("a")
t2 = another_tensor[d1, a] # d1 should still be bound to same size
self.assertEqual(t2.order(d1, a).shape, (original_sizes[0], 4))
def test_split_result_tensor_types(self):
"""Test that split results are proper first-class dimension tensors."""
tensor = torch.randn(3, 12, 5)
x, y, z = dims(3)
t = tensor[x, y, z]
d1 = Dim("d1", 4)
d2 = Dim("d2", 8)
result = t.split([d1, d2], dim=y)
# Results should be first-class dimension tensors
for part in result:
self.assertTrue(isinstance(part, (torch.Tensor, Tensor)))
# Should have dimensions from original tensor plus new split dimensions
if hasattr(part, "dims"):
# Check that the split dimension is in the result
dims_in_result = part.dims
self.assertTrue(len(dims_in_result) > 0)
def test_large_tensor_split(self):
"""Test split on larger tensors to verify performance and correctness."""
tensor = torch.randn(10, 100, 20)
x, y, z = dims(3)
t = tensor[x, y, z]
# Split into many small pieces
split_dims = [Dim(f"d{i}", 5) for i in range(20)] # 20 * 5 = 100
result = t.split(split_dims, dim=y)
self.assertEqual(len(result), 20)
for i, part in enumerate(result):
self.assertEqual(part.order(x, split_dims[i], z).shape, (10, 5, 20))
self.assertEqual(split_dims[i].size, 5)
def test_device_handling(self):
"""Test split behavior with different devices."""
if torch.cuda.is_available():
# Test on CUDA
cuda_tensor = torch.randn(3, 12, 5, device="cuda")
x, y, z = dims(3)
t = cuda_tensor[x, y, z]
d1, d2 = Dim("d1", 4), Dim("d2", 8)
result = t.split([d1, d2], dim=y)
for i, part in enumerate(result):
ordered = part.order(x, d1 if i == 0 else d2, z)
self.assertEqual(ordered.device.type, "cuda")
self.assertEqual(ordered.shape[0], 3)
self.assertEqual(ordered.shape[2], 5)
# Test on CPU
cpu_tensor = torch.randn(3, 12, 5)
x, y, z = dims(3)
t = cpu_tensor[x, y, z]
d1, d2 = Dim("d1", 4), Dim("d2", 8)
result = t.split([d1, d2], dim=y)
for i, part in enumerate(result):
ordered = part.order(x, d1 if i == 0 else d2, z)
self.assertEqual(ordered.device, torch.device("cpu"))
def test_split_preserves_dtype(self):
"""Test that split preserves tensor dtype."""
for dtype in [torch.float32, torch.float64, torch.int32, torch.int64]:
if dtype in [torch.int32, torch.int64]:
tensor = torch.randint(0, 10, (3, 12, 5), dtype=dtype)
else:
tensor = torch.randn(3, 12, 5, dtype=dtype)
x, y, z = dims(3)
t = tensor[x, y, z]
d1, d2 = Dim("d1", 4), Dim("d2", 8)
result = t.split([d1, d2], dim=y)
for i, part in enumerate(result):
ordered = part.order(x, d1 if i == 0 else d2, z)
self.assertEqual(ordered.dtype, dtype)
def test_split_with_requires_grad(self):
"""Test split with tensors that require gradients."""
tensor = torch.randn(3, 12, 5, requires_grad=True)
x, y, z = dims(3)
t = tensor[x, y, z]
d1, d2 = Dim("d1", 4), Dim("d2", 8)
result = t.split([d1, d2], dim=y)
for part in result:
# Check requires_grad on the ordered tensor to access the underlying tensor properties
self.assertTrue(
part.order(x, d1 if part is result[0] else d2, z).requires_grad
)
def test_edge_case_single_element_splits(self):
"""Test splitting into single-element chunks."""
tensor = torch.randn(3, 5, 4)
x, y, z = dims(3)
t = tensor[x, y, z]
# Split into 5 single-element pieces
split_dims = [Dim(f"d{i}", 1) for i in range(5)]
result = t.split(split_dims, dim=y)
self.assertEqual(len(result), 5)
for i, part in enumerate(result):
self.assertEqual(part.order(x, split_dims[i], z).shape, (3, 1, 4))
@unittest.skipIf(
TEST_WITH_TORCHDYNAMO, "TorchDynamo has issues with torch._tensor.split"
)
def test_split_function_directly(self):
"""Test that the standalone split function works correctly."""
from functorch.dim import split
# Test on regular tensor
tensor = torch.randn(3, 12, 5)
result = split(tensor, 4, dim=1)
self.assertEqual(len(result), 3) # 12 / 4 = 3
for part in result:
self.assertEqual(part.shape, (3, 4, 5))
# Test on FCD tensor with FCD arguments
x, y, z = dims(3)
fcd_tensor = tensor[x, y, z]
d1 = Dim("d1", 4)
d2 = Dim("d2", 8)
result = split(fcd_tensor, [d1, d2], dim=y)
self.assertEqual(len(result), 2)
self.assertEqual(result[0].order(x, d1, z).shape, (3, 4, 5))
self.assertEqual(result[1].order(x, d2, z).shape, (3, 8, 5))
@unittest.skipIf(
TEST_WITH_TORCHDYNAMO,
"TorchDynamo can't parse dims() without arguments from bytecode",
)
def test_split_on_plain_tensor_with_fcd_args(self):
"""Test that split() works on plain tensors when FCD arguments are provided."""
# Test the exact example from the user message
x, y = dims()
# Split a plain tensor with FCD dimensions as split sizes
result = torch.randn(8).split([x, y], dim=0)
self.assertEqual(len(result), 2)
# Both parts should be FCD tensors
for part in result:
self.assertTrue(isinstance(part, (torch.Tensor, Tensor)))
self.assertTrue(hasattr(part, "dims"))
# Check that the dimensions are bound correctly
self.assertIs(result[0].dims[0], x)
self.assertIs(result[1].dims[0], y)
self.assertEqual(x.size, 4) # 8 / 2 = 4 each
self.assertEqual(y.size, 4)
# Test with repeated dimensions
x2 = Dim("x2")
result2 = torch.randn(8).split([x2, x2], dim=0)
self.assertEqual(len(result2), 2)
self.assertEqual(x2.size, 4) # Both chunks should be size 4
def test_plain_tensor_regular_split_still_works(self):
"""Test that regular split on plain tensors still works without FCD args."""
tensor = torch.randn(3, 12, 5)
# Regular split without any FCD arguments should work normally
result = tensor.split(4, dim=1)
self.assertEqual(len(result), 3) # 12 / 4 = 3
for part in result:
self.assertEqual(part.shape, (3, 4, 5))
self.assertTrue(isinstance(part, torch.Tensor))
self.assertFalse(hasattr(part, "dims")) # Should be regular tensor
if __name__ == "__main__":
run_tests()
|
TestSplit
|
python
|
apache__airflow
|
airflow-core/src/airflow/api_fastapi/auth/tokens.py
|
{
"start": 13234,
"end": 20068
}
|
class ____:
"""Generate JWT tokens."""
_private_key: AllowedPrivateKeys | None = attrs.field(
repr=False, alias="private_key", converter=_pem_to_key, factory=_load_key_from_configured_file
)
"""
Private key to sign generated tokens.
Should be either a private key object from the cryptography module, or a PEM-encoded byte string
"""
_secret_key: str | None = attrs.field(
repr=False,
alias="secret_key",
default=None,
converter=lambda v: None if v == "" else v,
)
"""A pre-shared secret key to sign tokens with symmetric encryption"""
kid: str = attrs.field(default=attrs.Factory(_generate_kid, takes_self=True))
valid_for: float
audience: str
issuer: str | list[str] | None = attrs.field(
factory=_conf_list_factory("api_auth", "jwt_issuer", first_only=True, fallback=None)
)
algorithm: str = attrs.field(
factory=_conf_list_factory("api_auth", "jwt_algorithm", first_only=True, fallback="GUESS")
)
def __attrs_post_init__(self):
if not (self._private_key is None) ^ (self._secret_key is None):
raise ValueError("Exactly one of private_key and secret_key must be specified")
if self.algorithm == "GUESS":
if self._private_key:
self.algorithm = _guess_best_algorithm(self._private_key)
else:
self.algorithm = "HS512"
@property
def signing_arg(self) -> AllowedPrivateKeys | str:
if callable(self._private_key):
return self._private_key()
if self._private_key:
return self._private_key
if TYPE_CHECKING:
# Already handled at in post_init
assert self._secret_key
return self._secret_key
def generate(self, extras: dict[str, Any] | None = None, headers: dict[str, Any] | None = None) -> str:
"""Generate a signed JWT for the subject."""
now = int(datetime.now(tz=timezone.utc).timestamp())
claims = {
"jti": uuid.uuid4().hex,
"iss": self.issuer,
"aud": self.audience,
"nbf": now,
"exp": int(now + self.valid_for),
"iat": now,
}
if claims["iss"] is None:
del claims["iss"]
if claims["aud"] is None:
del claims["aud"]
if extras is not None:
claims = extras | claims
headers = {"alg": self.algorithm, **(headers or {})}
if self._private_key:
headers["kid"] = self.kid
return jwt.encode(claims, self.signing_arg, algorithm=self.algorithm, headers=headers)
def generate_private_key(key_type: str = "RSA", key_size: int = 2048):
"""
Generate a valid private key for testing.
Args:
key_type (str): Type of key to generate. Can be "RSA" or "Ed25516". Defaults to "RSA".
key_size (int): Size of the key in bits. Only applicable for RSA keys. Defaults to 2048.
Returns:
tuple: A tuple containing the private key in PEM format and the corresponding public key in PEM format.
"""
from cryptography.hazmat.primitives.asymmetric import ed25519, rsa
if key_type == "RSA":
# Generate an RSA private key
return rsa.generate_private_key(public_exponent=65537, key_size=key_size, backend=default_backend())
if key_type == "Ed25519":
return ed25519.Ed25519PrivateKey.generate()
raise ValueError(f"unsupported key type: {key_type}")
def key_to_pem(key: AllowedPrivateKeys) -> bytes:
from cryptography.hazmat.primitives import serialization
# Serialize the private key in PEM format
return key.private_bytes(
encoding=serialization.Encoding.PEM,
format=serialization.PrivateFormat.PKCS8,
encryption_algorithm=serialization.NoEncryption(),
)
def thumbprint(jwk: dict[str, Any], hashalg=hashes.SHA256()) -> str:
"""
Return the key thumbprint as specified by RFC 7638.
:param hashalg: A hash function (defaults to SHA256)
:return: A base64url encoded digest of the key
"""
digest = hashes.Hash(hashalg, backend=default_backend())
jsonstr = json.dumps(jwk, separators=(",", ":"), sort_keys=True)
digest.update(jsonstr.encode("utf8"))
return base64url_encode(digest.finalize())
def base64url_encode(payload):
if not isinstance(payload, bytes):
payload = payload.encode("utf-8")
encode = urlsafe_b64encode(payload)
return encode.decode("utf-8").rstrip("=")
def get_signing_key(section: str, key: str, make_secret_key_if_needed: bool = True) -> str:
"""
Get a signing shared key from the config.
If the config option is empty this will generate a random one and warn about the lack of it.
"""
from airflow.configuration import conf
sentinel = object()
secret_key = conf.get(section, key, fallback=sentinel)
if not secret_key or secret_key is sentinel:
if make_secret_key_if_needed:
log.warning(
"`%s/%s` was empty, using a generated one for now. Please set this in your config",
section,
key,
)
secret_key = base64url_encode(os.urandom(16))
# Set it back so any other callers get the same value for the duration of this process
conf.set(section, key, secret_key)
else:
raise ValueError(f"The value {section}/{key} must be set!")
# Mypy can't grock the `if not secret_key`
return secret_key
def get_signing_args(make_secret_key_if_needed: bool = True) -> dict[str, Any]:
"""
Return the args to splat into JWTGenerator for private or secret key.
Will use ``get_signing_key`` to generate a key if nothing else suitable is found.
"""
# Try private key first
priv = _load_key_from_configured_file()
if priv is not None:
return {"private_key": priv}
# Don't call this unless we have to as it might issue a warning
return {"secret_key": get_signing_key("api_auth", "jwt_secret", make_secret_key_if_needed)}
def get_sig_validation_args(make_secret_key_if_needed: bool = True) -> dict[str, Any]:
from airflow.configuration import conf
sentinel = object()
# Try JWKS url first
url = conf.get("api_auth", "trusted_jwks_url", fallback=sentinel)
if url and url is not sentinel:
jwks = JWKS(url=url)
return {"jwks": jwks}
key = _load_key_from_configured_file()
if key is not None:
jwks = JWKS.from_private_key(key)
return {
"jwks": jwks,
"algorithm": conf.get("api_auth", "jwt_algorithm", fallback=None) or _guess_best_algorithm(key),
}
return {"secret_key": get_signing_key("api_auth", "jwt_secret", make_secret_key_if_needed)}
|
JWTGenerator
|
python
|
pytorch__pytorch
|
torch/ao/quantization/observer.py
|
{
"start": 63906,
"end": 64180
}
|
class ____:
"""
Base class for representing the granularity of quantization.
This class serves as a parent for specific granularity types used in
quantization operations, such as per-tensor or per-axis quantization.
"""
@dataclass(frozen=True)
|
Granularity
|
python
|
pytorch__pytorch
|
torch/_inductor/memory.py
|
{
"start": 10796,
"end": 15458
}
|
class ____:
buffer: Union[SchedulerBuffer, FreeableInputBuffer]
size_alloc: int
size_free: int
start_step: int
end_step: int
def compute_memory_timeline(
nodes: list[BaseSchedulerNode],
name_to_freeable_input_buf: dict[str, FreeableInputBuffer],
graph_outputs: OrderedSet[str],
) -> tuple[
list[BufferInfo],
dict[BaseSchedulerNode, int],
dict[Union[FreeableInputBuffer, SchedulerBuffer], BaseSchedulerNode],
]:
"""
Compute buffer allocation and deallocation sizes and map their
lifetime to the node schedule
"""
# get the execution step of each node, this will be used to determine
# the end_step of buffers
node_to_step: dict[BaseSchedulerNode, int] = {
node: step for step, node in enumerate(nodes)
}
# get buffers' size and liveliness information
buf_info_list: list[BufferInfo] = []
buf_to_snode_last_use: dict[
Union[FreeableInputBuffer, SchedulerBuffer], BaseSchedulerNode
] = {}
def _get_end_step_and_snode(
buf: Union[FreeableInputBuffer, SchedulerBuffer],
) -> tuple[int, Optional[BaseSchedulerNode]]:
max_step: int = -1
max_step_snode: Optional[BaseSchedulerNode] = None
succ_nodes = buf.mpi_buffer.succ_nodes
if succ_nodes:
for succ_node in succ_nodes:
step = node_to_step[succ_node]
if step > max_step:
max_step = step
max_step_snode = succ_node
assert max_step_snode is not None
return max_step, max_step_snode
# 1. for freeable input buffers
for buf_name, input_buf in name_to_freeable_input_buf.items():
end_step = -1
if buf_name not in graph_outputs:
end_step, end_step_snode = _get_end_step_and_snode(input_buf)
assert end_step_snode is not None
buf_to_snode_last_use[input_buf] = end_step_snode
buf_info_list.append(
BufferInfo(
input_buf,
input_buf.mpi_buffer.size_free,
input_buf.mpi_buffer.size_free,
0,
end_step,
)
)
# 2. for scheduler buffers
for step, node in enumerate(nodes):
for sched_buf in node.get_outputs():
# note: it is possible for a non-graph-output sched_buf to have no succ_nodes and
# to be only used by its defining op (e.g., due to fusion when all consumers of
# the buffer are fused with its defining op). In such cases, end_step is step.
buf_name = sched_buf.get_name()
end_step = -1
if buf_name not in graph_outputs:
end_step, end_step_snode = _get_end_step_and_snode(sched_buf)
if end_step == -1:
end_step = step
buf_to_snode_last_use[sched_buf] = node
else:
assert end_step_snode is not None
buf_to_snode_last_use[sched_buf] = end_step_snode
buf_info_list.append(
BufferInfo(
sched_buf,
sched_buf.mpi_buffer.size_alloc,
sched_buf.mpi_buffer.size_free,
step,
end_step,
)
)
return buf_info_list, node_to_step, buf_to_snode_last_use
def estimate_peak_memory(
nodes: list[BaseSchedulerNode],
name_to_freeable_input_buf: dict[str, FreeableInputBuffer],
graph_outputs: OrderedSet[str],
) -> tuple[int, list[int]]:
"""
Given a list of nodes in their execution order, estimate the peak memory, by
keeping track of the liveliness of SchedulerBuffers and FreeableInputBuffers.
Returns:
int: peak memory
List[int]: memory usage at each node (or each step).
"""
buf_info_list, _, _ = compute_memory_timeline(
nodes, name_to_freeable_input_buf, graph_outputs
)
# incremental memory changes at each step
memory = [0 for _ in range(len(nodes) + 1)]
# for each buffer, update memory when created and when freed
for buf_info in buf_info_list:
memory[buf_info.start_step] += buf_info.size_alloc
memory[buf_info.end_step + 1] -= buf_info.size_free
# get peak memory by compute the cumulative memories
max_memory = 0
cur_memory = 0
memories_at_nodes = []
for t in range(len(nodes) + 1):
cur_memory += memory[t]
memories_at_nodes.append(cur_memory)
max_memory = max(max_memory, cur_memory)
return (max_memory, memories_at_nodes)
@dataclasses.dataclass
|
BufferInfo
|
python
|
apache__airflow
|
providers/google/src/airflow/providers/google/cloud/operators/dataplex.py
|
{
"start": 122473,
"end": 125335
}
|
class ____(DataplexCatalogBaseOperator):
"""
Delete an EntryType resource.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:DataplexCatalogDeleteEntryTypeOperator`
:param entry_type_id: Required. EntryType identifier.
:param project_id: Required. The ID of the Google Cloud project where the service is used.
:param location: Required. The ID of the Google Cloud region where the service is used.
:param gcp_conn_id: Optional. The connection ID to use to connect to Google Cloud.
:param retry: Optional. A retry object used to retry requests. If `None` is specified, requests will not
be retried.
:param timeout: Optional. The amount of time, in seconds, to wait for the request to complete.
Note that if `retry` is specified, the timeout applies to each individual attempt.
:param metadata: Optional. Additional metadata that is provided to the method.
:param impersonation_chain: Optional. Service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = tuple(
{"entry_type_id"} | set(DataplexCatalogBaseOperator.template_fields)
)
def __init__(
self,
entry_type_id: str,
*args,
**kwargs,
) -> None:
super().__init__(*args, **kwargs)
self.entry_type_id = entry_type_id
def execute(self, context: Context):
self.log.info(
"Deleting Dataplex Catalog EntryType %s.",
self.entry_type_id,
)
try:
operation = self.hook.delete_entry_type(
entry_type_id=self.entry_type_id,
location=self.location,
project_id=self.project_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
self.hook.wait_for_operation(timeout=self.timeout, operation=operation)
except NotFound:
self.log.info(
"Dataplex Catalog EntryType %s not found.",
self.entry_type_id,
)
raise AirflowException(NotFound)
except Exception as ex:
raise AirflowException(ex)
return None
|
DataplexCatalogDeleteEntryTypeOperator
|
python
|
optuna__optuna
|
optuna/visualization/_hypervolume_history.py
|
{
"start": 619,
"end": 4938
}
|
class ____(NamedTuple):
trial_numbers: list[int]
values: list[float]
@experimental_func("3.3.0")
def plot_hypervolume_history(
study: Study,
reference_point: Sequence[float],
) -> "go.Figure":
"""Plot hypervolume history of all trials in a study.
Args:
study:
A :class:`~optuna.study.Study` object whose trials are plotted for their hypervolumes.
The number of objectives must be 2 or more.
reference_point:
A reference point to use for hypervolume computation.
The dimension of the reference point must be the same as the number of objectives.
Returns:
A :class:`plotly.graph_objects.Figure` object.
"""
_imports.check()
if not study._is_multi_objective():
raise ValueError(
"Study must be multi-objective. For single-objective optimization, "
"please use plot_optimization_history instead."
)
if len(reference_point) != len(study.directions):
raise ValueError(
"The dimension of the reference point must be the same as the number of objectives."
)
info = _get_hypervolume_history_info(study, np.asarray(reference_point, dtype=np.float64))
return _get_hypervolume_history_plot(info)
def _get_hypervolume_history_plot(
info: _HypervolumeHistoryInfo,
) -> "go.Figure":
layout = go.Layout(
title="Hypervolume History Plot",
xaxis={"title": "Trial"},
yaxis={"title": "Hypervolume"},
)
data = go.Scatter(
x=info.trial_numbers,
y=info.values,
mode="lines+markers",
)
return go.Figure(data=data, layout=layout)
def _get_hypervolume_history_info(
study: Study,
reference_point: np.ndarray,
) -> _HypervolumeHistoryInfo:
completed_trials = study.get_trials(deepcopy=False, states=(TrialState.COMPLETE,))
if len(completed_trials) == 0:
_logger.warning("Your study does not have any completed trials.")
# Our hypervolume computation module assumes that all objectives are minimized.
# Here we transform the objective values and the reference point.
signs = np.asarray([1 if d == StudyDirection.MINIMIZE else -1 for d in study.directions])
minimization_reference_point = signs * reference_point
# Only feasible trials are considered in hypervolume computation.
trial_numbers = []
hypervolume_values = []
best_trials_values_normalized: np.ndarray | None = None
hypervolume = 0.0
for trial in completed_trials:
trial_numbers.append(trial.number)
has_constraints = _CONSTRAINTS_KEY in trial.system_attrs
if has_constraints:
constraints_values = trial.system_attrs[_CONSTRAINTS_KEY]
if any(map(lambda x: x > 0.0, constraints_values)):
# The trial is infeasible.
hypervolume_values.append(hypervolume)
continue
values_normalized = (signs * trial.values)[np.newaxis, :]
if best_trials_values_normalized is not None:
if (best_trials_values_normalized <= values_normalized).all(axis=1).any(axis=0):
# The trial is not on the Pareto front.
hypervolume_values.append(hypervolume)
continue
if (values_normalized > minimization_reference_point).any():
hypervolume_values.append(hypervolume)
continue
hypervolume += np.prod(minimization_reference_point - values_normalized)
if best_trials_values_normalized is None:
best_trials_values_normalized = values_normalized
else:
limited_sols = np.maximum(best_trials_values_normalized, values_normalized)
hypervolume -= compute_hypervolume(limited_sols, minimization_reference_point)
is_kept = (best_trials_values_normalized < values_normalized).any(axis=1)
best_trials_values_normalized = np.concatenate(
[best_trials_values_normalized[is_kept, :], values_normalized], axis=0
)
hypervolume_values.append(hypervolume)
if best_trials_values_normalized is None:
_logger.warning("Your study does not have any feasible trials.")
return _HypervolumeHistoryInfo(trial_numbers, hypervolume_values)
|
_HypervolumeHistoryInfo
|
python
|
tensorflow__tensorflow
|
tensorflow/python/autograph/pyct/transformer.py
|
{
"start": 1140,
"end": 1879
}
|
class ____(object):
"""Contains information about a source code transformation.
This object is mutable, and is updated during conversion. Not thread safe.
Attributes:
info: EntityInfo, immutable.
namer: naming.Namer.
current_origin: origin_info.OriginInfo, holds the OriginInfo of the last
AST node to be processed successfully. Useful for error handling.
user: An user-supplied context object. The object is opaque to the
infrastructure, but will pe passed through to all custom transformations.
"""
def __init__(self, info, namer, user_context):
self.info = info
self.namer = namer
self.current_origin = None
self.user = user_context
# TODO(mdan): Move to a standalone file.
|
Context
|
python
|
sanic-org__sanic
|
sanic/exceptions.py
|
{
"start": 26273,
"end": 26373
}
|
class ____(SanicException):
"""Exception raised when a file cannot be loaded."""
|
LoadFileException
|
python
|
pypa__pipenv
|
pipenv/vendor/packaging/metadata.py
|
{
"start": 24340,
"end": 32349
}
|
class ____:
"""Representation of distribution metadata.
Compared to :class:`RawMetadata`, this class provides objects representing
metadata fields instead of only using built-in types. Any invalid metadata
will cause :exc:`InvalidMetadata` to be raised (with a
:py:attr:`~BaseException.__cause__` attribute as appropriate).
"""
_raw: RawMetadata
@classmethod
def from_raw(cls, data: RawMetadata, *, validate: bool = True) -> Metadata:
"""Create an instance from :class:`RawMetadata`.
If *validate* is true, all metadata will be validated. All exceptions
related to validation will be gathered and raised as an :class:`ExceptionGroup`.
"""
ins = cls()
ins._raw = data.copy() # Mutations occur due to caching enriched values.
if validate:
exceptions: list[Exception] = []
try:
metadata_version = ins.metadata_version
metadata_age = _VALID_METADATA_VERSIONS.index(metadata_version)
except InvalidMetadata as metadata_version_exc:
exceptions.append(metadata_version_exc)
metadata_version = None
# Make sure to check for the fields that are present, the required
# fields (so their absence can be reported).
fields_to_check = frozenset(ins._raw) | _REQUIRED_ATTRS
# Remove fields that have already been checked.
fields_to_check -= {"metadata_version"}
for key in fields_to_check:
try:
if metadata_version:
# Can't use getattr() as that triggers descriptor protocol which
# will fail due to no value for the instance argument.
try:
field_metadata_version = cls.__dict__[key].added
except KeyError:
exc = InvalidMetadata(key, f"unrecognized field: {key!r}")
exceptions.append(exc)
continue
field_age = _VALID_METADATA_VERSIONS.index(
field_metadata_version
)
if field_age > metadata_age:
field = _RAW_TO_EMAIL_MAPPING[key]
exc = InvalidMetadata(
field,
"{field} introduced in metadata version "
"{field_metadata_version}, not {metadata_version}",
)
exceptions.append(exc)
continue
getattr(ins, key)
except InvalidMetadata as exc:
exceptions.append(exc)
if exceptions:
raise ExceptionGroup("invalid metadata", exceptions)
return ins
@classmethod
def from_email(cls, data: bytes | str, *, validate: bool = True) -> Metadata:
"""Parse metadata from email headers.
If *validate* is true, the metadata will be validated. All exceptions
related to validation will be gathered and raised as an :class:`ExceptionGroup`.
"""
raw, unparsed = parse_email(data)
if validate:
exceptions: list[Exception] = []
for unparsed_key in unparsed:
if unparsed_key in _EMAIL_TO_RAW_MAPPING:
message = f"{unparsed_key!r} has invalid data"
else:
message = f"unrecognized field: {unparsed_key!r}"
exceptions.append(InvalidMetadata(unparsed_key, message))
if exceptions:
raise ExceptionGroup("unparsed", exceptions)
try:
return cls.from_raw(raw, validate=validate)
except ExceptionGroup as exc_group:
raise ExceptionGroup(
"invalid or unparsed metadata", exc_group.exceptions
) from None
metadata_version: _Validator[_MetadataVersion] = _Validator()
""":external:ref:`core-metadata-metadata-version`
(required; validated to be a valid metadata version)"""
name: _Validator[str] = _Validator()
""":external:ref:`core-metadata-name`
(required; validated using :func:`~packaging.utils.canonicalize_name` and its
*validate* parameter)"""
version: _Validator[version_module.Version] = _Validator()
""":external:ref:`core-metadata-version` (required)"""
dynamic: _Validator[list[str] | None] = _Validator(
added="2.2",
)
""":external:ref:`core-metadata-dynamic`
(validated against core metadata field names and lowercased)"""
platforms: _Validator[list[str] | None] = _Validator()
""":external:ref:`core-metadata-platform`"""
supported_platforms: _Validator[list[str] | None] = _Validator(added="1.1")
""":external:ref:`core-metadata-supported-platform`"""
summary: _Validator[str | None] = _Validator()
""":external:ref:`core-metadata-summary` (validated to contain no newlines)"""
description: _Validator[str | None] = _Validator() # TODO 2.1: can be in body
""":external:ref:`core-metadata-description`"""
description_content_type: _Validator[str | None] = _Validator(added="2.1")
""":external:ref:`core-metadata-description-content-type` (validated)"""
keywords: _Validator[list[str] | None] = _Validator()
""":external:ref:`core-metadata-keywords`"""
home_page: _Validator[str | None] = _Validator()
""":external:ref:`core-metadata-home-page`"""
download_url: _Validator[str | None] = _Validator(added="1.1")
""":external:ref:`core-metadata-download-url`"""
author: _Validator[str | None] = _Validator()
""":external:ref:`core-metadata-author`"""
author_email: _Validator[str | None] = _Validator()
""":external:ref:`core-metadata-author-email`"""
maintainer: _Validator[str | None] = _Validator(added="1.2")
""":external:ref:`core-metadata-maintainer`"""
maintainer_email: _Validator[str | None] = _Validator(added="1.2")
""":external:ref:`core-metadata-maintainer-email`"""
license: _Validator[str | None] = _Validator()
""":external:ref:`core-metadata-license`"""
classifiers: _Validator[list[str] | None] = _Validator(added="1.1")
""":external:ref:`core-metadata-classifier`"""
requires_dist: _Validator[list[requirements.Requirement] | None] = _Validator(
added="1.2"
)
""":external:ref:`core-metadata-requires-dist`"""
requires_python: _Validator[specifiers.SpecifierSet | None] = _Validator(
added="1.2"
)
""":external:ref:`core-metadata-requires-python`"""
# Because `Requires-External` allows for non-PEP 440 version specifiers, we
# don't do any processing on the values.
requires_external: _Validator[list[str] | None] = _Validator(added="1.2")
""":external:ref:`core-metadata-requires-external`"""
project_urls: _Validator[dict[str, str] | None] = _Validator(added="1.2")
""":external:ref:`core-metadata-project-url`"""
# PEP 685 lets us raise an error if an extra doesn't pass `Name` validation
# regardless of metadata version.
provides_extra: _Validator[list[utils.NormalizedName] | None] = _Validator(
added="2.1",
)
""":external:ref:`core-metadata-provides-extra`"""
provides_dist: _Validator[list[str] | None] = _Validator(added="1.2")
""":external:ref:`core-metadata-provides-dist`"""
obsoletes_dist: _Validator[list[str] | None] = _Validator(added="1.2")
""":external:ref:`core-metadata-obsoletes-dist`"""
requires: _Validator[list[str] | None] = _Validator(added="1.1")
"""``Requires`` (deprecated)"""
provides: _Validator[list[str] | None] = _Validator(added="1.1")
"""``Provides`` (deprecated)"""
obsoletes: _Validator[list[str] | None] = _Validator(added="1.1")
"""``Obsoletes`` (deprecated)"""
|
Metadata
|
python
|
scipy__scipy
|
benchmarks/benchmarks/go_benchmark_functions/go_funcs_M.py
|
{
"start": 4707,
"end": 5810
}
|
class ____(Benchmark):
r"""
Miele-Cantrell [1]_ objective function.
This class defines the Miele-Cantrell global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\text{MieleCantrell}}({x}) = (e^{-x_1} - x_2)^4 + 100(x_2 - x_3)^6
+ \tan^4(x_3 - x_4) + x_1^8
with :math:`x_i \in [-1, 1]` for :math:`i = 1, ..., 4`.
*Global optimum*: :math:`f(x) = 0` for :math:`x = [0, 1, 1, 1]`
.. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
For Global Optimization Problems Int. Journal of Mathematical Modelling
and Numerical Optimisation, 2013, 4, 150-194.
"""
def __init__(self, dimensions=4):
Benchmark.__init__(self, dimensions)
self._bounds = list(zip([-1.0] * self.N, [1.0] * self.N))
self.global_optimum = [[0.0, 1.0, 1.0, 1.0]]
self.fglob = 0.0
def fun(self, x, *args):
self.nfev += 1
return ((exp(-x[0]) - x[1]) ** 4 + 100 * (x[1] - x[2]) ** 6
+ tan(x[2] - x[3]) ** 4 + x[0] ** 8)
|
MieleCantrell
|
python
|
wandb__wandb
|
wandb/sdk/artifacts/_generated/fetch_linked_artifacts.py
|
{
"start": 840,
"end": 1417
}
|
class ____(GQLResult):
version_index: Optional[int] = Field(alias="versionIndex")
aliases: List[ArtifactAliasFragment]
artifact_collection: Optional[CollectionInfoFragment] = Field(
alias="artifactCollection"
)
FetchLinkedArtifacts.model_rebuild()
FetchLinkedArtifactsArtifact.model_rebuild()
FetchLinkedArtifactsArtifactArtifactMemberships.model_rebuild()
FetchLinkedArtifactsArtifactArtifactMembershipsEdges.model_rebuild()
FetchLinkedArtifactsArtifactArtifactMembershipsEdgesNode.model_rebuild()
|
FetchLinkedArtifactsArtifactArtifactMembershipsEdgesNode
|
python
|
docker__docker-py
|
docker/types/containers.py
|
{
"start": 2925,
"end": 4815
}
|
class ____(DictType):
"""
Create a ulimit declaration to be used with
:py:meth:`~docker.api.container.ContainerApiMixin.create_host_config`.
Args:
name (str): Which ulimit will this apply to. The valid names can be
found in '/etc/security/limits.conf' on a gnu/linux system.
soft (int): The soft limit for this ulimit. Optional.
hard (int): The hard limit for this ulimit. Optional.
Example:
>>> nproc_limit = docker.types.Ulimit(name='nproc', soft=1024)
>>> hc = client.create_host_config(ulimits=[nproc_limit])
>>> container = client.create_container(
'busybox', 'true', host_config=hc
)
>>> client.inspect_container(container)['HostConfig']['Ulimits']
[{'Name': 'nproc', 'Hard': 0, 'Soft': 1024}]
"""
def __init__(self, **kwargs):
name = kwargs.get('name', kwargs.get('Name'))
soft = kwargs.get('soft', kwargs.get('Soft'))
hard = kwargs.get('hard', kwargs.get('Hard'))
if not isinstance(name, str):
raise ValueError("Ulimit.name must be a string")
if soft and not isinstance(soft, int):
raise ValueError("Ulimit.soft must be an integer")
if hard and not isinstance(hard, int):
raise ValueError("Ulimit.hard must be an integer")
super().__init__({
'Name': name,
'Soft': soft,
'Hard': hard
})
@property
def name(self):
return self['Name']
@name.setter
def name(self, value):
self['Name'] = value
@property
def soft(self):
return self.get('Soft')
@soft.setter
def soft(self, value):
self['Soft'] = value
@property
def hard(self):
return self.get('Hard')
@hard.setter
def hard(self, value):
self['Hard'] = value
|
Ulimit
|
python
|
fastapi__sqlmodel
|
docs_src/tutorial/insert/tutorial003_py310.py
|
{
"start": 63,
"end": 983
}
|
class ____(SQLModel, table=True):
id: int | None = Field(default=None, primary_key=True)
name: str
secret_name: str
age: int | None = None
sqlite_file_name = "database.db"
sqlite_url = f"sqlite:///{sqlite_file_name}"
engine = create_engine(sqlite_url, echo=True)
def create_db_and_tables():
SQLModel.metadata.create_all(engine)
def create_heroes(): # (1)!
hero_1 = Hero(name="Deadpond", secret_name="Dive Wilson") # (2)!
hero_2 = Hero(name="Spider-Boy", secret_name="Pedro Parqueador")
hero_3 = Hero(name="Rusty-Man", secret_name="Tommy Sharp", age=48)
with Session(engine) as session: # (3)!
session.add(hero_1) # (4)!
session.add(hero_2)
session.add(hero_3)
session.commit() # (5)!
# (6)!
def main(): # (7)!
create_db_and_tables() # (8)!
create_heroes() # (9)!
if __name__ == "__main__": # (10)!
main() # (11)!
|
Hero
|
python
|
apache__airflow
|
providers/google/src/airflow/providers/google/cloud/operators/cloud_composer.py
|
{
"start": 12261,
"end": 15252
}
|
class ____(GoogleCloudBaseOperator):
"""
Get an existing environment.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param region: Required. The ID of the Google Cloud region that the service belongs to.
:param environment_id: Required. The ID of the Google Cloud environment that the service belongs to.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
:param gcp_conn_id:
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields = (
"project_id",
"region",
"environment_id",
"impersonation_chain",
)
operator_extra_links = (CloudComposerEnvironmentLink(),)
def __init__(
self,
*,
project_id: str,
region: str,
environment_id: str,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.project_id = project_id
self.region = region
self.environment_id = environment_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
@property
def extra_links_params(self) -> dict[str, Any]:
return {
"project_id": self.project_id,
"region": self.region,
"environment_id": self.environment_id,
}
def execute(self, context: Context):
hook = CloudComposerHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
result = hook.get_environment(
project_id=self.project_id,
region=self.region,
environment_id=self.environment_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
CloudComposerEnvironmentLink.persist(context=context)
return Environment.to_dict(result)
|
CloudComposerGetEnvironmentOperator
|
python
|
huggingface__transformers
|
src/transformers/models/dpr/modeling_dpr.py
|
{
"start": 8004,
"end": 8267
}
|
class ____(DPRPreTrainedModel):
"""
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
models.
"""
config: DPRConfig
base_model_prefix = "ctx_encoder"
|
DPRPretrainedContextEncoder
|
python
|
eventlet__eventlet
|
tests/__init__.py
|
{
"start": 3621,
"end": 15084
}
|
class ____(unittest.TestCase):
""" Unittest subclass that adds a timeout to all tests. Subclasses must
be sure to call the LimitedTestCase setUp and tearDown methods. The default
timeout is 1 second, change it by setting TEST_TIMEOUT to the desired
quantity."""
TEST_TIMEOUT = 2
def setUp(self):
self.previous_alarm = None
self.timer = eventlet.Timeout(self.TEST_TIMEOUT,
TestIsTakingTooLong(self.TEST_TIMEOUT))
def reset_timeout(self, new_timeout):
"""Changes the timeout duration; only has effect during one test.
`new_timeout` can be int or float.
"""
self.timer.cancel()
self.timer = eventlet.Timeout(new_timeout,
TestIsTakingTooLong(new_timeout))
def set_alarm(self, new_timeout):
"""Call this in the beginning of your test if you expect busy loops.
Only has effect during one test.
`new_timeout` must be int.
"""
def sig_alarm_handler(sig, frame):
# Could arm previous alarm but test is failed anyway
# seems to be no point in restoring previous state.
raise TestIsTakingTooLong(new_timeout)
self.previous_alarm = (
signal.signal(signal.SIGALRM, sig_alarm_handler),
signal.alarm(new_timeout),
)
def tearDown(self):
self.timer.cancel()
if self.previous_alarm:
signal.signal(signal.SIGALRM, self.previous_alarm[0])
signal.alarm(self.previous_alarm[1])
tpool.killall()
gc.collect()
eventlet.sleep(0)
verify_hub_empty()
def assert_less_than(self, a, b, msg=None):
msg = msg or "%s not less than %s" % (a, b)
assert a < b, msg
assertLessThan = assert_less_than
def assert_less_than_equal(self, a, b, msg=None):
msg = msg or "%s not less than or equal to %s" % (a, b)
assert a <= b, msg
assertLessThanEqual = assert_less_than_equal
def check_idle_cpu_usage(duration, allowed_part):
if resource is None:
# TODO: use https://code.google.com/p/psutil/
raise SkipTest('CPU usage testing not supported (`import resource` failed)')
r1 = resource.getrusage(resource.RUSAGE_SELF)
eventlet.sleep(duration)
r2 = resource.getrusage(resource.RUSAGE_SELF)
utime = r2.ru_utime - r1.ru_utime
stime = r2.ru_stime - r1.ru_stime
# This check is reliably unreliable on Travis/Github Actions, presumably because of CPU
# resources being quite restricted by the build environment. The workaround
# is to apply an arbitrary factor that should be enough to make it work nicely.
if os.environ.get('CI') == 'true':
allowed_part *= 5
assert utime + stime < duration * allowed_part, \
"CPU usage over limit: user %.0f%% sys %.0f%% allowed %.0f%%" % (
utime / duration * 100, stime / duration * 100,
allowed_part * 100)
def verify_hub_empty():
def format_listener(listener):
return 'Listener %r for greenlet %r with run callback %r' % (
listener, listener.greenlet, getattr(listener.greenlet, 'run', None))
from eventlet import hubs
hub = hubs.get_hub()
readers = hub.get_readers()
writers = hub.get_writers()
num_readers = len(readers)
num_writers = len(writers)
num_timers = hub.get_timers_count()
assert num_readers == 0 and num_writers == 0, \
"Readers: %s (%d) Writers: %s (%d)" % (
', '.join(map(format_listener, readers)), num_readers,
', '.join(map(format_listener, writers)), num_writers,
)
def find_command(command):
for dir in os.getenv('PATH', '/usr/bin:/usr/sbin').split(os.pathsep):
p = os.path.join(dir, command)
if os.access(p, os.X_OK):
return p
raise OSError(errno.ENOENT, 'Command not found: %r' % command)
def silence_warnings(func):
def wrapper(*args, **kw):
warnings.simplefilter('ignore', DeprecationWarning)
try:
return func(*args, **kw)
finally:
warnings.simplefilter('default', DeprecationWarning)
wrapper.__name__ = func.__name__
return wrapper
def get_database_auth():
"""Retrieves a dict of connection parameters for connecting to test databases.
Authentication parameters are highly-machine specific, so
get_database_auth gets its information from either environment
variables or a config file. The environment variable is
"EVENTLET_DB_TEST_AUTH" and it should contain a json object. If
this environment variable is present, it's used and config files
are ignored. If it's not present, it looks in the local directory
(tests) and in the user's home directory for a file named
".test_dbauth", which contains a json map of parameters to the
connect function.
"""
retval = {
'MySQLdb': {'host': 'localhost', 'user': 'root', 'passwd': ''},
'psycopg2': {'user': 'test'},
}
if 'EVENTLET_DB_TEST_AUTH' in os.environ:
return json.loads(os.environ.get('EVENTLET_DB_TEST_AUTH'))
files = [os.path.join(os.path.dirname(__file__), '.test_dbauth'),
os.path.join(os.path.expanduser('~'), '.test_dbauth')]
for f in files:
try:
auth_utf8 = json.load(open(f))
# Have to convert unicode objects to str objects because
# mysqldb is dumb. Using a doubly-nested list comprehension
# because we know that the structure is a two-level dict.
return {
str(modname): {
str(k): str(v) for k, v in connectargs.items()}
for modname, connectargs in auth_utf8.items()}
except OSError:
pass
return retval
def run_python(path, env=None, args=None, timeout=None, pythonpath_extend=None, expect_pass=False):
new_argv = [sys.executable]
new_env = os.environ.copy()
new_env.setdefault('eventlet_test_in_progress', 'yes')
src_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
if path:
path = os.path.abspath(path)
new_argv.append(path)
new_env['PYTHONPATH'] = os.pathsep.join(sys.path + [src_dir])
if env:
new_env.update(env)
if pythonpath_extend:
new_path = [p for p in new_env.get('PYTHONPATH', '').split(os.pathsep) if p]
new_path.extend(
p if os.path.isabs(p) else os.path.join(src_dir, p) for p in pythonpath_extend
)
new_env['PYTHONPATH'] = os.pathsep.join(new_path)
if args:
new_argv.extend(args)
p = subprocess.Popen(
new_argv,
env=new_env,
stderr=subprocess.STDOUT,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
)
if timeout is None:
timeout = 10
try:
output, _ = p.communicate(timeout=timeout)
except subprocess.TimeoutExpired:
p.kill()
output, _ = p.communicate(timeout=timeout)
if expect_pass:
sys.stderr.write('Program {} output:\n---\n{}\n---\n'.format(path, output.decode()))
assert False, 'timed out'
return '{}\nFAIL - timed out'.format(output).encode()
if expect_pass:
if output.startswith(b'skip'):
parts = output.rstrip().split(b':', 1)
skip_args = []
if len(parts) > 1:
skip_args.append(parts[1])
raise SkipTest(*skip_args)
lines = output.splitlines()
ok = lines[-1].rstrip() == b'pass'
if not ok or len(lines) > 1:
sys.stderr.write('Program {} output:\n---\n{}\n---\n'.format(path, output.decode(errors="backslashreplace")))
assert ok, 'Expected single line "pass" in stdout'
return output
def run_isolated(path, prefix='tests/isolated/', **kwargs):
kwargs.setdefault('expect_pass', True)
run_python(prefix + path, **kwargs)
def check_is_timeout(obj):
value_text = getattr(obj, 'is_timeout', '(missing)')
assert eventlet.is_timeout(obj), 'type={} str={} .is_timeout={}'.format(type(obj), str(obj), value_text)
@contextlib.contextmanager
def capture_stderr():
stream = io.StringIO()
original = sys.stderr
try:
sys.stderr = stream
yield stream
finally:
sys.stderr = original
stream.seek(0)
certificate_file = os.path.join(os.path.dirname(__file__), 'test_server.crt')
private_key_file = os.path.join(os.path.dirname(__file__), 'test_server.key')
@contextlib.contextmanager
def dns_tcp_server(ip_to_give, request_count=1):
state = [0] # request count storage writable by thread
host = "localhost"
death_pill = b"DEATH_PILL"
def extract_domain(data):
domain = b''
kind = (data[4] >> 3) & 15 # Opcode bits
if kind == 0: # Standard query
ini = 14
length = data[ini]
while length != 0:
domain += data[ini + 1:ini + length + 1] + b'.'
ini += length + 1
length = data[ini]
return domain
def answer(data, domain):
domain_length = len(domain)
packet = b''
if domain:
# If an ip was given we return it in the answer
if ip_to_give:
packet += data[2:4] + b'\x81\x80'
packet += data[6:8] + data[6:8] + b'\x00\x00\x00\x00' # Questions and answers counts
packet += data[14: 14 + domain_length + 1] # Original domain name question
packet += b'\x00\x01\x00\x01' # Type and class
packet += b'\xc0\x0c\x00\x01' # TTL
packet += b'\x00\x01'
packet += b'\x00\x00\x00\x08'
packet += b'\x00\x04' # Resource data length -> 4 bytes
packet += bytearray(int(x) for x in ip_to_give.split("."))
else:
packet += data[2:4] + b'\x85\x80'
packet += data[6:8] + b'\x00\x00' + b'\x00\x00\x00\x00' # Questions and answers counts
packet += data[14: 14 + domain_length + 1] # Original domain name question
packet += b'\x00\x01\x00\x01' # Type and class
sz = struct.pack('>H', len(packet))
return sz + packet
def serve(server_socket): # thread target
client_sock, address = server_socket.accept()
state[0] += 1
if state[0] <= request_count:
data = bytearray(client_sock.recv(1024))
if data == death_pill:
client_sock.close()
return
domain = extract_domain(data)
client_sock.sendall(answer(data, domain))
client_sock.close()
# Server starts
server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
server_socket.bind((host, 0))
server_socket.listen(5)
server_addr = server_socket.getsockname()
thread = Thread(target=serve, args=(server_socket, ))
thread.start()
yield server_addr
# Stop the server
client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
client.connect(server_addr)
client.send(death_pill)
client.close()
thread.join()
server_socket.close()
def read_file(path, mode="rb"):
with open(path, mode) as f:
result = f.read()
return result
|
LimitedTestCase
|
python
|
bokeh__bokeh
|
tests/unit/bokeh/util/test_hex.py
|
{
"start": 1277,
"end": 2045
}
|
class ____:
def test_default_aspect_pointytop(self) -> None:
q = np.array([0, 0, 0, 1, -1, 1, -1])
r = np.array([0, 1, -1, 0, 1, -1, 0])
x, y = buh.axial_to_cartesian(q, r, 1, "pointytop")
sq3 = np.sqrt(3)
assert list(x) == [0, sq3/2, -sq3/2, sq3, -sq3/2, sq3/2, -sq3]
assert list(y) == [-0.0, -1.5, 1.5, -0.0, -1.5, 1.5, -0.0]
def test_default_aspect_flattop(self) -> None:
q = np.array([0, 0, 0, 1, -1, 1, -1])
r = np.array([0, 1, -1, 0, 1, -1, 0])
x, y = buh.axial_to_cartesian(q, r, 1, "flattop")
sq3 = np.sqrt(3)
assert list(x) == [0.0, 0.0, 0.0, 1.5, -1.5, 1.5, -1.5]
assert list(y) == [0, -sq3, sq3, -sq3/2, -sq3/2, sq3/2, sq3/2]
|
Test_axial_to_cartesian
|
python
|
simonw__datasette
|
datasette/views/row.py
|
{
"start": 6487,
"end": 7392
}
|
class ____(BaseView):
name = "row-delete"
def __init__(self, datasette):
self.ds = datasette
async def post(self, request):
ok, resolved = await _resolve_row_and_check_permission(
self.ds, request, "delete-row"
)
if not ok:
return resolved
# Delete table
def delete_row(conn):
sqlite_utils.Database(conn)[resolved.table].delete(resolved.pk_values)
try:
await resolved.db.execute_write_fn(delete_row)
except Exception as e:
return _error([str(e)], 500)
await self.ds.track_event(
DeleteRowEvent(
actor=request.actor,
database=resolved.db.name,
table=resolved.table,
pks=resolved.pk_values,
)
)
return Response.json({"ok": True}, status=200)
|
RowDeleteView
|
python
|
huggingface__transformers
|
src/transformers/models/patchtsmixer/modeling_patchtsmixer.py
|
{
"start": 4598,
"end": 6126
}
|
class ____(nn.Module):
"""Normalization block
Args:
config (`PatchTSMixerConfig`):
Configuration.
"""
def __init__(self, config: PatchTSMixerConfig):
super().__init__()
self.norm_mlp = config.norm_mlp
if "batch" in config.norm_mlp.lower():
self.norm = PatchTSMixerBatchNorm(config)
else:
self.norm = nn.LayerNorm(config.d_model, eps=config.norm_eps)
def forward(self, inputs: torch.Tensor):
"""
Args:
inputs (`torch.Tensor` of shape `((batch_size, num_channels, num_patches, d_model))`):
Input to the normalization layer.
Returns:
`torch.Tensor` of shape `((batch_size, num_channels, num_patches, d_model))`
"""
if "batch" in self.norm_mlp.lower():
# reshape the data
inputs_reshaped = torch.reshape(
inputs,
(
inputs.shape[0] * inputs.shape[1],
inputs.shape[2],
inputs.shape[3],
),
) # inputs_reshaped: [batch_size*num_channels, num_patches, d_model]
# inputs_reshaped: [batch_size*num_channels, num_patches, d_model]
inputs_reshaped = self.norm(inputs_reshaped)
# put back data to the original shape
inputs = torch.reshape(inputs_reshaped, inputs.shape)
else:
inputs = self.norm(inputs)
return inputs
|
PatchTSMixerNormLayer
|
python
|
pytorch__pytorch
|
torch/nn/attention/flex_attention.py
|
{
"start": 10098,
"end": 17101
}
|
class ____(Enum):
"""Enum for the type of modification function.
- SCORE_MOD: score_mod function which accepts a score as the first argument
- mask_mod: mask function which does not accept a score and is only used for generating
block mask
"""
SCORE_MOD = 1
MASK_MOD = 2
UNKNOWN = 3
def _get_mod_type(fn: Callable) -> _ModificationType:
"""Get the type of modification function.
This function inspects the number of positional arguments of the function to determine
the type of modification function. If the function has 5 positional arguments, it is
considered as a score_mod function. If the function has 4 positional arguments, it is
considered as a mask function.
"""
if hasattr(fn, "__code__"):
code = fn.__code__
num_positional_total = code.co_argcount
defaults = ()
if hasattr(fn, "__defaults__"):
defaults = fn.__defaults__ or ()
num_defaults = len(defaults)
num_positional_args = num_positional_total - num_defaults
else:
num_positional_args = sum(
1
for param in inspect.signature(fn).parameters.values()
if param.default is inspect.Parameter.empty
)
assert num_positional_args == 5 or num_positional_args == 4
if num_positional_args == 5:
return _ModificationType.SCORE_MOD
elif num_positional_args == 4:
return _ModificationType.MASK_MOD
else:
return _ModificationType.UNKNOWN
# Need to define it here so that Dynamo doesn't skip it
def _vmap_for_bhqkv(
fn: Callable,
prefix: tuple[int | None, ...],
suffix: tuple[int | None, ...] = (),
out_dims: int | list[int | None] = 0,
group_dim: bool = False,
):
"""Used to vmap both score_mods and mask_mods over 4-dimensional/5-dimension inputs.
Mapping over the [b, hq, q_idx, kv_idx] or [b, hkv, g, q_idx, kv_idx] dimensions.
Args:
fn (callable): The function to vmap.
prefix (tuple): The prefix of the vmap. For score mod functions,
this should be set to (0,). For mask_mods = ()
suffix (tuple): We need to add (0,) if gradOut is being mapped over,
and (None,) * len(other_buffers).
out_dims (tuple): For forward cases, keep this as the default 0 since
we are only returning 1 output. For backwards, the joint
graph returns grads for B, H, Q_idx, KV_idx and other_buffers,
so we set this to (0, None, None, None, None) + (None,) * len(other_buffers).
Returns:
callable: The vmapped function.
"""
# We vamp a function 4 times, broadcasting the [b, h, q_idx, kv_idx] dimensions
dimensions: list[tuple[None | int, None | int, None | int, None | int]] = []
dimensions = [
(None, None, None, 0),
(None, None, 0, None),
(None, 0, None, None),
]
if group_dim:
dimensions += [
(None, 0, None, None),
]
dimensions += [
(0, None, None, None),
]
for dims in dimensions:
fn = torch.vmap(fn, in_dims=prefix + dims + suffix, out_dims=out_dims) # type: ignore[arg-type]
return fn
def _identity(
score: Tensor,
batch: Tensor,
head: Tensor,
token_q: Tensor,
token_kv: Tensor,
) -> Tensor:
return score
def noop_mask(
batch: Tensor,
head: Tensor,
token_q: Tensor,
token_kv: Tensor,
) -> Tensor:
"""Returns a noop mask_mod"""
return batch.new_ones(size=(), dtype=torch.bool, device=batch.device)
def _sliced_mask_mod_error(
batch: Tensor,
head: Tensor,
token_q: Tensor,
token_kv: Tensor,
) -> Tensor:
"""
Raises helpful error when using mask_mod from a sliced BlockMask.
After slicing a BlockMask, the mask_mod is reset and cannot be used directly.
Users must reassign mask_mod from the original (unsliced) BlockMask.
"""
raise RuntimeError(
"Cannot use mask_mod from a sliced BlockMask. "
"When you slice a BlockMask using [], the mask_mod attribute is reset. "
"You must set it from the original BlockMask's mask_mod."
"\n\nIncorrect usage:"
"\n base_mask = create_block_mask(my_mask_fn, ...)"
"\n sliced_mask = base_mask[:, :, block_idx]"
"\n sliced_mask.mask_mod = apply_offset(sliced_mask.mask_mod, offset) # WRONG!"
"\n\nCorrect usage:"
"\n base_mask = create_block_mask(my_mask_fn, ...)"
"\n sliced_mask = base_mask[:, :, block_idx]"
"\n sliced_mask.mask_mod = apply_offset(base_mask.mask_mod, offset) # Use base_mask!"
)
_DEFAULT_SPARSE_BLOCK_SIZE = 128
_LARGE_SPARSE_BLOCK_SIZE = 1 << 30
def _ordered_to_dense(num_blocks_in_row: Tensor, col_indices: Tensor):
num_rows = col_indices.shape[-2]
num_cols = col_indices.shape[-1]
batch_dims = num_blocks_in_row.shape[:-1]
device = num_blocks_in_row.device
def create_dense_one(kv_num_blocks, kv_indices):
dense_mask = kv_indices.new_zeros(num_rows, num_cols + 1, dtype=torch.int32)
row_indices = torch.arange(num_rows, dtype=torch.int, device=device).unsqueeze(
-1
)
col_range = torch.arange(num_cols, dtype=torch.int, device=device)
index_mask = col_range < kv_num_blocks.unsqueeze(-1)
# We write to one spot "out of bounds"
valid_indices = torch.where(index_mask, kv_indices, num_cols)
# set the values in 'a' to 1 where the indices are valid
dense_mask[row_indices, valid_indices] = dense_mask.new_ones(())
return dense_mask[:, :num_cols].contiguous()
create_dense_batched = create_dense_one
for _ in range(len(batch_dims)):
create_dense_batched = torch.vmap(create_dense_batched, in_dims=(0, 0))
out = create_dense_batched(num_blocks_in_row, col_indices)
return out
def _dense_to_ordered(dense_mask) -> tuple[Tensor, Tensor]:
dense_mask = dense_mask.to(dtype=torch.int32)
num_blocks_in_row = dense_mask.sum(dim=-1)
col_indices = torch.argsort(dense_mask, dim=-1, descending=True, stable=True)
return (
num_blocks_in_row.to(torch.int32, memory_format=torch.contiguous_format),
col_indices.to(torch.int32, memory_format=torch.contiguous_format),
)
def _transpose_ordered(num_blocks_in_row: Tensor, col_indices: Tensor):
dense = _ordered_to_dense(num_blocks_in_row, col_indices)
return _dense_to_ordered(dense.transpose(-2, -1))
def _adjust_num_blocks_and_indices(
num_blocks: Tensor,
indices: Tensor,
new_num_rows: int,
new_num_cols: int,
):
indices = indices[:, :, :new_num_rows, :new_num_cols]
num_blocks = num_blocks[:, :, :new_num_rows]
num_blocks = torch.where(num_blocks < new_num_cols, num_blocks, new_num_cols)
num_blocks = torch.sum(indices < num_blocks[:, :, :, None], dim=-1).to(torch.int32)
return num_blocks, indices
|
_ModificationType
|
python
|
allegroai__clearml
|
clearml/backend_api/services/v2_23/queues.py
|
{
"start": 79613,
"end": 80916
}
|
class ____(Response):
"""
Response of queues.move_task_forward endpoint.
:param position: The new position of the task entry in the queue (index, -1
represents bottom of queue)
:type position: int
"""
_service = "queues"
_action = "move_task_forward"
_version = "2.23"
_schema = {
"definitions": {},
"properties": {
"position": {
"description": "The new position of the task entry in the queue (index, -1 represents bottom of queue)",
"type": ["integer", "null"],
}
},
"type": "object",
}
def __init__(self, position: Optional[int] = None, **kwargs: Any) -> None:
super(MoveTaskForwardResponse, self).__init__(**kwargs)
self.position = position
@schema_property("position")
def position(self) -> Optional[int]:
return self._property_position
@position.setter
def position(self, value: Optional[int]) -> None:
if value is None:
self._property_position = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "position", six.integer_types)
self._property_position = value
|
MoveTaskForwardResponse
|
python
|
numpy__numpy
|
numpy/_core/tests/test_umath.py
|
{
"start": 122446,
"end": 125648
}
|
class ____:
def test_abs_neg_blocked(self):
# simd tests on abs, test all alignments for vz + 2 * (vs - 1) + 1
for dt, sz in [(np.float32, 11), (np.float64, 5)]:
for out, inp, msg in _gen_alignment_data(dtype=dt, type='unary',
max_size=sz):
tgt = [ncu.absolute(i) for i in inp]
np.absolute(inp, out=out)
assert_equal(out, tgt, err_msg=msg)
assert_((out >= 0).all())
tgt = [-1 * (i) for i in inp]
np.negative(inp, out=out)
assert_equal(out, tgt, err_msg=msg)
for v in [np.nan, -np.inf, np.inf]:
for i in range(inp.size):
d = np.arange(inp.size, dtype=dt)
inp[:] = -d
inp[i] = v
d[i] = -v if v == -np.inf else v
assert_array_equal(np.abs(inp), d, err_msg=msg)
np.abs(inp, out=out)
assert_array_equal(out, d, err_msg=msg)
assert_array_equal(-inp, -1 * inp, err_msg=msg)
d = -1 * inp
np.negative(inp, out=out)
assert_array_equal(out, d, err_msg=msg)
def test_lower_align(self):
# check data that is not aligned to element size
# i.e doubles are aligned to 4 bytes on i386
d = np.zeros(23 * 8, dtype=np.int8)[4:-4].view(np.float64)
assert_equal(np.abs(d), d)
assert_equal(np.negative(d), -d)
np.negative(d, out=d)
np.negative(np.ones_like(d), out=d)
np.abs(d, out=d)
np.abs(np.ones_like(d), out=d)
@pytest.mark.parametrize("dtype", ['d', 'f', 'int32', 'int64'])
@pytest.mark.parametrize("big", [True, False])
def test_noncontiguous(self, dtype, big):
data = np.array([-1.0, 1.0, -0.0, 0.0, 2.2251e-308, -2.5, 2.5, -6,
6, -2.2251e-308, -8, 10], dtype=dtype)
expect = np.array([1.0, -1.0, 0.0, -0.0, -2.2251e-308, 2.5, -2.5, 6,
-6, 2.2251e-308, 8, -10], dtype=dtype)
if big:
data = np.repeat(data, 10)
expect = np.repeat(expect, 10)
out = np.ndarray(data.shape, dtype=dtype)
ncontig_in = data[1::2]
ncontig_out = out[1::2]
contig_in = np.array(ncontig_in)
# contig in, contig out
assert_array_equal(np.negative(contig_in), expect[1::2])
# contig in, ncontig out
assert_array_equal(np.negative(contig_in, out=ncontig_out),
expect[1::2])
# ncontig in, contig out
assert_array_equal(np.negative(ncontig_in), expect[1::2])
# ncontig in, ncontig out
assert_array_equal(np.negative(ncontig_in, out=ncontig_out),
expect[1::2])
# contig in, contig out, nd stride
data_split = np.array(np.array_split(data, 2))
expect_split = np.array(np.array_split(expect, 2))
assert_equal(np.negative(data_split), expect_split)
|
TestAbsoluteNegative
|
python
|
scrapy__scrapy
|
tests/spiders.py
|
{
"start": 14858,
"end": 15309
}
|
class ____(CrawlSpiderWithParseMethod):
name = "crawl_spider_with_process_request_cb_kwargs"
rules = (
Rule(
LinkExtractor(),
callback="parse",
follow=True,
process_request="process_request",
),
)
def process_request(self, request, response):
request.cb_kwargs["foo"] = "process_request"
return request
|
CrawlSpiderWithProcessRequestCallbackKeywordArguments
|
python
|
scipy__scipy
|
scipy/optimize/tests/test_lsq_linear.py
|
{
"start": 9430,
"end": 9507
}
|
class ____(BaseMixin):
method = 'bvls'
lsq_solvers = ['exact']
|
TestBVLS
|
python
|
kamyu104__LeetCode-Solutions
|
Python/minimum-absolute-difference-queries.py
|
{
"start": 65,
"end": 926
}
|
class ____(object):
def minDifference(self, nums, queries):
"""
:type nums: List[int]
:type queries: List[List[int]]
:rtype: List[int]
"""
INF = float("inf")
prefix = [[0]*(max(nums)+1)]
for num in nums:
prefix.append(prefix[-1][:])
prefix[-1][num] += 1
result = []
for l, r in queries:
min_diff, prev = INF, -1
for num in xrange(len(prefix[0])):
if not (prefix[l][num] < prefix[r+1][num]):
continue
if prev != -1:
min_diff = min(min_diff, num-prev)
prev = num
result.append(min_diff if min_diff != INF else -1)
return result
# Time: O(r + n + q * r * logn), r is the max of nums
# Space: O(r + n)
import bisect
|
Solution
|
python
|
kubernetes-client__python
|
kubernetes/client/models/v1beta1_ip_address_list.py
|
{
"start": 383,
"end": 6993
}
|
class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'api_version': 'str',
'items': 'list[V1beta1IPAddress]',
'kind': 'str',
'metadata': 'V1ListMeta'
}
attribute_map = {
'api_version': 'apiVersion',
'items': 'items',
'kind': 'kind',
'metadata': 'metadata'
}
def __init__(self, api_version=None, items=None, kind=None, metadata=None, local_vars_configuration=None): # noqa: E501
"""V1beta1IPAddressList - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._api_version = None
self._items = None
self._kind = None
self._metadata = None
self.discriminator = None
if api_version is not None:
self.api_version = api_version
self.items = items
if kind is not None:
self.kind = kind
if metadata is not None:
self.metadata = metadata
@property
def api_version(self):
"""Gets the api_version of this V1beta1IPAddressList. # noqa: E501
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
:return: The api_version of this V1beta1IPAddressList. # noqa: E501
:rtype: str
"""
return self._api_version
@api_version.setter
def api_version(self, api_version):
"""Sets the api_version of this V1beta1IPAddressList.
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
:param api_version: The api_version of this V1beta1IPAddressList. # noqa: E501
:type: str
"""
self._api_version = api_version
@property
def items(self):
"""Gets the items of this V1beta1IPAddressList. # noqa: E501
items is the list of IPAddresses. # noqa: E501
:return: The items of this V1beta1IPAddressList. # noqa: E501
:rtype: list[V1beta1IPAddress]
"""
return self._items
@items.setter
def items(self, items):
"""Sets the items of this V1beta1IPAddressList.
items is the list of IPAddresses. # noqa: E501
:param items: The items of this V1beta1IPAddressList. # noqa: E501
:type: list[V1beta1IPAddress]
"""
if self.local_vars_configuration.client_side_validation and items is None: # noqa: E501
raise ValueError("Invalid value for `items`, must not be `None`") # noqa: E501
self._items = items
@property
def kind(self):
"""Gets the kind of this V1beta1IPAddressList. # noqa: E501
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
:return: The kind of this V1beta1IPAddressList. # noqa: E501
:rtype: str
"""
return self._kind
@kind.setter
def kind(self, kind):
"""Sets the kind of this V1beta1IPAddressList.
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
:param kind: The kind of this V1beta1IPAddressList. # noqa: E501
:type: str
"""
self._kind = kind
@property
def metadata(self):
"""Gets the metadata of this V1beta1IPAddressList. # noqa: E501
:return: The metadata of this V1beta1IPAddressList. # noqa: E501
:rtype: V1ListMeta
"""
return self._metadata
@metadata.setter
def metadata(self, metadata):
"""Sets the metadata of this V1beta1IPAddressList.
:param metadata: The metadata of this V1beta1IPAddressList. # noqa: E501
:type: V1ListMeta
"""
self._metadata = metadata
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1beta1IPAddressList):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1beta1IPAddressList):
return True
return self.to_dict() != other.to_dict()
|
V1beta1IPAddressList
|
python
|
pytorch__pytorch
|
test/distributed/test_store.py
|
{
"start": 22103,
"end": 23016
}
|
class ____(TestCase, StoreTestBase):
def setUp(self):
super().setUp()
self.tcpstore = create_tcp_store()
self.prefix = "test_prefix"
self.tcpstore.set_timeout(timedelta(seconds=300))
def _create_store(self):
return dist.PrefixStore(self.prefix, self.tcpstore)
# The PrefixTCPStore has 6 keys in test_set_get. It contains the 5 keys
# added by the user and one additional key used for coordinate all the
# workers.
@property
def num_keys_total(self):
return 6
def test_underlying_non_prefix_store(self):
store = self._create_store()
wrapped_store = dist.PrefixStore(
self.prefix, dist.PrefixStore(self.prefix, store)
)
self.assertEqual(self.tcpstore, store._underlying_non_prefix_store)
self.assertEqual(self.tcpstore, wrapped_store._underlying_non_prefix_store)
|
PrefixTCPStoreTest
|
python
|
py-pdf__pypdf
|
pypdf/filters.py
|
{
"start": 13488,
"end": 15974
}
|
class ____:
"""
The RunLengthDecode filter decodes data that has been encoded in a
simple byte-oriented format based on run length.
The encoded data is a sequence of runs, where each run consists of
a length byte followed by 1 to 128 bytes of data. If the length byte is
in the range 0 to 127,
the following length + 1 (1 to 128) bytes are copied literally during
decompression.
If length is in the range 129 to 255, the following single byte is to be
copied 257 − length (2 to 128) times during decompression. A length value
of 128 denotes EOD.
"""
@staticmethod
def decode(
data: bytes,
decode_parms: Optional[DictionaryObject] = None,
**kwargs: Any,
) -> bytes:
"""
Decode a run length encoded data stream.
Args:
data: a bytes sequence of length/data
decode_parms: this filter does not use parameters.
Returns:
A bytes decompressed sequence.
Raises:
PdfStreamError:
"""
lst = []
index = 0
while True:
if index >= len(data):
logger_warning(
"missing EOD in RunLengthDecode, check if output is OK", __name__
)
break # Reached end of string without an EOD
length = data[index]
index += 1
if length == 128:
data_length = len(data)
if index < data_length:
# We should first check, if we have an inner stream from a multi-encoded
# stream with a faulty trailing newline that we can decode properly.
# We will just ignore the last byte and raise a warning ...
if (index == data_length - 1) and (data[index : index+1] == b"\n"):
logger_warning(
"Found trailing newline in stream data, check if output is OK", __name__
)
break
raise PdfStreamError("Early EOD in RunLengthDecode")
break
if length < 128:
length += 1
lst.append(data[index : (index + length)])
index += length
else: # >128
length = 257 - length
lst.append(bytes((data[index],)) * length)
index += 1
return b"".join(lst)
|
RunLengthDecode
|
python
|
django__django
|
tests/backends/models.py
|
{
"start": 1274,
"end": 1722
}
|
class ____(models.Model):
primary_key_is_quite_long_zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz = models.AutoField(
primary_key=True
)
charfield_is_quite_long_zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz = models.CharField(
max_length=100
)
m2m_also_quite_long_zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz = (
models.ManyToManyField(Person, blank=True)
)
|
VeryLongModelNameZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ
|
python
|
PrefectHQ__prefect
|
src/integrations/prefect-databricks/prefect_databricks/rest.py
|
{
"start": 422,
"end": 4600
}
|
class ____(Enum):
"""
Available HTTP request methods.
"""
GET = "get"
POST = "post"
PUT = "put"
DELETE = "delete"
PATCH = "patch"
def serialize_model(obj: Any) -> Any:
"""
Recursively serializes `pydantic.BaseModel` into JSON;
returns original obj if not a `BaseModel`.
Args:
obj: Input object to serialize.
Returns:
Serialized version of object.
"""
if isinstance(obj, list):
return [serialize_model(o) for o in obj]
elif isinstance(obj, Dict):
return {k: serialize_model(v) for k, v in obj.items()}
if isinstance(obj, BaseModel):
return obj.model_dump(mode="json")
elif isinstance(obj, Enum):
return obj.value
return obj
def strip_kwargs(**kwargs: Dict) -> Dict:
"""
Recursively drops keyword arguments if value is None,
and serializes any `pydantic.BaseModel` types.
Args:
**kwargs: Input keyword arguments.
Returns:
Stripped version of kwargs.
"""
stripped_dict = {}
for k, v in kwargs.items():
v = serialize_model(v)
if isinstance(v, dict):
v = strip_kwargs(**v)
if v is not None:
stripped_dict[k] = v
return stripped_dict or {}
@task
async def execute_endpoint(
endpoint: str,
databricks_credentials: "DatabricksCredentials",
http_method: HTTPMethod = HTTPMethod.GET,
params: Dict[str, Any] = None,
json: Dict[str, Any] = None,
**kwargs: Dict[str, Any],
) -> httpx.Response:
"""
Generic function for executing REST endpoints.
Args:
endpoint: The endpoint route.
databricks_credentials: Credentials to use for authentication with Databricks.
http_method: Either GET, POST, PUT, DELETE, or PATCH.
params: URL query parameters in the request.
json: JSON serializable object to include in the body of the request.
**kwargs: Additional keyword arguments to pass.
Returns:
The httpx.Response from interacting with the endpoint.
Examples:
Lists jobs on the Databricks instance.
```python
from prefect import flow
from prefect_databricks import DatabricksCredentials
from prefect_databricks.rest import execute_endpoint
@flow
def example_execute_endpoint_flow():
endpoint = "/2.1/jobs/list"
databricks_credentials = DatabricksCredentials.load("my-block")
params = {
"limit": 5,
"offset": None,
"expand_tasks": True,
}
response = execute_endpoint(
endpoint,
databricks_credentials,
params=params
)
return response.json()
```
"""
if isinstance(http_method, HTTPMethod):
http_method = http_method.value
if params is not None:
stripped_params = strip_kwargs(**params)
else:
stripped_params = None
if json is not None:
kwargs["json"] = strip_kwargs(**json)
async with databricks_credentials.get_client() as client:
response = await getattr(client, http_method)(
endpoint, params=stripped_params, **kwargs
)
return response
def _unpack_contents(
response: httpx.Response, responses: Optional[Dict[int, str]] = None
) -> Union[Dict[str, Any], bytes]:
"""
Helper method to unpack the contents from the httpx.Response,
reporting errors in a helpful manner, if any.
"""
try:
response.raise_for_status()
except httpx.HTTPStatusError as exc:
helpful_error_response = (responses or {}).get(response.status_code, "")
try:
helpful_error_response += f"JSON response: {response.json()}"
except Exception:
pass
if helpful_error_response:
raise httpx.HTTPStatusError(
helpful_error_response, request=exc.request, response=exc.response
) from exc
else:
raise
try:
return response.json()
except json.JSONDecodeError:
return response.content
|
HTTPMethod
|
python
|
pypa__pip
|
src/pip/_internal/index/package_finder.py
|
{
"start": 3709,
"end": 12717
}
|
class ____:
"""
Responsible for evaluating links for a particular project.
"""
_py_version_re = re.compile(r"-py([123]\.?[0-9]?)$")
# Don't include an allow_yanked default value to make sure each call
# site considers whether yanked releases are allowed. This also causes
# that decision to be made explicit in the calling code, which helps
# people when reading the code.
def __init__(
self,
project_name: str,
canonical_name: NormalizedName,
formats: frozenset[str],
target_python: TargetPython,
allow_yanked: bool,
ignore_requires_python: bool | None = None,
) -> None:
"""
:param project_name: The user supplied package name.
:param canonical_name: The canonical package name.
:param formats: The formats allowed for this package. Should be a set
with 'binary' or 'source' or both in it.
:param target_python: The target Python interpreter to use when
evaluating link compatibility. This is used, for example, to
check wheel compatibility, as well as when checking the Python
version, e.g. the Python version embedded in a link filename
(or egg fragment) and against an HTML link's optional PEP 503
"data-requires-python" attribute.
:param allow_yanked: Whether files marked as yanked (in the sense
of PEP 592) are permitted to be candidates for install.
:param ignore_requires_python: Whether to ignore incompatible
PEP 503 "data-requires-python" values in HTML links. Defaults
to False.
"""
if ignore_requires_python is None:
ignore_requires_python = False
self._allow_yanked = allow_yanked
self._canonical_name = canonical_name
self._ignore_requires_python = ignore_requires_python
self._formats = formats
self._target_python = target_python
self.project_name = project_name
def evaluate_link(self, link: Link) -> tuple[LinkType, str]:
"""
Determine whether a link is a candidate for installation.
:return: A tuple (result, detail), where *result* is an enum
representing whether the evaluation found a candidate, or the reason
why one is not found. If a candidate is found, *detail* will be the
candidate's version string; if one is not found, it contains the
reason the link fails to qualify.
"""
version = None
if link.is_yanked and not self._allow_yanked:
reason = link.yanked_reason or "<none given>"
return (LinkType.yanked, f"yanked for reason: {reason}")
if link.egg_fragment:
egg_info = link.egg_fragment
ext = link.ext
else:
egg_info, ext = link.splitext()
if not ext:
return (LinkType.format_unsupported, "not a file")
if ext not in SUPPORTED_EXTENSIONS:
return (
LinkType.format_unsupported,
f"unsupported archive format: {ext}",
)
if "binary" not in self._formats and ext == WHEEL_EXTENSION:
reason = f"No binaries permitted for {self.project_name}"
return (LinkType.format_unsupported, reason)
if "macosx10" in link.path and ext == ".zip":
return (LinkType.format_unsupported, "macosx10 one")
if ext == WHEEL_EXTENSION:
try:
wheel = Wheel(link.filename)
except InvalidWheelFilename:
return (
LinkType.format_invalid,
"invalid wheel filename",
)
if wheel.name != self._canonical_name:
reason = f"wrong project name (not {self.project_name})"
return (LinkType.different_project, reason)
supported_tags = self._target_python.get_unsorted_tags()
if not wheel.supported(supported_tags):
# Include the wheel's tags in the reason string to
# simplify troubleshooting compatibility issues.
file_tags = ", ".join(wheel.get_formatted_file_tags())
reason = (
f"none of the wheel's tags ({file_tags}) are compatible "
f"(run pip debug --verbose to show compatible tags)"
)
return (LinkType.platform_mismatch, reason)
version = wheel.version
# This should be up by the self.ok_binary check, but see issue 2700.
if "source" not in self._formats and ext != WHEEL_EXTENSION:
reason = f"No sources permitted for {self.project_name}"
return (LinkType.format_unsupported, reason)
if not version:
version = _extract_version_from_fragment(
egg_info,
self._canonical_name,
)
if not version:
reason = f"Missing project version for {self.project_name}"
return (LinkType.format_invalid, reason)
match = self._py_version_re.search(version)
if match:
version = version[: match.start()]
py_version = match.group(1)
if py_version != self._target_python.py_version:
return (
LinkType.platform_mismatch,
"Python version is incorrect",
)
supports_python = _check_link_requires_python(
link,
version_info=self._target_python.py_version_info,
ignore_requires_python=self._ignore_requires_python,
)
if not supports_python:
requires_python = link.requires_python
if requires_python:
def get_version_sort_key(v: str) -> tuple[int, ...]:
return tuple(int(s) for s in v.split(".") if s.isdigit())
requires_python = ",".join(
sorted(
(str(s) for s in specifiers.SpecifierSet(requires_python)),
key=get_version_sort_key,
)
)
reason = f"{version} Requires-Python {requires_python}"
return (LinkType.requires_python_mismatch, reason)
logger.debug("Found link %s, version: %s", link, version)
return (LinkType.candidate, version)
def filter_unallowed_hashes(
candidates: list[InstallationCandidate],
hashes: Hashes | None,
project_name: str,
) -> list[InstallationCandidate]:
"""
Filter out candidates whose hashes aren't allowed, and return a new
list of candidates.
If at least one candidate has an allowed hash, then all candidates with
either an allowed hash or no hash specified are returned. Otherwise,
the given candidates are returned.
Including the candidates with no hash specified when there is a match
allows a warning to be logged if there is a more preferred candidate
with no hash specified. Returning all candidates in the case of no
matches lets pip report the hash of the candidate that would otherwise
have been installed (e.g. permitting the user to more easily update
their requirements file with the desired hash).
"""
if not hashes:
logger.debug(
"Given no hashes to check %s links for project %r: "
"discarding no candidates",
len(candidates),
project_name,
)
# Make sure we're not returning back the given value.
return list(candidates)
matches_or_no_digest = []
# Collect the non-matches for logging purposes.
non_matches = []
match_count = 0
for candidate in candidates:
link = candidate.link
if not link.has_hash:
pass
elif link.is_hash_allowed(hashes=hashes):
match_count += 1
else:
non_matches.append(candidate)
continue
matches_or_no_digest.append(candidate)
if match_count:
filtered = matches_or_no_digest
else:
# Make sure we're not returning back the given value.
filtered = list(candidates)
if len(filtered) == len(candidates):
discard_message = "discarding no candidates"
else:
discard_message = "discarding {} non-matches:\n {}".format(
len(non_matches),
"\n ".join(str(candidate.link) for candidate in non_matches),
)
logger.debug(
"Checked %s links for project %r against %s hashes "
"(%s matches, %s no digest): %s",
len(candidates),
project_name,
hashes.digest_count,
match_count,
len(matches_or_no_digest) - match_count,
discard_message,
)
return filtered
@dataclass
|
LinkEvaluator
|
python
|
tensorflow__tensorflow
|
tensorflow/python/data/experimental/kernel_tests/rebatch_dataset_test.py
|
{
"start": 17926,
"end": 18574
}
|
class ____(
checkpoint_test_base.CheckpointTestBase, parameterized.TestCase):
@combinations.generate(
combinations.times(test_base.default_test_combinations(),
checkpoint_test_base.default_test_combinations()))
def test(self, verify_fn):
def build_dataset(num_elements, batch_size):
return distribute._LegacyRebatchDataset(
dataset_ops.Dataset.range(num_elements).batch(
4 * batch_size, drop_remainder=True),
num_replicas=4)
verify_fn(self, lambda: build_dataset(64, 8), num_outputs=8)
if __name__ == "__main__":
test.main()
|
LegacyRebatchDatasetCheckpointTest
|
python
|
django__django
|
tests/utils_tests/test_numberformat.py
|
{
"start": 153,
"end": 7434
}
|
class ____(SimpleTestCase):
def test_format_number(self):
self.assertEqual(nformat(1234, "."), "1234")
self.assertEqual(nformat(1234.2, "."), "1234.2")
self.assertEqual(nformat(1234, ".", decimal_pos=2), "1234.00")
self.assertEqual(nformat(1234, ".", grouping=2, thousand_sep=","), "1234")
self.assertEqual(
nformat(1234, ".", grouping=2, thousand_sep=",", force_grouping=True),
"12,34",
)
self.assertEqual(nformat(-1234.33, ".", decimal_pos=1), "-1234.3")
# The use_l10n parameter can force thousand grouping behavior.
with self.settings(USE_THOUSAND_SEPARATOR=True):
self.assertEqual(
nformat(1234, ".", grouping=3, thousand_sep=",", use_l10n=False), "1234"
)
self.assertEqual(
nformat(1234, ".", grouping=3, thousand_sep=",", use_l10n=True), "1,234"
)
def test_format_string(self):
self.assertEqual(nformat("1234", "."), "1234")
self.assertEqual(nformat("1234.2", "."), "1234.2")
self.assertEqual(nformat("1234", ".", decimal_pos=2), "1234.00")
self.assertEqual(nformat("1234", ".", grouping=2, thousand_sep=","), "1234")
self.assertEqual(
nformat("1234", ".", grouping=2, thousand_sep=",", force_grouping=True),
"12,34",
)
self.assertEqual(nformat("-1234.33", ".", decimal_pos=1), "-1234.3")
self.assertEqual(
nformat(
"10000", ".", grouping=3, thousand_sep="comma", force_grouping=True
),
"10comma000",
)
def test_large_number(self):
most_max = (
"{}179769313486231570814527423731704356798070567525844996"
"598917476803157260780028538760589558632766878171540458953"
"514382464234321326889464182768467546703537516986049910576"
"551282076245490090389328944075868508455133942304583236903"
"222948165808559332123348274797826204144723168738177180919"
"29988125040402618412485836{}"
)
most_max2 = (
"{}35953862697246314162905484746340871359614113505168999"
"31978349536063145215600570775211791172655337563430809179"
"07028764928468642653778928365536935093407075033972099821"
"15310256415249098018077865788815173701691026788460916647"
"38064458963316171186642466965495956524082894463374763543"
"61838599762500808052368249716736"
)
int_max = int(float_info.max)
self.assertEqual(nformat(int_max, "."), most_max.format("", "8"))
self.assertEqual(nformat(int_max + 1, "."), most_max.format("", "9"))
self.assertEqual(nformat(int_max * 2, "."), most_max2.format(""))
self.assertEqual(nformat(0 - int_max, "."), most_max.format("-", "8"))
self.assertEqual(nformat(-1 - int_max, "."), most_max.format("-", "9"))
self.assertEqual(nformat(-2 * int_max, "."), most_max2.format("-"))
def test_float_numbers(self):
tests = [
(9e-10, 10, "0.0000000009"),
(9e-19, 2, "0.00"),
(0.00000000000099, 0, "0"),
(0.00000000000099, 13, "0.0000000000009"),
(1e16, None, "10000000000000000"),
(1e16, 2, "10000000000000000.00"),
# A float without a fractional part (3.) results in a ".0" when no
# decimal_pos is given. Contrast that with the Decimal('3.') case
# in test_decimal_numbers which doesn't return a fractional part.
(3.0, None, "3.0"),
]
for value, decimal_pos, expected_value in tests:
with self.subTest(value=value, decimal_pos=decimal_pos):
self.assertEqual(nformat(value, ".", decimal_pos), expected_value)
# Thousand grouping behavior.
self.assertEqual(
nformat(1e16, ".", thousand_sep=",", grouping=3, force_grouping=True),
"10,000,000,000,000,000",
)
self.assertEqual(
nformat(
1e16,
".",
decimal_pos=2,
thousand_sep=",",
grouping=3,
force_grouping=True,
),
"10,000,000,000,000,000.00",
)
def test_decimal_numbers(self):
self.assertEqual(nformat(Decimal("1234"), "."), "1234")
self.assertEqual(nformat(Decimal("1234.2"), "."), "1234.2")
self.assertEqual(nformat(Decimal("1234"), ".", decimal_pos=2), "1234.00")
self.assertEqual(
nformat(Decimal("1234"), ".", grouping=2, thousand_sep=","), "1234"
)
self.assertEqual(
nformat(
Decimal("1234"), ".", grouping=2, thousand_sep=",", force_grouping=True
),
"12,34",
)
self.assertEqual(nformat(Decimal("-1234.33"), ".", decimal_pos=1), "-1234.3")
self.assertEqual(
nformat(Decimal("0.00000001"), ".", decimal_pos=8), "0.00000001"
)
self.assertEqual(nformat(Decimal("9e-19"), ".", decimal_pos=2), "0.00")
self.assertEqual(nformat(Decimal(".00000000000099"), ".", decimal_pos=0), "0")
self.assertEqual(
nformat(
Decimal("1e16"), ".", thousand_sep=",", grouping=3, force_grouping=True
),
"10,000,000,000,000,000",
)
self.assertEqual(
nformat(
Decimal("1e16"),
".",
decimal_pos=2,
thousand_sep=",",
grouping=3,
force_grouping=True,
),
"10,000,000,000,000,000.00",
)
self.assertEqual(nformat(Decimal("3."), "."), "3")
self.assertEqual(nformat(Decimal("3.0"), "."), "3.0")
# Very large & small numbers.
tests = [
("9e9999", None, "9e+9999"),
("9e9999", 3, "9.000e+9999"),
("9e201", None, "9e+201"),
("9e200", None, "9e+200"),
("1.2345e999", 2, "1.23e+999"),
("9e-999", None, "9e-999"),
("1e-7", 8, "0.00000010"),
("1e-8", 8, "0.00000001"),
("1e-9", 8, "0.00000000"),
("1e-10", 8, "0.00000000"),
("1e-11", 8, "0.00000000"),
("1" + ("0" * 300), 3, "1.000e+300"),
("0.{}1234".format("0" * 299), 3, "0.000"),
]
for value, decimal_pos, expected_value in tests:
with self.subTest(value=value):
self.assertEqual(
nformat(Decimal(value), ".", decimal_pos), expected_value
)
def test_decimal_subclass(self):
class EuroDecimal(Decimal):
"""
Wrapper for Decimal which prefixes each amount with the € symbol.
"""
def __format__(self, specifier, **kwargs):
amount = super().__format__(specifier, **kwargs)
return "€ {}".format(amount)
price = EuroDecimal("1.23")
self.assertEqual(nformat(price, ","), "€ 1,23")
def test_empty(self):
self.assertEqual(nformat("", "."), "")
self.assertEqual(nformat(None, "."), "None")
|
TestNumberFormat
|
python
|
pytorch__pytorch
|
test/torch_np/numpy_tests/lib/test_function_base.py
|
{
"start": 45467,
"end": 57134
}
|
class ____(TestCase):
def test_simple(self):
def addsubtract(a, b):
if a > b:
return a - b
else:
return a + b
f = vectorize(addsubtract)
r = f([0, 3, 6, 9], [1, 3, 5, 7])
assert_array_equal(r, [1, 6, 1, 2])
def test_scalar(self):
def addsubtract(a, b):
if a > b:
return a - b
else:
return a + b
f = vectorize(addsubtract)
r = f([0, 3, 6, 9], 5)
assert_array_equal(r, [5, 8, 1, 4])
def test_large(self):
x = np.linspace(-3, 2, 10000)
f = vectorize(lambda x: x)
y = f(x)
assert_array_equal(y, x)
def test_ufunc(self):
f = vectorize(math.cos)
args = np.array([0, 0.5 * np.pi, np.pi, 1.5 * np.pi, 2 * np.pi])
r1 = f(args)
r2 = np.cos(args)
assert_array_almost_equal(r1, r2)
def test_keywords(self):
def foo(a, b=1):
return a + b
f = vectorize(foo)
args = np.array([1, 2, 3])
r1 = f(args)
r2 = np.array([2, 3, 4])
assert_array_equal(r1, r2)
r1 = f(args, 2)
r2 = np.array([3, 4, 5])
assert_array_equal(r1, r2)
def test_keywords_with_otypes_order1(self):
# gh-1620: The second call of f would crash with
# `ValueError: invalid number of arguments`.
f = vectorize(_foo1, otypes=[float])
# We're testing the caching of ufuncs by vectorize, so the order
# of these function calls is an important part of the test.
r1 = f(np.arange(3.0), 1.0)
r2 = f(np.arange(3.0))
assert_array_equal(r1, r2)
def test_keywords_with_otypes_order2(self):
# gh-1620: The second call of f would crash with
# `ValueError: non-broadcastable output operand with shape ()
# doesn't match the broadcast shape (3,)`.
f = vectorize(_foo1, otypes=[float])
# We're testing the caching of ufuncs by vectorize, so the order
# of these function calls is an important part of the test.
r1 = f(np.arange(3.0))
r2 = f(np.arange(3.0), 1.0)
assert_array_equal(r1, r2)
def test_keywords_with_otypes_order3(self):
# gh-1620: The third call of f would crash with
# `ValueError: invalid number of arguments`.
f = vectorize(_foo1, otypes=[float])
# We're testing the caching of ufuncs by vectorize, so the order
# of these function calls is an important part of the test.
r1 = f(np.arange(3.0))
r2 = f(np.arange(3.0), y=1.0)
r3 = f(np.arange(3.0))
assert_array_equal(r1, r2)
assert_array_equal(r1, r3)
def test_keywords_with_otypes_several_kwd_args1(self):
# gh-1620 Make sure different uses of keyword arguments
# don't break the vectorized function.
f = vectorize(_foo2, otypes=[float])
# We're testing the caching of ufuncs by vectorize, so the order
# of these function calls is an important part of the test.
r1 = f(10.4, z=100)
r2 = f(10.4, y=-1)
r3 = f(10.4)
assert_equal(r1, _foo2(10.4, z=100))
assert_equal(r2, _foo2(10.4, y=-1))
assert_equal(r3, _foo2(10.4))
def test_keywords_with_otypes_several_kwd_args2(self):
# gh-1620 Make sure different uses of keyword arguments
# don't break the vectorized function.
f = vectorize(_foo2, otypes=[float])
# We're testing the caching of ufuncs by vectorize, so the order
# of these function calls is an important part of the test.
r1 = f(z=100, x=10.4, y=-1)
r2 = f(1, 2, 3)
assert_equal(r1, _foo2(z=100, x=10.4, y=-1))
assert_equal(r2, _foo2(1, 2, 3))
def test_keywords_no_func_code(self):
# This needs to test a function that has keywords but
# no func_code attribute, since otherwise vectorize will
# inspect the func_code.
import random
try:
vectorize(random.randrange) # Should succeed
except Exception:
raise AssertionError # noqa: B904
def test_keywords2_ticket_2100(self):
# Test kwarg support: enhancement ticket 2100
def foo(a, b=1):
return a + b
f = vectorize(foo)
args = np.array([1, 2, 3])
r1 = f(a=args)
r2 = np.array([2, 3, 4])
assert_array_equal(r1, r2)
r1 = f(b=1, a=args)
assert_array_equal(r1, r2)
r1 = f(args, b=2)
r2 = np.array([3, 4, 5])
assert_array_equal(r1, r2)
def test_keywords3_ticket_2100(self):
# Test excluded with mixed positional and kwargs: ticket 2100
def mypolyval(x, p):
_p = list(p)
res = _p.pop(0)
while _p:
res = res * x + _p.pop(0)
return res
vpolyval = np.vectorize(mypolyval, excluded=["p", 1])
ans = [3, 6]
assert_array_equal(ans, vpolyval(x=[0, 1], p=[1, 2, 3]))
assert_array_equal(ans, vpolyval([0, 1], p=[1, 2, 3]))
assert_array_equal(ans, vpolyval([0, 1], [1, 2, 3]))
def test_keywords4_ticket_2100(self):
# Test vectorizing function with no positional args.
@vectorize
def f(**kw):
res = 1.0
for _k in kw:
res *= kw[_k]
return res
assert_array_equal(f(a=[1, 2], b=[3, 4]), [3, 8])
def test_keywords5_ticket_2100(self):
# Test vectorizing function with no kwargs args.
@vectorize
def f(*v):
return np.prod(v)
assert_array_equal(f([1, 2], [3, 4]), [3, 8])
def test_coverage1_ticket_2100(self):
def foo():
return 1
f = vectorize(foo)
assert_array_equal(f(), 1)
def test_assigning_docstring(self):
def foo(x):
"""Original documentation"""
return x
f = vectorize(foo)
assert_equal(f.__doc__, foo.__doc__)
doc = "Provided documentation"
f = vectorize(foo, doc=doc)
assert_equal(f.__doc__, doc)
def test_UnboundMethod_ticket_1156(self):
# Regression test for issue 1156
class Foo:
b = 2
def bar(self, a):
return a**self.b
assert_array_equal(vectorize(Foo().bar)(np.arange(9)), np.arange(9) ** 2)
assert_array_equal(vectorize(Foo.bar)(Foo(), np.arange(9)), np.arange(9) ** 2)
def test_execution_order_ticket_1487(self):
# Regression test for dependence on execution order: issue 1487
f1 = vectorize(lambda x: x)
res1a = f1(np.arange(3))
res1b = f1(np.arange(0.1, 3))
f2 = vectorize(lambda x: x)
res2b = f2(np.arange(0.1, 3))
res2a = f2(np.arange(3))
assert_equal(res1a, res2a)
assert_equal(res1b, res2b)
def test_string_ticket_1892(self):
# Test vectorization over strings: issue 1892.
f = np.vectorize(lambda x: x)
s = string.digits * 10
assert_equal(s, f(s))
def test_cache(self):
# Ensure that vectorized func called exactly once per argument.
_calls = [0]
@vectorize
def f(x):
_calls[0] += 1
return x**2
f.cache = True
x = np.arange(5)
assert_array_equal(f(x), x * x)
assert_equal(_calls[0], len(x))
def test_otypes(self):
f = np.vectorize(lambda x: x)
f.otypes = "i"
x = np.arange(5)
assert_array_equal(f(x), x)
def test_signature_mean_last(self):
def mean(a):
return a.mean()
f = vectorize(mean, signature="(n)->()")
r = f([[1, 3], [2, 4]])
assert_array_equal(r, [2, 3])
def test_signature_center(self):
def center(a):
return a - a.mean()
f = vectorize(center, signature="(n)->(n)")
r = f([[1, 3], [2, 4]])
assert_array_equal(r, [[-1, 1], [-1, 1]])
def test_signature_two_outputs(self):
f = vectorize(lambda x: (x, x), signature="()->(),()")
r = f([1, 2, 3])
assert_(isinstance(r, tuple) and len(r) == 2)
assert_array_equal(r[0], [1, 2, 3])
assert_array_equal(r[1], [1, 2, 3])
def test_signature_outer(self):
f = vectorize(np.outer, signature="(a),(b)->(a,b)")
r = f([1, 2], [1, 2, 3])
assert_array_equal(r, [[1, 2, 3], [2, 4, 6]])
r = f([[[1, 2]]], [1, 2, 3])
assert_array_equal(r, [[[[1, 2, 3], [2, 4, 6]]]])
r = f([[1, 0], [2, 0]], [1, 2, 3])
assert_array_equal(r, [[[1, 2, 3], [0, 0, 0]], [[2, 4, 6], [0, 0, 0]]])
r = f([1, 2], [[1, 2, 3], [0, 0, 0]])
assert_array_equal(r, [[[1, 2, 3], [2, 4, 6]], [[0, 0, 0], [0, 0, 0]]])
def test_signature_computed_size(self):
f = vectorize(lambda x: x[:-1], signature="(n)->(m)")
r = f([1, 2, 3])
assert_array_equal(r, [1, 2])
r = f([[1, 2, 3], [2, 3, 4]])
assert_array_equal(r, [[1, 2], [2, 3]])
def test_signature_excluded(self):
def foo(a, b=1):
return a + b
f = vectorize(foo, signature="()->()", excluded={"b"})
assert_array_equal(f([1, 2, 3]), [2, 3, 4])
assert_array_equal(f([1, 2, 3], b=0), [1, 2, 3])
def test_signature_otypes(self):
f = vectorize(lambda x: x, signature="(n)->(n)", otypes=["float64"])
r = f([1, 2, 3])
assert_equal(r.dtype, np.dtype("float64"))
assert_array_equal(r, [1, 2, 3])
def test_signature_invalid_inputs(self):
f = vectorize(operator.add, signature="(n),(n)->(n)")
with assert_raises_regex(TypeError, "wrong number of positional"):
f([1, 2])
with assert_raises_regex(ValueError, "does not have enough dimensions"):
f(1, 2)
with assert_raises_regex(ValueError, "inconsistent size for core dimension"):
f([1, 2], [1, 2, 3])
f = vectorize(operator.add, signature="()->()")
with assert_raises_regex(TypeError, "wrong number of positional"):
f(1, 2)
def test_signature_invalid_outputs(self):
f = vectorize(lambda x: x[:-1], signature="(n)->(n)")
with assert_raises_regex(ValueError, "inconsistent size for core dimension"):
f([1, 2, 3])
f = vectorize(lambda x: x, signature="()->(),()")
with assert_raises_regex(ValueError, "wrong number of outputs"):
f(1)
f = vectorize(lambda x: (x, x), signature="()->()")
with assert_raises_regex(ValueError, "wrong number of outputs"):
f([1, 2])
def test_size_zero_output(self):
# see issue 5868
f = np.vectorize(lambda x: x)
x = np.zeros([0, 5], dtype=int)
with assert_raises_regex(ValueError, "otypes"):
f(x)
f.otypes = "i"
assert_array_equal(f(x), x)
f = np.vectorize(lambda x: x, signature="()->()")
with assert_raises_regex(ValueError, "otypes"):
f(x)
f = np.vectorize(lambda x: x, signature="()->()", otypes="i")
assert_array_equal(f(x), x)
f = np.vectorize(lambda x: x, signature="(n)->(n)", otypes="i")
assert_array_equal(f(x), x)
f = np.vectorize(lambda x: x, signature="(n)->(n)")
assert_array_equal(f(x.T), x.T)
f = np.vectorize(lambda x: [x], signature="()->(n)", otypes="i")
with assert_raises_regex(ValueError, "new output dimensions"):
f(x)
@xpassIfTorchDynamo_np # (reason="TODO: implement")
|
TestVectorize
|
python
|
sqlalchemy__sqlalchemy
|
lib/sqlalchemy/dialects/sqlite/json.py
|
{
"start": 303,
"end": 1491
}
|
class ____(sqltypes.JSON):
"""SQLite JSON type.
SQLite supports JSON as of version 3.9 through its JSON1_ extension. Note
that JSON1_ is a
`loadable extension <https://www.sqlite.org/loadext.html>`_ and as such
may not be available, or may require run-time loading.
:class:`_sqlite.JSON` is used automatically whenever the base
:class:`_types.JSON` datatype is used against a SQLite backend.
.. seealso::
:class:`_types.JSON` - main documentation for the generic
cross-platform JSON datatype.
The :class:`_sqlite.JSON` type supports persistence of JSON values
as well as the core index operations provided by :class:`_types.JSON`
datatype, by adapting the operations to render the ``JSON_EXTRACT``
function wrapped in the ``JSON_QUOTE`` function at the database level.
Extracted values are quoted in order to ensure that the results are
always JSON string values.
.. _JSON1: https://www.sqlite.org/json1.html
"""
# Note: these objects currently match exactly those of MySQL, however since
# these are not generalizable to all JSON implementations, remain separately
# implemented for each dialect.
|
JSON
|
python
|
pytorch__pytorch
|
test/inductor/test_ck_backend.py
|
{
"start": 895,
"end": 14994
}
|
class ____(TestCase):
def setUp(self):
# The new inductor cache refresh mechanism
# introduced with https://github.com/pytorch/pytorch/pull/122661
# interacts badly with persistent subprocesses during
# autotuning. So we need to disable automatic cache refresh
# before calling setUp() on the parent class.
old_disable_fresh_cache_envvar = os.environ.get(
"INDUCTOR_TEST_DISABLE_FRESH_CACHE", ""
)
torch.random.manual_seed(1234)
self.ck_dir, _, _, _ = try_import_ck_lib()
if not self.ck_dir:
raise unittest.SkipTest("Composable Kernel library is not installed")
try:
os.environ["INDUCTOR_TEST_DISABLE_FRESH_CACHE"] = "1"
super().setUp()
finally:
os.environ["INDUCTOR_TEST_DISABLE_FRESH_CACHE"] = (
old_disable_fresh_cache_envvar
)
@unittest.skipIf(not torch.version.hip, "ROCM only")
@unittest.mock.patch.dict(os.environ, _test_env)
@parametrize("max_autotune_gemm_backends", ("CK", "CKTILE", "ATen,Triton,CK"))
@parametrize("autotune_in_subproc", (True, False))
@parametrize("use_aoti", (True, False))
def test_max_autotune_precompile_matmul(
self, max_autotune_gemm_backends, autotune_in_subproc, use_aoti
):
"""
Make sure autotuning mm doesn't crash.
"""
def mm(a, b):
return a @ b
tensor_options = {"device": "cuda", "dtype": torch.bfloat16}
a = torch.randn(2240, 256, **tensor_options)
b = torch.randn(256, 2048, **tensor_options)
assert "rocm" in dir(config)
with (
config.patch(
{
"max_autotune": True,
"autotune_in_subproc": autotune_in_subproc,
"max_autotune_gemm_backends": max_autotune_gemm_backends,
"compile_threads": 16,
"rocm.ck_max_profiling_configs": 8,
"rocm.ck_tile_max_profiling_configs": 8,
"rocm.ck_dir": self.ck_dir,
}
),
tf32_off(),
):
if use_aoti:
Y_compiled = AOTIRunnerUtil.run(
model=mm,
example_inputs=(a, b),
)
else:
@torch.compile(dynamic=False)
def compiled_mm(x, w):
return mm(x, w)
Y_compiled = compiled_mm(a, b)
Y = mm(a=a, b=b)
torch.testing.assert_close(Y_compiled, Y)
@unittest.skipIf(not torch.version.hip, "ROCM only")
@unittest.mock.patch.dict(os.environ, _test_env)
@parametrize("max_autotune_gemm_backends", ("CK",))
@parametrize("autotune_in_subproc", (True,))
def test_max_autotune_precompile_matmul_dynamic(
self, max_autotune_gemm_backends, autotune_in_subproc
):
"""
Test matmul with dynamic shapes
"""
tensor_options = {"device": "cuda", "dtype": torch.bfloat16}
a = torch.randn(2240, 256, **tensor_options)
b = torch.randn(256, 2048, **tensor_options)
torch._dynamo.mark_dynamic(a, 0)
assert "rocm" in dir(config)
with (
config.patch(
{
"max_autotune": True,
"autotune_in_subproc": autotune_in_subproc,
"max_autotune_gemm_backends": max_autotune_gemm_backends,
"compile_threads": 16,
"rocm.ck_max_profiling_configs": 8,
"rocm.ck_tile_max_profiling_configs": 8,
"rocm.ck_dir": self.ck_dir,
}
),
tf32_off(),
):
@torch.compile(dynamic=True)
def compiled_mm(a, b):
return a @ b
Y_compiled = compiled_mm(a, b)
Y = a @ b
torch.testing.assert_close(Y_compiled, Y)
a1 = torch.randn(1024, 256, **tensor_options)
Y1_compiled = compiled_mm(a1, b)
Y1 = a1 @ b
torch.testing.assert_close(Y1_compiled, Y1)
@unittest.skipIf(not torch.version.hip, "ROCM only")
@unittest.mock.patch.dict(os.environ, _test_env)
@parametrize("max_autotune_gemm_backends", ("CK", "ATen,Triton,CK"))
def test_max_autotune_precompile_preselected(self, max_autotune_gemm_backends):
"""
End to end test for picking preselected ck instances
"""
def mm(a, b):
return a @ b
tensor_options = {"device": "cuda", "dtype": torch.float16}
a = torch.randn(2240, 256, **tensor_options)
b = torch.randn(2048, 256, **tensor_options).transpose(0, 1)
assert "rocm" in dir(config)
with (
config.patch(
{
"max_autotune": True,
"autotune_in_subproc": True,
"max_autotune_gemm_backends": max_autotune_gemm_backends,
"compile_threads": 12,
"rocm.ck_dir": self.ck_dir,
"rocm.use_preselected_instances": True,
}
),
tf32_off(),
):
Y_compiled = torch.compile(mm, dynamic=False)(a, b)
Y = mm(a, b)
torch.testing.assert_close(Y_compiled, Y)
@unittest.skipIf(not torch.version.hip, "ROCM only")
@unittest.mock.patch.dict(os.environ, _test_env)
@parametrize("max_autotune_gemm_backends", ("Aten,CK",))
def test_max_autotune_precompile_non_contiguous(self, max_autotune_gemm_backends):
"""
Make sure the matmul with non-contiguous inputs can fallback
"""
tensor_options = {"device": "cuda", "dtype": torch.float16}
a = torch.empty_strided((50257, 32768), (1, 50304), **tensor_options)
b = torch.empty_strided((32768, 768), (768, 1), **tensor_options)
assert "rocm" in dir(config)
with (
config.patch(
{
"max_autotune": True,
"autotune_in_subproc": True,
"max_autotune_gemm_backends": max_autotune_gemm_backends,
"compile_threads": 16,
"rocm.ck_dir": self.ck_dir,
"rocm.ck_max_profiling_configs": 8,
"rocm.ck_tile_max_profiling_configs": 8,
}
),
tf32_off(),
):
@torch.compile(dynamic=False)
def mm(a, b):
return a @ b
Y_compiled = mm(a, b)
Y_eager = a @ b
torch.testing.assert_close(Y_compiled, Y_eager, equal_nan=True)
@unittest.skipIf(not torch.version.hip, "ROCM only")
@unittest.mock.patch.dict(os.environ, _test_env)
@parametrize("max_autotune_gemm_backends", ("CK", "ATen,Triton,CK"))
@parametrize("x_shape", ([4096, 2048], [2048], [4096, 1]))
def test_max_autotune_addmm(self, max_autotune_gemm_backends, x_shape):
m, k, n = 4096, 224, 2048
alpha, beta = 1.0, 1.0
tensor_options = {"device": "cuda", "dtype": torch.float16}
x = torch.ones(x_shape, **tensor_options)
a = torch.randn(m, k, **tensor_options)
b = torch.randn(k, n, **tensor_options)
assert "rocm" in dir(config)
with (
config.patch(
{
"max_autotune": True,
"autotune_in_subproc": True,
"max_autotune_gemm_backends": max_autotune_gemm_backends,
"compile_threads": 2,
"rocm.ck_dir": self.ck_dir,
"rocm.ck_max_profiling_configs": 2,
}
),
tf32_off(),
):
@torch.compile(dynamic=False)
def addmm(x, a, b, alpha, beta):
return torch.addmm(x, a, b, alpha=alpha, beta=beta)
Y_compiled = addmm(x, a, b, alpha, beta)
Y_eager = torch.addmm(x, a, b, alpha=alpha, beta=beta)
torch.testing.assert_close(Y_compiled, Y_eager)
@unittest.skip(
"FIXME(tenpercent): kernel compilation errors on gfx942 as of 09/01/25"
)
@unittest.skipIf(not torch.version.hip, "ROCM only")
@unittest.mock.patch.dict(os.environ, _test_env)
@parametrize("max_autotune_gemm_backends", ("CK", "ATen,Triton,CK"))
@parametrize("quantize_type", ("tensorwise", "rowwise"))
@parametrize("has_bias", (True, False))
def test_max_autotune_scaled_mm(
self, max_autotune_gemm_backends, quantize_type, has_bias
):
use_fast_accum = False
runtime_arch = torch.cuda.get_device_properties(0).gcnArchName
if "gfx94" not in runtime_arch and "gfx95" not in runtime_arch:
self.skipTest(f"Unsupported arch {runtime_arch}")
# output dtype
dtype = torch.bfloat16
tensor_options = {"device": "cuda", "dtype": dtype}
M = 2240
N = 2048
K = 256
x = torch.randn(M, K, **tensor_options)
w = torch.randn(N, K, **tensor_options)
bias = None
if has_bias:
bias = torch.randn(N, **tensor_options)
dtype_float8 = (
torch.float8_e4m3fnuz if "gfx94" in runtime_arch else torch.float8_e4m3fn
)
f_quantize = (
_quantize_tensorwise if quantize_type == "tensorwise" else _quantize_rowwise
)
# quantize weight (prior to inference)
w_fp8, w_inverse_scale = f_quantize(w, dtype_float8)
w_t_fp8 = w_fp8.t()
w_inverse_scale_t = w_inverse_scale.t()
# quantize input x
x_fp8, x_inverse_scale = f_quantize(x, dtype_float8)
assert "rocm" in dir(config)
def linear(x_fp8, x_inverse_scale, w_t_fp8, w_inverse_scale, bias):
y = torch._scaled_mm(
x_fp8,
w_t_fp8,
x_inverse_scale,
w_inverse_scale,
bias,
out_dtype=dtype,
use_fast_accum=use_fast_accum,
)
return y
y_eager = linear(
x_fp8,
x_inverse_scale,
w_t_fp8,
w_inverse_scale_t,
bias,
)
with config.patch(
{
"max_autotune": True,
"max_autotune_gemm_backends": max_autotune_gemm_backends,
"compile_threads": 24,
"rocm.ck_max_profiling_configs": 24,
"rocm.ck_dir": self.ck_dir,
}
):
linear_compiled = torch.compile(
linear, backend="inductor", mode="max-autotune"
)
y_compiled = linear_compiled(
x_fp8,
x_inverse_scale,
w_t_fp8,
w_inverse_scale_t,
bias,
)
self.assertEqual(y_eager.dtype, dtype)
self.assertEqual(y_compiled.dtype, dtype)
torch.testing.assert_close(y_eager, y_compiled, rtol=1e-2, atol=0.05)
@unittest.skipIf(not torch.version.hip, "ROCM only")
@unittest.mock.patch.dict(
os.environ,
{**_test_env, "PYTORCH_MIOPEN_SUGGEST_NHWC": "1"},
)
@parametrize("max_autotune_conv_backends", ("CK", "ATEN,CK,TRITON"))
def test_max_autotune_conv2d(self, max_autotune_conv_backends):
tensor_options = {"device": "cuda", "dtype": torch.float32}
x = torch.randn(1, 8, 224, 224, **tensor_options)
w = torch.randn(64, 8, 7, 7, **tensor_options)
x_cl = x.to(memory_format=torch.channels_last)
w_cl = w.to(memory_format=torch.channels_last)
assert "rocm" in dir(config)
with (
config.patch(
{
"max_autotune": True,
"autotune_in_subproc": False,
"max_autotune_conv_backends": max_autotune_conv_backends,
"compile_threads": 4,
"rocm.ck_dir": self.ck_dir,
"rocm.ck_max_profiling_configs": 4,
}
),
tf32_off(),
):
@torch.compile(dynamic=False)
def conv2d(x, w):
return torch.conv2d(x, w)
Y_eager = torch.conv2d(x_cl, w_cl)
Y_compiled = conv2d(x_cl, w_cl)
torch.testing.assert_close(Y_compiled, Y_eager, atol=2e-4, rtol=2e-4)
@unittest.skipIf(not torch.version.hip, "ROCM only")
@unittest.mock.patch.dict(os.environ, _test_env)
@parametrize("max_autotune_gemm_backends", ("CK", "ATen,Triton,CK"))
def test_max_autotune_precompile_bmm(
self,
max_autotune_gemm_backends,
):
"""
Test gemm-max-autotune torch.bmm with CK backend
"""
def bmm(a, b):
return torch.bmm(a, b)
tensor_options = {"device": "cuda", "dtype": torch.bfloat16}
a = torch.randn(16, 2240, 256, **tensor_options)
b = torch.randn(16, 2048, 256, **tensor_options).transpose(1, 2)
assert "rocm" in dir(config)
with (
config.patch(
{
"max_autotune": True,
"max_autotune_gemm_backends": max_autotune_gemm_backends,
"compile_threads": 2,
"rocm.ck_max_profiling_configs": 2,
"rocm.ck_dir": self.ck_dir,
}
),
tf32_off(),
):
@torch.compile(dynamic=False)
def compiled_bmm(x, w):
return bmm(x, w)
Y_compiled = compiled_bmm(a, b)
Y_eager = bmm(a=a, b=b)
torch.testing.assert_close(Y_compiled, Y_eager)
if __name__ == "__main__":
from torch._inductor.utils import is_big_gpu
# Set env to make it work in CI.
if HAS_CUDA_AND_TRITON and HAS_CPU and is_big_gpu():
run_tests()
|
TestCKBackend
|
python
|
bottlepy__bottle
|
test/test_formsdict.py
|
{
"start": 104,
"end": 513
}
|
class ____(unittest.TestCase):
def test_attr_access(self):
""" FomsDict.attribute returs string values as unicode. """
d = FormsDict(py3='瓶')
self.assertEqual('瓶', d.py3)
self.assertEqual('瓶', d["py3"])
def test_attr_missing(self):
""" FomsDict.attribute returs u'' on missing keys. """
d = FormsDict()
self.assertEqual('', d.missing)
|
TestFormsDict
|
python
|
google__jax
|
jax/_src/ad_checkpoint.py
|
{
"start": 17177,
"end": 41374
}
|
class ____:
val: Any
hash: int
hashable: bool
def __init__(self, val):
self.val = val
try:
self.hash = hash(val)
self.hashable = True
except:
self.hash = id(val)
self.hashable = False
def __hash__(self):
return self.hash
def __eq__(self, other):
if isinstance(other, WrapHashably):
if self.hashable and other.hashable:
return self.val == other.val
else:
return self.val is other.val
return False
# This caching is useful to avoid retracing even when static_argnums is used.
# See api_benchmark.py:bench_remat_eager_retracing_overheads_static_argnums.
# On that benchmark, including this caching makes a ~10x difference (which can
# be made arbitrary large by involving larger functions to be traced).
def _dyn_args_fun(fun: Callable, static_argnums: frozenset[int],
static_args: tuple[WrapHashably, ...], nargs: int):
if any(isinstance(x.val, core.Tracer) for x in static_args):
return _dyn_args_fun_uncached(fun, static_argnums, static_args, nargs)
return _dyn_args_fun_cached(fun, static_argnums, static_args, nargs)
def _dyn_args_fun_uncached(fun: Callable, static_argnums: frozenset[int],
static_args: tuple[WrapHashably, ...], nargs: int):
def new_fun(*dyn_args, **kwargs):
static_args_, dyn_args_ = iter(static_args), iter(dyn_args)
full_args = [next(static_args_).val if i in static_argnums
else next(dyn_args_) for i in range(nargs)]
return fun(*full_args, **kwargs)
return new_fun
_dyn_args_fun_cached = weakref_lru_cache(_dyn_args_fun_uncached)
# This helper is similar to those in control_flow/common.py, but with
# remat-specific errors.
@weakref_lru_cache
def _trace_to_jaxpr(fun: Callable,
in_tree: PyTreeDef,
in_avals: Sequence[core.AbstractValue],
debug: core.DebugInfo
) -> tuple[core.Jaxpr, Sequence[Any], PyTreeDef]:
flat_fun, out_tree = api_util.flatten_fun(lu.wrap_init(fun, debug_info=debug), in_tree)
try:
jaxpr, _, consts = pe.trace_to_jaxpr_dynamic(flat_fun, in_avals)
except core.ConcretizationTypeError as e:
msg, = e.args
if 'for checkpoint' in msg:
msg += "\n\n" + (
"Consider using the `static_argnums` parameter for `jax.remat` or "
"`jax.checkpoint`. See the `jax.checkpoint` docstring and its example "
"involving `static_argnums`:\n"
"https://docs.jax.dev/en/latest/_autosummary/jax.checkpoint.html"
"\n")
e.args = msg,
raise
return pe.convert_constvars_jaxpr(jaxpr), consts, out_tree()
### Utilities
def saved_residuals(f: Callable,
*args, **kwargs) -> list[tuple[core.AbstractValue, str]]:
in_leaves, in_tree = tree_flatten((args, kwargs))
def f_(*args):
args, kwargs = tree_unflatten(in_tree, args)
return f(*args, **kwargs)
debug_info = api_util.debug_info("saved_residuals", f, args, kwargs)
out = api.make_jaxpr(lambda *args: api.linearize(f_, *args),
return_shape=True)(*in_leaves)
assert isinstance(out, tuple)
jaxpr_, out_shape_ = out
jaxpr = jaxpr_.jaxpr
out_shape = out_shape_[1]
num_res = tree_structure(out_shape).num_leaves
jaxpr = jaxpr.replace(
outvars=jaxpr.outvars[len(jaxpr.outvars) - num_res:],
debug_info=debug_info._replace(result_paths=None))
assert len(jaxpr.invars) == len(in_leaves)
return _saved_residuals(jaxpr, debug_info.arg_names or ("unknown",) * len(jaxpr.invars))
def _saved_residuals(jaxpr: core.Jaxpr,
arg_names: Sequence[str]) -> list[tuple[core.AbstractValue, str]]:
res_lits = [x for x in jaxpr.outvars if isinstance(x, core.Literal)]
res_vars = {x for x in jaxpr.outvars if not isinstance(x, core.Literal)}
results = []
for x in res_lits:
results.append((x.aval, 'from a literal'))
for v in jaxpr.constvars:
if v in res_vars:
results.append((v.aval, 'from a constant'))
for i, v in enumerate(jaxpr.invars):
if v in res_vars:
if arg_names[i]:
src = f'from the argument {arg_names[i]}'
else:
src = 'from the argument at flattened index {i}'
results.append((v.aval, src))
named_vars = {v: e for e in jaxpr.eqns if e.primitive is name_p
for v in e.invars}
for eqn in jaxpr.eqns:
src = source_info_util.summarize(eqn.source_info)
for v in eqn.outvars:
if v in res_vars:
if eqn.primitive is name_p or v in named_vars and (eqn := named_vars[v]):
results.append((v.aval, f"named '{eqn.params['name']}' from {src}"))
elif eqn.primitive.name == 'jit':
results.append((v.aval,
f"output of jitted function '{eqn.params['name']}' "
f"from {src}"))
else:
results.append((v.aval, f'output of {eqn.primitive.name} from {src}'))
assert len(results) == len(jaxpr.outvars)
return results
def print_saved_residuals(f, *args, **kwargs):
for aval, src in saved_residuals(f, *args, **kwargs):
print(f'{aval.str_short(short_dtypes=True)} {src}')
### Implementation
remat_p = core.Primitive('remat2')
remat_p.multiple_results = True
def _remat_bind(*args, jaxpr, prevent_cse, differentiated, policy):
assert isinstance(prevent_cse, bool) or len(prevent_cse) == len(args)
return core.Primitive.bind(remat_p, *args, jaxpr=jaxpr, prevent_cse=prevent_cse,
differentiated=differentiated, policy=policy)
remat_p.bind = _remat_bind # type: ignore
@remat_p.def_impl
def remat_impl(*args, jaxpr, prevent_cse, differentiated, policy):
del prevent_cse, differentiated, policy # Unused.
return core.eval_jaxpr(jaxpr, (), *args)
@remat_p.def_effectful_abstract_eval
def remat_abstract_eval(*args, jaxpr, prevent_cse, differentiated, policy):
del args, prevent_cse, differentiated, policy # Unused.
return [v.aval for v in jaxpr.outvars], core.eqn_effects(jaxpr)
def remat_jvp(primals, tangents, jaxpr, prevent_cse, differentiated, policy):
assert not jaxpr.constvars
in_nonzeros = [type(t) is not ad_util.Zero for t in tangents]
jaxpr_jvp_, out_nz = ad.jvp_jaxpr(pe.close_jaxpr(jaxpr), in_nonzeros, False)
nonzero_tangents = [t for t in tangents if type(t) is not ad_util.Zero]
jaxpr_jvp = pe.convert_constvars_jaxpr(jaxpr_jvp_.jaxpr)
if isinstance(prevent_cse, tuple):
prevent_cse += (True,) * len(nonzero_tangents)
outs = remat_p.bind(
*jaxpr_jvp_.consts, *primals, *nonzero_tangents, jaxpr=jaxpr_jvp,
prevent_cse=prevent_cse, differentiated=differentiated, policy=policy)
out_primals, out_tangents_ = split_list(outs, [len(jaxpr.outvars)])
out_tangents_ = iter(out_tangents_)
out_tangents = [next(out_tangents_) if nz else ad_util.Zero.from_primal_value(p)
for p, nz in zip(out_primals, out_nz)]
return out_primals, out_tangents
ad.primitive_jvps[remat_p] = remat_jvp
def remat_partial_eval(trace: pe.JaxprTrace, *tracers: core.Tracer,
                       jaxpr: core.Jaxpr, prevent_cse, **params):
  """Partial-eval rule for remat_p.

  Splits the jaxpr into a known part (evaluated eagerly, hoisted out of the
  remat primitive, producing known outputs plus residuals) and an unknown part
  (staged out as a new remat_p equation taking the residuals and the unknown
  tracers as inputs). The `policy` parameter controls which intermediates may
  be saved as residuals.
  """
  assert not jaxpr.constvars
  disallowed_effects = effects.remat_allowed_effects.filter_not_in(jaxpr.effects)
  if disallowed_effects:
    raise NotImplementedError(
        'Effects not supported in partial-eval of `checkpoint`/`remat`: '
        f'{disallowed_effects}')
  policy = params['policy'] or nothing_saveable
  in_unknowns = [not t.is_known() for t in tracers]
  jaxpr_known, jaxpr_staged, out_unknowns, out_inst, num_res = \
      pe.partial_eval_jaxpr_custom(
          jaxpr, in_unknowns, [True] * len(in_unknowns), False, False, policy)

  # DCE jaxpr_staged, keeping only instantiated outputs which are unknown
  _, out_inst_unknown = partition_list(out_inst, out_unknowns)
  jaxpr_unknown, in_used_staged = pe.dce_jaxpr(jaxpr_staged, out_inst_unknown)
  used_res, in_used_staged = split_list(in_used_staged, [num_res])

  # DCE jaxpr_known, keeping all known outputs but discarding dce'd res
  out_used_known = [True] * (len(out_unknowns) - sum(out_unknowns)) + used_res
  jaxpr_known, in_used_known = pe.dce_jaxpr(jaxpr_known, out_used_known)
  num_res = sum(used_res)

  # To avoid precision mismatches in fwd and bwd passes due to XLA excess
  # precision, insert explicit x = reduce_precision(x, **finfo(x.dtype)) calls
  # on producers of any residuals. See https://github.com/jax-ml/jax/pull/22244.
  jaxpr_known_ = _insert_reduce_precision(jaxpr_known, num_res)

  # Compute known outputs and residuals (hoisted out of remat primitive)
  _, in_consts_ = unzip2(t.pval for t in tracers if t.pval.is_known())
  _, in_consts = partition_list(in_used_known, in_consts_)
  out_consts = core.eval_jaxpr(jaxpr_known_, (), *in_consts)
  out_knowns, residuals = split_list(out_consts, [len(out_consts)-num_res])

  # set up unknown outputs with a recipe to call remat
  res_tracers = map(trace.new_instantiated_const, residuals)
  _, tracers_staged = partition_list(in_used_staged, tracers)
  in_jaxpr_tracers = res_tracers + map(trace.instantiate_const, tracers_staged)  # type: ignore
  out_jaxpr_tracers = [pe.JaxprTracer(trace, pe.PartialVal.unknown(x.aval), None)
                       for x in jaxpr_unknown.outvars]
  # Residual inputs always get prevent_cse=True.
  if isinstance(prevent_cse, tuple):
    _, prevent_cse_ = partition_list(in_used_staged, prevent_cse)
    prevent_cse = (True,) * len(res_tracers) + tuple(prevent_cse_)
  new_params = dict(params, jaxpr=jaxpr_unknown, differentiated=True,
                    prevent_cse=prevent_cse)
  recipe = pe.new_eqn_recipe(trace, in_jaxpr_tracers, out_jaxpr_tracers, remat_p,
                             new_params, core.eqn_effects(jaxpr_unknown),
                             source_info_util.current())

  # log info about saved residuals
  log_level = logging.WARNING if config.log_checkpoint_residuals.value else logging.DEBUG
  if logger.isEnabledFor(log_level):
    try:
      _, staged_unk = partition_list(in_used_staged, in_unknowns)
      res_invars, _ = partition_list(staged_unk, jaxpr_unknown.invars[num_res:])
      res_outvars = jaxpr_known.outvars[len(jaxpr_known.outvars) - num_res:]
      body_res = _saved_residuals(jaxpr_known.replace(outvars=res_outvars),
                                  ("",) * len(jaxpr_known.invars))
      logger.log(log_level,
                 'remat-decorated function ' +
                 'saving inputs with shapes:\n' * bool(res_invars) +
                 '  %s\n' * len(res_invars) +
                 'and ' * bool(res_invars) * bool(body_res) +
                 'saving these intermediates:\n' * bool(body_res) +
                 '  %s from %s\n' * len(body_res),
                 *[v.aval.str_short() for v in res_invars],
                 *[elt for (a, s) in body_res for elt in [a.str_short(), s]])
    except:
      pass  # just don't log anything on failure

  for t in out_jaxpr_tracers: t.recipe = recipe

  # zip together known and unknown outputs
  return merge_lists(out_unknowns, out_knowns, out_jaxpr_tracers)
pe.custom_partial_eval_rules[remat_p] = remat_partial_eval
@weakref_lru_cache
def _insert_reduce_precision(jaxpr: core.Jaxpr, num_res: int) -> core.Jaxpr:
  """Return a copy of `jaxpr` with reduce_precision applied to residuals.

  For each of the last `num_res` outputs that is an inexact-dtype ShapedArray
  and is also consumed inside the jaxpr, a reduce_precision equation (with the
  dtype's own exponent/mantissa widths) is inserted at the producer, so the
  value observed inside and outside the jaxpr has identical precision.
  """
  res_vars = jaxpr.outvars[len(jaxpr.outvars) - num_res:]
  used_vars = {x for e in jaxpr.eqns for x in e.invars if isinstance(x, core.Var)}
  invars, constvars, eqns = jaxpr.invars[:], jaxpr.constvars[:], jaxpr.eqns[:]
  for v in res_vars:
    # Only inexact (float/complex) arrays can lose excess precision.
    if (not isinstance(v.aval, core.ShapedArray) or
        not dtypes.issubdtype(v.aval.dtype, np.inexact)):
      continue
    # Residuals not read inside the jaxpr can't cause a mismatch.
    if v not in used_vars:
      continue
    assert isinstance(v, core.Var)
    newvar = core.Var(v.aval)
    finfo = dtypes.finfo(v.aval.dtype)
    params = dict(exponent_bits=finfo.nexp, mantissa_bits=finfo.nmant)
    if v in constvars or v in invars:
      # The residual is an input: rename the input and reduce its precision
      # first thing in the body.
      lst = constvars if v in constvars else invars
      new_eqn = core.new_jaxpr_eqn(
          [newvar], [v], lax_internal.reduce_precision_p, params, set())
      lst[lst.index(v)] = newvar
      eqns.insert(0, new_eqn)
    else:
      # The residual is produced by an equation: rewire the producer to a
      # fresh var and reduce its precision immediately after.
      (eqn_idx, eqn), = ((i, e) for i, e in enumerate(eqns) if v in e.outvars)
      if (eqn.primitive == lax_internal.reduce_precision_p and
          eqn.params == params):
        continue  # already at the right precision
      replace_eqn = eqn.replace(outvars=[v_ if v_ != v else newvar
                                         for v_ in eqn.outvars])
      new_eqn = core.new_jaxpr_eqn(
          [newvar], [v], lax_internal.reduce_precision_p, params, set(),
          eqn.source_info, eqn.ctx)
      eqns[eqn_idx] = replace_eqn
      eqns.insert(eqn_idx+1, new_eqn)
  new_jaxpr = jaxpr.replace(invars=invars, constvars=constvars, eqns=eqns)
  config.enable_checks.value and core.check_jaxpr(new_jaxpr)
  return new_jaxpr
def remat_partial_eval_custom_params_updater(*args):
  """Split per-argument prevent_cse flags between known and staged equations.

  Used by pe.call_partial_eval_custom_rule; also marks the staged equation
  as differentiated.
  """
  unks_in, inst_in, *_, params_known, params_staged = args
  prevent_cse = params_known['prevent_cse']
  assert prevent_cse == params_staged['prevent_cse']
  if isinstance(prevent_cse, tuple):
    # Known eqn keeps flags of known inputs; staged eqn keeps flags of
    # instantiated inputs.
    prevent_cse_known, _ = partition_list(unks_in, prevent_cse)
    _, prevent_cse_staged = partition_list(inst_in, prevent_cse)
    params_known = dict(params_known, prevent_cse=tuple(prevent_cse_known))
    params_staged = dict(params_staged, prevent_cse=tuple(prevent_cse_staged))
  return params_known, dict(params_staged, differentiated=True)
pe.partial_eval_jaxpr_custom_rules[remat_p] = \
    partial(pe.call_partial_eval_custom_rule, 'jaxpr',
            remat_partial_eval_custom_params_updater)
def remat_transpose(out_cts, *in_primals, jaxpr, prevent_cse, **params):
  """Transpose rule: transpose the stored jaxpr and rebind remat_p on it."""
  assert not jaxpr.constvars
  in_linear = [ad.is_undefined_primal(x) for x in in_primals]
  out_zeros = [type(ct) is ad_util.Zero for ct in out_cts]
  transposed_jaxpr_, in_zeros = transpose_jaxpr(
      pe.close_jaxpr(jaxpr), in_linear, out_zeros)
  transposed_jaxpr, consts = transposed_jaxpr_.jaxpr, transposed_jaxpr_.consts
  transposed_jaxpr = pe.convert_constvars_jaxpr(transposed_jaxpr)
  args, _ = tree_flatten((in_primals, out_cts))
  # Nonlinear primals keep their flags; incoming nonzero cotangents get True.
  if isinstance(prevent_cse, tuple):
    prevent_cse_, _ = partition_list(in_linear, prevent_cse)
    prevent_cse = tuple(prevent_cse_) + (True,) * (len(out_zeros) - sum(out_zeros))
  in_cts_nz = remat_p.bind(*consts, *args, jaxpr=transposed_jaxpr,
                           prevent_cse=prevent_cse, **params)
  # Scatter the nonzero cotangents back into position, with None for known
  # primals and symbolic zeros where the transpose produced none.
  in_cts_nz_, in_zeros_ = iter(in_cts_nz), iter(in_zeros)
  in_cts = [None if not ad.is_undefined_primal(x) else
            ad_util.Zero(x.aval) if next(in_zeros_) else next(in_cts_nz_)
            for x in in_primals]
  assert next(in_cts_nz_, None) is next(in_zeros_, None) is None
  return in_cts
ad.primitive_transposes[remat_p] = remat_transpose
# TODO(mattjj): move this to ad.py
def transpose_jaxpr(jaxpr: core.ClosedJaxpr, in_linear: bool | Sequence[bool],
                    out_zeros: bool | Sequence[bool],
                    ) -> tuple[core.ClosedJaxpr, list[bool]]:
  """Broadcast scalar bool flags to per-input/per-output tuples and transpose."""
  if isinstance(in_linear, bool):
    in_linear = (in_linear,) * len(jaxpr.in_avals)
  if isinstance(out_zeros, bool):
    out_zeros = (out_zeros,) * len(jaxpr.out_avals)
  return _transpose_jaxpr(jaxpr, tuple(in_linear), tuple(out_zeros))
@weakref_lru_cache
def _transpose_jaxpr(jaxpr: core.ClosedJaxpr,
                     in_lin: Sequence[bool],
                     out_zeros: Sequence[bool]):
  """Build the transposed jaxpr for a linearized-and-rematerialized jaxpr.

  Inputs to the transposed jaxpr are the nonlinear primals followed by the
  nonzero output cotangents; outputs are the nonzero input cotangents. Also
  returns which input cotangents turned out to be symbolic zeros.
  """
  in_avals = ([a for a, lin in zip(jaxpr.in_avals, in_lin ) if not lin] +
              [a for a, zero in zip(jaxpr.out_avals, out_zeros) if not zero])
  cell = lambda: None  # mutable box to smuggle in_cts_zero out of the trace
  def transposed(*args_flat):
    ins_flat, out_cts_flat = split_list(args_flat, [len(in_lin) - sum(in_lin)])

    # Evaluate nonlinear parts using partial evaluation to get a linear jaxpr.
    ins_iter = iter(ins_flat)
    in_pvals = [pe.PartialVal.unknown(aval) if lin else
                pe.PartialVal.known(next(ins_iter))
                for aval, lin in zip(jaxpr.in_avals, in_lin)]
    assert next(ins_iter, None) is None
    # TODO(mattjj): revise not to require disabling checks
    with config.mutable_array_checks(False):
      jaxpr_rematted, lin_jaxpr, out_uk, res_avals = \
          pe.partial_eval_jaxpr_nounits(jaxpr, in_lin, False)
    with source_info_util.extend_name_stack('rematted_computation'):
      consts = core.jaxpr_as_fun(jaxpr_rematted)(*ins_flat)

    # Transpose the linear jaxpr (which only has linear inputs).
    out_cts_iter = iter(out_cts_flat)
    out_cts = [ad_util.Zero(aval) if zero else next(out_cts_iter)
               for aval, zero in zip(jaxpr.out_avals, out_zeros)]
    assert next(out_cts_iter, None) is None
    dummy_args = [ad.UndefinedPrimal(aval) for aval in lin_jaxpr.in_avals[len(consts):]]
    in_cts = ad.backward_pass(lin_jaxpr.jaxpr, False, lin_jaxpr.consts,
                              [*consts, *dummy_args], out_cts)
    in_cts = in_cts[len(consts):]

    # Identify symbolic zeros in the resulting cotangents, and return nonzeros.
    in_zeros = cell.in_cts_zero = [type(ct) is ad_util.Zero for ct in in_cts]
    in_cts_nz, _ = partition_list(in_zeros, in_cts)
    return in_cts_nz

  dbg = jaxpr.jaxpr.debug_info.with_unknown_names()
  transposed_wrapped = lu.wrap_init(transposed, debug_info=dbg)
  transposed_jaxpr_, _, consts = pe.trace_to_jaxpr_dynamic(
      transposed_wrapped, in_avals)
  transposed_jaxpr = core.ClosedJaxpr(transposed_jaxpr_, consts)
  return transposed_jaxpr, cell.in_cts_zero  # pytype: disable=attribute-error
def remat_vmap(axis_data, args, dims, *, jaxpr, **params):
  """Batching rule: batch the stored jaxpr and rebind remat_p on it."""
  assert not jaxpr.constvars
  jaxpr_batched_, out_batched = batching.batch_jaxpr_axes(
      pe.close_jaxpr(jaxpr), axis_data, dims,
      [batching.zero_if_mapped] * len(jaxpr.outvars))
  jaxpr_batched, consts = jaxpr_batched_.jaxpr, jaxpr_batched_.consts
  if consts:
    jaxpr_batched = pe.convert_constvars_jaxpr(jaxpr_batched)
  # Batched outputs carry the mapped axis at position 0; others are unmapped.
  out_dims = [0 if b else None for b in out_batched]
  return remat_p.bind(*consts, *args, jaxpr=jaxpr_batched, **params), out_dims
batching.fancy_primitive_batchers[remat_p] = remat_vmap
# TODO(mattjj,sharadmv): de-duplicate with pe.dce_jaxpr_call_rule
def remat_dce(used_outputs: list[bool], eqn: core.JaxprEqn
              ) -> tuple[list[bool], core.JaxprEqn | None]:
  """Dead-code-elimination rule: DCE the inner jaxpr and rebuild the eqn."""
  # With no used outputs and no effects, drop the equation entirely.
  if not any(used_outputs) and not pe.has_effects(eqn):
    return [False] * len(eqn.invars), None
  new_jaxpr, used_inputs = pe.dce_jaxpr(eqn.params['jaxpr'], used_outputs)
  # Keep only the prevent_cse flags of surviving inputs.
  prevent_cse = eqn.params['prevent_cse']
  if isinstance(prevent_cse, tuple):
    prevent_cse = tuple(p for p, u in zip(prevent_cse, used_inputs) if u)
  new_params = dict(eqn.params, jaxpr=new_jaxpr, prevent_cse=prevent_cse)
  if (not any(used_inputs) and not any(used_outputs) and
      _has_effects(new_jaxpr.effects)):
    return used_inputs, None
  else:
    new_eqn = pe.new_jaxpr_eqn(
        [v for v, used in zip(eqn.invars, used_inputs) if used],
        [v for v, used in zip(eqn.outvars, used_outputs) if used],
        eqn.primitive, new_params, core.eqn_effects(new_jaxpr),
        eqn.source_info, eqn.ctx)
    return used_inputs, new_eqn
pe.dce_rules[remat_p] = remat_dce
def _has_effects(effects) -> bool:
return bool({e for e in effects if not isinstance(e, core.NamedAxisEffect)})
def remat_expansion(
    *args, jaxpr: core.Jaxpr, prevent_cse: bool, differentiated: bool, **_
):
  """Expand remat into a named call, guarding against CSE when required.

  On the differentiated (backward-pass) computation with prevent_cse set,
  an optimization barrier is inserted before evaluating the jaxpr; otherwise
  the jaxpr is evaluated directly.
  """
  assert not jaxpr.constvars

  if differentiated and prevent_cse:
    translation_rule = _remat_translation_using_opt_barrier
  else:
    translation_rule = lambda *args, jaxpr: core.eval_jaxpr(jaxpr, (), *args)

  return api.named_call(translation_rule, name="checkpoint")(*args, jaxpr=jaxpr)
def _remat_translation_using_opt_barrier(*args, jaxpr: core.Jaxpr):
  """Evaluate `jaxpr` with an optimization barrier on its inputs (blocks CSE)."""
  args = lax_internal.optimization_barrier(args)
  return core.eval_jaxpr(jaxpr, (), *args)
def _remat_lowering(
    ctx: mlir.LoweringRuleContext,
    *args,
    jaxpr: core.Jaxpr,
    prevent_cse: bool,
    differentiated: bool,
    policy,
):
  """MLIR lowering for remat_p.

  On the differentiated computation, inputs whose prevent_cse flag is set are
  routed through an hlo.OptimizationBarrierOp before the jaxpr body is lowered,
  preventing the compiler from de-duplicating the rematerialized work.
  """
  # Normalize prevent_cse to one flag per input.
  if isinstance(prevent_cse, bool):
    prevent_cse = (prevent_cse,) * len(ctx.avals_in)  # type: ignore
  assert isinstance(prevent_cse, tuple)
  if differentiated and any(prevent_cse):
    _, barrier_avals = partition_list(prevent_cse, ctx.avals_in)
    other_args, barrier_args = partition_list(prevent_cse, args)
    barrier_op = hlo.OptimizationBarrierOp(
        mlir.flatten_ir_values(barrier_args))
    barrier_results = mlir.unflatten_ir_values_like_types(
        barrier_op.results, map(mlir.aval_to_ir_type, barrier_avals))
    # Re-interleave barrier outputs with the untouched arguments.
    args = merge_lists(prevent_cse, other_args, barrier_results)  # type: ignore
  outs, tokens_out = mlir.jaxpr_subcomp(
      ctx.module_context, jaxpr, ctx.name_stack.extend('checkpoint'),
      ctx.tokens_in, (), *args, dim_var_values=ctx.dim_var_values,
      const_lowering=ctx.const_lowering)
  ctx.set_tokens_out(tokens_out)
  return outs
mlir.register_lowering(remat_p, _remat_lowering)
def checkpoint_name(x, name):
  """Tag every leaf of pytree `x` with `name` via the name primitive."""
  return tree_map(partial(name_p.bind, name=name), x)
# name_p is a no-op tag: it returns its operand unchanged both concretely
# and abstractly; the `name` parameter only labels the value.
name_p.def_impl(lambda x, *, name: x)
name_p.def_abstract_eval(lambda x, *, name: x)
def name_jvp(primals, tangents, *, name):
  """JVP rule: the primal keeps its name tag; the tangent passes through."""
  (x,), (xdot,) = primals, tangents
  return name_p.bind(x, name=name), xdot  # don't name the tangent value
ad.primitive_jvps[name_p] = name_jvp
# Lowering: name_p is the identity at the MLIR level.
mlir.register_lowering(name_p, lambda ctx, x, *, name: [x])

def name_batcher(args, dims, *, name):
  """Batching rule: rebind with the same name; the batch dim is unchanged."""
  (x,), (d,) = args, dims
  return name_p.bind(x, name=name), d
batching.primitive_batchers[name_p] = name_batcher
@functools.wraps(checkpoint)
def checkpoint_wrapper(
    fun: Callable,
    *,
    concrete: bool = False,
    prevent_cse: bool = True,
    static_argnums: int | tuple[int, ...] = (),
    policy: Callable[..., bool] | None = None,
) -> Callable:
  """Compatibility wrapper around `checkpoint` that rejects `concrete=True`
  with a migration message pointing at `static_argnums`."""
  if concrete:
    msg = ("The 'concrete' option to jax.checkpoint / jax.remat is deprecated; "
           "in its place, you can use its `static_argnums` option, and if "
           "necessary the `jax.ensure_compile_time_eval()` context manager.\n"
           "\n"
           "For example, if using `concrete=True` for an `is_training` flag:\n"
           "\n"
           "  from functools import partial\n"
           "\n"
           "  @partial(jax.checkpoint, concrete=True)\n"
           "  def foo(x, is_training):\n"
           "    if is_training:\n"
           "      return f(x)\n"
           "    else:\n"
           "      return g(x)\n"
           "\n"
           "replace it with a use of `static_argnums`:\n"
           "\n"
           "  @partial(jax.checkpoint, static_argnums=(1,))\n"
           "  def foo(x, is_training):\n"
           "    ...\n"
           "\n"
           "If jax.numpy operations need to be performed on static arguments, "
           "we can use the `jax.ensure_compile_time_eval()` context manager. "
           "For example, we can replace this use of `concrete=True`\n:"
           "\n"
           "  @partial(jax.checkpoint, concrete=True)\n"
           "  def foo(x, y):\n"
           "    if y > 0:\n"
           "      return f(x)\n"
           "    else:\n"
           "      return g(x)\n"
           "\n"
           "with this combination of `static_argnums` and "
           "`jax.ensure_compile_time_eval()`:\n"
           "\n"
           "  @partial(jax.checkpoint, static_argnums=(1,))\n"
           "  def foo(x, y):\n"
           "    with jax.ensure_compile_time_eval():\n"
           "      y_pos = y > 0\n"
           "    if y_pos:\n"
           "      return f(x)\n"
           "    else:\n"
           "      return g(x)\n"
           "\n"
           "See https://docs.jax.dev/en/latest/jep/11830-new-remat-checkpoint.html\n")
    raise NotImplementedError(msg)
  return checkpoint(fun, prevent_cse=prevent_cse, policy=policy,
                    static_argnums=static_argnums)
@discharge.register_discharge_rule(remat_p)
def _remat_state_discharge_rule(
    in_avals, out_avals, *args, jaxpr, **params):
  """State-discharge rule: run remat on the discharged jaxpr.

  The discharged jaxpr returns the original outputs followed by the final
  values of the refs; the latter are matched back up with the AbstractRef
  input positions.
  """
  discharged_jaxpr, () = discharge.discharge_state(jaxpr, [])
  out_vals_ref_vals = remat_p.bind(*args, jaxpr=discharged_jaxpr, **params)
  out_vals, ref_vals = split_list(out_vals_ref_vals, [len(jaxpr.outvars)])
  # Ref inputs get their new value; plain inputs get None.
  ref_vals_ = iter(ref_vals)
  new_invals = [next(ref_vals_) if isinstance(a, AbstractRef) else None
                for a in in_avals]
  assert next(ref_vals_, None) is None
  return new_invals, out_vals
|
WrapHashably
|
python
|
sqlalchemy__sqlalchemy
|
test/ext/test_associationproxy.py
|
{
"start": 110606,
"end": 112175
}
|
class ____(fixtures.DeclarativeMappedTest):
    """Tests for an association proxy targeting a plain Python @property.

    `A.b_data` proxies `bs` -> `B.value`, where `value` is a property over
    the mapped `data` column, so the proxy target is not directly mapped
    ("ambiguous" at the class level).
    """

    # No tables are created for these tests.
    run_create_tables = None

    @classmethod
    def setup_classes(cls):
        Base = cls.DeclarativeBasic

        class A(Base):
            __tablename__ = "a"
            id = Column(Integer, primary_key=True)
            bs = relationship("B")
            # Proxies each B's `value` property through the `bs` collection.
            b_data = association_proxy("bs", "value")

        class B(Base):
            __tablename__ = "b"
            id = Column(Integer, primary_key=True)
            aid = Column(ForeignKey("a.id"))
            data = Column(String(50))

            # Plain property over `data`; not itself a mapped attribute.
            @property
            def value(self):
                return self.data

            @value.setter
            def value(self, value):
                self.data = value

    def test_get_ambiguous(self):
        # Instance-level get works through the property.
        A, B = self.classes("A", "B")
        a1 = A(bs=[B(data="b1")])
        eq_(a1.b_data[0], "b1")

    def test_set_ambiguous(self):
        # Instance-level set goes through the property setter.
        A, B = self.classes("A", "B")
        a1 = A(bs=[B()])
        a1.b_data[0] = "b1"
        eq_(a1.b_data[0], "b1")

    def test_get_classlevel_ambiguous(self):
        # Class-level access yields an AmbiguousAssociationProxyInstance.
        A, B = self.classes("A", "B")
        eq_(
            str(A.b_data),
            "AmbiguousAssociationProxyInstance"
            "(AssociationProxy('bs', 'value'))",
        )

    def test_expr_ambiguous(self):
        # SQL-expression use raises, since `value` is not directly mapped.
        A, B = self.classes("A", "B")
        assert_raises_message(
            AttributeError,
            "Association proxy A.bs refers to an attribute "
            "'value' that is not directly mapped",
            lambda: A.b_data == 5,
        )
|
ProxyPlainPropertyTest
|
python
|
TheAlgorithms__Python
|
data_structures/linked_list/reverse_k_group.py
|
{
"start": 192,
"end": 3076
}
|
class ____:
    """Singly linked list of ints built from an iterable, supporting
    in-place reversal of consecutive groups of k nodes."""

    def __init__(self, ints: Iterable[int]) -> None:
        # head is None for an empty list.
        self.head: Node | None = None
        for i in ints:
            self.append(i)

    def __iter__(self) -> Iterator[int]:
        """
        >>> ints = []
        >>> list(LinkedList(ints)) == ints
        True
        >>> ints = tuple(range(5))
        >>> tuple(LinkedList(ints)) == ints
        True
        """
        node = self.head
        while node:
            yield node.data
            node = node.next_node

    def __len__(self) -> int:
        """
        >>> for i in range(3):
        ...     len(LinkedList(range(i))) == i
        True
        True
        True
        >>> len(LinkedList("abcdefgh"))
        8
        """
        # O(n): counts by traversing the list.
        return sum(1 for _ in self)

    def __str__(self) -> str:
        """
        >>> str(LinkedList([]))
        ''
        >>> str(LinkedList(range(5)))
        '0 -> 1 -> 2 -> 3 -> 4'
        """
        return " -> ".join([str(node) for node in self])

    def append(self, data: int) -> None:
        """
        >>> ll = LinkedList([1, 2])
        >>> tuple(ll)
        (1, 2)
        >>> ll.append(3)
        >>> tuple(ll)
        (1, 2, 3)
        >>> ll.append(4)
        >>> tuple(ll)
        (1, 2, 3, 4)
        >>> len(ll)
        4
        """
        # O(n) append: walk to the tail and link a new node.
        if not self.head:
            self.head = Node(data)
            return
        node = self.head
        while node.next_node:
            node = node.next_node
        node.next_node = Node(data)

    def reverse_k_nodes(self, group_size: int) -> None:
        """
        reverse nodes within groups of size k
        >>> ll = LinkedList([1, 2, 3, 4, 5])
        >>> ll.reverse_k_nodes(2)
        >>> tuple(ll)
        (2, 1, 4, 3, 5)
        >>> str(ll)
        '2 -> 1 -> 4 -> 3 -> 5'
        """
        if self.head is None or self.head.next_node is None:
            return

        length = len(self)
        # Dummy node before head simplifies re-linking the first group.
        dummy_head = Node(0)
        dummy_head.next_node = self.head
        previous_node = dummy_head

        # Only full groups are reversed; a trailing partial group is left as-is.
        while length >= group_size:
            current_node = previous_node.next_node
            assert current_node
            next_node = current_node.next_node
            for _ in range(1, group_size):
                # Move next_node to the front of the current group.
                assert next_node, current_node
                current_node.next_node = next_node.next_node
                assert previous_node
                next_node.next_node = previous_node.next_node
                previous_node.next_node = next_node
                next_node = current_node.next_node
            previous_node = current_node
            length -= group_size
        self.head = dummy_head.next_node
if __name__ == "__main__":
    import doctest

    # Run the docstring examples, then show a small demo.
    doctest.testmod()
    ll = LinkedList([1, 2, 3, 4, 5])
    print(f"Original Linked List: {ll}")
    k = 2
    ll.reverse_k_nodes(k)
    print(f"After reversing groups of size {k}: {ll}")
|
LinkedList
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.