language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | scrapy__scrapy | tests/test_spidermiddleware_referer.py | {
"start": 31972,
"end": 32236
} | class ____(MixinNoReferrer, TestRefererMiddleware):
settings = {
"REFERRER_POLICY": "scrapy.spidermiddlewares.referer.NoReferrerWhenDowngradePolicy"
}
resp_headers = {"Referrer-Policy": POLICY_NO_REFERRER.swapcase()}
| TestPolicyHeaderPrecedence002 |
python | pytorch__pytorch | torch/_dynamo/variables/base.py | {
"start": 6504,
"end": 7845
} | class ____(AttributeMutation):
"""
This case of VariableTracker.mutation_type marker indicates
1. Dynamo allows mutation on the value's attributes.
2. The value is created by the bytecode Dynamo is tracing through.
For instance, Dynamo could model a newly created object with this marker,
indicating that while we need to model mutations to this object, we don't
have to emit bytecode for these mutations if the object doesn't escape into
the Python world.
"""
def __init__(self, cls_source: Optional[Source] = None) -> None:
super().__init__(SourceType.New)
self.cls_source = cls_source
def _is_top_level_scope(scope_id: int) -> bool:
return scope_id == 1
def is_side_effect_safe(m: MutationType) -> bool:
scope_id = current_scope_id()
# In the top-level scope (if no HigherOrderOperators are involved),
# we are allowed to modify variables created in this scope as well
# as existing variables.
if _is_top_level_scope(scope_id):
return True
# Otherwise, only allow local mutation of variables created in the current scope
return m.scope == scope_id
# This helps users of `as_python_constant` to catch unimplemented error with
# more information; it inherits `NotImplementedError` for backward
# compatibility reasons.
| AttributeMutationNew |
python | doocs__leetcode | solution/3500-3599/3596.Minimum Cost Path with Alternating Directions I/Solution.py | {
"start": 0,
"end": 233
} | class ____:
def minCost(self, m: int, n: int) -> int:
if m == 1 and n == 1:
return 1
if m == 2 and n == 1:
return 3
if m == 1 and n == 2:
return 3
return -1
| Solution |
python | matplotlib__matplotlib | lib/matplotlib/streamplot.py | {
"start": 20494,
"end": 26778
} | class ____(IndexError):
pass
def _integrate_rk12(x0, y0, dmap, f, maxlength, broken_streamlines=True,
integration_max_step_scale=1.0,
integration_max_error_scale=1.0):
"""
2nd-order Runge-Kutta algorithm with adaptive step size.
This method is also referred to as the improved Euler's method, or Heun's
method. This method is favored over higher-order methods because:
1. To get decent looking trajectories and to sample every mask cell
on the trajectory we need a small timestep, so a lower order
solver doesn't hurt us unless the data is *very* high resolution.
In fact, for cases where the user inputs
data smaller or of similar grid size to the mask grid, the higher
order corrections are negligible because of the very fast linear
interpolation used in `interpgrid`.
2. For high resolution input data (i.e. beyond the mask
resolution), we must reduce the timestep. Therefore, an adaptive
timestep is more suited to the problem as this would be very hard
to judge automatically otherwise.
This integrator is about 1.5 - 2x as fast as RK4 and RK45 solvers (using
similar Python implementations) in most setups.
"""
# This error is below that needed to match the RK4 integrator. It
# is set for visual reasons -- too low and corners start
# appearing ugly and jagged. Can be tuned.
maxerror = 0.003 * integration_max_error_scale
# This limit is important (for all integrators) to avoid the
# trajectory skipping some mask cells. We could relax this
# condition if we use the code which is commented out below to
# increment the location gradually. However, due to the efficient
# nature of the interpolation, this doesn't boost speed by much
# for quite a bit of complexity.
maxds = min(1. / dmap.mask.nx, 1. / dmap.mask.ny, 0.1)
maxds *= integration_max_step_scale
ds = maxds
stotal = 0
xi = x0
yi = y0
xyf_traj = []
while True:
try:
if dmap.grid.within_grid(xi, yi):
xyf_traj.append((xi, yi))
else:
raise OutOfBounds
# Compute the two intermediate gradients.
# f should raise OutOfBounds if the locations given are
# outside the grid.
k1x, k1y = f(xi, yi)
k2x, k2y = f(xi + ds * k1x, yi + ds * k1y)
except OutOfBounds:
# Out of the domain during this step.
# Take an Euler step to the boundary to improve neatness
# unless the trajectory is currently empty.
if xyf_traj:
ds, xyf_traj = _euler_step(xyf_traj, dmap, f)
stotal += ds
break
except TerminateTrajectory:
break
dx1 = ds * k1x
dy1 = ds * k1y
dx2 = ds * 0.5 * (k1x + k2x)
dy2 = ds * 0.5 * (k1y + k2y)
ny, nx = dmap.grid.shape
# Error is normalized to the axes coordinates
error = np.hypot((dx2 - dx1) / (nx - 1), (dy2 - dy1) / (ny - 1))
# Only save step if within error tolerance
if error < maxerror:
xi += dx2
yi += dy2
try:
dmap.update_trajectory(xi, yi, broken_streamlines)
except InvalidIndexError:
break
if stotal + ds > maxlength:
break
stotal += ds
# recalculate stepsize based on step error
if error == 0:
ds = maxds
else:
ds = min(maxds, 0.85 * ds * (maxerror / error) ** 0.5)
return stotal, xyf_traj
def _euler_step(xyf_traj, dmap, f):
"""Simple Euler integration step that extends streamline to boundary."""
ny, nx = dmap.grid.shape
xi, yi = xyf_traj[-1]
cx, cy = f(xi, yi)
if cx == 0:
dsx = np.inf
elif cx < 0:
dsx = xi / -cx
else:
dsx = (nx - 1 - xi) / cx
if cy == 0:
dsy = np.inf
elif cy < 0:
dsy = yi / -cy
else:
dsy = (ny - 1 - yi) / cy
ds = min(dsx, dsy)
xyf_traj.append((xi + cx * ds, yi + cy * ds))
return ds, xyf_traj
# Utility functions
# ========================
def interpgrid(a, xi, yi):
"""Fast 2D, linear interpolation on an integer grid"""
Ny, Nx = np.shape(a)
if isinstance(xi, np.ndarray):
x = xi.astype(int)
y = yi.astype(int)
# Check that xn, yn don't exceed max index
xn = np.clip(x + 1, 0, Nx - 1)
yn = np.clip(y + 1, 0, Ny - 1)
else:
x = int(xi)
y = int(yi)
# conditional is faster than clipping for integers
if x == (Nx - 1):
xn = x
else:
xn = x + 1
if y == (Ny - 1):
yn = y
else:
yn = y + 1
a00 = a[y, x]
a01 = a[y, xn]
a10 = a[yn, x]
a11 = a[yn, xn]
xt = xi - x
yt = yi - y
a0 = a00 * (1 - xt) + a01 * xt
a1 = a10 * (1 - xt) + a11 * xt
ai = a0 * (1 - yt) + a1 * yt
if not isinstance(xi, np.ndarray):
if np.ma.is_masked(ai):
raise TerminateTrajectory
return ai
def _gen_starting_points(shape):
"""
Yield starting points for streamlines.
Trying points on the boundary first gives higher quality streamlines.
This algorithm starts with a point on the mask corner and spirals inward.
This algorithm is inefficient, but fast compared to rest of streamplot.
"""
ny, nx = shape
xfirst = 0
yfirst = 1
xlast = nx - 1
ylast = ny - 1
x, y = 0, 0
direction = 'right'
for i in range(nx * ny):
yield x, y
if direction == 'right':
x += 1
if x >= xlast:
xlast -= 1
direction = 'up'
elif direction == 'up':
y += 1
if y >= ylast:
ylast -= 1
direction = 'left'
elif direction == 'left':
x -= 1
if x <= xfirst:
xfirst += 1
direction = 'down'
elif direction == 'down':
y -= 1
if y <= yfirst:
yfirst += 1
direction = 'right'
| OutOfBounds |
python | ray-project__ray | python/ray/tune/syncer.py | {
"start": 242,
"end": 2172
} | class ____(TrainSyncConfig):
"""Configuration object for Tune file syncing to `RunConfig(storage_path)`.
In Ray Tune, here is where syncing (mainly uploading) happens:
The experiment driver (on the head node) syncs the experiment directory to storage
(which includes experiment state such as searcher state, the list of trials
and their statuses, and trial metadata).
It's also possible to sync artifacts from the trial directory to storage
by setting `sync_artifacts=True`.
For a Ray Tune run with many trials, each trial will upload its trial directory
to storage, which includes arbitrary files that you dumped during the run.
Args:
sync_period: Minimum time in seconds to wait between two sync operations.
A smaller ``sync_period`` will have the data in storage updated more often
but introduces more syncing overhead. Defaults to 5 minutes.
sync_timeout: Maximum time in seconds to wait for a sync process
to finish running. A sync operation will run for at most this long
before raising a `TimeoutError`. Defaults to 30 minutes.
sync_artifacts: [Beta] Whether or not to sync artifacts that are saved to the
trial directory (accessed via `ray.tune.get_context().get_trial_dir()`)
to the persistent storage configured via `tune.RunConfig(storage_path)`.
The trial or remote worker will try to launch an artifact syncing
operation every time `tune.report` happens, subject to `sync_period`
and `sync_artifacts_on_checkpoint`.
Defaults to False -- no artifacts are persisted by default.
sync_artifacts_on_checkpoint: If True, trial/worker artifacts are
forcefully synced on every reported checkpoint.
This only has an effect if `sync_artifacts` is True.
Defaults to True.
"""
pass
| SyncConfig |
python | tensorflow__tensorflow | tensorflow/python/debug/lib/debug_events_reader.py | {
"start": 22501,
"end": 25206
} | class ____(BaseDigest):
"""Data object describing the creation of an op inside a graph.
For size efficiency, this digest object does not contain any stack frames or
any references to them. To obtain the stack frames, use
`DataReader.read_graph_op_creation_stack_trace()`.
Properties (beyond the base class):
graph_id: Debugger-generated ID of the immediately-enclosing graph.
op_type: Type name of the op (e.g., "MatMul").
op_name: Name of the op (e.g., "dense_1/MatMul").
output_tensor_ids: Debugger-generated IDs for the output(s) of the op.
If the op produces no output tensor, this is `None`. Else, this is a
`tuple` of `int`s.
input_names: Names of the input tensors to the op.
device_name: The name of the device that the op is placed on (if available).
host_name: Name of the host on which the op is created.
stack_frame_ids: IDs of the frames of the stack trace at which the op
is created.
"""
def __init__(self,
wall_time,
locator,
graph_id,
op_type,
op_name,
output_tensor_ids,
host_name,
stack_frame_ids,
input_names=None,
device_name=None):
super().__init__(wall_time, locator)
self._graph_id = graph_id
self._op_type = op_type
self._op_name = op_name
self._output_tensor_ids = _tuple_or_none(output_tensor_ids)
self._host_name = host_name
self._stack_frame_ids = stack_frame_ids
self._input_names = _tuple_or_none(input_names)
self._device_name = device_name
@property
def graph_id(self):
return self._graph_id
@property
def op_type(self):
return self._op_type
@property
def op_name(self):
return self._op_name
@property
def output_tensor_ids(self):
return self._output_tensor_ids
@property
def num_outputs(self):
return len(self._output_tensor_ids) if self.output_tensor_ids else 0
@property
def input_names(self):
return self._input_names
@property
def device_name(self):
return self._device_name
@property
def host_name(self):
return self._host_name
@property
def stack_frame_ids(self):
return self._stack_frame_ids
def to_json(self):
output = super().to_json()
output.update({
"graph_id": self.graph_id,
"op_type": self.op_type,
"op_name": self.op_name,
"output_tensor_ids": self.output_tensor_ids,
"host_name": self.host_name,
"stack_frame_ids": self.stack_frame_ids,
"input_names": self.input_names,
"device_name": self.device_name,
})
return output
| GraphOpCreationDigest |
python | kamyu104__LeetCode-Solutions | Python/maximum-product-of-subsequences-with-an-alternating-sum-equal-to-k.py | {
"start": 83,
"end": 1071
} | class ____(object):
def maxProduct(self, nums, k, limit):
"""
:type nums: List[int]
:type k: int
:type limit: int
:rtype: int
"""
total = sum(nums)
if k > total or k < -total: # optimized to speed up
return -1
dp = collections.defaultdict(set)
for x in nums:
new_dp = collections.defaultdict(set, {k:set(v) for k, v in dp.iteritems()})
new_dp[(1, x)].add(min(x, limit+1))
for (p, total), products in dp.iteritems():
new_state = (p^1, total+(x if p == 0 else -x))
for v in products:
new_dp[new_state].add(min(v*x, limit+1))
dp = new_dp
result = -1
for (p, total), products in dp.iteritems():
if total != k:
continue
for v in products:
if v <= limit:
result = max(result, v)
return result
| Solution |
python | keras-team__keras | keras/src/legacy/saving/serialization.py | {
"start": 2901,
"end": 4397
} | class ____:
"""A context manager for keeping track of loaded objects.
During the deserialization process, we may come across objects that are
shared across multiple layers. In order to accurately restore the network
structure to its original state, `SharedObjectLoadingScope` allows us to
re-use shared objects rather than cloning them.
"""
def __enter__(self):
if _shared_object_disabled():
return NoopLoadingScope()
global SHARED_OBJECT_LOADING
SHARED_OBJECT_LOADING.scope = self
self._obj_ids_to_obj = {}
return self
def get(self, object_id):
"""Given a shared object ID, returns a previously instantiated object.
Args:
object_id: shared object ID to use when attempting to find
already-loaded object.
Returns:
The object, if we've seen this ID before. Else, `None`.
"""
# Explicitly check for `None` internally to make external calling code a
# bit cleaner.
if object_id is None:
return
return self._obj_ids_to_obj.get(object_id)
def set(self, object_id, obj):
"""Stores an instantiated object for future lookup and sharing."""
if object_id is None:
return
self._obj_ids_to_obj[object_id] = obj
def __exit__(self, *args, **kwargs):
global SHARED_OBJECT_LOADING
SHARED_OBJECT_LOADING.scope = NoopLoadingScope()
| SharedObjectLoadingScope |
python | dask__dask | dask/_expr.py | {
"start": 32716,
"end": 32863
} | class ____(HLGExpr):
# Identical to HLGExpr
# Used internally to determine how output keys are supposed to be returned
pass
| _HLGExprGroup |
python | apache__airflow | task-sdk/tests/task_sdk/definitions/test_callback.py | {
"start": 8030,
"end": 9339
} | class ____:
@pytest.mark.parametrize(
("callback_callable", "executor"),
[
pytest.param(empty_sync_callback_for_deadline_tests, "remote", id="with_executor"),
pytest.param(empty_sync_callback_for_deadline_tests, None, id="without_executor"),
pytest.param(qualname(empty_sync_callback_for_deadline_tests), None, id="importable_path"),
pytest.param(UNIMPORTABLE_DOT_PATH, None, id="unimportable_path"),
],
)
def test_init(self, callback_callable, executor):
callback = SyncCallback(TEST_CALLBACK_PATH, kwargs=TEST_CALLBACK_KWARGS, executor=executor)
assert callback.path == TEST_CALLBACK_PATH
assert callback.kwargs == TEST_CALLBACK_KWARGS
assert callback.executor == executor
assert isinstance(callback, Callback)
def test_serialize_deserialize(self):
callback = SyncCallback(TEST_CALLBACK_PATH, kwargs=TEST_CALLBACK_KWARGS, executor="local")
serialized = serialize(callback)
deserialized = cast("Callback", deserialize(serialized.copy()))
assert callback == deserialized
# While DeadlineReference lives in the SDK package, the unit tests to confirm it
# works need database access so they live in the models/test_deadline.py module.
| TestSyncCallback |
python | django__django | tests/custom_lookups/tests.py | {
"start": 3704,
"end": 4545
} | class ____(models.lookups.LessThanOrEqual):
"""
The purpose of this lookup is to efficiently compare the year of the field.
"""
def as_sql(self, compiler, connection):
# Skip the YearTransform above us (no possibility for efficient
# lookup otherwise).
real_lhs = self.lhs.lhs
lhs_sql, params = self.process_lhs(compiler, connection, real_lhs)
rhs_sql, rhs_params = self.process_rhs(compiler, connection)
params = (*params, *rhs_params)
# Build SQL where the integer year is concatenated with last month
# and day, then convert that to date. (We try to have SQL like:
# WHERE somecol <= '2013-12-31')
# but also make it work if the rhs_sql is field reference.
return "%s <= (%s || '-12-31')::date" % (lhs_sql, rhs_sql), params
| YearLte |
python | getsentry__sentry | src/sentry/sentry_metrics/consumers/last_seen_updater.py | {
"start": 3366,
"end": 5755
} | class ____(ProcessingStrategyFactory[KafkaPayload]):
def __init__(
self,
max_batch_size: int,
max_batch_time: float,
ingest_profile: str,
indexer_db: str,
) -> None:
from sentry.sentry_metrics.configuration import (
IndexerStorage,
UseCaseKey,
get_ingest_config,
)
self.config = get_ingest_config(UseCaseKey(ingest_profile), IndexerStorage(indexer_db))
self.__use_case_id = self.config.use_case_id
self.__max_batch_size = max_batch_size
self.__max_batch_time = max_batch_time
self.__metrics = get_metrics()
self.__prefilter = LastSeenUpdaterMessageFilter(metrics=self.__metrics)
def __should_accept(self, message: Message[KafkaPayload]) -> bool:
return not self.__prefilter.should_drop(message)
def create_with_partitions(
self,
commit: Commit,
partitions: Mapping[Partition, int],
) -> ProcessingStrategy[KafkaPayload]:
def accumulator(result: set[int], value: BaseValue[set[int]]) -> set[int]:
result.update(value.payload)
return result
initial_value: Callable[[], set[int]] = lambda: set()
def do_update(message: Message[set[int]]) -> None:
table = TABLE_MAPPING[self.__use_case_id]
seen_ints = message.payload
keys_to_pass_to_update = len(seen_ints)
logger.debug("%s unique keys seen", keys_to_pass_to_update)
self.__metrics.incr(
"last_seen_updater.unique_update_candidate_keys", amount=keys_to_pass_to_update
)
with self.__metrics.timer("last_seen_updater.postgres_time"):
update_count = _update_stale_last_seen(table, seen_ints)
self.__metrics.incr("last_seen_updater.updated_rows_count", amount=update_count)
logger.debug("%s keys updated", update_count)
collect_step: Reduce[set[int], set[int]] = Reduce(
self.__max_batch_size,
self.__max_batch_time,
accumulator,
initial_value,
RunTask(do_update, CommitOffsets(commit)),
)
transform_step = RunTask(retrieve_db_read_keys, collect_step)
return FilterStep(self.__should_accept, transform_step, commit_policy=ONCE_PER_SECOND)
| LastSeenUpdaterStrategyFactory |
python | readthedocs__readthedocs.org | readthedocs/audit/tests/test_models.py | {
"start": 290,
"end": 3451
} | class ____(TestCase):
def setUp(self):
self.user = get(User)
self.project = get(Project, users=[self.user])
self.organization = get(Organization, projects=[self.project])
get(OrganizationOwner, organization=self.organization, owner=self.user)
self.auditlog = get(
AuditLog,
user=self.user,
project=self.project,
organization=self.organization,
)
def test_user_deletion(self):
id = self.user.id
username = self.user.username
self.user.delete()
self.auditlog.refresh_from_db()
self.assertIsNone(self.auditlog.user)
self.assertEqual(self.auditlog.log_user_id, id)
self.assertEqual(self.auditlog.log_user_username, username)
def test_project_deletion(self):
id = self.project.id
slug = self.project.slug
self.project.delete()
self.auditlog.refresh_from_db()
self.assertIsNone(self.auditlog.project)
self.assertEqual(self.auditlog.log_project_id, id)
self.assertEqual(self.auditlog.log_project_slug, slug)
def test_organization_deletion(self):
id = self.organization.id
slug = self.organization.slug
self.organization.delete()
self.auditlog.refresh_from_db()
self.assertIsNone(self.auditlog.organization)
self.assertEqual(self.auditlog.log_organization_id, id)
self.assertEqual(self.auditlog.log_organization_slug, slug)
def test_log_attached_to_user_only(self):
log = get(
AuditLog,
user=self.user,
)
self.assertEqual(log.user, self.user)
self.assertEqual(log.log_user_id, self.user.id)
self.assertEqual(log.log_user_username, self.user.username)
def test_log_attached_to_project_with_organization_only(self):
log = get(
AuditLog,
project=self.project,
)
self.assertEqual(log.project, self.project)
self.assertEqual(log.log_project_id, self.project.id)
self.assertEqual(log.log_project_slug, self.project.slug)
self.assertEqual(log.organization, self.organization)
self.assertEqual(log.log_organization_id, self.organization.id)
self.assertEqual(log.log_organization_slug, self.organization.slug)
def test_log_attached_to_organization_only(self):
log = get(
AuditLog,
organization=self.organization,
)
self.assertEqual(log.organization, self.organization)
self.assertEqual(log.log_organization_id, self.organization.id)
self.assertEqual(log.log_organization_slug, self.organization.slug)
def test_truncate_browser(self):
text = "a" * 250
log = get(
AuditLog,
user=self.user,
browser=text,
)
self.assertEqual(log.browser, text)
text = "a" * 300
log = get(
AuditLog,
user=self.user,
browser=text,
)
self.assertNotEqual(log.browser, text)
self.assertTrue(log.browser.endswith(" - Truncated"))
| TestAuditModels |
python | tornadoweb__tornado | tornado/routing.py | {
"start": 23103,
"end": 25140
} | class ____(Rule):
"""Specifies mappings between URLs and handlers.
.. versionchanged: 4.5
`URLSpec` is now a subclass of a `Rule` with `PathMatches` matcher and is preserved for
backwards compatibility.
"""
def __init__(
self,
pattern: Union[str, Pattern],
handler: Any,
kwargs: Optional[Dict[str, Any]] = None,
name: Optional[str] = None,
) -> None:
"""Parameters:
* ``pattern``: Regular expression to be matched. Any capturing
groups in the regex will be passed in to the handler's
get/post/etc methods as arguments (by keyword if named, by
position if unnamed. Named and unnamed capturing groups
may not be mixed in the same rule).
* ``handler``: `~.web.RequestHandler` subclass to be invoked.
* ``kwargs`` (optional): A dictionary of additional arguments
to be passed to the handler's constructor.
* ``name`` (optional): A name for this handler. Used by
`~.web.Application.reverse_url`.
"""
matcher = PathMatches(pattern)
super().__init__(matcher, handler, kwargs, name)
self.regex = matcher.regex
self.handler_class = self.target
self.kwargs = kwargs
def __repr__(self) -> str:
return "{}({!r}, {}, kwargs={!r}, name={!r})".format(
self.__class__.__name__,
self.regex.pattern,
self.handler_class,
self.kwargs,
self.name,
)
@overload
def _unquote_or_none(s: str) -> bytes:
pass
@overload # noqa: F811
def _unquote_or_none(s: None) -> None:
pass
def _unquote_or_none(s: Optional[str]) -> Optional[bytes]: # noqa: F811
"""None-safe wrapper around url_unescape to handle unmatched optional
groups correctly.
Note that args are passed as bytes so the handler can decide what
encoding to use.
"""
if s is None:
return s
return url_unescape(s, encoding=None, plus=False)
| URLSpec |
python | PrefectHQ__prefect | src/integrations/prefect-gitlab/prefect_gitlab/repositories.py | {
"start": 1812,
"end": 6479
} | class ____(ReadableDeploymentStorage):
"""
Interact with files stored in GitLab repositories.
An accessible installation of git is required for this block to function
properly.
"""
_block_type_name = "GitLab Repository"
_logo_url = "https://images.ctfassets.net/gm98wzqotmnx/55edIimT4g9gbjhkh5a3Sp/dfdb9391d8f45c2e93e72e3a4d350771/gitlab-logo-500.png?h=250"
_description = "Interact with files stored in GitLab repositories."
repository: str = Field(
default=...,
description=(
"The URL of a GitLab repository to read from, in either HTTP/HTTPS or SSH format." # noqa
),
)
reference: Optional[str] = Field(
default=None,
description="An optional reference to pin to; can be a branch name or tag.",
)
git_depth: Optional[int] = Field(
default=1,
gte=1,
description="The number of commits that Git history is truncated to "
"during cloning. Set to None to fetch the entire history.",
)
credentials: Optional[GitLabCredentials] = Field(
default=None,
description="An optional GitLab Credentials block for authenticating with "
"private GitLab repos.",
)
def _create_repo_url(self) -> str:
"""Format the URL provided to the `git clone` command.
For private repos: https://<oauth-key>@gitlab.com/<username>/<repo>.git
All other repos should be the same as `self.repository`.
"""
url_components = urllib.parse.urlparse(self.repository)
if url_components.scheme in ["https", "http"] and self.credentials is not None:
token = self.credentials.token.get_secret_value()
updated_components = url_components._replace(
netloc=f"oauth2:{token}@{url_components.netloc}"
)
full_url = urllib.parse.urlunparse(updated_components)
else:
full_url = self.repository
return full_url
@staticmethod
def _get_paths(
dst_dir: Union[str, None], src_dir: str, sub_directory: Optional[str]
) -> Tuple[str, str]:
"""Returns the fully formed paths for GitLabRepository contents in the form
(content_source, content_destination).
"""
if dst_dir is None:
content_destination = Path(".").absolute()
else:
content_destination = Path(dst_dir)
content_source = Path(src_dir)
if sub_directory:
content_destination = content_destination.joinpath(sub_directory)
content_source = content_source.joinpath(sub_directory)
return str(content_source), str(content_destination)
@sync_compatible
@retry(
stop=stop_after_attempt(MAX_CLONE_ATTEMPTS),
wait=wait_fixed(CLONE_RETRY_MIN_DELAY_SECONDS)
+ wait_random(
CLONE_RETRY_MIN_DELAY_JITTER_SECONDS,
CLONE_RETRY_MAX_DELAY_JITTER_SECONDS,
),
reraise=True,
)
async def get_directory(
self, from_path: Optional[str] = None, local_path: Optional[str] = None
) -> None:
"""
Clones a GitLab project specified in `from_path` to the provided `local_path`;
defaults to cloning the repository reference configured on the Block to the
present working directory.
Args:
from_path: If provided, interpreted as a subdirectory of the underlying
repository that will be copied to the provided local path.
local_path: A local path to clone to; defaults to present working directory.
"""
# CONSTRUCT COMMAND
cmd = ["git", "clone", self._create_repo_url()]
if self.reference:
cmd += ["-b", self.reference]
# Limit git history
if self.git_depth is not None:
cmd += ["--depth", f"{self.git_depth}"]
# Clone to a temporary directory and move the subdirectory over
with TemporaryDirectory(suffix="prefect") as tmp_dir:
cmd.append(tmp_dir)
err_stream = io.StringIO()
out_stream = io.StringIO()
process = await run_process(cmd, stream_output=(out_stream, err_stream))
if process.returncode != 0:
err_stream.seek(0)
raise OSError(f"Failed to pull from remote:\n {err_stream.read()}")
content_source, content_destination = self._get_paths(
dst_dir=local_path, src_dir=tmp_dir, sub_directory=from_path
)
shutil.copytree(
src=content_source, dst=content_destination, dirs_exist_ok=True
)
| GitLabRepository |
python | kamyu104__LeetCode-Solutions | Python/largest-triangle-area.py | {
"start": 32,
"end": 844
} | class ____(object):
def largestTriangleArea(self, points):
"""
:type points: List[List[int]]
:rtype: float
"""
result = 0
for i in xrange(len(points)-2):
for j in xrange(i+1, len(points)-1):
for k in xrange(j+1, len(points)):
result = max(result,
0.5 * abs(points[i][0] * points[j][1] +
points[j][0] * points[k][1] +
points[k][0] * points[i][1] -
points[j][0] * points[i][1] -
points[k][0] * points[j][1] -
points[i][0] * points[k][1]))
return result
| Solution |
python | kamyu104__LeetCode-Solutions | Python/decode-string.py | {
"start": 29,
"end": 608
} | class ____(object):
def decodeString(self, s):
"""
:type s: str
:rtype: str
"""
n, curr, nums, strs = 0, [], [], []
for c in s:
if c.isdigit():
n = n*10 + ord(c)-ord('0')
elif c.isalpha():
curr.append(c)
elif c == '[':
nums.append(n)
strs.append(curr)
n, curr = 0, []
elif c == ']':
strs[-1].extend(curr*nums.pop())
curr = strs.pop()
return "".join(curr)
| Solution |
python | skorch-dev__skorch | skorch/tests/test_doctor.py | {
"start": 22074,
"end": 29178
} | class ____:
"""Tests based on a more non-standard model
Specifically, add modules with non-standard names and non-standard outputs
like tuples or dicts.
This test class does not re-iterate all the tests performed on the standard
model but focuses on the parts that change, e.g. how the outputs are
recorded.
"""
@pytest.fixture(scope='module')
def module0_cls(self):
"""Module that returns a tuple"""
class MyModule(nn.Module):
"""Module that returns a tuple, lin0 no grad"""
def __init__(self):
super().__init__()
self.lin0 = nn.Linear(20, 20)
self.lin0.requires_grad_(False)
self.lin1 = nn.Linear(20, 2)
def forward(self, X):
X0 = self.lin0(X)
X1 = self.lin1(X)
return X0, X1
return MyModule
@pytest.fixture(scope='module')
def module1_cls(self):
"""Module without learnable params that returns a dict"""
class MyModule(nn.Module):
"""Module that returns a dict"""
def __init__(self):
super().__init__()
self.softmax = nn.Softmax(dim=-1)
def forward(self, X):
softmax = self.softmax(X)
return {'logits': X, 'softmax': softmax}
return MyModule
@pytest.fixture(scope='module')
def module2_cls(self, module0_cls, module1_cls):
"""Module that combines module0 and module1"""
class MyModule(nn.Module):
"""Module that returns a dict"""
def __init__(self):
super().__init__()
self.module0 = module0_cls()
self.module1 = module1_cls()
def forward(self, X):
_, X1 = self.module0(X)
output = self.module1(X1)
return output['softmax']
return MyModule
@pytest.fixture(scope='module')
def criterion_cls(self):
class MyCriterion(nn.Module):
"""Criterion that has learnable parameters"""
def __init__(self):
super().__init__()
self.lin0 = nn.Linear(2, 2)
# pylint: disable=arguments-differ
def forward(self, y_proba, y):
y_proba = self.lin0(y_proba)
return nn.functional.nll_loss(y_proba, y)
return MyCriterion
@pytest.fixture(scope='module')
def net_cls(self, module2_cls):
"""Customize net to work with complex modules"""
from skorch import NeuralNetClassifier
from skorch.utils import to_tensor
class MyNet(NeuralNetClassifier):
"""Customize net that works with non-standard modules"""
def initialize_module(self):
kwargs = self.get_params_for('mymodule')
module = self.initialized_instance(module2_cls, kwargs)
# pylint: disable=attribute-defined-outside-init
self.mymodule_ = module
self.seq_ = nn.Sequential(nn.Linear(2, 2))
return self
def initialize_criterion(self):
# use non-standard name 'mycriterion'
kwargs = self.get_params_for('mycriterion')
criterion = self.initialized_instance(self.criterion, kwargs)
# pylint: disable=attribute-defined-outside-init
self.mycriterion_ = criterion
return self
def infer(self, x, **fit_params):
x = to_tensor(x, device=self.device)
x = self.mymodule_(x, **fit_params)
x = x + self.seq_(x)
return x
def get_loss(self, y_pred, y_true, *args, **kwargs):
y_true = to_tensor(y_true, device=self.device)
return self.mycriterion_(y_pred, y_true)
return MyNet
@pytest.fixture(scope='module')
def doctor_cls(self):
from skorch.helper import SkorchDoctor
return SkorchDoctor
@pytest.fixture(scope='module')
def data(self, classifier_data):
X, y = classifier_data
# a small amount of data is enough
return X[:50], y[:50]
@pytest.fixture(scope='module')
def doctor(self, module0_cls, criterion_cls, net_cls, doctor_cls, data):
# the passed module doesn't matter as they are hard-coded
torch.manual_seed(0)
net = net_cls(
module0_cls, criterion=criterion_cls, max_epochs=3, batch_size=32
)
doctor = doctor_cls(net)
doctor.fit(*data)
return doctor
def test_activation_recs_general_content(self, doctor):
recs = doctor.activation_recs_
assert set(recs.keys()) == {'mymodule', 'seq', 'mycriterion'}
for rec in recs.values():
# 3 epochs, 2 batches per epoch
assert len(rec) == 6
expected_mymodule = {
'module0.lin0', 'module0.lin1', 'module0[0]', 'module0[1]',
'module1.softmax', 'module1["logits"]', 'module1["softmax"]',
}
assert set(recs['mymodule'][0].keys()) == expected_mymodule
# nn.Sequential just enumerates the layers
assert set(recs['seq'][0].keys()) == {'0'}
assert set(recs['mycriterion'][0].keys()) == {'lin0'}
def test_gradient_recs_general_content(self, doctor):
recs = doctor.gradient_recs_
assert len(recs) == 3
assert set(recs.keys()) == {'mymodule', 'seq', 'mycriterion'}
for rec in recs.values():
# 3 epochs, 2 batches per epoch
assert len(rec) == 6
# each batch has weights and biases for lin1, lin0 has no gradient
expected = {'module0.lin1.weight', 'module0.lin1.bias'}
for batch in recs['mymodule']:
assert set(batch.keys()) == expected
# each batch has weights and biases for lin1, lin0 has no gradient
expected = {'0.weight', '0.bias'}
for batch in recs['seq']:
assert set(batch.keys()) == expected
# each batch has weights and biases for lin1, lin0 has no gradient
expected = {'lin0.weight', 'lin0.bias'}
for batch in recs['mycriterion']:
assert set(batch.keys()) == expected
def test_get_layer_names(self, doctor):
layer_names = doctor.get_layer_names()
expected = {
'mymodule': [
'module0.lin0', 'module0.lin1', 'module0[0]', 'module0[1]',
'module1.softmax', 'module1["logits"]', 'module1["softmax"]',
],
'seq': ['0'],
'mycriterion': ['lin0'],
}
assert layer_names == expected
def test_get_parameter_names(self, doctor):
param_names = doctor.get_param_names()
expected = {
'mymodule': ['module0.lin1.weight', 'module0.lin1.bias'],
'seq': ['0.weight', '0.bias'],
'mycriterion': ['lin0.weight', 'lin0.bias'],
}
assert param_names == expected
| TestSkorchDoctorComplexArchitecture |
python | django-haystack__django-haystack | haystack/query.py | {
"start": 24415,
"end": 26069
} | class ____(SearchQuerySet):
"""
A variant of the SearchQuerySet that can handle `load_all_queryset`s.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._load_all_querysets = {}
self._result_cache = []
def _load_model_objects(self, model, pks):
if model in self._load_all_querysets:
# Use the overriding queryset.
return self._load_all_querysets[model].in_bulk(pks)
else:
# Check the SearchIndex for the model for an override.
try:
ui = connections[self.query._using].get_unified_index()
index = ui.get_index(model)
qs = index.load_all_queryset()
return qs.in_bulk(pks)
except NotHandled:
# The model returned doesn't seem to be handled by the
# routers. We should silently fail and populate
# nothing for those objects.
return {}
def load_all_queryset(self, model, queryset):
"""
Allows for specifying a custom ``QuerySet`` that changes how ``load_all``
will fetch records for the provided model.
This is useful for post-processing the results from the query, enabling
things like adding ``select_related`` or filtering certain data.
"""
clone = self._clone()
clone._load_all_querysets[model] = queryset
return clone
def _clone(self, klass=None):
clone = super()._clone(klass=klass)
clone._load_all_querysets = self._load_all_querysets
return clone
| RelatedSearchQuerySet |
python | pytorch__pytorch | torch/nn/modules/_functions.py | {
"start": 120,
"end": 8432
} | class ____(Function):
@staticmethod
# pyrefly: ignore [bad-override]
def forward(
self,
input,
weight,
bias,
running_mean,
running_var,
eps,
momentum,
process_group,
world_size,
):
if not (
input.is_contiguous(memory_format=torch.channels_last)
or input.is_contiguous(memory_format=torch.channels_last_3d)
):
input = input.contiguous()
if weight is not None:
weight = weight.contiguous()
size = int(input.numel() // input.size(1))
if size == 1 and world_size < 2:
raise ValueError(
f"Expected more than 1 value per channel when training, got input size {size}"
)
num_channels = input.shape[1]
if input.numel() > 0:
# calculate mean/invstd for input.
mean, invstd = torch.batch_norm_stats(input, eps)
count = torch.full(
(1,),
input.numel() // input.size(1),
dtype=mean.dtype,
device=mean.device,
)
# C, C, 1 -> (2C + 1)
combined = torch.cat([mean, invstd, count], dim=0)
else:
# for empty input, set stats and the count to zero. The stats with
# zero count will be filtered out later when computing global mean
# & invstd, but they still needs to participate the all_gather
# collective communication to unblock other peer processes.
combined = torch.zeros(
2 * num_channels + 1, dtype=input.dtype, device=input.device
)
# Use allgather instead of allreduce because count could be different across
# ranks, simple all reduce op can not give correct results.
# batch_norm_gather_stats_with_counts calculates global mean & invstd based on
# all gathered mean, invstd and count.
# for nccl backend, use the optimized version of all gather.
# The Gloo backend does not support `all_gather_into_tensor`.
if process_group._get_backend_name() != "gloo":
# world_size * (2C + 1)
combined_size = combined.numel()
combined_flat = torch.empty(
1,
combined_size * world_size,
dtype=combined.dtype,
device=combined.device,
)
dist.all_gather_into_tensor(
combined_flat, combined, process_group, async_op=False
)
combined = torch.reshape(combined_flat, (world_size, combined_size))
# world_size * (2C + 1) -> world_size * C, world_size * C, world_size * 1
mean_all, invstd_all, count_all = torch.split(combined, num_channels, dim=1)
else:
# world_size * (2C + 1)
combined_list = [torch.empty_like(combined) for _ in range(world_size)]
dist.all_gather(combined_list, combined, process_group, async_op=False)
combined = torch.stack(combined_list, dim=0)
# world_size * (2C + 1) -> world_size * C, world_size * C, world_size * 1
mean_all, invstd_all, count_all = torch.split(combined, num_channels, dim=1)
if not (torch.cuda.is_available() and torch.cuda.is_current_stream_capturing()):
# The lines below force a synchronization between CUDA and CPU, because
# the shape of the result count_all depends on the values in mask tensor.
# Such synchronizations break CUDA Graph capturing.
# See https://github.com/pytorch/pytorch/issues/78549
# FIXME: https://github.com/pytorch/pytorch/issues/78656 describes
# a better longer-term solution.
# remove stats from empty inputs
mask = count_all.squeeze(-1) >= 1
count_all = count_all[mask]
mean_all = mean_all[mask]
invstd_all = invstd_all[mask]
# calculate global mean & invstd
counts = count_all.view(-1)
if running_mean is not None and counts.dtype != running_mean.dtype:
counts = counts.to(running_mean.dtype)
mean, invstd = torch.batch_norm_gather_stats_with_counts(
input,
mean_all,
invstd_all,
running_mean,
running_var,
momentum,
eps,
counts,
)
self.save_for_backward(input, weight, mean, invstd, count_all.to(torch.int32))
self.process_group = process_group
# apply element-wise normalization
if input.numel() > 0:
return torch.batch_norm_elemt(input, weight, bias, mean, invstd, eps)
else:
return torch.empty_like(input)
@staticmethod
def backward(self, grad_output):
if not (
grad_output.is_contiguous(memory_format=torch.channels_last)
or grad_output.is_contiguous(memory_format=torch.channels_last_3d)
):
grad_output = grad_output.contiguous()
saved_input, weight, mean, invstd, count_tensor = self.saved_tensors
grad_input = grad_weight = grad_bias = None
process_group = self.process_group
if saved_input.numel() > 0:
# calculate local stats as well as grad_weight / grad_bias
(
sum_dy,
sum_dy_xmu,
grad_weight,
grad_bias,
) = torch.batch_norm_backward_reduce(
grad_output,
saved_input,
mean,
invstd,
weight,
self.needs_input_grad[0],
self.needs_input_grad[1],
self.needs_input_grad[2],
)
if self.needs_input_grad[0]:
# synchronizing stats used to calculate input gradient.
num_channels = sum_dy.shape[0]
combined = torch.cat([sum_dy, sum_dy_xmu], dim=0)
torch.distributed.all_reduce(
combined,
torch.distributed.ReduceOp.SUM,
process_group,
async_op=False,
)
sum_dy, sum_dy_xmu = torch.split(combined, num_channels)
# backward pass for gradient calculation
if weight is not None and weight.dtype != mean.dtype:
weight = weight.to(mean.dtype)
grad_input = torch.batch_norm_backward_elemt(
grad_output,
saved_input,
mean,
invstd,
weight,
sum_dy,
sum_dy_xmu,
count_tensor,
)
# synchronizing of grad_weight / grad_bias is not needed as distributed
# training would handle all reduce.
if weight is None or not self.needs_input_grad[1]:
grad_weight = None
if weight is None or not self.needs_input_grad[2]:
grad_bias = None
else:
# This process got an empty input tensor in the forward pass.
# Although this process can directly set grad_input as an empty
# tensor of zeros, it still needs to participate in the collective
# communication to unblock its peers, as other peer processes might
# have received non-empty inputs.
num_channels = saved_input.shape[1]
if self.needs_input_grad[0]:
# launch all_reduce to unblock other peer processes
combined = torch.zeros(
2 * num_channels, dtype=saved_input.dtype, device=saved_input.device
)
torch.distributed.all_reduce(
combined,
torch.distributed.ReduceOp.SUM,
process_group,
async_op=False,
)
# Leave grad_input, grad_weight and grad_bias as None, which will be
# interpreted by the autograd engine as Tensors full of zeros.
return grad_input, grad_weight, grad_bias, None, None, None, None, None, None
| SyncBatchNorm |
python | scipy__scipy | scipy/stats/tests/test_multivariate.py | {
"start": 1834,
"end": 12926
} | class ____:
def test_input_validation(self):
message = "The input `precision` must be a square, two-dimensional..."
with pytest.raises(ValueError, match=message):
_covariance.CovViaPrecision(np.ones(2))
message = "`precision.shape` must equal `covariance.shape`."
with pytest.raises(ValueError, match=message):
_covariance.CovViaPrecision(np.eye(3), covariance=np.eye(2))
message = "The input `diagonal` must be a one-dimensional array..."
with pytest.raises(ValueError, match=message):
_covariance.CovViaDiagonal("alpaca")
message = "The input `cholesky` must be a square, two-dimensional..."
with pytest.raises(ValueError, match=message):
_covariance.CovViaCholesky(np.ones(2))
message = "The input `eigenvalues` must be a one-dimensional..."
with pytest.raises(ValueError, match=message):
_covariance.CovViaEigendecomposition(("alpaca", np.eye(2)))
message = "The input `eigenvectors` must be a square..."
with pytest.raises(ValueError, match=message):
_covariance.CovViaEigendecomposition((np.ones(2), "alpaca"))
message = "The shapes of `eigenvalues` and `eigenvectors` must be..."
with pytest.raises(ValueError, match=message):
_covariance.CovViaEigendecomposition(([1, 2, 3], np.eye(2)))
_covariance_preprocessing = {"Diagonal": np.diag,
"Precision": np.linalg.inv,
"Cholesky": np.linalg.cholesky,
"Eigendecomposition": np.linalg.eigh,
"PSD": lambda x:
_PSD(x, allow_singular=True)}
_all_covariance_types = np.array(list(_covariance_preprocessing))
_matrices = {"diagonal full rank": np.diag([1, 2, 3]),
"general full rank": [[5, 1, 3], [1, 6, 4], [3, 4, 7]],
"diagonal singular": np.diag([1, 0, 3]),
"general singular": [[5, -1, 0], [-1, 5, 0], [0, 0, 0]]}
_cov_types = {"diagonal full rank": _all_covariance_types,
"general full rank": _all_covariance_types[1:],
"diagonal singular": _all_covariance_types[[0, -2, -1]],
"general singular": _all_covariance_types[-2:]}
@pytest.mark.parametrize("cov_type_name", _all_covariance_types[:-1])
def test_factories(self, cov_type_name):
A = np.diag([1, 2, 3])
x = [-4, 2, 5]
cov_type = getattr(_covariance, f"CovVia{cov_type_name}")
preprocessing = self._covariance_preprocessing[cov_type_name]
factory = getattr(Covariance, f"from_{cov_type_name.lower()}")
res = factory(preprocessing(A))
ref = cov_type(preprocessing(A))
assert type(res) is type(ref)
assert_allclose(res.whiten(x), ref.whiten(x))
@pytest.mark.parametrize("matrix_type", list(_matrices))
@pytest.mark.parametrize("cov_type_name", _all_covariance_types)
def test_covariance(self, matrix_type, cov_type_name):
message = (f"CovVia{cov_type_name} does not support {matrix_type} "
"matrices")
if cov_type_name not in self._cov_types[matrix_type]:
pytest.skip(message)
A = self._matrices[matrix_type]
cov_type = getattr(_covariance, f"CovVia{cov_type_name}")
preprocessing = self._covariance_preprocessing[cov_type_name]
psd = _PSD(A, allow_singular=True)
# test properties
cov_object = cov_type(preprocessing(A))
assert_close(cov_object.log_pdet, psd.log_pdet)
assert_equal(cov_object.rank, psd.rank)
assert_equal(cov_object.shape, np.asarray(A).shape)
assert_close(cov_object.covariance, np.asarray(A))
# test whitening/coloring 1D x
rng = np.random.default_rng(5292808890472453840)
x = rng.random(size=3)
res = cov_object.whiten(x)
ref = x @ psd.U
# res != ref in general; but res @ res == ref @ ref
assert_close(res @ res, ref @ ref)
if hasattr(cov_object, "_colorize") and "singular" not in matrix_type:
# CovViaPSD does not have _colorize
assert_close(cov_object.colorize(res), x)
# test whitening/coloring 3D x
x = rng.random(size=(2, 4, 3))
res = cov_object.whiten(x)
ref = x @ psd.U
assert_close((res**2).sum(axis=-1), (ref**2).sum(axis=-1))
if hasattr(cov_object, "_colorize") and "singular" not in matrix_type:
assert_close(cov_object.colorize(res), x)
# gh-19197 reported that multivariate normal `rvs` produced incorrect
# results when a singular Covariance object was produce using
# `from_eigenvalues`. This was due to an issue in `colorize` with
# singular covariance matrices. Check this edge case, which is skipped
# in the previous tests.
if hasattr(cov_object, "_colorize"):
res = cov_object.colorize(np.eye(len(A)))
assert_close(res.T @ res, A)
@pytest.mark.parametrize("size", [None, tuple(), 1, (2, 4, 3)])
@pytest.mark.parametrize("matrix_type", list(_matrices))
@pytest.mark.parametrize("cov_type_name", _all_covariance_types)
def test_mvn_with_covariance(self, size, matrix_type, cov_type_name):
message = (f"CovVia{cov_type_name} does not support {matrix_type} "
"matrices")
if cov_type_name not in self._cov_types[matrix_type]:
pytest.skip(message)
A = self._matrices[matrix_type]
cov_type = getattr(_covariance, f"CovVia{cov_type_name}")
preprocessing = self._covariance_preprocessing[cov_type_name]
mean = [0.1, 0.2, 0.3]
cov_object = cov_type(preprocessing(A))
mvn = multivariate_normal
dist0 = multivariate_normal(mean, A, allow_singular=True)
dist1 = multivariate_normal(mean, cov_object, allow_singular=True)
rng = np.random.default_rng(5292808890472453840)
x = rng.multivariate_normal(mean, A, size=size)
rng = np.random.default_rng(5292808890472453840)
x1 = mvn.rvs(mean, cov_object, size=size, random_state=rng)
rng = np.random.default_rng(5292808890472453840)
x2 = mvn(mean, cov_object, seed=rng).rvs(size=size)
if isinstance(cov_object, _covariance.CovViaPSD):
assert_close(x1, np.squeeze(x)) # for backward compatibility
assert_close(x2, np.squeeze(x))
else:
assert_equal(x1.shape, x.shape)
assert_equal(x2.shape, x.shape)
assert_close(x2, x1)
assert_close(mvn.pdf(x, mean, cov_object), dist0.pdf(x))
assert_close(dist1.pdf(x), dist0.pdf(x))
assert_close(mvn.logpdf(x, mean, cov_object), dist0.logpdf(x))
assert_close(dist1.logpdf(x), dist0.logpdf(x))
assert_close(mvn.entropy(mean, cov_object), dist0.entropy())
assert_close(dist1.entropy(), dist0.entropy())
@pytest.mark.parametrize("size", [tuple(), (2, 4, 3)])
@pytest.mark.parametrize("cov_type_name", _all_covariance_types)
def test_mvn_with_covariance_cdf(self, size, cov_type_name):
# This is split from the test above because it's slow to be running
# with all matrix types, and there's no need because _mvn.mvnun
# does the calculation. All Covariance needs to do is pass is
# provide the `covariance` attribute.
matrix_type = "diagonal full rank"
A = self._matrices[matrix_type]
cov_type = getattr(_covariance, f"CovVia{cov_type_name}")
preprocessing = self._covariance_preprocessing[cov_type_name]
mean = [0.1, 0.2, 0.3]
cov_object = cov_type(preprocessing(A))
mvn = multivariate_normal
dist0 = multivariate_normal(mean, A, allow_singular=True)
dist1 = multivariate_normal(mean, cov_object, allow_singular=True)
rng = np.random.default_rng(5292808890472453840)
x = rng.multivariate_normal(mean, A, size=size)
assert_close(mvn.cdf(x, mean, cov_object), dist0.cdf(x))
assert_close(dist1.cdf(x), dist0.cdf(x))
assert_close(mvn.logcdf(x, mean, cov_object), dist0.logcdf(x))
assert_close(dist1.logcdf(x), dist0.logcdf(x))
def test_covariance_instantiation(self):
message = "The `Covariance` class cannot be instantiated directly."
with pytest.raises(NotImplementedError, match=message):
Covariance()
@pytest.mark.filterwarnings("ignore::RuntimeWarning") # matrix not PSD
def test_gh9942(self):
# Originally there was a mistake in the `multivariate_normal_frozen`
# `rvs` method that caused all covariance objects to be processed as
# a `_CovViaPSD`. Ensure that this is resolved.
A = np.diag([1, 2, -1e-8])
n = A.shape[0]
mean = np.zeros(n)
# Error if the matrix is processed as a `_CovViaPSD`
with pytest.raises(ValueError, match="The input matrix must be..."):
multivariate_normal(mean, A).rvs()
# No error if it is provided as a `CovViaEigendecomposition`
seed = 3562050283508273023
rng1 = np.random.default_rng(seed)
rng2 = np.random.default_rng(seed)
cov = Covariance.from_eigendecomposition(np.linalg.eigh(A))
rv = multivariate_normal(mean, cov)
res = rv.rvs(random_state=rng1)
ref = multivariate_normal.rvs(mean, cov, random_state=rng2)
assert_equal(res, ref)
def test_gh19197(self):
# gh-19197 reported that multivariate normal `rvs` produced incorrect
# results when a singular Covariance object was produce using
# `from_eigenvalues`. Check that this specific issue is resolved;
# a more general test is included in `test_covariance`.
mean = np.ones(2)
cov = Covariance.from_eigendecomposition((np.zeros(2), np.eye(2)))
dist = scipy.stats.multivariate_normal(mean=mean, cov=cov)
rvs = dist.rvs(size=None)
assert_equal(rvs, mean)
cov = scipy.stats.Covariance.from_eigendecomposition(
(np.array([1., 0.]), np.array([[1., 0.], [0., 400.]])))
dist = scipy.stats.multivariate_normal(mean=mean, cov=cov)
rvs = dist.rvs(size=None)
assert rvs[0] != mean[0]
assert rvs[1] == mean[1]
def _random_covariance(dim, evals, rng, singular=False):
# Generates random covariance matrix with dimensionality `dim` and
# eigenvalues `evals` using provided Generator `rng`. Randomly sets
# some evals to zero if `singular` is True.
A = rng.random((dim, dim))
A = A @ A.T
_, v = np.linalg.eigh(A)
if singular:
zero_eigs = rng.normal(size=dim) > 0
evals[zero_eigs] = 0
cov = v @ np.diag(evals) @ v.T
return cov
def _sample_orthonormal_matrix(n):
rng = np.random.default_rng(9086764251)
M = rng.standard_normal((n, n))
u, s, v = scipy.linalg.svd(M)
return u
@dataclass
| TestCovariance |
python | kamyu104__LeetCode-Solutions | Python/kth-smallest-element-in-a-bst.py | {
"start": 586,
"end": 1037
} | class ____(object):
def kthSmallest(self, root, k):
"""
:type root: TreeNode
:type k: int
:rtype: int
"""
def gen_inorder(root):
if root:
for n in gen_inorder(root.left):
yield n
yield root.val
for n in gen_inorder(root.right):
yield n
return next(islice(gen_inorder(root), k-1, k))
| Solution2 |
python | dagster-io__dagster | python_modules/dagster/dagster/_config/errors.py | {
"start": 1595,
"end": 1700
} | class ____:
config_type_snap: ConfigTypeSnap
value_rep: str
@record_custom
| RuntimeMismatchErrorData |
python | apache__airflow | providers/amazon/tests/unit/amazon/aws/operators/test_rds.py | {
"start": 25736,
"end": 28529
} | class ____:
@classmethod
def setup_class(cls):
cls.dag = DAG(
dag_id="test_dag",
schedule=None,
default_args={"owner": "airflow", "start_date": DEFAULT_DATE},
)
cls.hook = RdsHook(aws_conn_id=AWS_CONN, region_name="us-east-1")
_patch_hook_get_connection(cls.hook)
@classmethod
def teardown_class(cls):
del cls.dag
del cls.hook
@mock_aws
def test_create_event_subscription(self):
_create_db_instance(self.hook)
create_subscription_operator = RdsCreateEventSubscriptionOperator(
task_id="test_create",
subscription_name=SUBSCRIPTION_NAME,
sns_topic_arn=SUBSCRIPTION_TOPIC,
source_type="db-instance",
source_ids=[DB_INSTANCE_NAME],
aws_conn_id=AWS_CONN,
dag=self.dag,
)
_patch_hook_get_connection(create_subscription_operator.hook)
create_subscription_operator.execute(None)
result = self.hook.conn.describe_event_subscriptions(SubscriptionName=SUBSCRIPTION_NAME)
subscriptions = result.get("EventSubscriptionsList")
assert subscriptions
assert len(subscriptions) == 1
assert subscriptions[0]["Status"] == "active"
@mock_aws
@patch.object(RdsHook, "wait_for_event_subscription_state")
def test_create_event_subscription_no_wait(self, mock_await_status):
_create_db_instance(self.hook)
create_subscription_operator = RdsCreateEventSubscriptionOperator(
task_id="test_create_no_wait",
subscription_name=SUBSCRIPTION_NAME,
sns_topic_arn=SUBSCRIPTION_TOPIC,
source_type="db-instance",
source_ids=[DB_INSTANCE_NAME],
aws_conn_id=AWS_CONN,
dag=self.dag,
wait_for_completion=False,
)
_patch_hook_get_connection(create_subscription_operator.hook)
create_subscription_operator.execute(None)
result = self.hook.conn.describe_event_subscriptions(SubscriptionName=SUBSCRIPTION_NAME)
subscriptions = result.get("EventSubscriptionsList")
assert subscriptions
assert len(subscriptions) == 1
assert subscriptions[0]["Status"] == "active"
mock_await_status.assert_not_called()
def test_template_fields(self):
operator = RdsCreateEventSubscriptionOperator(
task_id="test_create",
subscription_name=SUBSCRIPTION_NAME,
sns_topic_arn=SUBSCRIPTION_TOPIC,
source_type="db-instance",
source_ids=[DB_INSTANCE_NAME],
aws_conn_id=AWS_CONN,
region_name=REGION,
)
validate_template_fields(operator)
| TestRdsCreateEventSubscriptionOperator |
python | numba__numba | numba/tests/enum_usecases.py | {
"start": 513,
"end": 590
} | class ____(Enum):
red = 1.0
green = 2.0
blue = 3j
| HeterogeneousEnum |
python | huggingface__transformers | src/transformers/models/lfm2_vl/processing_lfm2_vl.py | {
"start": 1034,
"end": 1130
} | class ____(TextKwargs, total=False):
use_image_special_tokens: Optional[bool]
| Lfm2VlTextKwargs |
python | apache__airflow | airflow-core/src/airflow/api_fastapi/execution_api/datamodels/taskinstance.py | {
"start": 2701,
"end": 2984
} | class ____(StrictBaseModel):
"""Schema for updating TaskInstance to a terminal state except SUCCESS state."""
state: TerminalStateNonSuccess
end_date: UtcDateTime
"""When the task completed executing"""
rendered_map_index: str | None = None
| TITerminalStatePayload |
python | rapidsai__cudf | python/cudf/cudf/core/join/_join_helpers.py | {
"start": 1363,
"end": 6545
} | class ____(_Indexer):
def get(self, obj: DataFrame) -> ColumnBase:
return obj.index._data[self.name]
def set(self, obj: DataFrame, value: ColumnBase):
obj.index._data.set_by_label(self.name, value)
def _match_join_keys(
lcol: ColumnBase, rcol: ColumnBase, how: str
) -> tuple[ColumnBase, ColumnBase]:
# Casts lcol and rcol to a common dtype for use as join keys. If no casting
# is necessary, they are returned as is.
common_type = None
# cast the keys lcol and rcol to a common dtype
ltype = lcol.dtype
rtype = rcol.dtype
# if either side is categorical, different logic
left_is_categorical = isinstance(ltype, CategoricalDtype)
right_is_categorical = isinstance(rtype, CategoricalDtype)
if left_is_categorical and right_is_categorical:
return _match_categorical_dtypes_both(lcol, rcol, how) # type: ignore[arg-type]
elif left_is_categorical or right_is_categorical:
if left_is_categorical:
if how in {"left", "leftsemi", "leftanti"}:
return lcol, rcol.astype(ltype)
common_type = ltype.categories.dtype # type: ignore[union-attr]
if get_option("mode.pandas_compatible"):
common_type = get_dtype_of_same_kind(rtype, common_type)
else:
common_type = rtype.categories.dtype # type: ignore[union-attr]
if get_option("mode.pandas_compatible"):
common_type = get_dtype_of_same_kind(ltype, common_type)
return lcol.astype(common_type), rcol.astype(common_type)
if is_dtype_equal(ltype, rtype):
return lcol, rcol
if isinstance(
ltype, (Decimal32Dtype, Decimal64Dtype, Decimal128Dtype)
) or isinstance(rtype, (Decimal32Dtype, Decimal64Dtype, Decimal128Dtype)):
raise TypeError(
"Decimal columns can only be merged with decimal columns "
"of the same precision and scale"
)
if (
is_dtype_obj_numeric(ltype)
and is_dtype_obj_numeric(rtype)
and not (ltype.kind == "m" or rtype.kind == "m")
):
common_type = (
max(ltype, rtype)
if ltype.kind == rtype.kind
else find_common_type((ltype, rtype))
)
elif (ltype.kind == "M" and rtype.kind == "M") or (
ltype.kind == "m" and rtype.kind == "m"
):
common_type = max(ltype, rtype)
elif ltype.kind in "mM" and not rcol.fillna(0).can_cast_safely(ltype):
raise TypeError(
f"Cannot join between {ltype} and {rtype}, please type-cast both "
"columns to the same type."
)
elif rtype.kind in "mM" and not lcol.fillna(0).can_cast_safely(rtype):
raise TypeError(
f"Cannot join between {rtype} and {ltype}, please type-cast both "
"columns to the same type."
)
if how == "left" and rcol.fillna(0).can_cast_safely(ltype):
return lcol, rcol.astype(ltype)
elif common_type is None:
common_type = np.dtype(np.float64)
return lcol.astype(common_type), rcol.astype(common_type)
def _match_categorical_dtypes_both(
lcol: CategoricalColumn, rcol: CategoricalColumn, how: str
) -> tuple[ColumnBase, ColumnBase]:
ltype, rtype = lcol.dtype, rcol.dtype
# when both are ordered and both have the same categories,
# no casting required:
if ltype._internal_eq(rtype):
return lcol, rcol
# Merging categorical variables when only one side is ordered is
# ambiguous and not allowed.
if ltype.ordered != rtype.ordered:
raise TypeError(
"Merging on categorical variables with mismatched"
" ordering is ambiguous"
)
if ltype.ordered and rtype.ordered:
# if we get to here, categories must be what causes the
# dtype equality check to fail. And we can never merge
# two ordered categoricals with different categories
raise TypeError(
f"{how} merge between categoricals with "
"different categories is only valid when "
"neither side is ordered"
)
if how == "inner":
# cast to category types -- we must cast them back later
return _match_join_keys(
lcol._get_decategorized_column(),
rcol._get_decategorized_column(),
how,
)
elif how in {"left", "leftanti", "leftsemi"}:
# always cast to left type
return lcol, rcol._get_decategorized_column().astype(ltype)
else:
# merge categories
with warnings.catch_warnings():
warnings.simplefilter("ignore", FutureWarning)
merged_categories = concat(
[ltype.categories, rtype.categories]
).unique()
common_type = CategoricalDtype(
categories=merged_categories, ordered=False
)
return lcol._get_decategorized_column().astype(
common_type
), rcol._get_decategorized_column().astype(common_type)
def _coerce_to_tuple(obj):
if isinstance(obj, Iterable) and not isinstance(obj, str):
return tuple(obj)
else:
return (obj,)
| _IndexIndexer |
python | ansible__ansible | test/lib/ansible_test/_internal/cli/compat.py | {
"start": 3114,
"end": 3480
} | class ____(ApplicationError):
"""Option(s) were specified which do not provide support for the controller and would be ignored because they are irrelevant for the target."""
def __init__(self, context: str) -> None:
super().__init__(f'Environment `{context}` does not provide a Python version supported by the controller.')
| ControllerNotSupportedError |
python | PrefectHQ__prefect | tests/server/schemas/test_core.py | {
"start": 1948,
"end": 2496
} | class ____:
async def test_block_document_reference_different_parent_and_ref(self):
same_id = uuid4()
with pytest.raises(
ValueError,
match=(
"`parent_block_document_id` and `reference_block_document_id` cannot be"
" the same"
),
):
schemas.core.BlockDocumentReference(
parent_block_document_id=same_id,
reference_block_document_id=same_id,
name="name",
)
| TestBlockDocumentReference |
python | mlflow__mlflow | mlflow/store/_unity_catalog/registry/rest_store.py | {
"start": 12667,
"end": 74745
} | class ____(BaseRestStore):
"""
Client for a remote model registry server accessed via REST API calls
Args:
store_uri: URI with scheme 'databricks-uc'
tracking_uri: URI of the Databricks MLflow tracking server from which to fetch
run info and download run artifacts, when creating new model
versions from source artifacts logged to an MLflow run.
"""
def __init__(self, store_uri, tracking_uri):
super().__init__(get_host_creds=functools.partial(get_databricks_host_creds, store_uri))
self.tracking_uri = tracking_uri
self.get_tracking_host_creds = functools.partial(get_databricks_host_creds, tracking_uri)
try:
self.spark = _get_active_spark_session()
except Exception:
pass
def _get_response_from_method(self, method):
method_to_response = {
CreateRegisteredModelRequest: CreateRegisteredModelResponse,
UpdateRegisteredModelRequest: UpdateRegisteredModelResponse,
DeleteRegisteredModelRequest: DeleteRegisteredModelResponse,
CreateModelVersionRequest: CreateModelVersionResponse,
FinalizeModelVersionRequest: FinalizeModelVersionResponse,
UpdateModelVersionRequest: UpdateModelVersionResponse,
DeleteModelVersionRequest: DeleteModelVersionResponse,
GetModelVersionDownloadUriRequest: GetModelVersionDownloadUriResponse,
SearchModelVersionsRequest: SearchModelVersionsResponse,
GetRegisteredModelRequest: GetRegisteredModelResponse,
GetModelVersionRequest: GetModelVersionResponse,
SearchRegisteredModelsRequest: SearchRegisteredModelsResponse,
GenerateTemporaryModelVersionCredentialsRequest: (
GenerateTemporaryModelVersionCredentialsResponse
),
GetRun: GetRun.Response,
SetRegisteredModelAliasRequest: SetRegisteredModelAliasResponse,
DeleteRegisteredModelAliasRequest: DeleteRegisteredModelAliasResponse,
SetRegisteredModelTagRequest: SetRegisteredModelTagResponse,
DeleteRegisteredModelTagRequest: DeleteRegisteredModelTagResponse,
SetModelVersionTagRequest: SetModelVersionTagResponse,
DeleteModelVersionTagRequest: DeleteModelVersionTagResponse,
GetModelVersionByAliasRequest: GetModelVersionByAliasResponse,
CreatePromptRequest: ProtoPrompt,
SearchPromptsRequest: SearchPromptsResponse,
DeletePromptRequest: google.protobuf.empty_pb2.Empty,
SetPromptTagRequest: google.protobuf.empty_pb2.Empty,
DeletePromptTagRequest: google.protobuf.empty_pb2.Empty,
CreatePromptVersionRequest: ProtoPromptVersion,
GetPromptVersionRequest: ProtoPromptVersion,
DeletePromptVersionRequest: google.protobuf.empty_pb2.Empty,
GetPromptVersionByAliasRequest: ProtoPromptVersion,
UpdatePromptRequest: ProtoPrompt,
GetPromptRequest: ProtoPrompt,
SearchPromptVersionsRequest: SearchPromptVersionsResponse,
SetPromptAliasRequest: google.protobuf.empty_pb2.Empty,
DeletePromptAliasRequest: google.protobuf.empty_pb2.Empty,
SetPromptVersionTagRequest: google.protobuf.empty_pb2.Empty,
DeletePromptVersionTagRequest: google.protobuf.empty_pb2.Empty,
UpdatePromptVersionRequest: ProtoPromptVersion,
LinkPromptVersionsToModelsRequest: google.protobuf.empty_pb2.Empty,
LinkPromptsToTracesRequest: google.protobuf.empty_pb2.Empty,
LinkPromptVersionsToRunsRequest: google.protobuf.empty_pb2.Empty,
}
return method_to_response[method]()
def _get_endpoint_from_method(self, method):
return _METHOD_TO_INFO[method]
def _get_all_endpoints_from_method(self, method):
return _METHOD_TO_ALL_INFO[method]
# CRUD API for RegisteredModel objects
def create_registered_model(self, name, tags=None, description=None, deployment_job_id=None):
"""
Create a new registered model in backend store.
Args:
name: Name of the new model. This is expected to be unique in the backend store.
tags: A list of :py:class:`mlflow.entities.model_registry.RegisteredModelTag`
instances associated with this registered model.
description: Description of the model.
deployment_job_id: Optional deployment job id.
Returns:
A single object of :py:class:`mlflow.entities.model_registry.RegisteredModel`
created in the backend.
"""
full_name = get_full_name_from_sc(name, self.spark)
req_body = message_to_json(
CreateRegisteredModelRequest(
name=full_name,
description=description,
tags=uc_registered_model_tag_from_mlflow_tags(tags),
deployment_job_id=str(deployment_job_id) if deployment_job_id else None,
)
)
try:
response_proto = self._call_endpoint(CreateRegisteredModelRequest, req_body)
except RestException as e:
def reraise_with_legacy_hint(exception, legacy_hint):
new_message = exception.message.rstrip(".") + f". {legacy_hint}"
raise MlflowException(
message=new_message,
error_code=exception.error_code,
)
if "specify all three levels" in e.message:
# The exception is likely due to the user trying to create a registered model
# in Unity Catalog without specifying a 3-level name (catalog.schema.model).
# The user may not be intending to use the Unity Catalog Model Registry at all,
# but rather the legacy Workspace Model Registry. Accordingly, we re-raise with
# a hint
legacy_hint = (
"If you are trying to use the legacy Workspace Model Registry, instead of the"
" recommended Unity Catalog Model Registry, set the Model Registry URI to"
" 'databricks' (legacy) instead of 'databricks-uc' (recommended)."
)
reraise_with_legacy_hint(exception=e, legacy_hint=legacy_hint)
elif "METASTORE_DOES_NOT_EXIST" in e.message:
legacy_hint = (
"If you are trying to use the Model Registry in a Databricks workspace that"
" does not have Unity Catalog enabled, either enable Unity Catalog in the"
" workspace (recommended) or set the Model Registry URI to 'databricks' to"
" use the legacy Workspace Model Registry."
)
reraise_with_legacy_hint(exception=e, legacy_hint=legacy_hint)
else:
raise
if deployment_job_id:
_print_databricks_deployment_job_url(
model_name=full_name,
job_id=str(deployment_job_id),
)
return registered_model_from_uc_proto(response_proto.registered_model)
def update_registered_model(self, name, description=None, deployment_job_id=None):
"""
Update description of the registered model.
Args:
name: Registered model name.
description: New description.
deployment_job_id: Optional deployment job id.
Returns:
A single updated :py:class:`mlflow.entities.model_registry.RegisteredModel` object.
"""
full_name = get_full_name_from_sc(name, self.spark)
req_body = message_to_json(
UpdateRegisteredModelRequest(
name=full_name,
description=description,
deployment_job_id=str(deployment_job_id) if deployment_job_id is not None else None,
)
)
response_proto = self._call_endpoint(UpdateRegisteredModelRequest, req_body)
if deployment_job_id:
_print_databricks_deployment_job_url(
model_name=full_name,
job_id=str(deployment_job_id),
)
return registered_model_from_uc_proto(response_proto.registered_model)
def rename_registered_model(self, name, new_name):
"""
Rename the registered model.
Args:
name: Registered model name.
new_name: New proposed name.
Returns:
A single updated :py:class:`mlflow.entities.model_registry.RegisteredModel` object.
"""
full_name = get_full_name_from_sc(name, self.spark)
req_body = message_to_json(UpdateRegisteredModelRequest(name=full_name, new_name=new_name))
response_proto = self._call_endpoint(UpdateRegisteredModelRequest, req_body)
return registered_model_from_uc_proto(response_proto.registered_model)
def delete_registered_model(self, name):
"""
Delete the registered model.
Backend raises exception if a registered model with given name does not exist.
Args:
name: Registered model name.
Returns:
None
"""
full_name = get_full_name_from_sc(name, self.spark)
req_body = message_to_json(DeleteRegisteredModelRequest(name=full_name))
self._call_endpoint(DeleteRegisteredModelRequest, req_body)
def search_registered_models(
self, filter_string=None, max_results=None, order_by=None, page_token=None
):
"""
Search for registered models in backend that satisfy the filter criteria.
Args:
filter_string: Filter query string, defaults to searching all registered models.
max_results: Maximum number of registered models desired.
order_by: List of column names with ASC|DESC annotation, to be used for ordering
matching search results.
page_token: Token specifying the next page of results. It should be obtained from
a ``search_registered_models`` call.
Returns:
A PagedList of :py:class:`mlflow.entities.model_registry.RegisteredModel` objects
that satisfy the search expressions. The pagination token for the next page can be
obtained via the ``token`` attribute of the object.
"""
_require_arg_unspecified("filter_string", filter_string)
_require_arg_unspecified("order_by", order_by)
req_body = message_to_json(
SearchRegisteredModelsRequest(
max_results=max_results,
page_token=page_token,
)
)
response_proto = self._call_endpoint(SearchRegisteredModelsRequest, req_body)
registered_models = [
registered_model_search_from_uc_proto(registered_model)
for registered_model in response_proto.registered_models
]
return PagedList(registered_models, response_proto.next_page_token)
def get_registered_model(self, name):
"""
Get registered model instance by name.
Args:
name: Registered model name.
Returns:
A single :py:class:`mlflow.entities.model_registry.RegisteredModel` object.
"""
full_name = get_full_name_from_sc(name, self.spark)
req_body = message_to_json(GetRegisteredModelRequest(name=full_name))
response_proto = self._call_endpoint(GetRegisteredModelRequest, req_body)
return registered_model_from_uc_proto(response_proto.registered_model)
    def get_latest_versions(self, name, stages=None):
        """
        Latest version models for each requested stage. If no ``stages`` argument is provided,
        returns the latest version for each stage.

        Args:
            name: Registered model name.
            stages: List of desired stages. If input list is None, return latest versions for
                each stage.

        Raises:
            MlflowException: Always; stage-based model lookup is unsupported in Unity
                Catalog. The error message directs callers to alias-based loading.
        """
        alias_doc_url = "https://mlflow.org/docs/latest/model-registry.html#deploy-and-organize-models-with-aliases-and-tags"
        # Tailor the guidance: a bare call likely wants "the latest version", while a call
        # passing stages is using the deprecated stage-based workflow.
        if stages is None:
            message = (
                "To load the latest version of a model in Unity Catalog, you can "
                "set an alias on the model version and load it by alias. See "
                f"{alias_doc_url} for details."
            )
        else:
            message = (
                f"Detected attempt to load latest model version in stages {stages}. "
                "You may see this error because:\n"
                "1) You're attempting to load a model version by stage. Setting stages "
                "and loading model versions by stage is unsupported in Unity Catalog. Instead, "
                "use aliases for flexible model deployment. See "
                f"{alias_doc_url} for details.\n"
                "2) You're attempting to load a model version by alias. Use "
                "syntax 'models:/your_model_name@your_alias_name'\n"
                "3) You're attempting load a model version by version number. Verify "
                "that the version number is a valid integer"
            )
        _raise_unsupported_method(
            method="get_latest_versions",
            message=message,
        )
def set_registered_model_tag(self, name, tag):
"""
Set a tag for the registered model.
Args:
name: Registered model name.
tag: :py:class:`mlflow.entities.model_registry.RegisteredModelTag` instance to log.
Returns:
None
"""
full_name = get_full_name_from_sc(name, self.spark)
req_body = message_to_json(
SetRegisteredModelTagRequest(name=full_name, key=tag.key, value=tag.value)
)
self._call_endpoint(SetRegisteredModelTagRequest, req_body)
def delete_registered_model_tag(self, name, key):
"""
Delete a tag associated with the registered model.
Args:
name: Registered model name.
key: Registered model tag key.
Returns:
None
"""
full_name = get_full_name_from_sc(name, self.spark)
req_body = message_to_json(DeleteRegisteredModelTagRequest(name=full_name, key=key))
self._call_endpoint(DeleteRegisteredModelTagRequest, req_body)
# CRUD API for ModelVersion objects
def _finalize_model_version(self, name, version):
"""
Finalize a UC model version after its files have been written to managed storage,
updating its status from PENDING_REGISTRATION to READY
Args:
name: Registered model name
version: Model version number
Returns:
Protobuf ModelVersion describing the finalized model version
"""
req_body = message_to_json(FinalizeModelVersionRequest(name=name, version=version))
return self._call_endpoint(FinalizeModelVersionRequest, req_body).model_version
def _get_temporary_model_version_write_credentials(self, name, version) -> TemporaryCredentials:
"""
Get temporary credentials for uploading model version files
Args:
name: Registered model name.
version: Model version number.
Returns:
mlflow.protos.databricks_uc_registry_messages_pb2.TemporaryCredentials containing
temporary model version credentials.
"""
req_body = message_to_json(
GenerateTemporaryModelVersionCredentialsRequest(
name=name, version=version, operation=MODEL_VERSION_OPERATION_READ_WRITE
)
)
return self._call_endpoint(
GenerateTemporaryModelVersionCredentialsRequest, req_body
).credentials
    def _get_run_and_headers(self, run_id):
        """
        Fetch the model version's source run and the raw HTTP response headers
        from the tracking server.

        Args:
            run_id: ID of the source run, or None.

        Returns:
            A ``(headers, run)`` tuple, or ``(None, None)`` when no run ID was given, the
            tracking URI is not a Databricks URI, or the run cannot be fetched.
        """
        # Run lineage can only be resolved against a Databricks tracking server.
        if run_id is None or not is_databricks_uri(self.tracking_uri):
            return None, None
        host_creds = self.get_tracking_host_creds()
        endpoint, method = _TRACKING_METHOD_TO_INFO[GetRun]
        response = http_request(
            host_creds=host_creds,
            endpoint=endpoint,
            method=method,
            params={"run_id": run_id},
        )
        try:
            verify_rest_response(response, endpoint)
        except MlflowException:
            # Best-effort lookup: a deleted/inaccessible run only means no run link
            # gets recorded for the model version; registration still proceeds.
            _logger.warning(
                f"Unable to fetch model version's source run (with ID {run_id}) "
                "from tracking server. The source run may be deleted or inaccessible to the "
                "current user. No run link will be recorded for the model version."
            )
            return None, None
        # Keep the raw headers: the caller extracts the workspace ID from them.
        headers = response.headers
        js_dict = response.json()
        parsed_response = GetRun.Response()
        parse_dict(js_dict=js_dict, message=parsed_response)
        run = Run.from_proto(parsed_response.run)
        return headers, run
def _get_workspace_id(self, headers):
if headers is None or _DATABRICKS_ORG_ID_HEADER not in headers:
_logger.warning(
"Unable to get model version source run's workspace ID from request headers. "
"No run link will be recorded for the model version"
)
return None
return headers[_DATABRICKS_ORG_ID_HEADER]
def _get_notebook_id(self, run):
if run is None:
return None
return run.data.tags.get(MLFLOW_DATABRICKS_NOTEBOOK_ID, None)
def _get_job_id(self, run):
if run is None:
return None
return run.data.tags.get(MLFLOW_DATABRICKS_JOB_ID, None)
def _get_job_run_id(self, run):
if run is None:
return None
return run.data.tags.get(MLFLOW_DATABRICKS_JOB_RUN_ID, None)
    def _get_lineage_input_sources(self, run):
        """
        Collect the run's Unity Catalog Delta-table inputs for lineage propagation.

        Args:
            run: The source :py:class:`mlflow.entities.Run`, or None.

        Returns:
            A list of ``Securable`` protos (capped at ``_MAX_LINEAGE_DATA_SOURCES``) for
            the run's UC Delta table inputs, or None when there is no run or the run has
            no dataset inputs.
        """
        # Imported lazily to avoid a circular import at module load time.
        from mlflow.data.delta_dataset_source import DeltaDatasetSource

        if run is None:
            return None
        securable_list = []
        if run.inputs is not None:
            for dataset in run.inputs.dataset_inputs:
                dataset_source = mlflow.data.get_source(dataset)
                # Only Delta-table-backed datasets participate in UC lineage.
                if (
                    isinstance(dataset_source, DeltaDatasetSource)
                    and dataset_source._get_source_type() == _DELTA_TABLE
                ):
                    # check if dataset is a uc table and then append
                    if dataset_source.delta_table_name and dataset_source.delta_table_id:
                        table_entity = Table(
                            name=dataset_source.delta_table_name,
                            table_id=dataset_source.delta_table_id,
                        )
                        securable_list.append(Securable(table=table_entity))
            if len(securable_list) > _MAX_LINEAGE_DATA_SOURCES:
                _logger.warning(
                    f"Model version has {len(securable_list)!s} upstream datasets, which "
                    f"exceeds the max of 10 upstream datasets for lineage tracking. Only "
                    f"the first 10 datasets will be propagated to Unity Catalog lineage"
                )
            # Truncate to the first _MAX_LINEAGE_DATA_SOURCES entries (no-op when fewer).
            return securable_list[0:_MAX_LINEAGE_DATA_SOURCES]
        else:
            return None
def _validate_model_signature(self, local_model_path):
# Import Model here instead of in the top level, to avoid circular import; the
# mlflow.models.model module imports from MLflow tracking, which triggers an import of
# this file during store registry initialization
model = _load_model(local_model_path)
signature_required_explanation = (
"All models in the Unity Catalog must be logged with a "
"model signature containing both input and output "
"type specifications. See "
"https://mlflow.org/docs/latest/model/signatures.html#how-to-log-models-with-signatures"
" for details on how to log a model with a signature"
)
if model.signature is None:
raise MlflowException(
"Model passed for registration did not contain any signature metadata. "
f"{signature_required_explanation}"
)
if model.signature.outputs is None:
raise MlflowException(
"Model passed for registration contained a signature that includes only inputs. "
f"{signature_required_explanation}"
)
    def _download_model_weights_if_not_saved(self, local_model_path):
        """
        Transformers models can be saved without the base model weights by setting
        `save_pretrained=False` when saving or logging the model. Such 'weight-less'
        model cannot be directly deployed to model serving, so here we download the
        weights proactively from the HuggingFace hub and save them to the model directory.

        Args:
            local_model_path: Local directory containing the logged MLflow model.

        Raises:
            MlflowException: If the weights cannot be fetched from the HuggingFace hub.
        """
        model = _load_model(local_model_path)
        flavor_conf = model.flavors.get("transformers")
        if not flavor_conf:
            # Not a transformers model; nothing to check or download.
            return

        # Imported lazily so the transformers flavor is only pulled in when needed.
        from mlflow.transformers.flavor_config import FlavorKey
        from mlflow.transformers.model_io import _MODEL_BINARY_FILE_NAME

        # A persisted-weights model has a binary entry in the flavor config, the binary
        # file on disk, and no pinned hub revision (which marks a weight-less save).
        if (
            FlavorKey.MODEL_BINARY in flavor_conf
            and os.path.exists(os.path.join(local_model_path, _MODEL_BINARY_FILE_NAME))
            and FlavorKey.MODEL_REVISION not in flavor_conf
        ):
            # Model weights are already saved
            return
        _logger.info(
            "You are attempting to register a transformers model that does not have persisted "
            "model weights. Attempting to fetch the weights so that the model can be registered "
            "within Unity Catalog."
        )
        try:
            mlflow.transformers.persist_pretrained_model(local_model_path)
        except Exception as e:
            raise MlflowException(
                "Failed to download the model weights from the HuggingFace hub and cannot register "
                "the model in the Unity Catalog. Please ensure that the model was saved with the "
                "correct reference to the HuggingFace hub repository and that you have access to "
                "fetch model weights from the defined repository.",
                error_code=INTERNAL_ERROR,
            ) from e
    @contextmanager
    def _local_model_dir(self, source, local_model_path):
        """
        Context manager yielding a local directory that contains the model artifacts.

        Args:
            source: URI of the model artifacts.
            local_model_path: Already-local copy of the model, if available; yielded as-is.

        Yields:
            Path to a local directory with the model files. Directories downloaded by this
            manager are deleted when the block exits.
        """
        if local_model_path is not None:
            # Caller already has a local copy; no download and no cleanup needed.
            yield local_model_path
        else:
            try:
                local_model_dir = mlflow.artifacts.download_artifacts(
                    artifact_uri=source, tracking_uri=self.tracking_uri
                )
            except Exception as e:
                raise MlflowException(
                    f"Unable to download model artifacts from source artifact location "
                    f"'{source}' in order to upload them to Unity Catalog. Please ensure "
                    f"the source artifact location exists and that you can download from "
                    f"it via mlflow.artifacts.download_artifacts(). Original error: {e}"
                ) from e
            try:
                yield local_model_dir
            finally:
                # Clean up temporary model directory at end of block. We assume a temporary
                # model directory was created if the `source` is not a local path
                # (must be downloaded from remote to a temporary directory) and
                # `local_model_dir` is not a FUSE-mounted path. The check for FUSE-mounted
                # paths is important as mlflow.artifacts.download_artifacts() can return
                # a FUSE mounted path equivalent to the (remote) source path in some cases,
                # e.g. return /dbfs/some/path for source dbfs:/some/path.
                if not os.path.exists(source) and not is_fuse_or_uc_volumes_uri(local_model_dir):
                    shutil.rmtree(local_model_dir)
def _get_logged_model_from_model_id(self, model_id) -> LoggedModel | None:
# load the MLflow LoggedModel by model_id and
if model_id is None:
return None
return mlflow.get_logged_model(model_id)
    def _create_model_version_with_optional_signature_validation(
        self,
        name,
        source,
        run_id=None,
        tags=None,
        run_link=None,
        description=None,
        local_model_path=None,
        model_id: str | None = None,
        bypass_signature_validation: bool = False,
        source_workspace_id: str | None = None,
    ):
        """
        Private method to create a new model version from given source and run ID, with optional
        bypass of signature validation. This bypass is currently only used by the
        DatabricksWorkspaceModelRegistryRestStore to migrate model versions from the Databricks
        workspace registry to Unity Catalog via copy_model_version. We do not want to allow
        normal use of create_model_version to bypass signature validation, so we have this
        private method.

        Args:
            name: Registered model name.
            source: URI indicating the location of the model artifacts.
            run_id: Run ID from MLflow tracking server that generated the model.
            tags: A list of :py:class:`mlflow.entities.model_registry.ModelVersionTag`
                instances associated with this model version.
            run_link: Link to the run from an MLflow tracking server that generated this model.
            description: Description of the version.
            local_model_path: Local path to the MLflow model, if it's already accessible on the
                local filesystem. Can be used by AbstractStores that upload model version files
                to the model registry to avoid a redundant download from the source location when
                logging and registering a model via a single
                mlflow.<flavor>.log_model(..., registered_model_name) call.
            model_id: The ID of the model (from an Experiment) that is being promoted to a
                registered model version, if applicable.
            bypass_signature_validation: Whether to bypass signature validation.
            source_workspace_id: The workspace ID of the source run. If not provided,
                it will be fetched from the run headers.

        Returns:
            A single object of :py:class:`mlflow.entities.model_registry.ModelVersion`
            created in the backend.
        """
        # run_link is unsupported in Unity Catalog; fail fast if a caller supplies it.
        _require_arg_unspecified(arg_name="run_link", arg_value=run_link)
        # When promoting a LoggedModel, its source run supersedes any passed run_id.
        if logged_model := self._get_logged_model_from_model_id(model_id):
            run_id = logged_model.source_run_id
        headers, run = self._get_run_and_headers(run_id)
        if source_workspace_id is None:
            source_workspace_id = self._get_workspace_id(headers)
        notebook_id = self._get_notebook_id(run)
        lineage_securable_list = self._get_lineage_input_sources(run)
        job_id = self._get_job_id(run)
        job_run_id = self._get_job_run_id(run)
        extra_headers = None
        # Propagate notebook/job/dataset lineage to Unity Catalog via a request header.
        if notebook_id is not None or job_id is not None:
            entity_list = []
            lineage_list = None
            if notebook_id is not None:
                notebook_entity = Notebook(id=str(notebook_id))
                entity_list.append(Entity(notebook=notebook_entity))
            if job_id is not None:
                job_entity = Job(id=job_id, job_run_id=job_run_id)
                entity_list.append(Entity(job=job_entity))
            if lineage_securable_list is not None:
                lineage_list = [Lineage(source_securables=lineage_securable_list)]
            lineage_header_info = LineageHeaderInfo(entities=entity_list, lineages=lineage_list)
            # Base64-encode the header value to ensure it's valid ASCII,
            # similar to JWT (see https://stackoverflow.com/a/40347926)
            header_json = message_to_json(lineage_header_info)
            header_base64 = base64.b64encode(header_json.encode())
            extra_headers = {_DATABRICKS_LINEAGE_ID_HEADER: header_base64}
        full_name = get_full_name_from_sc(name, self.spark)
        with self._local_model_dir(source, local_model_path) as local_model_dir:
            if not bypass_signature_validation:
                self._validate_model_signature(local_model_dir)
            # Weight-less transformers models must have their weights fetched before upload.
            self._download_model_weights_if_not_saved(local_model_dir)
            feature_deps = get_feature_dependencies(local_model_dir)
            other_model_deps = get_model_version_dependencies(local_model_dir)
            req_body = message_to_json(
                CreateModelVersionRequest(
                    name=full_name,
                    source=source,
                    run_id=run_id,
                    description=description,
                    tags=uc_model_version_tag_from_mlflow_tags(tags),
                    run_tracking_server_id=source_workspace_id,
                    feature_deps=feature_deps,
                    model_version_dependencies=other_model_deps,
                    model_id=model_id,
                )
            )
            # Three-step flow: create the (PENDING_REGISTRATION) version, upload the
            # model files to its storage location, then finalize it to READY.
            model_version = self._call_endpoint(
                CreateModelVersionRequest, req_body, extra_headers=extra_headers
            ).model_version
            store = self._get_artifact_repo(model_version, full_name)
            store.log_artifacts(local_dir=local_model_dir, artifact_path="")
            finalized_mv = self._finalize_model_version(
                name=full_name, version=model_version.version
            )
            return model_version_from_uc_proto(finalized_mv)
def create_model_version(
self,
name,
source,
run_id=None,
tags=None,
run_link=None,
description=None,
local_model_path=None,
model_id: str | None = None,
):
"""
Create a new model version from given source and run ID.
Args:
name: Registered model name.
source: URI indicating the location of the model artifacts.
run_id: Run ID from MLflow tracking server that generated the model.
tags: A list of :py:class:`mlflow.entities.model_registry.ModelVersionTag`
instances associated with this model version.
run_link: Link to the run from an MLflow tracking server that generated this model.
description: Description of the version.
local_model_path: Local path to the MLflow model, if it's already accessible on the
local filesystem. Can be used by AbstractStores that upload model version files
to the model registry to avoid a redundant download from the source location when
logging and registering a model via a single
mlflow.<flavor>.log_model(..., registered_model_name) call.
model_id: The ID of the model (from an Experiment) that is being promoted to a
registered model version, if applicable.
Returns:
A single object of :py:class:`mlflow.entities.model_registry.ModelVersion`
created in the backend.
"""
return self._create_model_version_with_optional_signature_validation(
name=name,
source=source,
run_id=run_id,
tags=tags,
run_link=run_link,
description=description,
local_model_path=local_model_path,
model_id=model_id,
bypass_signature_validation=False,
)
    def _get_artifact_repo(self, model_version, model_name=None):
        """
        Select the artifact repository used to transfer this model version's files.

        Args:
            model_version: Protobuf ModelVersion to build the repository for.
            model_name: Full registered model name (used by the SDK-backed repository).

        Returns:
            An artifact repository appropriate for the workspace's storage configuration.
        """

        def base_credential_refresh_def():
            # Re-fetches short-lived scoped credentials when the current ones expire.
            return self._get_temporary_model_version_write_credentials(
                name=model_version.name, version=model_version.version
            )

        # Prefer the Databricks-SDK-backed repository when the workspace enables it.
        if is_databricks_sdk_models_artifact_repository_enabled(self.get_host_creds()):
            return DatabricksSDKModelsArtifactRepository(model_name, model_version.version)
        scoped_token = base_credential_refresh_def()
        if scoped_token.storage_mode == StorageMode.DEFAULT_STORAGE:
            # Default storage transfers files via presigned URLs rather than
            # direct cloud-storage credentials.
            return PresignedUrlArtifactRepository(
                self.get_host_creds(), model_version.name, model_version.version
            )
        return get_artifact_repo_from_storage_info(
            storage_location=model_version.storage_location,
            scoped_token=scoped_token,
            base_credential_refresh_def=base_credential_refresh_def,
        )
    def transition_model_version_stage(self, name, version, stage, archive_existing_versions):
        """
        Update model version stage.

        Args:
            name: Registered model name.
            version: Registered model version.
            stage: New desired stage for this model version.
            archive_existing_versions: If this flag is set to ``True``, all existing model
                versions in the stage will be automatically moved to the "archived" stage. Only
                valid when ``stage`` is ``"staging"`` or ``"production"`` otherwise an error will be
                raised.

        Raises:
            MlflowException: Always; stage transitions are unsupported in Unity Catalog.
                The error message directs callers to alias-based deployment instead.
        """
        # Stages are not part of the UC registry model; surface a helpful error.
        _raise_unsupported_method(
            method="transition_model_version_stage",
            message="We recommend using aliases instead of stages for more flexible model "
            "deployment management. You can set an alias on a registered model using "
            "`MlflowClient().set_registered_model_alias(name, alias, version)` and load a model "
            "version by alias using the URI 'models:/your_model_name@your_alias', e.g. "
            "`mlflow.pyfunc.load_model('models:/your_model_name@your_alias')`.",
        )
def update_model_version(self, name, version, description):
"""
Update metadata associated with a model version in backend.
Args:
name: Registered model name.
version: Registered model version.
description: New model description.
Returns:
A single :py:class:`mlflow.entities.model_registry.ModelVersion` object.
"""
full_name = get_full_name_from_sc(name, self.spark)
req_body = message_to_json(
UpdateModelVersionRequest(name=full_name, version=str(version), description=description)
)
response_proto = self._call_endpoint(UpdateModelVersionRequest, req_body)
return model_version_from_uc_proto(response_proto.model_version)
def delete_model_version(self, name, version):
"""
Delete model version in backend.
Args:
name: Registered model name.
version: Registered model version.
Returns:
None
"""
full_name = get_full_name_from_sc(name, self.spark)
req_body = message_to_json(DeleteModelVersionRequest(name=full_name, version=str(version)))
self._call_endpoint(DeleteModelVersionRequest, req_body)
def get_model_version(self, name, version):
"""
Get the model version instance by name and version.
Args:
name: Registered model name.
version: Registered model version.
Returns:
A single :py:class:`mlflow.entities.model_registry.ModelVersion` object.
"""
full_name = get_full_name_from_sc(name, self.spark)
req_body = message_to_json(GetModelVersionRequest(name=full_name, version=str(version)))
response_proto = self._call_endpoint(GetModelVersionRequest, req_body)
return model_version_from_uc_proto(response_proto.model_version)
def get_model_version_download_uri(self, name, version):
"""
Get the download location in Model Registry for this model version.
NOTE: For first version of Model Registry, since the models are not copied over to another
location, download URI points to input source path.
Args:
name: Registered model name.
version: Registered model version.
Returns:
A single URI location that allows reads for downloading.
"""
full_name = get_full_name_from_sc(name, self.spark)
req_body = message_to_json(
GetModelVersionDownloadUriRequest(name=full_name, version=str(version))
)
response_proto = self._call_endpoint(GetModelVersionDownloadUriRequest, req_body)
return response_proto.artifact_uri
def search_model_versions(
self, filter_string=None, max_results=None, order_by=None, page_token=None
):
"""
Search for model versions in backend that satisfy the filter criteria.
Args:
filter_string: A filter string expression. Currently supports a single filter
condition either name of model like ``name = 'model_name'`` or
``run_id = '...'``.
max_results: Maximum number of model versions desired.
order_by: List of column names with ASC|DESC annotation, to be used for ordering
matching search results.
page_token: Token specifying the next page of results. It should be obtained from
a ``search_model_versions`` call.
Returns:
A PagedList of :py:class:`mlflow.entities.model_registry.ModelVersion`
objects that satisfy the search expressions. The pagination token for the next
page can be obtained via the ``token`` attribute of the object.
"""
_require_arg_unspecified(arg_name="order_by", arg_value=order_by)
req_body = message_to_json(
SearchModelVersionsRequest(
filter=filter_string, page_token=page_token, max_results=max_results
)
)
response_proto = self._call_endpoint(SearchModelVersionsRequest, req_body)
model_versions = [
model_version_search_from_uc_proto(mvd) for mvd in response_proto.model_versions
]
return PagedList(model_versions, response_proto.next_page_token)
def set_model_version_tag(self, name, version, tag):
"""
Set a tag for the model version.
Args:
name: Registered model name.
version: Registered model version.
tag: :py:class:`mlflow.entities.model_registry.ModelVersionTag` instance to log.
"""
full_name = get_full_name_from_sc(name, self.spark)
req_body = message_to_json(
SetModelVersionTagRequest(
name=full_name, version=str(version), key=tag.key, value=tag.value
)
)
self._call_endpoint(SetModelVersionTagRequest, req_body)
def delete_model_version_tag(self, name, version, key):
"""
Delete a tag associated with the model version.
Args:
name: Registered model name.
version: Registered model version.
key: Tag key.
"""
full_name = get_full_name_from_sc(name, self.spark)
req_body = message_to_json(
DeleteModelVersionTagRequest(name=full_name, version=version, key=key)
)
self._call_endpoint(DeleteModelVersionTagRequest, req_body)
def set_registered_model_alias(self, name, alias, version):
"""
Set a registered model alias pointing to a model version.
Args:
name: Registered model name.
alias: Name of the alias.
version: Registered model version number.
Returns:
None
"""
full_name = get_full_name_from_sc(name, self.spark)
req_body = message_to_json(
SetRegisteredModelAliasRequest(name=full_name, alias=alias, version=str(version))
)
self._call_endpoint(SetRegisteredModelAliasRequest, req_body)
def delete_registered_model_alias(self, name, alias):
"""
Delete an alias associated with a registered model.
Args:
name: Registered model name.
alias: Name of the alias.
Returns:
None
"""
full_name = get_full_name_from_sc(name, self.spark)
req_body = message_to_json(DeleteRegisteredModelAliasRequest(name=full_name, alias=alias))
self._call_endpoint(DeleteRegisteredModelAliasRequest, req_body)
def get_model_version_by_alias(self, name, alias):
"""
Get the model version instance by name and alias.
Args:
name: Registered model name.
alias: Name of the alias.
Returns:
A single :py:class:`mlflow.entities.model_registry.ModelVersion` object.
"""
full_name = get_full_name_from_sc(name, self.spark)
req_body = message_to_json(GetModelVersionByAliasRequest(name=full_name, alias=alias))
response_proto = self._call_endpoint(GetModelVersionByAliasRequest, req_body)
return model_version_from_uc_proto(response_proto.model_version)
    def _await_model_version_creation(self, mv, await_creation_for):
        """
        No-op for Unity Catalog: does not wait for the model version to become READY,
        as a successful creation will immediately place the model version in a READY
        state.

        Args:
            mv: The newly created model version (unused).
            await_creation_for: Maximum number of seconds to wait (unused).
        """
# Prompt-related method overrides for UC
def create_prompt(
self,
name: str,
description: str | None = None,
tags: dict[str, str] | None = None,
) -> Prompt:
"""
Create a new prompt in Unity Catalog (metadata only, no initial version).
"""
# Create a Prompt object with the provided fields
prompt_proto = ProtoPrompt()
prompt_proto.name = name
if description:
prompt_proto.description = description
if tags:
prompt_proto.tags.extend(mlflow_tags_to_proto(tags))
req_body = message_to_json(
CreatePromptRequest(
name=name,
prompt=prompt_proto,
)
)
response_proto = self._call_endpoint(CreatePromptRequest, req_body)
return proto_info_to_mlflow_prompt_info(response_proto, tags or {})
def search_prompts(
self,
filter_string: str | None = None,
max_results: int | None = None,
order_by: list[str] | None = None,
page_token: str | None = None,
) -> PagedList[Prompt]:
"""
Search for prompts in Unity Catalog.
Args:
filter_string: Filter string that must include catalog and schema in the format:
"catalog = 'catalog_name' AND schema = 'schema_name'"
max_results: Maximum number of results to return
order_by: List of fields to order by (not used in current implementation)
page_token: Token for pagination
"""
# Parse catalog and schema from filter string
if filter_string:
parsed_filter = self._parse_catalog_schema_from_filter(filter_string)
else:
raise MlflowException(
"For Unity Catalog prompt registries, you must specify catalog and schema "
"in the filter string: \"catalog = 'catalog_name' AND schema = 'schema_name'\"",
INVALID_PARAMETER_VALUE,
)
# Build the request with Unity Catalog schema
unity_catalog_schema = UnityCatalogSchema(
catalog_name=parsed_filter.catalog_name, schema_name=parsed_filter.schema_name
)
req_body = message_to_json(
SearchPromptsRequest(
catalog_schema=unity_catalog_schema,
filter=parsed_filter.remaining_filter,
max_results=max_results,
page_token=page_token,
)
)
response_proto = self._call_endpoint(SearchPromptsRequest, req_body)
# For UC, only use the basic prompt info without extra tag fetching
prompts = [
proto_info_to_mlflow_prompt_info(prompt_info, {})
for prompt_info in response_proto.prompts
]
return PagedList(prompts, response_proto.next_page_token)
def _parse_catalog_schema_from_filter(self, filter_string: str | None) -> _CatalogSchemaFilter:
"""
Parse catalog and schema from filter string for Unity Catalog using regex.
Expects filter format: "catalog = 'catalog_name' AND schema = 'schema_name'"
Args:
filter_string: Filter string containing catalog and schema
Returns:
_CatalogSchemaFilter object with catalog_name, schema_name, and remaining_filter
Raises:
MlflowException: If filter format is invalid for Unity Catalog
"""
if not filter_string:
raise MlflowException(
"For Unity Catalog prompt registries, you must specify catalog and schema "
"in the filter string: \"catalog = 'catalog_name' AND schema = 'schema_name'\"",
INVALID_PARAMETER_VALUE,
)
# Use pre-compiled regex patterns for better performance
catalog_match = _CATALOG_PATTERN.search(filter_string)
schema_match = _SCHEMA_PATTERN.search(filter_string)
if not catalog_match or not schema_match:
raise MlflowException(
"For Unity Catalog prompt registries, filter string must include both "
"catalog and schema in the format: "
"\"catalog = 'catalog_name' AND schema = 'schema_name'\". "
f"Got: {filter_string}",
INVALID_PARAMETER_VALUE,
)
catalog_name = catalog_match.group(1)
schema_name = schema_match.group(1)
# Remove catalog and schema from filter string to get remaining filters
# First, normalize the filter by splitting on AND and rebuilding
# without catalog/schema parts
parts = re.split(r"\s+AND\s+", filter_string, flags=re.IGNORECASE)
remaining_parts = []
for part in parts:
part = part.strip()
# Skip parts that match catalog or schema patterns
if not (_CATALOG_PATTERN.match(part) or _SCHEMA_PATTERN.match(part)):
remaining_parts.append(part)
# Rejoin the remaining parts
remaining_filter = " AND ".join(remaining_parts) if remaining_parts else None
return _CatalogSchemaFilter(catalog_name, schema_name, remaining_filter)
def delete_prompt(self, name: str) -> None:
"""
Delete a prompt from Unity Catalog.
"""
req_body = message_to_json(DeletePromptRequest(name=name))
endpoint, method = self._get_endpoint_from_method(DeletePromptRequest)
self._edit_endpoint_and_call(
endpoint=endpoint,
method=method,
req_body=req_body,
name=name,
proto_name=DeletePromptRequest,
)
def set_prompt_tag(self, name: str, key: str, value: str) -> None:
"""
Set a tag on a prompt in Unity Catalog.
"""
req_body = message_to_json(SetPromptTagRequest(name=name, key=key, value=value))
endpoint, method = self._get_endpoint_from_method(SetPromptTagRequest)
self._edit_endpoint_and_call(
endpoint=endpoint,
method=method,
req_body=req_body,
name=name,
key=key,
proto_name=SetPromptTagRequest,
)
def delete_prompt_tag(self, name: str, key: str) -> None:
"""
Delete a tag from a prompt in Unity Catalog.
"""
req_body = message_to_json(DeletePromptTagRequest(name=name, key=key))
endpoint, method = self._get_endpoint_from_method(DeletePromptTagRequest)
self._edit_endpoint_and_call(
endpoint=endpoint,
method=method,
req_body=req_body,
name=name,
key=key,
proto_name=DeletePromptTagRequest,
)
def get_prompt(self, name: str) -> Prompt | None:
"""
Get prompt by name from Unity Catalog.
"""
try:
req_body = message_to_json(GetPromptRequest(name=name))
endpoint, method = self._get_endpoint_from_method(GetPromptRequest)
response_proto = self._edit_endpoint_and_call(
endpoint=endpoint,
method=method,
req_body=req_body,
name=name,
proto_name=GetPromptRequest,
)
return proto_info_to_mlflow_prompt_info(response_proto, {})
except Exception as e:
if isinstance(e, MlflowException) and e.error_code == ErrorCode.Name(
RESOURCE_DOES_NOT_EXIST
):
return None
raise
def create_prompt_version(
self,
name: str,
template: str | list[dict[str, Any]],
description: str | None = None,
tags: dict[str, str] | None = None,
response_format: type[BaseModel] | dict[str, Any] | None = None,
) -> PromptVersion:
"""
Create a new prompt version in Unity Catalog.
"""
# Create a PromptVersion object with the provided fields
prompt_version_proto = ProtoPromptVersion()
prompt_version_proto.name = name
# JSON-encode the template for Unity Catalog server
prompt_version_proto.template = json.dumps(template)
# Note: version will be set by the backend when creating a new version
# We don't set it here as it's generated server-side
if description:
prompt_version_proto.description = description
final_tags = tags.copy() if tags else {}
if response_format:
final_tags[RESPONSE_FORMAT_TAG_KEY] = json.dumps(
PromptVersion.convert_response_format_to_dict(response_format)
)
if isinstance(template, str):
final_tags[PROMPT_TYPE_TAG_KEY] = PROMPT_TYPE_TEXT
else:
final_tags[PROMPT_TYPE_TAG_KEY] = PROMPT_TYPE_CHAT
if final_tags:
prompt_version_proto.tags.extend(mlflow_tags_to_proto_version_tags(final_tags))
req_body = message_to_json(
CreatePromptVersionRequest(
name=name,
prompt_version=prompt_version_proto,
)
)
endpoint, method = self._get_endpoint_from_method(CreatePromptVersionRequest)
response_proto = self._edit_endpoint_and_call(
endpoint=endpoint,
method=method,
req_body=req_body,
name=name,
proto_name=CreatePromptVersionRequest,
)
return proto_to_mlflow_prompt(response_proto)
def get_prompt_version(self, name: str, version: str | int) -> PromptVersion | None:
"""
Get a specific prompt version from Unity Catalog.
"""
try:
req_body = message_to_json(GetPromptVersionRequest(name=name, version=str(version)))
endpoint, method = self._get_endpoint_from_method(GetPromptVersionRequest)
response_proto = self._edit_endpoint_and_call(
endpoint=endpoint,
method=method,
req_body=req_body,
name=name,
version=version,
proto_name=GetPromptVersionRequest,
)
# No longer fetch prompt-level tags - keep them completely separate
return proto_to_mlflow_prompt(response_proto)
except Exception as e:
if isinstance(e, MlflowException) and e.error_code == ErrorCode.Name(
RESOURCE_DOES_NOT_EXIST
):
return None
raise
def delete_prompt_version(self, name: str, version: str | int) -> None:
"""
Delete a prompt version from Unity Catalog.
"""
# Delete the specific version only
req_body = message_to_json(DeletePromptVersionRequest(name=name, version=str(version)))
endpoint, method = self._get_endpoint_from_method(DeletePromptVersionRequest)
self._edit_endpoint_and_call(
endpoint=endpoint,
method=method,
req_body=req_body,
name=name,
version=version,
proto_name=DeletePromptVersionRequest,
)
def search_prompt_versions(
self, name: str, max_results: int | None = None, page_token: str | None = None
) -> SearchPromptVersionsResponse:
"""
Search prompt versions for a given prompt name in Unity Catalog.
Note: Unity Catalog server uses a non-standard endpoint pattern for this operation.
Args:
name: Name of the prompt to search versions for
max_results: Maximum number of versions to return
page_token: Token for pagination
Returns:
SearchPromptVersionsResponse containing the list of versions
"""
req_body = message_to_json(
SearchPromptVersionsRequest(name=name, max_results=max_results, page_token=page_token)
)
endpoint, method = self._get_endpoint_from_method(SearchPromptVersionsRequest)
return self._edit_endpoint_and_call(
endpoint=endpoint,
method=method,
req_body=req_body,
name=name,
proto_name=SearchPromptVersionsRequest,
)
def set_prompt_version_tag(self, name: str, version: str | int, key: str, value: str) -> None:
"""
Set a tag on a prompt version in Unity Catalog.
"""
req_body = message_to_json(
SetPromptVersionTagRequest(name=name, version=str(version), key=key, value=value)
)
endpoint, method = self._get_endpoint_from_method(SetPromptVersionTagRequest)
self._edit_endpoint_and_call(
endpoint=endpoint,
method=method,
req_body=req_body,
name=name,
version=version,
key=key,
proto_name=SetPromptVersionTagRequest,
)
def delete_prompt_version_tag(self, name: str, version: str | int, key: str) -> None:
"""
Delete a tag from a prompt version in Unity Catalog.
"""
req_body = message_to_json(
DeletePromptVersionTagRequest(name=name, version=str(version), key=key)
)
endpoint, method = self._get_endpoint_from_method(DeletePromptVersionTagRequest)
self._edit_endpoint_and_call(
endpoint=endpoint,
method=method,
req_body=req_body,
name=name,
version=version,
key=key,
proto_name=DeletePromptVersionTagRequest,
)
def get_prompt_version_by_alias(self, name: str, alias: str) -> PromptVersion | None:
"""
Get a prompt version by alias from Unity Catalog.
"""
try:
req_body = message_to_json(GetPromptVersionByAliasRequest(name=name, alias=alias))
endpoint, method = self._get_endpoint_from_method(GetPromptVersionByAliasRequest)
response_proto = self._edit_endpoint_and_call(
endpoint=endpoint,
method=method,
req_body=req_body,
name=name,
alias=alias,
proto_name=GetPromptVersionByAliasRequest,
)
# No longer fetch prompt-level tags - keep them completely separate
return proto_to_mlflow_prompt(response_proto)
except Exception as e:
if isinstance(e, MlflowException) and e.error_code == ErrorCode.Name(
RESOURCE_DOES_NOT_EXIST
):
return None
raise
def set_prompt_alias(self, name: str, alias: str, version: str | int) -> None:
"""
Set an alias for a prompt version in Unity Catalog.
"""
req_body = message_to_json(
SetPromptAliasRequest(name=name, alias=alias, version=str(version))
)
endpoint, method = self._get_endpoint_from_method(SetPromptAliasRequest)
self._edit_endpoint_and_call(
endpoint=endpoint,
method=method,
req_body=req_body,
name=name,
alias=alias,
version=version,
proto_name=SetPromptAliasRequest,
)
def delete_prompt_alias(self, name: str, alias: str) -> None:
"""
Delete an alias from a prompt in Unity Catalog.
"""
req_body = message_to_json(DeletePromptAliasRequest(name=name, alias=alias))
endpoint, method = self._get_endpoint_from_method(DeletePromptAliasRequest)
self._edit_endpoint_and_call(
endpoint=endpoint,
method=method,
req_body=req_body,
name=name,
alias=alias,
proto_name=DeletePromptAliasRequest,
)
def link_prompt_version_to_model(self, name: str, version: str, model_id: str) -> None:
"""
Link a prompt version to a model in Unity Catalog.
Args:
name: Name of the prompt.
version: Version of the prompt to link.
model_id: ID of the model to link to.
"""
# Call the default implementation, since the LinkPromptVersionsToModels API
# will initially be a no-op until the Databricks backend supports it
super().link_prompt_version_to_model(name=name, version=version, model_id=model_id)
prompt_version_entry = PromptVersionLinkEntry(name=name, version=version)
req_body = message_to_json(
LinkPromptVersionsToModelsRequest(
prompt_versions=[prompt_version_entry], model_ids=[model_id]
)
)
endpoint, method = self._get_endpoint_from_method(LinkPromptVersionsToModelsRequest)
try:
# NB: This will not raise an exception if the backend does not support linking.
# We do this to prioritize reduction in errors and log spam while the prompt
# registry remains experimental
self._edit_endpoint_and_call(
endpoint=endpoint,
method=method,
req_body=req_body,
name=name,
version=version,
model_id=model_id,
proto_name=LinkPromptVersionsToModelsRequest,
)
except Exception:
_logger.debug("Failed to link prompt version to model in unity catalog", exc_info=True)
def link_prompts_to_trace(self, prompt_versions: list[PromptVersion], trace_id: str) -> None:
"""
Link multiple prompt versions to a trace in Unity Catalog.
Args:
prompt_versions: List of PromptVersion objects to link.
trace_id: Trace ID to link to each prompt version.
"""
prompt_version_entries = [
PromptVersionLinkEntry(name=pv.name, version=str(pv.version)) for pv in prompt_versions
]
batch_size = 25
endpoint, method = self._get_endpoint_from_method(LinkPromptsToTracesRequest)
for i in range(0, len(prompt_version_entries), batch_size):
batch = prompt_version_entries[i : i + batch_size]
req_body = message_to_json(
LinkPromptsToTracesRequest(prompt_versions=batch, trace_ids=[trace_id])
)
try:
self._edit_endpoint_and_call(
endpoint=endpoint,
method=method,
req_body=req_body,
proto_name=LinkPromptsToTracesRequest,
)
except Exception:
_logger.debug("Failed to link prompts to traces in unity catalog", exc_info=True)
def link_prompt_version_to_run(self, name: str, version: str, run_id: str) -> None:
"""
Link a prompt version to a run in Unity Catalog.
Args:
name: Name of the prompt.
version: Version of the prompt to link.
run_id: ID of the run to link to.
"""
super().link_prompt_version_to_run(name=name, version=version, run_id=run_id)
prompt_version_entry = PromptVersionLinkEntry(name=name, version=version)
endpoint, method = self._get_endpoint_from_method(LinkPromptVersionsToRunsRequest)
req_body = message_to_json(
LinkPromptVersionsToRunsRequest(
prompt_versions=[prompt_version_entry], run_ids=[run_id]
)
)
try:
self._edit_endpoint_and_call(
endpoint=endpoint,
method=method,
req_body=req_body,
proto_name=LinkPromptVersionsToRunsRequest,
)
except Exception:
_logger.debug("Failed to link prompt version to run in unity catalog", exc_info=True)
def _edit_endpoint_and_call(self, endpoint, method, req_body, proto_name, **kwargs):
"""
Edit endpoint URL with parameters and make the call.
Args:
endpoint: URL template with placeholders like {name}, {key}
method: HTTP method
req_body: Request body
proto_name: Protobuf message class for response
**kwargs: Parameters to substitute in the endpoint template
"""
# Replace placeholders in endpoint with actual values
for key, value in kwargs.items():
if value is not None:
endpoint = endpoint.replace(f"{{{key}}}", str(value))
# Make the API call
return call_endpoint(
self.get_host_creds(),
endpoint=endpoint,
method=method,
json_body=req_body,
response_proto=self._get_response_from_method(proto_name),
)
| UcModelRegistryStore |
python | scikit-learn__scikit-learn | sklearn/feature_selection/_base.py | {
"start": 609,
"end": 9456
} | class ____(TransformerMixin, metaclass=ABCMeta):
"""
Transformer mixin that performs feature selection given a support mask
This mixin provides a feature selector implementation with `transform` and
`inverse_transform` functionality given an implementation of
`_get_support_mask`.
Examples
--------
>>> import numpy as np
>>> from sklearn.datasets import load_iris
>>> from sklearn.base import BaseEstimator
>>> from sklearn.feature_selection import SelectorMixin
>>> class FeatureSelector(SelectorMixin, BaseEstimator):
... def fit(self, X, y=None):
... self.n_features_in_ = X.shape[1]
... return self
... def _get_support_mask(self):
... mask = np.zeros(self.n_features_in_, dtype=bool)
... mask[:2] = True # select the first two features
... return mask
>>> X, y = load_iris(return_X_y=True)
>>> FeatureSelector().fit_transform(X, y).shape
(150, 2)
"""
def get_support(self, indices=False):
"""
Get a mask, or integer index, of the features selected.
Parameters
----------
indices : bool, default=False
If True, the return value will be an array of integers, rather
than a boolean mask.
Returns
-------
support : array
An index that selects the retained features from a feature vector.
If `indices` is False, this is a boolean array of shape
[# input features], in which an element is True iff its
corresponding feature is selected for retention. If `indices` is
True, this is an integer array of shape [# output features] whose
values are indices into the input feature vector.
"""
mask = self._get_support_mask()
return mask if not indices else np.nonzero(mask)[0]
@abstractmethod
def _get_support_mask(self):
"""
Get the boolean mask indicating which features are selected
Returns
-------
support : boolean array of shape [# input features]
An element is True iff its corresponding feature is selected for
retention.
"""
def transform(self, X):
"""Reduce X to the selected features.
Parameters
----------
X : array of shape [n_samples, n_features]
The input samples.
Returns
-------
X_r : array of shape [n_samples, n_selected_features]
The input samples with only the selected features.
"""
# Preserve X when X is a dataframe and the output is configured to
# be pandas.
output_config_dense = _get_output_config("transform", estimator=self)["dense"]
preserve_X = output_config_dense != "default" and _is_pandas_df(X)
# note: we use get_tags instead of __sklearn_tags__ because this is a
# public Mixin.
X = validate_data(
self,
X,
dtype=None,
accept_sparse="csr",
ensure_all_finite=not get_tags(self).input_tags.allow_nan,
skip_check_array=preserve_X,
reset=False,
)
return self._transform(X)
def _transform(self, X):
"""Reduce X to the selected features."""
mask = self.get_support()
if not mask.any():
warnings.warn(
(
"No features were selected: either the data is"
" too noisy or the selection test too strict."
),
UserWarning,
)
if hasattr(X, "iloc"):
return X.iloc[:, :0]
return np.empty(0, dtype=X.dtype).reshape((X.shape[0], 0))
return _safe_indexing(X, mask, axis=1)
def inverse_transform(self, X):
"""Reverse the transformation operation.
Parameters
----------
X : array of shape [n_samples, n_selected_features]
The input samples.
Returns
-------
X_original : array of shape [n_samples, n_original_features]
`X` with columns of zeros inserted where features would have
been removed by :meth:`transform`.
"""
if issparse(X):
X = X.tocsc()
# insert additional entries in indptr:
# e.g. if transform changed indptr from [0 2 6 7] to [0 2 3]
# col_nonzeros here will be [2 0 1] so indptr becomes [0 2 2 3]
it = self.inverse_transform(np.diff(X.indptr).reshape(1, -1))
col_nonzeros = it.ravel()
indptr = np.concatenate([[0], np.cumsum(col_nonzeros)])
Xt = csc_matrix(
(X.data, X.indices, indptr),
shape=(X.shape[0], len(indptr) - 1),
dtype=X.dtype,
)
return Xt
support = self.get_support()
X = check_array(X, dtype=None)
if support.sum() != X.shape[1]:
raise ValueError("X has a different shape than during fitting.")
if X.ndim == 1:
X = X[None, :]
Xt = np.zeros((X.shape[0], support.size), dtype=X.dtype)
Xt[:, support] = X
return Xt
def get_feature_names_out(self, input_features=None):
"""Mask feature names according to selected features.
Parameters
----------
input_features : array-like of str or None, default=None
Input features.
- If `input_features` is `None`, then `feature_names_in_` is
used as feature names in. If `feature_names_in_` is not defined,
then the following input feature names are generated:
`["x0", "x1", ..., "x(n_features_in_ - 1)"]`.
- If `input_features` is an array-like, then `input_features` must
match `feature_names_in_` if `feature_names_in_` is defined.
Returns
-------
feature_names_out : ndarray of str objects
Transformed feature names.
"""
check_is_fitted(self)
input_features = _check_feature_names_in(self, input_features)
return input_features[self.get_support()]
def _get_feature_importances(estimator, getter, transform_func=None, norm_order=1):
"""
Retrieve and aggregate (ndim > 1) the feature importances
from an estimator. Also optionally applies transformation.
Parameters
----------
estimator : estimator
A scikit-learn estimator from which we want to get the feature
importances.
getter : "auto", str or callable
An attribute or a callable to get the feature importance. If `"auto"`,
`estimator` is expected to expose `coef_` or `feature_importances`.
transform_func : {"norm", "square"}, default=None
The transform to apply to the feature importances. By default (`None`)
no transformation is applied.
norm_order : int, default=1
The norm order to apply when `transform_func="norm"`. Only applied
when `importances.ndim > 1`.
Returns
-------
importances : ndarray of shape (n_features,)
The features importances, optionally transformed.
"""
if isinstance(getter, str):
if getter == "auto":
if hasattr(estimator, "coef_"):
getter = attrgetter("coef_")
elif hasattr(estimator, "feature_importances_"):
getter = attrgetter("feature_importances_")
else:
raise ValueError(
"when `importance_getter=='auto'`, the underlying "
f"estimator {estimator.__class__.__name__} should have "
"`coef_` or `feature_importances_` attribute. Either "
"pass a fitted estimator to feature selector or call fit "
"before calling transform."
)
else:
getter = attrgetter(getter)
elif not callable(getter):
raise ValueError("`importance_getter` has to be a string or `callable`")
importances = getter(estimator)
if transform_func is None:
return importances
elif transform_func == "norm":
if importances.ndim == 1:
importances = np.abs(importances)
else:
importances = np.linalg.norm(importances, axis=0, ord=norm_order)
elif transform_func == "square":
if importances.ndim == 1:
importances = safe_sqr(importances)
else:
importances = safe_sqr(importances).sum(axis=0)
else:
raise ValueError(
"Valid values for `transform_func` are "
"None, 'norm' and 'square'. Those two "
"transformation are only supported now"
)
return importances
| SelectorMixin |
python | mahmoud__boltons | boltons/iterutils.py | {
"start": 49886,
"end": 51608
} | class ____:
"""The GUIDerator is an iterator that yields a globally-unique
identifier (GUID) on every iteration. The GUIDs produced are
hexadecimal strings.
Testing shows it to be around 12x faster than the uuid module. By
default it is also more compact, partly due to its default 96-bit
(24-hexdigit) length. 96 bits of randomness means that there is a
1 in 2 ^ 32 chance of collision after 2 ^ 64 iterations. If more
or less uniqueness is desired, the *size* argument can be adjusted
accordingly.
Args:
size (int): character length of the GUID, defaults to 24. Lengths
between 20 and 36 are considered valid.
The GUIDerator has built-in fork protection that causes it to
detect a fork on next iteration and reseed accordingly.
"""
def __init__(self, size=24):
self.size = size
if size < 20 or size > 36:
raise ValueError('expected 20 < size <= 36')
import hashlib
self._sha1 = hashlib.sha1
self.count = itertools.count()
self.reseed()
def reseed(self):
import socket
self.pid = os.getpid()
self.salt = '-'.join([str(self.pid),
socket.gethostname() or '<nohostname>',
str(time.time()),
os.urandom(6).hex()])
return
def __iter__(self):
return self
def __next__(self):
if os.getpid() != self.pid:
self.reseed()
target_bytes = (self.salt + str(next(self.count))).encode('utf8')
hash_text = self._sha1(target_bytes).hexdigest()[:self.size]
return hash_text
next = __next__
| GUIDerator |
python | ray-project__ray | python/ray/serve/_private/benchmarks/locust_utils.py | {
"start": 499,
"end": 762
} | class ____:
history: List[Dict]
total_requests: int
num_failures: int
avg_latency: float
p50_latency: float
p90_latency: float
p99_latency: float
avg_rps: float
stats_in_stages: List[PerformanceStats]
@dataclass
| LocustTestResults |
python | wandb__wandb | wandb/sdk/artifacts/_gqlutils.py | {
"start": 4211,
"end": 8872
} | class ____:
org_name: str
entity_name: str
def __contains__(self, other: str) -> bool:
return other in {self.org_name, self.entity_name}
def resolve_org_entity_name(
client: RetryingClient,
non_org_entity: str | None,
org_or_entity: str | None = None,
) -> str:
# Resolve the portfolio's org entity name.
#
# The `org_or_org_entity` parameter may be empty, an org display name, or an
# org entity name.
#
# If the server cannot fetch the portfolio's org name, return the provided
# value or raise an error if it is empty. Otherwise, return the fetched
# value after validating that the given organization, if provided, matches
# either the display or entity name.
if not non_org_entity:
raise ValueError("Entity name is required to resolve org entity name.")
if "orgEntity" not in allowed_fields(client, "Organization"):
if org_or_entity:
# Server doesn't support fetching orgEntity to match against,
# so assume orgEntity as provided is already correct.
return org_or_entity
raise UnsupportedError(
"Fetching Registry artifacts without inputting an organization "
"is unavailable for your server version. "
"Please upgrade your server to 0.50.0 or later."
)
# Otherwise, fetch candidate orgs to verify or identify the correct orgEntity
# name when possible.
entity = org_info_from_entity(client, non_org_entity)
# Parse possible organization(s) from the response
# ----------------------------------------------------------------------------
# If a team entity was provided, a single organization should exist under
# the team/org entity type.
if entity and (org := entity.organization) and (org_entity := org.org_entity):
# Ensure the provided name, if given, matches the org or org entity name before
# returning the org entity.
org_info = OrgInfo(org_name=org.name, entity_name=org_entity.name)
if (not org_or_entity) or (org_or_entity in org_info):
return org_entity.name
# ----------------------------------------------------------------------------
# If a personal entity was provided, the user may belong to multiple
# organizations.
if entity and (user := entity.user) and (orgs := user.organizations):
org_infos = [
OrgInfo(org_name=org.name, entity_name=org_entity.name)
for org in orgs
if (org_entity := org.org_entity)
]
if org_or_entity:
with suppress(StopIteration):
return next(
info.entity_name for info in org_infos if (org_or_entity in info)
)
if len(org_infos) == 1:
raise ValueError(
f"Expecting the organization name or entity name to match {org_infos[0].org_name!r} "
f"and cannot be linked/fetched with {org_or_entity!r}. "
"Please update the target path with the correct organization name."
)
else:
raise ValueError(
"Personal entity belongs to multiple organizations "
f"and cannot be linked/fetched with {org_or_entity!r}. "
"Please update the target path with the correct organization name "
"or use a team entity in the entity settings."
)
else:
# If no input organization provided, error if entity belongs to:
# - multiple orgs, because we cannot determine which one to use.
# - no orgs, because there's nothing to use.
return one(
(org.entity_name for org in org_infos),
too_short=ValueError(
f"Unable to resolve an organization associated with personal entity: {non_org_entity!r}. "
"This could be because its a personal entity that doesn't belong to any organizations. "
"Please specify the organization in the Registry path or use a team entity in the entity settings."
),
too_long=ValueError(
f"Personal entity {non_org_entity!r} belongs to multiple organizations "
"and cannot be used without specifying the organization name. "
"Please specify the organization in the Registry path or use a team entity in the entity settings."
),
)
raise ValueError(f"Unable to find organization for entity {non_org_entity!r}.")
| OrgInfo |
python | scikit-image__scikit-image | benchmarks/benchmark_rank.py | {
"start": 612,
"end": 1019
} | class ____:
param_names = ["filter3d", "shape3d"]
params = [sorted(all_3d_rank_filters), [(32, 32, 32), (128, 128, 128)]]
def setup(self, filter3d, shape3d):
self.volume = np.random.randint(0, 255, size=shape3d, dtype=np.uint8)
self.footprint_3d = ball(1)
def time_3d_filters(self, filter3d, shape3d):
getattr(rank, filter3d)(self.volume, self.footprint_3d)
| Rank3DSuite |
python | wandb__wandb | wandb/vendor/pygments/lexers/special.py | {
"start": 422,
"end": 820
} | class ____(Lexer):
"""
"Null" lexer, doesn't highlight anything.
"""
name = 'Text only'
aliases = ['text']
filenames = ['*.txt']
mimetypes = ['text/plain']
priority = 0.01
def get_tokens_unprocessed(self, text):
yield 0, Text, text
def analyse_text(text):
return TextLexer.priority
_ttype_cache = {}
line_re = re.compile(b'.*?\n')
| TextLexer |
python | pypa__pip | src/pip/_vendor/urllib3/connection.py | {
"start": 1967,
"end": 10127
} | class ____(_HTTPConnection, object):
"""
Based on :class:`http.client.HTTPConnection` but provides an extra constructor
backwards-compatibility layer between older and newer Pythons.
Additional keyword parameters are used to configure attributes of the connection.
Accepted parameters include:
- ``strict``: See the documentation on :class:`urllib3.connectionpool.HTTPConnectionPool`
- ``source_address``: Set the source address for the current connection.
- ``socket_options``: Set specific options on the underlying socket. If not specified, then
defaults are loaded from ``HTTPConnection.default_socket_options`` which includes disabling
Nagle's algorithm (sets TCP_NODELAY to 1) unless the connection is behind a proxy.
For example, if you wish to enable TCP Keep Alive in addition to the defaults,
you might pass:
.. code-block:: python
HTTPConnection.default_socket_options + [
(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1),
]
Or you may want to disable the defaults by passing an empty list (e.g., ``[]``).
"""
default_port = port_by_scheme["http"]
#: Disable Nagle's algorithm by default.
#: ``[(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)]``
default_socket_options = [(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)]
#: Whether this connection verifies the host's certificate.
is_verified = False
#: Whether this proxy connection (if used) verifies the proxy host's
#: certificate.
proxy_is_verified = None
def __init__(self, *args, **kw):
if not six.PY2:
kw.pop("strict", None)
# Pre-set source_address.
self.source_address = kw.get("source_address")
#: The socket options provided by the user. If no options are
#: provided, we use the default options.
self.socket_options = kw.pop("socket_options", self.default_socket_options)
# Proxy options provided by the user.
self.proxy = kw.pop("proxy", None)
self.proxy_config = kw.pop("proxy_config", None)
_HTTPConnection.__init__(self, *args, **kw)
@property
def host(self):
"""
Getter method to remove any trailing dots that indicate the hostname is an FQDN.
In general, SSL certificates don't include the trailing dot indicating a
fully-qualified domain name, and thus, they don't validate properly when
checked against a domain name that includes the dot. In addition, some
servers may not expect to receive the trailing dot when provided.
However, the hostname with trailing dot is critical to DNS resolution; doing a
lookup with the trailing dot will properly only resolve the appropriate FQDN,
whereas a lookup without a trailing dot will search the system's search domain
list. Thus, it's important to keep the original host around for use only in
those cases where it's appropriate (i.e., when doing DNS lookup to establish the
actual TCP connection across which we're going to send HTTP requests).
"""
return self._dns_host.rstrip(".")
@host.setter
def host(self, value):
"""
Setter for the `host` property.
We assume that only urllib3 uses the _dns_host attribute; httplib itself
only uses `host`, and it seems reasonable that other libraries follow suit.
"""
self._dns_host = value
def _new_conn(self):
"""Establish a socket connection and set nodelay settings on it.
:return: New socket connection.
"""
extra_kw = {}
if self.source_address:
extra_kw["source_address"] = self.source_address
if self.socket_options:
extra_kw["socket_options"] = self.socket_options
try:
conn = connection.create_connection(
(self._dns_host, self.port), self.timeout, **extra_kw
)
except SocketTimeout:
raise ConnectTimeoutError(
self,
"Connection to %s timed out. (connect timeout=%s)"
% (self.host, self.timeout),
)
except SocketError as e:
raise NewConnectionError(
self, "Failed to establish a new connection: %s" % e
)
return conn
def _is_using_tunnel(self):
# Google App Engine's httplib does not define _tunnel_host
return getattr(self, "_tunnel_host", None)
def _prepare_conn(self, conn):
self.sock = conn
if self._is_using_tunnel():
# TODO: Fix tunnel so it doesn't depend on self.sock state.
self._tunnel()
# Mark this connection as not reusable
self.auto_open = 0
def connect(self):
conn = self._new_conn()
self._prepare_conn(conn)
def putrequest(self, method, url, *args, **kwargs):
""" """
# Empty docstring because the indentation of CPython's implementation
# is broken but we don't want this method in our documentation.
match = _CONTAINS_CONTROL_CHAR_RE.search(method)
if match:
raise ValueError(
"Method cannot contain non-token characters %r (found at least %r)"
% (method, match.group())
)
return _HTTPConnection.putrequest(self, method, url, *args, **kwargs)
def putheader(self, header, *values):
""" """
if not any(isinstance(v, str) and v == SKIP_HEADER for v in values):
_HTTPConnection.putheader(self, header, *values)
elif six.ensure_str(header.lower()) not in SKIPPABLE_HEADERS:
raise ValueError(
"urllib3.util.SKIP_HEADER only supports '%s'"
% ("', '".join(map(str.title, sorted(SKIPPABLE_HEADERS))),)
)
def request(self, method, url, body=None, headers=None):
# Update the inner socket's timeout value to send the request.
# This only triggers if the connection is re-used.
if getattr(self, "sock", None) is not None:
self.sock.settimeout(self.timeout)
if headers is None:
headers = {}
else:
# Avoid modifying the headers passed into .request()
headers = headers.copy()
if "user-agent" not in (six.ensure_str(k.lower()) for k in headers):
headers["User-Agent"] = _get_default_user_agent()
super(HTTPConnection, self).request(method, url, body=body, headers=headers)
def request_chunked(self, method, url, body=None, headers=None):
"""
Alternative to the common request method, which sends the
body with chunked encoding and not as one block
"""
headers = headers or {}
header_keys = set([six.ensure_str(k.lower()) for k in headers])
skip_accept_encoding = "accept-encoding" in header_keys
skip_host = "host" in header_keys
self.putrequest(
method, url, skip_accept_encoding=skip_accept_encoding, skip_host=skip_host
)
if "user-agent" not in header_keys:
self.putheader("User-Agent", _get_default_user_agent())
for header, value in headers.items():
self.putheader(header, value)
if "transfer-encoding" not in header_keys:
self.putheader("Transfer-Encoding", "chunked")
self.endheaders()
if body is not None:
stringish_types = six.string_types + (bytes,)
if isinstance(body, stringish_types):
body = (body,)
for chunk in body:
if not chunk:
continue
if not isinstance(chunk, bytes):
chunk = chunk.encode("utf8")
len_str = hex(len(chunk))[2:]
to_send = bytearray(len_str.encode())
to_send += b"\r\n"
to_send += chunk
to_send += b"\r\n"
self.send(to_send)
# After the if clause, to always have a closed body
self.send(b"0\r\n\r\n")
| HTTPConnection |
python | geekcomputers__Python | QuestionAnswerVirtualAssistant/backend.py | {
"start": 109,
"end": 8495
} | class ____:
"""
Used for automatic question-answering
It works by building a reverse index store that maps
words to an id. To find the indexed questions that contain
a certain the words in the user question, we then take an
intersection of the ids, ranks the questions to pick the best fit,
then select the answer that maps to that question
"""
def __init__(self):
"""
Returns - None
Input - None
----------
- Initialize database. we use sqlite3
- Check if the tables exist, if not create them
- maintain a class level access to the database
connection object
"""
self.conn = sqlite3.connect("virtualassistant.sqlite3", autocommit=True)
cur = self.conn.cursor()
res = cur.execute("SELECT name FROM sqlite_master WHERE name='IdToQuesAns'")
tables_exist = res.fetchone()
if not tables_exist:
self.conn.execute(
"CREATE TABLE IdToQuesAns(id INTEGER PRIMARY KEY, question TEXT, answer TEXT)"
)
self.conn.execute("CREATE TABLE WordToId (name TEXT, value TEXT)")
cur.execute(
"INSERT INTO WordToId VALUES (?, ?)",
(
"index",
"{}",
),
)
def index_question_answer(self, question, answer):
"""
Returns - string
Input - str: a string of words called question
----------
Indexes the question and answer. It does this by performing two
operations - add the question and answer to the IdToQuesAns, then
adds the words in the question to WordToId
- takes in the question and answer (str)
- passes the question and answer to a method to add them
to IdToQuesAns
- retrieves the id of the inserted ques-answer
- uses the id to call the method that adds the words of
the question to the reverse index WordToId if the word has not
already been indexed
"""
row_id = self._add_to_IdToQuesAns(question.lower(), answer.lower())
cur = self.conn.cursor()
reverse_idx = cur.execute(
"SELECT value FROM WordToId WHERE name='index'"
).fetchone()[0]
reverse_idx = json.loads(reverse_idx)
question = question.split()
for word in question:
if word not in reverse_idx:
reverse_idx[word] = [row_id]
else:
if row_id not in reverse_idx[word]:
reverse_idx[word].append(row_id)
reverse_idx = json.dumps(reverse_idx)
cur = self.conn.cursor()
result = cur.execute(
"UPDATE WordToId SET value = (?) WHERE name='index'", (reverse_idx,)
)
return "index successful"
def _add_to_IdToQuesAns(self, question, answer):
"""
Returns - int: the id of the inserted document
Input - str: a string of words called `document`
---------
- use the class-level connection object to insert the document
into the db
- retrieve and return the row id of the inserted document
"""
cur = self.conn.cursor()
res = cur.execute(
"INSERT INTO IdToQuesAns (question, answer) VALUES (?, ?)",
(
question,
answer,
),
)
return res.lastrowid
def find_questions(self, user_input):
"""
Returns - <class method>: the return value of the _find_questions_with_idx method
Input - str: a string of words called `user_input`, expected to be a question
---------
- retrieve the reverse index
- use the words contained in the user input to find all the idxs
that contain the word
- use idxs to call the _find_questions_with_idx method
- return the result of the called method
"""
cur = self.conn.cursor()
reverse_idx = cur.execute(
"SELECT value FROM WordToId WHERE name='index'"
).fetchone()[0]
reverse_idx = json.loads(reverse_idx)
user_input = user_input.split(" ")
all_docs_with_user_input = []
for term in user_input:
if term in reverse_idx:
all_docs_with_user_input.append(reverse_idx[term])
if not all_docs_with_user_input: # the user_input does not exist
return []
common_idx_of_docs = set(all_docs_with_user_input[0])
for idx in all_docs_with_user_input[1:]:
common_idx_of_docs.intersection_update(idx)
if not common_idx_of_docs: # the user_input does not exist
return []
return self._find_questions_with_idx(common_idx_of_docs)
def _find_questions_with_idx(self, idxs):
"""
Returns - list[str]: the list of questions with the idxs
Input - list of idxs
---------
- use the class-level connection object to retrieve the questions that
have the idx in the input list of idxs.
- retrieve and return these questions as a list
"""
idxs = list(idxs)
cur = self.conn.cursor()
sql = "SELECT id, question, answer FROM IdToQuesAns WHERE id in ({seq})".format(
seq=",".join(["?"] * len(idxs))
)
result = cur.execute(sql, idxs).fetchall()
return result
def find_most_matched_question(self, user_input, corpus):
"""
Returns - list[str]: the list of [(score, most_matching_question)]
Input - user_input, and list of matching questions called corpus
---------
- use the tfidf score to rank the questions and pick the most matching
question
"""
vectorizer = TfidfVectorizer()
tfidf_scores = vectorizer.fit_transform(corpus)
tfidf_array = pd.DataFrame(
tfidf_scores.toarray(), columns=vectorizer.get_feature_names_out()
)
tfidf_dict = tfidf_array.to_dict()
user_input = user_input.split(" ")
result = []
for idx in range(len(corpus)):
result.append([0, corpus[idx]])
for term in user_input:
if term in tfidf_dict:
for idx in range(len(result)):
result[idx][0] += tfidf_dict[term][idx]
return result[0]
def provide_answer(self, user_input):
"""
Returns - str: the answer to the user_input
Input - str: user_input
---------
- use the user_input to get the list of matching questions
- create a corpus which is a list of all matching questions
- create a question_map that maps questions to their respective answers
- use the user_input and corpus to find the most matching question
- return the answer that matches that question from the question_map
"""
matching_questions = self.find_questions(user_input)
corpus = [item[1] for item in matching_questions]
question_map = {
question: answer for (id, question, answer) in matching_questions
}
score, most_matching_question = self.find_most_matched_question(
user_input, corpus
)
return question_map[most_matching_question]
if __name__ == "__main__":
va = QuestionAnswerVirtualAssistant()
va.index_question_answer(
"What are the different types of competitions available on Kaggle",
"Types of Competitions Kaggle Competitions are designed to provide challenges for competitors",
)
print(
va.index_question_answer(
"How to form, manage, and disband teams in a competition",
"Everyone that competes in a Competition does so as a team. A team is a group of one or more users",
)
)
va.index_question_answer(
"What is Data Leakage",
"Data Leakage is the presence of unexpected additional information in the training data",
)
va.index_question_answer(
"How does Kaggle handle cheating",
"Cheating is not taken lightly on Kaggle. We monitor our compliance account",
)
print(va.provide_answer("state Kaggle cheating policy"))
print(va.provide_answer("Tell me what is data leakage"))
| QuestionAnswerVirtualAssistant |
python | ray-project__ray | python/ray/serve/tests/test_api.py | {
"start": 1311,
"end": 1479
} | class ____:
def __init__(self):
self.count = 0
def __call__(self):
self.count += 1
return {"count": self.count}
@serve.deployment
| Counter |
python | django__django | tests/user_commands/management/commands/reverse_url.py | {
"start": 86,
"end": 260
} | class ____(BaseCommand):
"""
This command returns a URL from a reverse() call.
"""
def handle(self, *args, **options):
return reverse("some_url")
| Command |
python | ApeWorX__ape | src/ape/managers/project.py | {
"start": 83698,
"end": 84450
} | class ____(ProjectAPI):
"""
A project with more than 1 valid project API configs, such as a Foundry project
containing an ``ape-config.yaml`` file.
"""
apis: list[ProjectAPI] = []
"""
An ordered list of APIs to use. The last items take precedence as their configs merge.
"""
@property
def is_valid(self) -> bool:
return any(api.is_valid for api in self.apis)
def extract_config(self, **overrides) -> "ApeConfig":
cfgs = []
# Gather all valid APIs, in order.
for api in self.apis:
cfgs.append(api.extract_config().model_dump(exclude_defaults=True, exclude_unset=True))
merged_cfg = merge_configs(*cfgs)
return ApeConfig(**merged_cfg)
| MultiProject |
python | great-expectations__great_expectations | great_expectations/expectations/row_conditions.py | {
"start": 2217,
"end": 4584
} | class ____(BaseModel):
"""
Specify the column in a condition statement.
"""
name: str
def __init__(self, name: str):
super().__init__(name=name)
@override
def __hash__(self) -> int:
return hash(self.name)
@override
def __eq__(self, other: Parameter) -> ComparisonCondition: # type: ignore[override]
return ComparisonCondition(column=self, operator=Operator.EQUAL, parameter=other)
@override
def __ne__(self, other: Parameter) -> ComparisonCondition: # type: ignore[override]
return ComparisonCondition(column=self, operator=Operator.NOT_EQUAL, parameter=other)
def __lt__(self, other: Parameter) -> ComparisonCondition:
return ComparisonCondition(column=self, operator=Operator.LESS_THAN, parameter=other)
def __le__(self, other: Parameter) -> ComparisonCondition:
return ComparisonCondition(
column=self, operator=Operator.LESS_THAN_OR_EQUAL, parameter=other
)
def __gt__(self, other: Parameter) -> ComparisonCondition:
return ComparisonCondition(column=self, operator=Operator.GREATER_THAN, parameter=other)
def __ge__(self, other: Parameter) -> ComparisonCondition:
return ComparisonCondition(
column=self, operator=Operator.GREATER_THAN_OR_EQUAL, parameter=other
)
@public_api
def is_in(self, values: Iterable) -> ComparisonCondition:
"""
Operator for a condition statement that a column's value belongs to a set.
"""
return ComparisonCondition(column=self, operator=Operator.IN, parameter=list(values))
@public_api
def is_not_in(self, values: Iterable) -> ComparisonCondition:
"""
Operator for a condition statement that a column's value does not belong to a set.
"""
return ComparisonCondition(column=self, operator=Operator.NOT_IN, parameter=list(values))
@public_api
def is_null(self) -> NullityCondition:
"""
Operator for a condition statement that a column's value is null.
"""
return NullityCondition(column=self, is_null=True)
@public_api
def is_not_null(self) -> NullityCondition:
"""
Operator for a condition statement that a column's value is not null.
"""
return NullityCondition(column=self, is_null=False)
| Column |
python | airbytehq__airbyte | airbyte-integrations/connectors/destination-pgvector/destination_pgvector/pgvector_processor.py | {
"start": 1846,
"end": 2034
} | class ____(Protocol):
"""A protocol for embedding configuration.
This is necessary because embedding configs do not have a shared base class.
"""
mode: str
| EmbeddingConfig |
python | explosion__spaCy | spacy/lang/ti/__init__.py | {
"start": 324,
"end": 735
} | class ____(BaseDefaults):
lex_attr_getters = dict(Language.Defaults.lex_attr_getters)
lex_attr_getters.update(LEX_ATTRS)
lex_attr_getters[LANG] = lambda text: "ti"
tokenizer_exceptions = update_exc(BASE_EXCEPTIONS, TOKENIZER_EXCEPTIONS)
stop_words = STOP_WORDS
suffixes = TOKENIZER_SUFFIXES
writing_system = {"direction": "ltr", "has_case": False, "has_letters": True}
| TigrinyaDefaults |
python | joke2k__faker | faker/providers/date_time/no_NO/__init__.py | {
"start": 46,
"end": 782
} | class ____(DateTimeProvider):
MONTH_NAMES = {
"01": "januar",
"02": "februar",
"03": "mars",
"04": "april",
"05": "mai",
"06": "juni",
"07": "juli",
"08": "august",
"09": "september",
"10": "oktober",
"11": "november",
"12": "desember",
}
DAY_NAMES = {
"0": "søndag",
"1": "mandag",
"2": "tirsdag",
"3": "onsdag",
"4": "torsdag",
"5": "fredag",
"6": "lørdag",
}
def day_of_week(self) -> str:
day = self.date("%w")
return self.DAY_NAMES[day]
def month_name(self) -> str:
month = self.month()
return self.MONTH_NAMES[month]
| Provider |
python | plotly__plotly.py | plotly/graph_objs/funnelarea/marker/_line.py | {
"start": 233,
"end": 4750
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "funnelarea.marker"
_path_str = "funnelarea.marker.line"
_valid_props = {"color", "colorsrc", "width", "widthsrc"}
@property
def color(self):
"""
Sets the color of the line enclosing each sector. Defaults to
the `paper_bgcolor` value.
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
- A list or array of any of the above
Returns
-------
str|numpy.ndarray
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
@property
def colorsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `color`.
The 'colorsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["colorsrc"]
@colorsrc.setter
def colorsrc(self, val):
self["colorsrc"] = val
@property
def width(self):
"""
Sets the width (in px) of the line enclosing each sector.
The 'width' property is a number and may be specified as:
- An int or float in the interval [0, inf]
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
int|float|numpy.ndarray
"""
return self["width"]
@width.setter
def width(self, val):
self["width"] = val
@property
def widthsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `width`.
The 'widthsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["widthsrc"]
@widthsrc.setter
def widthsrc(self, val):
self["widthsrc"] = val
@property
def _prop_descriptions(self):
return """\
color
Sets the color of the line enclosing each sector.
Defaults to the `paper_bgcolor` value.
colorsrc
Sets the source reference on Chart Studio Cloud for
`color`.
width
Sets the width (in px) of the line enclosing each
sector.
widthsrc
Sets the source reference on Chart Studio Cloud for
`width`.
"""
def __init__(
self, arg=None, color=None, colorsrc=None, width=None, widthsrc=None, **kwargs
):
"""
Construct a new Line object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.funnelarea.marker.Line`
color
Sets the color of the line enclosing each sector.
Defaults to the `paper_bgcolor` value.
colorsrc
Sets the source reference on Chart Studio Cloud for
`color`.
width
Sets the width (in px) of the line enclosing each
sector.
widthsrc
Sets the source reference on Chart Studio Cloud for
`width`.
Returns
-------
Line
"""
super().__init__("line")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.funnelarea.marker.Line
constructor must be a dict or
an instance of :class:`plotly.graph_objs.funnelarea.marker.Line`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("color", arg, color)
self._set_property("colorsrc", arg, colorsrc)
self._set_property("width", arg, width)
self._set_property("widthsrc", arg, widthsrc)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Line |
python | jina-ai__jina | jina/proto/docarray_v1/pb/jina_pb2_grpc.py | {
"start": 25082,
"end": 26117
} | class ____(object):
"""*
jina gRPC service to trigger a snapshot at the Executor Runtime.
"""
def restore_status(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_JinaExecutorRestoreProgressServicer_to_server(servicer, server):
rpc_method_handlers = {
'restore_status': grpc.unary_unary_rpc_method_handler(
servicer.restore_status,
request_deserializer=jina__pb2.RestoreId.FromString,
response_serializer=jina__pb2.RestoreSnapshotStatusProto.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'jina.JinaExecutorRestoreProgress', rpc_method_handlers
)
server.add_generic_rpc_handlers((generic_handler,))
# This class is part of an EXPERIMENTAL API.
| JinaExecutorRestoreProgressServicer |
python | sympy__sympy | sympy/logic/boolalg.py | {
"start": 34885,
"end": 35497
} | class ____(BooleanFunction):
"""
Logical NAND function.
It evaluates its arguments in order, giving True immediately if any
of them are False, and False if they are all True.
Returns True if any of the arguments are False
Returns False if all arguments are True
Examples
========
>>> from sympy.logic.boolalg import Nand
>>> from sympy import symbols
>>> x, y = symbols('x y')
>>> Nand(False, True)
True
>>> Nand(True, True)
False
>>> Nand(x, y)
~(x & y)
"""
@classmethod
def eval(cls, *args):
return Not(And(*args))
| Nand |
python | doocs__leetcode | solution/0000-0099/0040.Combination Sum II/Solution2.py | {
"start": 0,
"end": 607
} | class ____:
def combinationSum2(self, candidates: List[int], target: int) -> List[List[int]]:
def dfs(i: int, s: int):
if s == 0:
ans.append(t[:])
return
if i >= len(candidates) or s < candidates[i]:
return
x = candidates[i]
t.append(x)
dfs(i + 1, s - x)
t.pop()
while i < len(candidates) and candidates[i] == x:
i += 1
dfs(i, s)
candidates.sort()
ans = []
t = []
dfs(0, target)
return ans
| Solution |
python | PyCQA__pylint | tests/functional/u/unused/unused_private_member.py | {
"start": 7905,
"end": 8587
} | class ____:
"""https://github.com/pylint-dev/pylint/issues/4837"""
__defaults = {}
__defaults_set = False
def __init__(self, value):
self.value = value
def __init_defaults(self): # [unused-private-member]
if not self.__defaults_set:
type(self).__defaults = { "fur": "pink" }
type(self).__defaults_set = True
def __get_fur_color(self): # [unused-private-member]
color = lookup_attribute(self.__defaults, "fur")
return color
def lookup_attribute(mapping, key):
return mapping[key]
# Test for regression on checking __class__ attribute
# See: https://github.com/pylint-dev/pylint/issues/5261
| Pony |
python | run-llama__llama_index | llama-index-integrations/readers/llama-index-readers-memos/llama_index/readers/memos/base.py | {
"start": 196,
"end": 1492
} | class ____(BaseReader):
"""
Memos reader.
Reads content from an Memos.
"""
def __init__(self, host: str = "https://demo.usememos.com/") -> None:
"""Init params."""
self._memoUrl = urljoin(host, "api/memo")
def load_data(self, params: Dict = {}) -> List[Document]:
"""
Load data from RSS feeds.
Args:
params (Dict): Filtering parameters.
Returns:
List[Document]: List of documents.
"""
import requests
documents = []
realUrl = self._memoUrl
if not params:
realUrl = urljoin(self._memoUrl, "all", False)
try:
req = requests.get(realUrl, params)
res = req.json()
except ValueError:
raise ValueError("Your Memo URL is not valid")
if "data" not in res:
raise ValueError("Invalid Memo response")
memos = res["data"]
for memo in memos:
content = memo["content"]
extra_info = {
"creator": memo["creator"],
"resource_list": memo["resourceList"],
id: memo["id"],
}
documents.append(Document(text=content, extra_info=extra_info))
return documents
| MemosReader |
python | tensorflow__tensorflow | tensorflow/python/tpu/tests/tpu_embedding_v2_checkpoint_test.py | {
"start": 1749,
"end": 14383
} | class ____(tpu_embedding_base_test.TPUEmbeddingBaseTest):
def make_checkpoint_and_get_embedding(self, name, model, num_rows):
"""Saves model to checkpoint name, retrieves embedding variables."""
checkpoint = util.Checkpoint(model=model)
checkpoint.save(self._get_tmpdir(name, 'save'))
# Get the name of the table video variable which should be the only
# [8, 4] shaped tensor in the checkpoint. Note that we do this
# as the key can change.
variables = checkpoint_utils.list_variables(self._get_tmpdir(name))
variables = [name for name, size in variables if size == [num_rows, 4]]
if len(variables) != 1:
raise RuntimeError('Found {} copies of the parameter variable in the '
'checkpoint. Exactly one copy exported.'.format(
len(variables)))
return checkpoint_utils.load_variable(self._get_tmpdir(name), variables[0])
def test_checkpoint_save_retrieves(self):
strategy = self._get_strategy()
num_rows = strategy.num_replicas_in_sync
with strategy.scope():
first_mid_level_contents = np.ones((num_rows, 4))
first_mid_level_optimizer = tpu_embedding_v2_utils.SGD(learning_rate=0.1)
initializer = init_ops_v2.Constant(first_mid_level_contents)
table = tpu_embedding_v2_utils.TableConfig(
vocabulary_size=num_rows,
dim=4,
initializer=initializer,
combiner='sum',
name='table')
feature_config = (tpu_embedding_v2_utils.FeatureConfig(
table=table, name='feature'),)
first_mid_level = tpu_embedding_v2.TPUEmbedding(
feature_config, first_mid_level_optimizer)
first_mid_level.build(64)
# Ensure that the variables from the first model are loaded.
first_mid_level._load_variables()
self.assertAllClose(
first_mid_level_contents,
self.make_checkpoint_and_get_embedding('before_load', first_mid_level,
num_rows),
msg='Checkpoint should contain values from the first api object.')
# Reinitialize the tpu.
tpu_cluster_resolver.initialize_tpu_system(self.resolver)
with strategy.scope():
second_mid_level_contents = np.ones((num_rows, 4)) * 2
second_mid_level_optimizer = tpu_embedding_v2_utils.SGD(learning_rate=0.1)
initializer = init_ops_v2.Constant(second_mid_level_contents)
table = tpu_embedding_v2_utils.TableConfig(
vocabulary_size=num_rows,
dim=4,
initializer=initializer,
combiner='sum',
name='table')
feature_config = (tpu_embedding_v2_utils.FeatureConfig(
table=table, name='feature'),)
second_mid_level = tpu_embedding_v2.TPUEmbedding(
feature_config, second_mid_level_optimizer)
second_mid_level.build(64)
second_mid_level._load_variables()
# When we load the variables from the second mid level API object to the TPU
# we expect that checkpointing the first mid level API object will now
# retrieve the values from the TPU which are now different from the current
# variables in the first mid level.
self.assertAllClose(
second_mid_level_contents,
self.make_checkpoint_and_get_embedding('after_load', first_mid_level,
num_rows),
msg='Checkpoint should contain values from the second api object.')
def test_checkpoint_restore_loads(self):
strategy = self._get_strategy()
num_rows = strategy.num_replicas_in_sync
def get_values(mid):
return ops.convert_to_tensor(
mid._variables['table']['parameters'].variables[0])
with strategy.scope():
first_mid_level_contents = np.ones((num_rows, 4))
first_mid_level_optimizer = tpu_embedding_v2_utils.SGD(learning_rate=0.1)
initializer = init_ops_v2.Constant(first_mid_level_contents)
table = tpu_embedding_v2_utils.TableConfig(
vocabulary_size=num_rows,
dim=4,
initializer=initializer,
combiner='sum',
name='table')
feature_config = (tpu_embedding_v2_utils.FeatureConfig(
table=table, name='feature'),)
first_mid_level = tpu_embedding_v2.TPUEmbedding(
feature_config, first_mid_level_optimizer)
first_mid_level.build(64)
first_mid_level._load_variables()
first_checkpoint = util.Checkpoint(model=first_mid_level)
first_checkpoint.save(self._get_tmpdir('restore', 'save'))
tpu_cluster_resolver.initialize_tpu_system(self.resolver)
with strategy.scope():
second_mid_level_contents = np.ones((num_rows, 4)) * 2
second_mid_level_optimizer = tpu_embedding_v2_utils.SGD(learning_rate=0.1)
initializer = init_ops_v2.Constant(second_mid_level_contents)
table = tpu_embedding_v2_utils.TableConfig(
vocabulary_size=num_rows,
dim=4,
initializer=initializer,
combiner='sum',
name='table')
feature_config = (tpu_embedding_v2_utils.FeatureConfig(
table=table, name='feature'),)
second_mid_level = tpu_embedding_v2.TPUEmbedding(
feature_config, second_mid_level_optimizer)
second_mid_level.build(64)
second_mid_level._load_variables()
self.assertAllClose(
second_mid_level_contents,
get_values(second_mid_level),
msg='Second mid level api should contain its initial values.',
)
# We restore the checkpoint of our first model into our second model.
# This should load the first mid level API object onto the TPU.
second_checkpoint = util.Checkpoint(model=second_mid_level)
second_checkpoint.restore(self._get_tmpdir('restore', 'save-1'))
# Call retrieve here as a way to check what the TPU contains.
# Calling the retrieve ops directly might make for a cleaner separation of
# test and module, though.
second_mid_level._retrieve_variables()
self.assertAllClose(
first_mid_level_contents,
get_values(second_mid_level),
msg='Second mid level api should have retrieved the first model values.'
)
def test_checkpoint_restore_before_variable_creation(self):
self.skip_if_oss()
class TestModule(module.Module):
def __init__(self, initializer, rows):
self._initializer = initializer
self._rows = rows
table = tpu_embedding_v2_utils.TableConfig(
vocabulary_size=self._rows,
dim=4,
initializer=self._initializer,
combiner='sum',
name='table')
feature_config = (tpu_embedding_v2_utils.FeatureConfig(
table=table, name='feature'),)
optimizer = tpu_embedding_v2_utils.SGD()
self.tpu_embedding = tpu_embedding_v2.TPUEmbedding(
feature_config, optimizer)
def create_embedding(self):
# We aren't training so batch_size here doesn't matter.
self.tpu_embedding.build(64)
strategy = self._get_strategy()
with strategy.scope():
module1 = TestModule(init_ops_v2.Ones(),
strategy.num_replicas_in_sync * 2)
module1.create_embedding()
checkpoint = util.Checkpoint(test_module=module1)
checkpoint.save(self._get_tmpdir('restore_before_create', 'save'))
# Reinitialize the tpu
strategy = self._get_strategy()
with strategy.scope():
module2 = TestModule(init_ops_v2.Zeros(),
strategy.num_replicas_in_sync * 2)
checkpoint = util.Checkpoint(test_module=module2)
checkpoint.restore(self._get_tmpdir('restore_before_create', 'save-1'))
with strategy.scope():
module2.create_embedding()
def get_values(mid):
return mid._variables['table']['parameters'].variables[0].numpy()
self.assertAllClose(
np.ones((strategy.num_replicas_in_sync * 2, 4)),
get_values(module2.tpu_embedding))
# Fetch the values from the TPU to check that they are the same.
module2.tpu_embedding._retrieve_variables()
self.assertAllClose(
np.ones((strategy.num_replicas_in_sync * 2, 4)),
get_values(module2.tpu_embedding))
def test_model_export_cpu(self):
strategy = self._get_strategy()
num_rows = strategy.num_replicas_in_sync
with strategy.scope():
first_mid_level_contents = np.ones((num_rows, 4))
first_mid_level_optimizer = tpu_embedding_v2_utils.SGD(learning_rate=0.1)
initializer = init_ops_v2.Constant(first_mid_level_contents)
table = tpu_embedding_v2_utils.TableConfig(
vocabulary_size=num_rows,
dim=4,
initializer=initializer,
combiner='sum',
name='table')
feature_config = (tpu_embedding_v2_utils.FeatureConfig(
table=table, name='feature'),)
first_mid_level = tpu_embedding_v2.TPUEmbedding(
feature_config, first_mid_level_optimizer)
first_mid_level.build(64)
cpu_mid_level_optimizer = tpu_embedding_v2_utils.SGD(learning_rate=0.1)
cpu_mid_level = tpu_embedding_v2.TPUEmbedding(feature_config,
cpu_mid_level_optimizer)
cpu_mid_level.build(64)
first_mid_level._load_variables()
tpu_checkpoint = util.Checkpoint(model=first_mid_level)
tpu_checkpoint.save(self._get_tmpdir('export_cpu', 'save'))
# We restore the checkpoint of our tpu mid level onto our cpu mid level.
cpu_checkpoint = util.Checkpoint(model=cpu_mid_level)
cpu_checkpoint.restore(self._get_tmpdir('export_cpu', 'save-1'))
@def_function.function
def serve_tensors(features):
features = tpu_embedding_for_serving.cpu_embedding_lookup(
features, None, cpu_mid_level.embedding_tables,
cpu_mid_level._feature_config)
return features[0]
signatures = {
'serving_default':
serve_tensors.get_concrete_function((tensor_spec.TensorSpec(
shape=(2,), dtype=dtypes.int32, name='feature'),))
}
save.save(
cpu_mid_level,
export_dir=self._get_tmpdir('export_cpu', 'exported_model'),
signatures=signatures)
imported = load.load(self._get_tmpdir('export_cpu', 'exported_model'))
predict_fn = imported.signatures['serving_default']
input_feature_value = np.array([1, 0])
input_batch = (constant_op.constant(
input_feature_value, dtype=dtypes.int32),)
prediction = predict_fn(*input_batch)['output_0']
self.assertAllClose(prediction.numpy(),
first_mid_level_contents[input_feature_value])
@parameterized.parameters(tpu_embedding_v2_utils.SGD,
tpu_embedding_v2_utils.Adagrad,
tpu_embedding_v2_utils.Adam,
tpu_embedding_v2_utils.FTRL)
def test_check_checkpoint_variable_names_are_same_on_cpu_and_tpu(
self, optimizer):
# Reinitialize the TPU so that we can re-initialize the embeddings with the
# given optimizer.
if optimizer != tpu_embedding_v2_utils.SGD:
self.skip_if_oss()
strategy = self._get_strategy()
num_rows = strategy.num_replicas_in_sync
with strategy.scope():
first_mid_level_contents = np.ones((num_rows, 4))
first_mid_level_optimizer = optimizer(learning_rate=0.1)
initializer = init_ops_v2.Constant(first_mid_level_contents)
table = tpu_embedding_v2_utils.TableConfig(
vocabulary_size=num_rows,
dim=4,
initializer=initializer,
combiner='sum',
name='table')
feature_config = (tpu_embedding_v2_utils.FeatureConfig(
table=table, name='feature'),)
first_mid_level = tpu_embedding_v2.TPUEmbedding(
feature_config, first_mid_level_optimizer)
first_mid_level.build(64)
cpu_mid_level_optimizer = optimizer(learning_rate=0.1)
cpu_mid_level = tpu_embedding_v2.TPUEmbedding(feature_config,
cpu_mid_level_optimizer)
cpu_mid_level.build(64)
tpu_checkpoint = util.Checkpoint(model=first_mid_level)
tpu_checkpoint.save(self._get_tmpdir('save-tpu', 'save'))
tpu_variables = checkpoint_utils.list_variables(
self._get_tmpdir('save-tpu'))
cpu_checkpoint = util.Checkpoint(model=cpu_mid_level)
cpu_checkpoint.save(self._get_tmpdir('save-cpu', 'save'))
cpu_variables = checkpoint_utils.list_variables(
self._get_tmpdir('save-cpu'))
self.assertAllEqual(tpu_variables, cpu_variables)
if __name__ == '__main__':
v2_compat.enable_v2_behavior()
test.main()
| TPUEmbeddingCheckpointTest |
python | sphinx-doc__sphinx | sphinx/environment/__init__.py | {
"start": 35485,
"end": 43484
} | class ____:
"""Temporary data storage while reading a document.
This class is only for internal use. Please don't use this in your extensions.
It will be removed or changed without notice.
The only stable API is via ``env.current_document``.
"""
__slots__ = (
'_parser',
'_serial_numbers',
'_extension_data',
'autodoc_annotations',
'autodoc_class',
'autodoc_module',
'c_last_symbol',
'c_namespace_stack',
'c_parent_symbol',
'cpp_domain_name',
'cpp_last_symbol',
'cpp_namespace_stack',
'cpp_parent_symbol',
'default_domain',
'default_role',
'docname',
'highlight_language',
'obj_desc_name',
'reading_started_at',
)
# Map of old-style temp_data keys to _CurrentDocument attributes
__attr_map: Final = {
'_parser': '_parser',
'annotations': 'autodoc_annotations',
'autodoc:class': 'autodoc_class',
'autodoc:module': 'autodoc_module',
'c:last_symbol': 'c_last_symbol',
'c:namespace_stack': 'c_namespace_stack',
'c:parent_symbol': 'c_parent_symbol',
'cpp:domain_name': 'cpp_domain_name',
'cpp:last_symbol': 'cpp_last_symbol',
'cpp:namespace_stack': 'cpp_namespace_stack',
'cpp:parent_symbol': 'cpp_parent_symbol',
'default_domain': 'default_domain',
'default_role': 'default_role',
'docname': 'docname',
'highlight_language': 'highlight_language',
'object': 'obj_desc_name',
'started_at': 'reading_started_at',
}
# Attributes that should reset to None if popped.
__attr_default_none: Final = frozenset({
'_parser',
'c:last_symbol',
'c:parent_symbol',
'cpp:last_symbol',
'cpp:parent_symbol',
'default_domain',
})
def __init__(
self,
*,
docname: str = '',
default_role: str = '',
default_domain: Domain | None = None,
) -> None:
#: The docname of the document currently being parsed.
self.docname: str = docname
#: The default role for the current document.
#: Set by the ``.. default-role::`` directive.
self.default_role: str = default_role
#: The default domain for the current document.
#: Set by the ``.. default-domain::`` directive.
self.default_domain: Domain | None = default_domain
#: The parser being used to parse the current document.
self._parser: Parser | None = None
#: The default language for syntax highlighting.
#: Set by the ``.. highlight::`` directive to override
#: the ``highlight_language`` config value.
self.highlight_language: str = ''
#: The current object's name.
#: Used in the Changes builder.
self.obj_desc_name: str = ''
#: Records type hints of Python objects in the current document.
#: Used in ``sphinx.ext.autodoc.typehints``.
#: Maps object names to maps of attribute names -> type hints.
self.autodoc_annotations: dict[str, dict[str, str]] = {}
#: The current Python class name.
#: Used in ``sphinx.ext.autodoc``.
self.autodoc_class: str = ''
#: The current Python module name.
#: Used in ``sphinx.ext.autodoc``.
self.autodoc_module: str = ''
#: The most-recently added declaration in a directive.
#: Used in the C Domain.
self.c_last_symbol: CSymbol | None = None
#: The stack of namespace scopes, altered by the ``.. c:namespace::``
#: and ``.. c:namespace-(push|pop)::``directives.
#: Used in the C Domain.
self.c_namespace_stack: list[CSymbol] = []
#: The parent declaration.
#: Used in the C Domain.
self.c_parent_symbol: CSymbol | None = None
#: A stack of the string representation of declarations,
#: used to format the table of contents entry.
#: Used in the C++ Domain.
self.cpp_domain_name: tuple[str, ...] = ()
#: The most-recently added declaration in a directive.
#: Used in the C++ Domain.
self.cpp_last_symbol: CPPSymbol | None = None
#: The stack of namespace scopes, altered by the ``.. cpp:namespace::``
#: and ``.. cpp:namespace-(push|pop)::``directives.
#: Used in the C++ Domain.
self.cpp_namespace_stack: list[CPPSymbol] = []
#: The parent declaration.
#: Used in the C++ Domain.
self.cpp_parent_symbol: CPPSymbol | None = None
#: Records the time when reading begain for the current document.
#: Used in ``sphinx.ext.duration``.
self.reading_started_at: float = 0.0
# Used for generating unique serial numbers.
self._serial_numbers: dict[str, int] = {}
# Stores properties relating to the current document set by extensions.
self._extension_data: dict[str, Any] = {}
def new_serial_number(self, category: str = '', /) -> int:
    """Return the next serial number for *category*.

    Numbers start at zero and are guaranteed unique within the current
    document and category (e.g. for index entry targets).
    """
    serial = self._serial_numbers.get(category, 0)
    self._serial_numbers[category] = serial + 1
    return serial
# Mapping interface:
def __getitem__(self, item: str) -> Any:
    """Return the value for *item*, preferring typed attributes over
    extension data."""
    try:
        attr_name = self.__attr_map[item]
    except KeyError:
        # Unknown to the attribute map: fall back to extension storage.
        return self._extension_data[item]
    return getattr(self, attr_name)
def __setitem__(self, key: str, value: Any) -> None:
    """Store *value* under *key*, preferring typed attributes over
    extension data."""
    if key not in self.__attr_map:
        self._extension_data[key] = value
        return
    setattr(self, self.__attr_map[key], value)
def __delitem__(self, key: str) -> None:
    """Remove *key*; typed attributes are reset to defaults, not deleted.

    Deleting an unknown key is a no-op rather than a ``KeyError``,
    because ``pop`` is called with an explicit default.
    """
    self.pop(key, default=None)
def __contains__(self, item: str) -> bool:
    """Report whether *item* is a known key."""
    if item in {'c:parent_symbol', 'cpp:parent_symbol'}:
        # These two keys only count as present while actually set.
        return getattr(self, self.__attr_map[item]) is not None
    if item in self.__attr_map:
        return True
    return item in self._extension_data
def __iter__(self) -> Iterator[str]:
    """Iterate over every known key."""
    yield from self.keys()
def __len__(self) -> int:
    """Return the number of known keys (typed attributes plus
    extension-data entries)."""
    return len(self.__attr_map) + len(self._extension_data)
def keys(self) -> Iterable[str]:
    """Return every known key as an immutable set."""
    return frozenset(self.__attr_map) | frozenset(self._extension_data)
def items(self) -> Iterable[tuple[str, Any]]:
    """Yield ``(key, value)`` pairs for every known key."""
    return ((key, self[key]) for key in self.keys())
def values(self) -> Iterable[Any]:
    """Yield the value for every known key."""
    return (self[key] for key in self.keys())
def get(self, key: str, default: Any | None = None) -> Any | None:
try:
return self[key]
except KeyError:
return default
# Sentinel distinguishing "no default passed" from an explicit
# ``default=None`` in :meth:`pop`.
__sentinel = object()
def pop(self, key: str, default: Any | None = __sentinel) -> Any | None:
    """Remove *key* and return its value (or *default* when absent).

    Typed attributes are never actually removed: their current value is
    returned and the attribute is reset to its default instead.
    """
    if key in self.__attr_map:
        # the keys in __attr_map always exist, so ``default`` is ignored
        value = getattr(self, self.__attr_map[key])
        if key in self.__attr_default_none:
            default = None
        else:
            default = type(value)()  # set key to type's default
        setattr(self, self.__attr_map[key], default)
        return value
    if default is self.__sentinel:
        # No default supplied: let a missing key raise KeyError.
        return self._extension_data.pop(key)
    return self._extension_data.pop(key, default)
def setdefault(self, key: str, default: Any | None = None) -> Any | None:
return self._extension_data.setdefault(key, default)
def clear(self) -> None:
    """Reset all state by re-running ``__init__`` on this instance.

    The class is named explicitly (rather than ``type(self)``) so a
    subclass override of ``__init__`` is not invoked here.
    """
    _CurrentDocument.__init__(self)  # NoQA: PLC2801
def update(self, other: Iterable[tuple[str, Any]] = (), /, **kwargs: Any) -> None:
    """Assign every key/value pair from *other*, then from *kwargs*."""
    if not isinstance(other, dict):
        other = dict(other)
    for key, value in other.items():
        self[key] = value
    for key, value in kwargs.items():
        self[key] = value
| _CurrentDocument |
python | gawel__pyquery | pyquery/pyquery.py | {
"start": 4976,
"end": 54154
} | class ____(list):
"""The main class
"""
_translator_class = JQueryTranslator
def __init__(self, *args, **kwargs):
html = None
elements = []
self._base_url = None
self.parser = kwargs.pop('parser', None)
if 'parent' in kwargs:
self._parent = kwargs.pop('parent')
else:
self._parent = no_default
if 'css_translator' in kwargs:
self._translator = kwargs.pop('css_translator')
elif self.parser in ('xml',):
self._translator = self._translator_class(xhtml=True)
elif self._parent is not no_default:
self._translator = self._parent._translator
else:
self._translator = self._translator_class(xhtml=False)
self.namespaces = kwargs.pop('namespaces', None)
if kwargs:
# specific case to get the dom
if 'filename' in kwargs:
html = open(kwargs['filename'],
encoding=kwargs.get('encoding'))
elif 'url' in kwargs:
url = kwargs.pop('url')
if 'opener' in kwargs:
opener = kwargs.pop('opener')
html = opener(url, **kwargs)
else:
html = url_opener(url, kwargs)
if not self.parser:
self.parser = 'html'
self._base_url = url
else:
raise ValueError('Invalid keyword arguments %s' % kwargs)
elements = fromstring(html, self.parser)
# close open descriptor if possible
if hasattr(html, 'close'):
try:
html.close()
except Exception:
pass
else:
# get nodes
# determine context and selector if any
selector = context = no_default
length = len(args)
if length == 1:
context = args[0]
elif length == 2:
selector, context = args
else:
raise ValueError(
"You can't do that. Please, provide arguments")
# get context
if isinstance(context, basestring):
try:
elements = fromstring(context, self.parser)
except Exception:
raise
elif isinstance(context, self.__class__):
# copy
elements = context[:]
elif isinstance(context, list):
elements = context
elif isinstance(context, etree._Element):
elements = [context]
else:
raise TypeError(context)
# select nodes
if elements and selector is not no_default:
xpath = self._css_to_xpath(selector)
results = []
for tag in elements:
results.extend(
tag.xpath(xpath, namespaces=self.namespaces))
elements = results
list.__init__(self, elements)
def _css_to_xpath(self, selector, prefix='descendant-or-self::'):
    """Translate a CSS selector into an XPath expression."""
    # jQuery-style attribute selectors may be written ``[@attr]``; strip
    # the ``@`` so the cssselect translator sees standard CSS syntax.
    normalized = selector.replace('[@', '[')
    return self._translator.css_to_xpath(normalized, prefix)
def _copy(self, *args, **kwargs):
    """Build a new instance of this class, propagating the namespaces
    unless the caller supplied their own."""
    if 'namespaces' not in kwargs:
        kwargs['namespaces'] = self.namespaces
    return self.__class__(*args, **kwargs)
def __call__(self, *args, **kwargs):
    """Return a new PyQuery instance selected from this one."""
    if not args:
        raise ValueError('You must provide at least a selector')
    if args[0] == '':
        # The empty selector matches nothing.
        return self._copy([])
    # A bare CSS selector (a plain string that is not markup) is
    # evaluated against this instance, which becomes the context.
    if (len(args) == 1 and
            isinstance(args[0], str) and
            not args[0].startswith('<')):
        args += (self,)
    return self._copy(*args, parent=self, **kwargs)
# keep original list api prefixed with _
_append = list.append
_extend = list.extend
# improve pythonic api
def __add__(self, other):
    """Concatenate two PyQuery objects into a new one."""
    assert isinstance(other, self.__class__)
    return self._copy(list(self) + list(other))
def extend(self, other):
    """Extend with another PyQuery object"""
    assert isinstance(other, self.__class__)
    self._extend(list(other))
    return self
def items(self, selector=None):
    """Iterate over elements, yielding each as a PyQuery object:

    >>> d = PyQuery('<div><span>foo</span><span>bar</span></div>')
    >>> [i.text() for i in d.items('span')]
    ['foo', 'bar']
    >>> [i.text() for i in d('span').items()]
    ['foo', 'bar']
    >>> list(d.items('a')) == list(d('a').items())
    True
    """
    if selector:
        source = self(selector) or []
    else:
        source = self
    for node in source:
        yield self._copy(node, parent=self)
def xhtml_to_html(self):
    """Remove xhtml namespace:

    >>> doc = PyQuery(
    ...     '<html xmlns="http://www.w3.org/1999/xhtml"></html>')
    >>> doc
    [<{http://www.w3.org/1999/xhtml}html>]
    >>> doc.xhtml_to_html()
    [<html>]
    """
    # An empty selection has no tree to rewrite; do nothing in that case.
    if len(self):
        lxml.html.xhtml_to_html(self[0].getroottree())
    return self
def remove_namespaces(self):
    """Remove all namespaces:

    >>> doc = PyQuery('<foo xmlns="http://example.com/foo"></foo>')
    >>> doc
    [<{http://example.com/foo}foo>]
    >>> doc.remove_namespaces()
    [<foo>]
    """
    if len(self):
        tree = self[0].getroottree()
        # ``{*}*`` matches elements in any namespace; strip the
        # ``{uri}`` prefix from each qualified tag name.
        for node in tree.iter('{*}*'):
            if node.tag.startswith('{'):
                node.tag = node.tag.split('}', 1)[1]
    return self
def __str__(self):
"""xml representation of current nodes::
>>> xml = PyQuery(
... '<script><![[CDATA[ ]></script>', parser='html_fragments')
>>> print(str(xml))
<script><![[CDATA[ ]></script>
"""
return ''.join([etree.tostring(e, encoding=str) for e in self])
def __unicode__(self):
"""xml representation of current nodes"""
return u''.join([etree.tostring(e, encoding=str)
for e in self])
def __html__(self):
"""html representation of current nodes::
>>> html = PyQuery(
... '<script><![[CDATA[ ]></script>', parser='html_fragments')
>>> print(html.__html__())
<script><![[CDATA[ ]></script>
"""
return u''.join([lxml.html.tostring(e, encoding=str)
for e in self])
def __repr__(self):
    """Short ``[<tag#id.class>]`` summary of the selected elements."""
    summaries = []
    try:
        for node in self:
            classes = node.get('class')
            class_part = '.' + '.'.join(classes.split(' ')) if classes else ''
            ident = node.get('id')
            id_part = '#' + ident if ident else ''
            summaries.append('<%s%s%s>' % (node.tag, id_part, class_part))
        return '[%s]' % ', '.join(summaries)
    except AttributeError:
        # Text nodes and other non-elements lack ``.get()``; fall back
        # to the plain list repr.
        return list.__repr__(self)
@property
def root(self):
    """Return the lxml root tree of the document (of the parent
    selection when one exists)."""
    owner = self._parent if self._parent is not no_default else self
    return owner[0].getroottree()
@property
def encoding(self):
    """Return the xml encoding of the root element, or ``None``."""
    tree = self.root
    if tree is None:
        return None
    return tree.docinfo.encoding
##############
# Traversing #
##############
def _filter_only(self, selector, elements, reverse=False, unique=False):
"""Filters the selection set only, as opposed to also including
descendants.
"""
if selector is None:
results = elements
else:
xpath = self._css_to_xpath(selector, 'self::')
results = []
for tag in elements:
results.extend(tag.xpath(xpath, namespaces=self.namespaces))
if reverse:
results.reverse()
if unique:
result_list = results
results = []
for item in result_list:
if item not in results:
results.append(item)
return self._copy(results, parent=self)
def parent(self, selector=None):
    """Direct parent of each element, optionally filtered; duplicates
    are removed."""
    found = [node.getparent() for node in self
             if node.getparent() is not None]
    return self._filter_only(selector, found, unique=True)
def prev(self, selector=None):
    """Immediately preceding sibling of each element, optionally
    filtered."""
    found = [node.getprevious() for node in self
             if node.getprevious() is not None]
    return self._filter_only(selector, found)
def next(self, selector=None):
    """Immediately following sibling of each element, optionally
    filtered."""
    found = [node.getnext() for node in self
             if node.getnext() is not None]
    return self._filter_only(selector, found)
def _traverse(self, method):
    """Yield siblings obtained by repeatedly calling *method*
    (``'getnext'`` or ``'getprevious'``) on each element."""
    for node in self:
        sibling = getattr(node, method)()
        while sibling is not None:
            yield sibling
            sibling = getattr(sibling, method)()
def _traverse_parent_topdown(self):
    """Yield the ancestors of each element, outermost first."""
    for node in self:
        chain = []
        ancestor = node.getparent()
        while ancestor is not None:
            chain.append(ancestor)
            ancestor = ancestor.getparent()
        # ``chain`` was collected bottom-up; emit it top-down.
        yield from reversed(chain)
def _next_all(self):
    """All following siblings, in document order."""
    return list(self._traverse('getnext'))
@with_camel_case_alias
def next_all(self, selector=None):
"""
>>> h = '<span><p class="hello">Hi</p><p>Bye</p><img scr=""/></span>'
>>> d = PyQuery(h)
>>> d('p:last').next_all()
[<img>]
>>> d('p:last').nextAll()
[<img>]
"""
return self._filter_only(selector, self._next_all())
@with_camel_case_alias
def next_until(self, selector, filter_=None):
"""
>>> h = '''
... <h2>Greeting 1</h2>
... <p>Hello!</p><p>World!</p>
... <h2>Greeting 2</h2><p>Bye!</p>
... '''
>>> d = PyQuery(h)
>>> d('h2:first').nextUntil('h2')
[<p>, <p>]
"""
return self._filter_only(
filter_, [
e
for q in itertools.takewhile(
lambda q: not q.is_(selector), self.next_all().items())
for e in q
]
)
def _prev_all(self):
    """All preceding siblings, nearest first."""
    return list(self._traverse('getprevious'))
@with_camel_case_alias
def prev_all(self, selector=None):
"""
>>> h = '<span><p class="hello">Hi</p><p>Bye</p><img scr=""/></span>'
>>> d = PyQuery(h)
>>> d('p:last').prev_all()
[<p.hello>]
>>> d('p:last').prevAll()
[<p.hello>]
"""
return self._filter_only(selector, self._prev_all(), reverse=True)
def siblings(self, selector=None):
    """
    >>> h = '<span><p class="hello">Hi</p><p>Bye</p><img scr=""/></span>'
    >>> d = PyQuery(h)
    >>> d('.hello').siblings()
    [<p>, <img>]
    >>> d('.hello').siblings('img')
    [<img>]
    """
    around = self._prev_all() + self._next_all()
    return self._filter_only(selector, around)
def parents(self, selector=None):
    """
    >>> d = PyQuery('<span><p class="hello">Hi</p><p>Bye</p></span>')
    >>> d('p').parents()
    [<span>]
    >>> d('.hello').parents('span')
    [<span>]
    >>> d('.hello').parents('p')
    []
    """
    ancestors = list(self._traverse_parent_topdown())
    return self._filter_only(selector, ancestors, unique=True)
def children(self, selector=None):
    """Filter elements that are direct children of self using optional
    selector:

    >>> d = PyQuery('<span><p class="hello">Hi</p><p>Bye</p></span>')
    >>> d
    [<span>]
    >>> d.children()
    [<p.hello>, <p>]
    >>> d.children('.hello')
    [<p.hello>]
    """
    kids = []
    for node in self:
        kids.extend(node.getchildren())
    return self._filter_only(selector, kids)
def closest(self, selector=None):
    """
    >>> d = PyQuery(
    ...  '<div class="hello"><p>This is a '
    ...  '<strong class="hello">test</strong></p></div>')
    >>> d('strong').closest('div')
    [<div.hello>]
    >>> d('strong').closest('.hello')
    [<strong.hello>]
    >>> d('strong').closest('form')
    []
    """
    matched = []
    for node in self:
        # Walk up from the element itself until a match (or the root).
        candidate = node
        while candidate is not None:
            if self._copy(candidate).is_(selector):
                break
            candidate = candidate.getparent()
        if candidate is not None:
            matched.append(candidate)
    return self._copy(matched, parent=self)
def contents(self):
"""
Return contents (with text nodes):
>>> d = PyQuery('hello <b>bold</b>')
>>> d.contents() # doctest: +ELLIPSIS
['hello ', <Element b at ...>]
"""
results = []
for elem in self:
results.extend(elem.xpath('child::text()|child::*',
namespaces=self.namespaces))
return self._copy(results, parent=self)
def filter(self, selector):
    """Filter elements in self using selector (string or function):

    >>> d = PyQuery('<p class="hello">Hi</p><p>Bye</p>')
    >>> d('p')
    [<p.hello>, <p>]
    >>> d('p').filter('.hello')
    [<p.hello>]
    >>> d('p').filter(lambda i: i == 1)
    [<p>]
    >>> d('p').filter(lambda i: PyQuery(this).text() == 'Hi')
    [<p.hello>]
    >>> d('p').filter(lambda i, this: PyQuery(this).text() == 'Hi')
    [<p.hello>]
    """
    if not hasattr(selector, '__call__'):
        return self._filter_only(selector, self)
    else:
        elements = []
        # NOTE(review): this inspects the module-level ``callback``
        # helper, not the user-supplied ``selector``.  Since the helper's
        # positional argspec is constant, ``len(args) == 1`` is always
        # the same, so ``this`` injection does not actually depend on the
        # selector's arity — confirm whether ``getargspec(selector)``
        # was intended.
        args = getargspec(callback)
        try:
            for i, this in enumerate(self):
                if len(args) == 1:
                    # Expose the current element as the jQuery-style
                    # ``this`` global for single-argument callbacks.
                    selector.__globals__['this'] = this
                if callback(selector, i, this):
                    elements.append(this)
        finally:
            # Always clean the injected global back out.
            f_globals = selector.__globals__
            if 'this' in f_globals:
                del f_globals['this']
        return self._copy(elements, parent=self)
def not_(self, selector):
    """Return elements that don't match the given selector:

    >>> d = PyQuery('<p class="hello">Hi</p><p>Bye</p><div></div>')
    >>> d('p').not_('.hello')
    [<p>]
    """
    excluded = set(self._copy(selector, self))
    kept = [node for node in self if node not in excluded]
    return self._copy(kept, parent=self)
def is_(self, selector):
    """Returns True if selector matches at least one current element,
    else False:

    >>> d = PyQuery('<p class="hello"><span>Hi</span></p><p>Bye</p>')
    >>> d('p').eq(0).is_('.hello')
    True
    >>> d('p').eq(0).is_('span')
    False
    >>> d('p').eq(1).is_('.hello')
    False
    """
    return len(self._filter_only(selector, self)) > 0
def find(self, selector):
    """Find elements using selector traversing down from self:

    >>> m = '<p><span><em>Whoah!</em></span></p><p><em> there</em></p>'
    >>> d = PyQuery(m)
    >>> d('p').find('em')
    [<em>, <em>]
    >>> d('p').eq(1).find('em')
    [<em>]
    """
    xpath = self._css_to_xpath(selector)
    matches = []
    # Evaluate against each child so the selection itself is excluded.
    for tag in self:
        for child in tag.getchildren():
            matches.extend(child.xpath(xpath, namespaces=self.namespaces))
    return self._copy(matches, parent=self)
def eq(self, index):
    """Return PyQuery of only the element with the provided index::

    >>> d = PyQuery('<p class="hello">Hi</p><p>Bye</p><div></div>')
    >>> d('p').eq(0)
    [<p.hello>]
    >>> d('p').eq(1)
    [<p>]
    >>> d('p').eq(2)
    []
    """
    # Indexing (not slicing) is used so that negative indexes work;
    # an out-of-bound index simply yields an empty selection.
    try:
        selected = self[index]
    except IndexError:
        selected = []
    return self._copy(selected, parent=self)
def each(self, func):
"""apply func on each nodes
"""
try:
for i, element in enumerate(self):
func.__globals__['this'] = element
if callback(func, i, element) is False:
break
finally:
f_globals = func.__globals__
if 'this' in f_globals:
del f_globals['this']
return self
def map(self, func):
"""Returns a new PyQuery after transforming current items with func.
func should take two arguments - 'index' and 'element'. Elements can
also be referred to as 'this' inside of func::
>>> d = PyQuery('<p class="hello">Hi there</p><p>Bye</p><br />')
>>> d('p').map(lambda i, e: PyQuery(e).text())
['Hi there', 'Bye']
>>> d('p').map(lambda i, e: len(PyQuery(this).text()))
[8, 3]
>>> d('p').map(lambda i, e: PyQuery(this).text().split())
['Hi', 'there', 'Bye']
"""
items = []
try:
for i, element in enumerate(self):
func.__globals__['this'] = element
result = callback(func, i, element)
if result is not None:
if not isinstance(result, list):
items.append(result)
else:
items.extend(result)
finally:
f_globals = func.__globals__
if 'this' in f_globals:
del f_globals['this']
return self._copy(items, parent=self)
@property
def length(self):
    """Number of matched elements (jQuery-style ``.length``)."""
    return len(self)
def size(self):
    """jQuery-style alias for ``len(self)``; same value as ``.length``."""
    return len(self)
def end(self):
    """Break out of a level of traversal and return to the parent level.

    >>> m = '<p><span><em>Whoah!</em></span></p><p><em> there</em></p>'
    >>> d = PyQuery(m)
    >>> d('p').eq(1).find('em').end().end()
    [<p>, <p>]
    """
    # The previous selection is remembered in ``_parent`` (set by
    # ``__call__`` via the ``parent`` keyword).
    return self._parent
##############
# Attributes #
##############
def attr(self, *args, **kwargs):
"""Attributes manipulation
"""
mapping = {'class_': 'class', 'for_': 'for'}
attr = value = no_default
length = len(args)
if length == 1:
attr = args[0]
attr = mapping.get(attr, attr)
elif length == 2:
attr, value = args
attr = mapping.get(attr, attr)
elif kwargs:
attr = {}
for k, v in kwargs.items():
attr[mapping.get(k, k)] = v
else:
raise ValueError('Invalid arguments %s %s' % (args, kwargs))
if not self:
return None
elif isinstance(attr, dict):
for tag in self:
for key, value in attr.items():
tag.set(key, value)
elif value is no_default:
return self[0].get(attr)
elif value is None:
return self.remove_attr(attr)
else:
for tag in self:
tag.set(attr, value)
return self
@with_camel_case_alias
def remove_attr(self, name):
    """Remove an attribute::

        >>> d = PyQuery('<div id="myid"></div>')
        >>> d.remove_attr('id')
        [<div>]
        >>> d.removeAttr('id')
        [<div>]
    """
    for tag in self:
        # Elements without the attribute are silently skipped.
        tag.attrib.pop(name, None)
    return self
attr = FlexibleElement(pget=attr, pdel=remove_attr)
#######
# CSS #
#######
def height(self, value=no_default):
"""set/get height of element
"""
return self.attr('height', value)
def width(self, value=no_default):
"""set/get width of element
"""
return self.attr('width', value)
@with_camel_case_alias
def has_class(self, name):
    """Return True if element has class::

        >>> d = PyQuery('<div class="myclass"></div>')
        >>> d.has_class('myclass')
        True
        >>> d.hasClass('myclass')
        True
    """
    return self.is_('.' + name)
@with_camel_case_alias
def add_class(self, value):
    """Add a css class to elements::

        >>> d = PyQuery('<div></div>')
        >>> d.add_class('myclass')
        [<div.myclass>]
        >>> d.addClass('myclass')
        [<div.myclass>]
    """
    # The classes to add do not depend on the element, so split the
    # (space-separated) value once instead of on every iteration.
    values = value.split(' ')
    for tag in self:
        classes = (tag.get('class') or '').split()
        # Preserve existing order and skip classes already present.
        classes += [v for v in values if v not in classes]
        tag.set('class', ' '.join(classes))
    return self
@with_camel_case_alias
def remove_class(self, value):
    """Remove a css class to elements::

        >>> d = PyQuery('<div class="myclass"></div>')
        >>> d.remove_class('myclass')
        [<div>]
        >>> d.removeClass('myclass')
        [<div>]
    """
    for tag in self:
        values = value.split(' ')
        # NOTE(review): a set is used here, so the relative order of the
        # remaining classes is not preserved.
        classes = set((tag.get('class') or '').split())
        classes.difference_update(values)
        classes.difference_update([''])
        classes = ' '.join(classes)
        # NOTE(review): both branches call ``tag.set`` with the same
        # value.  The effect of the split is that an element which had a
        # ``class`` attribute keeps it (possibly as ``class=""``), while
        # an element without one is left untouched — confirm before
        # collapsing the two branches.
        if classes.strip():
            tag.set('class', classes)
        elif tag.get('class'):
            tag.set('class', classes)
    return self
@with_camel_case_alias
def toggle_class(self, value):
    """Toggle a css class to elements

    >>> d = PyQuery('<div></div>')
    >>> d.toggle_class('myclass')
    [<div.myclass>]
    >>> d.toggleClass('myclass')
    [<div>]
    """
    # The toggled class list is loop-invariant: split it once up front
    # instead of per element.
    values = value.split(' ')
    for tag in self:
        classes = (tag.get('class') or '').split()
        # Classes currently absent get added; present ones get removed.
        values_to_add = [v for v in values if v not in classes]
        values_to_del = [v for v in values if v in classes]
        classes = [v for v in classes if v not in values_to_del]
        classes += values_to_add
        tag.set('class', ' '.join(classes))
    return self
def css(self, *args, **kwargs):
"""css attributes manipulation
"""
attr = value = no_default
length = len(args)
if length == 1:
attr = args[0]
elif length == 2:
attr, value = args
elif kwargs:
attr = kwargs
else:
raise ValueError('Invalid arguments %s %s' % (args, kwargs))
if isinstance(attr, dict):
for tag in self:
stripped_keys = [key.strip().replace('_', '-')
for key in attr.keys()]
current = [el.strip()
for el in (tag.get('style') or '').split(';')
if el.strip()
and el.split(':')[0].strip() not in stripped_keys]
for key, value in attr.items():
key = key.replace('_', '-')
current.append('%s: %s' % (key, value))
tag.set('style', '; '.join(current))
elif isinstance(value, basestring):
attr = attr.replace('_', '-')
for tag in self:
current = [
el.strip()
for el in (tag.get('style') or '').split(';')
if (el.strip() and
not el.split(':')[0].strip() == attr.strip())]
current.append('%s: %s' % (attr, value))
tag.set('style', '; '.join(current))
return self
css = FlexibleElement(pget=css, pset=css)
###################
# CORE UI EFFECTS #
###################
def hide(self):
    """Add display:none to elements style:

    >>> print(PyQuery('<div style="display:none;"/>').hide())
    <div style="display: none"/>
    """
    # ``css`` replaces any existing ``display`` rule in the style attr.
    return self.css('display', 'none')
def show(self):
    """Add display:block to elements style:

    >>> print(PyQuery('<div />').show())
    <div style="display: block"/>
    """
    # ``css`` replaces any existing ``display`` rule in the style attr.
    return self.css('display', 'block')
########
# HTML #
########
def val(self, value=no_default):
"""Set the attribute value::
>>> d = PyQuery('<input />')
>>> d.val('Youhou')
[<input>]
Get the attribute value::
>>> d.val()
'Youhou'
Set the selected values for a `select` element with the `multiple`
attribute::
>>> d = PyQuery('''
... <select multiple>
... <option value="you"><option value="hou">
... </select>
... ''')
>>> d.val(['you', 'hou'])
[<select>]
Get the selected values for a `select` element with the `multiple`
attribute::
>>> d.val()
['you', 'hou']
"""
def _get_value(tag):
# <textarea>
if tag.tag == 'textarea':
return self._copy(tag).html()
# <select>
elif tag.tag == 'select':
if 'multiple' in tag.attrib:
# Only extract value if selected
selected = self._copy(tag)('option[selected]')
# Rebuild list to avoid serialization error
return list(selected.map(
lambda _, o: self._copy(o).attr('value')
))
selected_option = self._copy(tag)('option[selected]:last')
if selected_option:
return selected_option.attr('value')
else:
return self._copy(tag)('option').attr('value')
# <input type="checkbox"> or <input type="radio">
elif self.is_(':checkbox,:radio'):
val = self._copy(tag).attr('value')
if val is None:
return 'on'
else:
return val
# <input>
elif tag.tag == 'input':
val = self._copy(tag).attr('value')
return val.replace('\n', '') if val else ''
# everything else.
return self._copy(tag).attr('value') or ''
def _set_value(pq, value):
for tag in pq:
# <select>
if tag.tag == 'select':
if not isinstance(value, list):
value = [value]
def _make_option_selected(_, elem):
pq = self._copy(elem)
if pq.attr('value') in value:
pq.attr('selected', 'selected')
if 'multiple' not in tag.attrib:
del value[:] # Ensure it toggles first match
else:
pq.removeAttr('selected')
self._copy(tag)('option').each(_make_option_selected)
continue
# Stringify array
if isinstance(value, list):
value = ','.join(value)
# <textarea>
if tag.tag == 'textarea':
self._copy(tag).text(value)
continue
# <input> and everything else.
self._copy(tag).attr('value', value)
if value is no_default:
if len(self):
return _get_value(self[0])
else:
_set_value(self, value)
return self
def html(self, value=no_default, **kwargs):
"""Get or set the html representation of sub nodes.
Get the text value::
>>> d = PyQuery('<div><span>toto</span></div>')
>>> print(d.html())
<span>toto</span>
Extra args are passed to ``lxml.etree.tostring``::
>>> d = PyQuery('<div><span></span></div>')
>>> print(d.html())
<span/>
>>> print(d.html(method='html'))
<span></span>
Set the text value::
>>> d.html('<span>Youhou !</span>')
[<div>]
>>> print(d)
<div><span>Youhou !</span></div>
"""
if value is no_default:
if not self:
return None
tag = self[0]
children = tag.getchildren()
html = escape(tag.text or '', quote=False)
if not children:
return html
if 'encoding' not in kwargs:
kwargs['encoding'] = str
html += u''.join([etree.tostring(e, **kwargs)
for e in children])
return html
else:
if isinstance(value, self.__class__):
new_html = str(value)
elif isinstance(value, basestring):
new_html = value
elif not value:
new_html = ''
else:
raise ValueError(type(value))
for tag in self:
for child in tag.getchildren():
tag.remove(child)
root = fromstring(
u'<root>' + new_html + u'</root>',
self.parser)[0]
children = root.getchildren()
if children:
tag.extend(children)
tag.text = root.text
return self
@with_camel_case_alias
def outer_html(self, method="html"):
"""Get the html representation of the first selected element::
>>> d = PyQuery('<div><span class="red">toto</span> rocks</div>')
>>> print(d('span'))
<span class="red">toto</span> rocks
>>> print(d('span').outer_html())
<span class="red">toto</span>
>>> print(d('span').outerHtml())
<span class="red">toto</span>
>>> S = PyQuery('<p>Only <b>me</b> & myself</p>')
>>> print(S('b').outer_html())
<b>me</b>
..
"""
if not self:
return None
e0 = self[0]
if e0.tail:
e0 = deepcopy(e0)
e0.tail = ''
return etree.tostring(e0, encoding=str, method=method)
def text(self, value=no_default, **kwargs):
"""Get or set the text representation of sub nodes.
Get the text value::
>>> doc = PyQuery('<div><span>toto</span><span>tata</span></div>')
>>> print(doc.text())
tototata
>>> doc = PyQuery('''<div><span>toto</span>
... <span>tata</span></div>''')
>>> print(doc.text())
toto tata
Get the text value, without squashing newlines::
>>> doc = PyQuery('''<div><span>toto</span>
... <span>tata</span></div>''')
>>> print(doc.text(squash_space=False))
toto
tata
Set the text value::
>>> doc.text('Youhou !')
[<div>]
>>> print(doc)
<div>Youhou !</div>
"""
if value is no_default:
if not self:
return ''
return ' '.join(
self._copy(tag).html() if tag.tag == 'textarea' else
extract_text(tag, **kwargs) for tag in self
)
for tag in self:
for child in tag.getchildren():
tag.remove(child)
tag.text = value
return self
################
# Manipulating #
################
def _get_root(self, value):
if isinstance(value, basestring):
root = fromstring(u'<root>' + value + u'</root>',
self.parser)[0]
elif isinstance(value, etree._Element):
root = self._copy(value)
elif isinstance(value, PyQuery):
root = value
else:
raise TypeError(
'Value must be string, PyQuery or Element. Got %r' % value)
if hasattr(root, 'text') and isinstance(root.text, basestring):
root_text = root.text
else:
root_text = ''
return root, root_text
def append(self, value):
"""append value to each nodes
"""
root, root_text = self._get_root(value)
for i, tag in enumerate(self):
if len(tag) > 0: # if the tag has children
last_child = tag[-1]
if not last_child.tail:
last_child.tail = ''
last_child.tail += root_text
else:
if not tag.text:
tag.text = ''
tag.text += root_text
if i > 0:
root = deepcopy(list(root))
tag.extend(root)
return self
@with_camel_case_alias
def append_to(self, value):
"""append nodes to value
"""
value.append(self)
return self
def prepend(self, value):
"""prepend value to nodes
"""
root, root_text = self._get_root(value)
for i, tag in enumerate(self):
if not tag.text:
tag.text = ''
if len(root) > 0:
root[-1].tail = tag.text
tag.text = root_text
else:
tag.text = root_text + tag.text
if i > 0:
root = deepcopy(list(root))
tag[:0] = root
root = tag[:len(root)]
return self
@with_camel_case_alias
def prepend_to(self, value):
"""prepend nodes to value
"""
value.prepend(self)
return self
def after(self, value):
"""add value after nodes
"""
root, root_text = self._get_root(value)
for i, tag in enumerate(self):
if not tag.tail:
tag.tail = ''
tag.tail += root_text
if i > 0:
root = deepcopy(list(root))
parent = tag.getparent()
index = parent.index(tag) + 1
parent[index:index] = root
root = parent[index:len(root)]
return self
@with_camel_case_alias
def insert_after(self, value):
"""insert nodes after value
"""
value.after(self)
return self
def before(self, value):
"""insert value before nodes
"""
root, root_text = self._get_root(value)
for i, tag in enumerate(self):
previous = tag.getprevious()
if previous is not None:
if not previous.tail:
previous.tail = ''
previous.tail += root_text
else:
parent = tag.getparent()
if not parent.text:
parent.text = ''
parent.text += root_text
if i > 0:
root = deepcopy(list(root))
parent = tag.getparent()
index = parent.index(tag)
parent[index:index] = root
root = parent[index:len(root)]
return self
@with_camel_case_alias
def insert_before(self, value):
"""insert nodes before value
"""
value.before(self)
return self
def wrap(self, value):
"""A string of HTML that will be created on the fly and wrapped around
each target:
>>> d = PyQuery('<span>youhou</span>')
>>> d.wrap('<div></div>')
[<div>]
>>> print(d)
<div><span>youhou</span></div>
"""
assert isinstance(value, basestring)
value = fromstring(value)[0]
nodes = []
for tag in self:
wrapper = deepcopy(value)
# FIXME: using iterchildren is probably not optimal
if not wrapper.getchildren():
wrapper.append(deepcopy(tag))
else:
childs = [c for c in wrapper.iterchildren()]
child = childs[-1]
child.append(deepcopy(tag))
nodes.append(wrapper)
parent = tag.getparent()
if parent is not None:
for t in parent.iterchildren():
if t is tag:
t.addnext(wrapper)
parent.remove(t)
break
self[:] = nodes
return self
@with_camel_case_alias
def wrap_all(self, value):
"""Wrap all the elements in the matched set into a single wrapper
element::
>>> d = PyQuery('<div><span>Hey</span><span>you !</span></div>')
>>> print(d('span').wrap_all('<div id="wrapper"></div>'))
<div id="wrapper"><span>Hey</span><span>you !</span></div>
>>> d = PyQuery('<div><span>Hey</span><span>you !</span></div>')
>>> print(d('span').wrapAll('<div id="wrapper"></div>'))
<div id="wrapper"><span>Hey</span><span>you !</span></div>
..
"""
if not self:
return self
assert isinstance(value, basestring)
value = fromstring(value)[0]
wrapper = deepcopy(value)
if not wrapper.getchildren():
child = wrapper
else:
childs = [c for c in wrapper.iterchildren()]
child = childs[-1]
replace_childs = True
parent = self[0].getparent()
if parent is None:
parent = no_default
# add nodes to wrapper and check parent
for tag in self:
child.append(deepcopy(tag))
if tag.getparent() is not parent:
replace_childs = False
# replace nodes i parent if possible
if parent is not no_default and replace_childs:
childs = [c for c in parent.iterchildren()]
if len(childs) == len(self):
for tag in self:
parent.remove(tag)
parent.append(wrapper)
self[:] = [wrapper]
return self
@with_camel_case_alias
def replace_with(self, value):
"""replace nodes by value:
>>> doc = PyQuery("<html><div /></html>")
>>> node = PyQuery("<span />")
>>> child = doc.find('div')
>>> child.replace_with(node)
[<div>]
>>> print(doc)
<html><span/></html>
"""
if isinstance(value, PyQuery):
value = str(value)
if hasattr(value, '__call__'):
for i, element in enumerate(self):
self._copy(element).before(
value(i, element) + (element.tail or ''))
parent = element.getparent()
parent.remove(element)
else:
for tag in self:
self._copy(tag).before(value + (tag.tail or ''))
parent = tag.getparent()
parent.remove(tag)
return self
@with_camel_case_alias
def replace_all(self, expr):
"""replace nodes by expr
"""
if self._parent is no_default:
raise ValueError(
'replaceAll can only be used with an object with parent')
self._parent(expr).replace_with(self)
return self
def clone(self):
    """return a copy of nodes
    """
    # NOTE(review): builds a plain ``PyQuery`` rather than going through
    # ``self._copy()``, so a clone of a subclass instance loses the
    # subclass and the ``namespaces`` setting — confirm whether that is
    # intentional before changing it.
    return PyQuery([deepcopy(tag) for tag in self])
def empty(self):
    """remove nodes content
    """
    for tag in self:
        # Drop both the element's text and all of its children.
        tag.text = None
        del tag[:]
    return self
def remove(self, expr=no_default):
"""Remove nodes:
>>> h = (
... '<div>Maybe <em>she</em> does <strong>NOT</strong> know</div>'
... )
>>> d = PyQuery(h)
>>> d('strong').remove()
[<strong>]
>>> print(d)
<div>Maybe <em>she</em> does know</div>
"""
if expr is no_default:
for tag in self:
parent = tag.getparent()
if parent is not None:
if tag.tail:
prev = tag.getprevious()
if prev is None:
if not parent.text:
parent.text = ''
parent.text += tag.tail
else:
if not prev.tail:
prev.tail = ''
prev.tail += tag.tail
parent.remove(tag)
else:
results = self._copy(expr, self)
results.remove()
return self
class Fn(object):
"""Hook for defining custom function (like the jQuery.fn):
.. sourcecode:: python
>>> fn = lambda: this.map(lambda i, el: PyQuery(this).outerHtml())
>>> PyQuery.fn.listOuterHtml = fn
>>> S = PyQuery(
... '<ol> <li>Coffee</li> <li>Tea</li> <li>Milk</li> </ol>')
>>> S('li').listOuterHtml()
['<li>Coffee</li>', '<li>Tea</li>', '<li>Milk</li>']
"""
def __setattr__(self, name, func):
def fn(self, *args, **kwargs):
func.__globals__['this'] = self
return func(*args, **kwargs)
fn.__name__ = name
setattr(PyQuery, name, fn)
fn = Fn()
########
# AJAX #
########
@with_camel_case_alias
def serialize_array(self):
"""Serialize form elements as an array of dictionaries, whose structure
mirrors that produced by the jQuery API. Notably, it does not handle
the deprecated `keygen` form element.
>>> d = PyQuery('<form><input name="order" value="spam"></form>')
>>> d.serialize_array() == [{'name': 'order', 'value': 'spam'}]
True
>>> d.serializeArray() == [{'name': 'order', 'value': 'spam'}]
True
"""
return list(map(
lambda p: {'name': p[0], 'value': p[1]},
self.serialize_pairs()
))
def serialize(self):
"""Serialize form elements as a URL-encoded string.
>>> h = (
... '<form><input name="order" value="spam">'
... '<input name="order2" value="baked beans"></form>'
... )
>>> d = PyQuery(h)
>>> d.serialize()
'order=spam&order2=baked%20beans'
"""
return urlencode(self.serialize_pairs()).replace('+', '%20')
#####################################################
# Additional methods that are not in the jQuery API #
#####################################################
@with_camel_case_alias
def serialize_pairs(self):
"""Serialize form elements as an array of 2-tuples conventional for
typical URL-parsing operations in Python.
>>> d = PyQuery('<form><input name="order" value="spam"></form>')
>>> d.serialize_pairs()
[('order', 'spam')]
>>> d.serializePairs()
[('order', 'spam')]
"""
# https://github.com/jquery/jquery/blob
# /2d4f53416e5f74fa98e0c1d66b6f3c285a12f0ce/src/serialize.js#L14
_submitter_types = ['submit', 'button', 'image', 'reset', 'file']
controls = self._copy([])
# Expand list of form controls
for el in self.items():
if el[0].tag == 'form':
form_id = el.attr('id')
if form_id:
# Include inputs outside of their form owner
root = self._copy(el.root.getroot())
controls.extend(root(
'#%s :not([form]):input, [form="%s"]:input'
% (form_id, form_id)))
else:
controls.extend(el(':not([form]):input'))
elif el[0].tag == 'fieldset':
controls.extend(el(':input'))
else:
controls.extend(el)
# Filter controls
selector = '[name]:enabled:not(button)' # Not serializing image button
selector += ''.join(map(
lambda s: ':not([type="%s"])' % s,
_submitter_types))
controls = controls.filter(selector)
def _filter_out_unchecked(_, el):
el = controls._copy(el)
return not el.is_(':checkbox:not(:checked)') and \
not el.is_(':radio:not(:checked)')
controls = controls.filter(_filter_out_unchecked)
# jQuery serializes inputs with the datalist element as an ancestor
# contrary to WHATWG spec as of August 2018
#
# xpath = 'self::*[not(ancestor::datalist)]'
# results = []
# for tag in controls:
# results.extend(tag.xpath(xpath, namespaces=controls.namespaces))
# controls = controls._copy(results)
# Serialize values
ret = []
for field in controls:
val = self._copy(field).val() or ''
if isinstance(val, list):
ret.extend(map(
lambda v: (field.attrib['name'], v.replace('\n', '\r\n')),
val
))
else:
ret.append((field.attrib['name'], val.replace('\n', '\r\n')))
return ret
@with_camel_case_alias
def serialize_dict(self):
"""Serialize form elements as an ordered dictionary. Multiple values
corresponding to the same input name are concatenated into one list.
>>> d = PyQuery('''<form>
... <input name="order" value="spam">
... <input name="order" value="eggs">
... <input name="order2" value="ham">
... </form>''')
>>> d.serialize_dict()
OrderedDict({'order': ['spam', 'eggs'], 'order2': 'ham'})
>>> d.serializeDict()
OrderedDict({'order': ['spam', 'eggs'], 'order2': 'ham'})
"""
ret = OrderedDict()
for name, val in self.serialize_pairs():
if name not in ret:
ret[name] = val
elif not isinstance(ret[name], list):
ret[name] = [ret[name], val]
else:
ret[name].append(val)
return ret
@property
def base_url(self):
"""Return the url of current html document or None if not available.
"""
if self._base_url is not None:
return self._base_url
if self._parent is not no_default:
return self._parent.base_url
def make_links_absolute(self, base_url=None):
"""Make all links absolute.
"""
if base_url is None:
base_url = self.base_url
if base_url is None:
raise ValueError((
'You need a base URL to make your links'
'absolute. It can be provided by the base_url parameter.'))
def repl(attr):
def rep(i, e):
attr_value = self(e).attr(attr)
# when label hasn't such attr, pass
if attr_value is None:
return None
# skip specific "protocol" schemas
if any(attr_value.startswith(schema)
for schema in ('tel:', 'callto:', 'sms:')):
return None
return self(e).attr(attr,
urljoin(base_url, attr_value.strip()))
return rep
self('a').each(repl('href'))
self('link').each(repl('href'))
self('script').each(repl('src'))
self('img').each(repl('src'))
self('iframe').each(repl('src'))
self('form').each(repl('action'))
return self
build_camel_case_aliases(PyQuery)
| PyQuery |
python | sympy__sympy | sympy/polys/domains/expressionrawdomain.py | {
"start": 375,
"end": 1448
} | class ____(Field, CharacteristicZero, SimpleDomain):
"""A class for arbitrary expressions but without automatic simplification. """
is_SymbolicRawDomain = is_EXRAW = True
dtype = Expr
zero = S.Zero
one = S.One
rep = 'EXRAW'
has_assoc_Ring = False
has_assoc_Field = True
def __init__(self):
pass
@classmethod
def new(self, a):
return sympify(a)
def to_sympy(self, a):
"""Convert ``a`` to a SymPy object. """
return a
def from_sympy(self, a):
"""Convert SymPy's expression to ``dtype``. """
if not isinstance(a, Expr):
raise CoercionFailed(f"Expecting an Expr instance but found: {type(a).__name__}")
return a
def convert_from(self, a, K):
"""Convert a domain element from another domain to EXRAW"""
return K.to_sympy(a)
def get_field(self):
"""Returns a field associated with ``self``. """
return self
def sum(self, items):
return Add(*items)
EXRAW = ExpressionRawDomain()
| ExpressionRawDomain |
python | huggingface__transformers | src/transformers/models/jamba/modular_jamba.py | {
"start": 22728,
"end": 22767
} | class ____(MistralMLP):
pass
| JambaMLP |
python | pytorch__pytorch | torch/ao/nn/quantized/modules/normalization.py | {
"start": 158,
"end": 2339
} | class ____(torch.nn.LayerNorm):
r"""This is the quantized version of :class:`~torch.nn.LayerNorm`.
Additional args:
* **scale** - quantization scale of the output, type: double.
* **zero_point** - quantization zero point of the output, type: long.
"""
def __init__(
self,
normalized_shape,
weight,
bias,
scale,
zero_point,
eps=1e-5,
elementwise_affine=True,
device=None,
dtype=None,
) -> None:
factory_kwargs = {"device": device, "dtype": dtype}
super().__init__(
normalized_shape,
eps=eps,
elementwise_affine=elementwise_affine,
# pyrefly: ignore [bad-argument-type]
**factory_kwargs,
)
self.weight = weight
self.bias = bias
# pyrefly: ignore [bad-argument-type]
self.register_buffer("scale", torch.tensor(scale, **factory_kwargs))
# pyrefly: ignore [bad-argument-type]
self.register_buffer("zero_point", torch.tensor(zero_point, **factory_kwargs))
def forward(self, input):
return torch.ops.quantized.layer_norm(
input,
self.normalized_shape,
weight=self.weight,
bias=self.bias,
eps=self.eps,
output_scale=self.scale,
output_zero_point=self.zero_point,
)
def _get_name(self):
return "QuantizedLayerNorm"
@classmethod
def from_float(cls, mod, use_precomputed_fake_quant=False):
scale, zero_point = mod.activation_post_process.calculate_qparams()
new_mod = cls(
mod.normalized_shape,
mod.weight,
mod.bias,
float(scale),
int(zero_point),
mod.eps,
mod.elementwise_affine,
)
return new_mod
@classmethod
def from_reference(cls, mod, scale, zero_point):
return cls(
mod.normalized_shape,
mod.weight,
mod.bias,
float(scale),
int(zero_point),
mod.eps,
mod.elementwise_affine,
)
| LayerNorm |
python | getsentry__sentry | tests/sentry/utils/test_geo.py | {
"start": 217,
"end": 676
} | class ____(TestCase):
def test_geo_by_addr(self) -> None:
import importlib
import sentry.utils.geo
importlib.reload(sentry.utils.geo)
from sentry.utils.geo import geo_by_addr
assert geo_by_addr("8.8.8.8") == {
"country_code": "US",
"region": "CA",
"city": "Beverly Hills",
"latitude": 34.09109878540039,
"longitude": -118.41169738769531,
}
| TestGeo |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/kwargsUnpack1.py | {
"start": 353,
"end": 1785
} | class ____(TD1):
v3: Required[str]
def func1(**kwargs: Unpack[TD2]) -> None:
v1 = kwargs["v1"]
reveal_type(v1, expected_text="int")
# This should generate an error because v2 might not be present.
kwargs["v2"]
if "v2" in kwargs:
v2 = kwargs["v2"]
reveal_type(v2, expected_text="str")
v3 = kwargs["v3"]
reveal_type(v3, expected_text="str")
reveal_type(func1, expected_text="(**kwargs: **TD2) -> None")
def func2(v3: str, **kwargs: Unpack[TD1]) -> None:
pass
def func3():
# This should generate an error because it is
# missing required keyword arguments.
func1()
func1(v1=1, v2="", v3="5")
td2 = TD2(v1=2, v3="4")
func1(**td2)
# This should generate an error because v4 is not in TD2.
func1(v1=1, v2="", v3="5", v4=5)
# This should generate an error because args are passed by position.
func1(1, "", "5")
my_dict: dict[str, str] = {}
# This should generate an error because it's an untyped dict.
func1(**my_dict)
d1 = {"v1": 2, "v3": "4", "v4": 4}
func1(**d1)
func2(**td2)
# This should generate an error because v1 is already specified.
func1(v1=2, **td2)
# This should generate an error because v1 is already specified.
func2(1, **td2)
# This should generate an error because v1 is matched to a
# named parameter and is not available for kwargs.
func2(v1=1, **td2)
| TD2 |
python | geekcomputers__Python | brickout-game/brickout-game.py | {
"start": 2643,
"end": 3470
} | class ____(object):
def __init__(self, screen, width, height, x, y):
self.__screen = screen
self._width = width
self._height = height
self._xLoc = x
self._yLoc = y
w, h = pygame.display.get_surface().get_size()
self.__W = w
self.__H = h
def draw(self):
"""
draws the paddle onto screen.
"""
pygame.draw.rect(
screen, (0, 0, 0), (self._xLoc, self._yLoc, self._width, self._height), 0
)
def update(self):
"""
moves the paddle at the screen via mouse
"""
x, y = pygame.mouse.get_pos()
if x >= 0 and x <= (self.__W - self._width):
self._xLoc = x
"""
This class represents a simple Brick class.
For representing bricks onto screen.
"""
| Paddle |
python | numpy__numpy | benchmarks/benchmarks/bench_core.py | {
"start": 3094,
"end": 3638
} | class ____(Benchmark):
params = [[50, 1000, int(1e5)],
[10, 100, 1000, int(1e4)],
['valid', 'same', 'full']]
param_names = ['size1', 'size2', 'mode']
def setup(self, size1, size2, mode):
self.x1 = np.linspace(0, 1, num=size1)
self.x2 = np.cos(np.linspace(0, 2 * np.pi, num=size2))
def time_correlate(self, size1, size2, mode):
np.correlate(self.x1, self.x2, mode=mode)
def time_convolve(self, size1, size2, mode):
np.convolve(self.x1, self.x2, mode=mode)
| CorrConv |
python | huggingface__transformers | tests/models/conditional_detr/test_modeling_conditional_detr.py | {
"start": 1448,
"end": 6642
} | class ____:
def __init__(
self,
parent,
batch_size=8,
is_training=True,
use_labels=True,
hidden_size=32,
num_hidden_layers=2,
num_attention_heads=8,
intermediate_size=4,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
num_queries=12,
num_channels=3,
min_size=200,
max_size=200,
n_targets=8,
num_labels=91,
):
self.parent = parent
self.batch_size = batch_size
self.is_training = is_training
self.use_labels = use_labels
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.num_queries = num_queries
self.num_channels = num_channels
self.min_size = min_size
self.max_size = max_size
self.n_targets = n_targets
self.num_labels = num_labels
# we also set the expected seq length for both encoder and decoder
self.encoder_seq_length = math.ceil(self.min_size / 32) * math.ceil(self.max_size / 32)
self.decoder_seq_length = self.num_queries
def prepare_config_and_inputs(self):
pixel_values = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size])
pixel_mask = torch.ones([self.batch_size, self.min_size, self.max_size], device=torch_device)
labels = None
if self.use_labels:
# labels is a list of Dict (each Dict being the labels for a given example in the batch)
labels = []
for i in range(self.batch_size):
target = {}
target["class_labels"] = torch.randint(
high=self.num_labels, size=(self.n_targets,), device=torch_device
)
target["boxes"] = torch.rand(self.n_targets, 4, device=torch_device)
target["masks"] = torch.rand(self.n_targets, self.min_size, self.max_size, device=torch_device)
labels.append(target)
config = self.get_config()
return config, pixel_values, pixel_mask, labels
def get_config(self):
resnet_config = ResNetConfig(
num_channels=3,
embeddings_size=10,
hidden_sizes=[10, 20, 30, 40],
depths=[1, 1, 2, 1],
hidden_act="relu",
num_labels=3,
out_features=["stage2", "stage3", "stage4"],
out_indices=[2, 3, 4],
)
return ConditionalDetrConfig(
d_model=self.hidden_size,
encoder_layers=self.num_hidden_layers,
decoder_layers=self.num_hidden_layers,
encoder_attention_heads=self.num_attention_heads,
decoder_attention_heads=self.num_attention_heads,
encoder_ffn_dim=self.intermediate_size,
decoder_ffn_dim=self.intermediate_size,
dropout=self.hidden_dropout_prob,
attention_dropout=self.attention_probs_dropout_prob,
num_queries=self.num_queries,
num_labels=self.num_labels,
use_timm_backbone=False,
backbone_config=resnet_config,
backbone=None,
use_pretrained_backbone=False,
)
def prepare_config_and_inputs_for_common(self):
config, pixel_values, pixel_mask, labels = self.prepare_config_and_inputs()
inputs_dict = {"pixel_values": pixel_values, "pixel_mask": pixel_mask}
return config, inputs_dict
def create_and_check_conditional_detr_model(self, config, pixel_values, pixel_mask, labels):
model = ConditionalDetrModel(config=config)
model.to(torch_device)
model.eval()
result = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
result = model(pixel_values)
self.parent.assertEqual(
result.last_hidden_state.shape, (self.batch_size, self.decoder_seq_length, self.hidden_size)
)
def create_and_check_conditional_detr_object_detection_head_model(self, config, pixel_values, pixel_mask, labels):
model = ConditionalDetrForObjectDetection(config=config)
model.to(torch_device)
model.eval()
result = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
result = model(pixel_values)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_queries, self.num_labels))
self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_queries, 4))
result = model(pixel_values=pixel_values, pixel_mask=pixel_mask, labels=labels)
self.parent.assertEqual(result.loss.shape, ())
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_queries, self.num_labels))
self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_queries, 4))
@require_torch
| ConditionalDetrModelTester |
python | numba__numba | numba/cuda/cudadecl.py | {
"start": 2760,
"end": 3365
} | class ____(ConcreteTemplate):
key = cuda.shfl_sync_intrinsic
cases = [
signature(types.Tuple((types.i4, types.b1)),
types.i4, types.i4, types.i4, types.i4, types.i4),
signature(types.Tuple((types.i8, types.b1)),
types.i4, types.i4, types.i8, types.i4, types.i4),
signature(types.Tuple((types.f4, types.b1)),
types.i4, types.i4, types.f4, types.i4, types.i4),
signature(types.Tuple((types.f8, types.b1)),
types.i4, types.i4, types.f8, types.i4, types.i4),
]
@register
| Cuda_shfl_sync_intrinsic |
python | run-llama__llama_index | llama-index-integrations/llms/llama-index-llms-maritalk/llama_index/llms/maritalk/base.py | {
"start": 863,
"end": 1742
} | class ____(HTTPError):
def __init__(self, request_obj: Response) -> None:
self.request_obj = request_obj
try:
response_json = request_obj.json()
if "detail" in response_json:
api_message = response_json["detail"]
elif "message" in response_json:
api_message = response_json["message"]
else:
api_message = response_json
except Exception:
api_message = request_obj.text
self.message = api_message
self.status_code = request_obj.status_code
def __str__(self) -> str:
status_code_meaning = HTTPStatus(self.status_code).phrase
formatted_message = f"HTTP Error: {self.status_code} - {status_code_meaning}"
formatted_message += f"\nDetail: {self.message}"
return formatted_message
| MaritalkHTTPError |
python | django__django | tests/m2m_through_regress/test_multitable.py | {
"start": 155,
"end": 2297
} | class ____(TestCase):
@classmethod
def setUpTestData(cls):
cls.alice = Person.objects.create(name="Alice")
cls.bob = Person.objects.create(name="Bob")
cls.chris = Person.objects.create(name="Chris")
cls.dan = Person.objects.create(name="Dan")
cls.team_alpha = Group.objects.create(name="Alpha")
Membership.objects.create(person=cls.alice, group=cls.team_alpha)
Membership.objects.create(person=cls.bob, group=cls.team_alpha)
cls.event = Event.objects.create(name="Exposition Match")
IndividualCompetitor.objects.create(event=cls.event, person=cls.chris)
IndividualCompetitor.objects.create(event=cls.event, person=cls.dan)
CompetingTeam.objects.create(event=cls.event, team=cls.team_alpha)
def test_m2m_query(self):
result = self.event.teams.all()
self.assertCountEqual(result, [self.team_alpha])
def test_m2m_reverse_query(self):
result = self.chris.event_set.all()
self.assertCountEqual(result, [self.event])
def test_m2m_query_proxied(self):
result = self.event.special_people.all()
self.assertCountEqual(result, [self.chris, self.dan])
def test_m2m_reverse_query_proxied(self):
result = self.chris.special_event_set.all()
self.assertCountEqual(result, [self.event])
def test_m2m_prefetch_proxied(self):
result = Event.objects.filter(name="Exposition Match").prefetch_related(
"special_people"
)
with self.assertNumQueries(2):
self.assertCountEqual(result, [self.event])
self.assertEqual(
sorted(p.name for p in result[0].special_people.all()), ["Chris", "Dan"]
)
def test_m2m_prefetch_reverse_proxied(self):
result = Person.objects.filter(name="Dan").prefetch_related("special_event_set")
with self.assertNumQueries(2):
self.assertCountEqual(result, [self.dan])
self.assertEqual(
[event.name for event in result[0].special_event_set.all()],
["Exposition Match"],
)
| MultiTableTests |
python | altair-viz__altair | altair/vegalite/v6/schema/core.py | {
"start": 228245,
"end": 243156
} | class ____(ConditionalStringFieldDef):
r"""
ConditionalParameterStringFieldDef schema wrapper.
Parameters
----------
param : str, :class:`ParameterName`
Filter using a parameter name.
aggregate : dict, :class:`Aggregate`, :class:`ArgmaxDef`, :class:`ArgminDef`, :class:`NonArgAggregateOp`, Literal['average', 'count', 'distinct', 'max', 'mean', 'median', 'min', 'missing', 'product', 'q1', 'q3', 'ci0', 'ci1', 'stderr', 'stdev', 'stdevp', 'sum', 'valid', 'values', 'variance', 'variancep', 'exponential', 'exponentialb']
Aggregation function for the field (e.g., ``"mean"``, ``"sum"``, ``"median"``,
``"min"``, ``"max"``, ``"count"``).
**Default value:** ``undefined`` (None)
**See also:** `aggregate <https://vega.github.io/vega-lite/docs/aggregate.html>`__
documentation.
bandPosition : float
Relative position on a band of a stacked, binned, time unit, or band scale. For
example, the marks will be positioned at the beginning of the band if set to ``0``,
and at the middle of the band if set to ``0.5``.
bin : bool, dict, Literal['binned'], :class:`BinParams`, None
A flag for binning a ``quantitative`` field, `an object defining binning parameters
<https://vega.github.io/vega-lite/docs/bin.html#bin-parameters>`__, or indicating
that the data for ``x`` or ``y`` channel are binned before they are imported into
Vega-Lite (``"binned"``).
* If ``true``, default `binning parameters
<https://vega.github.io/vega-lite/docs/bin.html#bin-parameters>`__ will be
applied.
* If ``"binned"``, this indicates that the data for the ``x`` (or ``y``) channel are
already binned. You can map the bin-start field to ``x`` (or ``y``) and the
bin-end field to ``x2`` (or ``y2``). The scale and axis will be formatted similar
to binning in Vega-Lite. To adjust the axis ticks based on the bin step, you can
also set the axis's `tickMinStep
<https://vega.github.io/vega-lite/docs/axis.html#ticks>`__ property.
**Default value:** ``false``
**See also:** `bin <https://vega.github.io/vega-lite/docs/bin.html>`__
documentation.
empty : bool
For selection parameters, the predicate of empty selections returns true by default.
Override this behavior, by setting this property ``empty: false``.
field : str, dict, :class:`Field`, :class:`FieldName`, :class:`RepeatRef`
**Required.** A string defining the name of the field from which to pull a data
value or an object defining iterated values from the `repeat
<https://vega.github.io/vega-lite/docs/repeat.html>`__ operator.
**See also:** `field <https://vega.github.io/vega-lite/docs/field.html>`__
documentation.
**Notes:** 1) Dots (``.``) and brackets (``[`` and ``]``) can be used to access
nested objects (e.g., ``"field": "foo.bar"`` and ``"field": "foo['bar']"``). If
field names contain dots or brackets but are not nested, you can use ``\\`` to
escape dots and brackets (e.g., ``"a\\.b"`` and ``"a\\[0\\]"``). See more details
about escaping in the `field documentation
<https://vega.github.io/vega-lite/docs/field.html>`__. 2) ``field`` is not required
if ``aggregate`` is ``count``.
format : str, dict, :class:`Dict`, :class:`Format`, :class:`TimeFormatSpecifier`
The text format specifier for formatting number and date/time in labels of guides
(axes, legends, headers) and text marks.
If the format type is ``"number"`` (e.g., for quantitative fields), this is a D3's
`number format pattern string <https://github.com/d3/d3-format#locale_format>`__.
If the format type is ``"time"`` (e.g., for temporal fields), this is either: a)
D3's `time format pattern <https://d3js.org/d3-time-format#locale_format>`__ if you
desire to set a static time format.
b) `dynamic time format specifier object
<https://vega.github.io/vega-lite/docs/format.html#dynamic-time-format>`__ if you
desire to set a dynamic time format that uses different formats depending on the
granularity of the input date (e.g., if the date lies on a year, month, date, hour,
etc. boundary).
When used with a `custom formatType
<https://vega.github.io/vega-lite/docs/config.html#custom-format-type>`__, this
value will be passed as ``format`` alongside ``datum.value`` to the registered
function.
**Default value:** Derived from `numberFormat
<https://vega.github.io/vega-lite/docs/config.html#format>`__ config for number
format and from `timeFormat
<https://vega.github.io/vega-lite/docs/config.html#format>`__ config for time
format.
formatType : str
The format type for labels. One of ``"number"``, ``"time"``, or a `registered custom
format type
<https://vega.github.io/vega-lite/docs/config.html#custom-format-type>`__.
**Default value:**
* ``"time"`` for temporal fields and ordinal and nominal fields with ``timeUnit``.
* ``"number"`` for quantitative fields as well as ordinal and nominal fields without
``timeUnit``.
timeUnit : dict, :class:`TimeUnit`, :class:`MultiTimeUnit`, :class:`BinnedTimeUnit`, :class:`SingleTimeUnit`, :class:`TimeUnitParams`, :class:`UtcMultiTimeUnit`, :class:`UtcSingleTimeUnit`, :class:`LocalMultiTimeUnit`, :class:`LocalSingleTimeUnit`, Literal['binnedyear', 'binnedyearquarter', 'binnedyearquartermonth', 'binnedyearmonth', 'binnedyearmonthdate', 'binnedyearmonthdatehours', 'binnedyearmonthdatehoursminutes', 'binnedyearmonthdatehoursminutesseconds', 'binnedyearweek', 'binnedyearweekday', 'binnedyearweekdayhours', 'binnedyearweekdayhoursminutes', 'binnedyearweekdayhoursminutesseconds', 'binnedyeardayofyear', 'binnedutcyear', 'binnedutcyearquarter', 'binnedutcyearquartermonth', 'binnedutcyearmonth', 'binnedutcyearmonthdate', 'binnedutcyearmonthdatehours', 'binnedutcyearmonthdatehoursminutes', 'binnedutcyearmonthdatehoursminutesseconds', 'binnedutcyearweek', 'binnedutcyearweekday', 'binnedutcyearweekdayhours', 'binnedutcyearweekdayhoursminutes', 'binnedutcyearweekdayhoursminutesseconds', 'binnedutcyeardayofyear', 'utcyear', 'utcquarter', 'utcmonth', 'utcweek', 'utcday', 'utcdayofyear', 'utcdate', 'utchours', 'utcminutes', 'utcseconds', 'utcmilliseconds', 'year', 'quarter', 'month', 'week', 'day', 'dayofyear', 'date', 'hours', 'minutes', 'seconds', 'milliseconds', 'utcyearquarter', 'utcyearquartermonth', 'utcyearmonth', 'utcyearmonthdate', 'utcyearmonthdatehours', 'utcyearmonthdatehoursminutes', 'utcyearmonthdatehoursminutesseconds', 'utcyearweek', 'utcyearweekday', 'utcyearweekdayhours', 'utcyearweekdayhoursminutes', 'utcyearweekdayhoursminutesseconds', 'utcyeardayofyear', 'utcquartermonth', 'utcmonthdate', 'utcmonthdatehours', 'utcmonthdatehoursminutes', 'utcmonthdatehoursminutesseconds', 'utcweekday', 'utcweekdayhours', 'utcweekdayhoursminutes', 'utcweekdayhoursminutesseconds', 'utcdayhours', 'utcdayhoursminutes', 'utcdayhoursminutesseconds', 'utchoursminutes', 'utchoursminutesseconds', 'utcminutesseconds', 'utcsecondsmilliseconds', 'yearquarter', 
'yearquartermonth', 'yearmonth', 'yearmonthdate', 'yearmonthdatehours', 'yearmonthdatehoursminutes', 'yearmonthdatehoursminutesseconds', 'yearweek', 'yearweekday', 'yearweekdayhours', 'yearweekdayhoursminutes', 'yearweekdayhoursminutesseconds', 'yeardayofyear', 'quartermonth', 'monthdate', 'monthdatehours', 'monthdatehoursminutes', 'monthdatehoursminutesseconds', 'weekday', 'weekdayhours', 'weekdayhoursminutes', 'weekdayhoursminutesseconds', 'dayhours', 'dayhoursminutes', 'dayhoursminutesseconds', 'hoursminutes', 'hoursminutesseconds', 'minutesseconds', 'secondsmilliseconds']
Time unit (e.g., ``year``, ``yearmonth``, ``month``, ``hours``) for a temporal
field. or `a temporal field that gets casted as ordinal
<https://vega.github.io/vega-lite/docs/type.html#cast>`__.
**Default value:** ``undefined`` (None)
**See also:** `timeUnit <https://vega.github.io/vega-lite/docs/timeunit.html>`__
documentation.
title : str, :class:`Text`, Sequence[str], None
A title for the field. If ``null``, the title will be removed.
**Default value:** derived from the field's name and transformation function
(``aggregate``, ``bin`` and ``timeUnit``). If the field has an aggregate function,
the function is displayed as part of the title (e.g., ``"Sum of Profit"``). If the
field is binned or has a time unit applied, the applied function is shown in
parentheses (e.g., ``"Profit (binned)"``, ``"Transaction Date (year-month)"``).
Otherwise, the title is simply the field name.
**Notes**:
1) You can customize the default field title format by providing the `fieldTitle
<https://vega.github.io/vega-lite/docs/config.html#top-level-config>`__ property in
the `config <https://vega.github.io/vega-lite/docs/config.html>`__ or `fieldTitle
function via the compile function's options
<https://vega.github.io/vega-lite/usage/compile.html#field-title>`__.
2) If both field definition's ``title`` and axis, header, or legend ``title`` are
defined, axis/header/legend title will be used.
type : :class:`StandardType`, Literal['quantitative', 'ordinal', 'temporal', 'nominal']
The type of measurement (``"quantitative"``, ``"temporal"``, ``"ordinal"``, or
``"nominal"``) for the encoded field or constant value (``datum``). It can also be a
``"geojson"`` type for encoding `'geoshape'
<https://vega.github.io/vega-lite/docs/geoshape.html>`__.
Vega-Lite automatically infers data types in many cases as discussed below. However,
type is required for a field if: (1) the field is not nominal and the field encoding
has no specified ``aggregate`` (except ``argmin`` and ``argmax``), ``bin``, scale
type, custom ``sort`` order, nor ``timeUnit`` or (2) if you wish to use an ordinal
scale for a field with ``bin`` or ``timeUnit``.
**Default value:**
1) For a data ``field``, ``"nominal"`` is the default data type unless the field
encoding has ``aggregate``, ``channel``, ``bin``, scale type, ``sort``, or
``timeUnit`` that satisfies the following criteria:
* ``"quantitative"`` is the default type if (1) the encoded field contains ``bin``
or ``aggregate`` except ``"argmin"`` and ``"argmax"``, (2) the encoding channel is
``latitude`` or ``longitude`` channel or (3) if the specified scale type is `a
quantitative scale <https://vega.github.io/vega-lite/docs/scale.html#type>`__.
* ``"temporal"`` is the default type if (1) the encoded field contains ``timeUnit``
or (2) the specified scale type is a time or utc scale
* ``"ordinal"`` is the default type if (1) the encoded field contains a `custom sort
order
<https://vega.github.io/vega-lite/docs/sort.html#specifying-custom-sort-order>`__,
(2) the specified scale type is an ordinal/point/band scale, or (3) the encoding
channel is ``order``.
2) For a constant value in data domain (``datum``):
* ``"quantitative"`` if the datum is a number
* ``"nominal"`` if the datum is a string
* ``"temporal"`` if the datum is `a date time object
<https://vega.github.io/vega-lite/docs/datetime.html>`__
**Note:**
* Data ``type`` describes the semantics of the data rather than the primitive data
types (number, string, etc.). The same primitive data type can have different
types of measurement. For example, numeric data can represent quantitative,
ordinal, or nominal data.
* Data values for a temporal field can be either a date-time string (e.g.,
``"2015-03-07 12:32:17"``, ``"17:01"``, ``"2015-03-16"``. ``"2015"``) or a
timestamp number (e.g., ``1552199579097``).
* When using with `bin <https://vega.github.io/vega-lite/docs/bin.html>`__, the
``type`` property can be either ``"quantitative"`` (for using a linear bin scale)
or `"ordinal" (for using an ordinal bin scale)
<https://vega.github.io/vega-lite/docs/type.html#cast-bin>`__.
* When using with `timeUnit
<https://vega.github.io/vega-lite/docs/timeunit.html>`__, the ``type`` property
can be either ``"temporal"`` (default, for using a temporal scale) or `"ordinal"
(for using an ordinal scale)
<https://vega.github.io/vega-lite/docs/type.html#cast-bin>`__.
* When using with `aggregate
<https://vega.github.io/vega-lite/docs/aggregate.html>`__, the ``type`` property
refers to the post-aggregation data type. For example, we can calculate count
``distinct`` of a categorical field ``"cat"`` using ``{"aggregate": "distinct",
"field": "cat"}``. The ``"type"`` of the aggregate output is ``"quantitative"``.
* Secondary channels (e.g., ``x2``, ``y2``, ``xError``, ``yError``) do not have
``type`` as they must have exactly the same type as their primary channels (e.g.,
``x``, ``y``).
**See also:** `type <https://vega.github.io/vega-lite/docs/type.html>`__
documentation.
"""
_schema = {"$ref": "#/definitions/ConditionalParameter<StringFieldDef>"}
def __init__(
self,
param: Optional[str | SchemaBase] = Undefined,
aggregate: Optional[SchemaBase | Map | NonArgAggregateOp_T] = Undefined,
bandPosition: Optional[float] = Undefined,
bin: Optional[bool | SchemaBase | Literal["binned"] | Map | None] = Undefined,
empty: Optional[bool] = Undefined,
field: Optional[str | SchemaBase | Map] = Undefined,
format: Optional[str | SchemaBase | Map] = Undefined,
formatType: Optional[str] = Undefined,
timeUnit: Optional[
SchemaBase | Map | MultiTimeUnit_T | BinnedTimeUnit_T | SingleTimeUnit_T
] = Undefined,
title: Optional[str | SchemaBase | Sequence[str] | None] = Undefined,
type: Optional[SchemaBase | StandardType_T] = Undefined,
**kwds,
):
super().__init__(
param=param,
aggregate=aggregate,
bandPosition=bandPosition,
bin=bin,
empty=empty,
field=field,
format=format,
formatType=formatType,
timeUnit=timeUnit,
title=title,
type=type,
**kwds,
)
| ConditionalParameterStringFieldDef |
python | numba__numba | numba/cuda/stubs.py | {
"start": 4267,
"end": 4551
} | class ____(Stub):
'''
shfl_sync_intrinsic(mask, mode, value, mode_offset, clamp)
Nvvm intrinsic for shuffling data across a warp
docs.nvidia.com/cuda/nvvm-ir-spec/index.html#nvvm-intrin-warp-level-datamove
'''
_description_ = '<shfl_sync()>'
| shfl_sync_intrinsic |
python | pyca__cryptography | src/cryptography/x509/certificate_transparency.py | {
"start": 398,
"end": 438
} | class ____(utils.Enum):
v1 = 0
| Version |
python | doocs__leetcode | solution/1300-1399/1318.Minimum Flips to Make a OR b Equal to c/Solution.py | {
"start": 0,
"end": 254
} | class ____:
def minFlips(self, a: int, b: int, c: int) -> int:
ans = 0
for i in range(32):
x, y, z = a >> i & 1, b >> i & 1, c >> i & 1
ans += x + y if z == 0 else int(x == 0 and y == 0)
return ans
| Solution |
python | ray-project__ray | python/ray/llm/_internal/batch/stages/prepare_image_stage.py | {
"start": 9138,
"end": 12922
} | class ____(StatefulStageUDF):
def __init__(self, data_column: str, expected_input_keys: List[str]):
super().__init__(data_column, expected_input_keys)
self.Image = importlib.import_module("PIL.Image")
self.image_processor = ImageProcessor()
def extract_image_info(self, messages: List[Dict]) -> List[_ImageType]:
"""Extract image information from chat messages.
Args:
messages: List of chat messages.
Returns:
List of _ImageType.
Note:
The optional 'detail' parameter from the OpenAI schema is not
passed forward to downstream templates.
"""
image_info: List[_ImageType] = []
for message in messages:
content = message["content"]
# Convert PyArrow objects to Python objects if needed (like ChatTemplateStage).
# This handles the case where unform content types are serialized with PyArrow
# instead of pickle- happens when all messages have the same content structure
# (e.g., no system prompt + string content mixed with user messages with list content).
if hasattr(content, "tolist"):
content = content.tolist()
if not isinstance(content, list):
continue
for content_item in content:
if content_item["type"] not in ("image", "image_url"):
continue
image_data = content_item[content_item["type"]]
if content_item["type"] == "image_url" and isinstance(image_data, dict):
# OpenAI nested format: {"image_url": {"url": "..."}}
image = image_data.get("url")
if not isinstance(image, str) or not image:
raise ValueError(
"image_url must be an object with a non-empty 'url' string"
)
else:
# Simple format: {"image": "..."} or {"image_url": "..."}
image = image_data
if not isinstance(image, str) and not isinstance(
image, self.Image.Image
):
raise ValueError(f"Cannot handle image type {type(image)}")
image_info.append(image)
return image_info
async def udf(self, batch: List[Dict[str, Any]]) -> AsyncIterator[Dict[str, Any]]:
messages = [row["messages"] for row in batch]
# Process all images in this batch.
all_image_info = [self.extract_image_info(message) for message in messages]
flat_all_image_info = [img for imgs in all_image_info for img in imgs]
flat_all_images = await self.image_processor.process(flat_all_image_info)
# TODO: We now use asyncio.gather to process all images in this batch,
# so the outputs here must be in order. However, it is more efficient
# to support out-of-order outputs so that we won't be blocked by slow
# downloaded images.
img_start_idx = 0
idx_in_batch = 0
for image_info_per_req in all_image_info:
num_images_in_req = len(image_info_per_req)
ret = {self.IDX_IN_BATCH_COLUMN: idx_in_batch}
idx_in_batch += 1
if num_images_in_req > 0:
images = flat_all_images[
img_start_idx : img_start_idx + num_images_in_req
]
ret.update(
{
"image": images,
"image_sizes": [(img.width, img.height) for img in images],
}
)
img_start_idx += num_images_in_req
yield ret
| PrepareImageUDF |
python | conda__conda | conda/plugins/types.py | {
"start": 11034,
"end": 11328
} | class ____(ABC):
def __init__(self, message: str, fail_message: str = "failed\n"):
self.message = message
self.fail_message = fail_message
@abstractmethod
def __enter__(self): ...
@abstractmethod
def __exit__(self, exc_type, exc_val, exc_tb): ...
| SpinnerBase |
python | pallets__click | src/click/testing.py | {
"start": 3679,
"end": 6336
} | class ____:
"""Holds the captured result of an invoked CLI script.
:param runner: The runner that created the result
:param stdout_bytes: The standard output as bytes.
:param stderr_bytes: The standard error as bytes.
:param output_bytes: A mix of ``stdout_bytes`` and ``stderr_bytes``, as the
user would see it in its terminal.
:param return_value: The value returned from the invoked command.
:param exit_code: The exit code as integer.
:param exception: The exception that happened if one did.
:param exc_info: Exception information (exception type, exception instance,
traceback type).
.. versionchanged:: 8.2
``stderr_bytes`` no longer optional, ``output_bytes`` introduced and
``mix_stderr`` has been removed.
.. versionadded:: 8.0
Added ``return_value``.
"""
def __init__(
self,
runner: CliRunner,
stdout_bytes: bytes,
stderr_bytes: bytes,
output_bytes: bytes,
return_value: t.Any,
exit_code: int,
exception: BaseException | None,
exc_info: tuple[type[BaseException], BaseException, TracebackType]
| None = None,
):
self.runner = runner
self.stdout_bytes = stdout_bytes
self.stderr_bytes = stderr_bytes
self.output_bytes = output_bytes
self.return_value = return_value
self.exit_code = exit_code
self.exception = exception
self.exc_info = exc_info
@property
def output(self) -> str:
"""The terminal output as unicode string, as the user would see it.
.. versionchanged:: 8.2
No longer a proxy for ``self.stdout``. Now has its own independent stream
that is mixing `<stdout>` and `<stderr>`, in the order they were written.
"""
return self.output_bytes.decode(self.runner.charset, "replace").replace(
"\r\n", "\n"
)
@property
def stdout(self) -> str:
"""The standard output as unicode string."""
return self.stdout_bytes.decode(self.runner.charset, "replace").replace(
"\r\n", "\n"
)
@property
def stderr(self) -> str:
"""The standard error as unicode string.
.. versionchanged:: 8.2
No longer raise an exception, always returns the `<stderr>` string.
"""
return self.stderr_bytes.decode(self.runner.charset, "replace").replace(
"\r\n", "\n"
)
def __repr__(self) -> str:
exc_str = repr(self.exception) if self.exception else "okay"
return f"<{type(self).__name__} {exc_str}>"
| Result |
python | django-haystack__django-haystack | test_haystack/test_loading.py | {
"start": 8109,
"end": 8376
} | class ____(indexes.SearchIndex, indexes.Indexable):
text = indexes.CharField(document=True)
author = indexes.MultiValueField(stored=False)
title = indexes.CharField(indexed=False)
def get_model(self):
return MockModel
| MultiValueValidSearchIndex |
python | django__django | tests/indexes/tests.py | {
"start": 14724,
"end": 21932
} | class ____(TransactionTestCase):
# Schema editor is used to create the index to test that it works.
available_apps = ["indexes"]
def test_partial_index(self):
with connection.schema_editor() as editor:
index = Index(
name="recent_article_idx",
fields=["pub_date"],
condition=Q(
pub_date__gt=datetime.datetime(
year=2015,
month=1,
day=1,
# PostgreSQL would otherwise complain about the lookup
# being converted to a mutable function (by removing
# the timezone in the cast) which is forbidden.
tzinfo=timezone.get_current_timezone(),
),
),
)
self.assertIn(
"WHERE %s" % editor.quote_name("pub_date"),
str(index.create_sql(Article, schema_editor=editor)),
)
editor.add_index(index=index, model=Article)
with connection.cursor() as cursor:
self.assertIn(
index.name,
connection.introspection.get_constraints(
cursor=cursor,
table_name=Article._meta.db_table,
),
)
editor.remove_index(index=index, model=Article)
def test_integer_restriction_partial(self):
with connection.schema_editor() as editor:
index = Index(
name="recent_article_idx",
fields=["id"],
condition=Q(pk__gt=1),
)
self.assertIn(
"WHERE %s" % editor.quote_name("id"),
str(index.create_sql(Article, schema_editor=editor)),
)
editor.add_index(index=index, model=Article)
with connection.cursor() as cursor:
self.assertIn(
index.name,
connection.introspection.get_constraints(
cursor=cursor,
table_name=Article._meta.db_table,
),
)
editor.remove_index(index=index, model=Article)
def test_boolean_restriction_partial(self):
with connection.schema_editor() as editor:
index = Index(
name="published_index",
fields=["published"],
condition=Q(published=True),
)
self.assertIn(
"WHERE %s" % editor.quote_name("published"),
str(index.create_sql(Article, schema_editor=editor)),
)
editor.add_index(index=index, model=Article)
with connection.cursor() as cursor:
self.assertIn(
index.name,
connection.introspection.get_constraints(
cursor=cursor,
table_name=Article._meta.db_table,
),
)
editor.remove_index(index=index, model=Article)
@skipUnlessDBFeature("supports_functions_in_partial_indexes")
def test_multiple_conditions(self):
with connection.schema_editor() as editor:
index = Index(
name="recent_article_idx",
fields=["pub_date", "headline"],
condition=(
Q(
pub_date__gt=datetime.datetime(
year=2015,
month=1,
day=1,
tzinfo=timezone.get_current_timezone(),
)
)
& Q(headline__contains="China")
),
)
sql = str(index.create_sql(Article, schema_editor=editor))
where = sql.find("WHERE")
self.assertIn("WHERE (%s" % editor.quote_name("pub_date"), sql)
# Because each backend has different syntax for the operators,
# check ONLY the occurrence of headline in the SQL.
self.assertGreater(sql.rfind("headline"), where)
editor.add_index(index=index, model=Article)
with connection.cursor() as cursor:
self.assertIn(
index.name,
connection.introspection.get_constraints(
cursor=cursor,
table_name=Article._meta.db_table,
),
)
editor.remove_index(index=index, model=Article)
def test_is_null_condition(self):
with connection.schema_editor() as editor:
index = Index(
name="recent_article_idx",
fields=["pub_date"],
condition=Q(pub_date__isnull=False),
)
self.assertIn(
"WHERE %s IS NOT NULL" % editor.quote_name("pub_date"),
str(index.create_sql(Article, schema_editor=editor)),
)
editor.add_index(index=index, model=Article)
with connection.cursor() as cursor:
self.assertIn(
index.name,
connection.introspection.get_constraints(
cursor=cursor,
table_name=Article._meta.db_table,
),
)
editor.remove_index(index=index, model=Article)
@skipUnlessDBFeature("supports_expression_indexes")
def test_partial_func_index(self):
index_name = "partial_func_idx"
index = Index(
Lower("headline").desc(),
name=index_name,
condition=Q(pub_date__isnull=False),
)
with connection.schema_editor() as editor:
editor.add_index(index=index, model=Article)
sql = index.create_sql(Article, schema_editor=editor)
table = Article._meta.db_table
self.assertIs(sql.references_column(table, "headline"), True)
sql = str(sql)
self.assertIn("LOWER(%s)" % editor.quote_name("headline"), sql)
self.assertIn(
"WHERE %s IS NOT NULL" % editor.quote_name("pub_date"),
sql,
)
self.assertGreater(sql.find("WHERE"), sql.find("LOWER"))
with connection.cursor() as cursor:
constraints = connection.introspection.get_constraints(
cursor=cursor,
table_name=table,
)
self.assertIn(index_name, constraints)
if connection.features.supports_index_column_ordering:
self.assertEqual(constraints[index_name]["orders"], ["DESC"])
with connection.schema_editor() as editor:
editor.remove_index(Article, index)
with connection.cursor() as cursor:
self.assertNotIn(
index_name,
connection.introspection.get_constraints(
cursor=cursor,
table_name=table,
),
)
@skipUnlessDBFeature("supports_covering_indexes")
| PartialIndexTests |
python | getsentry__sentry | src/sentry/issue_detection/detectors/experiments/mn_plus_one_db_span_detector.py | {
"start": 5525,
"end": 15498
} | class ____(MNPlusOneState):
"""
The state for when we think we might have found a pattern: a sequence of
spans that has begun to repeat.
When the sequence is broken (either by a mismatched span or span iteration
finishing), returns to the SearchingMNPlusOne state, possibly returning a
PerformanceProblem if the detected sequence met our thresholds.
"""
__slots__ = ("settings", "event", "pattern", "spans", "pattern_index", "parent_map")
def __init__(
self,
*,
settings: dict[str, Any],
event: dict[str, Any],
pattern: list[Span],
first_span: Span,
parent_map: dict[str, str],
) -> None:
self.settings = settings
self.event = event
self.pattern = pattern
self.parent_map = parent_map
"""
A mapping of all visited spans IDs to their parent span IDs (span_id -> parent_span_id).
In practice, this parent_map is passed back and forth between states to maintain a stable
reference for any visited span regardless of whether a pattern is found.
"""
# The full list of spans involved in the MN pattern.
self.spans = pattern.copy()
self.spans.append(first_span)
self.pattern_index = 1
def next(self, span: Span) -> tuple[MNPlusOneState, PerformanceProblem | None]:
span_id = span.get("span_id")
parent_span_id = span.get("parent_span_id")
if span_id and parent_span_id:
self.parent_map[span_id] = parent_span_id
# If the MN pattern is continuing, carry on in this state.
pattern_span = self.pattern[self.pattern_index]
if self._equivalent(pattern_span, span):
self.spans.append(span)
self.pattern_index += 1
if self.pattern_index >= len(self.pattern):
self.pattern_index = 0
return (self, None)
# We've broken the MN pattern, so return to the Searching state. If it
# is a significant problem, also return a PerformanceProblem.
# Keep more context for pattern detection by including spans that could be
# the beginning of a new pattern. Instead of just keeping the incomplete
# remainder, keep the last pattern_length spans plus the current span.
# Keep at least the last pattern_length spans (or all if we have fewer)
pattern_length = len(self.pattern)
context_start = max(0, len(self.spans) - pattern_length)
remaining_spans = self.spans[context_start:] + [span]
return (
SearchingForMNPlusOne(
settings=self.settings,
event=self.event,
parent_map=self.parent_map,
initial_spans=remaining_spans,
),
self._maybe_performance_problem(),
)
def finish(self) -> PerformanceProblem | None:
return self._maybe_performance_problem()
def _maybe_performance_problem(self) -> PerformanceProblem | None:
times_occurred = int(len(self.spans) / len(self.pattern))
minimum_occurrences_of_pattern = self.settings["minimum_occurrences_of_pattern"]
if times_occurred < minimum_occurrences_of_pattern:
return None
offender_span_count = len(self.pattern) * times_occurred
offender_spans = self.spans[:offender_span_count]
# Consider all spans when evaluating the duration threshold, however at least 10 percent
# of the total duration of offenders should be from db spans.
total_spans_duration = total_span_time(offender_spans)
if total_spans_duration < self.settings["total_duration_threshold"]:
metrics.incr("mn_plus_one_db_span_detector.below_duration_threshold")
return None
offender_db_spans = [span for span in offender_spans if span["op"].startswith("db")]
total_db_spans_duration = total_span_time(offender_db_spans)
pct_db_spans = total_db_spans_duration / total_spans_duration if total_spans_duration else 0
if pct_db_spans < self.settings["min_percentage_of_db_spans"]:
metrics.incr("mn_plus_one_db_span_detector.below_db_span_percentage")
return None
common_parent_span = self._find_common_parent_span(offender_spans)
if not common_parent_span:
metrics.incr("mn_plus_one_db_span_detector.no_parent_span")
return None
db_span = self._first_relevant_db_span()
if not db_span:
metrics.incr("mn_plus_one_db_span_detector.no_db_span")
return None
db_span_ids = [span["span_id"] for span in offender_db_spans]
offender_span_ids = [span["span_id"] for span in offender_spans]
return PerformanceProblem(
fingerprint=self._fingerprint(db_span["hash"], common_parent_span),
op="db",
desc=db_span["description"],
type=PerformanceNPlusOneGroupType,
parent_span_ids=[common_parent_span["span_id"]],
cause_span_ids=db_span_ids,
offender_span_ids=offender_span_ids,
evidence_data={
"op": "db",
"parent_span_ids": [common_parent_span["span_id"]],
"cause_span_ids": db_span_ids,
"offender_span_ids": offender_span_ids,
"transaction_name": self.event.get("transaction", ""),
"parent_span": get_span_evidence_value(common_parent_span),
"repeating_spans": [get_span_evidence_value(span) for span in self.pattern],
"repeating_spans_compact": [
get_span_evidence_value(span, include_op=False) for span in self.pattern
],
"number_repeating_spans": str(len(offender_spans)),
"pattern_size": len(self.pattern),
"num_pattern_repetitions": times_occurred,
},
evidence_display=[
IssueEvidence(
name="Offending Spans",
value=get_notification_attachment_body(
"db",
db_span["description"],
),
# Has to be marked important to be displayed in the notifications
important=True,
)
],
)
def _first_relevant_db_span(self) -> Span | None:
for span in self.spans:
if (
span["op"].startswith("db")
and get_span_evidence_value(span, include_op=False) != "prisma:engine:connection"
):
return span
return None
def _find_common_parent_span(self, spans: Sequence[Span]) -> Span | None:
"""
Using the self.parent_map, identify the common parent within the configured depth
of the every span in the list. Returns None if no common parent is found, or the common
parent is not within the event.
"""
# Use a set to track the common parent across all spans.
# It'll start empty, fill with the first span's parents, and then intersect every span's
# parent list after that.
common_parent_set: set[str] = set()
# We also store the latest parent list for ordering later on.
latest_parent_list: list[str] = []
for span in spans:
span_id = span.get("span_id")
if not span_id:
return None
current_parent_list = []
current_span_id = span_id
# This will run at most `max_allowable_depth` times for n spans.
# For that reason, `max_allowable_depth` cannot be user-configurable -- to avoid
# O(n^2) complexity and load issues.
for _ in range(self.settings["max_allowable_depth"]):
parent_span_id = self.parent_map.get(current_span_id)
if not parent_span_id:
break
current_parent_list.append(parent_span_id)
# If this parent_span_id is already in the global intersection, stop early, we don't
# need to build the rest of the parent list.
if parent_span_id in common_parent_set:
break
current_span_id = parent_span_id
# If common_parent_set is empty (first iteration), set it to the current parent list.
# Otherwise, intersect it with the current_parent_list.
common_parent_set = (
common_parent_set.intersection(set(current_parent_list))
if common_parent_set
else set(current_parent_list)
)
# At this point, if common_parent_set is empty, we can bail out early since that means
# at least two parent lists have no intersection, thus no common parent.
if not common_parent_set:
return None
latest_parent_list = current_parent_list
# The parent list is ordered, so the first match is the earliest common parent,
# which is the best match for useful fingerprinting.
common_parent_span_id = next(
(span_id for span_id in latest_parent_list if span_id in common_parent_set), None
)
if not common_parent_span_id:
return None
all_spans = self.event.get("spans") or []
for span in all_spans:
if span.get("span_id") == common_parent_span_id:
return span
return None
def _fingerprint(self, db_hash: str, parent_span: Span) -> str:
parent_op = parent_span.get("op") or ""
parent_hash = parent_span.get("hash") or ""
full_fingerprint = hashlib.sha1(
(parent_op + parent_hash + db_hash).encode("utf8")
).hexdigest()
return f"1-{PerformanceMNPlusOneDBQueriesGroupType.type_id}-{full_fingerprint}"
| ContinuingMNPlusOne |
python | getsentry__sentry | tests/sentry/releases/endpoints/test_project_release_file_details.py | {
"start": 7064,
"end": 9059
} | class ____(APITestCase):
def test_simple(self) -> None:
self.login_as(user=self.user)
project = self.create_project(name="foo")
release = Release.objects.create(organization_id=project.organization_id, version="1")
release.add_project(project)
releasefile = ReleaseFile.objects.create(
organization_id=project.organization_id,
release_id=release.id,
file=File.objects.create(name="application.js", type="release.file"),
name="http://example.com/application.js",
)
url = reverse(
"sentry-api-0-project-release-file-details",
kwargs={
"organization_id_or_slug": project.organization.slug,
"project_id_or_slug": project.slug,
"version": release.version,
"file_id": releasefile.id,
},
)
response = self.client.put(url, {"name": "foobar"})
assert response.status_code == 200, response.content
assert response.data["id"] == str(releasefile.id)
releasefile = ReleaseFile.objects.get(id=releasefile.id)
assert releasefile.name == "foobar"
assert releasefile.ident == ReleaseFile.get_ident("foobar")
def test_update_archived(self) -> None:
self.login_as(user=self.user)
self.create_release_archive()
id = urlsafe_b64encode(b"_~/index.js").decode()
url = reverse(
"sentry-api-0-project-release-file-details",
kwargs={
"organization_id_or_slug": self.organization.slug,
"project_id_or_slug": self.project.slug,
"version": self.release.version,
"file_id": id,
},
)
response = self.client.put(url, {"name": "foobar"})
assert response.status_code == 400
assert response.data == {"detail": INVALID_UPDATE_MESSAGE} # TODO: document this in apidocs
| ReleaseFileUpdateTest |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/data_structures/tensor_array_ops_test.py | {
"start": 70545,
"end": 71875
} | class ____(test.Benchmark):
def _tensorArrayWriteInWhile(self):
size = 10000
ta = tensor_array_ops.TensorArray(dtype=dtypes.float32, size=size)
(_, ta) = while_loop.while_loop(
lambda i, _: i < size,
lambda i, ta: (i + 1, ta.write(i, 0.)), [0, ta],
parallel_iterations=1)
return ta.stack()
def _benchmarkWriteInWhile(self):
ops.reset_default_graph()
op = self._tensorArrayWriteInWhile()
self.run_op_benchmark(session_lib.Session(), op)
def benchmarkWriteInWhile(self):
self._benchmarkWriteInWhile()
@test_util.enable_control_flow_v2
def benchmarkWriteInWhileWithControlFlowV2(self):
self._benchmarkWriteInWhile()
def benchmarkWriteInDatasetMapFn(self):
ds = dataset_ops.Dataset.from_tensors(array_ops.zeros([10])).repeat()
ds = ds.map(lambda _: self._tensorArrayWriteInWhile())
op = ds.make_one_shot_iterator().get_next()
self.run_op_benchmark(session_lib.Session(), op)
def benchmarkWriteInDatasetParallelMapFn(self):
ds = dataset_ops.Dataset.from_tensors(array_ops.zeros([10])).repeat()
ds = ds.map(lambda _: self._tensorArrayWriteInWhile(), num_parallel_calls=2)
op = ds.make_one_shot_iterator().get_next()
self.run_op_benchmark(session_lib.Session(), op)
if __name__ == "__main__":
test.main()
| TensorArrayBenchmark |
python | spack__spack | var/spack/test_repos/spack_repo/builtin_mock/packages/mpi/package.py | {
"start": 188,
"end": 390
} | class ____(Package):
"""Virtual package for the Message Passing Interface."""
homepage = "https://www.mpi-forum.org/"
virtual = True
def test_hello(self):
print("Hello there!")
| Mpi |
python | great-expectations__great_expectations | contrib/experimental/great_expectations_experimental/expectations/expect_column_discrete_entropy_to_be_between.py | {
"start": 4377,
"end": 14168
} | class ____(ColumnAggregateExpectation):
"""Expect the column discrete entropy to be between a minimum value and a maximum value.
The Shannon entropy of a discrete probability distribution is given by
- \\sum_{i=1}^{n} P(x_i) * \\log(P(x_i))
where P(x_i) is the probability of occurrence of value x_i.
For observed data the P(x_i) are replaced by the empirical frequencies n_{x_i}/N
expect_column_discrete_entropy_to_be_between is a \
:func:`column_aggregate_expectation
<great_expectations.execution_engine.MetaExecutionEngine.column_aggregate_expectation>`.
Args:
column (str): \
The column name.
min_value (float or None): \
The minimum value for the column standard deviation.
max_value (float or None): \
The maximum value for the column standard deviation.
strict_min (boolean):
If True, the column standard deviation must be strictly larger than min_value, default=False
strict_max (boolean):
If True, the column standard deviation must be strictly smaller than max_value, default=False
base (float):
The base of the logarithm, default=2
Other Parameters:
result_format (str or None): \
Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`. \
For more detail, see :ref:`result_format <result_format>`.
catch_exceptions (boolean or None): \
If True, then catch exceptions and include them as part of the result object. \
For more detail, see :ref:`catch_exceptions`.
meta (dict or None): \
A JSON-serializable dictionary (nesting allowed) that will be included in the output without \
modification. For more detail, see :ref:`meta`.
Returns:
An ExpectationSuiteValidationResult
Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and :ref:`catch_exceptions`, and :ref:`meta`.
Notes:
* min_value and max_value are both inclusive unless strict_min or strict_max are set to True.
* If min_value is None, then max_value is treated as an upper bound
* If max_value is None, then min_value is treated as a lower bound
"""
examples = [
{
"data": {
"a": [1, 2, 3, 4],
"b": ["Jarndyce", "Jarndyce", None, None],
"c": ["past", "present", "future", None],
},
"schemas": {"spark": {"a": "IntegerType", "b": "StringType", "c": "StringType"}},
"tests": [
{
"title": "positive_test_min_equal_max",
"exact_match_out": False,
"in": {"column": "a", "min_value": 2.0, "max_value": 2.0},
"out": {"success": True, "observed_value": 2.0},
},
{
"title": "positive_test_null_min",
"include_in_gallery": True,
"exact_match_out": False,
"in": {"column": "b", "min_value": None, "max_value": 1},
"out": {"success": True, "observed_value": 0.0},
},
{
"title": "negative_test_nondefault_kwargs_complete_result_format",
"include_in_gallery": True,
"exact_match_out": True,
"in": {
"column": "c",
"min_value": 7,
"max_value": 7,
"base": 3,
"result_format": "COMPLETE",
},
"out": {
"success": False,
"result": {
"observed_value": 1.0,
"element_count": 4,
"missing_count": 1,
"missing_percent": 25.0,
},
},
},
],
},
{
"data": {"empty_column": []},
"schemas": {"spark": {"empty_column": "IntegerType"}},
"tests": [
{
"title": "test_empty_column_should_be_zero",
"include_in_gallery": True,
"exact_match_out": False,
"in": {
"column": "empty_column",
"min_value": 0,
"max_value": 0,
"catch_exceptions": False,
},
"out": {"success": True, "observed_value": 0.0},
}
],
},
]
# This dictionary contains metadata for display in the public gallery
library_metadata = {
"maturity": "experimental", # "experimental", "beta", or "production"
"tags": ["experimental"], # Tags for this Expectation in the gallery
"contributors": [ # Github handles for all contributors to this Expectation.
"@edjoesu",
],
}
# Setting necessary computation metric dependencies and defining kwargs, as well as assigning kwargs default values\
metric_dependencies = ("column.discrete.entropy",)
success_keys = (
"min_value",
"strict_min",
"max_value",
"strict_max",
"base",
)
# Default values
default_kwarg_values = {
"min_value": None,
"max_value": None,
"strict_min": None,
"strict_max": None,
"result_format": "BASIC",
"catch_exceptions": False,
"base": 2,
}
def validate_configuration(
self, configuration: Optional[ExpectationConfiguration] = None
) -> None:
"""
Validates that a configuration has been set, and sets a configuration if it has yet to be set. Ensures that
neccessary configuration arguments have been provided for the validation of the expectation.
Args:
configuration (OPTIONAL[ExpectationConfiguration]): \
An optional Expectation Configuration entry that will be used to configure the expectation
Returns:
None. Raises InvalidExpectationConfigurationError if the config is not validated successfully
"""
super().validate_configuration(configuration)
self.validate_metric_value_between_configuration(configuration=configuration)
@classmethod
@renderer(renderer_type="renderer.prescriptive")
@render_suite_parameter_string
def _prescriptive_renderer(
cls,
configuration: Optional[ExpectationConfiguration] = None,
result: Optional[ExpectationValidationResult] = None,
runtime_configuration: Optional[dict] = None,
**kwargs,
):
runtime_configuration = runtime_configuration or {}
include_column_name = runtime_configuration.get("include_column_name") is not False
styling = runtime_configuration.get("styling")
params = substitute_none_for_missing(
configuration.kwargs,
[
"column",
"min_value",
"max_value",
"row_condition",
"condition_parser",
"strict_min",
"strict_max",
],
)
if (params["min_value"] is None) and (params["max_value"] is None):
template_str = "discrete entropy may have any numerical value."
else:
at_least_str, at_most_str = handle_strict_min_max(params)
if params["min_value"] is not None and params["max_value"] is not None:
template_str = f"discrete entropy must be {at_least_str} $min_value and {at_most_str} $max_value."
elif params["min_value"] is None:
template_str = f"discrete entropy must be {at_most_str} $max_value."
elif params["max_value"] is None:
template_str = f"discrete entropy must be {at_least_str} $min_value."
if include_column_name:
template_str = "$column " + template_str
if params["row_condition"] is not None:
(
conditional_template_str,
conditional_params,
) = parse_row_condition_string_pandas_engine(params["row_condition"])
template_str = conditional_template_str + ", then " + template_str
params.update(conditional_params)
return [
RenderedStringTemplateContent(
**{
"content_block_type": "string_template",
"string_template": {
"template": template_str,
"params": params,
"styling": styling,
},
}
)
]
def _validate(
self,
metrics: Dict,
runtime_configuration: dict = None,
execution_engine: ExecutionEngine = None,
):
return self._validate_metric_value_between(
metric_name="column.discrete.entropy",
metrics=metrics,
runtime_configuration=runtime_configuration,
execution_engine=execution_engine,
)
if __name__ == "__main__":
ExpectColumnDiscreteEntropyToBeBetween().print_diagnostic_checklist()
| ExpectColumnDiscreteEntropyToBeBetween |
python | scipy__scipy | benchmarks/benchmarks/go_benchmark_functions/go_funcs_F.py | {
"start": 38,
"end": 1305
} | class ____(Benchmark):
r"""
FreudensteinRoth objective function.
This class defines the Freudenstein & Roth [1]_ global optimization problem.
This is a multimodal minimization problem defined as follows:
.. math::
f_{\text{FreudensteinRoth}}(x) = \left\{x_1 - 13 + \left[(5 - x_2) x_2
- 2 \right] x_2 \right\}^2 + \left \{x_1 - 29
+ \left[(x_2 + 1) x_2 - 14 \right] x_2 \right\}^2
with :math:`x_i \in [-10, 10]` for :math:`i = 1, 2`.
*Global optimum*: :math:`f(x) = 0` for :math:`x = [5, 4]`
.. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
For Global Optimization Problems Int. Journal of Mathematical Modelling
and Numerical Optimisation, 2013, 4, 150-194.
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self._bounds = list(zip([-10.0] * self.N, [10.0] * self.N))
self.custom_bounds = [(-3, 3), (-5, 5)]
self.global_optimum = [[5.0, 4.0]]
self.fglob = 0.0
def fun(self, x, *args):
self.nfev += 1
f1 = (-13.0 + x[0] + ((5.0 - x[1]) * x[1] - 2.0) * x[1]) ** 2
f2 = (-29.0 + x[0] + ((x[1] + 1.0) * x[1] - 14.0) * x[1]) ** 2
return f1 + f2
| FreudensteinRoth |
python | numpy__numpy | benchmarks/benchmarks/bench_random.py | {
"start": 2067,
"end": 3386
} | class ____(Benchmark):
param_names = ['rng']
params = ['PCG64', 'MT19937', 'Philox', 'SFC64', 'numpy']
def setup(self, bitgen):
if bitgen == 'numpy':
self.rg = np.random.RandomState()
else:
self.rg = Generator(getattr(np.random, bitgen)())
self.rg.random()
self.int32info = np.iinfo(np.int32)
self.uint32info = np.iinfo(np.uint32)
self.uint64info = np.iinfo(np.uint64)
def time_raw(self, bitgen):
if bitgen == 'numpy':
self.rg.random_integers(self.int32info.max, size=nom_size)
else:
self.rg.integers(self.int32info.max, size=nom_size, endpoint=True)
def time_32bit(self, bitgen):
min, max = self.uint32info.min, self.uint32info.max
if bitgen == 'numpy':
self.rg.randint(min, max + 1, nom_size, dtype=np.uint32)
else:
self.rg.integers(min, max + 1, nom_size, dtype=np.uint32)
def time_64bit(self, bitgen):
min, max = self.uint64info.min, self.uint64info.max
if bitgen == 'numpy':
self.rg.randint(min, max + 1, nom_size, dtype=np.uint64)
else:
self.rg.integers(min, max + 1, nom_size, dtype=np.uint64)
def time_normal_zig(self, bitgen):
self.rg.standard_normal(nom_size)
| RNG |
python | getsentry__sentry | tests/sentry/auth/authenticators/test_u2f.py | {
"start": 459,
"end": 5930
} | class ____(TestCase):
def setUp(self) -> None:
self.u2f = U2fInterface()
self.login_as(user=self.user)
rp = PublicKeyCredentialRpEntity("richardmasentry.ngrok.io", "Sentry")
self.test_registration_server = Fido2Server(rp, verify_origin=lambda origin: True)
self.response = {
"keyHandle": "F5MKBNqJMnHX-g0jee03d0slMyvz0FMWAf1YzF9mjZhA6ePDEwt8QT2zNR-ungcffGGxpGtp4yXRC5gz8t1Lww",
"clientData": "eyJ0eXBlIjoid2ViYXV0aG4uZ2V0IiwiY2hhbGxlbmdlIjoiRjJYS0tyZ19FY1h6OEljUjBUX3BzcUJqenc5X1VIYTFIU2premtFbTUzQSIsIm9yaWdpbiI6Imh0dHBzOi8vc2VudHJ5LmlvIiwiY3Jvc3NPcmlnaW4iOmZhbHNlLCJvdGhlcl9rZXlzX2Nhbl9iZV9hZGRlZF9oZXJlIjoiZG8gbm90IGNvbXBhcmUgY2xpZW50RGF0YUpTT04gYWdhaW5zdCBhIHRlbXBsYXRlLiBTZWUgaHR0cHM6Ly9nb28uZ2wveWFiUGV4In0",
"signatureData": "MEUCIDe2DPI7E3tWa31JN_FG5m9rhc2v2lDRsWY-Yy7jgdT0AiEA5hkw8UGEfu-d_H5CEHuGC1Cj1wvFPqiRu-c_q50R6NM",
"authenticatorData": "ss7JfEqyMJeXvxXeO3AXn9tPTh1R4bNVGkMcr6WH-08BAAAD_A",
}
self.request = self.make_request(user=self.user)
def test_start_enrollment_webauthn(self) -> None:
self.u2f.webauthn_registration_server = self.test_registration_server
encoded_challenge, state = self.u2f.start_enrollment(self.user)
challenge = cbor.decode(encoded_challenge)
assert len(state) == 2
assert state["user_verification"] == "discouraged"
assert len(state["challenge"]) == 43
assert challenge["publicKey"]["rp"] == {"id": "richardmasentry.ngrok.io", "name": "Sentry"}
assert challenge["publicKey"]["user"] == {
"id": self.user.id.to_bytes(64, byteorder="big"),
"name": self.user.username,
"displayName": self.user.username,
}
assert int.from_bytes(challenge["publicKey"]["user"]["id"], byteorder="big") == self.user.id
assert len(challenge["publicKey"]["pubKeyCredParams"]) == 4
def test_try_enroll_webauthn(self) -> None:
self.u2f.webauthn_registration_server = self.test_registration_server
state = {
"challenge": "FmKqEKsXOinMhOdNhcZbMCbGleTlDeFr0S1gSYGzPY0",
"user_verification": "discouraged",
}
data = '{"id":"TYJVkw5RJGuwyY-veny4wBvPnhIc1-2vs7a17W6fRPMevfDlTR_YWTnLwgeLjKvNaZgMDd2T75CD9bEUX3FyxQ","rawId":"TYJVkw5RJGuwyY-veny4wBvPnhIc1-2vs7a17W6fRPMevfDlTR_YWTnLwgeLjKvNaZgMDd2T75CD9bEUX3FyxQ","response":{"attestationObject":"o2NmbXRkbm9uZWdhdHRTdG10oGhhdXRoRGF0YVjEdMsc6ARz46ITf5wcoxCGiLKiJRlmv2GeNJ635pkBOmVBAAAA_wAAAAAAAAAAAAAAAAAAAAAAQE2CVZMOUSRrsMmPr3p8uMAbz54SHNftr7O2te1un0TzHr3w5U0f2Fk5y8IHi4yrzWmYDA3dk--Qg_WxFF9xcsWlAQIDJiABIVggo6MzqMkVN1UI6d4gf60CoBH4CnAKURH0Q8ENYnD2k6MiWCCvFWvPJs_p0zGVyBwoZDy7WyQZUAPVZhmCAzXUnapQ-A","clientDataJSON":"eyJ0eXBlIjoid2ViYXV0aG4uY3JlYXRlIiwiY2hhbGxlbmdlIjoiRm1LcUVLc1hPaW5NaE9kTmhjWmJNQ2JHbGVUbERlRnIwUzFnU1lHelBZMCIsIm9yaWdpbiI6Imh0dHBzOi8vcmljaGFyZG1hc2VudHJ5Lm5ncm9rLmlvIiwiY3Jvc3NPcmlnaW4iOmZhbHNlfQ"},"type":""}'
assert len(self.u2f.config.setdefault("devices", [])) == 0
self.u2f.try_enroll("enrollment_data", data, state=state)
assert len(self.u2f.config.setdefault("devices", [])) == 1
device = self.u2f.config.setdefault("devices", [])[0]
assert device["name"] is not None
assert device["ts"] is not None
assert type(device["binding"]) is AuthenticatorData
def test_activate_webauthn(self) -> None:
self.test_try_enroll_webauthn()
result = self.u2f.activate(self.request)
assert isinstance(result, ActivationChallengeResult)
assert len(self.request.session["webauthn_authentication_state"]["challenge"]) == 43
assert self.request.session["webauthn_authentication_state"]["user_verification"] is None
def test_validate_response_state(self) -> None:
self.test_try_enroll_webauthn()
mock_state = Mock()
self.u2f.webauthn_authentication_server.authenticate_complete = mock_state
self.request.session["webauthn_authentication_state"] = "normal state"
assert self.u2f.validate_response(self.request, None, self.response)
_, kwargs = mock_state.call_args
assert kwargs.get("state") == "normal state"
assert "webauthn_authentication_state" not in self.request.session
def test_validate_response_failing_still_clears_state(self) -> None:
self.test_try_enroll_webauthn()
mock_state = Mock(side_effect=ValueError("test"))
self.u2f.webauthn_authentication_server.authenticate_complete = mock_state
self.request.session["webauthn_authentication_state"] = "state"
with raises(ValueError):
self.u2f.validate_response(self.request, None, self.response)
_, kwargs = mock_state.call_args
assert kwargs.get("state") == "state"
assert "webauthn_authentication_state" not in self.request.session
def test_validate_response_missing_session_state(self) -> None:
self.test_try_enroll_webauthn()
mock_state = Mock()
self.u2f.webauthn_authentication_server.authenticate_complete = mock_state
# Session state is missing (e.g., session expired)
assert "webauthn_authentication_state" not in self.request.session
# Should return False without calling authenticate_complete
assert not self.u2f.validate_response(self.request, None, self.response)
mock_state.assert_not_called()
| U2FInterfaceTest |
python | google__jax | tests/pallas/mgpu_matmul_test.py | {
"start": 2725,
"end": 10444
} | class ____(jtu.JaxTestCase):
def setUp(self):
super().setUp()
if not jtu.test_device_matches(["cuda"]):
self.skipTest("Test requires an NVIDIA GPU")
self.enter_context(pallas_call._PALLAS_USE_MOSAIC_GPU(True))
@parameterized.product(
m=(4096,),
k=(4096,),
n=(4096,),
tile_m=(64, 128),
tile_n=(64, 128),
tile_k=(64, 128),
max_concurrent_steps=(2, 4),
dtype=(jnp.float16,),
epi_tile_n=(None, 64),
epi_tile_m=(None, 64),
wg_dimension=tuple(hopper_matmul_mgpu.MatmulDimension),
)
def test_hopper_matmul(self, *args, **kwargs):
self.check_hopper_matmul(*args, **kwargs)
# Grid tiling doesn't really interact with many other options so we can test
# it separately.
@parameterized.product(
grid_minor_dim=tuple(hopper_matmul_mgpu.MatmulDimension),
grid_tile_width=(1, 3, 4),
)
def test_hopper_matmul_grid_tiling(self, grid_minor_dim, grid_tile_width):
self.check_hopper_matmul(
m=4096,
k=4096,
n=4096,
dtype=jnp.float16,
tile_m=64,
tile_n=64,
tile_k=64,
max_concurrent_steps=2,
epi_tile_m=64,
epi_tile_n=64,
wg_dimension=hopper_matmul_mgpu.MatmulDimension.M,
grid_minor_dim=grid_minor_dim,
grid_tile_width=grid_tile_width,
)
@parameterized.product(
tile_m=(64, 128),
tile_n=(64, 128),
wg_dimension=tuple(hopper_matmul_mgpu.MatmulDimension),
cluster_dimension=tuple(hopper_matmul_mgpu.MatmulDimension),
)
def test_hopper_matmul_cluster(self, tile_m, tile_n, wg_dimension, cluster_dimension):
self.check_hopper_matmul(
m=4096,
k=4096,
n=4096,
dtype=jnp.float16,
tile_m=tile_m,
tile_n=tile_n,
tile_k=64,
max_concurrent_steps=4,
epi_tile_m=64,
epi_tile_n=64,
wg_dimension=wg_dimension,
cluster_dimension=cluster_dimension,
)
def check_hopper_matmul(
self,
m,
n,
k,
dtype,
tile_m,
tile_n,
tile_k,
max_concurrent_steps,
epi_tile_m,
epi_tile_n,
wg_dimension,
**kwargs
):
if not jtu.is_cuda_compute_capability_equal("9.0"):
self.skipTest("Only works on GPU with capability sm90a")
epi_tile_size = (epi_tile_m or tile_m) * (epi_tile_n or tile_n)
num_epi_tiles = tile_m * tile_n // epi_tile_size
cta_tile_m = tile_m * (1 + (wg_dimension == hopper_matmul_mgpu.MatmulDimension.M))
cta_tile_n = tile_n * (1 + (wg_dimension == hopper_matmul_mgpu.MatmulDimension.N))
if exceeds_h100_smem(
((cta_tile_m + cta_tile_n) * tile_k * max_concurrent_steps
+ 2 * min(2, num_epi_tiles) * epi_tile_size) * 2
):
self.skipTest("Tile too big to fit into SMEM")
k1, k2, = jax.random.split(jax.random.key(42), 2)
a = jax.random.normal(k1, (m, k), dtype)
b = jax.random.normal(k2, (k, n), dtype)
spec = hopper_matmul_mgpu.TuningConfig(
tile_m=tile_m,
tile_n=tile_n,
tile_k=tile_k,
max_concurrent_steps=max_concurrent_steps,
epi_tile_m=epi_tile_m,
epi_tile_n=epi_tile_n,
wg_dimension=wg_dimension,
**kwargs,
)
out = hopper_matmul_mgpu.matmul(a, b, spec)
out_ref = jnp.dot(a, b, precision=jax.lax.DotAlgorithmPreset.F16_F16_F32)
np.testing.assert_allclose(out, out_ref)
@parameterized.product(
m=(4096,),
k=(4096,),
n=(4096,),
tile_m=(64, 128),
tile_n=(64, 128, 256),
tile_k=(64, 128),
epi_tile_m=(None, 64),
epi_tile_n=(None, 64),
max_concurrent_steps=(2, 4),
lhs_dtype=(jnp.int8,), # TODO(bchetioui): add int4.
rhs_dtype=(jnp.bfloat16, jnp.float16),
wg_dimension=tuple(hopper_mixed_type_matmul_mgpu.MatmulDimension),
)
def test_hopper_mixed_type_matmul(self, *args, **kwargs):
self.check_hopper_mixed_type_matmul(*args, **kwargs)
def check_hopper_mixed_type_matmul(
self,
m,
n,
k,
tile_m,
tile_n,
tile_k,
max_concurrent_steps,
epi_tile_m,
epi_tile_n,
wg_dimension,
lhs_dtype,
rhs_dtype,
**kwargs,
):
if not jtu.is_cuda_compute_capability_equal("9.0"):
self.skipTest("Only works on GPU with capability sm90a")
out_dtype = rhs_dtype
lhs_bits = dtypes.itemsize_bits(lhs_dtype)
rhs_bits = dtypes.itemsize_bits(rhs_dtype)
out_bits = dtypes.itemsize_bits(out_dtype)
cta_tile_m = tile_m * (1 + (wg_dimension == hopper_mixed_type_matmul_mgpu.MatmulDimension.M))
cta_tile_n = tile_n * (1 + (wg_dimension == hopper_mixed_type_matmul_mgpu.MatmulDimension.N))
lhs_smem_bytes = cta_tile_m * tile_k * lhs_bits // 8
rhs_smem_bytes = tile_k * cta_tile_n * rhs_bits // 8
epi_tile_size = (epi_tile_m or tile_m) * (epi_tile_n or tile_n)
num_epi_tiles = tile_m * tile_n // epi_tile_size
out_smem_bytes = 2 * min(2, num_epi_tiles) * epi_tile_size * out_bits // 8
if exceeds_h100_smem(
max_concurrent_steps * (lhs_smem_bytes + rhs_smem_bytes)
+ out_smem_bytes
):
self.skipTest("Tile too big to fit into SMEM")
(k1, k2) = jax.random.split(jax.random.key(42), 2)
lhs = jax.random.randint(
k1, (m, k), minval=-5, maxval=5, dtype=jnp.int8
).astype(lhs_dtype)
rhs = jax.random.normal(k2, (k, n), rhs_dtype)
tuning_config = hopper_mixed_type_matmul_mgpu.TuningConfig(
tile_m=tile_m,
tile_n=tile_n,
tile_k=tile_k,
epi_tile_m=epi_tile_m,
epi_tile_n=epi_tile_n,
max_concurrent_steps=max_concurrent_steps,
wg_dimension=wg_dimension,
**kwargs,
)
out = hopper_mixed_type_matmul_mgpu.mixed_matmul_kernel(
lhs, rhs, out_dtype=out_dtype, config=tuning_config
)
precision = {
jnp.float16: jax.lax.DotAlgorithmPreset.F16_F16_F32,
jnp.bfloat16: jax.lax.DotAlgorithmPreset.BF16_BF16_F32,
}[rhs_dtype]
out_ref = jnp.dot(
lhs.astype(rhs_dtype), rhs, precision=precision,
).astype(out_dtype)
np.testing.assert_allclose(out, out_ref, strict=True)
# Grid tiling doesn't really interact with many other options so we can test
# it separately.
@parameterized.product(
grid_minor_dim=tuple(hopper_matmul_mgpu.MatmulDimension),
grid_tile_width=(1, 3, 4),
)
def test_hopper_mixed_type_matmul_grid_tiling(
self, grid_minor_dim, grid_tile_width
):
self.check_hopper_mixed_type_matmul(
m=4096,
k=4096,
n=4096,
lhs_dtype=jnp.int8,
rhs_dtype=jnp.float16,
tile_m=64,
tile_n=64,
tile_k=64,
max_concurrent_steps=2,
epi_tile_m=64,
epi_tile_n=64,
wg_dimension=hopper_matmul_mgpu.MatmulDimension.M,
grid_minor_dim=grid_minor_dim,
grid_tile_width=grid_tile_width,
)
@parameterized.product(
tile_m=(64, 128),
tile_n=(64, 128),
wg_dimension=tuple(hopper_matmul_mgpu.MatmulDimension),
cluster_dimension=tuple(hopper_matmul_mgpu.MatmulDimension),
)
def test_hopper_mixed_type_matmul_cluster(
self, tile_m, tile_n, wg_dimension, cluster_dimension
):
self.check_hopper_mixed_type_matmul(
m=4096,
k=4096,
n=4096,
lhs_dtype=jnp.int8,
rhs_dtype=jnp.float16,
tile_m=tile_m,
tile_n=tile_n,
tile_k=64,
max_concurrent_steps=4,
epi_tile_m=64,
epi_tile_n=64,
wg_dimension=wg_dimension,
cluster_dimension=cluster_dimension,
)
if __name__ == "__main__":
absltest.main(testLoader=jtu.JaxTestLoader())
| MatrixMultiplicationSm90ATest |
python | facebook__pyre-check | documentation/examples/pytorch/sources/simple_operations.py | {
"start": 466,
"end": 1656
} | class ____:
pass
T1: Tensor[int32, [D1, D2]] = Tensor()
T2: Tensor[int32, [D2, D3]] = Tensor()
T3: Tensor[int32, [T, T]] = Tensor()
T4: Tensor[float32, [D1, D2]] = Tensor()
# T1 + T1 is correctly typed
T1p1: Tensor[int32, [D1, D2]] = T1 + T1
# T1 * T2 is correctly typed
T1m2: Tensor[int32, [D1, D3]] = mm(T1, T2)
def incorrects() -> None:
# T1 + T2 is incorrectly typed, due to different dimensions in the tensors
# pyre-fixme[58]: `+` is not supported for operand types `Tensor[int, [int,
# int]]` and `Tensor[int, [int, int]]`.
Err1 = T1 + T2 # noqa
# T1 * T3 is incorrectly typed
# pyre-fixme[6]: Expected `Tensor[DType, B, C]` for 2nd param but got
# `Tensor[int, T, T]`.
Err2 = mm(T1, T3) # noqa
# T1 + T4 is incorrectly type (dtype)
# pyre-fixme[58]: `+` is not supported for operand types `Tensor[int, [int,
# int]]` and `Tensor[float, [int, int]]`.
Err3 = T1 + T3 # noqa
# Variadics allow to use tensor of multiple shape with the same function
# Here is a usage of add with a Tensor of different shape, still works !
Tx: Tensor[int32, [D1, D1, D1, D1, D2]] = Tensor()
Txpx: Tensor[int32, [D1, D1, D1, D1, D2]] = Tx + Tx
| T |
python | keras-team__keras | keras/src/layers/pooling/max_pooling1d.py | {
"start": 181,
"end": 3346
} | class ____(BasePooling):
"""Max pooling operation for 1D temporal data.
Downsamples the input representation by taking the maximum value over a
spatial window of size `pool_size`. The window is shifted by `strides`.
The resulting output when using the `"valid"` padding option has a shape of:
`output_shape = (input_shape - pool_size + 1) / strides)`.
The resulting output shape when using the `"same"` padding option is:
`output_shape = input_shape / strides`
Args:
pool_size: int, size of the max pooling window.
strides: int or None. Specifies how much the pooling window moves
for each pooling step. If None, it will default to `pool_size`.
padding: string, either `"valid"` or `"same"` (case-insensitive).
`"valid"` means no padding. `"same"` results in padding evenly to
the left/right or up/down of the input such that output has the same
height/width dimension as the input.
data_format: string, either `"channels_last"` or `"channels_first"`.
The ordering of the dimensions in the inputs. `"channels_last"`
corresponds to inputs with shape `(batch, steps, features)`
while `"channels_first"` corresponds to inputs with shape
`(batch, features, steps)`. It defaults to the `image_data_format`
value found in your Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be `"channels_last"`.
Input shape:
- If `data_format="channels_last"`:
3D tensor with shape `(batch_size, steps, features)`.
- If `data_format="channels_first"`:
3D tensor with shape `(batch_size, features, steps)`.
Output shape:
- If `data_format="channels_last"`:
3D tensor with shape `(batch_size, downsampled_steps, features)`.
- If `data_format="channels_first"`:
3D tensor with shape `(batch_size, features, downsampled_steps)`.
Examples:
`strides=1` and `padding="valid"`:
>>> x = np.array([1., 2., 3., 4., 5.])
>>> x = np.reshape(x, [1, 5, 1])
>>> max_pool_1d = keras.layers.MaxPooling1D(pool_size=2,
... strides=1, padding="valid")
>>> max_pool_1d(x)
`strides=2` and `padding="valid"`:
>>> x = np.array([1., 2., 3., 4., 5.])
>>> x = np.reshape(x, [1, 5, 1])
>>> max_pool_1d = keras.layers.MaxPooling1D(pool_size=2,
... strides=2, padding="valid")
>>> max_pool_1d(x)
`strides=1` and `padding="same"`:
>>> x = np.array([1., 2., 3., 4., 5.])
>>> x = np.reshape(x, [1, 5, 1])
>>> max_pool_1d = keras.layers.MaxPooling1D(pool_size=2,
... strides=1, padding="same")
>>> max_pool_1d(x)
"""
def __init__(
self,
pool_size=2,
strides=None,
padding="valid",
data_format=None,
name=None,
**kwargs,
):
super().__init__(
pool_size,
strides,
pool_dimensions=1,
pool_mode="max",
padding=padding,
data_format=data_format,
name=name,
**kwargs,
)
| MaxPooling1D |
python | matplotlib__matplotlib | lib/matplotlib/backends/backend_gtk3.py | {
"start": 1539,
"end": 11899
} | class ____(_FigureCanvasGTK, Gtk.DrawingArea):
required_interactive_framework = "gtk3"
manager_class = _api.classproperty(lambda cls: FigureManagerGTK3)
# Setting this as a static constant prevents
# this resulting expression from leaking
event_mask = (Gdk.EventMask.BUTTON_PRESS_MASK
| Gdk.EventMask.BUTTON_RELEASE_MASK
| Gdk.EventMask.EXPOSURE_MASK
| Gdk.EventMask.KEY_PRESS_MASK
| Gdk.EventMask.KEY_RELEASE_MASK
| Gdk.EventMask.ENTER_NOTIFY_MASK
| Gdk.EventMask.LEAVE_NOTIFY_MASK
| Gdk.EventMask.POINTER_MOTION_MASK
| Gdk.EventMask.SCROLL_MASK)
def __init__(self, figure=None):
super().__init__(figure=figure)
self._idle_draw_id = 0
self._rubberband_rect = None
self.connect('scroll_event', self.scroll_event)
self.connect('button_press_event', self.button_press_event)
self.connect('button_release_event', self.button_release_event)
self.connect('configure_event', self.configure_event)
self.connect('screen-changed', self._update_device_pixel_ratio)
self.connect('notify::scale-factor', self._update_device_pixel_ratio)
self.connect('draw', self.on_draw_event)
self.connect('draw', self._post_draw)
self.connect('key_press_event', self.key_press_event)
self.connect('key_release_event', self.key_release_event)
self.connect('motion_notify_event', self.motion_notify_event)
self.connect('enter_notify_event', self.enter_notify_event)
self.connect('leave_notify_event', self.leave_notify_event)
self.connect('size_allocate', self.size_allocate)
self.set_events(self.__class__.event_mask)
self.set_can_focus(True)
css = Gtk.CssProvider()
css.load_from_data(b".matplotlib-canvas { background-color: white; }")
style_ctx = self.get_style_context()
style_ctx.add_provider(css, Gtk.STYLE_PROVIDER_PRIORITY_APPLICATION)
style_ctx.add_class("matplotlib-canvas")
def destroy(self):
CloseEvent("close_event", self)._process()
super().destroy()
def set_cursor(self, cursor):
# docstring inherited
window = self.get_property("window")
if window is not None:
window.set_cursor(_mpl_to_gtk_cursor(cursor))
context = GLib.MainContext.default()
context.iteration(True)
def _mpl_coords(self, event=None):
"""
Convert the position of a GTK event, or of the current cursor position
if *event* is None, to Matplotlib coordinates.
GTK use logical pixels, but the figure is scaled to physical pixels for
rendering. Transform to physical pixels so that all of the down-stream
transforms work as expected.
Also, the origin is different and needs to be corrected.
"""
if event is None:
window = self.get_window()
t, x, y, state = window.get_device_position(
window.get_display().get_device_manager().get_client_pointer())
else:
x, y = event.x, event.y
x = x * self.device_pixel_ratio
# flip y so y=0 is bottom of canvas
y = self.figure.bbox.height - y * self.device_pixel_ratio
return x, y
def scroll_event(self, widget, event):
step = 1 if event.direction == Gdk.ScrollDirection.UP else -1
MouseEvent("scroll_event", self,
*self._mpl_coords(event), step=step,
modifiers=self._mpl_modifiers(event.state),
guiEvent=event)._process()
return False # finish event propagation?
def button_press_event(self, widget, event):
MouseEvent("button_press_event", self,
*self._mpl_coords(event), event.button,
modifiers=self._mpl_modifiers(event.state),
guiEvent=event)._process()
return False # finish event propagation?
def button_release_event(self, widget, event):
MouseEvent("button_release_event", self,
*self._mpl_coords(event), event.button,
modifiers=self._mpl_modifiers(event.state),
guiEvent=event)._process()
return False # finish event propagation?
def key_press_event(self, widget, event):
KeyEvent("key_press_event", self,
self._get_key(event), *self._mpl_coords(),
guiEvent=event)._process()
return True # stop event propagation
def key_release_event(self, widget, event):
KeyEvent("key_release_event", self,
self._get_key(event), *self._mpl_coords(),
guiEvent=event)._process()
return True # stop event propagation
def motion_notify_event(self, widget, event):
MouseEvent("motion_notify_event", self, *self._mpl_coords(event),
buttons=self._mpl_buttons(event.state),
modifiers=self._mpl_modifiers(event.state),
guiEvent=event)._process()
return False # finish event propagation?
def enter_notify_event(self, widget, event):
gtk_mods = Gdk.Keymap.get_for_display(
self.get_display()).get_modifier_state()
LocationEvent("figure_enter_event", self, *self._mpl_coords(event),
modifiers=self._mpl_modifiers(gtk_mods),
guiEvent=event)._process()
def leave_notify_event(self, widget, event):
gtk_mods = Gdk.Keymap.get_for_display(
self.get_display()).get_modifier_state()
LocationEvent("figure_leave_event", self, *self._mpl_coords(event),
modifiers=self._mpl_modifiers(gtk_mods),
guiEvent=event)._process()
def size_allocate(self, widget, allocation):
dpival = self.figure.dpi
winch = allocation.width * self.device_pixel_ratio / dpival
hinch = allocation.height * self.device_pixel_ratio / dpival
self.figure.set_size_inches(winch, hinch, forward=False)
ResizeEvent("resize_event", self)._process()
self.draw_idle()
@staticmethod
def _mpl_buttons(event_state):
modifiers = [
(MouseButton.LEFT, Gdk.ModifierType.BUTTON1_MASK),
(MouseButton.MIDDLE, Gdk.ModifierType.BUTTON2_MASK),
(MouseButton.RIGHT, Gdk.ModifierType.BUTTON3_MASK),
(MouseButton.BACK, Gdk.ModifierType.BUTTON4_MASK),
(MouseButton.FORWARD, Gdk.ModifierType.BUTTON5_MASK),
]
# State *before* press/release.
return [name for name, mask in modifiers if event_state & mask]
@staticmethod
def _mpl_modifiers(event_state, *, exclude=None):
modifiers = [
("ctrl", Gdk.ModifierType.CONTROL_MASK, "control"),
("alt", Gdk.ModifierType.MOD1_MASK, "alt"),
("shift", Gdk.ModifierType.SHIFT_MASK, "shift"),
("super", Gdk.ModifierType.MOD4_MASK, "super"),
]
return [name for name, mask, key in modifiers
if exclude != key and event_state & mask]
def _get_key(self, event):
unikey = chr(Gdk.keyval_to_unicode(event.keyval))
key = cbook._unikey_or_keysym_to_mplkey(
unikey, Gdk.keyval_name(event.keyval))
mods = self._mpl_modifiers(event.state, exclude=key)
if "shift" in mods and unikey.isprintable():
mods.remove("shift")
return "+".join([*mods, key])
def _update_device_pixel_ratio(self, *args, **kwargs):
# We need to be careful in cases with mixed resolution displays if
# device_pixel_ratio changes.
if self._set_device_pixel_ratio(self.get_scale_factor()):
# The easiest way to resize the canvas is to emit a resize event
# since we implement all the logic for resizing the canvas for that
# event.
self.queue_resize()
self.queue_draw()
def configure_event(self, widget, event):
if widget.get_property("window") is None:
return
w = event.width * self.device_pixel_ratio
h = event.height * self.device_pixel_ratio
if w < 3 or h < 3:
return # empty fig
# resize the figure (in inches)
dpi = self.figure.dpi
self.figure.set_size_inches(w / dpi, h / dpi, forward=False)
return False # finish event propagation?
def _draw_rubberband(self, rect):
self._rubberband_rect = rect
# TODO: Only update the rubberband area.
self.queue_draw()
def _post_draw(self, widget, ctx):
if self._rubberband_rect is None:
return
x0, y0, w, h = (dim / self.device_pixel_ratio
for dim in self._rubberband_rect)
x1 = x0 + w
y1 = y0 + h
# Draw the lines from x0, y0 towards x1, y1 so that the
# dashes don't "jump" when moving the zoom box.
ctx.move_to(x0, y0)
ctx.line_to(x0, y1)
ctx.move_to(x0, y0)
ctx.line_to(x1, y0)
ctx.move_to(x0, y1)
ctx.line_to(x1, y1)
ctx.move_to(x1, y0)
ctx.line_to(x1, y1)
ctx.set_antialias(1)
ctx.set_line_width(1)
ctx.set_dash((3, 3), 0)
ctx.set_source_rgb(0, 0, 0)
ctx.stroke_preserve()
ctx.set_dash((3, 3), 3)
ctx.set_source_rgb(1, 1, 1)
ctx.stroke()
def on_draw_event(self, widget, ctx):
# to be overwritten by GTK3Agg or GTK3Cairo
pass
def draw(self):
# docstring inherited
if self.is_drawable():
self.queue_draw()
def draw_idle(self):
# docstring inherited
if self._idle_draw_id != 0:
return
def idle_draw(*args):
try:
self.draw()
finally:
self._idle_draw_id = 0
return False
self._idle_draw_id = GLib.idle_add(idle_draw)
def flush_events(self):
# docstring inherited
context = GLib.MainContext.default()
while context.pending():
context.iteration(True)
| FigureCanvasGTK3 |
python | huggingface__transformers | src/transformers/models/whisper/configuration_whisper.py | {
"start": 1922,
"end": 14619
} | class ____(PreTrainedConfig):
r"""
This is the configuration class to store the configuration of a [`WhisperModel`]. It is used to instantiate a
Whisper model according to the specified arguments, defining the model architecture. Instantiating a configuration
with the defaults will yield a similar configuration to that of the Whisper
[openai/whisper-tiny](https://huggingface.co/openai/whisper-tiny) architecture.
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 51865):
Vocabulary size of the Whisper model. Defines the number of different tokens that can be represented by the
`decoder_input_ids` passed when calling [`WhisperModel`]
num_mel_bins (`int`, *optional*, defaults to 80):
Number of mel features used per input features. Should correspond to the value used in the
`WhisperProcessor` class.
encoder_layers (`int`, *optional*, defaults to 4):
Number of encoder layers.
decoder_layers (`int`, *optional*, defaults to 4):
Number of decoder layers.
encoder_attention_heads (`int`, *optional*, defaults to 6):
Number of attention heads for each attention layer in the Transformer encoder.
decoder_attention_heads (`int`, *optional*, defaults to 6):
Number of attention heads for each attention layer in the Transformer decoder.
encoder_ffn_dim (`int`, *optional*, defaults to 1536):
Dimensionality of the "intermediate" (often named feed-forward) layer in encoder.
decoder_ffn_dim (`int`, *optional*, defaults to 1536):
Dimensionality of the "intermediate" (often named feed-forward) layer in decoder.
encoder_layerdrop (`float`, *optional*, defaults to 0.0):
The LayerDrop probability for the encoder. See the [LayerDrop paper](see https://huggingface.co/papers/1909.11556)
for more details.
decoder_layerdrop (`float`, *optional*, defaults to 0.0):
The LayerDrop probability for the decoder. See the [LayerDrop paper](see https://huggingface.co/papers/1909.11556)
for more details.
decoder_start_token_id (`int`, *optional*, defaults to 50257):
Corresponds to the "<|startoftranscript|>" token, which is automatically used when no `decoder_input_ids`
are provided to the `generate` function. It is used to guide the model`s generation process depending on
the task.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models).
is_encoder_decoder (`bool`, *optional*, defaults to `True`):
Whether the model is used as an encoder/decoder or not.
activation_function (`str`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"silu"` and `"gelu_new"` are supported.
d_model (`int`, *optional*, defaults to 384):
Dimensionality of the layers.
dropout (`float`, *optional*, defaults to 0.1):
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
attention_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
activation_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for activations inside the fully connected layer.
init_std (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
scale_embedding (`bool`, *optional*, defaults to False):
Scale embeddings by diving by sqrt(d_model).
max_source_positions (`int`, *optional*, defaults to 1500):
The maximum sequence length of log-mel filter-bank features that this model might ever be used with.
max_target_positions (`int`, *optional*, defaults to 448):
The maximum sequence length that this model might ever be used with. Typically set this to something large
just in case (e.g., 512 or 1024 or 2048).
pad_token_id (`int`, *optional*, defaults to 50256):
Padding token id.
bos_token_id (`int`, *optional*, defaults to 50256):
Begin of stream token id.
eos_token_id (`int`, *optional*, defaults to 50256):
End of stream token id.
suppress_tokens (`list[int]`, *optional*):
A list containing the non-speech tokens that will be used by the logit processor in the `generate`
function. NON_SPEECH_TOKENS and NON_SPEECH_TOKENS_MULTI each correspond to the `english-only` and the
`multilingual` model.
begin_suppress_tokens (`list[int]`, *optional*, defaults to `[220,50256]`):
A list containing tokens that will be suppressed at the beginning of the sampling process. Initialized as
the token for `" "` (`blank_token_id`) and the `eos_token_id`
use_weighted_layer_sum (`bool`, *optional*, defaults to `False`):
Whether to use a weighted average of layer outputs with learned weights. Only relevant when using an
instance of [`WhisperForAudioClassification`].
classifier_proj_size (`int`, *optional*, defaults to 256):
Dimensionality of the projection before token mean-pooling for classification. Only relevant when using an
instance of [`WhisperForAudioClassification`].
apply_spec_augment (`bool`, *optional*, defaults to `False`):
Whether to apply *SpecAugment* data augmentation to the outputs of the feature encoder. For reference see
[SpecAugment: A Simple Data Augmentation Method for Automatic Speech
Recognition](https://huggingface.co/papers/1904.08779).
mask_time_prob (`float`, *optional*, defaults to 0.05):
Percentage (between 0 and 1) of all feature vectors along the time axis which will be masked. The masking
procedure generates `mask_time_prob*len(time_axis)/mask_time_length` independent masks over the axis. If
reasoning from the probability of each feature vector to be chosen as the start of the vector span to be
masked, *mask_time_prob* should be `prob_vector_start*mask_time_length`. Note that overlap may decrease the
actual percentage of masked vectors. This is only relevant if `apply_spec_augment == True`.
mask_time_length (`int`, *optional*, defaults to 10):
Length of vector span along the time axis.
mask_time_min_masks (`int`, *optional*, defaults to 2),:
The minimum number of masks of length `mask_feature_length` generated along the time axis, each time step,
irrespectively of `mask_feature_prob`. Only relevant if ''mask_time_prob*len(time_axis)/mask_time_length <
mask_time_min_masks''
mask_feature_prob (`float`, *optional*, defaults to 0.0):
Percentage (between 0 and 1) of all feature vectors along the feature axis which will be masked. The
masking procedure generates `mask_feature_prob*len(feature_axis)/mask_time_length` independent masks over
the axis. If reasoning from the probability of each feature vector to be chosen as the start of the vector
span to be masked, *mask_feature_prob* should be `prob_vector_start*mask_feature_length`. Note that overlap
may decrease the actual percentage of masked vectors. This is only relevant if `apply_spec_augment is
True`.
mask_feature_length (`int`, *optional*, defaults to 10):
Length of vector span along the feature axis.
mask_feature_min_masks (`int`, *optional*, defaults to 0),:
The minimum number of masks of length `mask_feature_length` generated along the feature axis, each time
step, irrespectively of `mask_feature_prob`. Only relevant if
`mask_feature_prob*len(feature_axis)/mask_feature_length < mask_feature_min_masks`.
median_filter_width (`int`, *optional*, defaults to 7):
Width of the median filter used to smoothen to cross-attention outputs when computing token timestamps.
Should be an odd number.
Example:
```python
>>> from transformers import WhisperConfig, WhisperModel
>>> # Initializing a Whisper tiny style configuration
>>> configuration = WhisperConfig()
>>> # Initializing a model (with random weights) from the tiny style configuration
>>> model = WhisperModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "whisper"
keys_to_ignore_at_inference = ["past_key_values"]
attribute_map = {
"num_key_value_heads": "encoder_attention_heads",
"num_attention_heads": "encoder_attention_heads",
"hidden_size": "d_model",
}
def __init__(
self,
vocab_size=51865,
num_mel_bins=80,
encoder_layers=4,
encoder_attention_heads=6,
decoder_layers=4,
decoder_attention_heads=6,
decoder_ffn_dim=1536,
encoder_ffn_dim=1536,
encoder_layerdrop=0.0,
decoder_layerdrop=0.0,
decoder_start_token_id=50257,
use_cache=True,
is_encoder_decoder=True,
activation_function="gelu",
d_model=384,
dropout=0.0,
attention_dropout=0.0,
activation_dropout=0.0,
init_std=0.02,
scale_embedding=False,
max_source_positions=1500,
max_target_positions=448,
pad_token_id=50256,
bos_token_id=50256,
eos_token_id=50256,
suppress_tokens=None,
begin_suppress_tokens=[220, 50256],
use_weighted_layer_sum=False,
classifier_proj_size=256,
apply_spec_augment=False,
mask_time_prob=0.05,
mask_time_length=10,
mask_time_min_masks=2,
mask_feature_prob=0.0,
mask_feature_length=10,
mask_feature_min_masks=0,
median_filter_width=7,
**kwargs,
):
self.vocab_size = vocab_size
self.num_mel_bins = num_mel_bins
self.d_model = d_model
self.encoder_layers = encoder_layers
self.encoder_attention_heads = encoder_attention_heads
self.decoder_layers = decoder_layers
self.decoder_attention_heads = decoder_attention_heads
self.decoder_ffn_dim = decoder_ffn_dim
self.encoder_ffn_dim = encoder_ffn_dim
self.dropout = dropout
self.attention_dropout = attention_dropout
self.activation_dropout = activation_dropout
self.activation_function = activation_function
self.init_std = init_std
self.encoder_layerdrop = encoder_layerdrop
self.decoder_layerdrop = decoder_layerdrop
self.use_cache = use_cache
self.num_hidden_layers = encoder_layers
self.scale_embedding = scale_embedding # scale factor will be sqrt(d_model) if True
self.max_source_positions = max_source_positions
self.max_target_positions = max_target_positions
# Audio Classification-specific parameters. Feel free to ignore for other classes.
self.classifier_proj_size = classifier_proj_size
self.use_weighted_layer_sum = use_weighted_layer_sum
# fine-tuning config parameters for SpecAugment: https://huggingface.co/papers/1904.08779
self.apply_spec_augment = apply_spec_augment
self.mask_time_prob = mask_time_prob
self.mask_time_length = mask_time_length
self.mask_time_min_masks = mask_time_min_masks
self.mask_feature_prob = mask_feature_prob
self.mask_feature_length = mask_feature_length
self.mask_feature_min_masks = mask_feature_min_masks
self.median_filter_width = median_filter_width
super().__init__(
pad_token_id=pad_token_id,
bos_token_id=bos_token_id,
eos_token_id=eos_token_id,
is_encoder_decoder=is_encoder_decoder,
decoder_start_token_id=decoder_start_token_id,
suppress_tokens=suppress_tokens,
begin_suppress_tokens=begin_suppress_tokens,
**kwargs,
)
__all__ = ["WhisperConfig"]
| WhisperConfig |
python | great-expectations__great_expectations | contrib/experimental/great_expectations_experimental/expectations/expect_queried_column_value_frequency_to_meet_threshold.py | {
"start": 675,
"end": 7003
} | class ____(QueryExpectation):
"""Expect the frequency of occurrences of a specified value in a queried column to be at least <threshold> percent of values in that column."""
column: str
threshold: Union[float, List[float]]
value: Union[str, List[str]]
metric_dependencies = ("query.column",)
query = """
SELECT {col},
CAST(COUNT({col}) AS float) / (SELECT COUNT({col}) FROM {batch})
FROM {batch}
GROUP BY {col}
"""
success_keys = (
"column",
"value",
"threshold",
"query",
)
domain_keys = ("batch_id", "row_condition", "condition_parser")
default_kwarg_values = {
"result_format": "BASIC",
"catch_exceptions": False,
"meta": None,
"column": None,
"value": None,
"threshold": 1,
"query": query,
}
def validate_configuration(
self, configuration: Optional[ExpectationConfiguration] = None
) -> None:
super().validate_configuration(configuration)
configuration = configuration or self.configuration
value = configuration["kwargs"].get("value")
threshold = configuration["kwargs"].get("threshold")
try:
assert value is not None, "'value' must be specified"
assert (isinstance(threshold, (int, float)) and 0 < threshold <= 1) or (
isinstance(threshold, list)
and all(isinstance(x, (int, float)) for x in threshold)
and all(0 < x <= 1 for x in threshold)
and 0 < sum(threshold) <= 1
), (
"'threshold' must be 1, a float between 0 and 1, or a list of floats whose sum is between 0 and 1"
)
if isinstance(threshold, list):
assert isinstance(value, list) and len(value) == len(threshold), (
"'value' and 'threshold' must contain the same number of arguments"
)
except AssertionError as e:
raise InvalidExpectationConfigurationError(str(e))
def _validate(
self,
metrics: dict,
runtime_configuration: dict = None,
execution_engine: ExecutionEngine = None,
) -> Union[ExpectationValidationResult, dict]:
configuration = self.configuration
value = configuration["kwargs"].get("value")
threshold = configuration["kwargs"].get("threshold")
query_result = metrics.get("query.column")
query_result = dict([element.values() for element in query_result])
if isinstance(value, list):
success = all(query_result[value[i]] >= threshold[i] for i in range(len(value)))
return {
"success": success,
"result": {"observed_value": [query_result[value[i]] for i in range(len(value))]},
}
success = query_result[value] >= threshold
return {
"success": success,
"result": {"observed_value": query_result[value]},
}
examples = [
{
"data": [
{
"data": {
"col1": [1, 2, 2, 3, 4],
"col2": ["a", "a", "b", "b", "a"],
},
},
],
"suppress_test_for": ["bigquery", "trino", "snowflake"],
"tests": [
{
"title": "basic_positive_test",
"exact_match_out": False,
"include_in_gallery": True,
"in": {
"column": "col2",
"value": "a",
"threshold": 0.6,
},
"out": {"success": True},
},
{
"title": "basic_negative_test",
"exact_match_out": False,
"include_in_gallery": True,
"in": {
"column": "col1",
"value": 2,
"threshold": 1,
},
"out": {"success": False},
},
{
"title": "multi_value_positive_test",
"exact_match_out": False,
"include_in_gallery": True,
"in": {
"column": "col2",
"value": ["a", "b"],
"threshold": [0.6, 0.4],
},
"out": {"success": True},
},
{
"title": "multi_value_positive_test_static_data_asset",
"exact_match_out": False,
"include_in_gallery": True,
"in": {
"column": "col2",
"value": ["a", "b"],
"threshold": [0.6, 0.4],
"query": """
SELECT {col},
CAST(COUNT({col}) AS float) / (SELECT COUNT({col}) FROM test)
FROM test
GROUP BY {col}
""",
},
"out": {"success": True},
},
{
"title": "multi_value_positive_test_row_condition",
"exact_match_out": False,
"include_in_gallery": True,
"in": {
"column": "col2",
"value": ["a", "b"],
"threshold": [0.5, 0.5],
"row_condition": 'col("col1")==2',
"condition_parser": "great_expectations",
},
"out": {"success": True},
},
],
},
]
# This dictionary contains metadata for display in the public gallery
library_metadata = {
"tags": ["query-based"],
"contributors": ["@austiezr", "@mkopec87"],
}
if __name__ == "__main__":
ExpectQueriedColumnValueFrequencyToMeetThreshold().print_diagnostic_checklist()
| ExpectQueriedColumnValueFrequencyToMeetThreshold |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/memberAccess3.py | {
"start": 1002,
"end": 1097
} | class ____(Parent[_TChild]):
def __init__(self, val: _TChild):
self.member1 = val
| Child |
python | walkccc__LeetCode | solutions/158. Read N Characters Given Read4 II - Call multiple times/158.py | {
"start": 82,
"end": 618
} | class ____:
def read(self, buf: list[str], n: int) -> int:
i = 0 # buf's index
while i < n:
if self.i4 == self.n4: # All the characters in the buf4 are consumed.
self.i4 = 0 # Reset the buf4's index.
# Read <= 4 characters from the file to the buf4.
self.n4 = read4(self.buf4)
if self.n4 == 0: # Reach the EOF.
return i
buf[i] = self.buf4[self.i4]
i += 1
self.i4 += 1
return i
buf4 = [' '] * 4
i4 = 0 # buf4's index
n4 = 0 # buf4's size
| Solution |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/distributions/student_t_test.py | {
"start": 1529,
"end": 19332
} | class ____(test.TestCase):
def testStudentPDFAndLogPDF(self):
batch_size = 6
df = constant_op.constant([3.] * batch_size)
mu = constant_op.constant([7.] * batch_size)
sigma = constant_op.constant([8.] * batch_size)
df_v = 3.
mu_v = 7.
sigma_v = 8.
t = np.array([-2.5, 2.5, 8., 0., -1., 2.], dtype=np.float32)
student = student_t.StudentT(df, loc=mu, scale=-sigma) # pylint: disable=invalid-unary-operand-type
log_pdf = student.log_prob(t)
self.assertEqual(log_pdf.get_shape(), (6,))
log_pdf_values = self.evaluate(log_pdf)
pdf = student.prob(t)
self.assertEqual(pdf.get_shape(), (6,))
pdf_values = self.evaluate(pdf)
if not stats:
return
expected_log_pdf = stats.t.logpdf(t, df_v, loc=mu_v, scale=sigma_v)
expected_pdf = stats.t.pdf(t, df_v, loc=mu_v, scale=sigma_v)
self.assertAllClose(expected_log_pdf, log_pdf_values)
self.assertAllClose(np.log(expected_pdf), log_pdf_values)
self.assertAllClose(expected_pdf, pdf_values)
self.assertAllClose(np.exp(expected_log_pdf), pdf_values)
def testStudentLogPDFMultidimensional(self):
batch_size = 6
df = constant_op.constant([[1.5, 7.2]] * batch_size)
mu = constant_op.constant([[3., -3.]] * batch_size)
sigma = constant_op.constant(
[[-math.sqrt(10.), math.sqrt(15.)]] * batch_size)
df_v = np.array([1.5, 7.2])
mu_v = np.array([3., -3.])
sigma_v = np.array([np.sqrt(10.), np.sqrt(15.)])
t = np.array([[-2.5, 2.5, 4., 0., -1., 2.]], dtype=np.float32).T
student = student_t.StudentT(df, loc=mu, scale=sigma)
log_pdf = student.log_prob(t)
log_pdf_values = self.evaluate(log_pdf)
self.assertEqual(log_pdf.get_shape(), (6, 2))
pdf = student.prob(t)
pdf_values = self.evaluate(pdf)
self.assertEqual(pdf.get_shape(), (6, 2))
if not stats:
return
expected_log_pdf = stats.t.logpdf(t, df_v, loc=mu_v, scale=sigma_v)
expected_pdf = stats.t.pdf(t, df_v, loc=mu_v, scale=sigma_v)
self.assertAllClose(expected_log_pdf, log_pdf_values)
self.assertAllClose(np.log(expected_pdf), log_pdf_values)
self.assertAllClose(expected_pdf, pdf_values)
self.assertAllClose(np.exp(expected_log_pdf), pdf_values)
def testStudentCDFAndLogCDF(self):
batch_size = 6
df = constant_op.constant([3.] * batch_size)
mu = constant_op.constant([7.] * batch_size)
sigma = constant_op.constant([-8.] * batch_size)
df_v = 3.
mu_v = 7.
sigma_v = 8.
t = np.array([-2.5, 2.5, 8., 0., -1., 2.], dtype=np.float32)
student = student_t.StudentT(df, loc=mu, scale=sigma)
log_cdf = student.log_cdf(t)
self.assertEqual(log_cdf.get_shape(), (6,))
log_cdf_values = self.evaluate(log_cdf)
cdf = student.cdf(t)
self.assertEqual(cdf.get_shape(), (6,))
cdf_values = self.evaluate(cdf)
if not stats:
return
expected_log_cdf = stats.t.logcdf(t, df_v, loc=mu_v, scale=sigma_v)
expected_cdf = stats.t.cdf(t, df_v, loc=mu_v, scale=sigma_v)
self.assertAllClose(expected_log_cdf, log_cdf_values, atol=0., rtol=1e-5)
self.assertAllClose(
np.log(expected_cdf), log_cdf_values, atol=0., rtol=1e-5)
self.assertAllClose(expected_cdf, cdf_values, atol=0., rtol=1e-5)
self.assertAllClose(
np.exp(expected_log_cdf), cdf_values, atol=0., rtol=1e-5)
def testStudentEntropy(self):
df_v = np.array([[2., 3., 7.]]) # 1x3
mu_v = np.array([[1., -1, 0]]) # 1x3
sigma_v = np.array([[1., -2., 3.]]).T # transposed => 3x1
student = student_t.StudentT(df=df_v, loc=mu_v, scale=sigma_v)
ent = student.entropy()
ent_values = self.evaluate(ent)
# Help scipy broadcast to 3x3
ones = np.array([[1, 1, 1]])
sigma_bc = np.abs(sigma_v) * ones
mu_bc = ones.T * mu_v
df_bc = ones.T * df_v
if not stats:
return
expected_entropy = stats.t.entropy(
np.reshape(df_bc, [-1]),
loc=np.reshape(mu_bc, [-1]),
scale=np.reshape(sigma_bc, [-1]))
expected_entropy = np.reshape(expected_entropy, df_bc.shape)
self.assertAllClose(expected_entropy, ent_values)
def testStudentSample(self):
df = constant_op.constant(4.)
mu = constant_op.constant(3.)
sigma = constant_op.constant(-math.sqrt(10.))
df_v = 4.
mu_v = 3.
sigma_v = np.sqrt(10.)
n = constant_op.constant(200000)
student = student_t.StudentT(df=df, loc=mu, scale=sigma)
samples = student.sample(n, seed=123456)
sample_values = self.evaluate(samples)
n_val = 200000
self.assertEqual(sample_values.shape, (n_val,))
self.assertAllClose(sample_values.mean(), mu_v, rtol=0.1, atol=0)
self.assertAllClose(
sample_values.var(), sigma_v**2 * df_v / (df_v - 2), rtol=0.1, atol=0)
self._checkKLApprox(df_v, mu_v, sigma_v, sample_values)
# Test that sampling with the same seed twice gives the same results.
def testStudentSampleMultipleTimes(self):
df = constant_op.constant(4.)
mu = constant_op.constant(3.)
sigma = constant_op.constant(math.sqrt(10.))
n = constant_op.constant(100)
random_seed.set_random_seed(654321)
student = student_t.StudentT(df=df, loc=mu, scale=sigma, name="student_t1")
samples1 = self.evaluate(student.sample(n, seed=123456))
random_seed.set_random_seed(654321)
student2 = student_t.StudentT(df=df, loc=mu, scale=sigma, name="student_t2")
samples2 = self.evaluate(student2.sample(n, seed=123456))
self.assertAllClose(samples1, samples2)
def testStudentSampleSmallDfNoNan(self):
df_v = [1e-1, 1e-5, 1e-10, 1e-20]
df = constant_op.constant(df_v)
n = constant_op.constant(200000)
student = student_t.StudentT(df=df, loc=1., scale=1.)
samples = student.sample(n, seed=123456)
sample_values = self.evaluate(samples)
n_val = 200000
self.assertEqual(sample_values.shape, (n_val, 4))
self.assertTrue(np.all(np.logical_not(np.isnan(sample_values))))
def testStudentSampleMultiDimensional(self):
batch_size = 7
df = constant_op.constant([[5., 7.]] * batch_size)
mu = constant_op.constant([[3., -3.]] * batch_size)
sigma = constant_op.constant(
[[math.sqrt(10.), math.sqrt(15.)]] * batch_size)
df_v = [5., 7.]
mu_v = [3., -3.]
sigma_v = [np.sqrt(10.), np.sqrt(15.)]
n = constant_op.constant(200000)
student = student_t.StudentT(df=df, loc=mu, scale=sigma)
samples = student.sample(n, seed=123456)
sample_values = self.evaluate(samples)
self.assertEqual(samples.get_shape(), (200000, batch_size, 2))
self.assertAllClose(
sample_values[:, 0, 0].mean(), mu_v[0], rtol=0.1, atol=0)
self.assertAllClose(
sample_values[:, 0, 0].var(),
sigma_v[0]**2 * df_v[0] / (df_v[0] - 2),
rtol=0.2,
atol=0)
self._checkKLApprox(df_v[0], mu_v[0], sigma_v[0], sample_values[:, 0, 0])
self.assertAllClose(
sample_values[:, 0, 1].mean(), mu_v[1], rtol=0.1, atol=0)
self.assertAllClose(
sample_values[:, 0, 1].var(),
sigma_v[1]**2 * df_v[1] / (df_v[1] - 2),
rtol=0.2,
atol=0)
self._checkKLApprox(df_v[1], mu_v[1], sigma_v[1], sample_values[:, 0, 1])
def _checkKLApprox(self, df, mu, sigma, samples):
n = samples.size
np.random.seed(137)
if not stats:
return
sample_scipy = stats.t.rvs(df, loc=mu, scale=sigma, size=n)
covg = 0.99
r = stats.t.interval(covg, df, loc=mu, scale=sigma)
bins = 100
hist, _ = np.histogram(samples, bins=bins, range=r)
hist_scipy, _ = np.histogram(sample_scipy, bins=bins, range=r)
self.assertGreater(hist.sum(), n * (covg - .01))
self.assertGreater(hist_scipy.sum(), n * (covg - .01))
hist_min1 = hist + 1. # put at least one item in each bucket
hist_norm = hist_min1 / hist_min1.sum()
hist_scipy_min1 = hist_scipy + 1. # put at least one item in each bucket
hist_scipy_norm = hist_scipy_min1 / hist_scipy_min1.sum()
kl_appx = np.sum(np.log(hist_scipy_norm / hist_norm) * hist_scipy_norm)
self.assertLess(kl_appx, 1)
def testBroadcastingParams(self):
def _check(student):
self.assertEqual(student.mean().get_shape(), (3,))
self.assertEqual(student.variance().get_shape(), (3,))
self.assertEqual(student.entropy().get_shape(), (3,))
self.assertEqual(student.log_prob(2.).get_shape(), (3,))
self.assertEqual(student.prob(2.).get_shape(), (3,))
self.assertEqual(student.sample(37).get_shape(), (37, 3,))
_check(student_t.StudentT(df=[2., 3., 4.,], loc=2., scale=1.))
_check(student_t.StudentT(df=7., loc=[2., 3., 4.,], scale=1.))
_check(student_t.StudentT(df=7., loc=3., scale=[2., 3., 4.,]))
def testBroadcastingPdfArgs(self):
def _assert_shape(student, arg, shape):
self.assertEqual(student.log_prob(arg).get_shape(), shape)
self.assertEqual(student.prob(arg).get_shape(), shape)
def _check(student):
_assert_shape(student, 2., (3,))
xs = np.array([2., 3., 4.], dtype=np.float32)
_assert_shape(student, xs, (3,))
xs = np.array([xs])
_assert_shape(student, xs, (1, 3))
xs = xs.T
_assert_shape(student, xs, (3, 3))
_check(student_t.StudentT(df=[2., 3., 4.,], loc=2., scale=1.))
_check(student_t.StudentT(df=7., loc=[2., 3., 4.,], scale=1.))
_check(student_t.StudentT(df=7., loc=3., scale=[2., 3., 4.,]))
def _check2d(student):
_assert_shape(student, 2., (1, 3))
xs = np.array([2., 3., 4.], dtype=np.float32)
_assert_shape(student, xs, (1, 3))
xs = np.array([xs])
_assert_shape(student, xs, (1, 3))
xs = xs.T
_assert_shape(student, xs, (3, 3))
_check2d(student_t.StudentT(df=[[2., 3., 4.,]], loc=2., scale=1.))
_check2d(student_t.StudentT(df=7., loc=[[2., 3., 4.,]], scale=1.))
_check2d(student_t.StudentT(df=7., loc=3., scale=[[2., 3., 4.,]]))
def _check2d_rows(student):
_assert_shape(student, 2., (3, 1))
xs = np.array([2., 3., 4.], dtype=np.float32) # (3,)
_assert_shape(student, xs, (3, 3))
xs = np.array([xs]) # (1,3)
_assert_shape(student, xs, (3, 3))
xs = xs.T # (3,1)
_assert_shape(student, xs, (3, 1))
_check2d_rows(student_t.StudentT(df=[[2.], [3.], [4.]], loc=2., scale=1.))
_check2d_rows(student_t.StudentT(df=7., loc=[[2.], [3.], [4.]], scale=1.))
_check2d_rows(student_t.StudentT(df=7., loc=3., scale=[[2.], [3.], [4.]]))
def testMeanAllowNanStatsIsFalseWorksWhenAllBatchMembersAreDefined(self):
mu = [1., 3.3, 4.4]
student = student_t.StudentT(df=[3., 5., 7.], loc=mu, scale=[3., 2., 1.])
mean = self.evaluate(student.mean())
self.assertAllClose([1., 3.3, 4.4], mean)
def testMeanAllowNanStatsIsFalseRaisesWhenBatchMemberIsUndefined(self):
mu = [1., 3.3, 4.4]
student = student_t.StudentT(
df=[0.5, 5., 7.], loc=mu, scale=[3., 2., 1.], allow_nan_stats=False)
with self.assertRaisesOpError("x < y"):
self.evaluate(student.mean())
def testMeanAllowNanStatsIsTrueReturnsNaNForUndefinedBatchMembers(self):
mu = [-2, 0., 1., 3.3, 4.4]
sigma = [5., 4., 3., 2., 1.]
student = student_t.StudentT(
df=[0.5, 1., 3., 5., 7.], loc=mu, scale=sigma, allow_nan_stats=True)
mean = self.evaluate(student.mean())
self.assertAllClose([np.nan, np.nan, 1., 3.3, 4.4], mean)
def testVarianceAllowNanStatsTrueReturnsNaNforUndefinedBatchMembers(self):
# df = 0.5 ==> undefined mean ==> undefined variance.
# df = 1.5 ==> infinite variance.
df = [0.5, 1.5, 3., 5., 7.]
mu = [-2, 0., 1., 3.3, 4.4]
sigma = [5., 4., 3., 2., 1.]
student = student_t.StudentT(
df=df, loc=mu, scale=sigma, allow_nan_stats=True)
var = self.evaluate(student.variance())
if not stats:
return
expected_var = [
stats.t.var(d, loc=m, scale=s) for (d, m, s) in zip(df, mu, sigma)
]
# Slicing off first element due to nan/inf mismatch in different SciPy
# versions.
self.assertAllClose(expected_var[1:], var[1:])
def testVarianceAllowNanStatsFalseGivesCorrectValueForDefinedBatchMembers(
self):
# df = 1.5 ==> infinite variance.
df = [1.5, 3., 5., 7.]
mu = [0., 1., 3.3, 4.4]
sigma = [4., 3., 2., 1.]
student = student_t.StudentT(df=df, loc=mu, scale=sigma)
var = self.evaluate(student.variance())
if not stats:
return
expected_var = [
stats.t.var(d, loc=m, scale=s) for (d, m, s) in zip(df, mu, sigma)
]
self.assertAllClose(expected_var, var)
def testVarianceAllowNanStatsFalseRaisesForUndefinedBatchMembers(self):
# df <= 1 ==> variance not defined
student = student_t.StudentT(df=1., loc=0., scale=1., allow_nan_stats=False)
with self.assertRaisesOpError("x < y"):
self.evaluate(student.variance())
# df <= 1 ==> variance not defined
student = student_t.StudentT(
df=0.5, loc=0., scale=1., allow_nan_stats=False)
with self.assertRaisesOpError("x < y"):
self.evaluate(student.variance())
def testStd(self):
# Defined for all batch members.
df = [3.5, 5., 3., 5., 7.]
mu = [-2.2]
sigma = [5., 4., 3., 2., 1.]
student = student_t.StudentT(df=df, loc=mu, scale=sigma)
# Test broadcast of mu across shape of df/sigma
stddev = self.evaluate(student.stddev())
mu *= len(df)
if not stats:
return
expected_stddev = [
stats.t.std(d, loc=m, scale=s) for (d, m, s) in zip(df, mu, sigma)
]
self.assertAllClose(expected_stddev, stddev)
def testMode(self):
df = [0.5, 1., 3]
mu = [-1, 0., 1]
sigma = [5., 4., 3.]
student = student_t.StudentT(df=df, loc=mu, scale=sigma)
# Test broadcast of mu across shape of df/sigma
mode = self.evaluate(student.mode())
self.assertAllClose([-1., 0, 1], mode)
def testPdfOfSample(self):
student = student_t.StudentT(df=3., loc=np.pi, scale=1.)
num = 20000
samples = student.sample(num, seed=123456)
pdfs = student.prob(samples)
mean = student.mean()
mean_pdf = student.prob(student.mean())
sample_vals, pdf_vals, mean_val, mean_pdf_val = self.evaluate(
[samples, pdfs, student.mean(), mean_pdf])
self.assertEqual(samples.get_shape(), (num,))
self.assertEqual(pdfs.get_shape(), (num,))
self.assertEqual(mean.get_shape(), ())
self.assertNear(np.pi, np.mean(sample_vals), err=0.1)
self.assertNear(np.pi, mean_val, err=1e-6)
# Verify integral over sample*pdf ~= 1.
# Tolerance increased since eager was getting a value of 1.002041.
self._assertIntegral(sample_vals, pdf_vals, err=5e-2)
if not stats:
return
self.assertNear(stats.t.pdf(np.pi, 3., loc=np.pi), mean_pdf_val, err=1e-6)
def testFullyReparameterized(self):
df = constant_op.constant(2.0)
mu = constant_op.constant(1.0)
sigma = constant_op.constant(3.0)
with backprop.GradientTape() as tape:
tape.watch(df)
tape.watch(mu)
tape.watch(sigma)
student = student_t.StudentT(df=df, loc=mu, scale=sigma)
samples = student.sample(100)
grad_df, grad_mu, grad_sigma = tape.gradient(samples, [df, mu, sigma])
self.assertIsNotNone(grad_df)
self.assertIsNotNone(grad_mu)
self.assertIsNotNone(grad_sigma)
def testPdfOfSampleMultiDims(self):
student = student_t.StudentT(df=[7., 11.], loc=[[5.], [6.]], scale=3.)
self.assertAllEqual([], student.event_shape)
self.assertAllEqual([], self.evaluate(student.event_shape_tensor()))
self.assertAllEqual([2, 2], student.batch_shape)
self.assertAllEqual([2, 2], self.evaluate(student.batch_shape_tensor()))
num = 50000
samples = student.sample(num, seed=123456)
pdfs = student.prob(samples)
sample_vals, pdf_vals = self.evaluate([samples, pdfs])
self.assertEqual(samples.get_shape(), (num, 2, 2))
self.assertEqual(pdfs.get_shape(), (num, 2, 2))
self.assertNear(5., np.mean(sample_vals[:, 0, :]), err=0.1)
self.assertNear(6., np.mean(sample_vals[:, 1, :]), err=0.1)
self._assertIntegral(sample_vals[:, 0, 0], pdf_vals[:, 0, 0], err=0.05)
self._assertIntegral(sample_vals[:, 0, 1], pdf_vals[:, 0, 1], err=0.05)
self._assertIntegral(sample_vals[:, 1, 0], pdf_vals[:, 1, 0], err=0.05)
self._assertIntegral(sample_vals[:, 1, 1], pdf_vals[:, 1, 1], err=0.05)
if not stats:
return
self.assertNear(
stats.t.var(7., loc=0., scale=3.), # loc d.n. effect var
np.var(sample_vals[:, :, 0]),
err=1.0)
self.assertNear(
stats.t.var(11., loc=0., scale=3.), # loc d.n. effect var
np.var(sample_vals[:, :, 1]),
err=1.0)
def _assertIntegral(self, sample_vals, pdf_vals, err=1.5e-3):
s_p = zip(sample_vals, pdf_vals)
prev = (sample_vals.min() - 1000, 0)
total = 0
for k in sorted(s_p, key=lambda x: x[0]):
pair_pdf = (k[1] + prev[1]) / 2
total += (k[0] - prev[0]) * pair_pdf
prev = k
self.assertNear(1., total, err=err)
def testNegativeDofFails(self):
with self.assertRaisesOpError(r"Condition x > 0 did not hold"):
student = student_t.StudentT(
df=[2, -5.], loc=0., scale=1., validate_args=True, name="S")
self.evaluate(student.mean())
def testStudentTWithAbsDfSoftplusScale(self):
df = constant_op.constant([-3.2, -4.6])
mu = constant_op.constant([-4.2, 3.4])
sigma = constant_op.constant([-6.4, -8.8])
student = student_t.StudentTWithAbsDfSoftplusScale(
df=df, loc=mu, scale=sigma)
self.assertAllClose(
math_ops.floor(self.evaluate(math_ops.abs(df))),
self.evaluate(student.df))
self.assertAllClose(self.evaluate(mu), self.evaluate(student.loc))
self.assertAllClose(
self.evaluate(nn_ops.softplus(sigma)), self.evaluate(student.scale))
if __name__ == "__main__":
test.main()
| StudentTTest |
python | openai__openai-python | src/openai/types/chat/completion_create_params.py | {
"start": 16577,
"end": 17262
} | class ____(CompletionCreateParamsBase, total=False):
stream: Optional[Literal[False]]
"""
If set to true, the model response data will be streamed to the client as it is
generated using
[server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format).
See the
[Streaming section below](https://platform.openai.com/docs/api-reference/chat/streaming)
for more information, along with the
[streaming responses](https://platform.openai.com/docs/guides/streaming-responses)
guide for more information on how to handle the streaming events.
"""
| CompletionCreateParamsNonStreaming |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_object_position08.py | {
"start": 315,
"end": 1688
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("object_position08.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({"type": "line"})
bold = workbook.add_format({"bold": 1})
italic = workbook.add_format({"italic": 1})
chart.axis_ids = [60888960, 79670656]
data = [
[1, 2, 3, 4, 5],
[2, 4, 6, 8, 10],
[3, 6, 9, 12, 15],
]
worksheet.write("A1", "Foo", bold)
worksheet.write("B1", "Bar", italic)
worksheet.write_column("A2", data[0])
worksheet.write_column("B2", data[1])
worksheet.write_column("C2", data[2])
worksheet.set_row(12, None, None, {"hidden": True})
worksheet.set_column("F:F", None, None, {"hidden": True})
chart.add_series({"values": "=Sheet1!$A$2:$A$6"})
chart.add_series({"values": "=Sheet1!$B$2:$B$6"})
chart.add_series({"values": "=Sheet1!$C$2:$C$6"})
worksheet.insert_chart("E9", chart, {"object_position": 2})
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | getsentry__sentry | tests/sentry/workflow_engine/handlers/condition/test_latest_release_handler.py | {
"start": 636,
"end": 6228
} | class ____(ConditionTestCase):
condition = Condition.LATEST_RELEASE
payload = {
"id": LatestReleaseFilter.id,
}
def setUp(self) -> None:
super().setUp()
self.event_data = WorkflowEventData(event=self.group_event, group=self.group_event.group)
self.dc = self.create_data_condition(
type=self.condition,
comparison=True,
condition_result=True,
)
def test_dual_write(self) -> None:
dcg = self.create_data_condition_group()
dc = self.translate_to_data_condition(self.payload, dcg)
assert dc.type == self.condition
assert dc.comparison is True
assert dc.condition_result is True
assert dc.condition_group == dcg
def test_json_schema(self) -> None:
self.dc.comparison = False
self.dc.save()
self.dc.comparison = {"time": "asdf"}
with pytest.raises(ValidationError):
self.dc.save()
self.dc.comparison = "hello"
with pytest.raises(ValidationError):
self.dc.save()
def test_latest_release(self) -> None:
old_release = Release.objects.create(
organization_id=self.organization.id,
version="1",
date_added=datetime(2020, 9, 1, 3, 8, 24, 880386, tzinfo=UTC),
)
old_release.add_project(self.project)
new_release = Release.objects.create(
organization_id=self.organization.id,
version="2",
date_added=datetime(2020, 9, 2, 3, 8, 24, 880386, tzinfo=UTC),
)
new_release.add_project(self.project)
self.event.data["tags"] = (("release", new_release.version),)
self.assert_passes(self.dc, self.event_data)
def test_latest_release_no_match(self) -> None:
old_release = Release.objects.create(
organization_id=self.organization.id,
version="1",
date_added=datetime(2020, 9, 1, 3, 8, 24, 880386, tzinfo=UTC),
)
old_release.add_project(self.project)
new_release = Release.objects.create(
organization_id=self.organization.id,
version="2",
date_added=datetime(2020, 9, 2, 3, 8, 24, 880386, tzinfo=UTC),
)
new_release.add_project(self.project)
self.event.data["tags"] = (("release", old_release.version),)
self.assert_does_not_pass(self.dc, self.event_data)
def test_caching(self) -> None:
old_release = Release.objects.create(
organization_id=self.organization.id,
version="1",
date_added=datetime(2020, 9, 1, 3, 8, 24, 880386, tzinfo=UTC),
)
old_release.add_project(self.project)
self.event.data["tags"] = (("release", old_release.version),)
self.assert_passes(self.dc, self.event_data)
new_release = Release.objects.create(
organization_id=self.organization.id,
version="2",
date_added=datetime(2020, 9, 2, 3, 8, 24, 880386, tzinfo=UTC),
)
new_release.add_project(self.project)
# ensure we clear the cache after creating a new release
cache_key = get_project_release_cache_key(self.event.group.project_id)
assert cache.get(cache_key) is None
self.assert_does_not_pass(self.dc, self.event_data)
# ensure we clear the cache when a release is deleted
new_release.safe_delete()
cache_key = get_project_release_cache_key(self.event.group.project_id)
assert cache.get(cache_key) is None
# rule should pass again because the latest release is oldRelease
self.assert_passes(self.dc, self.event_data)
def test_latest_release_with_environment(self) -> None:
self.create_release(
project=self.event.group.project,
version="1",
date_added=datetime(2020, 9, 1, 3, 8, 24, 880386, tzinfo=UTC),
environments=[self.environment],
)
new_release = self.create_release(
project=self.event.group.project,
version="2",
date_added=datetime(2020, 9, 2, 3, 8, 24, 880386, tzinfo=UTC),
environments=[self.environment],
)
other_env_release = self.create_release(
project=self.event.group.project,
version="4",
date_added=datetime(2020, 9, 3, 3, 8, 24, 880386, tzinfo=UTC),
)
self.event_data = WorkflowEventData(
event=self.group_event, workflow_env=self.environment, group=self.group_event.group
)
self.event.data["tags"] = (("release", new_release.version),)
self.assert_passes(self.dc, self.event_data)
self.event.data["tags"] = (("release", other_env_release.version),)
self.assert_does_not_pass(self.dc, self.event_data)
@patch("sentry.search.utils.get_latest_release")
def test_release_does_not_exist(self, mock_get_latest_release: MagicMock) -> None:
mock_get_latest_release.side_effect = Release.DoesNotExist
self.assert_does_not_pass(self.dc, self.event_data)
@patch.object(Release.objects, "get", return_value=None)
def test_no_release_object(self, mock_get: MagicMock) -> None:
newRelease = Release.objects.create(
organization_id=self.organization.id,
version="2",
date_added=datetime(2020, 9, 2, 3, 8, 24, 880386, tzinfo=UTC),
)
newRelease.add_project(self.project)
self.assert_does_not_pass(self.dc, self.event_data)
| TestLatestReleaseCondition |
python | HypothesisWorks__hypothesis | hypothesis-python/src/hypothesis/database.py | {
"start": 5557,
"end": 12729
} | class ____(metaclass=_EDMeta):
"""
A Hypothesis database, for use in |settings.database|.
Hypothesis automatically saves failures to the database set in
|settings.database|. The next time the test is run, Hypothesis will replay
any failures from the database in |settings.database| for that test (in
|Phase.reuse|).
The database is best thought of as a cache that you never need to invalidate.
Entries may be transparently dropped when upgrading your Hypothesis version
or changing your test. Do not rely on the database for correctness; to ensure
Hypothesis always tries an input, use |@example|.
A Hypothesis database is a simple mapping of bytes to sets of bytes. Hypothesis
provides several concrete database subclasses. To write your own database class,
see :doc:`/how-to/custom-database`.
Change listening
----------------
An optional extension to |ExampleDatabase| is change listening. On databases
which support change listening, calling |ExampleDatabase.add_listener| adds
a function as a change listener, which will be called whenever a value is
added, deleted, or moved inside the database. See |ExampleDatabase.add_listener|
for details.
All databases in Hypothesis support change listening. Custom database classes
are not required to support change listening, though they will not be compatible
with features that require change listening until they do so.
.. note::
While no Hypothesis features currently require change listening, change
listening is required by `HypoFuzz <https://hypofuzz.com/>`_.
Database methods
----------------
Required methods:
* |ExampleDatabase.save|
* |ExampleDatabase.fetch|
* |ExampleDatabase.delete|
Optional methods:
* |ExampleDatabase.move|
Change listening methods:
* |ExampleDatabase.add_listener|
* |ExampleDatabase.remove_listener|
* |ExampleDatabase.clear_listeners|
* |ExampleDatabase._start_listening|
* |ExampleDatabase._stop_listening|
* |ExampleDatabase._broadcast_change|
"""
def __init__(self) -> None:
self._listeners: list[ListenerT] = []
@abc.abstractmethod
def save(self, key: bytes, value: bytes) -> None:
"""Save ``value`` under ``key``.
If ``value`` is already present in ``key``, silently do nothing.
"""
raise NotImplementedError(f"{type(self).__name__}.save")
@abc.abstractmethod
def fetch(self, key: bytes) -> Iterable[bytes]:
"""Return an iterable over all values matching this key."""
raise NotImplementedError(f"{type(self).__name__}.fetch")
@abc.abstractmethod
def delete(self, key: bytes, value: bytes) -> None:
"""Remove ``value`` from ``key``.
If ``value`` is not present in ``key``, silently do nothing.
"""
raise NotImplementedError(f"{type(self).__name__}.delete")
def move(self, src: bytes, dest: bytes, value: bytes) -> None:
"""
Move ``value`` from key ``src`` to key ``dest``.
Equivalent to ``delete(src, value)`` followed by ``save(src, value)``,
but may have a more efficient implementation.
Note that ``value`` will be inserted at ``dest`` regardless of whether
it is currently present at ``src``.
"""
if src == dest:
self.save(src, value)
return
self.delete(src, value)
self.save(dest, value)
def add_listener(self, f: ListenerT, /) -> None:
"""
Add a change listener. ``f`` will be called whenever a value is saved,
deleted, or moved in the database.
``f`` can be called with two different event values:
* ``("save", (key, value))``
* ``("delete", (key, value))``
where ``key`` and ``value`` are both ``bytes``.
There is no ``move`` event. Instead, a move is broadcasted as a
``delete`` event followed by a ``save`` event.
For the ``delete`` event, ``value`` may be ``None``. This might occur if
the database knows that a deletion has occurred in ``key``, but does not
know what value was deleted.
"""
had_listeners = bool(self._listeners)
self._listeners.append(f)
if not had_listeners:
self._start_listening()
def remove_listener(self, f: ListenerT, /) -> None:
"""
Removes ``f`` from the list of change listeners.
If ``f`` is not in the list of change listeners, silently do nothing.
"""
if f not in self._listeners:
return
self._listeners.remove(f)
if not self._listeners:
self._stop_listening()
def clear_listeners(self) -> None:
"""Remove all change listeners."""
had_listeners = bool(self._listeners)
self._listeners.clear()
if had_listeners:
self._stop_listening()
def _broadcast_change(self, event: ListenerEventT) -> None:
"""
Called when a value has been either added to or deleted from a key in
the underlying database store. The possible values for ``event`` are:
* ``("save", (key, value))``
* ``("delete", (key, value))``
``value`` may be ``None`` for the ``delete`` event, indicating we know
that some value was deleted under this key, but not its exact value.
Note that you should not assume your instance is the only reference to
the underlying database store. For example, if two instances of
|DirectoryBasedExampleDatabase| reference the same directory,
_broadcast_change should be called whenever a file is added or removed
from the directory, even if that database was not responsible for
changing the file.
"""
for listener in self._listeners:
listener(event)
def _start_listening(self) -> None:
"""
Called when the database adds a change listener, and did not previously
have any change listeners. Intended to allow databases to wait to start
expensive listening operations until necessary.
``_start_listening`` and ``_stop_listening`` are guaranteed to alternate,
so you do not need to handle the case of multiple consecutive
``_start_listening`` calls without an intermediate ``_stop_listening``
call.
"""
warnings.warn(
f"{self.__class__} does not support listening for changes",
HypothesisWarning,
stacklevel=4,
)
def _stop_listening(self) -> None:
"""
Called whenever no change listeners remain on the database.
``_stop_listening`` and ``_start_listening`` are guaranteed to alternate,
so you do not need to handle the case of multiple consecutive
``_stop_listening`` calls without an intermediate ``_start_listening``
call.
"""
warnings.warn(
f"{self.__class__} does not support stopping listening for changes",
HypothesisWarning,
stacklevel=4,
)
| ExampleDatabase |
python | GoogleCloudPlatform__python-docs-samples | appengine/standard/ndb/overview/main.py | {
"start": 2893,
"end": 3543
} | class ____(webapp2.RequestHandler):
def post(self):
# We set the parent key on each 'Greeting' to ensure each guestbook's
# greetings are in the same entity group.
guestbook_name = self.request.get("guestbook_name")
greeting = Greeting(
parent=ndb.Key("Book", guestbook_name or "*notitle*"),
content=self.request.get("content"),
)
greeting.put()
# [END gae_ndb_overview_submit]
self.redirect("/?" + urllib.urlencode({"guestbook_name": guestbook_name}))
app = webapp2.WSGIApplication([("/", MainPage), ("/sign", SubmitForm)])
# [END gae_ndb_overview]
| SubmitForm |
python | davidhalter__jedi | jedi/inference/value/iterable.py | {
"start": 4559,
"end": 6373
} | class ____:
@inference_state_method_cache()
def _get_comp_for_context(self, parent_context, comp_for):
return CompForContext(parent_context, comp_for)
def _nested(self, comp_fors, parent_context=None):
comp_for = comp_fors[0]
is_async = comp_for.parent.type == 'comp_for'
input_node = comp_for.children[3]
parent_context = parent_context or self._defining_context
input_types = parent_context.infer_node(input_node)
cn = ContextualizedNode(parent_context, input_node)
iterated = input_types.iterate(cn, is_async=is_async)
exprlist = comp_for.children[1]
for i, lazy_value in enumerate(iterated):
types = lazy_value.infer()
dct = unpack_tuple_to_dict(parent_context, types, exprlist)
context = self._get_comp_for_context(
parent_context,
comp_for,
)
with context.predefine_names(comp_for, dct):
try:
yield from self._nested(comp_fors[1:], context)
except IndexError:
iterated = context.infer_node(self._entry_node)
if self.array_type == 'dict':
yield iterated, context.infer_node(self._value_node)
else:
yield iterated
@inference_state_method_cache(default=[])
@to_list
def _iterate(self):
comp_fors = tuple(get_sync_comp_fors(self._sync_comp_for_node))
yield from self._nested(comp_fors)
def py__iter__(self, contextualized_node=None):
for set_ in self._iterate():
yield LazyKnownValues(set_)
def __repr__(self):
return "<%s of %s>" % (type(self).__name__, self._sync_comp_for_node)
| ComprehensionMixin |
python | run-llama__llama_index | llama-index-core/llama_index/core/llms/mock.py | {
"start": 638,
"end": 2945
} | class ____(CustomLLM):
max_tokens: Optional[int]
def __init__(
self,
max_tokens: Optional[int] = None,
callback_manager: Optional[CallbackManager] = None,
system_prompt: Optional[str] = None,
messages_to_prompt: Optional[MessagesToPromptType] = None,
completion_to_prompt: Optional[CompletionToPromptType] = None,
pydantic_program_mode: PydanticProgramMode = PydanticProgramMode.DEFAULT,
) -> None:
super().__init__(
max_tokens=max_tokens,
callback_manager=callback_manager or CallbackManager([]),
system_prompt=system_prompt,
messages_to_prompt=messages_to_prompt,
completion_to_prompt=completion_to_prompt,
pydantic_program_mode=pydantic_program_mode,
)
@classmethod
def class_name(cls) -> str:
return "MockLLM"
@property
def metadata(self) -> LLMMetadata:
return LLMMetadata(num_output=self.max_tokens or -1)
def _generate_text(self, length: int) -> str:
return " ".join(["text" for _ in range(length)])
@llm_completion_callback()
def complete(
self, prompt: str, formatted: bool = False, **kwargs: Any
) -> CompletionResponse:
response_text = (
self._generate_text(self.max_tokens) if self.max_tokens else prompt
)
return CompletionResponse(
text=response_text,
)
@llm_completion_callback()
def stream_complete(
self, prompt: str, formatted: bool = False, **kwargs: Any
) -> CompletionResponseGen:
def gen_prompt() -> CompletionResponseGen:
if not prompt:
yield CompletionResponse(text="", delta="")
return
for ch in prompt:
yield CompletionResponse(
text=prompt,
delta=ch,
)
def gen_response(max_tokens: int) -> CompletionResponseGen:
for i in range(max_tokens):
response_text = self._generate_text(i)
yield CompletionResponse(
text=response_text,
delta="text ",
)
return gen_response(self.max_tokens) if self.max_tokens else gen_prompt()
| MockLLM |
python | eriklindernoren__ML-From-Scratch | mlfromscratch/supervised_learning/k_nearest_neighbors.py | {
"start": 119,
"end": 1265
} | class ____():
""" K Nearest Neighbors classifier.
Parameters:
-----------
k: int
The number of closest neighbors that will determine the class of the
sample that we wish to predict.
"""
def __init__(self, k=5):
self.k = k
def _vote(self, neighbor_labels):
""" Return the most common class among the neighbor samples """
counts = np.bincount(neighbor_labels.astype('int'))
return counts.argmax()
def predict(self, X_test, X_train, y_train):
y_pred = np.empty(X_test.shape[0])
# Determine the class of each sample
for i, test_sample in enumerate(X_test):
# Sort the training samples by their distance to the test sample and get the K nearest
idx = np.argsort([euclidean_distance(test_sample, x) for x in X_train])[:self.k]
# Extract the labels of the K nearest neighboring training samples
k_nearest_neighbors = np.array([y_train[i] for i in idx])
# Label sample as the most common class label
y_pred[i] = self._vote(k_nearest_neighbors)
return y_pred
| KNN |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.