language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | django__django | tests/test_runner_apps/simple/tests.py | {
"start": 659,
"end": 802
} | class ____(TestCase):
def test_1(self):
pass
def test_2(self):
pass
def test_3_test(self):
pass
| UnittestCase2 |
python | keon__algorithms | tests/test_backtrack.py | {
"start": 6200,
"end": 7062
} | class ____(unittest.TestCase):
def test_generate_abbreviations(self):
word1 = "word"
answer1 = ['word', 'wor1', 'wo1d', 'wo2', 'w1rd', 'w1r1', 'w2d', 'w3',
'1ord', '1or1', '1o1d', '1o2', '2rd', '2r1', '3d', '4']
self.assertEqual(sorted(generate_abbreviations(word1)),
sorted(answer1))
word2 = "hello"
answer2 = ['hello', 'hell1', 'hel1o', 'hel2', 'he1lo', 'he1l1', 'he2o',
'he3', 'h1llo', 'h1ll1', 'h1l1o', 'h1l2', 'h2lo', 'h2l1',
'h3o', 'h4', '1ello', '1ell1', '1el1o', '1el2', '1e1lo',
'1e1l1', '1e2o', '1e3', '2llo', '2ll1', '2l1o', '2l2',
'3lo', '3l1', '4o', '5']
self.assertEqual(sorted(generate_abbreviations(word2)),
sorted(answer2))
| TestGenerateAbbreviations |
python | pytorch__pytorch | torch/testing/_internal/distributed/_shard/sharded_tensor/__init__.py | {
"start": 298,
"end": 3212
} | class ____(MultiProcessTestCase):
@property
def world_size(self):
return TEST_GPU_NUM
def init_pg(self, backend="nccl"):
if backend not in ["nccl", "gloo", "mpi", "hccl"]:
raise RuntimeError(f"Backend {backend} not supported!")
dist.init_process_group(
backend=backend,
world_size=self.world_size,
rank=self.rank,
init_method=f"file://{self.file_name}",
)
# set device for nccl pg for collectives
if backend == "nccl":
torch.cuda.set_device(self.rank)
def init_rpc(self):
rpc_backend_options = rpc.TensorPipeRpcBackendOptions(
_transports=tp_transports()
)
rpc_backend_options.init_method = f"file://{self.file_name}"
for rank in range(self.world_size):
rpc_backend_options.set_device_map(
f"worker{rank}", {rank: self.rank, self.rank: rank}
)
rpc.init_rpc(
name=f"worker{self.rank:d}",
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=rpc_backend_options,
)
def init_comms(self, init_rpc=True, backend="nccl"):
if init_rpc:
self.init_rpc()
self.init_pg(backend=backend)
def destroy_comms(self, destroy_rpc=True):
# Wait for all ranks to reach here before starting shutdown.
dist.barrier()
if destroy_rpc:
rpc.shutdown()
dist.destroy_process_group()
def setUp(self) -> None:
super().setUp()
self._spawn_processes()
def assert_sharded_tensor_equal(self, st1, st2):
st1_local_shards = st1.local_shards()
st2_local_shards = st2.local_shards()
self.assertEqual(len(st1_local_shards), len(st2_local_shards))
for i, st1_local_shard in enumerate(st1_local_shards):
self.assertEqual(st1_local_shard.tensor, st2_local_shards[i].tensor)
self.assertEqual(st1_local_shard.metadata, st2_local_shards[i].metadata)
self.assertEqual(st1.metadata(), st2.metadata())
self.assertEqual(st1.sharding_spec(), st2.sharding_spec())
self.assertEqual(len(st1.remote_shards()), len(st2.remote_shards()))
# wrapper to initialize comms (processgroup + rpc)
def with_comms(func=None, init_rpc=True, backend="nccl"):
if func is None:
return partial(
with_comms,
init_rpc=init_rpc,
backend=backend,
)
@wraps(func)
def wrapper(self, *args, **kwargs):
if backend == "nccl" and torch.cuda.device_count() < self.world_size:
sys.exit(TEST_SKIPS[f"multi-gpu-{self.world_size}"].exit_code)
self.init_comms(init_rpc=init_rpc, backend=backend)
func(self, *args, **kwargs)
self.destroy_comms(destroy_rpc=init_rpc)
return wrapper
| ShardedTensorTestBase |
python | pydantic__pydantic | tests/mypy/outputs/mypy-default_ini/plugin_success.py | {
"start": 5556,
"end": 6348
} | class ____:
foo: InitVar[str]
bar: str
MyDataClass(foo='foo', bar='bar')
def get_my_custom_validator(field_name: str) -> Any:
@validator(field_name, allow_reuse=True)
def my_custom_validator(cls: Any, v: int) -> int:
return v
return my_custom_validator
def foo() -> None:
class MyModel(BaseModel):
number: int
custom_validator = get_my_custom_validator('number') # type: ignore[pydantic-field]
# MYPY: error: Unused "type: ignore" comment [unused-ignore]
@model_validator(mode='before')
@classmethod
def validate_before(cls, values: Any) -> Any:
return values
@model_validator(mode='after')
def validate_after(self) -> Self:
return self
MyModel(number=2)
| MyDataClass |
python | gevent__gevent | src/gevent/pool.py | {
"start": 11662,
"end": 19702
} | class ____(GroupMappingMixin):
"""
Maintain a group of greenlets that are still running, without
limiting their number.
Links to each item and removes it upon notification.
Groups can be iterated to discover what greenlets they are tracking,
they can be tested to see if they contain a greenlet, and they know the
number (len) of greenlets they are tracking. If they are not tracking any
greenlets, they are False in a boolean context.
.. attribute:: greenlet_class
Either :class:`gevent.Greenlet` (the default) or a subclass.
These are the type of
object we will :meth:`spawn`. This can be
changed on an instance or in a subclass.
"""
greenlet_class = Greenlet
def __init__(self, *args):
assert len(args) <= 1, args
self.greenlets = set(*args)
if args:
for greenlet in args[0]:
greenlet.rawlink(self._discard)
# each item we kill we place in dying, to avoid killing the same greenlet twice
self.dying = set()
self._empty_event = Event()
self._empty_event.set()
def __repr__(self):
return '<%s at 0x%x %s>' % (self.__class__.__name__, id(self), self.greenlets)
def __len__(self):
"""
Answer how many greenlets we are tracking. Note that if we are empty,
we are False in a boolean context.
"""
return len(self.greenlets)
def __contains__(self, item):
"""
Answer if we are tracking the given greenlet.
"""
return item in self.greenlets
def __iter__(self):
"""
Iterate across all the greenlets we are tracking, in no particular order.
"""
return iter(self.greenlets)
def add(self, greenlet):
"""
Begin tracking the *greenlet*.
If this group is :meth:`full`, then this method may block
until it is possible to track the greenlet.
Typically the *greenlet* should **not** be started when
it is added because if this object blocks in this method,
then the *greenlet* may run to completion before it is tracked.
"""
try:
rawlink = greenlet.rawlink
except AttributeError:
pass # non-Greenlet greenlet, like MAIN
else:
rawlink(self._discard)
self.greenlets.add(greenlet)
self._empty_event.clear()
def _discard(self, greenlet):
self.greenlets.discard(greenlet)
self.dying.discard(greenlet)
if not self.greenlets:
self._empty_event.set()
def discard(self, greenlet):
"""
Stop tracking the greenlet.
"""
self._discard(greenlet)
try:
unlink = greenlet.unlink
except AttributeError:
pass # non-Greenlet greenlet, like MAIN
else:
unlink(self._discard)
def start(self, greenlet):
"""
Add the **unstarted** *greenlet* to the collection of greenlets
this group is monitoring, and then start it.
"""
self.add(greenlet)
greenlet.start()
def spawn(self, *args, **kwargs): # pylint:disable=arguments-differ
"""
Begin a new greenlet with the given arguments (which are passed
to the greenlet constructor) and add it to the collection of greenlets
this group is monitoring.
:return: The newly started greenlet.
"""
greenlet = self.greenlet_class(*args, **kwargs)
self.start(greenlet)
return greenlet
# def close(self):
# """Prevents any more tasks from being submitted to the pool"""
# self.add = RaiseException("This %s has been closed" % self.__class__.__name__)
def join(self, timeout=None, raise_error=False):
"""
Wait for this group to become empty *at least once*.
If there are no greenlets in the group, returns immediately.
.. note:: By the time the waiting code (the caller of this
method) regains control, a greenlet may have been added to
this group, and so this object may no longer be empty. (That
is, ``group.join(); assert len(group) == 0`` is not
guaranteed to hold.) This method only guarantees that the group
reached a ``len`` of 0 at some point.
:keyword bool raise_error: If True (*not* the default), if any
greenlet that finished while the join was in progress raised
an exception, that exception will be raised to the caller of
this method. If multiple greenlets raised exceptions, which
one gets re-raised is not determined. Only greenlets currently
in the group when this method is called are guaranteed to
be checked for exceptions.
:return bool: A value indicating whether this group became empty.
If the timeout is specified and the group did not become empty
during that timeout, then this will be a false value. Otherwise
it will be a true value.
.. versionchanged:: 1.2a1
Add the return value.
"""
greenlets = list(self.greenlets) if raise_error else ()
result = self._empty_event.wait(timeout=timeout)
for greenlet in greenlets:
if greenlet.exception is not None:
if hasattr(greenlet, '_raise_exception'):
greenlet._raise_exception()
raise greenlet.exception
return result
def kill(self, exception=GreenletExit, block=True, timeout=None):
"""
Kill all greenlets being tracked by this group.
"""
timer = Timeout._start_new_or_dummy(timeout)
try:
while self.greenlets:
for greenlet in list(self.greenlets):
if greenlet in self.dying:
continue
try:
kill = greenlet.kill
except AttributeError:
_kill(greenlet, exception)
else:
kill(exception, block=False)
self.dying.add(greenlet)
if not block:
break
joinall(self.greenlets)
except Timeout as ex:
if ex is not timer:
raise
finally:
timer.cancel()
def killone(self, greenlet, exception=GreenletExit, block=True, timeout=None):
"""
If the given *greenlet* is running and being tracked by this group,
kill it.
"""
if greenlet not in self.dying and greenlet in self.greenlets:
greenlet.kill(exception, block=False)
self.dying.add(greenlet)
if block:
greenlet.join(timeout)
def full(self):
"""
Return a value indicating whether this group can track more greenlets.
In this implementation, because there are no limits on the number of
tracked greenlets, this will always return a ``False`` value.
"""
return False
def wait_available(self, timeout=None):
"""
Block until it is possible to :meth:`spawn` a new greenlet.
In this implementation, because there are no limits on the number
of tracked greenlets, this will always return immediately.
"""
# MappingMixin methods
def _apply_immediately(self):
# If apply() is called from one of our own
# worker greenlets, don't spawn a new one---if we're full, that
# could deadlock.
return getcurrent() in self
def _apply_async_cb_spawn(self, callback, result):
Greenlet.spawn(callback, result)
def _apply_async_use_greenlet(self):
# cannot call self.spawn() because it will block, so
# use a fresh, untracked greenlet that when run will
# (indirectly) call self.spawn() for us.
return self.full()
| Group |
python | numpy__numpy | numpy/random/tests/test_random.py | {
"start": 2403,
"end": 3808
} | class ____:
def test_basic(self):
random.multinomial(100, [0.2, 0.8])
def test_zero_probability(self):
random.multinomial(100, [0.2, 0.8, 0.0, 0.0, 0.0])
def test_int_negative_interval(self):
assert_(-5 <= random.randint(-5, -1) < -1)
x = random.randint(-5, -1, 5)
assert_(np.all(-5 <= x))
assert_(np.all(x < -1))
def test_size(self):
# gh-3173
p = [0.5, 0.5]
assert_equal(np.random.multinomial(1, p, np.uint32(1)).shape, (1, 2))
assert_equal(np.random.multinomial(1, p, np.uint32(1)).shape, (1, 2))
assert_equal(np.random.multinomial(1, p, np.uint32(1)).shape, (1, 2))
assert_equal(np.random.multinomial(1, p, [2, 2]).shape, (2, 2, 2))
assert_equal(np.random.multinomial(1, p, (2, 2)).shape, (2, 2, 2))
assert_equal(np.random.multinomial(1, p, np.array((2, 2))).shape,
(2, 2, 2))
assert_raises(TypeError, np.random.multinomial, 1, p,
float(1))
def test_multidimensional_pvals(self):
assert_raises(ValueError, np.random.multinomial, 10, [[0, 1]])
assert_raises(ValueError, np.random.multinomial, 10, [[0], [1]])
assert_raises(ValueError, np.random.multinomial, 10, [[[0], [1]], [[1], [0]]])
assert_raises(ValueError, np.random.multinomial, 10, np.array([[0, 1], [1, 0]]))
| TestMultinomial |
python | doocs__leetcode | solution/1600-1699/1671.Minimum Number of Removals to Make Mountain Array/Solution.py | {
"start": 0,
"end": 559
} | class ____:
def minimumMountainRemovals(self, nums: List[int]) -> int:
n = len(nums)
left = [1] * n
right = [1] * n
for i in range(1, n):
for j in range(i):
if nums[i] > nums[j]:
left[i] = max(left[i], left[j] + 1)
for i in range(n - 2, -1, -1):
for j in range(i + 1, n):
if nums[i] > nums[j]:
right[i] = max(right[i], right[j] + 1)
return n - max(a + b - 1 for a, b in zip(left, right) if a > 1 and b > 1)
| Solution |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 530975,
"end": 531292
} | class ____(sgqlc.types.Type):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("cursor", "node")
cursor = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="cursor")
node = sgqlc.types.Field("ProjectView", graphql_name="node")
| ProjectViewEdge |
python | scipy__scipy | scipy/optimize/tests/test__basinhopping.py | {
"start": 13552,
"end": 14152
} | class ____:
def setup_method(self):
self.stepsize = 1.0
self.N = 300000
def test_random(self):
# the mean should be 0
# the variance should be (2*stepsize)**2 / 12
# note these tests are random, they will fail from time to time
rng = np.random.RandomState(0)
x0 = np.zeros([self.N])
displace = RandomDisplacement(stepsize=self.stepsize, rng=rng)
x = displace(x0)
v = (2. * self.stepsize) ** 2 / 12
assert_almost_equal(np.mean(x), 0., 1)
assert_almost_equal(np.var(x), v, 1)
| Test_RandomDisplacement |
python | getsentry__sentry | tests/sentry/models/test_project.py | {
"start": 27397,
"end": 29960
} | class ____(TestCase):
"""
These tests validate that the project model will correctly merge the
options from the project and the project template.
When returning getting options for a project the following hierarchy is used:
- Project
- Project Template
- Default
If a project has a template option set, it will override the default.
If a project has an option set, it will override the template option.
"""
def setUp(self) -> None:
super().setUp()
self.option_key = "sentry:test_data"
self.project = self.create_project()
self.project_template = self.create_project_template(organization=self.project.organization)
self.project.template = self.project_template
def tearDown(self) -> None:
super().tearDown()
self.project_template.delete()
self.project.delete()
def test_get_option(self) -> None:
assert self.project.get_option(self.option_key) is None
ProjectOption.objects.set_value(self.project, self.option_key, True)
assert self.project.get_option(self.option_key) is True
@skip("Template feature is not active at the moment")
def test_get_template_option(self) -> None:
assert self.project.get_option(self.option_key) is None
ProjectTemplateOption.objects.set_value(self.project_template, self.option_key, "test")
assert self.project.get_option(self.option_key) == "test"
def test_get_option__override_template(self) -> None:
assert self.project.get_option(self.option_key) is None
ProjectOption.objects.set_value(self.project, self.option_key, True)
ProjectTemplateOption.objects.set_value(self.project_template, self.option_key, "test")
assert self.project.get_option(self.option_key) is True
def test_get_option__without_template(self) -> None:
self.project.template = None
assert self.project.get_option(self.option_key) is None
ProjectTemplateOption.objects.set_value(self.project_template, self.option_key, "test")
assert self.project.get_option(self.option_key) is None
def test_get_option__without_template_and_value(self) -> None:
self.project.template = None
assert self.project.get_option(self.option_key) is None
ProjectOption.objects.set_value(self.project, self.option_key, True)
ProjectTemplateOption.objects.set_value(self.project_template, self.option_key, "test")
assert self.project.get_option(self.option_key) is True
| ProjectOptionsTests |
python | pandas-dev__pandas | pandas/tests/scalar/interval/test_arithmetic.py | {
"start": 4829,
"end": 5937
} | class ____:
def test_interval_equal(self):
assert Interval(0, 1) == Interval(0, 1, closed="right")
assert Interval(0, 1) != Interval(0, 1, closed="left")
assert Interval(0, 1) != 0
def test_interval_comparison(self):
msg = (
"'<' not supported between instances of "
"'pandas._libs.interval.Interval' and 'int'"
)
with pytest.raises(TypeError, match=msg):
Interval(0, 1) < 2
assert Interval(0, 1) < Interval(1, 2)
assert Interval(0, 1) < Interval(0, 2)
assert Interval(0, 1) < Interval(0.5, 1.5)
assert Interval(0, 1) <= Interval(0, 1)
assert Interval(0, 1) > Interval(-1, 2)
assert Interval(0, 1) >= Interval(0, 1)
def test_equality_comparison_broadcasts_over_array(self):
# https://github.com/pandas-dev/pandas/issues/35931
interval = Interval(0, 1)
arr = np.array([interval, interval])
result = interval == arr
expected = np.array([True, True])
tm.assert_numpy_array_equal(result, expected)
| TestIntervalComparisons |
python | redis__redis-py | redis/exceptions.py | {
"start": 1787,
"end": 1963
} | class ____(ResponseError):
"""
An error to indicate that the wrong number of args
were sent to the AUTH command
"""
pass
| AuthenticationWrongNumberOfArgsError |
python | google__jax | jax/_src/scipy/stats/kde.py | {
"start": 1069,
"end": 10240
} | class ____:
"""Gaussian Kernel Density Estimator
JAX implementation of :class:`scipy.stats.gaussian_kde`.
Parameters:
dataset: arraylike, real-valued. Data from which to estimate the distribution.
If 1D, shape is (n_data,). If 2D, shape is (n_dimensions, n_data).
bw_method: string, scalar, or callable. Either "scott", "silverman", a scalar
value, or a callable function which takes ``self`` as a parameter.
weights: arraylike, optional. Weights of the same shape as the dataset.
"""
neff: Any
dataset: Any
weights: Any
covariance: Any
inv_cov: Any
def __init__(self, dataset, bw_method=None, weights=None):
check_arraylike("gaussian_kde", dataset)
dataset = jnp.atleast_2d(dataset)
if dtypes.issubdtype(lax.dtype(dataset), np.complexfloating):
raise NotImplementedError("gaussian_kde does not support complex data")
if not dataset.size > 1:
raise ValueError("`dataset` input should have multiple elements.")
d, n = dataset.shape
if weights is not None:
check_arraylike("gaussian_kde", weights)
dataset, weights = promote_dtypes_inexact(dataset, weights)
weights = jnp.atleast_1d(weights)
weights /= jnp.sum(weights)
if weights.ndim != 1:
raise ValueError("`weights` input should be one-dimensional.")
if len(weights) != n:
raise ValueError("`weights` input should be of length n")
else:
dataset, = promote_dtypes_inexact(dataset)
weights = jnp.full(n, 1.0 / n, dtype=dataset.dtype)
self._setattr("dataset", dataset)
self._setattr("weights", weights)
neff = self._setattr("neff", 1 / jnp.sum(weights**2))
bw_method = "scott" if bw_method is None else bw_method
if bw_method == "scott":
factor = jnp.power(neff, -1. / (d + 4))
elif bw_method == "silverman":
factor = jnp.power(neff * (d + 2) / 4.0, -1. / (d + 4))
elif jnp.isscalar(bw_method) and not isinstance(bw_method, str):
factor = bw_method
elif callable(bw_method):
factor = bw_method(self)
else:
raise ValueError(
"`bw_method` should be 'scott', 'silverman', a scalar, or a callable."
)
data_covariance = jnp.atleast_2d(
jnp.cov(dataset, rowvar=1, bias=False, aweights=weights))
data_inv_cov = jnp.linalg.inv(data_covariance)
covariance = data_covariance * factor**2
inv_cov = data_inv_cov / factor**2
self._setattr("covariance", covariance)
self._setattr("inv_cov", inv_cov)
def _setattr(self, name, value):
# Frozen dataclasses don't support setting attributes so we have to
# overload that operation here as they do in the dataclass implementation
object.__setattr__(self, name, value)
return value
def tree_flatten(self):
return ((self.neff, self.dataset, self.weights, self.covariance,
self.inv_cov), None)
@classmethod
def tree_unflatten(cls, aux_data, children):
del aux_data
kde = cls.__new__(cls)
kde._setattr("neff", children[0])
kde._setattr("dataset", children[1])
kde._setattr("weights", children[2])
kde._setattr("covariance", children[3])
kde._setattr("inv_cov", children[4])
return kde
@property
def d(self):
return self.dataset.shape[0]
@property
def n(self):
return self.dataset.shape[1]
def evaluate(self, points):
"""Evaluate the Gaussian KDE on the given points."""
check_arraylike("evaluate", points)
points = self._reshape_points(points)
result = _gaussian_kernel_eval(False, self.dataset.T, self.weights[:, None],
points.T, self.inv_cov)
return result[:, 0]
def __call__(self, points):
return self.evaluate(points)
def integrate_gaussian(self, mean, cov):
"""Integrate the distribution weighted by a Gaussian."""
mean = jnp.atleast_1d(jnp.squeeze(mean))
cov = jnp.atleast_2d(cov)
if mean.shape != (self.d,):
raise ValueError(f"mean does not have dimension {self.d}")
if cov.shape != (self.d, self.d):
raise ValueError(f"covariance does not have dimension {self.d}")
chol = linalg.cho_factor(self.covariance + cov)
norm = jnp.sqrt(2 * np.pi)**self.d * jnp.prod(jnp.diag(chol[0]))
norm = 1.0 / norm
return _gaussian_kernel_convolve(chol, norm, self.dataset, self.weights,
mean)
def integrate_box_1d(self, low, high):
"""Integrate the distribution over the given limits."""
if self.d != 1:
raise ValueError("integrate_box_1d() only handles 1D pdfs")
if np.ndim(low) != 0 or np.ndim(high) != 0:
raise ValueError(
"the limits of integration in integrate_box_1d must be scalars")
sigma = jnp.squeeze(jnp.sqrt(self.covariance))
low = jnp.squeeze((low - self.dataset) / sigma)
high = jnp.squeeze((high - self.dataset) / sigma)
return jnp.sum(self.weights * (special.ndtr(high) - special.ndtr(low)))
def integrate_kde(self, other):
"""Integrate the product of two Gaussian KDE distributions."""
if other.d != self.d:
raise ValueError("KDEs are not the same dimensionality")
chol = linalg.cho_factor(self.covariance + other.covariance)
norm = jnp.sqrt(2 * np.pi)**self.d * jnp.prod(jnp.diag(chol[0]))
norm = 1.0 / norm
sm, lg = (self, other) if self.n < other.n else (other, self)
result = api.vmap(partial(_gaussian_kernel_convolve, chol, norm, lg.dataset,
lg.weights),
in_axes=1)(sm.dataset)
return jnp.sum(result * sm.weights)
def resample(self, key, shape=()):
r"""Randomly sample a dataset from the estimated pdf
Args:
key: a PRNG key used as the random key.
shape: optional, a tuple of nonnegative integers specifying the result
batch shape; that is, the prefix of the result shape excluding the last
axis.
Returns:
The resampled dataset as an array with shape `(d,) + shape`.
"""
ind_key, eps_key = random.split(key)
ind = random.choice(ind_key, self.n, shape=shape, p=self.weights)
eps = random.multivariate_normal(eps_key,
jnp.zeros(self.d, self.covariance.dtype),
self.covariance,
shape=shape,
dtype=self.dataset.dtype).T
return self.dataset[:, ind] + eps
def pdf(self, x):
"""Probability density function"""
return self.evaluate(x)
def logpdf(self, x):
"""Log probability density function"""
check_arraylike("logpdf", x)
x = self._reshape_points(x)
result = _gaussian_kernel_eval(True, self.dataset.T, self.weights[:, None],
x.T, self.inv_cov)
return result[:, 0]
def integrate_box(self, low_bounds, high_bounds, maxpts=None):
"""This method is not implemented in the JAX interface."""
del low_bounds, high_bounds, maxpts
raise NotImplementedError(
"only 1D box integrations are supported; use `integrate_box_1d`")
def set_bandwidth(self, bw_method=None):
"""This method is not implemented in the JAX interface."""
del bw_method
raise NotImplementedError(
"dynamically changing the bandwidth method is not supported")
def _reshape_points(self, points):
if dtypes.issubdtype(lax.dtype(points), np.complexfloating):
raise NotImplementedError(
"gaussian_kde does not support complex coordinates")
points = jnp.atleast_2d(points)
d, m = points.shape
if d != self.d:
if d == 1 and m == self.d:
points = jnp.reshape(points, (self.d, 1))
else:
raise ValueError(
"points have dimension {}, dataset has dimension {}".format(
d, self.d))
return points
def _gaussian_kernel_convolve(chol, norm, target, weights, mean):
diff = target - mean[:, None]
alpha = linalg.cho_solve(chol, diff)
arg = 0.5 * jnp.sum(diff * alpha, axis=0)
return norm * jnp.sum(jnp.exp(-arg) * weights)
@api.jit(static_argnums=0)
def _gaussian_kernel_eval(in_log, points, values, xi, precision):
points, values, xi, precision = promote_dtypes_inexact(
points, values, xi, precision)
d = points.shape[1]
if xi.shape[1] != d:
raise ValueError("points and xi must have same trailing dim")
if precision.shape != (d, d):
raise ValueError("precision matrix must match data dims")
whitening = linalg.cholesky(precision, lower=True)
points = jnp.dot(points, whitening)
xi = jnp.dot(xi, whitening)
log_norm = jnp.sum(jnp.log(
jnp.diag(whitening))) - 0.5 * d * jnp.log(2 * np.pi)
def kernel(x_test, x_train, y_train):
arg = log_norm - 0.5 * jnp.sum(jnp.square(x_train - x_test))
if in_log:
return jnp.log(y_train) + arg
else:
return y_train * jnp.exp(arg)
reduce = special.logsumexp if in_log else jnp.sum
reduced_kernel = lambda x: reduce(api.vmap(kernel, in_axes=(None, 0, 0))
(x, points, values),
axis=0)
mapped_kernel = api.vmap(reduced_kernel)
return mapped_kernel(xi)
| gaussian_kde |
python | pyca__cryptography | src/cryptography/hazmat/primitives/asymmetric/ec.py | {
"start": 8644,
"end": 8821
} | class ____(EllipticCurve):
name = "secp256r1"
key_size = 256
group_order = (
0xFFFFFFFF00000000FFFFFFFFFFFFFFFFBCE6FAADA7179E84F3B9CAC2FC632551
)
| SECP256R1 |
python | run-llama__llama_index | llama-index-core/llama_index/core/schema.py | {
"start": 44679,
"end": 46114
} | class ____(DataClassJsonMixin):
"""
Query bundle.
This dataclass contains the original query string and associated transformations.
Args:
query_str (str): the original user-specified query string.
This is currently used by all non embedding-based queries.
custom_embedding_strs (list[str]): list of strings used for embedding the query.
This is currently used by all embedding-based queries.
embedding (list[float]): the stored embedding for the query.
"""
query_str: str
# using single image path as query input
image_path: Optional[str] = None
custom_embedding_strs: Optional[List[str]] = None
embedding: Optional[List[float]] = None
@property
def embedding_strs(self) -> List[str]:
"""Use custom embedding strs if specified, otherwise use query str."""
if self.custom_embedding_strs is None:
if len(self.query_str) == 0:
return []
return [self.query_str]
else:
return self.custom_embedding_strs
@property
def embedding_image(self) -> List[ImageType]:
"""Use image path for image retrieval."""
if self.image_path is None:
return []
return [self.image_path]
def __str__(self) -> str:
"""Convert to string representation."""
return self.query_str
QueryType = Union[str, QueryBundle]
| QueryBundle |
python | PyCQA__pylint | tests/functional/u/unnecessary/unnecessary_dunder_call.py | {
"start": 2027,
"end": 2239
} | class ____(OrderedDict):
def __init__(self, *args, **kwds):
OrderedDict.__init__(self, *args, **kwds)
def __setitem__(self, key, value):
OrderedDict.__setitem__(self, key, value)
| CustomDict |
python | numpy__numpy | numpy/lib/tests/test_function_base.py | {
"start": 85149,
"end": 85625
} | class ____:
def test_simple(self):
x = np.array([4, 3, 2, 1, 1, 2, 3, 4, 0])
assert_(np.all(unique(x) == [0, 1, 2, 3, 4]))
assert_(unique(np.array([1, 1, 1, 1, 1])) == np.array([1]))
x = ['widget', 'ham', 'foo', 'bar', 'foo', 'ham']
assert_(np.all(unique(x) == ['bar', 'foo', 'ham', 'widget']))
x = np.array([5 + 6j, 1 + 1j, 1 + 10j, 10, 5 + 6j])
assert_(np.all(unique(x) == [1 + 1j, 1 + 10j, 5 + 6j, 10]))
| TestUnique |
python | scipy__scipy | scipy/optimize/tests/test__remove_redundancy.py | {
"start": 6663,
"end": 6883
} | class ____(RRCommonTests):
def rr(self, A, b):
rr_res = _remove_redundancy_pivot_sparse(csc_array(A), b)
A1, b1, status, message = rr_res
return A1.toarray(), b1, status, message
| TestRRPivotSparse |
python | google__pytype | pytype/overlays/asyncio_types_overlay.py | {
"start": 454,
"end": 786
} | class ____(overlay.Overlay):
"""A custom overlay for the 'asyncio' module."""
def __init__(self, ctx):
member_map = {}
if ctx.python_version <= (3, 10):
member_map["coroutine"] = CoroutineDecorator.make
ast = ctx.loader.import_name("asyncio")
super().__init__(ctx, "asyncio", member_map, ast)
| AsyncioOverlay |
python | altair-viz__altair | altair/vegalite/v6/schema/channels.py | {
"start": 727125,
"end": 756640
} | class ____(
FieldChannelMixin, core.FieldOrDatumDefWithConditionMarkPropFieldDefnumber
):
r"""
StrokeWidth schema wrapper.
Parameters
----------
shorthand : str, dict, Sequence[str], :class:`RepeatRef`
shorthand for field, aggregate, and type
aggregate : dict, :class:`Aggregate`, :class:`ArgmaxDef`, :class:`ArgminDef`, :class:`NonArgAggregateOp`, Literal['average', 'count', 'distinct', 'max', 'mean', 'median', 'min', 'missing', 'product', 'q1', 'q3', 'ci0', 'ci1', 'stderr', 'stdev', 'stdevp', 'sum', 'valid', 'values', 'variance', 'variancep', 'exponential', 'exponentialb']
Aggregation function for the field (e.g., ``"mean"``, ``"sum"``, ``"median"``,
``"min"``, ``"max"``, ``"count"``).
**Default value:** ``undefined`` (None)
**See also:** `aggregate <https://vega.github.io/vega-lite/docs/aggregate.html>`__
documentation.
bandPosition : float
Relative position on a band of a stacked, binned, time unit, or band scale. For
example, the marks will be positioned at the beginning of the band if set to ``0``,
and at the middle of the band if set to ``0.5``.
bin : bool, dict, :class:`BinParams`, None
A flag for binning a ``quantitative`` field, `an object defining binning parameters
<https://vega.github.io/vega-lite/docs/bin.html#bin-parameters>`__, or indicating
that the data for ``x`` or ``y`` channel are binned before they are imported into
Vega-Lite (``"binned"``).
* If ``true``, default `binning parameters
<https://vega.github.io/vega-lite/docs/bin.html#bin-parameters>`__ will be
applied.
* If ``"binned"``, this indicates that the data for the ``x`` (or ``y``) channel are
already binned. You can map the bin-start field to ``x`` (or ``y``) and the
bin-end field to ``x2`` (or ``y2``). The scale and axis will be formatted similar
to binning in Vega-Lite. To adjust the axis ticks based on the bin step, you can
also set the axis's `tickMinStep
<https://vega.github.io/vega-lite/docs/axis.html#ticks>`__ property.
**Default value:** ``false``
**See also:** `bin <https://vega.github.io/vega-lite/docs/bin.html>`__
documentation.
condition : dict, :class:`ConditionalValueDefnumberExprRef`, :class:`ConditionalParameterValueDefnumberExprRef`, :class:`ConditionalPredicateValueDefnumberExprRef`, Sequence[dict, :class:`ConditionalValueDefnumberExprRef`, :class:`ConditionalParameterValueDefnumberExprRef`, :class:`ConditionalPredicateValueDefnumberExprRef`]
One or more value definition(s) with `a parameter or a test predicate
<https://vega.github.io/vega-lite/docs/condition.html>`__.
**Note:** A field definition's ``condition`` property can only contain `conditional
value definitions <https://vega.github.io/vega-lite/docs/condition.html#value>`__
since Vega-Lite only allows at most one encoded field per encoding channel.
field : str, dict, :class:`Field`, :class:`FieldName`, :class:`RepeatRef`
**Required.** A string defining the name of the field from which to pull a data
value or an object defining iterated values from the `repeat
<https://vega.github.io/vega-lite/docs/repeat.html>`__ operator.
**See also:** `field <https://vega.github.io/vega-lite/docs/field.html>`__
documentation.
**Notes:** 1) Dots (``.``) and brackets (``[`` and ``]``) can be used to access
nested objects (e.g., ``"field": "foo.bar"`` and ``"field": "foo['bar']"``). If
field names contain dots or brackets but are not nested, you can use ``\\`` to
escape dots and brackets (e.g., ``"a\\.b"`` and ``"a\\[0\\]"``). See more details
about escaping in the `field documentation
<https://vega.github.io/vega-lite/docs/field.html>`__. 2) ``field`` is not required
if ``aggregate`` is ``count``.
legend : dict, :class:`Legend`, None
An object defining properties of the legend. If ``null``, the legend for the
encoding channel will be removed.
**Default value:** If undefined, default `legend properties
<https://vega.github.io/vega-lite/docs/legend.html>`__ are applied.
**See also:** `legend <https://vega.github.io/vega-lite/docs/legend.html>`__
documentation.
scale : dict, :class:`Scale`, None
An object defining properties of the channel's scale, which is the function that
transforms values in the data domain (numbers, dates, strings, etc) to visual values
(pixels, colors, sizes) of the encoding channels.
If ``null``, the scale will be `disabled and the data value will be directly encoded
<https://vega.github.io/vega-lite/docs/scale.html#disable>`__.
**Default value:** If undefined, default `scale properties
<https://vega.github.io/vega-lite/docs/scale.html>`__ are applied.
**See also:** `scale <https://vega.github.io/vega-lite/docs/scale.html>`__
documentation.
sort : dict, :class:`Sort`, Sequence[str], Sequence[bool], Sequence[float], :class:`SortArray`, :class:`SortOrder`, :class:`AllSortString`, :class:`SortByChannel`, :class:`SortByEncoding`, :class:`EncodingSortField`, :class:`SortByChannelDesc`, Sequence[dict, :class:`DateTime`], Literal['-x', '-y', '-color', '-fill', '-stroke', '-strokeWidth', '-size', '-shape', '-fillOpacity', '-strokeOpacity', '-opacity', '-text', 'ascending', 'descending', 'x', 'y', 'color', 'fill', 'stroke', 'strokeWidth', 'size', 'shape', 'fillOpacity', 'strokeOpacity', 'opacity', 'text'], None
Sort order for the encoded field.
For continuous fields (quantitative or temporal), ``sort`` can be either
``"ascending"`` or ``"descending"``.
For discrete fields, ``sort`` can be one of the following:
* ``"ascending"`` or ``"descending"`` -- for sorting by the values' natural order in
JavaScript.
* `A string indicating an encoding channel name to sort by
<https://vega.github.io/vega-lite/docs/sort.html#sort-by-encoding>`__ (e.g.,
``"x"`` or ``"y"``) with an optional minus prefix for descending sort (e.g.,
``"-x"`` to sort by x-field, descending). This channel string is short-form of `a
sort-by-encoding definition
<https://vega.github.io/vega-lite/docs/sort.html#sort-by-encoding>`__. For
example, ``"sort": "-x"`` is equivalent to ``"sort": {"encoding": "x", "order":
"descending"}``.
* `A sort field definition
<https://vega.github.io/vega-lite/docs/sort.html#sort-field>`__ for sorting by
another field.
* `An array specifying the field values in preferred order
<https://vega.github.io/vega-lite/docs/sort.html#sort-array>`__. In this case, the
sort order will obey the values in the array, followed by any unspecified values
in their original order. For discrete time field, values in the sort array can be
`date-time definition objects
<https://vega.github.io/vega-lite/docs/datetime.html>`__. In addition, for time
units ``"month"`` and ``"day"``, the values can be the month or day names (case
insensitive) or their 3-letter initials (e.g., ``"Mon"``, ``"Tue"``).
* ``null`` indicating no sort.
**Default value:** ``"ascending"``
**Note:** ``null`` and sorting by another channel is not supported for ``row`` and
``column``.
**See also:** `sort <https://vega.github.io/vega-lite/docs/sort.html>`__
documentation.
timeUnit : dict, :class:`TimeUnit`, :class:`MultiTimeUnit`, :class:`BinnedTimeUnit`, :class:`SingleTimeUnit`, :class:`TimeUnitParams`, :class:`UtcMultiTimeUnit`, :class:`UtcSingleTimeUnit`, :class:`LocalMultiTimeUnit`, :class:`LocalSingleTimeUnit`, Literal['binnedyear', 'binnedyearquarter', 'binnedyearquartermonth', 'binnedyearmonth', 'binnedyearmonthdate', 'binnedyearmonthdatehours', 'binnedyearmonthdatehoursminutes', 'binnedyearmonthdatehoursminutesseconds', 'binnedyearweek', 'binnedyearweekday', 'binnedyearweekdayhours', 'binnedyearweekdayhoursminutes', 'binnedyearweekdayhoursminutesseconds', 'binnedyeardayofyear', 'binnedutcyear', 'binnedutcyearquarter', 'binnedutcyearquartermonth', 'binnedutcyearmonth', 'binnedutcyearmonthdate', 'binnedutcyearmonthdatehours', 'binnedutcyearmonthdatehoursminutes', 'binnedutcyearmonthdatehoursminutesseconds', 'binnedutcyearweek', 'binnedutcyearweekday', 'binnedutcyearweekdayhours', 'binnedutcyearweekdayhoursminutes', 'binnedutcyearweekdayhoursminutesseconds', 'binnedutcyeardayofyear', 'utcyear', 'utcquarter', 'utcmonth', 'utcweek', 'utcday', 'utcdayofyear', 'utcdate', 'utchours', 'utcminutes', 'utcseconds', 'utcmilliseconds', 'year', 'quarter', 'month', 'week', 'day', 'dayofyear', 'date', 'hours', 'minutes', 'seconds', 'milliseconds', 'utcyearquarter', 'utcyearquartermonth', 'utcyearmonth', 'utcyearmonthdate', 'utcyearmonthdatehours', 'utcyearmonthdatehoursminutes', 'utcyearmonthdatehoursminutesseconds', 'utcyearweek', 'utcyearweekday', 'utcyearweekdayhours', 'utcyearweekdayhoursminutes', 'utcyearweekdayhoursminutesseconds', 'utcyeardayofyear', 'utcquartermonth', 'utcmonthdate', 'utcmonthdatehours', 'utcmonthdatehoursminutes', 'utcmonthdatehoursminutesseconds', 'utcweekday', 'utcweekdayhours', 'utcweekdayhoursminutes', 'utcweekdayhoursminutesseconds', 'utcdayhours', 'utcdayhoursminutes', 'utcdayhoursminutesseconds', 'utchoursminutes', 'utchoursminutesseconds', 'utcminutesseconds', 'utcsecondsmilliseconds', 'yearquarter', 
'yearquartermonth', 'yearmonth', 'yearmonthdate', 'yearmonthdatehours', 'yearmonthdatehoursminutes', 'yearmonthdatehoursminutesseconds', 'yearweek', 'yearweekday', 'yearweekdayhours', 'yearweekdayhoursminutes', 'yearweekdayhoursminutesseconds', 'yeardayofyear', 'quartermonth', 'monthdate', 'monthdatehours', 'monthdatehoursminutes', 'monthdatehoursminutesseconds', 'weekday', 'weekdayhours', 'weekdayhoursminutes', 'weekdayhoursminutesseconds', 'dayhours', 'dayhoursminutes', 'dayhoursminutesseconds', 'hoursminutes', 'hoursminutesseconds', 'minutesseconds', 'secondsmilliseconds']
Time unit (e.g., ``year``, ``yearmonth``, ``month``, ``hours``) for a temporal
field. or `a temporal field that gets casted as ordinal
<https://vega.github.io/vega-lite/docs/type.html#cast>`__.
**Default value:** ``undefined`` (None)
**See also:** `timeUnit <https://vega.github.io/vega-lite/docs/timeunit.html>`__
documentation.
title : str, :class:`Text`, Sequence[str], None
A title for the field. If ``null``, the title will be removed.
**Default value:** derived from the field's name and transformation function
(``aggregate``, ``bin`` and ``timeUnit``). If the field has an aggregate function,
the function is displayed as part of the title (e.g., ``"Sum of Profit"``). If the
field is binned or has a time unit applied, the applied function is shown in
parentheses (e.g., ``"Profit (binned)"``, ``"Transaction Date (year-month)"``).
Otherwise, the title is simply the field name.
**Notes**:
1) You can customize the default field title format by providing the `fieldTitle
<https://vega.github.io/vega-lite/docs/config.html#top-level-config>`__ property in
the `config <https://vega.github.io/vega-lite/docs/config.html>`__ or `fieldTitle
function via the compile function's options
<https://vega.github.io/vega-lite/usage/compile.html#field-title>`__.
2) If both field definition's ``title`` and axis, header, or legend ``title`` are
defined, axis/header/legend title will be used.
type : :class:`StandardType`, Literal['quantitative', 'ordinal', 'temporal', 'nominal']
The type of measurement (``"quantitative"``, ``"temporal"``, ``"ordinal"``, or
``"nominal"``) for the encoded field or constant value (``datum``). It can also be a
``"geojson"`` type for encoding `'geoshape'
<https://vega.github.io/vega-lite/docs/geoshape.html>`__.
Vega-Lite automatically infers data types in many cases as discussed below. However,
type is required for a field if: (1) the field is not nominal and the field encoding
has no specified ``aggregate`` (except ``argmin`` and ``argmax``), ``bin``, scale
type, custom ``sort`` order, nor ``timeUnit`` or (2) if you wish to use an ordinal
scale for a field with ``bin`` or ``timeUnit``.
**Default value:**
1) For a data ``field``, ``"nominal"`` is the default data type unless the field
encoding has ``aggregate``, ``channel``, ``bin``, scale type, ``sort``, or
``timeUnit`` that satisfies the following criteria:
* ``"quantitative"`` is the default type if (1) the encoded field contains ``bin``
or ``aggregate`` except ``"argmin"`` and ``"argmax"``, (2) the encoding channel is
``latitude`` or ``longitude`` channel or (3) if the specified scale type is `a
quantitative scale <https://vega.github.io/vega-lite/docs/scale.html#type>`__.
* ``"temporal"`` is the default type if (1) the encoded field contains ``timeUnit``
or (2) the specified scale type is a time or utc scale
* ``"ordinal"`` is the default type if (1) the encoded field contains a `custom sort
order
<https://vega.github.io/vega-lite/docs/sort.html#specifying-custom-sort-order>`__,
(2) the specified scale type is an ordinal/point/band scale, or (3) the encoding
channel is ``order``.
2) For a constant value in data domain (``datum``):
* ``"quantitative"`` if the datum is a number
* ``"nominal"`` if the datum is a string
* ``"temporal"`` if the datum is `a date time object
<https://vega.github.io/vega-lite/docs/datetime.html>`__
**Note:**
* Data ``type`` describes the semantics of the data rather than the primitive data
types (number, string, etc.). The same primitive data type can have different
types of measurement. For example, numeric data can represent quantitative,
ordinal, or nominal data.
* Data values for a temporal field can be either a date-time string (e.g.,
``"2015-03-07 12:32:17"``, ``"17:01"``, ``"2015-03-16"``. ``"2015"``) or a
timestamp number (e.g., ``1552199579097``).
* When using with `bin <https://vega.github.io/vega-lite/docs/bin.html>`__, the
``type`` property can be either ``"quantitative"`` (for using a linear bin scale)
or `"ordinal" (for using an ordinal bin scale)
<https://vega.github.io/vega-lite/docs/type.html#cast-bin>`__.
* When using with `timeUnit
<https://vega.github.io/vega-lite/docs/timeunit.html>`__, the ``type`` property
can be either ``"temporal"`` (default, for using a temporal scale) or `"ordinal"
(for using an ordinal scale)
<https://vega.github.io/vega-lite/docs/type.html#cast-bin>`__.
* When using with `aggregate
<https://vega.github.io/vega-lite/docs/aggregate.html>`__, the ``type`` property
refers to the post-aggregation data type. For example, we can calculate count
``distinct`` of a categorical field ``"cat"`` using ``{"aggregate": "distinct",
"field": "cat"}``. The ``"type"`` of the aggregate output is ``"quantitative"``.
* Secondary channels (e.g., ``x2``, ``y2``, ``xError``, ``yError``) do not have
``type`` as they must have exactly the same type as their primary channels (e.g.,
``x``, ``y``).
**See also:** `type <https://vega.github.io/vega-lite/docs/type.html>`__
documentation.
"""
_class_is_valid_at_instantiation = False
_encoding_name = "strokeWidth"
@overload
def aggregate(self, _: NonArgAggregateOp_T, /) -> StrokeWidth: ...
@overload
def aggregate(
self, *, argmax: Optional[str | SchemaBase] = Undefined
) -> StrokeWidth: ...
@overload
def aggregate(
self, *, argmin: Optional[str | SchemaBase] = Undefined
) -> StrokeWidth: ...
@overload
def bandPosition(self, _: float, /) -> StrokeWidth: ...
@overload
def bin(self, _: bool | Bin | None, /) -> StrokeWidth: ...
@overload
def bin(
self,
*,
anchor: Optional[float] = Undefined,
base: Optional[float] = Undefined,
binned: Optional[bool] = Undefined,
divide: Optional[Sequence[float]] = Undefined,
extent: Optional[Parameter | SchemaBase | Sequence[float] | Map] = Undefined,
maxbins: Optional[float] = Undefined,
minstep: Optional[float] = Undefined,
nice: Optional[bool] = Undefined,
step: Optional[float] = Undefined,
steps: Optional[Sequence[float]] = Undefined,
) -> StrokeWidth: ...
@overload
def condition(
self,
*,
test: Optional[str | SchemaBase | Map] = Undefined,
value: Optional[float | Parameter | SchemaBase | Map] = Undefined,
) -> StrokeWidth: ...
@overload
def condition(
self,
*,
empty: Optional[bool] = Undefined,
param: Optional[str | SchemaBase] = Undefined,
value: Optional[float | Parameter | SchemaBase | Map] = Undefined,
) -> StrokeWidth: ...
@overload
def condition(
self, _: list[core.ConditionalValueDefnumberExprRef], /
) -> StrokeWidth: ...
@overload
def field(self, _: str | RepeatRef, /) -> StrokeWidth: ...
@overload
def field(
self,
*,
repeat: Optional[Literal["row", "column", "repeat", "layer"]] = Undefined,
) -> StrokeWidth: ...
@overload
def legend(self, _: Legend | None, /) -> StrokeWidth: ...
@overload
def legend(
self,
*,
aria: Optional[bool | Parameter | SchemaBase | Map] = Undefined,
clipHeight: Optional[float | Parameter | SchemaBase | Map] = Undefined,
columnPadding: Optional[float | Parameter | SchemaBase | Map] = Undefined,
columns: Optional[float | Parameter | SchemaBase | Map] = Undefined,
cornerRadius: Optional[float | Parameter | SchemaBase | Map] = Undefined,
description: Optional[str | Parameter | SchemaBase | Map] = Undefined,
direction: Optional[SchemaBase | Orientation_T] = Undefined,
fillColor: Optional[
str | Parameter | SchemaBase | Map | ColorName_T | None
] = Undefined,
format: Optional[str | SchemaBase | Map] = Undefined,
formatType: Optional[str] = Undefined,
gradientLength: Optional[float | Parameter | SchemaBase | Map] = Undefined,
gradientOpacity: Optional[float | Parameter | SchemaBase | Map] = Undefined,
gradientStrokeColor: Optional[
str | Parameter | SchemaBase | Map | ColorName_T | None
] = Undefined,
gradientStrokeWidth: Optional[float | Parameter | SchemaBase | Map] = Undefined,
gradientThickness: Optional[float | Parameter | SchemaBase | Map] = Undefined,
gridAlign: Optional[Parameter | SchemaBase | Map | LayoutAlign_T] = Undefined,
labelAlign: Optional[Parameter | SchemaBase | Map | Align_T] = Undefined,
labelBaseline: Optional[
Parameter | SchemaBase | Map | TextBaseline_T
] = Undefined,
labelColor: Optional[
str | Parameter | SchemaBase | Map | ColorName_T | None
] = Undefined,
labelExpr: Optional[str] = Undefined,
labelFont: Optional[str | Parameter | SchemaBase | Map] = Undefined,
labelFontSize: Optional[float | Parameter | SchemaBase | Map] = Undefined,
labelFontStyle: Optional[str | Parameter | SchemaBase | Map] = Undefined,
labelFontWeight: Optional[
Parameter | SchemaBase | Map | FontWeight_T
] = Undefined,
labelLimit: Optional[float | Parameter | SchemaBase | Map] = Undefined,
labelOffset: Optional[float | Parameter | SchemaBase | Map] = Undefined,
labelOpacity: Optional[float | Parameter | SchemaBase | Map] = Undefined,
labelOverlap: Optional[
bool | Parameter | SchemaBase | Literal["greedy", "parity"] | Map
] = Undefined,
labelPadding: Optional[float | Parameter | SchemaBase | Map] = Undefined,
labelSeparation: Optional[float | Parameter | SchemaBase | Map] = Undefined,
legendX: Optional[float | Parameter | SchemaBase | Map] = Undefined,
legendY: Optional[float | Parameter | SchemaBase | Map] = Undefined,
offset: Optional[float | Parameter | SchemaBase | Map] = Undefined,
orient: Optional[SchemaBase | LegendOrient_T] = Undefined,
padding: Optional[float | Parameter | SchemaBase | Map] = Undefined,
rowPadding: Optional[float | Parameter | SchemaBase | Map] = Undefined,
strokeColor: Optional[
str | Parameter | SchemaBase | Map | ColorName_T | None
] = Undefined,
symbolDash: Optional[
Parameter | SchemaBase | Sequence[float] | Map
] = Undefined,
symbolDashOffset: Optional[float | Parameter | SchemaBase | Map] = Undefined,
symbolFillColor: Optional[
str | Parameter | SchemaBase | Map | ColorName_T | None
] = Undefined,
symbolLimit: Optional[float | Parameter | SchemaBase | Map] = Undefined,
symbolOffset: Optional[float | Parameter | SchemaBase | Map] = Undefined,
symbolOpacity: Optional[float | Parameter | SchemaBase | Map] = Undefined,
symbolSize: Optional[float | Parameter | SchemaBase | Map] = Undefined,
symbolStrokeColor: Optional[
str | Parameter | SchemaBase | Map | ColorName_T | None
] = Undefined,
symbolStrokeWidth: Optional[float | Parameter | SchemaBase | Map] = Undefined,
symbolType: Optional[str | Parameter | SchemaBase | Map] = Undefined,
tickCount: Optional[
float | Parameter | SchemaBase | Map | TimeInterval_T
] = Undefined,
tickMinStep: Optional[float | Parameter | SchemaBase | Map] = Undefined,
title: Optional[str | SchemaBase | Sequence[str] | None] = Undefined,
titleAlign: Optional[Parameter | SchemaBase | Map | Align_T] = Undefined,
titleAnchor: Optional[Parameter | SchemaBase | Map | TitleAnchor_T] = Undefined,
titleBaseline: Optional[
Parameter | SchemaBase | Map | TextBaseline_T
] = Undefined,
titleColor: Optional[
str | Parameter | SchemaBase | Map | ColorName_T | None
] = Undefined,
titleFont: Optional[str | Parameter | SchemaBase | Map] = Undefined,
titleFontSize: Optional[float | Parameter | SchemaBase | Map] = Undefined,
titleFontStyle: Optional[str | Parameter | SchemaBase | Map] = Undefined,
titleFontWeight: Optional[
Parameter | SchemaBase | Map | FontWeight_T
] = Undefined,
titleLimit: Optional[float | Parameter | SchemaBase | Map] = Undefined,
titleLineHeight: Optional[float | Parameter | SchemaBase | Map] = Undefined,
titleOpacity: Optional[float | Parameter | SchemaBase | Map] = Undefined,
titleOrient: Optional[Parameter | SchemaBase | Map | Orient_T] = Undefined,
titlePadding: Optional[float | Parameter | SchemaBase | Map] = Undefined,
type: Optional[Literal["symbol", "gradient"]] = Undefined,
values: Optional[
Parameter
| SchemaBase
| Sequence[str]
| Sequence[bool]
| Sequence[float]
| Sequence[Temporal | SchemaBase | Map]
| Map
] = Undefined,
zindex: Optional[float] = Undefined,
) -> StrokeWidth: ...
@overload
def scale(self, _: Scale | None, /) -> StrokeWidth: ...
@overload
def scale(
self,
*,
align: Optional[float | Parameter | SchemaBase | Map] = Undefined,
base: Optional[float | Parameter | SchemaBase | Map] = Undefined,
bins: Optional[SchemaBase | Sequence[float] | Map] = Undefined,
clamp: Optional[bool | Parameter | SchemaBase | Map] = Undefined,
constant: Optional[float | Parameter | SchemaBase | Map] = Undefined,
domain: Optional[
Parameter
| SchemaBase
| Literal["unaggregated"]
| Sequence[
str | bool | float | Temporal | Parameter | SchemaBase | Map | None
]
| Map
] = Undefined,
domainMax: Optional[
float | Temporal | Parameter | SchemaBase | Map
] = Undefined,
domainMid: Optional[float | Parameter | SchemaBase | Map] = Undefined,
domainMin: Optional[
float | Temporal | Parameter | SchemaBase | Map
] = Undefined,
domainRaw: Optional[Parameter | SchemaBase | Map] = Undefined,
exponent: Optional[float | Parameter | SchemaBase | Map] = Undefined,
interpolate: Optional[
Parameter | SchemaBase | Map | ScaleInterpolateEnum_T
] = Undefined,
nice: Optional[
bool | float | Parameter | SchemaBase | Map | TimeInterval_T
] = Undefined,
padding: Optional[float | Parameter | SchemaBase | Map] = Undefined,
paddingInner: Optional[float | Parameter | SchemaBase | Map] = Undefined,
paddingOuter: Optional[float | Parameter | SchemaBase | Map] = Undefined,
range: Optional[
SchemaBase
| Sequence[str | float | Parameter | SchemaBase | Sequence[float] | Map]
| Map
| RangeEnum_T
] = Undefined,
rangeMax: Optional[str | float | Parameter | SchemaBase | Map] = Undefined,
rangeMin: Optional[str | float | Parameter | SchemaBase | Map] = Undefined,
reverse: Optional[bool | Parameter | SchemaBase | Map] = Undefined,
round: Optional[bool | Parameter | SchemaBase | Map] = Undefined,
scheme: Optional[Parameter | SchemaBase | Map | ColorScheme_T] = Undefined,
type: Optional[SchemaBase | ScaleType_T] = Undefined,
zero: Optional[bool | Parameter | SchemaBase | Map] = Undefined,
) -> StrokeWidth: ...
@overload
def sort(
self,
_: Sequence[str]
| Sequence[bool]
| Sequence[float]
| Sequence[DateTime | Temporal]
| AllSortString_T
| None,
/,
) -> StrokeWidth: ...
@overload
def sort(
self,
*,
field: Optional[str | SchemaBase | Map] = Undefined,
op: Optional[SchemaBase | NonArgAggregateOp_T] = Undefined,
order: Optional[SchemaBase | SortOrder_T | None] = Undefined,
) -> StrokeWidth: ...
@overload
def sort(
self,
*,
encoding: Optional[SchemaBase | SortByChannel_T] = Undefined,
order: Optional[SchemaBase | SortOrder_T | None] = Undefined,
) -> StrokeWidth: ...
@overload
def timeUnit(
self,
_: TimeUnitParams | MultiTimeUnit_T | BinnedTimeUnit_T | SingleTimeUnit_T,
/,
) -> StrokeWidth: ...
@overload
def timeUnit(
self,
*,
binned: Optional[bool] = Undefined,
maxbins: Optional[float] = Undefined,
step: Optional[float] = Undefined,
unit: Optional[SchemaBase | MultiTimeUnit_T | SingleTimeUnit_T] = Undefined,
utc: Optional[bool] = Undefined,
) -> StrokeWidth: ...
@overload
def title(self, _: str | Sequence[str] | None, /) -> StrokeWidth: ...
@overload
def type(self, _: StandardType_T, /) -> StrokeWidth: ...
def __init__(
self,
shorthand: Optional[str | SchemaBase | Sequence[str] | Map] = Undefined,
aggregate: Optional[SchemaBase | Map | NonArgAggregateOp_T] = Undefined,
bandPosition: Optional[float] = Undefined,
bin: Optional[bool | SchemaBase | Map | None] = Undefined,
condition: Optional[SchemaBase | Sequence[SchemaBase | Map] | Map] = Undefined,
field: Optional[str | SchemaBase | Map] = Undefined,
legend: Optional[SchemaBase | Map | None] = Undefined,
scale: Optional[SchemaBase | Map | None] = Undefined,
sort: Optional[
SchemaBase
| Sequence[str]
| Sequence[bool]
| Sequence[float]
| Sequence[Temporal | SchemaBase | Map]
| Map
| AllSortString_T
| None
] = Undefined,
timeUnit: Optional[
SchemaBase | Map | MultiTimeUnit_T | BinnedTimeUnit_T | SingleTimeUnit_T
] = Undefined,
title: Optional[str | SchemaBase | Sequence[str] | None] = Undefined,
type: Optional[SchemaBase | StandardType_T] = Undefined,
**kwds,
):
super().__init__(
shorthand=shorthand,
aggregate=aggregate,
bandPosition=bandPosition,
bin=bin,
condition=condition,
field=field,
legend=legend,
scale=scale,
sort=sort,
timeUnit=timeUnit,
title=title,
type=type,
**kwds,
)
@with_property_setters
| StrokeWidth |
python | Textualize__textual | docs/examples/tutorial/stopwatch05.py | {
"start": 233,
"end": 931
} | class ____(Digits):
"""A widget to display elapsed time."""
start_time = reactive(monotonic)
time = reactive(0.0)
def on_mount(self) -> None:
"""Event handler called when widget is added to the app."""
self.set_interval(1 / 60, self.update_time)
def update_time(self) -> None:
"""Method to update the time to the current time."""
self.time = monotonic() - self.start_time
def watch_time(self, time: float) -> None:
"""Called when the time attribute changes."""
minutes, seconds = divmod(time, 60)
hours, minutes = divmod(minutes, 60)
self.update(f"{hours:02,.0f}:{minutes:02.0f}:{seconds:05.2f}")
| TimeDisplay |
python | chroma-core__chroma | chromadb/api/client.py | {
"start": 15501,
"end": 16968
} | class ____(SharedSystemClient, AdminAPI):
_server: ServerAPI
def __init__(self, settings: Settings = Settings()) -> None:
super().__init__(settings)
self._server = self._system.instance(ServerAPI)
@override
def create_database(self, name: str, tenant: str = DEFAULT_TENANT) -> None:
return self._server.create_database(name=name, tenant=tenant)
@override
def get_database(self, name: str, tenant: str = DEFAULT_TENANT) -> Database:
return self._server.get_database(name=name, tenant=tenant)
@override
def delete_database(self, name: str, tenant: str = DEFAULT_TENANT) -> None:
return self._server.delete_database(name=name, tenant=tenant)
@override
def list_databases(
self,
limit: Optional[int] = None,
offset: Optional[int] = None,
tenant: str = DEFAULT_TENANT,
) -> Sequence[Database]:
return self._server.list_databases(limit, offset, tenant=tenant)
@override
def create_tenant(self, name: str) -> None:
return self._server.create_tenant(name=name)
@override
def get_tenant(self, name: str) -> Tenant:
return self._server.get_tenant(name=name)
@classmethod
@override
def from_system(
cls,
system: System,
) -> "AdminClient":
SharedSystemClient._populate_data_from_system(system)
instance = cls(settings=system.settings)
return instance
| AdminClient |
python | docker__docker-py | docker/api/exec_api.py | {
"start": 69,
"end": 6210
} | class ____:
@utils.check_resource('container')
def exec_create(self, container, cmd, stdout=True, stderr=True,
stdin=False, tty=False, privileged=False, user='',
environment=None, workdir=None, detach_keys=None):
"""
Sets up an exec instance in a running container.
Args:
container (str): Target container where exec instance will be
created
cmd (str or list): Command to be executed
stdout (bool): Attach to stdout. Default: ``True``
stderr (bool): Attach to stderr. Default: ``True``
stdin (bool): Attach to stdin. Default: ``False``
tty (bool): Allocate a pseudo-TTY. Default: False
privileged (bool): Run as privileged.
user (str): User to execute command as. Default: root
environment (dict or list): A dictionary or a list of strings in
the following format ``["PASSWORD=xxx"]`` or
``{"PASSWORD": "xxx"}``.
workdir (str): Path to working directory for this exec session
detach_keys (str): Override the key sequence for detaching
a container. Format is a single character `[a-Z]`
or `ctrl-<value>` where `<value>` is one of:
`a-z`, `@`, `^`, `[`, `,` or `_`.
~/.docker/config.json is used by default.
Returns:
(dict): A dictionary with an exec ``Id`` key.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
if environment is not None and utils.version_lt(self._version, '1.25'):
raise errors.InvalidVersion(
'Setting environment for exec is not supported in API < 1.25'
)
if isinstance(cmd, str):
cmd = utils.split_command(cmd)
if isinstance(environment, dict):
environment = utils.utils.format_environment(environment)
data = {
'Container': container,
'User': user,
'Privileged': privileged,
'Tty': tty,
'AttachStdin': stdin,
'AttachStdout': stdout,
'AttachStderr': stderr,
'Cmd': cmd,
'Env': environment,
}
if workdir is not None:
if utils.version_lt(self._version, '1.35'):
raise errors.InvalidVersion(
'workdir is not supported for API version < 1.35'
)
data['WorkingDir'] = workdir
if detach_keys:
data['detachKeys'] = detach_keys
elif 'detachKeys' in self._general_configs:
data['detachKeys'] = self._general_configs['detachKeys']
url = self._url('/containers/{0}/exec', container)
res = self._post_json(url, data=data)
return self._result(res, True)
def exec_inspect(self, exec_id):
"""
Return low-level information about an exec command.
Args:
exec_id (str): ID of the exec instance
Returns:
(dict): Dictionary of values returned by the endpoint.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
if isinstance(exec_id, dict):
exec_id = exec_id.get('Id')
res = self._get(self._url("/exec/{0}/json", exec_id))
return self._result(res, True)
def exec_resize(self, exec_id, height=None, width=None):
"""
Resize the tty session used by the specified exec command.
Args:
exec_id (str): ID of the exec instance
height (int): Height of tty session
width (int): Width of tty session
"""
if isinstance(exec_id, dict):
exec_id = exec_id.get('Id')
params = {'h': height, 'w': width}
url = self._url("/exec/{0}/resize", exec_id)
res = self._post(url, params=params)
self._raise_for_status(res)
@utils.check_resource('exec_id')
def exec_start(self, exec_id, detach=False, tty=False, stream=False,
socket=False, demux=False):
"""
Start a previously set up exec instance.
Args:
exec_id (str): ID of the exec instance
detach (bool): If true, detach from the exec command.
Default: False
tty (bool): Allocate a pseudo-TTY. Default: False
stream (bool): Return response data progressively as an iterator
of strings, rather than a single string.
socket (bool): Return the connection socket to allow custom
read/write operations. Must be closed by the caller when done.
demux (bool): Return stdout and stderr separately
Returns:
(generator or str or tuple): If ``stream=True``, a generator
yielding response chunks. If ``socket=True``, a socket object for
the connection. A string containing response data otherwise. If
``demux=True``, a tuple with two elements of type byte: stdout and
stderr.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
# we want opened socket if socket == True
data = {
'Tty': tty,
'Detach': detach
}
headers = {} if detach else {
'Connection': 'Upgrade',
'Upgrade': 'tcp'
}
res = self._post_json(
self._url('/exec/{0}/start', exec_id),
headers=headers,
data=data,
stream=True
)
if detach:
try:
return self._result(res)
finally:
res.close()
if socket:
return self._get_raw_response_socket(res)
output = self._read_from_socket(res, stream, tty=tty, demux=demux)
if stream:
return CancellableStream(output, res)
else:
return output
| ExecApiMixin |
python | kamyu104__LeetCode-Solutions | Python/maximum-partition-factor.py | {
"start": 79,
"end": 1844
} | class ____(object):
def maxPartitionFactor(self, points):
"""
:type points: List[List[int]]
:rtype: int
"""
class UnionFind(object): # Time: O(n * alpha(n)), Space: O(n)
def __init__(self, n):
self.set = range(n)
self.rank = [0]*n
self.parity = [0]*n # added
def find_set(self, x):
stk = []
while self.set[x] != x: # path compression
stk.append(x)
x = self.set[x]
while stk:
y = stk.pop()
self.parity[y] ^= self.parity[self.set[y]] # added
self.set[y] = x
return x
def union_set(self, x, y):
ox, oy = x, y # added
x, y = self.find_set(x), self.find_set(y)
if x == y:
return self.parity[ox] != self.parity[oy] # modified
if self.rank[x] > self.rank[y]: # union by rank
x, y = y, x
ox, oy = oy, ox # added
if self.rank[x] == self.rank[y]:
self.rank[y] += 1
self.set[x] = self.set[y]
self.parity[x] = self.parity[ox]^self.parity[oy]^1 # added
return True
def dist(u, v):
return abs(points[u][0]-points[v][0])+abs(points[u][1]-points[v][1])
sorted_dists = sorted((dist(u, v), u, v) for u in xrange(len(points)) for v in xrange(u+1, len(points)))
uf = UnionFind(len(points))
return next((d for d, u, v in sorted_dists if not uf.union_set(u, v)), 0)
# Time: O(n^2 * logn)
# Space: O(n^2)
# sort, union find
| Solution |
python | huggingface__transformers | tests/utils/test_add_new_model_like.py | {
"start": 1050,
"end": 29861
} | class ____(unittest.TestCase):
@classmethod
def setUpClass(cls):
"""
Create a temporary repo with the same structure as Transformers, with just 2 models.
"""
cls.tmp_dir = tempfile.TemporaryDirectory()
cls.FAKE_REPO = cls.tmp_dir.name
os.makedirs(os.path.join(cls.FAKE_REPO, "src", "transformers", "models"), exist_ok=True)
os.makedirs(os.path.join(cls.FAKE_REPO, "tests", "models"), exist_ok=True)
os.makedirs(os.path.join(cls.FAKE_REPO, "docs", "source", "en", "model_doc"), exist_ok=True)
# We need to copy the utils to run the cleanup commands
utils_src = os.path.join(REPO_PATH, "utils")
shutil.copytree(utils_src, utils_src.replace(REPO_PATH, cls.FAKE_REPO))
# Copy the __init__ files
model_init = os.path.join(REPO_PATH, "src", "transformers", "models", "__init__.py")
shutil.copy(model_init, model_init.replace(REPO_PATH, cls.FAKE_REPO))
doc_toc = os.path.join(REPO_PATH, "docs", "source", "en", "_toctree.yml")
shutil.copy(doc_toc, doc_toc.replace(REPO_PATH, cls.FAKE_REPO))
# We need the pyproject for ruff as well
pyproject = os.path.join(REPO_PATH, "pyproject.toml")
shutil.copy(pyproject, pyproject.replace(REPO_PATH, cls.FAKE_REPO))
# Copy over all the specific model files
for model in MODELS_TO_COPY:
model_src = os.path.join(REPO_PATH, "src", "transformers", "models", model)
shutil.copytree(model_src, model_src.replace(REPO_PATH, cls.FAKE_REPO))
test_src = os.path.join(REPO_PATH, "tests", "models", model)
shutil.copytree(test_src, test_src.replace(REPO_PATH, cls.FAKE_REPO))
if model != "auto":
doc_src = os.path.join(REPO_PATH, "docs", "source", "en", "model_doc", f"{model}.md")
shutil.copy(doc_src, doc_src.replace(REPO_PATH, cls.FAKE_REPO))
# For convenience
cls.MODEL_PATH = os.path.join(cls.FAKE_REPO, "src", "transformers", "models")
cls.TESTS_MODEL_PATH = os.path.join(cls.FAKE_REPO, "tests", "models")
cls.DOC_PATH = os.path.join(cls.FAKE_REPO, "docs", "source", "en")
@classmethod
def tearDownClass(cls):
cls.tmp_dir.cleanup()
def assertFileIsEqual(self, text: str, filepath: str):
with open(filepath, "r") as f:
file_text = f.read()
self.assertEqual(file_text.strip(), text.strip())
def assertInFile(self, text: str, filepath: str):
with open(filepath, "r") as f:
file_text = f.read()
self.assertTrue(text in file_text)
def test_llama_without_tokenizers(self):
# This is the structure without adding the tokenizers
filenames_to_add = (
("configuration_llama.py", True),
("modeling_llama.py", True),
("tokenization_llama.py", False),
("tokenization_llama_fast.py", False),
("image_processing_llama.py", False),
("image_processing_llama_fast.py", False),
("video_processing_llama.py", False),
("feature_extraction_llama.py", False),
("processing_llama.py", False),
)
# Run the command
_add_new_model_like_internal(
repo_path=Path(self.FAKE_REPO),
old_model_infos=ModelInfos("llama"),
new_lowercase_name="my_test",
new_model_paper_name="MyTest",
filenames_to_add=filenames_to_add,
create_fast_image_processor=False,
)
# First assert that all files were created correctly
model_repo = os.path.join(self.MODEL_PATH, "my_test")
tests_repo = os.path.join(self.TESTS_MODEL_PATH, "my_test")
self.assertTrue(os.path.isfile(os.path.join(model_repo, "modular_my_test.py")))
self.assertTrue(os.path.isfile(os.path.join(model_repo, "modeling_my_test.py")))
self.assertTrue(os.path.isfile(os.path.join(model_repo, "configuration_my_test.py")))
self.assertTrue(os.path.isfile(os.path.join(model_repo, "__init__.py")))
self.assertTrue(os.path.isfile(os.path.join(self.DOC_PATH, "model_doc", "my_test.md")))
self.assertTrue(os.path.isfile(os.path.join(tests_repo, "__init__.py")))
self.assertTrue(os.path.isfile(os.path.join(tests_repo, "test_modeling_my_test.py")))
# Now assert the correct imports/auto mappings/toctree were added
self.assertInFile(
"from .my_test import *\n",
os.path.join(self.MODEL_PATH, "__init__.py"),
)
self.assertInFile(
'("my_test", "MyTestConfig"),\n',
os.path.join(self.MODEL_PATH, "auto", "configuration_auto.py"),
)
self.assertInFile(
'("my_test", "MyTest"),\n',
os.path.join(self.MODEL_PATH, "auto", "configuration_auto.py"),
)
self.assertInFile(
'("my_test", "MyTestModel"),\n',
os.path.join(self.MODEL_PATH, "auto", "modeling_auto.py"),
)
self.assertInFile(
'("my_test", "MyTestForCausalLM"),\n',
os.path.join(self.MODEL_PATH, "auto", "modeling_auto.py"),
)
self.assertInFile(
'("my_test", "MyTestForSequenceClassification"),\n',
os.path.join(self.MODEL_PATH, "auto", "modeling_auto.py"),
)
self.assertInFile(
'("my_test", "MyTestForQuestionAnswering"),\n',
os.path.join(self.MODEL_PATH, "auto", "modeling_auto.py"),
)
self.assertInFile(
'("my_test", "MyTestForTokenClassification"),\n',
os.path.join(self.MODEL_PATH, "auto", "modeling_auto.py"),
)
self.assertInFile(
"- local: model_doc/my_test\n title: MyTest\n",
os.path.join(self.DOC_PATH, "_toctree.yml"),
)
# Check some exact file creation. For model definition, only check modular as modeling/config/etc... are created
# directly from it
EXPECTED_MODULAR = textwrap.dedent(
f"""
# coding=utf-8
# Copyright {CURRENT_YEAR} the HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..llama.configuration_llama import LlamaConfig
from ..llama.modeling_llama import (
LlamaAttention,
LlamaDecoderLayer,
LlamaForCausalLM,
LlamaForQuestionAnswering,
LlamaForSequenceClassification,
LlamaForTokenClassification,
LlamaMLP,
LlamaModel,
LlamaPreTrainedModel,
LlamaRMSNorm,
LlamaRotaryEmbedding,
)
class MyTestConfig(LlamaConfig):
pass
class MyTestRMSNorm(LlamaRMSNorm):
pass
class MyTestRotaryEmbedding(LlamaRotaryEmbedding):
pass
class MyTestMLP(LlamaMLP):
pass
class MyTestAttention(LlamaAttention):
pass
class MyTestDecoderLayer(LlamaDecoderLayer):
pass
class MyTestPreTrainedModel(LlamaPreTrainedModel):
pass
class MyTestModel(LlamaModel):
pass
class MyTestForCausalLM(LlamaForCausalLM):
pass
class MyTestForSequenceClassification(LlamaForSequenceClassification):
pass
class MyTestForQuestionAnswering(LlamaForQuestionAnswering):
pass
class MyTestForTokenClassification(LlamaForTokenClassification):
pass
__all__ = [
"MyTestConfig",
"MyTestForCausalLM",
"MyTestModel",
"MyTestPreTrainedModel",
"MyTestForSequenceClassification",
"MyTestForQuestionAnswering",
"MyTestForTokenClassification",
]
"""
)
self.assertFileIsEqual(EXPECTED_MODULAR, os.path.join(model_repo, "modular_my_test.py"))
EXPECTED_INIT = textwrap.dedent(
f"""
# coding=utf-8
# Copyright {CURRENT_YEAR} the HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import _LazyModule
from ...utils.import_utils import define_import_structure
if TYPE_CHECKING:
from .configuration_my_test import *
from .modeling_my_test import *
else:
import sys
_file = globals()["__file__"]
sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
"""
)
self.assertFileIsEqual(EXPECTED_INIT, os.path.join(model_repo, "__init__.py"))
EXPECTED_DOC = textwrap.dedent(
f"""
<!--Copyright {CURRENT_YEAR} the HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer.
-->
# MyTest
## Overview
The MyTest model was proposed in [<INSERT PAPER NAME HERE>](<INSERT PAPER LINK HERE>) by <INSERT AUTHORS HERE>.
<INSERT SHORT SUMMARY HERE>
The abstract from the paper is the following:
<INSERT PAPER ABSTRACT HERE>
Tips:
<INSERT TIPS ABOUT MODEL HERE>
This model was contributed by [INSERT YOUR HF USERNAME HERE](https://huggingface.co/<INSERT YOUR HF USERNAME HERE>).
The original code can be found [here](<INSERT LINK TO GITHUB REPO HERE>).
## Usage examples
<INSERT SOME NICE EXAMPLES HERE>
## MyTestConfig
[[autodoc]] MyTestConfig
## MyTestForCausalLM
[[autodoc]] MyTestForCausalLM
## MyTestModel
[[autodoc]] MyTestModel
- forward
## MyTestPreTrainedModel
[[autodoc]] MyTestPreTrainedModel
- forward
## MyTestForSequenceClassification
[[autodoc]] MyTestForSequenceClassification
## MyTestForQuestionAnswering
[[autodoc]] MyTestForQuestionAnswering
## MyTestForTokenClassification
[[autodoc]] MyTestForTokenClassification
"""
)
self.assertFileIsEqual(EXPECTED_DOC, os.path.join(self.DOC_PATH, "model_doc", "my_test.md"))
def test_phi4_with_all_processors(self):
# This is the structure without adding the tokenizers
filenames_to_add = (
("configuration_phi4_multimodal.py", True),
("modeling_phi4_multimodal.py", True),
("tokenization_phi4_multimodal.py", False),
("tokenization_phi4_multimodal_fast.py", False),
("image_processing_phi4_multimodal.py", False),
("image_processing_phi4_multimodal_fast.py", True),
("video_processing_phi4_multimodal.py", False),
("feature_extraction_phi4_multimodal.py", True),
("processing_phi4_multimodal.py", True),
)
# Run the command
_add_new_model_like_internal(
repo_path=Path(self.FAKE_REPO),
old_model_infos=ModelInfos("phi4_multimodal"),
new_lowercase_name="my_test2",
new_model_paper_name="MyTest2",
filenames_to_add=filenames_to_add,
create_fast_image_processor=False,
)
# First assert that all files were created correctly
model_repo = os.path.join(self.MODEL_PATH, "my_test2")
tests_repo = os.path.join(self.TESTS_MODEL_PATH, "my_test2")
self.assertTrue(os.path.isfile(os.path.join(model_repo, "modular_my_test2.py")))
self.assertTrue(os.path.isfile(os.path.join(model_repo, "modeling_my_test2.py")))
self.assertTrue(os.path.isfile(os.path.join(model_repo, "configuration_my_test2.py")))
self.assertTrue(os.path.isfile(os.path.join(model_repo, "image_processing_my_test2_fast.py")))
self.assertTrue(os.path.isfile(os.path.join(model_repo, "feature_extraction_my_test2.py")))
self.assertTrue(os.path.isfile(os.path.join(model_repo, "processing_my_test2.py")))
self.assertTrue(os.path.isfile(os.path.join(model_repo, "__init__.py")))
self.assertTrue(os.path.isfile(os.path.join(self.DOC_PATH, "model_doc", "my_test2.md")))
self.assertTrue(os.path.isfile(os.path.join(tests_repo, "__init__.py")))
self.assertTrue(os.path.isfile(os.path.join(tests_repo, "test_modeling_my_test2.py")))
self.assertTrue(os.path.isfile(os.path.join(tests_repo, "test_feature_extraction_my_test2.py")))
self.assertTrue(os.path.isfile(os.path.join(tests_repo, "test_image_processing_my_test2.py")))
# Now assert the correct imports/auto mappings/toctree were added
self.assertInFile(
"from .my_test2 import *\n",
os.path.join(self.MODEL_PATH, "__init__.py"),
)
self.assertInFile(
'("my_test2", "MyTest2Config"),\n',
os.path.join(self.MODEL_PATH, "auto", "configuration_auto.py"),
)
self.assertInFile(
'("my_test2", "MyTest2"),\n',
os.path.join(self.MODEL_PATH, "auto", "configuration_auto.py"),
)
self.assertInFile(
'("my_test2", "MyTest2Model"),\n',
os.path.join(self.MODEL_PATH, "auto", "modeling_auto.py"),
)
self.assertInFile(
'("my_test2", "MyTest2ForCausalLM"),\n',
os.path.join(self.MODEL_PATH, "auto", "modeling_auto.py"),
)
self.assertInFile(
'("my_test2", (None, "MyTest2ImageProcessorFast")),\n',
os.path.join(self.MODEL_PATH, "auto", "image_processing_auto.py"),
)
self.assertInFile(
'("my_test2", "MyTest2FeatureExtractor"),\n',
os.path.join(self.MODEL_PATH, "auto", "feature_extraction_auto.py"),
)
self.assertInFile(
'("my_test2", "MyTest2Processor"),\n',
os.path.join(self.MODEL_PATH, "auto", "processing_auto.py"),
)
self.assertInFile(
"- local: model_doc/my_test2\n title: MyTest2\n",
os.path.join(self.DOC_PATH, "_toctree.yml"),
)
# Check some exact file creation. For model definition, only check modular as modeling/config/etc... are created
# directly from it
EXPECTED_MODULAR = textwrap.dedent(
f"""
# coding=utf-8
# Copyright {CURRENT_YEAR} the HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..phi4_multimodal.configuration_phi4_multimodal import (
Phi4MultimodalAudioConfig,
Phi4MultimodalConfig,
Phi4MultimodalVisionConfig,
)
from ..phi4_multimodal.feature_extraction_phi4_multimodal import Phi4MultimodalFeatureExtractor
from ..phi4_multimodal.image_processing_phi4_multimodal_fast import (
Phi4MultimodalImageProcessorFast,
Phi4MultimodalImageProcessorKwargs,
)
from ..phi4_multimodal.modeling_phi4_multimodal import (
Phi4MultimodalAttention,
Phi4MultimodalAudioAttention,
Phi4MultimodalAudioConformerEncoderLayer,
Phi4MultimodalAudioConvModule,
Phi4MultimodalAudioDepthWiseSeparableConv1d,
Phi4MultimodalAudioEmbedding,
Phi4MultimodalAudioGluPointWiseConv,
Phi4MultimodalAudioMeanVarianceNormLayer,
Phi4MultimodalAudioMLP,
Phi4MultimodalAudioModel,
Phi4MultimodalAudioNemoConvSubsampling,
Phi4MultimodalAudioPreTrainedModel,
Phi4MultimodalAudioRelativeAttentionBias,
Phi4MultimodalDecoderLayer,
Phi4MultimodalFeatureEmbedding,
Phi4MultimodalForCausalLM,
Phi4MultimodalImageEmbedding,
Phi4MultimodalMLP,
Phi4MultimodalModel,
Phi4MultimodalPreTrainedModel,
Phi4MultimodalRMSNorm,
Phi4MultimodalRotaryEmbedding,
Phi4MultimodalVisionAttention,
Phi4MultimodalVisionEmbeddings,
Phi4MultimodalVisionEncoder,
Phi4MultimodalVisionEncoderLayer,
Phi4MultimodalVisionMLP,
Phi4MultimodalVisionModel,
Phi4MultimodalVisionMultiheadAttentionPoolingHead,
Phi4MultimodalVisionPreTrainedModel,
)
from ..phi4_multimodal.processing_phi4_multimodal import Phi4MultimodalProcessor, Phi4MultimodalProcessorKwargs
class MyTest2VisionConfig(Phi4MultimodalVisionConfig):
pass
class MyTest2AudioConfig(Phi4MultimodalAudioConfig):
pass
class MyTest2Config(Phi4MultimodalConfig):
pass
class MyTest2VisionMLP(Phi4MultimodalVisionMLP):
pass
class MyTest2VisionAttention(Phi4MultimodalVisionAttention):
pass
class MyTest2VisionEncoderLayer(Phi4MultimodalVisionEncoderLayer):
pass
class MyTest2VisionEncoder(Phi4MultimodalVisionEncoder):
pass
class MyTest2VisionPreTrainedModel(Phi4MultimodalVisionPreTrainedModel):
pass
class MyTest2VisionEmbeddings(Phi4MultimodalVisionEmbeddings):
pass
class MyTest2VisionMultiheadAttentionPoolingHead(Phi4MultimodalVisionMultiheadAttentionPoolingHead):
pass
class MyTest2VisionModel(Phi4MultimodalVisionModel):
pass
class MyTest2ImageEmbedding(Phi4MultimodalImageEmbedding):
pass
class MyTest2AudioMLP(Phi4MultimodalAudioMLP):
pass
class MyTest2AudioAttention(Phi4MultimodalAudioAttention):
pass
class MyTest2AudioDepthWiseSeparableConv1d(Phi4MultimodalAudioDepthWiseSeparableConv1d):
pass
class MyTest2AudioGluPointWiseConv(Phi4MultimodalAudioGluPointWiseConv):
pass
class MyTest2AudioConvModule(Phi4MultimodalAudioConvModule):
pass
class MyTest2AudioConformerEncoderLayer(Phi4MultimodalAudioConformerEncoderLayer):
pass
class MyTest2AudioNemoConvSubsampling(Phi4MultimodalAudioNemoConvSubsampling):
pass
class MyTest2AudioRelativeAttentionBias(Phi4MultimodalAudioRelativeAttentionBias):
pass
class MyTest2AudioMeanVarianceNormLayer(Phi4MultimodalAudioMeanVarianceNormLayer):
pass
class MyTest2AudioPreTrainedModel(Phi4MultimodalAudioPreTrainedModel):
pass
class MyTest2AudioModel(Phi4MultimodalAudioModel):
pass
class MyTest2AudioEmbedding(Phi4MultimodalAudioEmbedding):
pass
class MyTest2RMSNorm(Phi4MultimodalRMSNorm):
pass
class MyTest2MLP(Phi4MultimodalMLP):
pass
class MyTest2Attention(Phi4MultimodalAttention):
pass
class MyTest2DecoderLayer(Phi4MultimodalDecoderLayer):
pass
class MyTest2FeatureEmbedding(Phi4MultimodalFeatureEmbedding):
pass
class MyTest2PreTrainedModel(Phi4MultimodalPreTrainedModel):
pass
class MyTest2RotaryEmbedding(Phi4MultimodalRotaryEmbedding):
pass
class MyTest2Model(Phi4MultimodalModel):
pass
class MyTest2ForCausalLM(Phi4MultimodalForCausalLM):
pass
class MyTest2ImageProcessorKwargs(Phi4MultimodalImageProcessorKwargs):
pass
class MyTest2ImageProcessorFast(Phi4MultimodalImageProcessorFast):
pass
class MyTest2FeatureExtractor(Phi4MultimodalFeatureExtractor):
pass
class MyTest2ProcessorKwargs(Phi4MultimodalProcessorKwargs):
pass
class MyTest2Processor(Phi4MultimodalProcessor):
pass
__all__ = [
"MyTest2VisionConfig",
"MyTest2AudioConfig",
"MyTest2Config",
"MyTest2AudioPreTrainedModel",
"MyTest2AudioModel",
"MyTest2VisionPreTrainedModel",
"MyTest2VisionModel",
"MyTest2PreTrainedModel",
"MyTest2Model",
"MyTest2ForCausalLM",
"MyTest2ImageProcessorFast",
"MyTest2FeatureExtractor",
"MyTest2Processor",
]
"""
)
self.assertFileIsEqual(EXPECTED_MODULAR, os.path.join(model_repo, "modular_my_test2.py"))
EXPECTED_INIT = textwrap.dedent(
f"""
# coding=utf-8
# Copyright {CURRENT_YEAR} the HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import _LazyModule
from ...utils.import_utils import define_import_structure
if TYPE_CHECKING:
from .configuration_my_test2 import *
from .feature_extraction_my_test2 import *
from .image_processing_my_test2_fast import *
from .modeling_my_test2 import *
from .processing_my_test2 import *
else:
import sys
_file = globals()["__file__"]
sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
"""
)
self.assertFileIsEqual(EXPECTED_INIT, os.path.join(model_repo, "__init__.py"))
EXPECTED_DOC = textwrap.dedent(
f"""
<!--Copyright {CURRENT_YEAR} the HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer.
-->
# MyTest2
## Overview
The MyTest2 model was proposed in [<INSERT PAPER NAME HERE>](<INSERT PAPER LINK HERE>) by <INSERT AUTHORS HERE>.
<INSERT SHORT SUMMARY HERE>
The abstract from the paper is the following:
<INSERT PAPER ABSTRACT HERE>
Tips:
<INSERT TIPS ABOUT MODEL HERE>
This model was contributed by [INSERT YOUR HF USERNAME HERE](https://huggingface.co/<INSERT YOUR HF USERNAME HERE>).
The original code can be found [here](<INSERT LINK TO GITHUB REPO HERE>).
## Usage examples
<INSERT SOME NICE EXAMPLES HERE>
## MyTest2VisionConfig
[[autodoc]] MyTest2VisionConfig
## MyTest2AudioConfig
[[autodoc]] MyTest2AudioConfig
## MyTest2Config
[[autodoc]] MyTest2Config
## MyTest2AudioPreTrainedModel
[[autodoc]] MyTest2AudioPreTrainedModel
- forward
## MyTest2AudioModel
[[autodoc]] MyTest2AudioModel
- forward
## MyTest2VisionPreTrainedModel
[[autodoc]] MyTest2VisionPreTrainedModel
- forward
## MyTest2VisionModel
[[autodoc]] MyTest2VisionModel
- forward
## MyTest2PreTrainedModel
[[autodoc]] MyTest2PreTrainedModel
- forward
## MyTest2Model
[[autodoc]] MyTest2Model
- forward
## MyTest2ForCausalLM
[[autodoc]] MyTest2ForCausalLM
## MyTest2ImageProcessorFast
[[autodoc]] MyTest2ImageProcessorFast
## MyTest2FeatureExtractor
[[autodoc]] MyTest2FeatureExtractor
## MyTest2Processor
[[autodoc]] MyTest2Processor
"""
)
self.assertFileIsEqual(EXPECTED_DOC, os.path.join(self.DOC_PATH, "model_doc", "my_test2.md"))
| TestAddNewModelLike |
python | celery__celery | celery/contrib/testing/manager.py | {
"start": 703,
"end": 774
} | class ____(Exception):
"""Signifies the end of something."""
| Sentinel |
python | django__django | tests/middleware_exceptions/middleware.py | {
"start": 787,
"end": 951
} | class ____(BaseMiddleware):
async def process_exception(self, request, exception):
return HttpResponse("Exception caught")
| AsyncProcessExceptionMiddleware |
python | google__pytype | pytype/tests/test_splits1.py | {
"start": 104,
"end": 21111
} | class ____(test_base.BaseTest):
"""Tests for if-splitting."""
def test_restrict_none(self):
ty = self.Infer("""
def foo(x):
y = str(x) if x else None
if y:
# y can't be None here!
return y
else:
return 123
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import Union
def foo(x) -> Union[int, str]: ...
""",
)
def test_restrict_true(self):
ty = self.Infer("""
def foo(x):
y = str(x) if x else True
if y:
return 123
else:
return y
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import Union
def foo(x) -> Union[int, str]: ...
""",
)
def test_related_variable(self):
ty = self.Infer("""
def foo(x):
# y is Union[str, None]
# z is Union[float, True]
if x:
y = str(x)
z = 1.23
else:
y = None
z = True
if y:
# We only return z when y is true, so z must be a float here.
return z
return 123
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import Union
def foo(x) -> Union[float, int]: ...
""",
)
def test_nested_conditions(self):
ty = self.Infer("""
def foo(x1, x2):
y1 = str(x1) if x1 else 0
if y1:
if x2:
return y1 # The y1 condition is still active here.
return "abc"
""")
self.assertTypesMatchPytd(
ty,
"""
def foo(x1, x2) -> str: ...
""",
)
def test_remove_condition_after_merge(self):
ty = self.Infer("""
def foo(x):
y = str(x) if x else None
if y:
# y can't be None here.
z = 123
# But y can be None here.
return y
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import Union
def foo(x) -> Union[None, str]: ...
""",
)
def test_unsatisfiable_condition(self):
# Check both sides of an "if". If unsatisfiable code is executed then
# it will result in an error due to unknown_method() and widen the return
# signature to a Union.
#
# If a constant such as 0 or 1 is directly used as the condition of an
# "if", then the compiler won't even generate bytecode for the branch
# that isn't taken. Thus the constant is first assigned to a variable and
# the variable is used as the condition. This is enough to fool the
# compiler but pytype still figures out that one path is dead.
ty = self.Infer("""
def f1(x):
c = 0
if c:
unknown_method()
return 123
else:
return "hello"
def f2(x):
c = 1
if c:
return 123
else:
unknown_method()
return "hello"
def f3(x):
c = 0
if c:
return 123
else:
return "hello"
def f4(x):
c = 1
if c:
return 123
else:
return "hello"
""")
self.assertTypesMatchPytd(
ty,
"""
def f1(x) -> str: ...
def f2(x) -> int: ...
def f3(x) -> str: ...
def f4(x) -> int: ...
""",
)
def test_sources_propagated_through_call(self):
ty = self.Infer("""
class Foo:
def method(self):
return 1
class Bar:
def method(self):
return "x"
def foo(x):
if x:
obj = Foo()
else:
obj = Bar()
if isinstance(obj, Foo):
return obj.method()
return None
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import Union
class Foo:
def method(self) -> int: ...
class Bar:
def method(self) -> str: ...
def foo(x) -> Union[None, int]: ...
""",
)
def test_short_circuit(self):
# Unlike normal if statement, the and/or short circuit logic does
# not appear to be optimized away by the compiler. Therefore these
# simple tests do in fact execute if-splitting logic.
ty = self.Infer("""
def int_t(x): return 1 or x
def int_f(x): return 0 and x
def str_t(x): return "s" or x
def str_f(x): return "" and x
def bool_t(x): return True or x
def bool_f(x): return False and x
def tuple_t(x): return (1, ) or x
def tuple_f(x): return () and x
def dict_f(x): return {} and x
def list_f(x): return [] and x
def set_f(x): return set() and x
def frozenset_f(x): return frozenset() and x
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import Dict, List, Tuple
def int_t(x) -> int: ...
def int_f(x) -> int: ...
def str_t(x) -> str: ...
def str_f(x) -> str: ...
def bool_t(x) -> bool: ...
def bool_f(x) -> bool: ...
def tuple_t(x) -> Tuple[int]: ...
def tuple_f(x) -> Tuple[()]: ...
def dict_f(x) -> Dict[nothing, nothing]: ...
def list_f(x) -> List[nothing]: ...
def set_f(x) -> set[nothing]: ...
def frozenset_f(x) -> frozenset[nothing]: ...
""",
)
def test_dict(self):
# Dicts start out as empty, which is compatible with False and not
# compatible with True. Any operation that possibly adds an item will
# make the dict ambiguous - compatible with both True and False.
ty = self.Infer("""
def f1():
d = {}
return 123 if d else "hello"
def f2(x):
d = {}
d[x] = x
return 123 if d else "hello"
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import Union
def f1() -> str: ...
def f2(x) -> Union[int, str]: ...
""",
)
def test_dict_update(self):
ty = self.Infer("""
def f1():
d = {}
d.update({})
return 123 if d else "hello"
def f2():
d = {}
d.update({"a": 1})
return 123 if d else "hello"
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import Union
def f1() -> str: ...
def f2() -> int: ...
""",
)
def test_dict_update_from_kwargs(self):
ty = self.Infer("""
def f1():
d = {}
d.update()
return 123 if d else "hello"
def f2():
d = {}
d.update(a=1)
return 123 if d else "hello"
""")
self.assertTypesMatchPytd(
ty,
"""
def f1() -> str: ...
def f2() -> int: ...
""",
)
def test_dict_update_wrong_count(self):
ty, _ = self.InferWithErrors("""
def f1():
d = {}
d.update({"a": 1}, {"b": 2}) # wrong-arg-count
return 123 if d else "hello"
def f2():
d = {}
d.update({"a": 1}, {"b": 2}, c=3) # wrong-arg-count
return 123 if d else "hello"
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import Union
def f1() -> Union[str, int]: ...
def f2() -> Union[str, int]: ...
""",
)
def test_dict_update_wrong_type(self):
ty, _ = self.InferWithErrors("""
def f():
d = {}
d.update(1) # wrong-arg-types
return 123 if d else "hello"
""")
self.assertTypesMatchPytd(
ty,
"""
def f() -> int | str: ...
""",
)
def test_isinstance(self):
ty = self.Infer("""
# Always returns a bool.
def sig(x): return isinstance(x, str)
# Cases where isinstance() can be determined, if-split will
# narrow the return to a single type.
def d1(): return "y" if isinstance("s", str) else 0
def d2(): return "y" if isinstance("s", object) else 0
def d3(): return "y" if isinstance("s", int) else 0
def d4(): return "y" if isinstance("s", (float, str)) else 0
# Cases where isinstance() is ambiguous.
def a1(x): return "y" if isinstance(x, str) else 0
def a2(x):
cls = int if __random__ else str
return "y" if isinstance("a", cls) else 0
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import Union
def sig(x) -> bool: ...
def d1() -> str: ...
def d2() -> str: ...
def d3() -> int: ...
def d4() -> str: ...
def a1(x) -> Union[int, str]: ...
def a2(x) -> Union[int, str]: ...
""",
)
def test_is_subclass(self):
ty = self.Infer("""
# Always return a bool
def sig(x): return issubclass(x, object)
# Classes for testing
class A: pass
class B(A): pass
class C: pass
# Check the if-splitting based on issubclass
def d1(): return "y" if issubclass(B, A) else 0
def d2(): return "y" if issubclass(B, object) else 0
def d3(): return "y" if issubclass(B, C) else 0
def d4(): return "y" if issubclass(B, (C, A)) else 0
def d5(): return "y" if issubclass(B, ((C, str), A, (int, object))) else 0
def d6(): return "y" if issubclass(B, ((C, str), int, (float, A))) else 0
# Ambiguous results
def a1(x): return "y" if issubclass(x, A) else 0
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import Union
def sig(x) -> bool: ...
def d1() -> str: ...
def d2() -> str: ...
def d3() -> int: ...
def d4() -> str: ...
def d5() -> str: ...
def d6() -> str: ...
def a1(x) -> Union[int, str]: ...
class A:
pass
class B(A):
pass
class C:
pass
""",
)
def test_hasattr_builtin(self):
ty = self.Infer("""
# Always returns a bool.
def sig(x): return hasattr(x, "upper")
# Cases where hasattr() can be determined, if-split will
# narrow the return to a single type.
def d1(): return "y" if hasattr("s", "upper") else 0
def d2(): return "y" if hasattr("s", "foo") else 0
# We should follow the chain of superclasses
def d3(): return "y" if hasattr("s", "__repr__") else 0
# Cases where hasattr() is ambiguous.
def a1(x): return "y" if hasattr(x, "upper") else 0
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import Union
def sig(x) -> bool: ...
def d1() -> str: ...
def d2() -> int: ...
def d3() -> str: ...
def a1(x) -> Union[int, str]: ...
""",
)
def test_split(self):
ty = self.Infer("""
def f2(x):
if x:
return x
else:
return 3j
def f1(x):
y = 1 if x else 0
if y:
return f2(y)
else:
return None
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import Any, Optional, TypeVar, Union
_T0 = TypeVar("_T0")
def f2(x: _T0) -> Union[_T0, complex]: ...
def f1(x) -> Optional[int]: ...
""",
)
def test_dead_if(self):
ty = self.Infer("""
def foo(x):
x = None
if x is not None:
x.foo()
return x
""")
self.assertTypesMatchPytd(
ty,
"""
def foo(x) -> None: ...
""",
)
def test_unary_not(self):
ty = self.Infer("""
def not_t(x):
x = None
if not x:
return 1
else:
x.foo()
return "a"
def not_f(x):
x = True
if not x:
x.foo()
return 1
else:
return "a"
def not_ambiguous(x):
if not x:
return 1
else:
return "a"
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import Union
def not_t(x) -> int: ...
def not_f(x) -> str: ...
def not_ambiguous(x) -> Union[int, str]: ...
""",
)
def test_isinstance_object_without_class(self):
ty = self.Infer("""
def foo(x):
return 1 if isinstance(dict, type) else "x"
""")
self.assertTypesMatchPytd(
ty,
"""
def foo(x) -> int: ...
""",
)
def test_double_assign(self):
self.Check("""
x = 1
x = None
if x is not None:
x.foo()
""")
def test_infinite_loop(self):
self.Check("""
class A:
def __init__(self):
self.members = []
def add(self):
self.members.append(42)
class B:
def __init__(self):
self._map = {}
def _foo(self):
self._map[0] = A()
while True:
pass
def add2(self):
self._map[0].add()
b = B()
b._foo()
b.add2()
""")
def test_dict_contains(self):
"""Assert that we can determine whether a dict contains a key."""
self.Check("""
d1 = {"x": 42}
if "x" in d1:
d1["x"]
else:
d1["nonsense"] # Dead code
d2 = {}
if "x" in d2:
d2["nonsense"] # Dead code
d3 = {__any_object__: __any_object__}
if "x" in d3:
d3["x"]
else:
d3["y"]
""")
def test_dict_does_not_contain(self):
"""Assert that we can determine whether a dict does not contain a key."""
self.Check("""
d1 = {"x": 42}
if "x" not in d1:
d1["nonsense"] # Dead code
else:
d1["x"]
d2 = {}
if "x" not in d2:
pass
else:
d2["nonsense"] # Dead code
d3 = {__any_object__: __any_object__}
if "x" not in d3:
d3["y"]
else:
d3["x"]
""")
def test_dict_maybe_contains(self):
"""Test that we can handle more complex cases involving dict membership."""
ty = self.Infer("""
if __random__:
x = {"a": 1, "b": 2}
else:
x = {"b": 42j}
if "a" in x:
v1 = x["b"]
if "a" not in x:
v2 = x["b"]
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import Dict, Union
x = ... # type: Dict[str, Union[int, complex]]
v1 = ... # type: int
v2 = ... # type: complex
""",
)
def test_contains_coerce_to_bool(self):
ty = self.Infer("""
class A:
def __contains__(self, x):
return 1
class B:
def __contains__(self, x):
return 0
x1 = "" if "a" in A() else u""
x2 = 3 if "a" not in A() else 42j
y1 = 3.14 if "b" in B() else 16j
y2 = True if "b" not in B() else 4.2
""")
self.assertTypesMatchPytd(
ty,
"""
class A:
def __contains__(self, x) -> int: ...
class B:
def __contains__(self, x) -> int: ...
x1 = ... # type: str
x2 = ... # type: complex
y1 = ... # type: complex
y2 = ... # type: bool
""",
)
def test_skip_over_midway_if(self):
ty = self.Infer("""
def f(r):
y = "foo"
if __random__:
x = True
else:
x = False
if x:
return y
else:
return None
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import Optional
def f(r) -> Optional[str]: ...
""",
)
def test_dict_eq(self):
ty = self.Infer("""
if __random__:
x = {"a": 1}
z = 42
else:
x = {"b": 1}
z = 42j
y = {"b": 1}
if x == y:
v1 = z
if x != y:
v2 = z
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import Dict, Union
x = ... # type: Dict[str, int]
y = ... # type: Dict[str, int]
z = ... # type: Union[int, complex]
v1 = ... # type: complex
v2 = ... # type: Union[int, complex]
""",
)
def test_tuple_eq(self):
ty = self.Infer("""
if __random__:
x = (1,)
z = ""
else:
x = (1, 2)
z = 3.14
y = (1, 2)
if x == y:
v1 = z
if x != y:
v2 = z
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import Tuple, Union
x: Tuple[int, ...]
y: Tuple[int, int]
z: Union[str, float]
v1: float
v2: str
""",
)
def test_primitive_eq(self):
ty = self.Infer("""
if __random__:
x = "a"
z = 42
else:
x = "b"
z = 3.14
y = "a"
if x == y:
v1 = z
if x != y:
v2 = z
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import Union
x = ... # type: str
y = ... # type: str
z = ... # type: Union[int, float]
v1 = ... # type: int
v2 = ... # type: float
""",
)
def test_primitive_not_eq(self):
self.Check("""
x = "foo" if __random__ else 42
if x == "foo":
x.upper()
""")
def test_builtin_full_name_check(self):
# Don't get confused by a class named int
self.InferWithErrors("""
class int():
pass
x = "foo" if __random__ else int()
if x == "foo":
x.upper() # attribute-error
""")
def test_type_parameter_in_branch(self):
ty = self.Infer("""
if __random__:
x = {"a": 1, "b": 42}
else:
x = {"b": 42j}
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import Dict, Union
x = ... # type: Dict[str, Union[int, complex]]
""",
)
def test_none_or_tuple(self):
# This tests the attribute retrieval code in vm.py:_get_iter
self.Check("""
foo = (0, 0)
if __random__:
foo = None
if foo:
a, b = foo
""")
def test_cmp_is_pytd_class(self):
self.Check("""
x = bool
if x is str:
name_error
if x is not bool:
name_error
""")
def test_cmp_is_tuple_type(self):
self.Check("""
x = (1,)
y = (1, 2)
z = None # type: type[tuple]
if type(x) is not type(y):
name_error
if type(x) is not z:
name_error
""")
def test_cmp_is_function_type(self):
self.Check("""
def f(): pass
def g(x): return x
if type(f) is not type(g):
name_error
""")
def test_cmp_is_interpreter_class(self):
self.Check("""
class X: pass
class Y: pass
if X is Y:
name_error
if X is not X:
name_error
""")
def test_cmp_is_class_name_collision(self):
with test_utils.Tempdir() as d:
d.create_file(
"foo.pyi",
"""
class X: ...
""",
)
self.Check(
"""
import foo
class X: pass
if foo.X is X:
name_error
""",
pythonpath=[d.path],
)
def test_get_iter(self):
self.Check("""
def f():
z = (1,2) if __random__ else None
if not z:
return
x, y = z
""")
def test_list_comprehension(self):
self.Check("""
widgets = [None, 'hello']
wotsits = [x for x in widgets if x]
for x in wotsits:
x.upper()
""")
def test_primitive(self):
with test_utils.Tempdir() as d:
d.create_file(
"foo.pyi",
"""
class Value(int):
pass
value1 = ... # type: int
value2 = ... # type: Value
""",
)
self.CheckWithErrors(
"""
import foo
if foo.value1 == foo.value2:
name_error # name-error
""",
pythonpath=[d.path],
)
def test_list_element(self):
ty = self.Infer("""
def f():
x = None if __random__ else 42
return [x] if x else [42]
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import List
def f() -> List[int]: ...
""",
)
def test_keep_constant(self):
self.Check("""
use_option = False
if use_option:
name_error
""")
def test_function_and_class_truthiness(self):
self.Check("""
def f(x):
return {} if x else []
def g():
return f(lambda: True).values()
def h():
return f(object).values()
""")
def test_object_truthiness(self):
ty = self.Infer("""
x = object() and True
""")
self.assertTypesMatchPytd(
ty,
"""
x: bool
""",
)
def test_override_len(self):
ty = self.Infer("""
class A:
def __len__(self):
return 42
x = A() and True
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import Union
class A:
def __len__(self) -> int: ...
x: Union[A, bool]
""",
)
def test_container_loop(self):
self.Check("""
from typing import Optional
def f(x):
# type: (Optional[str]) -> str
lst = []
if x:
lst.append(x)
for _ in range(5):
lst.append('hello')
return lst[0]
""")
if __name__ == "__main__":
test_base.main()
| SplitTest |
python | kamyu104__LeetCode-Solutions | Python/longest-subarray-with-maximum-bitwise-and.py | {
"start": 48,
"end": 403
} | class ____(object):
def longestSubarray(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
mx = max(nums)
result, l = 1, 0
for x in nums:
if x == mx:
l += 1
result = max(result, l)
else:
l = 0
return result
| Solution |
python | google__jax | jax/_src/mesh.py | {
"start": 15117,
"end": 15337
} | class ____(threading.local):
def __init__(self):
self.stack = [EMPTY_ENV]
self.env = self.stack[-1]
thread_resources = _ThreadResourcesLocalState()
@dataclasses.dataclass(frozen=True)
| _ThreadResourcesLocalState |
python | dagster-io__dagster | examples/docs_projects/project_components_pdf_extraction/project_components_pdf_extraction/lib/pdf_extraction.py | {
"start": 185,
"end": 1885
} | class ____(dg.Scaffolder):
"""Scaffolds a PDF extraction component with configuration and example PDFs."""
def scaffold(self, request: dg.ScaffoldRequest) -> None:
"""Generate scaffold code for PdfExtraction component.
Args:
request: The scaffold request containing type name, target path, format, project root and optional params
"""
# Default configuration values
config = {
"pdf_dir": "source_pdfs",
"output_dir": "output",
"language": "eng",
"dpi": 300,
"openai_model": "gpt-4-turbo",
"validation_score": 7,
"asset_specs": [],
}
# Create the component YAML using scaffold_component
dg.scaffold_component(request, config)
@property
def description(self) -> str:
return """Scaffolds a PdfExtraction component that:
1. Processes multiple PDF documents from a directory
2. Converts PDFs to images
3. Extracts text using OCR
4. Validates extraction quality using OpenAI
Required configuration:
- pdf_dir: Directory containing PDF files to process
- output_dir: Base directory for output files
- openai_api_key: API key for OpenAI validation (uses environment variable)
Optional configuration:
- language: OCR language code (default: 'eng')
- dpi: Image DPI for PDF conversion (default: 300)
- openai_model: OpenAI model to use (default: 'gpt-4-turbo')
- validation_score: Minimum validation score threshold (default: 7)
"""
@dg.scaffold_with(PdfExtractionScaffolder)
@dataclass
| PdfExtractionScaffolder |
python | sympy__sympy | sympy/integrals/manualintegrate.py | {
"start": 15875,
"end": 16193
} | class ____(OrthogonalPolyRule):
a: Expr
def eval(self) -> Expr:
n, a, x = self.n, self.a, self.variable
return Piecewise(
(gegenbauer(n + 1, a - 1, x)/(2*(a - 1)), Ne(a, 1)),
(chebyshevt(n + 1, x)/(n + 1), Ne(n, -1)),
(S.Zero, True))
@dataclass
| GegenbauerRule |
python | huggingface__transformers | tests/models/dots1/test_modeling_dots1.py | {
"start": 1668,
"end": 1799
} | class ____(CausalLMModelTest, unittest.TestCase):
model_tester_class = Dots1ModelTester
@require_torch_accelerator
| Dots1ModelTest |
python | facelessuser__pymdown-extensions | tests/test_extensions/test_snippets.py | {
"start": 18348,
"end": 18909
} | class ____(util.MdCase):
"""Test snippet file case."""
extension = [
'pymdownx.snippets',
]
extension_configs = {
'pymdownx.snippets': {
'base_path': [os.path.join(BASE, '_snippets')],
'auto_append': ['b.txt']
}
}
def test_auto_append(self):
"""Test auto append."""
self.check_markdown(
R'''
Test
''',
'''
<p>Test</p>
<p>Snippet</p>
''',
True
)
| TestSnippetsAutoAppend |
python | tensorflow__tensorflow | tensorflow/python/tpu/tpu_infeed_test.py | {
"start": 920,
"end": 5607
} | class ____(test.TestCase):
def testConstructor(self):
"""Tests that the constructor can be called with different arguments."""
i = tpu_feed.InfeedQueue(number_of_tuple_elements=2)
self.assertEqual(i.number_of_tuple_elements, 2)
self.assertEqual(i.tuple_types, None)
self.assertEqual(i.tuple_shapes, None)
self.assertEqual(i.number_of_shards, None)
i = tpu_feed.InfeedQueue(
tuple_types=[dtypes.float32, dtypes.int32, dtypes.int32])
self.assertEqual(i.number_of_tuple_elements, 3)
self.assertEqual(i.tuple_types,
[dtypes.float32, dtypes.int32, dtypes.int32])
self.assertEqual(i.tuple_shapes, None)
self.assertEqual(i.number_of_shards, None)
i = tpu_feed.InfeedQueue(tuple_shapes=[[1], [2, 3]])
self.assertEqual(i.number_of_tuple_elements, 2)
self.assertEqual(i.tuple_types, None)
self.assertEqual(i.tuple_shapes, [[1], [2, 3]])
self.assertEqual(i.number_of_shards, None)
i = tpu_feed.InfeedQueue(shard_dimensions=[1, 0, 7])
self.assertEqual(i.number_of_tuple_elements, 3)
self.assertEqual(i.tuple_types, None)
self.assertEqual(i.tuple_shapes, None)
self.assertEqual([p.shard_dimension
for p in i.sharding_policies], [1, 0, 7])
with self.assertRaises(ValueError):
i = tpu_feed.InfeedQueue()
with self.assertRaises(ValueError):
i = tpu_feed.InfeedQueue(
number_of_tuple_elements=2, tuple_types=[dtypes.float32])
with self.assertRaises(ValueError):
i = tpu_feed.InfeedQueue(number_of_tuple_elements=2, tuple_shapes=[[1]])
with self.assertRaises(ValueError):
i = tpu_feed.InfeedQueue(number_of_tuple_elements=2, shard_dimensions=[1])
with self.assertRaises(ValueError):
i = tpu_feed.InfeedQueue(tuple_shapes=[[1], [2, 3]], shard_dimensions=[1])
def testModification(self):
"""Tests modification of the queue post-construction."""
i = tpu_feed.InfeedQueue(number_of_tuple_elements=2)
i.set_tuple_types([dtypes.float32, dtypes.int32])
self.assertEqual(i.tuple_types, [dtypes.float32, dtypes.int32])
i.set_tuple_types([dtypes.float32, dtypes.float32])
self.assertEqual(i.tuple_types, [dtypes.float32, dtypes.float32])
with self.assertRaises(ValueError):
i.set_tuple_types([dtypes.float32])
i.set_tuple_shapes([[1], [2, 3]])
self.assertEqual(i.tuple_shapes, [[1], [2, 3]])
i.set_tuple_shapes([[1, 2], [3, 4]])
self.assertEqual(i.tuple_shapes, [[1, 2], [3, 4]])
with self.assertRaises(ValueError):
i.set_tuple_shapes([[1, 2]])
i.set_number_of_shards(2)
self.assertEqual(i.number_of_shards, 2)
i.set_number_of_shards(3)
self.assertEqual(i.number_of_shards, 3)
t1 = constant_op.constant(1, dtypes.int32, shape=[6])
t2 = constant_op.constant(2.0, dtypes.float32, shape=[3, 18])
i.set_configuration_from_input_tensors([t1, t2])
self.assertEqual(i.tuple_shapes, [[6], [3, 18]])
self.assertEqual(i.tuple_types, [dtypes.int32, dtypes.float32])
i.set_configuration_from_sharded_input_tensors([[t2, t1], [t2, t1]])
self.assertEqual(i.number_of_shards, 2)
self.assertEqual(i.tuple_shapes, [[6, 18], [12]])
self.assertEqual(i.tuple_types, [dtypes.float32, dtypes.int32])
i.set_shard_dimensions([1, 0])
i.set_number_of_shards(3)
with self.assertRaises(ValueError):
i.set_number_of_shards(4)
def testFreezing(self):
"""Tests freezing the queue."""
i = tpu_feed.InfeedQueue(number_of_tuple_elements=2)
t1 = constant_op.constant(1, dtypes.int32, shape=[2])
t2 = constant_op.constant(2.0, dtypes.float32, shape=[2, 4])
i.set_configuration_from_sharded_input_tensors([[t2, t1], [t2, t1]])
self.assertEqual(i.number_of_shards, 2)
self.assertEqual(i.tuple_shapes, [[4, 4], [4]])
self.assertEqual(i.tuple_types, [dtypes.float32, dtypes.int32])
self.assertEqual(i.shard_dimensions, [0, 0])
i.freeze()
i.set_number_of_shards(2)
i.set_tuple_shapes([[4, 4], [4]])
i.set_tuple_types([dtypes.float32, dtypes.int32])
i.set_shard_dimensions([0, 0])
with self.assertRaises(ValueError):
i.set_number_of_shards(1)
with self.assertRaises(ValueError):
i.set_tuple_shapes([[8, 8], [8]])
with self.assertRaises(ValueError):
i.set_tuple_types([dtypes.int32, dtypes.float32])
with self.assertRaises(ValueError):
i.set_shard_dimensions([1, 0])
self.assertEqual(i.number_of_shards, 2)
self.assertEqual(i.tuple_shapes, [[4, 4], [4]])
self.assertEqual(i.tuple_types, [dtypes.float32, dtypes.int32])
self.assertEqual(i.shard_dimensions, [0, 0])
if __name__ == '__main__':
test.main()
| InfeedTest |
python | apache__airflow | task-sdk/src/airflow/sdk/api/datamodels/_generated.py | {
"start": 14018,
"end": 14363
} | class ____(BaseModel):
"""
Schema for AssetModel used in AssetEventDagRunReference.
"""
model_config = ConfigDict(
extra="forbid",
)
name: Annotated[str, Field(title="Name")]
uri: Annotated[str, Field(title="Uri")]
extra: Annotated[dict[str, JsonValue], Field(title="Extra")]
| AssetReferenceAssetEventDagRun |
python | getsentry__sentry | src/sentry/interfaces/spans.py | {
"start": 1195,
"end": 1668
} | class ____(Interface):
"""
Contains a list of Span interfaces
"""
display_score = 1950
score = 1950
path = "spans"
@classmethod
def to_python(cls, data, **kwargs):
spans = [Span.to_python(data[i], **kwargs) for i, span in enumerate(data)]
return super().to_python({"spans": spans}, **kwargs)
def __iter__(self):
return iter(self.spans)
def to_json(self):
return [span.to_json() for span in self]
| Spans |
python | google__jax | jax/_src/earray.py | {
"start": 988,
"end": 4405
} | class ____(basearray.Array):
__slots__ = ['aval', '_data']
__hash__ = None # type: ignore[assignment]
__array_priority__ = 100
def __init__(self, aval, data):
self.aval = aval
self._data = data
def block_until_ready(self):
_ = self._data.block_until_ready()
return self
def copy_to_host_async(self):
self._data.copy_to_host_async()
def copy(self):
return EArray(self.aval, self._data.copy())
def __repr__(self):
return 'E' + repr(self._data)
def __iter__(self):
if self.ndim == 0: raise TypeError('iteration over a 0-d array')
raise NotImplementedError
# forward to aval
shape = property(lambda self: self.aval.shape) # type: ignore[assignment]
dtype = property(lambda self: self.aval.dtype) # type: ignore[assignment]
# computed from shape and dtype
ndim = property(lambda self: len(self.aval.shape)) # type: ignore[assignment]
size = property(lambda self: math.prod(self.aval.shape)) # type: ignore[assignment]
itemsize = property(lambda self: self.aval.dtype.itemsize) # type: ignore[assignment]
def __len__(self):
if self.ndim == 0: raise TypeError('len() of unsized object')
return self.shape[0]
# forward to self._data
devices = property(lambda self: self._data.devices) # type: ignore[assignment]
_committed = property(lambda self: self._data._committed)
is_fully_addressable = property(lambda self: self._data.is_fully_addressable) # type: ignore[assignment]
is_fully_replicated = property(lambda self: self._data.is_fully_replicated) # type: ignore[assignment]
delete = property(lambda self: self._data.delete) # type: ignore[assignment]
is_deleted = property(lambda self: self._data.is_deleted) # type: ignore[assignment]
on_device_size_in_bytes = property(lambda self: self._data.on_device_size_in_bytes) # type: ignore[assignment]
unsafe_buffer_pointer = property(lambda self: self._data.unsafe_buffer_pointer) # type: ignore[assignment]
# defer to extended dtype rules
@property
def sharding(self):
phys_sharding = self._data.sharding
return sharding_impls.logical_sharding(self.shape, self.dtype, phys_sharding)
@property
def committed(self):
return self._data.committed
@property
def device(self):
if isinstance(self._data.sharding, sharding_impls.SingleDeviceSharding):
return self._data.device
return self.sharding
# TODO(mattjj): not implemented below here, need more methods from ArrayImpl
def addressable_data(self, index: int) -> EArray:
raise NotImplementedError
@property
def addressable_shards(self):
raise NotImplementedError
@property
def global_shards(self):
raise NotImplementedError
# TODO(mattjj): _set_array_base_attributes
def _earray_shard_arg_handler(xs, shardings, layouts, copy_semantics):
arrs = [x._data for x in xs]
phys_shardings = [sharding_impls.physical_sharding(x.aval, sharding)
for x, sharding in zip(xs, shardings)]
# TODO(yashkatariya): `layouts` should be converted to physical layouts.
return pxla.shard_args(phys_shardings, layouts, copy_semantics, arrs)
pxla.shard_arg_handlers[EArray] = _earray_shard_arg_handler
core.pytype_aval_mappings[EArray] = lambda x: x.aval
dtypes.canonicalize_value_handlers[EArray] = lambda x: x
tree_util.dispatch_registry.register_node(
EArray, lambda x: ((x._data,), x.aval), lambda a, xs: EArray(a, xs[0]))
| EArray |
python | pytorch__pytorch | torch/fx/experimental/rewriter.py | {
"start": 4173,
"end": 5495
} | class ____(Tracer):
def trace(
self,
root: Union[torch.nn.Module, Callable],
concrete_args: Optional[dict[str, Any]] = None,
) -> Graph:
return super().trace(_rewrite(root), concrete_args)
def _rewrite(fn: Union[torch.nn.Module, Callable]) -> Union[torch.nn.Module, Callable]:
if isinstance(fn, torch.nn.Module):
# Rewrite this module's `forward` as well as the `forward`s of
# all of this module's recursive descendents. Return the new,
# rewritten module hierarchy.
def rewrite_module(m: torch.nn.Module):
class RewrittenModule(torch.nn.Module):
def __init__(self, orig):
super().__init__()
for k, v in orig.__dict__.items():
if isinstance(v, torch.nn.Module):
self.__dict__[k] = copy.copy(rewrite_module(v))
else:
self.__dict__[k] = copy.copy(v)
RewrittenModule.forward = AST_Rewriter().rewrite(
cast(FunctionType, m.forward)
)
return RewrittenModule(m)
return rewrite_module(fn)
else:
# Rewrite this single free function
return AST_Rewriter().rewrite(cast(FunctionType, fn))
| RewritingTracer |
python | dagster-io__dagster | python_modules/libraries/dagster-azure/dagster_azure/adls2/io_manager.py | {
"start": 963,
"end": 4808
} | class ____(UPathIOManager):
def __init__(
self,
file_system: str,
adls2_client: DataLakeServiceClient,
blob_client: BlobServiceClient,
lease_client_constructor: Union[type[DataLakeLeaseClient], type[BlobLeaseClient]],
prefix: str = "dagster",
lease_duration: int = 60,
):
if lease_duration != -1 and (lease_duration < 15 or lease_duration > 60):
raise ValueError("lease_duration must be -1 (unlimited) or between 15 and 60")
self.adls2_client = adls2_client
self.file_system_client = self.adls2_client.get_file_system_client(file_system)
# We also need a blob client to handle copying as ADLS doesn't have a copy API yet
self.blob_client = blob_client
self.blob_container_client = self.blob_client.get_container_client(file_system)
self.prefix = check.str_param(prefix, "prefix")
self.lease_client_constructor = lease_client_constructor
self.lease_duration = lease_duration
self.file_system_client.get_file_system_properties()
super().__init__(base_path=UPath(self.prefix))
def get_op_output_relative_path(self, context: Union[InputContext, OutputContext]) -> UPath:
parts = context.get_identifier()
run_id = parts[0]
output_parts = parts[1:]
return UPath("storage", run_id, "files", *output_parts)
def get_loading_input_log_message(self, path: UPath) -> str:
return f"Loading ADLS2 object from: {self._uri_for_path(path)}"
def get_writing_output_log_message(self, path: UPath) -> str:
return f"Writing ADLS2 object at: {self._uri_for_path(path)}"
def unlink(self, path: UPath) -> None:
file_client = self.file_system_client.get_file_client(path.as_posix())
with self._acquire_lease(file_client, is_rm=True) as lease:
file_client.delete_file(lease=lease, recursive=True)
def make_directory(self, path: UPath) -> None:
# It is not necessary to create directories in ADLS2
return None
def path_exists(self, path: UPath) -> bool:
try:
self.file_system_client.get_file_client(path.as_posix()).get_file_properties()
except ResourceNotFoundError:
return False
return True
def _uri_for_path(self, path: UPath, protocol: str = "abfss://") -> str:
return f"{protocol}{self.file_system_client.file_system_name}@{self.file_system_client.account_name}.dfs.core.windows.net/{path.as_posix()}"
@contextmanager
def _acquire_lease(self, client: Any, is_rm: bool = False) -> Iterator[str]:
lease_client = self.lease_client_constructor(client=client)
try:
# Unclear why this needs to be type-ignored
lease_client.acquire(lease_duration=self.lease_duration)
yield lease_client.id
finally:
# cannot release a lease on a file that no longer exists, so need to check
if not is_rm:
lease_client.release()
def load_from_path(self, context: InputContext, path: UPath) -> Any:
if context.dagster_type.typing_type == type(None):
return None
file = self.file_system_client.get_file_client(path.as_posix())
stream = file.download_file()
return pickle.loads(stream.readall())
def dump_to_path(self, context: OutputContext, obj: Any, path: UPath) -> None:
if self.path_exists(path):
context.log.warning(f"Removing existing ADLS2 key: {path}")
self.unlink(path)
pickled_obj = pickle.dumps(obj, PICKLE_PROTOCOL)
file = self.file_system_client.create_file(path.as_posix())
with self._acquire_lease(file) as lease:
file.upload_data(pickled_obj, lease=lease, overwrite=True)
| PickledObjectADLS2IOManager |
python | pypa__setuptools | setuptools/_scripts.py | {
"start": 6624,
"end": 8796
} | class ____(ScriptWriter):
command_spec_class = WindowsCommandSpec
@classmethod
def best(cls):
"""
Select the best ScriptWriter suitable for Windows
"""
writer_lookup = dict(
executable=WindowsExecutableLauncherWriter,
natural=cls,
)
# for compatibility, use the executable launcher by default
launcher = os.environ.get('SETUPTOOLS_LAUNCHER', 'executable')
return writer_lookup[launcher]
@classmethod
def _get_script_args(cls, type_, name, header, script_text):
"For Windows, add a .py extension"
ext = dict(console='.pya', gui='.pyw')[type_]
if ext not in os.environ['PATHEXT'].lower().split(';'):
msg = (
"{ext} not listed in PATHEXT; scripts will not be "
"recognized as executables."
).format(**locals())
SetuptoolsWarning.emit(msg)
old = ['.pya', '.py', '-script.py', '.pyc', '.pyo', '.pyw', '.exe']
old.remove(ext)
header = cls._adjust_header(type_, header)
blockers = [name + x for x in old]
yield name + ext, header + script_text, 't', blockers
@classmethod
def _adjust_header(cls, type_, orig_header):
"""
Make sure 'pythonw' is used for gui and 'python' is used for
console (regardless of what sys.executable is).
"""
pattern = 'pythonw.exe'
repl = 'python.exe'
if type_ == 'gui':
pattern, repl = repl, pattern
pattern_ob = re.compile(re.escape(pattern), re.IGNORECASE)
new_header = pattern_ob.sub(string=orig_header, repl=repl)
return new_header if cls._use_header(new_header) else orig_header
@staticmethod
def _use_header(new_header):
"""
Should _adjust_header use the replaced header?
On non-windows systems, always use. On
Windows systems, only use the replaced header if it resolves
to an executable on the system.
"""
clean_header = new_header[2:-1].strip('"')
return sys.platform != 'win32' or shutil.which(clean_header)
| WindowsScriptWriter |
python | allegroai__clearml | clearml/debugging/log.py | {
"start": 2578,
"end": 2949
} | class ____(logging.Filter):
def __init__(self, min_level: int, max_level: int, name: str = "") -> None:
super(_LevelRangeFilter, self).__init__(name)
self.min_level = min_level
self.max_level = max_level
def filter(self, record: logging.LogRecord) -> bool:
return self.min_level <= record.levelno <= self.max_level
| _LevelRangeFilter |
python | ray-project__ray | python/ray/tests/test_memory_deadlock.py | {
"start": 4311,
"end": 4506
} | class ____:
def __init__(self, num_objects):
self.barrier = threading.Barrier(num_objects, timeout=30)
def wait_all_done(self):
self.barrier.wait()
@ray.remote
| BarrierActor |
python | tiangolo__fastapi | docs_src/body_multiple_params/tutorial002_py310.py | {
"start": 198,
"end": 446
} | class ____(BaseModel):
username: str
full_name: str | None = None
@app.put("/items/{item_id}")
async def update_item(item_id: int, item: Item, user: User):
results = {"item_id": item_id, "item": item, "user": user}
return results
| User |
python | python-markdown__markdown | tests/test_legacy.py | {
"start": 4605,
"end": 5207
} | class ____(LegacyTestCase):
location = os.path.join(parent_test_dir, 'extensions/extra')
default_kwargs = Kwargs(extensions=['extra'])
loose_def_list = Kwargs(extensions=['def_list'])
simple_def_lists = Kwargs(extensions=['def_list'])
abbr = Kwargs(extensions=['abbr'])
footnotes = Kwargs(extensions=['footnotes'])
extra_config = Kwargs(
extensions=['extra'],
extension_configs={
'extra': {
'footnotes': {
'PLACE_MARKER': '~~~placemarker~~~'
}
}
}
)
| TestExtensionsExtra |
python | openai__openai-python | src/openai/types/shared_params/comparison_filter.py | {
"start": 279,
"end": 962
} | class ____(TypedDict, total=False):
key: Required[str]
"""The key to compare against the value."""
type: Required[Literal["eq", "ne", "gt", "gte", "lt", "lte"]]
"""
Specifies the comparison operator: `eq`, `ne`, `gt`, `gte`, `lt`, `lte`, `in`,
`nin`.
- `eq`: equals
- `ne`: not equal
- `gt`: greater than
- `gte`: greater than or equal
- `lt`: less than
- `lte`: less than or equal
- `in`: in
- `nin`: not in
"""
value: Required[Union[str, float, bool, SequenceNotStr[Union[str, float]]]]
"""
The value to compare against the attribute key; supports string, number, or
boolean types.
"""
| ComparisonFilter |
python | pytransitions__transitions | tests/test_markup.py | {
"start": 5839,
"end": 7394
} | class ____(TestMarkupMachine):
def setUp(self):
self.states = ['A', 'B', {'name': 'C',
'children': ['1', '2', {'name': '3', 'children': ['a', 'b', 'c']}]}]
self.transitions = [
{'trigger': 'walk', 'source': 'A', 'dest': 'C_1'},
{'trigger': 'run', 'source': 'C_1', 'dest': 'C_3_a'},
{'trigger': 'sprint', 'source': 'C', 'dest': 'B'}
]
# MarkupMachine cannot be imported via get_predefined as of now
# We want to be able to run these tests without (py)graphviz
self.machine_cls = HierarchicalMarkupMachine
self.num_trans = len(self.transitions)
self.num_auto = len(self.states) * 9
def test_nested_definitions(self):
states = [{'name': 'A'},
{'name': 'B'},
{'name': 'C',
'children': [
{'name': '1'},
{'name': '2'}],
'transitions': [
{'trigger': 'go',
'source': '1',
'dest': '2'}],
'initial': '2'}] # type: List[Dict]
machine = self.machine_cls(states=states, initial='A', auto_transitions=False, name='TestMachine')
markup = {k: v for k, v in machine.markup.items() if v and k != 'models'}
self.assertEqual(dict(initial='A', states=states, name='TestMachine', model_attribute='state'), markup)
@skipIf(enum is None, "enum is not available")
| TestMarkupHierarchicalMachine |
python | sqlalchemy__sqlalchemy | test/dialect/postgresql/test_types.py | {
"start": 200911,
"end": 202520
} | class ____:
_col_type = TSTZMULTIRANGE
_col_str = "TSTZMULTIRANGE"
__only_on__ = "postgresql"
# make sure we use one, steady timestamp with timezone pair
# for all parts of all these tests
_tstzs = None
_tstzs_delta = None
def tstzs(self):
# note this was hitting DST issues when these tests were using a
# live date and running on or near 2024-03-09 :). hardcoded to a
# date a few days earlier
utc_now = datetime.datetime(
2024, 3, 2, 14, 57, 50, 473566, tzinfo=datetime.timezone.utc
)
if self._tstzs is None:
lower = utc_now
upper = lower + datetime.timedelta(1)
self._tstzs = (lower, upper)
return self._tstzs
def tstzs_delta(self):
utc_now = datetime.datetime(
2024, 3, 2, 14, 57, 50, 473566, tzinfo=datetime.timezone.utc
)
if self._tstzs_delta is None:
lower = utc_now + datetime.timedelta(3)
upper = lower + datetime.timedelta(2)
self._tstzs_delta = (lower, upper)
return self._tstzs_delta
def _data_str(self):
tstzs_lower, tstzs_upper = self.tstzs()
tstzs_delta_lower, tstzs_delta_upper = self.tstzs_delta()
return "{{[{tl},{tu}), [{tdl},{tdu})}}".format(
tl=tstzs_lower,
tu=tstzs_upper,
tdl=tstzs_delta_lower,
tdu=tstzs_delta_upper,
)
def _data_obj(self):
return [
Range(*self.tstzs()),
Range(*self.tstzs_delta()),
]
| _DateTimeTZMultiRangeTests |
python | run-llama__llama_index | llama-index-integrations/storage/index_store/llama-index-storage-index-store-dynamodb/llama_index/storage/index_store/dynamodb/base.py | {
"start": 177,
"end": 1093
} | class ____(KVIndexStore):
def __init__(
self,
dynamodb_kvstore: DynamoDBKVStore,
namespace: Optional[str] = None,
collection_suffix: Optional[str] = None,
) -> None:
"""Init a DynamoDBIndexStore."""
super().__init__(
kvstore=dynamodb_kvstore,
namespace=namespace,
collection_suffix=collection_suffix,
)
@classmethod
def from_table_name(
cls,
table_name: str,
namespace: Optional[str] = None,
collection_suffix: Optional[str] = None,
) -> "DynamoDBIndexStore":
"""Load DynamoDBIndexStore from a DynamoDB table name."""
ddb_kvstore = DynamoDBKVStore.from_table_name(table_name=table_name)
return cls(
dynamodb_kvstore=ddb_kvstore,
namespace=namespace,
collection_suffix=collection_suffix,
)
| DynamoDBIndexStore |
python | celery__celery | t/unit/worker/test_request.py | {
"start": 1092,
"end": 2154
} | class ____:
def setup_method(self):
self.app.conf.result_serializer = 'pickle'
@self.app.task(shared=False)
def add(x, y, **kw_):
return x + y
self.add = add
@self.app.task(shared=False)
def mytask(i, **kwargs):
return i ** i
self.mytask = mytask
@self.app.task(shared=False)
def mytask_raising(i):
raise KeyError(i)
self.mytask_raising = mytask_raising
def xRequest(self, name=None, id=None, args=None, kwargs=None,
on_ack=None, on_reject=None, Request=Request, **head):
args = [1] if args is None else args
kwargs = {'f': 'x'} if kwargs is None else kwargs
on_ack = on_ack or Mock(name='on_ack')
on_reject = on_reject or Mock(name='on_reject')
message = self.TaskMessage(
name or self.mytask.name, id, args=args, kwargs=kwargs, **head
)
return Request(message, app=self.app,
on_ack=on_ack, on_reject=on_reject)
| RequestCase |
python | sphinx-doc__sphinx | sphinx/domains/__init__.py | {
"start": 908,
"end": 1759
} | class ____:
"""An ObjType is the description for a type of object that a domain can
document. In the object_types attribute of Domain subclasses, object type
names are mapped to instances of this class.
Constructor arguments:
- *lname*: localized name of the type (do not include domain name)
- *roles*: all the roles that can refer to an object of this type
- *attrs*: object attributes -- currently only "searchprio" is known,
which defines the object's priority in the full-text search index,
see :meth:`Domain.get_objects()`.
"""
known_attrs = {
'searchprio': 1,
}
def __init__(self, lname: str, /, *roles: Any, **attrs: Any) -> None:
self.lname: str = lname
self.roles: tuple[Any, ...] = roles
self.attrs: dict[str, Any] = self.known_attrs | attrs
| ObjType |
python | walkccc__LeetCode | solutions/2258. Escape the Spreading Fire/2258.py | {
"start": 0,
"end": 2216
} | class ____:
def maximumMinutes(self, grid: list[list[int]]) -> int:
DIRS = ((0, 1), (1, 0), (0, -1), (-1, 0))
MAX = len(grid) * len(grid[0])
fireGrid = [[-1] * len(grid[0]) for _ in range(len(grid[0]))]
self._buildFireGrid(grid, fireGrid, DIRS)
ans = -1
l = 0
r = MAX
while l <= r:
m = (l + r) // 2
if self._canStayFor(grid, fireGrid, m, DIRS):
ans = m
l = m + 1
else:
r = m - 1
return 1e9 if ans == MAX else ans
def _buildFireGrid(
self,
grid: list[list[int]],
fireMinute: list[list[int]],
DIRS: list[int],
) -> None:
minuteFromFire = 0
q = collections.deque()
for i in range(len(grid)):
for j in range(len(grid[0])):
if grid[i][j] == 1: # the fire
q.append((i, j))
fireMinute[i][j] = 0
while q:
minuteFromFire += 1
for _ in range(len(q)):
i, j = q.popleft()
for dx, dy in DIRS:
x = i + dx
y = j + dy
if x < 0 or x == len(grid) or y < 0 or y == len(grid[0]):
continue
if grid[x][y] == 2: # the wall
continue
if fireMinute[x][y] != -1:
continue
fireMinute[x][y] = minuteFromFire
q.append((x, y))
def _canStayFor(
self,
grid: list[list[int]],
fireMinute: list[list[int]],
minute: int, DIRS: list[int],
) -> bool:
q = collections.deque([(0, 0)]) # the start position
seen = {(0, 0)}
while q:
minute += 1
for _ in range(len(q)):
i, j = q.popleft()
for dx, dy in DIRS:
x = i + dx
y = j + dy
if x < 0 or x == len(grid) or y < 0 or y == len(grid[0]):
continue
if grid[x][y] == 2: # the wall
continue
if x == len(grid) - 1 and y == len(grid[0]) - 1:
if fireMinute[x][y] != -1 and fireMinute[x][y] < minute:
continue
return True
if fireMinute[x][y] != -1 and fireMinute[x][y] <= minute:
continue
if seen[x][y]:
continue
q.append((x, y))
seen.add((x, y))
return False
| Solution |
python | ansible__ansible | lib/ansible/parsing/dataloader.py | {
"start": 1311,
"end": 21457
} | class ____:
"""
The DataLoader class is used to load and parse YAML or JSON content,
either from a given file name or from a string that was previously
read in through other means. A Vault password can be specified, and
any vault-encrypted files will be decrypted.
Data read from files will also be cached, so the file will never be
read from disk more than once.
Usage:
dl = DataLoader()
# optionally: dl.set_vault_secrets([('default', ansible.parsing.vault.PromptVaultSecret(...),)])
ds = dl.load('...')
ds = dl.load_from_file('/path/to/file')
"""
def __init__(self) -> None:
self._basedir: str = os.path.abspath('.')
# NOTE: not effective with forks as the main copy does not get updated.
# avoids rereading files
self._FILE_CACHE: dict[str, object] = {}
# NOTE: not thread safe, also issues with forks not returning data to main proc
# so they need to be cleaned independently. See WorkerProcess for example.
# used to keep track of temp files for cleaning
self._tempfiles: set[str] = set()
# initialize the vault stuff with an empty password
# TODO: replace with a ref to something that can get the password
# a creds/auth provider
self._vault = VaultLib()
self.set_vault_secrets(None)
# TODO: since we can query vault_secrets late, we could provide this to DataLoader init
def set_vault_secrets(self, vault_secrets: list[tuple[str, PromptVaultSecret]] | None) -> None:
self._vault.secrets = vault_secrets
def load(
self,
data: str,
file_name: str | None = None, # DTFIX-FUTURE: consider deprecating this in favor of tagging Origin on data
show_content: bool = True, # DTFIX-FUTURE: consider future deprecation, but would need RedactAnnotatedSourceContext public
json_only: bool = False,
) -> t.Any:
"""Backwards compat for now"""
with _error_utils.RedactAnnotatedSourceContext.when(not show_content):
return from_yaml(data=data, file_name=file_name, json_only=json_only)
def load_from_file(self, file_name: str, cache: str = 'all', unsafe: bool = False, json_only: bool = False, trusted_as_template: bool = False) -> t.Any:
"""
Loads data from a file, which can contain either JSON or YAML.
:param file_name: The name of the file to load data from.
:param cache: Options for caching: none|all|vaulted
:param unsafe: If True, returns the parsed data as-is without deep copying.
:param json_only: If True, only loads JSON data from the file.
:return: The loaded data, optionally deep-copied for safety.
"""
# Resolve the file name
file_name = self.path_dwim(file_name)
# Log the file being loaded
display.debug("Loading data from %s" % file_name)
# Check if the file has been cached and use the cached data if available
if cache != 'none' and file_name in self._FILE_CACHE:
parsed_data = self._FILE_CACHE[file_name]
else:
file_data = self.get_text_file_contents(file_name)
if trusted_as_template:
file_data = TrustedAsTemplate().tag(file_data)
parsed_data = self.load(data=file_data, file_name=file_name, json_only=json_only)
# only tagging the container, used by include_vars to determine if vars should be shown or not
# this is a temporary measure until a proper data senitivity system is in place
if SourceWasEncrypted.is_tagged_on(file_data):
parsed_data = SourceWasEncrypted().tag(parsed_data)
# Cache the file contents for next time based on the cache option
if cache == 'all':
self._FILE_CACHE[file_name] = parsed_data
elif cache == 'vaulted' and SourceWasEncrypted.is_tagged_on(file_data):
self._FILE_CACHE[file_name] = parsed_data
# Return the parsed data, optionally deep-copied for safety
if unsafe:
return parsed_data
else:
return copy.deepcopy(parsed_data)
def path_exists(self, path: str) -> bool:
path = self.path_dwim(path)
return os.path.exists(path)
def is_file(self, path: str) -> bool:
path = self.path_dwim(path)
return os.path.isfile(path) or path == os.devnull
def is_directory(self, path: str) -> bool:
path = self.path_dwim(path)
return os.path.isdir(path)
def list_directory(self, path: str) -> list[str]:
path = self.path_dwim(path)
return os.listdir(path)
def is_executable(self, path: str) -> bool:
"""is the given path executable?"""
path = self.path_dwim(path)
return is_executable(path)
def _decrypt_if_vault_data(self, b_data: bytes) -> tuple[bytes, bool]:
"""Decrypt b_vault_data if encrypted and return b_data and the show_content flag"""
if encrypted_source := is_encrypted(b_data):
b_data = self._vault.decrypt(b_data)
return b_data, not encrypted_source
def get_text_file_contents(self, file_name: str, encoding: str | None = None) -> str:
"""
Returns an `Origin` tagged string with the content of the specified (DWIM-expanded for relative) file path, decrypting if necessary.
Callers must only specify `encoding` when the user can configure it, as error messages in that case will imply configurability.
If `encoding` is not specified, UTF-8 will be used.
"""
bytes_content, source_was_plaintext = self._get_file_contents(file_name)
if encoding is None:
encoding = 'utf-8'
help_text = 'This file must be UTF-8 encoded.'
else:
help_text = 'Ensure the correct encoding was specified.'
try:
str_content = bytes_content.decode(encoding=encoding, errors='strict')
except UnicodeDecodeError:
str_content = bytes_content.decode(encoding=encoding, errors='surrogateescape')
display.deprecated(
msg=f"File {file_name!r} could not be decoded as {encoding!r}. Invalid content has been escaped.",
version="2.23",
# obj intentionally omitted since there's no value in showing its contents
help_text=help_text,
)
if not source_was_plaintext:
str_content = SourceWasEncrypted().tag(str_content)
return AnsibleTagHelper.tag_copy(bytes_content, str_content)
def _get_file_contents(self, file_name: str) -> tuple[bytes, bool]:
"""
Reads the file contents from the given file name
If the contents are vault-encrypted, it will decrypt them and return
the decrypted data
:arg file_name: The name of the file to read. If this is a relative
path, it will be expanded relative to the basedir
:raises AnsibleFileNotFound: if the file_name does not refer to a file
:raises AnsibleParserError: if we were unable to read the file
:return: Returns a byte string of the file contents
"""
if not file_name or not isinstance(file_name, str):
raise TypeError(f"Invalid filename {file_name!r}.")
file_name = self.path_dwim(file_name)
try:
data = pathlib.Path(file_name).read_bytes()
except FileNotFoundError as ex:
# DTFIX-FUTURE: why not just let the builtin one fly?
raise AnsibleFileNotFound("Unable to retrieve file contents.", file_name=file_name) from ex
except OSError as ex:
raise AnsibleParserError(f"An error occurred while trying to read the file {file_name!r}.") from ex
data = Origin(path=file_name).tag(data)
return self._decrypt_if_vault_data(data)
def get_basedir(self) -> str:
""" returns the current basedir """
return self._basedir
def set_basedir(self, basedir: str) -> None:
""" sets the base directory, used to find files when a relative path is given """
self._basedir = os.path.abspath(basedir)
def path_dwim(self, given: str) -> str:
"""
make relative paths work like folks expect.
"""
given = unquote(given)
if given.startswith(os.path.sep) or given.startswith('~'):
path = given
else:
path = os.path.join(self._basedir, given)
return unfrackpath(path, follow=False)
def _is_role(self, path: str) -> bool:
""" imperfect role detection, roles are still valid w/o tasks|meta/main.yml|yaml|etc """
path_dirname = os.path.dirname(path)
upath = unfrackpath(path, follow=False)
untasked_paths = (
os.path.join(path, 'main.yml'),
os.path.join(path, 'main.yaml'),
os.path.join(path, 'main'),
)
tasked_paths = (
os.path.join(upath, 'tasks/main.yml'),
os.path.join(upath, 'tasks/main.yaml'),
os.path.join(upath, 'tasks/main'),
os.path.join(upath, 'meta/main.yml'),
os.path.join(upath, 'meta/main.yaml'),
os.path.join(upath, 'meta/main'),
os.path.join(path_dirname, 'tasks/main.yml'),
os.path.join(path_dirname, 'tasks/main.yaml'),
os.path.join(path_dirname, 'tasks/main'),
os.path.join(path_dirname, 'meta/main.yml'),
os.path.join(path_dirname, 'meta/main.yaml'),
os.path.join(path_dirname, 'meta/main'),
)
exists_untasked = map(os.path.exists, untasked_paths)
exists_tasked = map(os.path.exists, tasked_paths)
if RE_TASKS.search(path) and any(exists_untasked) or any(exists_tasked):
return True
return False
def path_dwim_relative(self, path: str, dirname: str, source: str, is_role: bool = False) -> str:
"""
find one file in either a role or playbook dir with or without
explicitly named dirname subdirs
Used in action plugins and lookups to find supplemental files that
could be in either place.
"""
search = []
# I have full path, nothing else needs to be looked at
if source.startswith(os.path.sep) or source.startswith('~'):
search.append(unfrackpath(source, follow=False))
else:
# base role/play path + templates/files/vars + relative filename
search.append(os.path.join(path, dirname, source))
basedir = unfrackpath(path, follow=False)
# not told if role, but detect if it is a role and if so make sure you get correct base path
if not is_role:
is_role = self._is_role(path)
if is_role and RE_TASKS.search(path):
basedir = unfrackpath(os.path.dirname(path), follow=False)
cur_basedir = self._basedir
self.set_basedir(basedir)
# resolved base role/play path + templates/files/vars + relative filename
search.append(unfrackpath(os.path.join(basedir, dirname, source), follow=False))
self.set_basedir(cur_basedir)
if is_role and not source.endswith(dirname):
# look in role's tasks dir w/o dirname
search.append(unfrackpath(os.path.join(basedir, 'tasks', source), follow=False))
# try to create absolute path for loader basedir + templates/files/vars + filename
search.append(unfrackpath(os.path.join(dirname, source), follow=False))
# try to create absolute path for loader basedir
search.append(unfrackpath(os.path.join(basedir, source), follow=False))
# try to create absolute path for dirname + filename
search.append(self.path_dwim(os.path.join(dirname, source)))
# try to create absolute path for filename
search.append(self.path_dwim(source))
for candidate in search:
if os.path.exists(candidate):
break
return candidate
def path_dwim_relative_stack(self, paths: list[str], dirname: str | None, source: str | None, is_role: bool = False) -> str:
"""
find one file in first path in stack taking roles into account and adding play basedir as fallback
:arg paths: A list of text strings which are the paths to look for the filename in.
:arg dirname: A text string representing a directory. The directory
is prepended to the source to form the path to search for.
:arg source: A text string which is the filename to search for
:rtype: A text string
:returns: An absolute path to the filename ``source`` if found
:raises: An AnsibleFileNotFound Exception if the file is found to exist in the search paths
"""
source_root = None
if source:
source_root = source.split('/')[0]
basedir = self.get_basedir()
result = None
search = []
if source is None:
display.warning('Invalid request to find a file that matches a "null" value')
elif source and (source.startswith('~') or source.startswith(os.path.sep)):
# path is absolute, no relative needed, check existence and return source
test_path = unfrackpath(source, follow=False)
if os.path.exists(test_path):
result = test_path
else:
display.debug('evaluation_path:\n\t%s' % '\n\t'.join(paths))
for path in paths:
upath = unfrackpath(path, follow=False)
pb_base_dir = os.path.dirname(upath)
# if path is in role and 'tasks' not there already, add it into the search
if (is_role or self._is_role(path)) and pb_base_dir.endswith('/tasks'):
if dirname:
search.append(os.path.join(os.path.dirname(pb_base_dir), dirname, source))
search.append(os.path.join(pb_base_dir, source))
# don't add dirname if user already is using it in source
if dirname and source_root != dirname:
search.append(os.path.join(upath, dirname, source))
search.append(os.path.join(upath, source))
# always append basedir as last resort
# don't add dirname if user already is using it in source
if dirname and source_root != dirname:
search.append(os.path.join(basedir, dirname, source))
search.append(os.path.join(basedir, source))
display.debug('search_path:\n\t%s' % '\n\t'.join(search))
for candidate in search:
display.vvvvv('looking for "%s" at "%s"' % (source, candidate))
if os.path.exists(candidate):
result = candidate
break
if result is None:
raise AnsibleFileNotFound(file_name=source, paths=search)
return result
def _create_content_tempfile(self, content: str | bytes) -> str:
""" Create a tempfile containing defined content """
fd, content_tempfile = tempfile.mkstemp(dir=C.DEFAULT_LOCAL_TMP)
with os.fdopen(fd, 'wb') as f:
try:
f.write(to_bytes(content))
except Exception as err:
os.remove(content_tempfile)
raise Exception(err)
return content_tempfile
def get_real_file(self, file_path: str, decrypt: bool = True) -> str:
"""
If the file is vault encrypted return a path to a temporary decrypted file
If the file is not encrypted then the path is returned
Temporary files are cleanup in the destructor
"""
if not self.path_exists(file_path) or not self.is_file(file_path):
raise AnsibleFileNotFound(file_name=file_path)
real_path = self.path_dwim(file_path)
try:
if decrypt:
with open(real_path, 'rb') as f:
# Limit how much of the file is read since we do not know
# whether this is a vault file and therefore it could be very
# large.
if is_encrypted_file(f):
# if the file is encrypted and no password was specified,
# the decrypt call would throw an error, but we check first
# since the decrypt function doesn't know the file name
data = Origin(path=real_path).tag(f.read())
if not self._vault.secrets:
raise AnsibleParserError("A vault password or secret must be specified to decrypt %s" % file_path)
data = self._vault.decrypt(data)
# Make a temp file
real_path = self._create_content_tempfile(data)
self._tempfiles.add(real_path)
return real_path
except OSError as ex:
raise AnsibleParserError(f"an error occurred while trying to read the file {real_path!r}.") from ex
def cleanup_tmp_file(self, file_path: str) -> None:
"""
Removes any temporary files created from a previous call to
get_real_file. file_path must be the path returned from a
previous call to get_real_file.
"""
if file_path in self._tempfiles:
os.unlink(file_path)
self._tempfiles.remove(file_path)
def cleanup_all_tmp_files(self) -> None:
"""
Removes all temporary files that DataLoader has created
NOTE: not thread safe, forks also need special handling see __init__ for details.
"""
for f in list(self._tempfiles):
try:
self.cleanup_tmp_file(f)
except Exception as e:
display.warning("Unable to cleanup temp files: %s" % to_text(e))
def find_vars_files(self, path: str, name: str, extensions: list[str] | None = None, allow_dir: bool = True) -> list[str]:
"""
Find vars files in a given path with specified name. This will find
files in a dir named <name>/ or a file called <name> ending in known
extensions.
"""
jpath = os.path.join(path, name)
found: list[str] = []
if extensions is None:
# Look for file with no extension first to find dir before file
extensions = [''] + C.YAML_FILENAME_EXTENSIONS
# add valid extensions to name
for ext in extensions:
if '.' in ext:
full_path = jpath + ext
elif ext:
full_path = '.'.join([jpath, ext])
else:
full_path = jpath
if self.path_exists(full_path):
if self.is_directory(full_path):
if allow_dir:
found.extend(self._get_dir_vars_files(full_path, extensions))
else:
continue
else:
found.append(full_path)
break
return found
def _get_dir_vars_files(self, path: str, extensions: list[str]) -> list[str]:
found = []
for spath in sorted(self.list_directory(path)):
if not spath.startswith('.') and not spath.endswith('~'): # skip hidden and backups
ext = os.path.splitext(spath)[-1]
full_spath = os.path.join(path, spath)
if self.is_directory(full_spath) and not ext: # recursive search if dir
found.extend(self._get_dir_vars_files(full_spath, extensions))
elif self.is_file(full_spath) and (not ext or ext in extensions):
# only consider files with valid extensions or no extension
found.append(full_spath)
return found
| DataLoader |
python | facebook__pyre-check | source/interprocedural_analyses/taint/test/integration/model_query.py | {
"start": 1166,
"end": 1269
} | class ____:
attribute = ...
def __init__(self):
self.instance = None
| AttributeTestClass3 |
python | apache__airflow | providers/fab/src/airflow/providers/fab/auth_manager/cli_commands/user_command.py | {
"start": 1445,
"end": 10276
} | class ____(Schema):
"""user collection item schema."""
id = fields.Int()
firstname = fields.Str(required=True)
lastname = fields.Str(required=True)
username = fields.Str(required=True)
email = fields.Email(required=True)
roles = fields.List(fields.Str, required=True, validate=validate.Length(min=1))
@suppress_logs_and_warning
@providers_configuration_loaded
def users_list(args):
"""List users at the command line."""
with get_application_builder() as appbuilder:
users = appbuilder.sm.get_all_users()
fields = ["id", "username", "email", "first_name", "last_name", "roles"]
AirflowConsole().print_as(
data=users, output=args.output, mapper=lambda x: {f: x.__getattribute__(f) for f in fields}
)
@cli_utils.action_cli(check_db=True)
@providers_configuration_loaded
def users_create(args):
"""Create new user in the DB."""
with get_application_builder() as appbuilder:
role = appbuilder.sm.find_role(args.role)
if not role:
valid_roles = appbuilder.sm.get_all_roles()
raise SystemExit(f"{args.role} is not a valid role. Valid roles are: {valid_roles}")
password = _create_password(args)
if appbuilder.sm.find_user(args.username):
print(f"{args.username} already exist in the db")
return
user = appbuilder.sm.add_user(
args.username, args.firstname, args.lastname, args.email, role, password
)
if user:
print(f'User "{args.username}" created with role "{args.role}"')
else:
raise SystemExit("Failed to create user")
def _find_user(args):
if not args.username and not args.email:
raise SystemExit("Missing args: must supply one of --username or --email")
if args.username and args.email:
raise SystemExit("Conflicting args: must supply either --username or --email, but not both")
with get_application_builder() as appbuilder:
user = appbuilder.sm.find_user(username=args.username, email=args.email)
if not user:
raise SystemExit(f'User "{args.username or args.email}" does not exist')
return user
@cli_utils.action_cli
@providers_configuration_loaded
def user_reset_password(args):
"""Reset user password user from DB."""
user = _find_user(args)
password = _create_password(args)
with get_application_builder() as appbuilder:
if appbuilder.sm.reset_password(user.id, password):
print(f'User "{user.username}" password reset successfully')
else:
raise SystemExit("Failed to reset user password")
def _create_password(args):
if args.use_random_password:
password = "".join(random.choices(string.printable, k=16))
elif args.password:
password = args.password
else:
password = getpass.getpass("Password:")
password_confirmation = getpass.getpass("Repeat for confirmation:")
if password != password_confirmation:
raise SystemExit("Passwords did not match")
return password
@cli_utils.action_cli
@providers_configuration_loaded
def users_delete(args):
"""Delete user from DB."""
user = _find_user(args)
# Clear the associated user roles first.
user.roles.clear()
with get_application_builder() as appbuilder:
if appbuilder.sm.del_register_user(user):
print(f'User "{user.username}" deleted')
else:
raise SystemExit("Failed to delete user")
@cli_utils.action_cli
@providers_configuration_loaded
def users_manage_role(args, remove=False):
"""Delete or appends user roles."""
with get_application_builder() as appbuilder:
user = _find_user(args)
role = appbuilder.sm.find_role(args.role)
if not role:
valid_roles = appbuilder.sm.get_all_roles()
raise SystemExit(f'"{args.role}" is not a valid role. Valid roles are: {valid_roles}')
if remove:
if role not in user.roles:
raise SystemExit(f'User "{user.username}" is not a member of role "{args.role}"')
user.roles = [r for r in user.roles if r != role]
appbuilder.sm.update_user(user)
print(f'User "{user.username}" removed from role "{args.role}"')
else:
if role in user.roles:
raise SystemExit(f'User "{user.username}" is already a member of role "{args.role}"')
user.roles.append(role)
appbuilder.sm.update_user(user)
print(f'User "{user.username}" added to role "{args.role}"')
@providers_configuration_loaded
def users_export(args):
"""Export all users to the json file."""
with get_application_builder() as appbuilder:
users = appbuilder.sm.get_all_users()
fields = ["id", "username", "email", "first_name", "last_name", "roles"]
# In the User model the first and last name fields have underscores,
# but the corresponding parameters in the CLI don't
def remove_underscores(s):
return re.sub("_", "", s)
users = [
{
remove_underscores(field): user.__getattribute__(field)
if field != "roles"
else [r.name for r in user.roles]
for field in fields
}
for user in users
]
with open(args.export, "w") as file:
file.write(json.dumps(users, sort_keys=True, indent=4))
print_export_output("users", users, file)
@cli_utils.action_cli
@providers_configuration_loaded
def users_import(args):
"""Import users from the json file."""
json_file = getattr(args, "import")
if not os.path.exists(json_file):
raise SystemExit(f"File '{json_file}' does not exist")
users_list = None
try:
with open(json_file) as file:
users_list = json.loads(file.read())
except ValueError as e:
raise SystemExit(f"File '{json_file}' is not valid JSON. Error: {e}")
users_created, users_updated = _import_users(users_list)
if users_created:
users_created_str = "\n\t".join(users_created)
print(f"Created the following users:\n\t{users_created_str}")
if users_updated:
users_updated_str = "\n\t".join(users_updated)
print(f"Updated the following users:\n\t{users_updated_str}")
def _import_users(users_list: list[dict[str, Any]]):
    """Create or update the users described by *users_list*.

    The whole payload is schema-validated up front so that nothing is written
    when any record is malformed. Returns a pair of lists of e-mail addresses:
    (created, updated). Exits via SystemExit on validation failure, on an
    unknown role name, or on an attempt to change an existing user's username.
    """
    with get_application_builder() as appbuilder:
        created: list[str] = []
        updated: list[str] = []

        # Validate every record before mutating anything.
        try:
            UserSchema(many=True).load(users_list)
        except ValidationError as e:
            lines = []
            for row_num, failure in e.normalized_messages().items():
                lines.append(f"[Item {row_num}]")
                lines.extend(f"\t{key}: {value}" for key, value in failure.items())
            msg_str = "\n".join(lines)
            raise SystemExit(f"Error: Input file didn't pass validation. See below:\n{msg_str}")

        for user in users_list:
            # Resolve role names to Role objects, failing fast on unknown names.
            roles = []
            for rolename in user["roles"]:
                role = appbuilder.sm.find_role(rolename)
                if not role:
                    valid_roles = appbuilder.sm.get_all_roles()
                    raise SystemExit(
                        f'Error: "{rolename}" is not a valid role. Valid roles are: {valid_roles}'
                    )
                roles.append(role)

            # E-mail is the identity key: create when absent, update otherwise.
            existing_user = appbuilder.sm.find_user(email=user["email"])
            if not existing_user:
                print(f"Creating new user with email '{user['email']}'")
                appbuilder.sm.add_user(
                    username=user["username"],
                    first_name=user["firstname"],
                    last_name=user["lastname"],
                    email=user["email"],
                    role=roles,
                )
                created.append(user["email"])
            else:
                print(f"Found existing user with email '{user['email']}'")
                if existing_user.username != user["username"]:
                    raise SystemExit(
                        f"Error: Changing the username is not allowed - "
                        f"please delete and recreate the user with email {user['email']!r}"
                    )
                existing_user.roles = roles
                existing_user.first_name = user["firstname"]
                existing_user.last_name = user["lastname"]
                appbuilder.sm.update_user(existing_user)
                updated.append(user["email"])

        return created, updated
# CLI entry points: thin wrappers around users_manage_role with the "remove"
# flag pre-bound (False grants the role to the user, True revokes it).
add_role = functools.partial(users_manage_role, remove=False)
remove_role = functools.partial(users_manage_role, remove=True)
| UserSchema |
python | altair-viz__altair | altair/vegalite/v6/schema/core.py | {
"start": 641660,
"end": 677450
} | class ____(AnyMarkConfig):
"""
MarkConfig schema wrapper.
Parameters
----------
align : dict, :class:`Align`, :class:`ExprRef`, Literal['left', 'center', 'right']
The horizontal alignment of the text or ranged marks (area, bar, image, rect, rule).
One of ``"left"``, ``"right"``, ``"center"``.
**Note:** Expression reference is *not* supported for range marks.
angle : dict, float, :class:`ExprRef`
The rotation angle of the text, in degrees.
aria : bool, dict, :class:`ExprRef`
A boolean flag indicating if `ARIA attributes
<https://developer.mozilla.org/en-US/docs/Web/Accessibility/ARIA>`__ should be
included (SVG output only). If ``false``, the "aria-hidden" attribute will be set on
the output SVG element, removing the mark item from the ARIA accessibility tree.
ariaRole : str, dict, :class:`ExprRef`
Sets the type of user interface element of the mark item for `ARIA accessibility
<https://developer.mozilla.org/en-US/docs/Web/Accessibility/ARIA>`__ (SVG output
only). If specified, this property determines the "role" attribute. Warning: this
property is experimental and may be changed in the future.
ariaRoleDescription : str, dict, :class:`ExprRef`
A human-readable, author-localized description for the role of the mark item for
`ARIA accessibility
<https://developer.mozilla.org/en-US/docs/Web/Accessibility/ARIA>`__ (SVG output
only). If specified, this property determines the "aria-roledescription" attribute.
Warning: this property is experimental and may be changed in the future.
aspect : bool, dict, :class:`ExprRef`
Whether to keep aspect ratio of image marks.
baseline : dict, :class:`ExprRef`, :class:`Baseline`, :class:`TextBaseline`, Literal['alphabetic', 'line-bottom', 'line-top', 'top', 'middle', 'bottom']
For text marks, the vertical text baseline. One of ``"alphabetic"`` (default),
``"top"``, ``"middle"``, ``"bottom"``, ``"line-top"``, ``"line-bottom"``, or an
expression reference that provides one of the valid values. The ``"line-top"`` and
``"line-bottom"`` values operate similarly to ``"top"`` and ``"bottom"``, but are
calculated relative to the ``lineHeight`` rather than ``fontSize`` alone.
For range marks, the vertical alignment of the marks. One of ``"top"``,
``"middle"``, ``"bottom"``.
**Note:** Expression reference is *not* supported for range marks.
blend : dict, :class:`Blend`, :class:`ExprRef`, Literal[None, 'multiply', 'screen', 'overlay', 'darken', 'lighten', 'color-dodge', 'color-burn', 'hard-light', 'soft-light', 'difference', 'exclusion', 'hue', 'saturation', 'color', 'luminosity']
The color blend mode for drawing an item on its current background. Any valid `CSS
mix-blend-mode <https://developer.mozilla.org/en-US/docs/Web/CSS/mix-blend-mode>`__
value can be used.
**Default value:** ``"source-over"``
color : str, dict, :class:`Color`, :class:`ExprRef`, :class:`Gradient`, :class:`HexColor`, :class:`ColorName`, :class:`LinearGradient`, :class:`RadialGradient`, Literal['black', 'silver', 'gray', 'white', 'maroon', 'red', 'purple', 'fuchsia', 'green', 'lime', 'olive', 'yellow', 'navy', 'blue', 'teal', 'aqua', 'orange', 'aliceblue', 'antiquewhite', 'aquamarine', 'azure', 'beige', 'bisque', 'blanchedalmond', 'blueviolet', 'brown', 'burlywood', 'cadetblue', 'chartreuse', 'chocolate', 'coral', 'cornflowerblue', 'cornsilk', 'crimson', 'cyan', 'darkblue', 'darkcyan', 'darkgoldenrod', 'darkgray', 'darkgreen', 'darkgrey', 'darkkhaki', 'darkmagenta', 'darkolivegreen', 'darkorange', 'darkorchid', 'darkred', 'darksalmon', 'darkseagreen', 'darkslateblue', 'darkslategray', 'darkslategrey', 'darkturquoise', 'darkviolet', 'deeppink', 'deepskyblue', 'dimgray', 'dimgrey', 'dodgerblue', 'firebrick', 'floralwhite', 'forestgreen', 'gainsboro', 'ghostwhite', 'gold', 'goldenrod', 'greenyellow', 'grey', 'honeydew', 'hotpink', 'indianred', 'indigo', 'ivory', 'khaki', 'lavender', 'lavenderblush', 'lawngreen', 'lemonchiffon', 'lightblue', 'lightcoral', 'lightcyan', 'lightgoldenrodyellow', 'lightgray', 'lightgreen', 'lightgrey', 'lightpink', 'lightsalmon', 'lightseagreen', 'lightskyblue', 'lightslategray', 'lightslategrey', 'lightsteelblue', 'lightyellow', 'limegreen', 'linen', 'magenta', 'mediumaquamarine', 'mediumblue', 'mediumorchid', 'mediumpurple', 'mediumseagreen', 'mediumslateblue', 'mediumspringgreen', 'mediumturquoise', 'mediumvioletred', 'midnightblue', 'mintcream', 'mistyrose', 'moccasin', 'navajowhite', 'oldlace', 'olivedrab', 'orangered', 'orchid', 'palegoldenrod', 'palegreen', 'paleturquoise', 'palevioletred', 'papayawhip', 'peachpuff', 'peru', 'pink', 'plum', 'powderblue', 'rosybrown', 'royalblue', 'saddlebrown', 'salmon', 'sandybrown', 'seagreen', 'seashell', 'sienna', 'skyblue', 'slateblue', 'slategray', 'slategrey', 'snow', 'springgreen', 'steelblue', 'tan', 'thistle', 
'tomato', 'turquoise', 'violet', 'wheat', 'whitesmoke', 'yellowgreen', 'rebeccapurple']
Default color.
**Default value:** ``"#4682b4"``
**Note:**
* This property cannot be used in a `style config
<https://vega.github.io/vega-lite/docs/mark.html#style-config>`__.
* The ``fill`` and ``stroke`` properties have higher precedence than ``color`` and
will override ``color``.
cornerRadius : dict, float, :class:`ExprRef`
The radius in pixels of rounded rectangles or arcs' corners.
**Default value:** ``0``
cornerRadiusBottomLeft : dict, float, :class:`ExprRef`
The radius in pixels of rounded rectangles' bottom left corner.
**Default value:** ``0``
cornerRadiusBottomRight : dict, float, :class:`ExprRef`
The radius in pixels of rounded rectangles' bottom right corner.
**Default value:** ``0``
    cornerRadiusTopLeft : dict, float, :class:`ExprRef`
        The radius in pixels of rounded rectangles' top left corner.
        **Default value:** ``0``
    cornerRadiusTopRight : dict, float, :class:`ExprRef`
        The radius in pixels of rounded rectangles' top right corner.
        **Default value:** ``0``
cursor : dict, :class:`Cursor`, :class:`ExprRef`, Literal['auto', 'default', 'none', 'context-menu', 'help', 'pointer', 'progress', 'wait', 'cell', 'crosshair', 'text', 'vertical-text', 'alias', 'copy', 'move', 'no-drop', 'not-allowed', 'e-resize', 'n-resize', 'ne-resize', 'nw-resize', 's-resize', 'se-resize', 'sw-resize', 'w-resize', 'ew-resize', 'ns-resize', 'nesw-resize', 'nwse-resize', 'col-resize', 'row-resize', 'all-scroll', 'zoom-in', 'zoom-out', 'grab', 'grabbing']
The mouse cursor used over the mark. Any valid `CSS cursor type
<https://developer.mozilla.org/en-US/docs/Web/CSS/cursor#Values>`__ can be used.
description : str, dict, :class:`ExprRef`
A text description of the mark item for `ARIA accessibility
<https://developer.mozilla.org/en-US/docs/Web/Accessibility/ARIA>`__ (SVG output
only). If specified, this property determines the `"aria-label" attribute
<https://developer.mozilla.org/en-US/docs/Web/Accessibility/ARIA/ARIA_Techniques/Using_the_aria-label_attribute>`__.
dir : dict, :class:`ExprRef`, :class:`TextDirection`, Literal['ltr', 'rtl']
The direction of the text. One of ``"ltr"`` (left-to-right) or ``"rtl"``
(right-to-left). This property determines on which side is truncated in response to
the limit parameter.
**Default value:** ``"ltr"``
dx : dict, float, :class:`ExprRef`
The horizontal offset, in pixels, between the text label and its anchor point. The
offset is applied after rotation by the *angle* property.
dy : dict, float, :class:`ExprRef`
The vertical offset, in pixels, between the text label and its anchor point. The
offset is applied after rotation by the *angle* property.
ellipsis : str, dict, :class:`ExprRef`
The ellipsis string for text truncated in response to the limit parameter.
**Default value:** ``"…"``
endAngle : dict, float, :class:`ExprRef`
The end angle in radians for arc marks. A value of ``0`` indicates up (north),
increasing values proceed clockwise.
fill : str, dict, :class:`Color`, :class:`ExprRef`, :class:`Gradient`, :class:`HexColor`, :class:`ColorName`, :class:`LinearGradient`, :class:`RadialGradient`, Literal['black', 'silver', 'gray', 'white', 'maroon', 'red', 'purple', 'fuchsia', 'green', 'lime', 'olive', 'yellow', 'navy', 'blue', 'teal', 'aqua', 'orange', 'aliceblue', 'antiquewhite', 'aquamarine', 'azure', 'beige', 'bisque', 'blanchedalmond', 'blueviolet', 'brown', 'burlywood', 'cadetblue', 'chartreuse', 'chocolate', 'coral', 'cornflowerblue', 'cornsilk', 'crimson', 'cyan', 'darkblue', 'darkcyan', 'darkgoldenrod', 'darkgray', 'darkgreen', 'darkgrey', 'darkkhaki', 'darkmagenta', 'darkolivegreen', 'darkorange', 'darkorchid', 'darkred', 'darksalmon', 'darkseagreen', 'darkslateblue', 'darkslategray', 'darkslategrey', 'darkturquoise', 'darkviolet', 'deeppink', 'deepskyblue', 'dimgray', 'dimgrey', 'dodgerblue', 'firebrick', 'floralwhite', 'forestgreen', 'gainsboro', 'ghostwhite', 'gold', 'goldenrod', 'greenyellow', 'grey', 'honeydew', 'hotpink', 'indianred', 'indigo', 'ivory', 'khaki', 'lavender', 'lavenderblush', 'lawngreen', 'lemonchiffon', 'lightblue', 'lightcoral', 'lightcyan', 'lightgoldenrodyellow', 'lightgray', 'lightgreen', 'lightgrey', 'lightpink', 'lightsalmon', 'lightseagreen', 'lightskyblue', 'lightslategray', 'lightslategrey', 'lightsteelblue', 'lightyellow', 'limegreen', 'linen', 'magenta', 'mediumaquamarine', 'mediumblue', 'mediumorchid', 'mediumpurple', 'mediumseagreen', 'mediumslateblue', 'mediumspringgreen', 'mediumturquoise', 'mediumvioletred', 'midnightblue', 'mintcream', 'mistyrose', 'moccasin', 'navajowhite', 'oldlace', 'olivedrab', 'orangered', 'orchid', 'palegoldenrod', 'palegreen', 'paleturquoise', 'palevioletred', 'papayawhip', 'peachpuff', 'peru', 'pink', 'plum', 'powderblue', 'rosybrown', 'royalblue', 'saddlebrown', 'salmon', 'sandybrown', 'seagreen', 'seashell', 'sienna', 'skyblue', 'slateblue', 'slategray', 'slategrey', 'snow', 'springgreen', 'steelblue', 'tan', 'thistle', 
'tomato', 'turquoise', 'violet', 'wheat', 'whitesmoke', 'yellowgreen', 'rebeccapurple'], None
Default fill color. This property has higher precedence than ``config.color``. Set
to ``null`` to remove fill.
**Default value:** (None)
fillOpacity : dict, float, :class:`ExprRef`
The fill opacity (value between [0,1]).
**Default value:** ``1``
filled : bool
Whether the mark's color should be used as fill color instead of stroke color.
**Default value:** ``false`` for all ``point``, ``line``, and ``rule`` marks as well
as ``geoshape`` marks for `graticule
<https://vega.github.io/vega-lite/docs/data.html#graticule>`__ data sources;
otherwise, ``true``.
**Note:** This property cannot be used in a `style config
<https://vega.github.io/vega-lite/docs/mark.html#style-config>`__.
font : str, dict, :class:`ExprRef`
The typeface to set the text in (e.g., ``"Helvetica Neue"``).
fontSize : dict, float, :class:`ExprRef`
The font size, in pixels.
**Default value:** ``11``
fontStyle : str, dict, :class:`ExprRef`, :class:`FontStyle`
The font style (e.g., ``"italic"``).
fontWeight : dict, :class:`ExprRef`, :class:`FontWeight`, Literal['normal', 'bold', 'lighter', 'bolder', 100, 200, 300, 400, 500, 600, 700, 800, 900]
The font weight. This can be either a string (e.g ``"bold"``, ``"normal"``) or a
number (``100``, ``200``, ``300``, ..., ``900`` where ``"normal"`` = ``400`` and
``"bold"`` = ``700``).
height : dict, float, :class:`ExprRef`
Height of the marks.
href : str, dict, :class:`URI`, :class:`ExprRef`
A URL to load upon mouse click. If defined, the mark acts as a hyperlink.
innerRadius : dict, float, :class:`ExprRef`
The inner radius in pixels of arc marks. ``innerRadius`` is an alias for
``radius2``.
**Default value:** ``0``
interpolate : dict, :class:`ExprRef`, :class:`Interpolate`, Literal['basis', 'basis-open', 'basis-closed', 'bundle', 'cardinal', 'cardinal-open', 'cardinal-closed', 'catmull-rom', 'linear', 'linear-closed', 'monotone', 'natural', 'step', 'step-before', 'step-after']
The line interpolation method to use for line and area marks. One of the following:
* ``"linear"``: piecewise linear segments, as in a polyline.
* ``"linear-closed"``: close the linear segments to form a polygon.
* ``"step"``: alternate between horizontal and vertical segments, as in a step
function.
* ``"step-before"``: alternate between vertical and horizontal segments, as in a
step function.
* ``"step-after"``: alternate between horizontal and vertical segments, as in a step
function.
* ``"basis"``: a B-spline, with control point duplication on the ends.
* ``"basis-open"``: an open B-spline; may not intersect the start or end.
* ``"basis-closed"``: a closed B-spline, as in a loop.
* ``"cardinal"``: a Cardinal spline, with control point duplication on the ends.
* ``"cardinal-open"``: an open Cardinal spline; may not intersect the start or end,
but will intersect other control points.
* ``"cardinal-closed"``: a closed Cardinal spline, as in a loop.
* ``"bundle"``: equivalent to basis, except the tension parameter is used to
straighten the spline.
* ``"monotone"``: cubic interpolation that preserves monotonicity in y.
invalid : :class:`MarkInvalidDataMode`, Literal['filter', 'break-paths-filter-domains', 'break-paths-show-domains', 'break-paths-show-path-domains', 'show'], None
Invalid data mode, which defines how the marks and corresponding scales should
represent invalid values (``null`` and ``NaN`` in continuous scales *without*
defined output for invalid values).
* ``"filter"`` — *Exclude* all invalid values from the visualization's *marks* and
*scales*. For path marks (for line, area, trail), this option will create paths
that connect valid points, as if the data rows with invalid values do not exist.
* ``"break-paths-filter-domains"`` — Break path marks (for line, area, trail) at
invalid values. For non-path marks, this is equivalent to ``"filter"``. All
*scale* domains will *exclude* these filtered data points.
* ``"break-paths-show-domains"`` — Break paths (for line, area, trail) at invalid
values. Hide invalid values for non-path marks. All *scale* domains will
*include* these filtered data points (for both path and non-path marks).
* ``"show"`` or ``null`` — Show all data points in the marks and scale domains. Each
scale will use the output for invalid values defined in ``config.scale.invalid``
or, if unspecified, by default invalid values will produce the same visual values
as zero (if the scale includes zero) or the minimum value (if the scale does not
include zero).
* ``"break-paths-show-path-domains"`` (default) — This is equivalent to
``"break-paths-show-domains"`` for path-based marks (line/area/trail) and
``"filter"`` for non-path marks.
**Note**: If any channel's scale has an output for invalid values defined in
``config.scale.invalid``, all values for the scales will be considered "valid" since
they can produce a reasonable output for the scales. Thus, fields for such channels
will not be filtered and will not cause path breaks.
limit : dict, float, :class:`ExprRef`
The maximum length of the text mark in pixels. The text value will be automatically
truncated if the rendered size exceeds the limit.
**Default value:** ``0`` -- indicating no limit
lineBreak : str, dict, :class:`ExprRef`
A delimiter, such as a newline character, upon which to break text strings into
multiple lines. This property is ignored if the text is array-valued.
lineHeight : dict, float, :class:`ExprRef`
The line height in pixels (the spacing between subsequent lines of text) for
multi-line text marks.
opacity : dict, float, :class:`ExprRef`
The overall opacity (value between [0,1]).
**Default value:** ``0.7`` for non-aggregate plots with ``point``, ``tick``,
``circle``, or ``square`` marks or layered ``bar`` charts and ``1`` otherwise.
order : bool, None
For line and trail marks, this ``order`` property can be set to ``null`` or
``false`` to make the lines use the original order in the data sources.
orient : :class:`Orientation`, Literal['horizontal', 'vertical']
The orientation of a non-stacked bar, tick, area, and line charts. The value is
either horizontal (default) or vertical.
* For bar, rule and tick, this determines whether the size of the bar and tick
should be applied to x or y dimension.
* For area, this property determines the orient property of the Vega output.
* For line and trail marks, this property determines the sort order of the points in
the line if ``config.sortLineBy`` is not specified. For stacked charts, this is
always determined by the orientation of the stack; therefore explicitly specified
value will be ignored.
outerRadius : dict, float, :class:`ExprRef`
The outer radius in pixels of arc marks. ``outerRadius`` is an alias for ``radius``.
**Default value:** ``0``
padAngle : dict, float, :class:`ExprRef`
The angular padding applied to sides of the arc, in radians.
radius : dict, float, :class:`ExprRef`
For arc mark, the primary (outer) radius in pixels.
For text marks, polar coordinate radial offset, in pixels, of the text from the
origin determined by the ``x`` and ``y`` properties.
**Default value:** ``min(plot_width, plot_height)/2``
radius2 : dict, float, :class:`ExprRef`
The secondary (inner) radius in pixels of arc marks.
**Default value:** ``0``
shape : str, dict, :class:`ExprRef`, :class:`SymbolShape`
Shape of the point marks. Supported values include:
* plotting shapes: ``"circle"``, ``"square"``, ``"cross"``, ``"diamond"``,
``"triangle-up"``, ``"triangle-down"``, ``"triangle-right"``, or
``"triangle-left"``.
* the line symbol ``"stroke"``
* centered directional shapes ``"arrow"``, ``"wedge"``, or ``"triangle"``
* a custom `SVG path string
<https://developer.mozilla.org/en-US/docs/Web/SVG/Tutorial/Paths>`__ (For correct
sizing, custom shape paths should be defined within a square bounding box with
coordinates ranging from -1 to 1 along both the x and y dimensions.)
**Default value:** ``"circle"``
size : dict, float, :class:`ExprRef`
Default size for marks.
* For ``point``/``circle``/``square``, this represents the pixel area of the marks.
Note that this value sets the area of the symbol; the side lengths will increase
with the square root of this value.
* For ``bar``, this represents the band size of the bar, in pixels.
* For ``text``, this represents the font size, in pixels.
**Default value:**
* ``30`` for point, circle, square marks; width/height's ``step``
* ``2`` for bar marks with discrete dimensions;
* ``5`` for bar marks with continuous dimensions;
* ``11`` for text marks.
smooth : bool, dict, :class:`ExprRef`
A boolean flag (default true) indicating if the image should be smoothed when
resized. If false, individual pixels should be scaled directly rather than
interpolated with smoothing. For SVG rendering, this option may not work in some
browsers due to lack of standardization.
startAngle : dict, float, :class:`ExprRef`
The start angle in radians for arc marks. A value of ``0`` indicates up (north),
increasing values proceed clockwise.
stroke : str, dict, :class:`Color`, :class:`ExprRef`, :class:`Gradient`, :class:`HexColor`, :class:`ColorName`, :class:`LinearGradient`, :class:`RadialGradient`, Literal['black', 'silver', 'gray', 'white', 'maroon', 'red', 'purple', 'fuchsia', 'green', 'lime', 'olive', 'yellow', 'navy', 'blue', 'teal', 'aqua', 'orange', 'aliceblue', 'antiquewhite', 'aquamarine', 'azure', 'beige', 'bisque', 'blanchedalmond', 'blueviolet', 'brown', 'burlywood', 'cadetblue', 'chartreuse', 'chocolate', 'coral', 'cornflowerblue', 'cornsilk', 'crimson', 'cyan', 'darkblue', 'darkcyan', 'darkgoldenrod', 'darkgray', 'darkgreen', 'darkgrey', 'darkkhaki', 'darkmagenta', 'darkolivegreen', 'darkorange', 'darkorchid', 'darkred', 'darksalmon', 'darkseagreen', 'darkslateblue', 'darkslategray', 'darkslategrey', 'darkturquoise', 'darkviolet', 'deeppink', 'deepskyblue', 'dimgray', 'dimgrey', 'dodgerblue', 'firebrick', 'floralwhite', 'forestgreen', 'gainsboro', 'ghostwhite', 'gold', 'goldenrod', 'greenyellow', 'grey', 'honeydew', 'hotpink', 'indianred', 'indigo', 'ivory', 'khaki', 'lavender', 'lavenderblush', 'lawngreen', 'lemonchiffon', 'lightblue', 'lightcoral', 'lightcyan', 'lightgoldenrodyellow', 'lightgray', 'lightgreen', 'lightgrey', 'lightpink', 'lightsalmon', 'lightseagreen', 'lightskyblue', 'lightslategray', 'lightslategrey', 'lightsteelblue', 'lightyellow', 'limegreen', 'linen', 'magenta', 'mediumaquamarine', 'mediumblue', 'mediumorchid', 'mediumpurple', 'mediumseagreen', 'mediumslateblue', 'mediumspringgreen', 'mediumturquoise', 'mediumvioletred', 'midnightblue', 'mintcream', 'mistyrose', 'moccasin', 'navajowhite', 'oldlace', 'olivedrab', 'orangered', 'orchid', 'palegoldenrod', 'palegreen', 'paleturquoise', 'palevioletred', 'papayawhip', 'peachpuff', 'peru', 'pink', 'plum', 'powderblue', 'rosybrown', 'royalblue', 'saddlebrown', 'salmon', 'sandybrown', 'seagreen', 'seashell', 'sienna', 'skyblue', 'slateblue', 'slategray', 'slategrey', 'snow', 'springgreen', 'steelblue', 'tan', 'thistle', 
'tomato', 'turquoise', 'violet', 'wheat', 'whitesmoke', 'yellowgreen', 'rebeccapurple'], None
Default stroke color. This property has higher precedence than ``config.color``. Set
to ``null`` to remove stroke.
**Default value:** (None)
strokeCap : dict, :class:`ExprRef`, :class:`StrokeCap`, Literal['butt', 'round', 'square']
The stroke cap for line ending style. One of ``"butt"``, ``"round"``, or
``"square"``.
**Default value:** ``"butt"``
strokeDash : dict, Sequence[float], :class:`ExprRef`
An array of alternating stroke, space lengths for creating dashed or dotted lines.
strokeDashOffset : dict, float, :class:`ExprRef`
The offset (in pixels) into which to begin drawing with the stroke dash array.
strokeJoin : dict, :class:`ExprRef`, :class:`StrokeJoin`, Literal['miter', 'round', 'bevel']
The stroke line join method. One of ``"miter"``, ``"round"`` or ``"bevel"``.
**Default value:** ``"miter"``
strokeMiterLimit : dict, float, :class:`ExprRef`
The miter limit at which to bevel a line join.
strokeOffset : dict, float, :class:`ExprRef`
The offset in pixels at which to draw the group stroke and fill. If unspecified, the
default behavior is to dynamically offset stroked groups such that 1 pixel stroke
widths align with the pixel grid.
strokeOpacity : dict, float, :class:`ExprRef`
The stroke opacity (value between [0,1]).
**Default value:** ``1``
strokeWidth : dict, float, :class:`ExprRef`
The stroke width, in pixels.
tension : dict, float, :class:`ExprRef`
Depending on the interpolation type, sets the tension parameter (for line and area
marks).
text : str, dict, :class:`Text`, Sequence[str], :class:`ExprRef`
Placeholder text if the ``text`` channel is not specified
theta : dict, float, :class:`ExprRef`
* For arc marks, the arc length in radians if theta2 is not specified, otherwise the
start arc angle. (A value of 0 indicates up or “north”, increasing values proceed
clockwise.)
* For text marks, polar coordinate angle in radians.
theta2 : dict, float, :class:`ExprRef`
The end angle of arc marks in radians. A value of 0 indicates up or “north”,
increasing values proceed clockwise.
time : dict, float, :class:`ExprRef`
timeUnitBandPosition : float
Default relative band position for a time unit. If set to ``0``, the marks will be
positioned at the beginning of the time unit band step. If set to ``0.5``, the marks
will be positioned in the middle of the time unit band step.
timeUnitBandSize : float
Default relative band size for a time unit. If set to ``1``, the bandwidth of the
marks will be equal to the time unit band step. If set to ``0.5``, bandwidth of the
marks will be half of the time unit band step.
tooltip : str, bool, dict, float, :class:`ExprRef`, :class:`TooltipContent`, None
The tooltip text string to show upon mouse hover or an object defining which fields
should the tooltip be derived from.
* If ``tooltip`` is ``true`` or ``{"content": "encoding"}``, then all fields from
``encoding`` will be used.
* If ``tooltip`` is ``{"content": "data"}``, then all fields that appear in the
highlighted data point will be used.
* If set to ``null`` or ``false``, then no tooltip will be used.
See the `tooltip <https://vega.github.io/vega-lite/docs/tooltip.html>`__
documentation for a detailed discussion about tooltip in Vega-Lite.
**Default value:** ``null``
url : str, dict, :class:`URI`, :class:`ExprRef`
The URL of the image file for image marks.
width : dict, float, :class:`ExprRef`
Width of the marks.
x : dict, float, :class:`ExprRef`, Literal['width']
X coordinates of the marks, or width of horizontal ``"bar"`` and ``"area"`` without
specified ``x2`` or ``width``.
The ``value`` of this channel can be a number or a string ``"width"`` for the width
of the plot.
x2 : dict, float, :class:`ExprRef`, Literal['width']
X2 coordinates for ranged ``"area"``, ``"bar"``, ``"rect"``, and ``"rule"``.
The ``value`` of this channel can be a number or a string ``"width"`` for the width
of the plot.
y : dict, float, :class:`ExprRef`, Literal['height']
Y coordinates of the marks, or height of vertical ``"bar"`` and ``"area"`` without
specified ``y2`` or ``height``.
The ``value`` of this channel can be a number or a string ``"height"`` for the
height of the plot.
y2 : dict, float, :class:`ExprRef`, Literal['height']
Y2 coordinates for ranged ``"area"``, ``"bar"``, ``"rect"``, and ``"rule"``.
The ``value`` of this channel can be a number or a string ``"height"`` for the
height of the plot.
"""
_schema = {"$ref": "#/definitions/MarkConfig"}
def __init__(
self,
align: Optional[Parameter | SchemaBase | Map | Align_T] = Undefined,
angle: Optional[float | Parameter | SchemaBase | Map] = Undefined,
aria: Optional[bool | Parameter | SchemaBase | Map] = Undefined,
ariaRole: Optional[str | Parameter | SchemaBase | Map] = Undefined,
ariaRoleDescription: Optional[str | Parameter | SchemaBase | Map] = Undefined,
aspect: Optional[bool | Parameter | SchemaBase | Map] = Undefined,
baseline: Optional[Parameter | SchemaBase | Map | TextBaseline_T] = Undefined,
blend: Optional[Parameter | SchemaBase | Map | Blend_T] = Undefined,
color: Optional[str | Parameter | SchemaBase | Map | ColorName_T] = Undefined,
cornerRadius: Optional[float | Parameter | SchemaBase | Map] = Undefined,
cornerRadiusBottomLeft: Optional[
float | Parameter | SchemaBase | Map
] = Undefined,
cornerRadiusBottomRight: Optional[
float | Parameter | SchemaBase | Map
] = Undefined,
cornerRadiusTopLeft: Optional[float | Parameter | SchemaBase | Map] = Undefined,
cornerRadiusTopRight: Optional[
float | Parameter | SchemaBase | Map
] = Undefined,
cursor: Optional[Parameter | SchemaBase | Map | Cursor_T] = Undefined,
description: Optional[str | Parameter | SchemaBase | Map] = Undefined,
dir: Optional[Parameter | SchemaBase | Map | TextDirection_T] = Undefined,
dx: Optional[float | Parameter | SchemaBase | Map] = Undefined,
dy: Optional[float | Parameter | SchemaBase | Map] = Undefined,
ellipsis: Optional[str | Parameter | SchemaBase | Map] = Undefined,
endAngle: Optional[float | Parameter | SchemaBase | Map] = Undefined,
fill: Optional[
str | Parameter | SchemaBase | Map | ColorName_T | None
] = Undefined,
fillOpacity: Optional[float | Parameter | SchemaBase | Map] = Undefined,
filled: Optional[bool] = Undefined,
font: Optional[str | Parameter | SchemaBase | Map] = Undefined,
fontSize: Optional[float | Parameter | SchemaBase | Map] = Undefined,
fontStyle: Optional[str | Parameter | SchemaBase | Map] = Undefined,
fontWeight: Optional[Parameter | SchemaBase | Map | FontWeight_T] = Undefined,
height: Optional[float | Parameter | SchemaBase | Map] = Undefined,
href: Optional[str | Parameter | SchemaBase | Map] = Undefined,
innerRadius: Optional[float | Parameter | SchemaBase | Map] = Undefined,
interpolate: Optional[Parameter | SchemaBase | Map | Interpolate_T] = Undefined,
invalid: Optional[SchemaBase | MarkInvalidDataMode_T | None] = Undefined,
limit: Optional[float | Parameter | SchemaBase | Map] = Undefined,
lineBreak: Optional[str | Parameter | SchemaBase | Map] = Undefined,
lineHeight: Optional[float | Parameter | SchemaBase | Map] = Undefined,
opacity: Optional[float | Parameter | SchemaBase | Map] = Undefined,
order: Optional[bool | None] = Undefined,
orient: Optional[SchemaBase | Orientation_T] = Undefined,
outerRadius: Optional[float | Parameter | SchemaBase | Map] = Undefined,
padAngle: Optional[float | Parameter | SchemaBase | Map] = Undefined,
radius: Optional[float | Parameter | SchemaBase | Map] = Undefined,
radius2: Optional[float | Parameter | SchemaBase | Map] = Undefined,
shape: Optional[str | Parameter | SchemaBase | Map] = Undefined,
size: Optional[float | Parameter | SchemaBase | Map] = Undefined,
smooth: Optional[bool | Parameter | SchemaBase | Map] = Undefined,
startAngle: Optional[float | Parameter | SchemaBase | Map] = Undefined,
stroke: Optional[
str | Parameter | SchemaBase | Map | ColorName_T | None
] = Undefined,
strokeCap: Optional[Parameter | SchemaBase | Map | StrokeCap_T] = Undefined,
strokeDash: Optional[
Parameter | SchemaBase | Sequence[float] | Map
] = Undefined,
strokeDashOffset: Optional[float | Parameter | SchemaBase | Map] = Undefined,
strokeJoin: Optional[Parameter | SchemaBase | Map | StrokeJoin_T] = Undefined,
strokeMiterLimit: Optional[float | Parameter | SchemaBase | Map] = Undefined,
strokeOffset: Optional[float | Parameter | SchemaBase | Map] = Undefined,
strokeOpacity: Optional[float | Parameter | SchemaBase | Map] = Undefined,
strokeWidth: Optional[float | Parameter | SchemaBase | Map] = Undefined,
tension: Optional[float | Parameter | SchemaBase | Map] = Undefined,
text: Optional[str | Parameter | SchemaBase | Sequence[str] | Map] = Undefined,
theta: Optional[float | Parameter | SchemaBase | Map] = Undefined,
theta2: Optional[float | Parameter | SchemaBase | Map] = Undefined,
time: Optional[float | Parameter | SchemaBase | Map] = Undefined,
timeUnitBandPosition: Optional[float] = Undefined,
timeUnitBandSize: Optional[float] = Undefined,
tooltip: Optional[
str | bool | float | Parameter | SchemaBase | Map | None
] = Undefined,
url: Optional[str | Parameter | SchemaBase | Map] = Undefined,
width: Optional[float | Parameter | SchemaBase | Map] = Undefined,
x: Optional[
float | Parameter | SchemaBase | Literal["width"] | Map
] = Undefined,
x2: Optional[
float | Parameter | SchemaBase | Literal["width"] | Map
] = Undefined,
y: Optional[
float | Parameter | SchemaBase | Literal["height"] | Map
] = Undefined,
y2: Optional[
float | Parameter | SchemaBase | Literal["height"] | Map
] = Undefined,
**kwds,
):
super().__init__(
align=align,
angle=angle,
aria=aria,
ariaRole=ariaRole,
ariaRoleDescription=ariaRoleDescription,
aspect=aspect,
baseline=baseline,
blend=blend,
color=color,
cornerRadius=cornerRadius,
cornerRadiusBottomLeft=cornerRadiusBottomLeft,
cornerRadiusBottomRight=cornerRadiusBottomRight,
cornerRadiusTopLeft=cornerRadiusTopLeft,
cornerRadiusTopRight=cornerRadiusTopRight,
cursor=cursor,
description=description,
dir=dir,
dx=dx,
dy=dy,
ellipsis=ellipsis,
endAngle=endAngle,
fill=fill,
fillOpacity=fillOpacity,
filled=filled,
font=font,
fontSize=fontSize,
fontStyle=fontStyle,
fontWeight=fontWeight,
height=height,
href=href,
innerRadius=innerRadius,
interpolate=interpolate,
invalid=invalid,
limit=limit,
lineBreak=lineBreak,
lineHeight=lineHeight,
opacity=opacity,
order=order,
orient=orient,
outerRadius=outerRadius,
padAngle=padAngle,
radius=radius,
radius2=radius2,
shape=shape,
size=size,
smooth=smooth,
startAngle=startAngle,
stroke=stroke,
strokeCap=strokeCap,
strokeDash=strokeDash,
strokeDashOffset=strokeDashOffset,
strokeJoin=strokeJoin,
strokeMiterLimit=strokeMiterLimit,
strokeOffset=strokeOffset,
strokeOpacity=strokeOpacity,
strokeWidth=strokeWidth,
tension=tension,
text=text,
theta=theta,
theta2=theta2,
time=time,
timeUnitBandPosition=timeUnitBandPosition,
timeUnitBandSize=timeUnitBandSize,
tooltip=tooltip,
url=url,
width=width,
x=x,
x2=x2,
y=y,
y2=y2,
**kwds,
)
| MarkConfig |
python | huggingface__transformers | src/transformers/models/jamba/modeling_jamba.py | {
"start": 33811,
"end": 35311
} | class ____(GradientCheckpointingLayer):
def __init__(self, config: JambaConfig, layer_idx: int):
super().__init__()
num_experts = config.layers_num_experts[layer_idx] if config.layers_num_experts else 1
self.mamba = JambaMambaMixer(config=config, layer_idx=layer_idx)
ffn_layer_class = JambaSparseMoeBlock if num_experts > 1 else JambaMLP
self.feed_forward = ffn_layer_class(config)
self.input_layernorm = JambaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
self.pre_ff_layernorm = JambaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[HybridMambaAttentionDynamicCache] = None,
**kwargs: Unpack[TransformersKwargs],
) -> torch.FloatTensor:
residual = hidden_states
hidden_states = self.input_layernorm(hidden_states)
hidden_states = self.mamba(
hidden_states=hidden_states,
cache_params=past_key_values,
attention_mask=attention_mask,
)
hidden_states = residual + hidden_states
residual = hidden_states
hidden_states = self.pre_ff_layernorm(hidden_states)
hidden_states = self.feed_forward(hidden_states)
hidden_states = residual + hidden_states
return hidden_states
| JambaMambaDecoderLayer |
python | anthropics__anthropic-sdk-python | src/anthropic/_exceptions.py | {
"start": 3883,
"end": 4021
} | class ____(APIStatusError):
status_code: Literal[504] = 504 # pyright: ignore[reportIncompatibleVariableOverride]
| DeadlineExceededError |
python | pytorch__pytorch | torch/_inductor/compile_fx_async.py | {
"start": 1053,
"end": 2789
} | class ____:
progression_futures: deque[Future[_WireProtocolPickledOutput]]
callback: Callable[[_WireProtocolPickledOutput], OutputCode]
post_compile_data: Optional[_PostCompileData]
def check_and_get_ready_stage(self) -> int:
"""Check if any progression stage is ready and return its index, or -1 if none are ready."""
if not self.progression_futures:
return -1
stage_index = -1
if self.post_compile_data:
for i, future in enumerate(self.progression_futures):
if future.done():
stage_index = i
return stage_index
def switch_to_progression_stage(self, stage_index: int) -> tuple[OutputCode, bool]:
"""
Switch to the specified progression stage and return the optimized output code.
Returns a tuple of (optimized_output_code, should_clear_compilation_state).
"""
future = self.progression_futures[stage_index]
assert future is not None
optimized_output_code = self.callback(future.result())
if pcd := self.post_compile_data:
optimized_output_code.post_compile(
pcd.example_inputs, pcd.constants, pcd.graph_kwargs
)
# Clear earlier progression futures to free memory
for _ in range(stage_index + 1):
self.progression_futures.popleft()
# Return whether all compilation state should be cleared
should_clear_state = not self.progression_futures
return optimized_output_code, should_clear_state
# _AsyncOutputCode handles the actual management of waiting for an
# out-of-process compile to finish and then switching over to it.
@final
| ProgressiveCompilationState |
python | matplotlib__matplotlib | lib/matplotlib/backends/backend_gtk4.py | {
"start": 23788,
"end": 23895
} | class ____(_BackendGTK):
FigureCanvas = FigureCanvasGTK4
FigureManager = FigureManagerGTK4
| _BackendGTK4 |
python | astropy__astropy | astropy/units/core.py | {
"start": 1664,
"end": 42617
} | class ____:
"""
Abstract base class for units.
Most of the arithmetic operations on units are defined in this
base class.
Should not be instantiated by users directly.
"""
# Make sure that __rmul__ of units gets called over the __mul__ of Numpy
# arrays to avoid element-wise multiplication.
__array_priority__: Final = 1000
def __deepcopy__(self, memo: dict[int, Any] | None) -> Self:
# This may look odd, but the units conversion will be very
# broken after deep-copying if we don't guarantee that a given
# physical unit corresponds to only one instance
return self
def _repr_latex_(self) -> str:
"""
Generate latex representation of unit name. This is used by
the IPython notebook to print a unit with a nice layout.
Returns
-------
Latex string
"""
from .format import Latex
return Latex.to_string(self)
def __bytes__(self) -> bytes:
return str(self).encode("unicode_escape")
def __str__(self) -> str:
from .format import Generic
return Generic.to_string(self)
def __repr__(self) -> str:
return f'Unit("{self}")'
@cached_property
def _physical_type_id(self) -> PhysicalTypeID:
"""
Returns an identifier that uniquely identifies the physical
type of this unit. It is comprised of the bases and powers of
this unit, without the scale. Since it is hashable, it is
useful as a dictionary key.
"""
unit = self.decompose()
return tuple(zip((base.name for base in unit.bases), unit.powers))
@property
def scale(self) -> UnitScale:
"""The scale of the unit."""
return 1.0
@property
def bases(self) -> list["NamedUnit"]:
"""The bases of the unit."""
return [self]
@property
def powers(self) -> list[UnitPower]:
"""The powers of the bases of the unit."""
return [1]
def to_string(
self,
format: type["astropy.units.format.Base"] | str | None = None,
*,
deprecations: Literal["silent", "warn", "raise", "convert"] = "warn",
**kwargs,
) -> str:
r"""Output the unit in the given format as a string.
Parameters
----------
format : `astropy.units.format.Base` subclass or str or None
The name of a format or a formatter class. If not
provided (or `None`), defaults to the generic format.
deprecations : {"warn", "silent", "raise", "convert"}, optional, keyword-only
Whether deprecated units should emit a warning, be handled
silently or raise an error. The "convert" option replaces
the deprecated unit if possible and emits a warning otherwise.
**kwargs
Further options forwarded to the formatter. Currently
recognized is ``fraction``, which can take the following values:
- `False` : display unit bases with negative powers as they are;
- 'inline' or `True` : use a single-line fraction;
- 'multiline' : use a multiline fraction (available for the
'latex', 'console' and 'unicode' formats only; for others,
an 'inline' fraction is used).
Raises
------
TypeError
If ``format`` is of the wrong type.
ValueError
If ``format`` or ``fraction`` are not recognized.
Examples
--------
>>> import astropy.units as u
>>> kms = u.Unit('km / s')
>>> kms.to_string() # Generic uses fraction='inline' by default
'km / s'
>>> kms.to_string('latex') # Latex uses fraction='multiline' by default
'$\\mathrm{\\frac{km}{s}}$'
>>> print(kms.to_string('unicode', fraction=False))
km s⁻¹
>>> print(kms.to_string('unicode', fraction='inline'))
km / s
>>> print(kms.to_string('unicode', fraction='multiline'))
km
──
s
"""
from .format import get_format
try:
formatter = get_format(format)
except (TypeError, ValueError) as err:
from .format import known_formats
err.add_note(known_formats())
raise err
return formatter.to_string(self, deprecations=deprecations, **kwargs)
def __format__(self, format_spec: str) -> str:
try:
return self.to_string(format=format_spec)
except ValueError:
return format(str(self), format_spec)
@staticmethod
def _normalize_equivalencies(equivalencies):
"""Normalizes equivalencies, ensuring each is a 4-tuple.
The resulting tuple is of the form::
(from_unit, to_unit, forward_func, backward_func)
Parameters
----------
equivalencies : list of equivalency pairs, or None
Returns
-------
A normalized list, including possible global defaults set by, e.g.,
`set_enabled_equivalencies`, except when `equivalencies`=`None`,
in which case the returned list is always empty.
Raises
------
ValueError if an equivalency cannot be interpreted
"""
normalized = _normalize_equivalencies(equivalencies)
if equivalencies is not None:
normalized += get_current_unit_registry().equivalencies
return normalized
def __pow__(self, p: UnitPowerLike) -> "CompositeUnit":
try: # Handling scalars should be as quick as possible
return CompositeUnit(1, [self], [sanitize_power(p)], _error_check=False)
except Exception:
arr = np.asanyarray(p)
p = arr.item(0)
if (arr != p).any():
raise ValueError(
"Quantities and Units may only be raised to a scalar power"
) from None
return CompositeUnit(1, [self], [sanitize_power(p)], _error_check=False)
@staticmethod
def _warn_about_operation_with_deprecated_type(op: str, other: bytes | str) -> None:
warnings.warn(
AstropyDeprecationWarning(
f"{op} involving a unit and a '{type(other).__name__}' instance are "
f"deprecated since v7.1. Convert {other!r} to a unit explicitly."
),
stacklevel=3,
)
def __truediv__(self, m):
if isinstance(m, UnitBase):
if m.is_unity():
return self
return CompositeUnit(1, [self, m], [1, -1], _error_check=False)
if isinstance(m, (bytes, str)):
self._warn_about_operation_with_deprecated_type("divisions", m)
return self / Unit(m)
try:
# Cannot handle this as Unit, re-try as Quantity
from .quantity import Quantity
return Quantity(1, self) / m
except TypeError:
return NotImplemented
def __rtruediv__(self, m):
if isinstance(m, (bytes, str)):
self._warn_about_operation_with_deprecated_type("divisions", m)
return Unit(m) / self
try:
# Cannot handle this as Unit. Here, m cannot be a Quantity,
# so we make it into one, fasttracking when it does not have a
# unit, for the common case of <array> / <unit>.
from .quantity import Quantity
if hasattr(m, "unit"):
result = Quantity(m)
result /= self
return result
else:
return Quantity(m, self ** (-1))
except TypeError:
if isinstance(m, np.ndarray):
raise
return NotImplemented
def __mul__(self, m):
if isinstance(m, UnitBase):
if m.is_unity():
return self
elif self.is_unity():
return m
return CompositeUnit(1, [self, m], [1, 1], _error_check=False)
if isinstance(m, (bytes, str)):
self._warn_about_operation_with_deprecated_type("products", m)
return self * Unit(m)
# Cannot handle this as Unit, re-try as Quantity.
try:
from .quantity import Quantity
return Quantity(1, unit=self) * m
except TypeError:
return NotImplemented
def __rmul__(self, m):
if isinstance(m, (bytes, str)):
self._warn_about_operation_with_deprecated_type("products", m)
return Unit(m) * self
# Cannot handle this as Unit. Here, m cannot be a Quantity,
# so we make it into one, fasttracking when it does not have a unit
# for the common case of <array> * <unit>.
try:
from .quantity import Quantity
if hasattr(m, "unit"):
result = Quantity(m)
result *= self
return result
else:
return Quantity(m, unit=self)
except TypeError:
if isinstance(m, np.ndarray):
raise
return NotImplemented
def __rlshift__(self, m):
try:
from .quantity import Quantity
return Quantity(m, self, copy=COPY_IF_NEEDED, subok=True)
except Exception:
if isinstance(m, np.ndarray):
raise
return NotImplemented
def __rrshift__(self, m):
warnings.warn(
">> is not implemented. Did you mean to convert "
f"to a Quantity with unit {m} using '<<'?",
AstropyWarning,
)
return NotImplemented
def __hash__(self) -> int:
return self._hash
@cached_property
def _hash(self) -> int:
return hash((self.scale, *[x.name for x in self.bases], *map(str, self.powers)))
def __getstate__(self) -> dict[str, object]:
# If we get pickled, we should *not* store the memoized members since
# hashes of strings vary between sessions.
state = self.__dict__.copy()
state.pop("_hash", None)
state.pop("_physical_type_id", None)
return state
def __eq__(self, other):
if self is other:
return True
try:
other = Unit(other, parse_strict="silent")
except (ValueError, UnitsError, TypeError):
return NotImplemented
# Other is unit-like, but the test below requires it is a UnitBase
# instance; if it is not, give up (so that other can try).
if not isinstance(other, UnitBase):
return NotImplemented
try:
return is_effectively_unity(self._to(other))
except UnitsError:
return False
def __ne__(self, other):
return not (self == other)
def __le__(self, other):
scale = self._to(Unit(other))
return scale <= 1.0 or is_effectively_unity(scale)
def __ge__(self, other):
scale = self._to(Unit(other))
return scale >= 1.0 or is_effectively_unity(scale)
def __lt__(self, other):
return not (self >= other)
def __gt__(self, other):
return not (self <= other)
def __neg__(self) -> "astropy.units.Quantity":
return self * -1.0
def is_equivalent(self, other, equivalencies=[]):
"""Check whether this unit is equivalent to ``other``.
Parameters
----------
other : `~astropy.units.Unit`, str, or tuple
The unit to convert to. If a tuple of units is specified, this
method returns true if the unit matches any of those in the tuple.
equivalencies : list of tuple
A list of equivalence pairs to try if the units are not
directly convertible. See :ref:`astropy:unit_equivalencies`.
This list is in addition to possible global defaults set by, e.g.,
`set_enabled_equivalencies`.
Use `None` to turn off all equivalencies.
Returns
-------
bool
"""
equivalencies = self._normalize_equivalencies(equivalencies)
if isinstance(other, tuple):
return any(self.is_equivalent(u, equivalencies) for u in other)
other = Unit(other, parse_strict="silent")
return self._is_equivalent(other, equivalencies)
def _is_equivalent(self, other, equivalencies=[]):
"""Returns `True` if this unit is equivalent to `other`.
See `is_equivalent`, except that a proper Unit object should be
given (i.e., no string) and that the equivalency list should be
normalized using `_normalize_equivalencies`.
"""
if isinstance(other, UnrecognizedUnit):
return False
if self._physical_type_id == other._physical_type_id:
return True
elif len(equivalencies):
unit = self.decompose()
other = other.decompose()
for a, b, _, _ in equivalencies:
if b is None:
# after canceling, is what's left convertible
# to dimensionless (according to the equivalency)?
try:
(other / unit).decompose([a])
return True
except Exception:
pass
elif (a._is_equivalent(unit) and b._is_equivalent(other)) or (
b._is_equivalent(unit) and a._is_equivalent(other)
):
return True
return False
def _apply_equivalencies(self, unit, other, equivalencies):
"""
Internal function (used from `get_converter`) to apply
equivalence pairs.
"""
def make_converter(scale1, func, scale2):
def convert(v):
return func(_condition_arg(v) / scale1) * scale2
return convert
for funit, tunit, a, b in equivalencies:
if tunit is None:
ratio = other.decompose() / unit.decompose()
try:
ratio_in_funit = ratio.decompose([funit])
return make_converter(ratio_in_funit.scale, a, 1.0)
except UnitsError:
pass
else:
try:
scale1 = funit._to(unit)
scale2 = tunit._to(other)
return make_converter(scale1, a, scale2)
except UnitsError:
pass
try:
scale1 = tunit._to(unit)
scale2 = funit._to(other)
return make_converter(scale1, b, scale2)
except UnitsError:
pass
def get_err_str(unit: UnitBase) -> str:
unit_str = unit.to_string("generic")
physical_type = unit.physical_type
if physical_type != "unknown":
unit_str = f"'{unit_str}' ({physical_type})"
else:
unit_str = f"'{unit_str}'"
return unit_str
unit_str = get_err_str(unit)
other_str = get_err_str(other)
raise UnitConversionError(f"{unit_str} and {other_str} are not convertible")
def get_converter(self, other, equivalencies=[]):
"""
Create a function that converts values from this unit to another.
Parameters
----------
other : unit-like
The unit to convert to.
equivalencies : list of tuple
A list of equivalence pairs to try if the units are not
directly convertible. See :ref:`astropy:unit_equivalencies`.
This list is in addition to possible global defaults set by, e.g.,
`set_enabled_equivalencies`.
Use `None` to turn off all equivalencies.
Returns
-------
func : callable
A callable that takes an array-like argument and returns
it converted from units of self to units of other.
Raises
------
UnitsError
If the units cannot be converted to each other.
Notes
-----
This method is used internally in `Quantity` to convert to
different units. Note that the function returned takes
and returns values, not quantities.
"""
# First see if it is just a scaling.
try:
scale = self._to(other)
except UnitsError:
pass
else:
if scale == 1.0:
# If no conversion is necessary, returns ``unit_scale_converter``
# (which is used as a check in quantity helpers).
return unit_scale_converter
else:
return lambda val: scale * _condition_arg(val)
# if that doesn't work, maybe we can do it with equivalencies?
try:
return self._apply_equivalencies(
self, other, self._normalize_equivalencies(equivalencies)
)
except UnitsError as exc:
# Last hope: maybe other knows how to do it?
# We assume the equivalencies have the unit itself as first item.
# TODO: maybe better for other to have a `_back_converter` method?
if hasattr(other, "equivalencies"):
for funit, tunit, _, b in other.equivalencies:
if other is funit:
try:
converter = self.get_converter(tunit, equivalencies)
except Exception:
pass
else:
return lambda v: b(converter(v))
raise exc
def _to(self, other: "UnitBase") -> UnitScale:
"""
Returns the scale to the specified unit.
See `to`, except that a Unit object should be given (i.e., no
string), and that all defaults are used, i.e., no
equivalencies and value=1.
"""
# There are many cases where we just want to ensure a Quantity is
# of a particular unit, without checking whether it's already in
# a particular unit. If we're being asked to convert from a unit
# to itself, we can short-circuit all of this.
if self is other:
return 1.0
# Don't presume decomposition is possible; e.g.,
# conversion to function units is through equivalencies.
if isinstance(other, UnitBase):
self_decomposed = self.decompose()
other_decomposed = other.decompose()
# Check quickly whether equivalent. This is faster than
# `is_equivalent`, because it doesn't generate the entire
# physical type list of both units. In other words it "fails
# fast".
if self_decomposed.powers == other_decomposed.powers and all(
self_base is other_base
for (self_base, other_base) in zip(
self_decomposed.bases, other_decomposed.bases
)
):
return self_decomposed.scale / other_decomposed.scale
raise UnitConversionError(f"'{self!r}' is not a scaled version of '{other!r}'")
def to(self, other, value=UNITY, equivalencies=[]):
"""
Return the converted values in the specified unit.
Parameters
----------
other : unit-like
The unit to convert to.
value : int, float, or scalar array-like, optional
Value(s) in the current unit to be converted to the
specified unit. If not provided, defaults to 1.0
equivalencies : list of tuple
A list of equivalence pairs to try if the units are not
directly convertible. See :ref:`astropy:unit_equivalencies`.
This list is in addition to possible global defaults set by, e.g.,
`set_enabled_equivalencies`.
Use `None` to turn off all equivalencies.
Returns
-------
values : scalar or array
Converted value(s). Input value sequences are returned as
numpy arrays.
Raises
------
UnitsError
If units are inconsistent
"""
if other is self and value is UNITY:
return UNITY
else:
return self.get_converter(Unit(other), equivalencies)(value)
@deprecated(since="7.0", alternative="to()")
def in_units(self, other, value=1.0, equivalencies=[]):
"""
Alias for `to` for backward compatibility with pynbody.
"""
return self.to(other, value=value, equivalencies=equivalencies)
def decompose(self, bases: Collection["UnitBase"] = ()) -> "UnitBase":
"""
Return a unit object composed of only irreducible units.
Parameters
----------
bases : sequence of UnitBase, optional
The bases to decompose into. When not provided,
decomposes down to any irreducible units. When provided,
the decomposed result will only contain the given units.
This will raises a `UnitsError` if it's not possible
to do so.
Returns
-------
unit : `~astropy.units.CompositeUnit`
New object containing only irreducible unit objects.
"""
raise NotImplementedError()
def _compose(
self, equivalencies=[], namespace=[], max_depth=2, depth=0, cached_results=None
):
def is_final_result(unit: UnitBase) -> bool:
# Returns True if this result contains only the expected
# units
return all(base in namespace for base in unit.bases)
unit = self.decompose()
cached = cached_results.get(unit)
if cached is not None:
if isinstance(cached, Exception):
raise cached
return cached
# Prevent too many levels of recursion
# And special case for dimensionless unit
if depth >= max_depth:
cached_results[unit] = [unit]
return [unit]
# Make a list including all of the equivalent units
units = [unit]
for funit, tunit, a, b in equivalencies:
if tunit is not None:
if self._is_equivalent(funit):
scale = funit.decompose().scale / unit.scale
units.append(Unit(a(1.0 / scale) * tunit).decompose())
elif self._is_equivalent(tunit):
scale = tunit.decompose().scale / unit.scale
units.append(Unit(b(1.0 / scale) * funit).decompose())
else:
if self._is_equivalent(funit):
units.append(Unit(unit.scale))
# Store partial results
partial_results = []
# Store final results that reduce to a single unit or pair of
# units
if len(unit.bases) == 0:
final_results = [{unit}, set()]
else:
final_results = [set(), set()]
for tunit in namespace:
tunit_decomposed = tunit.decompose()
for u in units:
# If the unit is a base unit, look for an exact match
# to one of the bases of the target unit. If found,
# factor by the same power as the target unit's base.
# This allows us to factor out fractional powers
# without needing to do an exhaustive search.
if len(tunit_decomposed.bases) == 1:
for base, power in zip(u.bases, u.powers):
if tunit_decomposed._is_equivalent(base):
tunit = tunit**power
tunit_decomposed = tunit_decomposed**power
break
composed = (u / tunit_decomposed).decompose()
factored = composed * tunit
len_bases = len(composed.bases)
if is_final_result(factored) and len_bases <= 1:
final_results[len_bases].add(factored)
else:
partial_results.append((len_bases, composed, tunit))
# Do we have any minimal results?
for final_result in final_results:
if len(final_result):
results = final_results[0].union(final_results[1])
cached_results[unit] = results
return results
partial_results.sort(key=operator.itemgetter(0))
# ...we have to recurse and try to further compose
results = []
for _, composed, tunit in partial_results:
try:
composed_list = composed._compose(
equivalencies=equivalencies,
namespace=namespace,
max_depth=max_depth,
depth=depth + 1,
cached_results=cached_results,
)
except UnitsError:
composed_list = []
for subcomposed in composed_list:
results.append((len(subcomposed.bases), subcomposed, tunit))
if results:
results.sort(key=operator.itemgetter(0))
min_length = results[0][0]
subresults = set()
for len_bases, composed, tunit in results:
if len_bases > min_length:
break
factored = composed * tunit
if is_final_result(factored):
subresults.add(factored)
if subresults:
cached_results[unit] = subresults
return subresults
if not is_final_result(self):
result = UnitsError(
f"Cannot represent unit {self} in terms of the given units"
)
cached_results[unit] = result
raise result
cached_results[unit] = [self]
return [self]
def compose(
self, equivalencies=[], units=None, max_depth=2, include_prefix_units=None
):
"""
Return the simplest possible composite unit(s) that represent
the given unit. Since there may be multiple equally simple
compositions of the unit, a list of units is always returned.
Parameters
----------
equivalencies : list of tuple
A list of equivalence pairs to also list. See
:ref:`astropy:unit_equivalencies`.
This list is in addition to possible global defaults set by, e.g.,
`set_enabled_equivalencies`.
Use `None` to turn off all equivalencies.
units : set of `~astropy.units.Unit`, optional
If not provided, any known units may be used to compose
into. Otherwise, ``units`` is a dict, module or sequence
containing the units to compose into.
max_depth : int, optional
The maximum recursion depth to use when composing into
composite units.
include_prefix_units : bool, optional
When `True`, include prefixed units in the result.
Default is `True` if a sequence is passed in to ``units``,
`False` otherwise.
Returns
-------
units : list of `CompositeUnit`
A list of candidate compositions. These will all be
equally simple, but it may not be possible to
automatically determine which of the candidates are
better.
"""
# if units parameter is specified and is a sequence (list|tuple),
# include_prefix_units is turned on by default. Ex: units=[u.kpc]
if include_prefix_units is None:
include_prefix_units = isinstance(units, (list, tuple))
# Pre-normalize the equivalencies list
equivalencies = self._normalize_equivalencies(equivalencies)
# The namespace of units to compose into should be filtered to
# only include units with bases in common with self, otherwise
# they can't possibly provide useful results. Having too many
# destination units greatly increases the search space.
def has_bases_in_common(a: UnitBase, b: UnitBase) -> bool:
return any(ab in b.bases for ab in a.bases) if a.bases or b.bases else True
def has_bases_in_common_with_equiv(unit: UnitBase, other: UnitBase) -> bool:
if has_bases_in_common(unit, other):
return True
for funit, tunit, _, _ in equivalencies:
if tunit is not None:
if unit._is_equivalent(funit):
if has_bases_in_common(tunit.decompose(), other):
return True
elif unit._is_equivalent(tunit):
if has_bases_in_common(funit.decompose(), other):
return True
else:
if unit._is_equivalent(funit):
if has_bases_in_common(dimensionless_unscaled, other):
return True
return False
def filter_units(units: Iterable[object]) -> set[UnitBase]:
return {
tunit
for tunit in units
if (
isinstance(tunit, UnitBase)
and (include_prefix_units or not isinstance(tunit, PrefixUnit))
and has_bases_in_common_with_equiv(decomposed, tunit.decompose())
)
}
decomposed = self.decompose()
if units is None:
units = (
filter_units(self._get_units_with_same_physical_type(equivalencies))
or get_current_unit_registry().non_prefix_units
)
else:
units = filter_units(_flatten_units_collection(units))
# Sort the results so the simplest ones appear first.
# Simplest is defined as "the minimum sum of absolute
# powers" (i.e. the fewest bases), and preference should
# be given to results where the sum of powers is positive
# and the scale is exactly equal to 1.0
return sorted(
self._compose(
equivalencies=equivalencies,
namespace=units,
max_depth=max_depth,
depth=0,
cached_results={},
),
key=lambda x: (
not is_effectively_unity(x.scale),
sum(x.powers) < 0.0,
sum(map(abs, x.powers)),
abs(x.scale),
),
)
def to_system(self, system):
"""Convert this unit into ones belonging to the given system.
Since more than one result may be possible, a list is always
returned.
Parameters
----------
system : module
The module that defines the unit system. Commonly used
ones include `astropy.units.si` and `astropy.units.cgs`.
To use your own module it must contain unit objects and a
sequence member named ``bases`` containing the base units of
the system.
Returns
-------
units : list of `CompositeUnit`
With an attempt to sort simpler units to the start (see examples).
Examples
--------
>>> import astropy.units as u
>>> (u.N / u.m**2).to_system(u.si) # preference for simpler units
[Unit("Pa"), Unit("N / m2"), Unit("J / m3")]
>>> u.Pa.to_system(u.cgs)
[Unit("10 Ba"), Unit("10 P / s")]
>>> u.Ba.to_system(u.si)
[Unit("0.1 Pa"), Unit("0.1 N / m2"), Unit("0.1 J / m3")]
>>> (u.AU/u.yr).to_system(u.cgs) # preference for base units
[Unit("474047 cm / s"), Unit("474047 Gal s")]
>>> (u.m / u.s**2).to_system(u.cgs)
[Unit("100 cm / s2"), Unit("100 Gal")]
"""
return sorted(
self.decompose(bases=system.bases).compose(units=system),
key=lambda x: len(set(x.bases).difference(system.bases)),
)
@cached_property
def si(self) -> "UnitBase":
"""The unit expressed in terms of SI units."""
from . import si
return self.to_system(si)[0]
@cached_property
def cgs(self) -> "UnitBase":
"""The unit expressed in terms of CGS units."""
from . import cgs
return self.to_system(cgs)[0]
@property
def physical_type(self) -> "astropy.units.PhysicalType":
"""
Physical type(s) dimensionally compatible with the unit.
Returns
-------
`~astropy.units.physical.PhysicalType`
A representation of the physical type(s) of a unit.
Examples
--------
>>> from astropy import units as u
>>> u.m.physical_type
PhysicalType('length')
>>> (u.m ** 2 / u.s).physical_type
PhysicalType({'diffusivity', 'kinematic viscosity'})
Physical types can be compared to other physical types
(recommended in packages) or to strings.
>>> area = (u.m ** 2).physical_type
>>> area == u.m.physical_type ** 2
True
>>> area == "area"
True
`~astropy.units.physical.PhysicalType` objects can be used for
dimensional analysis.
>>> number_density = u.m.physical_type ** -3
>>> velocity = (u.m / u.s).physical_type
>>> number_density * velocity
PhysicalType('particle flux')
"""
from . import physical
return physical.get_physical_type(self)
def _get_units_with_same_physical_type(self, equivalencies=[]):
"""
Return a list of registered units with the same physical type
as this unit.
This function is used by Quantity to add its built-in
conversions to equivalent units.
This is a private method, since end users should be encouraged
to use the more powerful `compose` and `find_equivalent_units`
methods (which use this under the hood).
Parameters
----------
equivalencies : list of tuple
A list of equivalence pairs to also pull options from.
See :ref:`astropy:unit_equivalencies`. It must already be
normalized using `_normalize_equivalencies`.
"""
unit_registry = get_current_unit_registry()
units = set(unit_registry.get_units_with_physical_type(self))
for funit, tunit, _, _ in equivalencies:
if tunit is not None:
if self.is_equivalent(funit) and tunit not in units:
units.update(unit_registry.get_units_with_physical_type(tunit))
if self._is_equivalent(tunit) and funit not in units:
units.update(unit_registry.get_units_with_physical_type(funit))
else:
if self.is_equivalent(funit):
units.add(dimensionless_unscaled)
return units
class EquivalentUnitsList(list):
"""
A class to handle pretty-printing the result of
`find_equivalent_units`.
"""
HEADING_NAMES: Final[tuple[str, str, str]] = (
"Primary name",
"Unit definition",
"Aliases",
)
NO_EQUIV_UNITS_MSG: Final[str] = "There are no equivalent units"
def __repr__(self) -> str:
if not self:
return self.NO_EQUIV_UNITS_MSG
lines = self._process_units()
widths = list(map(len, self.HEADING_NAMES))
for line in lines:
widths = [max(w, len(col)) for w, col in zip(widths, line, strict=True)]
row_template = " {{0:<{}s}} | {{1:<{}s}} | {{2:<{}s}}".format(*widths)
return "\n".join(
[
row_template.format(*self.HEADING_NAMES),
"[",
*(f"{row_template.format(*line)} ," for line in lines),
"]",
]
)
def _repr_html_(self) -> str:
"""
Outputs a HTML table representation within Jupyter notebooks.
"""
if not self:
return f"<p>{self.NO_EQUIV_UNITS_MSG}</p>"
heading = "".join(f"<th>{name}</th>" for name in self.HEADING_NAMES)
rows = (
f"<tr>{''.join(f'<td>{elem}</td>' for elem in row)}</tr>"
for row in self._process_units()
)
# The HTML will be rendered & the table is simple, so don't
# bother to include newlines & indentation for the HTML code.
return f'<table style="width:50%"><tr>{heading}</tr>{"".join(rows)}</table>'
def _process_units(self) -> list[tuple[str, str, str]]:
"""
Extract attributes, and sort, the equivalent units pre-formatting.
"""
return sorted(
(
unit.name,
"irreducible" if (s := str(unit.decompose())) == unit.name else s,
", ".join(unit.aliases),
)
for unit in self
)
def find_equivalent_units(
self, equivalencies=[], units=None, include_prefix_units=False
):
"""
Return a list of all the units that are the same type as ``self``.
Parameters
----------
equivalencies : list of tuple
A list of equivalence pairs to also list. See
:ref:`astropy:unit_equivalencies`.
Any list given, including an empty one, supersedes global defaults
that may be in effect (as set by `set_enabled_equivalencies`)
units : set of `~astropy.units.Unit`, optional
If not provided, all defined units will be searched for
equivalencies. Otherwise, may be a dict, module or
sequence containing the units to search for equivalencies.
include_prefix_units : bool, optional
When `True`, include prefixed units in the result.
Default is `False`.
Returns
-------
units : list of `UnitBase`
A list of unit objects that match ``u``. A subclass of
`list` (``EquivalentUnitsList``) is returned that
pretty-prints the list of units when output.
"""
results = self.compose(
equivalencies=equivalencies,
units=units,
max_depth=1,
include_prefix_units=include_prefix_units,
)
results = {
x.bases[0] for x in results if len(x.bases) == 1 and x.powers[0] == 1
}
return self.EquivalentUnitsList(results)
def is_unity(self) -> bool:
"""Check whether the unit is unscaled and dimensionless."""
return False
def _flatten_units_collection(items: object) -> set[UnitBase]:
"""
Given a list of sequences, modules or dictionaries of units, or
single units, return a flat set of all the units found.
"""
if not isinstance(items, list):
items = [items]
result = set()
for item in items:
if isinstance(item, UnitBase):
result.add(item)
else:
if isinstance(item, dict):
units = item.values()
elif inspect.ismodule(item):
units = vars(item).values()
elif np.iterable(item):
units = item
else:
continue
for unit in units:
if isinstance(unit, UnitBase):
result.add(unit)
return result
def _normalize_equivalencies(equivalencies):
"""Normalizes equivalencies ensuring each is a 4-tuple.
The resulting tuple is of the form::
(from_unit, to_unit, forward_func, backward_func)
Parameters
----------
equivalencies : list of equivalency pairs
Raises
------
ValueError if an equivalency cannot be interpreted
"""
if equivalencies is None:
return []
normalized = []
for i, equiv in enumerate(equivalencies):
if len(equiv) == 2:
funit, tunit = equiv
a = b = lambda x: x
elif len(equiv) == 3:
funit, tunit, a = equiv
b = a
elif len(equiv) == 4:
funit, tunit, a, b = equiv
else:
raise ValueError(f"Invalid equivalence entry {i}: {equiv!r}")
if not (
funit is Unit(funit)
and (tunit is None or tunit is Unit(tunit))
and callable(a)
and callable(b)
):
raise ValueError(f"Invalid equivalence entry {i}: {equiv!r}")
normalized.append((funit, tunit, a, b))
return normalized
| UnitBase |
python | spyder-ide__spyder | spyder/plugins/statusbar/widgets/tests/test_status.py | {
"start": 1214,
"end": 1348
} | class ____(QComboBox):
def __init__(self, parent):
super().__init__(parent)
self.addItems(['foo', 'bar'])
| MyComboBox |
python | nedbat__coveragepy | coverage/types.py | {
"start": 5168,
"end": 5459
} | class ____(Protocol):
"""A DebugControl object, or something like it."""
def should(self, option: str) -> bool:
"""Decide whether to output debug information in category `option`."""
def write(self, msg: str) -> None:
"""Write a line of debug output."""
| TDebugCtl |
python | Textualize__textual | docs/examples/widgets/tabbed_content.py | {
"start": 344,
"end": 1467
} | class ____(App):
"""An example of tabbed content."""
BINDINGS = [
("l", "show_tab('leto')", "Leto"),
("j", "show_tab('jessica')", "Jessica"),
("p", "show_tab('paul')", "Paul"),
]
def compose(self) -> ComposeResult:
"""Compose app with tabbed content."""
# Footer to show keys
yield Footer()
# Add the TabbedContent widget
with TabbedContent(initial="jessica"):
with TabPane("Leto", id="leto"): # First tab
yield Markdown(LETO) # Tab content
with TabPane("Jessica", id="jessica"):
yield Markdown(JESSICA)
with TabbedContent("Paul", "Alia"):
yield TabPane("Paul", Label("First child"))
yield TabPane("Alia", Label("Second child"))
with TabPane("Paul", id="paul"):
yield Markdown(PAUL)
def action_show_tab(self, tab: str) -> None:
"""Switch to a new tab."""
self.get_child_by_type(TabbedContent).active = tab
if __name__ == "__main__":
app = TabbedApp()
app.run()
| TabbedApp |
python | pytorch__pytorch | test/dynamo/test_minifier.py | {
"start": 4150,
"end": 5175
} | class ____(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.m_x = torch.nn.Linear(20, 20).to(device)
self.m_y = torch.nn.Linear(20, 20)
self.p_x = torch.nn.Parameter(torch.randn(20, 20).to(device))
self.p_y = torch.nn.Parameter(torch.randn(20, 20))
self.b_x = torch.nn.Buffer(torch.ones(20, 20).to(device))
self.b_y = torch.nn.Buffer(torch.ones(20, 20))
def forward(self, x, y):
return self.m_x(x) + self.p_x + self.b_x, self.m_y(y) + self.p_y + self.b_y
mod = CpuCudaModule()
@torch.compile(backend={backend_name!r})
def inner(x1, y1):
x2 = torch.randn(20, 20).to(device)
y2 = torch.randn(20, 20)
x3, y3 = mod(x1 + x2, y1 + y2)
return torch.relu(x3.cpu() + y3)
inner(torch.randn(20, 20).to(device), torch.randn(20, 20))
"""
res = self._run_full_test(run_code, "dynamo", "ReluCompileError", isolate=False)
self.assertExpectedInline(
res.minifier_module(),
"""\
| CpuCudaModule |
python | pytorch__pytorch | torch/_inductor/codegen/common.py | {
"start": 13071,
"end": 23221
} | class ____(Enum):
FOREACH = auto()
BUCKETIZE = auto()
INPLACE_BUFFERS = auto()
MASKED_SCATTER_WITH_INDEX = auto()
SCAN = auto()
SORT = auto()
TUPLE_REDUCTION = auto()
PREFER_STORE_LOOP_ORDER = auto()
TRITON_TEMPLATES = auto()
REDUCE_TO_SINGLE_ELEMENT = auto()
def get_backend_features(
device: Union[torch.device, str, None],
) -> OrderedSet[BackendFeature]:
if device is None:
return OrderedSet()
init_backend_registration()
if isinstance(device, torch.device):
device_type = device.type
else:
assert isinstance(device, str), type(device)
device_type = device
device = torch.device(device_type)
scheduling_ctor = get_scheduling_for_device(device_type)
assert scheduling_ctor
scheduling = scheduling_ctor(None)
return scheduling.get_backend_features(device)
def has_backend_feature(
device: Union[torch.device, str, None], feature: BackendFeature
) -> bool:
"""See also V.graph.has_feature"""
assert isinstance(feature, BackendFeature)
return feature in get_backend_features(device)
def get_scheduling_for_device(device: str) -> Optional[SchedulingConstructor]:
return device_codegens[device].scheduling if device in device_codegens else None
def get_wrapper_codegen_for_device(
device: str, cpp_wrapper: bool = False, fx_wrapper: bool = False
) -> Optional[WrapperConstructor]:
if device in device_codegens:
wrapper_codegen_obj: DeviceCodegen = device_codegens[device]
if fx_wrapper:
return wrapper_codegen_obj.fx_wrapper_codegen
elif cpp_wrapper:
return wrapper_codegen_obj.cpp_wrapper_codegen
else:
return wrapper_codegen_obj.wrapper_codegen
return None
def get_custom_backend_pass_for_device(device: str) -> Optional[CustomGraphModulePass]:
return custom_backend_passes.get(device)
def get_custom_backend_config_for_device(device: str) -> Optional[ConfigModule]:
return custom_backend_codegen_configs.get(device)
@functools.cache
def init_backend_registration() -> None:
"""
Register the backend for different devices, including the scheduling
for kernel code generation and the host side wrapper code generation.
"""
from .cpp import CppScheduling
from .cpp_wrapper_cpu import CppWrapperCpu
from .cpp_wrapper_cpu_array_ref import CppWrapperCpuArrayRef
from .cpp_wrapper_gpu import CppWrapperGpu
from .cpp_wrapper_mps import CppWrapperMps
from .cuda_combined_scheduling import CUDACombinedScheduling
from .halide import HalideScheduling
from .mps import MetalScheduling
from .pallas import PallasScheduling
from .python_wrapper_mtia import PythonWrapperMtia
from .triton import TritonScheduling
from .wrapper import PythonWrapperCodegen
from .wrapper_fxir import WrapperFxCodegen
if get_scheduling_for_device("cpu") is None:
cpu_backends = {
"cpp": CppScheduling,
"halide": HalideScheduling,
"triton": TritonScheduling,
"pallas": PallasScheduling,
}
register_backend_for_device(
"cpu",
lambda scheduling: cpu_backends[config.cpu_backend](scheduling),
PythonWrapperCodegen,
CppWrapperCpuArrayRef
if config.aot_inductor.allow_stack_allocation
else CppWrapperCpu,
WrapperFxCodegen,
)
if get_scheduling_for_device("cuda") is None:
# CUDACombinedScheduling combines Triton and CUDA C++ scheduling for CUDA devices via delegation
cuda_backends = {
"triton": CUDACombinedScheduling,
"halide": HalideScheduling,
"pallas": PallasScheduling,
}
register_backend_for_device(
"cuda",
lambda scheduling: cuda_backends[config.cuda_backend](scheduling),
PythonWrapperCodegen,
CppWrapperGpu,
WrapperFxCodegen,
)
if get_scheduling_for_device("xpu") is None:
register_backend_for_device(
"xpu",
TritonScheduling,
PythonWrapperCodegen,
CppWrapperGpu,
WrapperFxCodegen,
)
if get_scheduling_for_device("mps") is None:
register_backend_for_device(
"mps",
MetalScheduling,
PythonWrapperCodegen,
CppWrapperMps,
WrapperFxCodegen,
)
if get_scheduling_for_device("mtia") is None:
register_backend_for_device(
"mtia",
TritonScheduling,
PythonWrapperMtia,
CppWrapperGpu,
WrapperFxCodegen,
)
private_backend = torch._C._get_privateuse1_backend_name()
if (
private_backend != "privateuseone"
and get_scheduling_for_device(private_backend) is None
):
from torch.utils.backend_registration import _get_custom_mod_func
try:
device_scheduling = _get_custom_mod_func("Scheduling")
wrapper_codegen = _get_custom_mod_func("PythonWrapperCodegen")
cpp_wrapper_codegen = _get_custom_mod_func("CppWrapperCodegen")
fx_wrapper_codegen = _get_custom_mod_func("WrapperFxCodegen")
if device_scheduling and wrapper_codegen and cpp_wrapper_codegen:
register_backend_for_device(
private_backend,
device_scheduling,
wrapper_codegen,
cpp_wrapper_codegen,
fx_wrapper_codegen,
)
except RuntimeError:
pass
def index_prevent_reordering(
index: Sequence[sympy.Expr],
index_vars: Sequence[sympy.Expr],
sizes: Sequence[sympy.Expr],
) -> list[sympy.Expr]:
from ..ir import FlexibleLayout
# added contiguous index prevents reordering
return [*index, sympy_dot(index_vars, FlexibleLayout.contiguous_strides(sizes))]
def register_device_op_overrides(
device: str, device_op_overrides: DeviceOpOverrides
) -> None:
device_op_overrides_dict[device] = device_op_overrides
def get_device_op_overrides(device: str) -> DeviceOpOverrides:
assert isinstance(device, str), type(device)
if not device_op_overrides_dict:
from . import cpu_device_op_overrides, mps_device_op_overrides # noqa: F401
from .cuda import device_op_overrides # noqa: F401
from .mtia import device_op_overrides as mtia_op_overrides # noqa: F401
from .xpu import device_op_overrides as xpu_op_overrides # noqa: F401
return device_op_overrides_dict[device]
DTYPE_TO_COMPUTATION_DTYPE: dict[torch.dtype, torch.dtype] = {
torch.bfloat16: torch.float,
torch.float16: torch.float,
**{
dtype: dtype
for dtype in [
torch.bool,
torch.float32,
torch.float64,
torch.int8,
torch.int16,
torch.int32,
torch.int64,
torch.uint8,
torch.uint16,
torch.uint32,
torch.uint64,
]
},
}
def deduce_output_dtype_by_name(
op_name: str,
*args: Any,
**kwargs: Any,
) -> Optional[torch.dtype]:
"""
Given op name and a list of input dtypes, deduce the output dtype
"""
if op_name in boolean_ops():
return torch.bool
elif op_name in (
"to_dtype",
"index_expr",
):
return kwargs["dtype"] if "dtype" in kwargs else args[-1]
elif op_name in (
"rand",
"randn",
):
return torch.float
elif op_name in (
"get_index",
"randint64",
"load_seed",
):
return torch.int64
elif op_name == "reduction":
return kwargs["dtype"] if "dtype" in kwargs else args[1]
elif op_name == "constant":
return kwargs["dtype"] if "dtype" in kwargs else args[-1]
elif op_name in (
"load",
"store",
"store_reduction",
):
buf_name = args[1]
return V.graph.get_dtype(buf_name) # type: ignore[arg-type]
elif op_name == "to_dtype_bitcast":
return kwargs["dtype"] if "dtype" in kwargs else args[-2]
return None
def check_dtype(
buffer: IndentedBuffer, var: CSEVariableType, dtype: torch.dtype
) -> None:
backend = get_current_backend()
if config.test_configs.runtime_triton_dtype_assert and backend == "triton":
buffer.writeline(f"tl.static_assert({var}.dtype == {triton_type(dtype)})")
elif config.test_configs.static_cpp_dtype_assert and backend == "cpp":
from .cpp_utils import CppCSEVariable, DTYPE_TO_CPP
assert isinstance(var, CppCSEVariable), type(var)
if dtype == torch.bool:
if var.is_vec:
is_same_dt = f"IsVecMaskType<decltype({var})>::value"
else:
# operator&(bool, bool) returns int and it can be used as boolean in C++
is_same_dt = f"std::is_same_v<decltype({var}), bool> || std::is_same_v<decltype({var}), int>"
else:
c_var_type = f"decltype({var})"
if var.is_vec:
c_var_type = f"typename {c_var_type}::value_type"
is_same_dt = f"std::is_same_v<{c_var_type}, {DTYPE_TO_CPP[dtype]}>"
buffer.writeline(f"static_assert({is_same_dt});")
def check_shape(
buffer: IndentedBuffer, var: CSEVariableType, shape: BlockShapeType
) -> None:
backend = get_current_backend()
assert shape is not None
if config.test_configs.runtime_triton_shape_assert and backend == "triton":
shape_str = (
", ".join(str(d) for d in shape) if len(shape) != 1 else f"{shape[0]},"
)
buffer.writeline(f"tl.static_assert({var}.shape == ({shape_str}))")
def check_nan(buffer: IndentedBuffer, var: CSEVariableType) -> None:
backend = get_current_backend()
if backend == "triton":
msg = "NaN or Inf found"
buffer.writeline(
f"tl.device_assert(({var} == {var}) & ({var} != float('inf')) & ({var} != float('-inf')), '{msg}')"
)
| BackendFeature |
python | tornadoweb__tornado | tornado/test/websocket_test.py | {
"start": 4041,
"end": 4145
} | class ____(TestWebSocketHandler):
def open(self, arg):
self.write_message(arg)
| PathArgsHandler |
python | pytest-dev__pytest | src/_pytest/python_api.py | {
"start": 7815,
"end": 11087
} | class ____(ApproxBase):
"""Perform approximate comparisons where the expected value is a mapping
with numeric values (the keys can be anything)."""
def __repr__(self) -> str:
return f"approx({ ({k: self._approx_scalar(v) for k, v in self.expected.items()})!r})"
def _repr_compare(self, other_side: Mapping[object, float]) -> list[str]:
import math
if len(self.expected) != len(other_side):
return [
"Impossible to compare mappings with different sizes.",
f"Lengths: {len(self.expected)} and {len(other_side)}",
]
if set(self.expected.keys()) != set(other_side.keys()):
return [
"comparison failed.",
f"Mappings has different keys: expected {self.expected.keys()} but got {other_side.keys()}",
]
approx_side_as_map = {
k: self._approx_scalar(v) for k, v in self.expected.items()
}
number_of_elements = len(approx_side_as_map)
max_abs_diff = -math.inf
max_rel_diff = -math.inf
different_ids = []
for (approx_key, approx_value), other_value in zip(
approx_side_as_map.items(), other_side.values(), strict=True
):
if approx_value != other_value:
if approx_value.expected is not None and other_value is not None:
try:
max_abs_diff = max(
max_abs_diff, abs(approx_value.expected - other_value)
)
if approx_value.expected == 0.0:
max_rel_diff = math.inf
else:
max_rel_diff = max(
max_rel_diff,
abs(
(approx_value.expected - other_value)
/ approx_value.expected
),
)
except ZeroDivisionError:
pass
different_ids.append(approx_key)
message_data = [
(str(key), str(other_side[key]), str(approx_side_as_map[key]))
for key in different_ids
]
return _compare_approx(
self.expected,
message_data,
number_of_elements,
different_ids,
max_abs_diff,
max_rel_diff,
)
def __eq__(self, actual) -> bool:
try:
if set(actual.keys()) != set(self.expected.keys()):
return False
except AttributeError:
return False
return super().__eq__(actual)
def _yield_comparisons(self, actual):
for k in self.expected.keys():
yield actual[k], self.expected[k]
def _check_type(self) -> None:
__tracebackhide__ = True
for key, value in self.expected.items():
if isinstance(value, type(self.expected)):
msg = "pytest.approx() does not support nested dictionaries: key={!r} value={!r}\n full mapping={}"
raise TypeError(msg.format(key, value, pprint.pformat(self.expected)))
| ApproxMapping |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_textbox06.py | {
"start": 315,
"end": 893
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("textbox06.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file with textbox(s)."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
worksheet.insert_textbox(
"E9", "This is some text", {"width": 256, "height": 100}
)
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | pyparsing__pyparsing | tests/test_simple_unit.py | {
"start": 620,
"end": 884
} | class ____(NamedTuple):
desc: str = ""
expr: pp.ParserElement = pp.Empty()
text: str = ""
parse_fn: str = "parse_string"
expected_list: Iterable = None
expected_dict: Mapping = None
expected_fail_locn: int = None
NL = "\n"
| PyparsingTest |
python | great-expectations__great_expectations | contrib/time_series_expectations/time_series_expectations/expectations/expect_column_max_to_match_prophet_date_model.py | {
"start": 207,
"end": 3827
} | class ____(ColumnAggregateTimeSeriesExpectation):
"""Expect the column maximum to match the predictions of a prophet model for a given date.
expect_column_max_to_match_prophet_date_model is a ColumnAggregateTimeSeriesExpectation.
Args:
column (str):
The name of the column to calculate the max of
date (str):
A string representing the date to compare the max to
model_json (str):
A string containing a JSON-serialized Prophet model
Keyword Args:
Other Parameters:
result_format (str or None):
Which output mode to use: BOOLEAN_ONLY, BASIC, COMPLETE, or SUMMARY.
For more detail, see [result_format](https://docs.greatexpectations.io/docs/reference/expectations/result_format).
catch_exceptions (boolean or None):
If True, then catch exceptions and include them as part of the result object.
For more detail, see [catch_exceptions](https://docs.greatexpectations.io/docs/reference/expectations/standard_arguments/#catch_exceptions).
meta (dict or None):
A JSON-serializable dictionary (nesting allowed) that will be included in the output without
modification. For more detail, see [meta](https://docs.greatexpectations.io/docs/reference/expectations/standard_arguments/#meta).
Returns:
An [ExpectationSuiteValidationResult](https://docs.greatexpectations.io/docs/terms/validation_result)
Exact fields vary depending on the values passed to result_format, catch_exceptions, and meta.
Notes:
* Prophet is an open source forecasting library created at facebook. For more information, please see the [project github page](https://github.com/facebook/prophet).
* I apologize for the dangling prepositions in the Arg docstrings for this Expectation.
"""
with open(file_relative_path(__file__, "example_prophet_date_model.json")) as f_:
example_prophet_date_model_json = f_.read()
examples = [
{
"data": {
"x": [100, 102, 101, 100],
"y": [100, 100, 100, 500],
},
"only_for": ["pandas"],
"tests": [
{
"title": "positive_test",
"exact_match_out": False,
"include_in_gallery": True,
"in": {
"column": "x",
"date": "2022-01-11",
"model": example_prophet_date_model_json,
},
"out": {
"success": True,
"observed_value": 102,
},
},
{
"title": "negative_test",
"exact_match_out": False,
"include_in_gallery": True,
"in": {
"column": "y",
"date": "2022-01-11",
"model": example_prophet_date_model_json,
},
"out": {
"success": False,
"observed_value": 500,
},
},
],
}
]
metric_dependency = "column.max"
library_metadata = {
"tags": [],
"contributors": [
"@abegong",
],
"requirements": ["prophet"],
}
if __name__ == "__main__":
ExpectColumnMaxToMatchProphetDateModel().print_diagnostic_checklist()
| ExpectColumnMaxToMatchProphetDateModel |
python | PrefectHQ__prefect | tests/server/utilities/test_schemas.py | {
"start": 5401,
"end": 5454
} | class ____:
descr = ConcreteDescriptor()
| PlainOwner |
python | facebook__pyre-check | source/interprocedural_analyses/taint/test/integration/higher_order_functions.py | {
"start": 3142,
"end": 4534
} | class ____:
def __init__(self):
pass
def __call__(self) -> str:
return _test_source()
def test_callable_class_to_obscure():
def obscure_tito(x): ...
c = CallableSource()
return obscure_tito(c) # Expecting taint since obscure_tito could call the callable
def test_callable_class_to_perfect_tito():
def perfect_tito(x: CallableSource) -> CallableSource:
return x
c = CallableSource()
return perfect_tito(c) # Expecting no taint since we see the body of perfect_tito
def test_duplicate_issues_in_different_parameterized_callables(f, flag: bool):
def sink_wrapper(f, arg):
_test_sink(arg)
def foo(x: str) -> None:
return
def bar(x: str) -> None:
return
x = foo
if flag:
x = bar
sink_wrapper(
x, _test_source()
) # Expect one issue instead of two, due to sharing the same traces
def sink_wrapper2(arg):
_test_sink(arg)
y = _test_sink
if flag:
y = sink_wrapper2
apply(y, _test_source()) # Expect one issue but two sink traces
# Expect no issues due to duplicating issues with the non-parameterized root callable
test_duplicate_issues_in_different_parameterized_callables(print, _test_source())
def test_callable_default_value(f = _test_source) -> None:
_test_sink(f()) # TODO(T225702991): False negative
| CallableSource |
python | Unity-Technologies__ml-agents | ml-agents-envs/mlagents_envs/base_env.py | {
"start": 19562,
"end": 19926
} | class ____(Mapping):
def __init__(self, specs: Dict[BehaviorName, BehaviorSpec]):
self._dict = specs
def __len__(self) -> int:
return len(self._dict)
def __getitem__(self, behavior: BehaviorName) -> BehaviorSpec:
return self._dict[behavior]
def __iter__(self) -> Iterator[Any]:
yield from self._dict
| BehaviorMapping |
python | walkccc__LeetCode | solutions/2870. Minimum Number of Operations to Make Array Empty/2870.py | {
"start": 0,
"end": 208
} | class ____:
def minOperations(self, nums: list[int]) -> int:
count = collections.Counter(nums)
if 1 in count.values():
return -1
return sum((freq + 2) // 3 for freq in count.values())
| Solution |
python | protocolbuffers__protobuf | python/google/protobuf/internal/message_test.py | {
"start": 122753,
"end": 123912
} | class ____(unittest.TestCase):
def GenerateNestedProto(self, n):
msg = unittest_pb2.TestRecursiveMessage()
sub = msg
for _ in range(n):
sub = sub.a
sub.i = 0
return msg.SerializeToString()
def testSucceedOkSizedProto(self):
msg = unittest_pb2.TestRecursiveMessage()
msg.ParseFromString(self.GenerateNestedProto(100))
def testAssertOversizeProto(self):
if api_implementation.Type() != 'python':
api_implementation._c_module.SetAllowOversizeProtos(False)
msg = unittest_pb2.TestRecursiveMessage()
with self.assertRaises(message.DecodeError) as context:
msg.ParseFromString(self.GenerateNestedProto(101))
self.assertIn('Error parsing message', str(context.exception))
def testSucceedOversizeProto(self):
if api_implementation.Type() == 'python':
decoder.SetRecursionLimit(310)
else:
api_implementation._c_module.SetAllowOversizeProtos(True)
msg = unittest_pb2.TestRecursiveMessage()
msg.ParseFromString(self.GenerateNestedProto(101))
decoder.SetRecursionLimit(decoder.DEFAULT_RECURSION_LIMIT)
if __name__ == '__main__':
unittest.main()
| OversizeProtosTest |
python | spack__spack | lib/spack/spack/directory_layout.py | {
"start": 14881,
"end": 15161
} | class ____(DirectoryLayoutError):
"""Raised when an extension is added to a package that already has it."""
def __init__(self, spec, ext_spec):
super().__init__("%s is already installed in %s" % (ext_spec.short_spec, spec.short_spec))
| ExtensionAlreadyInstalledError |
python | pyqtgraph__pyqtgraph | pyqtgraph/jupyter/GraphicsView.py | {
"start": 4851,
"end": 5847
} | class ____(GraphicsView):
"""jupyter_rfb analogue of
:class:`GraphicsLayoutWidget <pyqtgraph.GraphicsLayoutWidget>`."""
def __init__(self, **kwds):
super().__init__(**kwds)
self.gfxLayout = graphicsItems.GraphicsLayout.GraphicsLayout()
for n in [
'nextRow', 'nextCol', 'nextColumn', 'addItem', 'getItem',
'addLayout', 'addLabel', 'removeItem', 'itemIndex', 'clear'
]:
setattr(self, n, getattr(self.gfxLayout, n))
self.gfxView.setCentralItem(self.gfxLayout)
def addPlot(self, *args, **kwds):
kwds["enableMenu"] = False
plotItem = self.gfxLayout.addPlot(*args, **kwds)
connect_viewbox_redraw(plotItem.getViewBox(), self.request_draw)
return plotItem
def addViewBox(self, *args, **kwds):
kwds["enableMenu"] = False
vb = self.gfxLayout.addViewBox(*args, **kwds)
connect_viewbox_redraw(vb, self.request_draw)
return vb
| GraphicsLayoutWidget |
python | Lightning-AI__lightning | tests/tests_pytorch/trainer/dynamic_args/test_multiple_eval_dataloaders.py | {
"start": 749,
"end": 1005
} | class ____(Dataset):
def __init__(self, size, length):
self.len = length
self.data = torch.randn(length, size)
def __getitem__(self, index):
return torch.zeros(1)
def __len__(self):
return self.len
| RandomDatasetA |
python | pallets__jinja | src/jinja2/nodes.py | {
"start": 33270,
"end": 33636
} | class ____(Stmt):
"""Modifies the eval context. For each option that should be modified,
a :class:`Keyword` has to be added to the :attr:`options` list.
Example to change the `autoescape` setting::
EvalContextModifier(options=[Keyword('autoescape', Const(True))])
"""
fields = ("options",)
options: list[Keyword]
| EvalContextModifier |
python | pytorch__pytorch | test/test_jit.py | {
"start": 15000,
"end": 15141
} | class ____(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.bar = torch.jit.ScriptModule()
| FooToPickle |
python | astropy__astropy | astropy/extern/configobj/configobj.py | {
"start": 6797,
"end": 11680
} | class ____(object):
"""
A helper class to help perform string interpolation.
This class is an abstract base class; its descendants perform
the actual work.
"""
# compiled regexp to use in self.interpolate()
_KEYCRE = re.compile(r"%\(([^)]*)\)s")
_cookie = '%'
def __init__(self, section):
# the Section instance that "owns" this engine
self.section = section
def interpolate(self, key, value):
# short-cut
if not self._cookie in value:
return value
def recursive_interpolate(key, value, section, backtrail):
"""The function that does the actual work.
``value``: the string we're trying to interpolate.
``section``: the section in which that string was found
``backtrail``: a dict to keep track of where we've been,
to detect and prevent infinite recursion loops
This is similar to a depth-first-search algorithm.
"""
# Have we been here already?
if (key, section.name) in backtrail:
# Yes - infinite loop detected
raise InterpolationLoopError(key)
# Place a marker on our backtrail so we won't come back here again
backtrail[(key, section.name)] = 1
# Now start the actual work
match = self._KEYCRE.search(value)
while match:
# The actual parsing of the match is implementation-dependent,
# so delegate to our helper function
k, v, s = self._parse_match(match)
if k is None:
# That's the signal that no further interpolation is needed
replacement = v
else:
# Further interpolation may be needed to obtain final value
replacement = recursive_interpolate(k, v, s, backtrail)
# Replace the matched string with its final value
start, end = match.span()
value = ''.join((value[:start], replacement, value[end:]))
new_search_start = start + len(replacement)
# Pick up the next interpolation key, if any, for next time
# through the while loop
match = self._KEYCRE.search(value, new_search_start)
# Now safe to come back here again; remove marker from backtrail
del backtrail[(key, section.name)]
return value
# Back in interpolate(), all we have to do is kick off the recursive
# function with appropriate starting values
value = recursive_interpolate(key, value, self.section, {})
return value
def _fetch(self, key):
"""Helper function to fetch values from owning section.
Returns a 2-tuple: the value, and the section where it was found.
"""
# switch off interpolation before we try and fetch anything !
save_interp = self.section.main.interpolation
self.section.main.interpolation = False
# Start at section that "owns" this InterpolationEngine
current_section = self.section
while True:
# try the current section first
val = current_section.get(key)
if val is not None and not isinstance(val, Section):
break
# try "DEFAULT" next
val = current_section.get('DEFAULT', {}).get(key)
if val is not None and not isinstance(val, Section):
break
# move up to parent and try again
# top-level's parent is itself
if current_section.parent is current_section:
# reached top level, time to give up
break
current_section = current_section.parent
# restore interpolation to previous value before returning
self.section.main.interpolation = save_interp
if val is None:
raise MissingInterpolationOption(key)
return val, current_section
def _parse_match(self, match):
"""Implementation-dependent helper function.
Will be passed a match object corresponding to the interpolation
key we just found (e.g., "%(foo)s" or "$foo"). Should look up that
key in the appropriate config file section (using the ``_fetch()``
helper function) and return a 3-tuple: (key, value, section)
``key`` is the name of the key we're looking for
``value`` is the value found for that key
``section`` is a reference to the section where it was found
``key`` and ``section`` should be None if no further
interpolation should be performed on the resulting value
(e.g., if we interpolated "$$" and returned "$").
"""
raise NotImplementedError()
| InterpolationEngine |
python | numba__numba | numba/tests/test_multi3.py | {
"start": 104,
"end": 1229
} | class ____(unittest.TestCase):
"""
This test is only relevant for 32-bit architectures.
Test __multi3 implementation in _helperlib.c.
The symbol defines a i128 multiplication.
It is necessary for working around an issue in LLVM (see issue #969).
The symbol does not exist in 32-bit platform, and should not be used by
LLVM. However, optimization passes will create i65 multiplication that
is then lowered to __multi3.
"""
def test_multi3(self):
@njit("(int64,)")
def func(x):
res = 0
for i in range(x):
res += i
return res
x_cases = [-1, 0, 1, 3, 4, 8,
0xffffffff - 1, 0xffffffff, 0xffffffff + 1,
0x123456789abcdef, -0x123456789abcdef]
for _ in range(500):
x_cases.append(random.randint(0, 0xffffffff))
def expected(x):
if x <= 0: return 0
return ((x * (x - 1)) // 2) & (2**64 - 1)
for x in x_cases:
self.assertEqual(expected(x), func(x))
if __name__ == '__main__':
unittest.main()
| TestMulti3 |
python | numba__numba | numba/core/cpu_options.py | {
"start": 416,
"end": 1885
} | class ____(AbstractOptionValue):
"""
Options for controlling fast math optimization.
"""
def __init__(self, value):
# https://releases.llvm.org/7.0.0/docs/LangRef.html#fast-math-flags
valid_flags = {
'fast',
'nnan', 'ninf', 'nsz', 'arcp',
'contract', 'afn', 'reassoc',
}
if isinstance(value, FastMathOptions):
self.flags = value.flags.copy()
elif value is True:
self.flags = {'fast'}
elif value is False:
self.flags = set()
elif isinstance(value, set):
invalid = value - valid_flags
if invalid:
raise ValueError("Unrecognized fastmath flags: %s" % invalid)
self.flags = value
elif isinstance(value, dict):
invalid = set(value.keys()) - valid_flags
if invalid:
raise ValueError("Unrecognized fastmath flags: %s" % invalid)
self.flags = {v for v, enable in value.items() if enable}
else:
msg = "Expected fastmath option(s) to be either a bool, dict or set"
raise ValueError(msg)
def __bool__(self):
return bool(self.flags)
__nonzero__ = __bool__
def encode(self) -> str:
return str(self.flags)
def __eq__(self, other):
if type(other) is type(self):
return self.flags == other.flags
return NotImplemented
| FastMathOptions |
python | urllib3__urllib3 | test/with_dummyserver/test_socketlevel.py | {
"start": 86047,
"end": 87740
} | class ____(SocketDummyServerTestCase):
@notWindows()
def test_ignore_broken_pipe_errors(self, monkeypatch: pytest.MonkeyPatch) -> None:
# On Windows an aborted connection raises an error on
# attempts to read data out of a socket that's been closed.
sock_shut = Event()
orig_connect = HTTPConnection.connect
# a buffer that will cause two sendall calls
buf = "a" * 1024 * 1024 * 4
def connect_and_wait(*args: typing.Any, **kw: typing.Any) -> None:
ret = orig_connect(*args, **kw)
assert sock_shut.wait(5)
return ret
def socket_handler(listener: socket.socket) -> None:
for i in range(2):
sock = listener.accept()[0]
sock.send(
b"HTTP/1.1 404 Not Found\r\n"
b"Connection: close\r\n"
b"Content-Length: 10\r\n"
b"\r\n"
b"xxxxxxxxxx"
)
sock.shutdown(socket.SHUT_RDWR)
sock_shut.set()
sock.close()
monkeypatch.setattr(HTTPConnection, "connect", connect_and_wait)
self._start_server(socket_handler)
with HTTPConnectionPool(self.host, self.port) as pool:
r = pool.request("POST", "/", body=buf)
assert r.status == 404
assert r.headers["content-length"] == "10"
assert r.data == b"xxxxxxxxxx"
r = pool.request("POST", "/admin", chunked=True, body=buf)
assert r.status == 404
assert r.headers["content-length"] == "10"
assert r.data == b"xxxxxxxxxx"
| TestBrokenPipe |
python | getsentry__sentry | src/sentry/integrations/discord/message_builder/base/embed/footer.py | {
"start": 80,
"end": 212
} | class ____(TypedDict):
text: str
icon_url: NotRequired[str]
proxy_icon_url: NotRequired[str]
| DiscordMessageEmbedFooterDict |
python | qdrant__qdrant-client | qdrant_client/http/models/models.py | {
"start": 150169,
"end": 150402
} | class ____(str, Enum):
"""
Storage in memory (RAM) Will be very fast at the cost of consuming a lot of memory.
"""
def __str__(self) -> str:
return str(self.value)
MEMORY = "Memory"
| VectorStorageTypeOneOf |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 1008832,
"end": 1009269
} | class ____(sgqlc.types.Type):
"""Autogenerated return type of UnfollowUser"""
__schema__ = github_schema
__field_names__ = ("client_mutation_id", "user")
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
"""A unique identifier for the client performing the mutation."""
user = sgqlc.types.Field("User", graphql_name="user")
"""The user that was unfollowed."""
| UnfollowUserPayload |
python | numba__numba | numba/core/untyped_passes.py | {
"start": 63188,
"end": 67888
} | class ____(FunctionPass):
"""Implement literal propagation based on partial type inference"""
_name = "PropagateLiterals"
def __init__(self):
FunctionPass.__init__(self)
def get_analysis_usage(self, AU):
AU.add_required(ReconstructSSA)
def run_pass(self, state):
func_ir = state.func_ir
typemap = state.typemap
flags = state.flags
accepted_functions = ('isinstance', 'hasattr')
if not hasattr(func_ir, '_definitions') \
and not flags.enable_ssa:
func_ir._definitions = build_definitions(func_ir.blocks)
changed = False
for block in func_ir.blocks.values():
for assign in block.find_insts(ir.Assign):
value = assign.value
if isinstance(value, (ir.Arg, ir.Const, ir.FreeVar, ir.Global)):
continue
# 1) Don't change return stmt in the form
# $return_xyz = cast(value=ABC)
# 2) Don't propagate literal values that are not primitives
if isinstance(value, ir.Expr) and \
value.op in ('cast', 'build_map', 'build_list',
'build_tuple', 'build_set'):
continue
target = assign.target
if not flags.enable_ssa:
# SSA is disabled when doing inlining
if guard(get_definition, func_ir, target.name) is None: # noqa: E501
continue
# Numba cannot safely determine if an isinstance call
# with a PHI node is True/False. For instance, in
# the case below, the partial type inference step can coerce
# '$z' to float, so any call to 'isinstance(z, int)' would fail.
#
# def fn(x):
# if x > 4:
# z = 1
# else:
# z = 3.14
# if isinstance(z, int):
# print('int')
# else:
# print('float')
#
# At the moment, one avoid propagating the literal
# value if the argument is a PHI node
if isinstance(value, ir.Expr) and value.op == 'call':
fn = guard(get_definition, func_ir, value.func.name)
if fn is None:
continue
if not (isinstance(fn, ir.Global) and fn.name in
accepted_functions):
continue
for arg in value.args:
# check if any of the args to isinstance is a PHI node
iv = func_ir._definitions[arg.name]
assert len(iv) == 1 # SSA!
if isinstance(iv[0], ir.Expr) and iv[0].op == 'phi':
msg = (f'{fn.name}() cannot determine the '
f'type of variable "{arg.unversioned_name}" '
'due to a branch.')
raise errors.NumbaTypeError(msg, loc=assign.loc)
# Only propagate a PHI node if all arguments are the same
# constant
if isinstance(value, ir.Expr) and value.op == 'phi':
# typemap will return None in case `inc.name` not in typemap
v = [typemap.get(inc.name) for inc in value.incoming_values]
# stop if the elements in `v` do not hold the same value
if v[0] is not None and any([v[0] != vi for vi in v]):
continue
lit = typemap.get(target.name, None)
if lit and isinstance(lit, types.Literal):
# replace assign instruction by ir.Const(lit) iff
# lit is a literal value
rhs = ir.Const(lit.literal_value, assign.loc)
new_assign = ir.Assign(rhs, target, assign.loc)
# replace instruction
block.insert_after(new_assign, assign)
block.remove(assign)
changed = True
# reset type inference now we are done with the partial results
state.typemap = None
state.calltypes = None
if changed:
# Rebuild definitions
func_ir._definitions = build_definitions(func_ir.blocks)
return changed
@register_pass(mutates_CFG=True, analysis_only=False)
| PropagateLiterals |
python | scipy__scipy | scipy/stats/_continuous_distns.py | {
"start": 145110,
"end": 146034
} | class ____(rv_continuous):
r"""A hyperbolic secant continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `hypsecant` is:
.. math::
f(x) = \frac{1}{\pi} \text{sech}(x)
for a real number :math:`x`.
%(after_notes)s
%(example)s
"""
def _shape_info(self):
return []
def _pdf(self, x):
# hypsecant.pdf(x) = 1/pi * sech(x)
return 1.0/(np.pi*np.cosh(x))
def _cdf(self, x):
return 2.0/np.pi*np.arctan(np.exp(x))
def _ppf(self, q):
return np.log(np.tan(np.pi*q/2.0))
def _sf(self, x):
return 2.0/np.pi*np.arctan(np.exp(-x))
def _isf(self, q):
return -np.log(np.tan(np.pi*q/2.0))
def _stats(self):
return 0, np.pi*np.pi/4, 0, 2
def _entropy(self):
return np.log(2*np.pi)
hypsecant = hypsecant_gen(name='hypsecant')
| hypsecant_gen |
python | django__django | django/db/migrations/operations/models.py | {
"start": 23383,
"end": 25815
} | class ____(ModelOptionOperation):
option_name = None
def __init__(self, name, option_value):
if option_value:
option_value = set(normalize_together(option_value))
setattr(self, self.option_name, option_value)
super().__init__(name)
@cached_property
def option_value(self):
return getattr(self, self.option_name)
def deconstruct(self):
kwargs = {
"name": self.name,
self.option_name: self.option_value,
}
return (self.__class__.__qualname__, [], kwargs)
def state_forwards(self, app_label, state):
state.alter_model_options(
app_label,
self.name_lower,
{self.option_name: self.option_value},
)
def database_forwards(self, app_label, schema_editor, from_state, to_state):
new_model = to_state.apps.get_model(app_label, self.name)
if self.allow_migrate_model(schema_editor.connection.alias, new_model):
from_model_state = from_state.models[app_label, self.name_lower]
to_model_state = to_state.models[app_label, self.name_lower]
alter_together = getattr(schema_editor, "alter_%s" % self.option_name)
alter_together(
new_model,
from_model_state.options.get(self.option_name) or set(),
to_model_state.options.get(self.option_name) or set(),
)
def database_backwards(self, app_label, schema_editor, from_state, to_state):
return self.database_forwards(app_label, schema_editor, from_state, to_state)
def references_field(self, model_name, name, app_label):
return self.references_model(model_name, app_label) and (
not self.option_value
or any((name in fields) for fields in self.option_value)
)
def describe(self):
return "Alter %s for %s (%s constraint(s))" % (
self.option_name,
self.name,
len(self.option_value or ""),
)
@property
def migration_name_fragment(self):
return "alter_%s_%s" % (self.name_lower, self.option_name)
def can_reduce_through(self, operation, app_label):
return super().can_reduce_through(operation, app_label) or (
isinstance(operation, AlterTogetherOptionOperation)
and type(operation) is not type(self)
)
| AlterTogetherOptionOperation |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/pyflakes/F821_33.py | {
"start": 89,
"end": 334
} | class ____:
g = lambda self: (lambda: __class__)
print(D().g()().__name__)
# Test: lambda outside class (should still fail)
h = lambda: __class__
# Test: lambda referencing module-level variable (should not be flagged as F821)
import uuid
| D |
python | fastapi__sqlmodel | docs_src/tutorial/relationship_attributes/cascade_delete_relationships/tutorial005.py | {
"start": 367,
"end": 3890
} | class ____(SQLModel, table=True):
id: Optional[int] = Field(default=None, primary_key=True)
name: str = Field(index=True)
secret_name: str
age: Optional[int] = Field(default=None, index=True)
team_id: Optional[int] = Field(
default=None, foreign_key="team.id", ondelete="RESTRICT"
)
team: Optional[Team] = Relationship(back_populates="heroes")
sqlite_file_name = "database.db"
sqlite_url = f"sqlite:///{sqlite_file_name}"
engine = create_engine(sqlite_url, echo=True)
def create_db_and_tables():
SQLModel.metadata.create_all(engine)
with engine.connect() as connection:
connection.execute(text("PRAGMA foreign_keys=ON")) # for SQLite only
def create_heroes():
with Session(engine) as session:
team_preventers = Team(name="Preventers", headquarters="Sharp Tower")
team_z_force = Team(name="Z-Force", headquarters="Sister Margaret's Bar")
hero_deadpond = Hero(
name="Deadpond", secret_name="Dive Wilson", team=team_z_force
)
hero_rusty_man = Hero(
name="Rusty-Man", secret_name="Tommy Sharp", age=48, team=team_preventers
)
hero_spider_boy = Hero(name="Spider-Boy", secret_name="Pedro Parqueador")
session.add(hero_deadpond)
session.add(hero_rusty_man)
session.add(hero_spider_boy)
session.commit()
session.refresh(hero_deadpond)
session.refresh(hero_rusty_man)
session.refresh(hero_spider_boy)
print("Created hero:", hero_deadpond)
print("Created hero:", hero_rusty_man)
print("Created hero:", hero_spider_boy)
hero_spider_boy.team = team_preventers
session.add(hero_spider_boy)
session.commit()
session.refresh(hero_spider_boy)
print("Updated hero:", hero_spider_boy)
hero_black_lion = Hero(name="Black Lion", secret_name="Trevor Challa", age=35)
hero_sure_e = Hero(name="Princess Sure-E", secret_name="Sure-E")
team_wakaland = Team(
name="Wakaland",
headquarters="Wakaland Capital City",
heroes=[hero_black_lion, hero_sure_e],
)
session.add(team_wakaland)
session.commit()
session.refresh(team_wakaland)
print("Team Wakaland:", team_wakaland)
def remove_team_heroes():
with Session(engine) as session:
statement = select(Team).where(Team.name == "Wakaland")
team = session.exec(statement).one()
team.heroes.clear()
session.add(team)
session.commit()
session.refresh(team)
print("Team with removed heroes:", team)
def delete_team():
with Session(engine) as session:
statement = select(Team).where(Team.name == "Wakaland")
team = session.exec(statement).one()
session.delete(team)
session.commit()
print("Deleted team:", team)
def select_deleted_heroes():
with Session(engine) as session:
statement = select(Hero).where(Hero.name == "Black Lion")
result = session.exec(statement)
hero = result.first()
print("Black Lion has no team:", hero)
statement = select(Hero).where(Hero.name == "Princess Sure-E")
result = session.exec(statement)
hero = result.first()
print("Princess Sure-E has no team:", hero)
def main():
create_db_and_tables()
create_heroes()
remove_team_heroes()
delete_team()
select_deleted_heroes()
if __name__ == "__main__":
main()
| Hero |
python | donnemartin__interactive-coding-challenges | recursion_dynamic/knapsack_unbounded/test_knapsack_unbounded.py | {
"start": 18,
"end": 862
} | class ____(unittest.TestCase):
def test_knapsack(self):
knapsack = Knapsack()
self.assertRaises(TypeError, knapsack.fill_knapsack, None, None)
self.assertEqual(knapsack.fill_knapsack(0, 0), 0)
items = []
items.append(Item(label='a', value=1, weight=1))
items.append(Item(label='b', value=3, weight=2))
items.append(Item(label='c', value=7, weight=4))
total_weight = 8
expected_value = 14
results = knapsack.fill_knapsack(items, total_weight)
total_weight = 7
expected_value = 11
results = knapsack.fill_knapsack(items, total_weight)
self.assertEqual(results, expected_value)
print('Success: test_knapsack')
def main():
test = TestKnapsack()
test.test_knapsack()
if __name__ == '__main__':
main()
| TestKnapsack |
python | celery__celery | t/unit/tasks/test_chord.py | {
"start": 611,
"end": 1138
} | class ____(GroupResult):
is_ready = True
value = None
def ready(self):
return self.is_ready
def join(self, propagate=True, **kwargs):
if propagate:
for value in self.value:
if isinstance(value, Exception):
raise value
return self.value
join_native = join
def _failed_join_report(self):
for value in self.value:
if isinstance(value, Exception):
yield EagerResult('some_id', value, 'FAILURE')
| TSR |
python | sqlalchemy__sqlalchemy | test/dialect/sqlite/test_compiler.py | {
"start": 14636,
"end": 17863
} | class ____(fixtures.TestBase, testing.AssertsCompiledSQL):
__dialect__ = "sqlite"
def setup_test(self):
self.table = table(
"mytable", column("myid", String), column("name", String)
)
@testing.only_on("sqlite >= 3.9")
def test_determinsitic_parameter(self):
"""for #9379, make sure that "deterministic=True" is used when we are
on python 3.8 with modern SQLite version.
For the case where we are not on py3.8 or not on modern sqlite version,
the rest of the test suite confirms that connection still passes.
"""
e = create_engine("sqlite://")
@event.listens_for(e, "do_connect", retval=True)
def _mock_connect(dialect, conn_rec, cargs, cparams):
conn = e.dialect.loaded_dbapi.connect(":memory:")
return mock.Mock(wraps=conn)
c = e.connect()
eq_(
c.connection.driver_connection.create_function.mock_calls,
[
mock.call("regexp", 2, mock.ANY, deterministic=True),
mock.call("floor", 1, mock.ANY, deterministic=True),
],
)
def test_regexp_match(self):
self.assert_compile(
self.table.c.myid.regexp_match("pattern"),
"mytable.myid REGEXP ?",
checkpositional=("pattern",),
)
def test_regexp_match_column(self):
self.assert_compile(
self.table.c.myid.regexp_match(self.table.c.name),
"mytable.myid REGEXP mytable.name",
checkparams={},
)
def test_regexp_match_str(self):
self.assert_compile(
literal("string").regexp_match(self.table.c.name),
"? REGEXP mytable.name",
checkpositional=("string",),
)
def test_regexp_match_flags(self):
self.assert_compile(
self.table.c.myid.regexp_match("pattern", flags="ig"),
"mytable.myid REGEXP ?",
checkpositional=("pattern",),
)
def test_not_regexp_match(self):
self.assert_compile(
~self.table.c.myid.regexp_match("pattern"),
"mytable.myid NOT REGEXP ?",
checkpositional=("pattern",),
)
def test_not_regexp_match_flags(self):
self.assert_compile(
~self.table.c.myid.regexp_match("pattern", flags="ig"),
"mytable.myid NOT REGEXP ?",
checkpositional=("pattern",),
)
def test_not_regexp_match_column(self):
self.assert_compile(
~self.table.c.myid.regexp_match(self.table.c.name),
"mytable.myid NOT REGEXP mytable.name",
checkparams={},
)
def test_not_regexp_match_str(self):
self.assert_compile(
~literal("string").regexp_match(self.table.c.name),
"? NOT REGEXP mytable.name",
checkpositional=("string",),
)
def test_regexp_replace(self):
assert_raises_message(
exc.CompileError,
"sqlite dialect does not support regular expression replacements",
self.table.c.myid.regexp_replace("pattern", "rep").compile,
dialect=sqlite.dialect(),
)
| RegexpTest |
python | PrefectHQ__prefect | src/prefect/server/schemas/states.py | {
"start": 3030,
"end": 3775
} | class ____(TimeSeriesBaseModel):
def orm_dict(self, *args: Any, **kwargs: Any) -> dict[str, Any]:
"""
This method is used as a convenience method for constructing fixtues by first
building a `State` schema object and converting it into an ORM-compatible
format. Because the `data` field is not writable on ORM states, this method
omits the `data` field entirely for the purposes of constructing an ORM model.
If state data is required, an artifact must be created separately.
"""
schema_dict = self.model_dump(*args, **kwargs)
# remove the data field in order to construct a state ORM model
schema_dict.pop("data", None)
return schema_dict
| StateBaseModel |
python | huggingface__transformers | src/transformers/models/got_ocr2/processing_got_ocr2.py | {
"start": 1098,
"end": 1177
} | class ____(TextKwargs, total=False):
format: Optional[bool]
| GotOcr2TextKwargs |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_data_validation07.py | {
"start": 314,
"end": 958
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("data_validation07.xlsx")
def test_create_file(self):
"""Test the creation of a XlsxWriter file with data validation."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
worksheet.data_validation(
"C2",
{
"validate": "list",
"value": ["coffee", "café"],
},
)
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | astropy__astropy | astropy/cosmology/_src/tests/parameter/test_descriptors.py | {
"start": 3138,
"end": 5075
} | class ____:
"""Test the descriptor for ``parameters`` on Cosmology classes.
This is a mixin class and is mixed into
:class:`~astropy.cosmology._src.tests.test_core.CosmologyTest`.
"""
@pytest.mark.parametrize("name", ["parameters", "_derived_parameters"])
def test_parameters_from_class(self, cosmo_cls: type[Cosmology], name: str) -> None:
"""Test descriptor ``parameters`` accessed from the class."""
# test presence
assert hasattr(cosmo_cls, name)
# test Parameter is a MappingProxyType
parameters = getattr(cosmo_cls, name)
assert isinstance(parameters, MappingProxyType)
# Test items
assert all(isinstance(p, Parameter) for p in parameters.values())
assert set(parameters) == {
k
for k, v in all_parameters(cosmo_cls).items()
if v.derived == ("derived" in name)
}
@pytest.mark.parametrize("name", ["parameters", "_derived_parameters"])
def test_parameters_from_instance(self, cosmo: Cosmology, name: str) -> None:
"""Test descriptor ``parameters`` accessed from the instance."""
# test presence
assert hasattr(cosmo, name)
# test Parameter is a MappingProxyType
parameters = getattr(cosmo, name)
assert isinstance(parameters, MappingProxyType)
# Test keys
assert set(parameters) == {
k
for k, v in all_parameters(cosmo).items()
if (v.derived == ("derived" in name))
}
@pytest.mark.parametrize("name", ["parameters", "_derived_parameters"])
def test_parameters_cannot_set_on_instance(
self, cosmo: Cosmology, name: str
) -> None:
"""Test descriptor ``parameters`` cannot be set on the instance."""
with pytest.raises(AttributeError, match=f"cannot assign to field {name!r}"):
setattr(cosmo, name, {})
| ParametersAttributeTestMixin |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.