language
stringclasses 1
value | repo
stringclasses 346
values | path
stringlengths 6
201
| class_span
dict | source
stringlengths 21
2.38M
| target
stringlengths 1
96
|
|---|---|---|---|---|---|
python
|
ansible__ansible
|
lib/ansible/module_utils/_internal/_ambient_context.py
|
{
"start": 339,
"end": 2579
}
|
class ____:
"""
An abstract base context manager that, once entered, will be accessible via its `current` classmethod to any code in the same
`contextvars` context (e.g. same thread/coroutine), until it is exited.
"""
__slots__ = ('_contextvar_token',)
# DTFIX-FUTURE: subclasses need to be able to opt-in to blocking nested contexts of the same type (basically optional per-callstack singleton behavior)
# DTFIX-FUTURE: this class should enforce strict nesting of contexts; overlapping context lifetimes leads to incredibly difficult to
# debug situations with undefined behavior, so it should fail fast.
# DTFIX-FUTURE: make frozen=True dataclass subclasses work (fix the mutability of the contextvar instance)
_contextvar: t.ClassVar[contextvars.ContextVar] # pylint: disable=declare-non-slot # pylint bug, see https://github.com/pylint-dev/pylint/issues/9950
_contextvar_token: contextvars.Token
def __init_subclass__(cls, **kwargs) -> None:
cls._contextvar = contextvars.ContextVar(cls.__name__)
@classmethod
def when(cls, condition: bool, /, *args, **kwargs) -> t.Self | contextlib.nullcontext:
"""Return an instance of the context if `condition` is `True`, otherwise return a `nullcontext` instance."""
return cls(*args, **kwargs) if condition else contextlib.nullcontext()
@classmethod
def current(cls, optional: bool = False) -> t.Self | None:
"""
Return the currently active context value for the current thread or coroutine.
Raises ReferenceError if a context is not active, unless `optional` is `True`.
"""
try:
return cls._contextvar.get()
except LookupError:
if optional:
return None
raise ReferenceError(f"A required {cls.__name__} context is not active.") from None
def __enter__(self) -> t.Self:
# DTFIX-FUTURE: actively block multiple entry
self._contextvar_token = self.__class__._contextvar.set(self)
return self
def __exit__(self, exc_type, exc_val, exc_tb) -> None:
self.__class__._contextvar.reset(self._contextvar_token)
del self._contextvar_token
|
AmbientContextBase
|
python
|
automl__auto-sklearn
|
autosklearn/pipeline/components/data_preprocessing/feature_type_text.py
|
{
"start": 650,
"end": 5315
}
|
class ____(BasePipeline):
"""This class implements a pipeline for data preprocessing of text features.
It assumes that the data to be transformed is made only of text features.
The steps of this pipeline are:
1 - Vectorize: Fits a *Vecotrizer object and apply this
2 - text feature reduction: TruncatedSVD
Parameters
----------
config : ConfigSpace.configuration_space.Configuration
The configuration to evaluate.
random_state : Optional[int | RandomState]
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance
used by `np.random`."""
def __init__(
self,
feat_type: Optional[FEAT_TYPE_TYPE] = None,
config: Optional[Configuration] = None,
steps: Optional[List[Tuple[str, BaseEstimator]]] = None,
dataset_properties: Optional[DATASET_PROPERTIES_TYPE] = None,
include: Optional[Dict[str, str]] = None,
exclude: Optional[Dict[str, str]] = None,
random_state: Optional[Union[int, np.random.RandomState]] = None,
init_params: Optional[Dict[str, Any]] = None,
) -> None:
self._output_dtype = np.int32
super().__init__(
config=config,
steps=steps,
dataset_properties=dataset_properties,
include=include,
exclude=exclude,
random_state=random_state,
init_params=init_params,
feat_type=feat_type,
)
@staticmethod
def get_properties(
dataset_properties: Optional[DATASET_PROPERTIES_TYPE] = None,
) -> Dict[str, Optional[Union[str, int, bool, Tuple]]]:
return {
"shortname": "txt_datapreproc",
"name": "text data preprocessing",
"handles_missing_values": True,
"handles_nominal_values": False,
"handles_numerical_features": False,
"prefers_data_scaled": False,
"prefers_data_normalized": False,
"handles_regression": True,
"handles_classification": True,
"handles_multiclass": True,
"handles_multilabel": True,
"is_deterministic": True,
"handles_sparse": True,
"handles_dense": True,
"input": (DENSE, SPARSE, UNSIGNED_DATA),
"output": (INPUT,),
"preferred_dtype": None,
}
def _get_hyperparameter_search_space(
self,
feat_type: Optional[FEAT_TYPE_TYPE] = None,
include: Optional[Dict[str, str]] = None,
exclude: Optional[Dict[str, str]] = None,
dataset_properties: Optional[DATASET_PROPERTIES_TYPE] = None,
) -> ConfigurationSpace:
"""Create the hyperparameter configuration space.
Parameters
----------
# TODO add parameter description
Returns
-------
cs : ConfigSpace.configuration_space.Configuration
The configuration space describing the SimpleRegressionClassifier.
"""
cs = ConfigurationSpace()
if dataset_properties is None or not isinstance(dataset_properties, dict):
dataset_properties = dict()
cs = self._get_base_search_space(
cs=cs,
dataset_properties=dataset_properties,
exclude=exclude,
include=include,
pipeline=self.steps,
feat_type=feat_type,
)
return cs
def _get_pipeline_steps(
self,
feat_type: Optional[FEAT_TYPE_TYPE] = None,
dataset_properties: Optional[Dict[str, str]] = None,
) -> List[Tuple[str, BaseEstimator]]:
steps = []
default_dataset_properties = {}
if dataset_properties is not None and isinstance(dataset_properties, dict):
default_dataset_properties.update(dataset_properties)
steps.extend(
[
(
"text_encoding",
BagOfWordChoice(
feat_type=feat_type,
dataset_properties=default_dataset_properties,
random_state=self.random_state,
),
),
(
"text_feature_reduction",
TextFeatureReduction(random_state=self.random_state),
),
]
)
return steps
def _get_estimator_hyperparameter_name(self) -> str:
return "text data preprocessing"
|
TextPreprocessingPipeline
|
python
|
walkccc__LeetCode
|
solutions/1830. Minimum Number of Operations to Make String Sorted/1830.py
|
{
"start": 0,
"end": 720
}
|
class ____:
def makeStringSorted(self, s: str) -> int:
MOD = 1_000_000_007
ans = 0
count = [0] * 26
@functools.lru_cache(None)
def fact(i: int) -> int:
return 1 if i <= 1 else i * fact(i - 1) % MOD
@functools.lru_cache(None)
def inv(i: int) -> int:
return pow(i, MOD - 2, MOD)
for i, c in enumerate(reversed(s)):
order = ord(c) - ord('a')
count[order] += 1
# count[:order] := s[i] can be any character smaller than c
# fact(i) := s[i + 1..n - 1] can be any sequence of characters
perm = sum(count[:order]) * fact(i)
for j in range(26):
perm = perm * inv(fact(count[j])) % MOD
ans = (ans + perm) % MOD
return ans
|
Solution
|
python
|
chroma-core__chroma
|
chromadb/execution/expression/operator.py
|
{
"start": 9394,
"end": 9608
}
|
class ____(Where):
"""Logical OR of multiple where conditions"""
conditions: List[Where]
def to_dict(self) -> Dict[str, Any]:
return {"$or": [c.to_dict() for c in self.conditions]}
@dataclass
|
Or
|
python
|
encode__django-rest-framework
|
rest_framework/permissions.py
|
{
"start": 1658,
"end": 2145
}
|
class ____:
def __init__(self, op1, op2):
self.op1 = op1
self.op2 = op2
def has_permission(self, request, view):
return (
self.op1.has_permission(request, view) and
self.op2.has_permission(request, view)
)
def has_object_permission(self, request, view, obj):
return (
self.op1.has_object_permission(request, view, obj) and
self.op2.has_object_permission(request, view, obj)
)
|
AND
|
python
|
weaviate__weaviate-python-client
|
weaviate/collections/classes/config.py
|
{
"start": 14842,
"end": 15145
}
|
class ____(_GenerativeProvider):
generative: Union[GenerativeSearches, _EnumLikeStr] = Field(
default=GenerativeSearches.AWS, frozen=True, exclude=True
)
region: str
service: str
model: Optional[str]
endpoint: Optional[str]
maxTokens: Optional[int]
|
_GenerativeAWSConfig
|
python
|
jazzband__django-simple-history
|
simple_history/tests/models.py
|
{
"start": 22149,
"end": 22271
}
|
class ____(AbstractModelCallable1):
name = models.CharField(max_length=15, unique=True)
|
OverrideModelNameUsingBaseModel1
|
python
|
sympy__sympy
|
sympy/functions/elementary/integers.py
|
{
"start": 9664,
"end": 16094
}
|
class ____(RoundFunction):
"""
Ceiling is a univariate function which returns the smallest integer
value not less than its argument. This implementation
generalizes ceiling to complex numbers by taking the ceiling of the
real and imaginary parts separately.
Examples
========
>>> from sympy import ceiling, E, I, S, Float, Rational
>>> ceiling(17)
17
>>> ceiling(Rational(23, 10))
3
>>> ceiling(2*E)
6
>>> ceiling(-Float(0.567))
0
>>> ceiling(I/2)
I
>>> ceiling(S(5)/2 + 5*I/2)
3 + 3*I
See Also
========
sympy.functions.elementary.integers.floor
References
==========
.. [1] "Concrete mathematics" by Graham, pp. 87
.. [2] https://mathworld.wolfram.com/CeilingFunction.html
"""
_dir = 1
@classmethod
def _eval_number(cls, arg):
if arg.is_Number:
return arg.ceiling()
if any(isinstance(i, j)
for i in (arg, -arg) for j in (floor, ceiling)):
return arg
if arg.is_NumberSymbol:
return arg.approximation_interval(Integer)[1]
@classmethod
def _eval_const_number(cls, arg):
if arg.is_real:
if arg.is_zero:
return S.Zero
if arg.is_positive:
num, den = arg.as_numer_denom()
s = den.is_negative
if s is None:
return None
if s:
num, den = -num, -den
# 0 < num/den <= 1 -> 1
if is_le(num, den):
return S.One
# 1 < num/den <= 2 -> 2
if fuzzy_and([is_lt(den, num), is_le(num, 2*den)]):
return Integer(2)
if arg.is_negative:
num, den = arg.as_numer_denom()
s = den.is_negative
if s is None:
return None
if s:
num, den = -num, -den
# -1 < num/den <= 0 -> 0
if is_lt(-den, num):
return S.Zero
# -2 < num/den <= -1 -> -1
if fuzzy_and([is_lt(-2*den, num), is_le(num, -den)]):
return S.NegativeOne
def _eval_as_leading_term(self, x, logx, cdir):
from sympy.calculus.accumulationbounds import AccumBounds
arg = self.args[0]
arg0 = arg.subs(x, 0)
r = self.subs(x, 0)
if arg0 is S.NaN or isinstance(arg0, AccumBounds):
arg0 = arg.limit(x, 0, dir='-' if re(cdir).is_negative else '+')
r = ceiling(arg0)
if arg0.is_finite:
if arg0 == r:
ndir = arg.dir(x, cdir=cdir if cdir != 0 else 1)
if ndir.is_negative:
return r
elif ndir.is_positive:
return r + 1
else:
raise NotImplementedError("Not sure of sign of %s" % ndir)
else:
return r
return arg.as_leading_term(x, logx=logx, cdir=cdir)
def _eval_nseries(self, x, n, logx, cdir=0):
arg = self.args[0]
arg0 = arg.subs(x, 0)
r = self.subs(x, 0)
if arg0 is S.NaN:
arg0 = arg.limit(x, 0, dir='-' if re(cdir).is_negative else '+')
r = ceiling(arg0)
if arg0.is_infinite:
from sympy.calculus.accumulationbounds import AccumBounds
from sympy.series.order import Order
s = arg._eval_nseries(x, n, logx, cdir)
o = Order(1, (x, 0)) if n <= 0 else AccumBounds(0, 1)
return s + o
if arg0 == r:
ndir = arg.dir(x, cdir=cdir if cdir != 0 else 1)
if ndir.is_negative:
return r
elif ndir.is_positive:
return r + 1
else:
raise NotImplementedError("Not sure of sign of %s" % ndir)
else:
return r
def _eval_rewrite_as_floor(self, arg, **kwargs):
return -floor(-arg)
def _eval_rewrite_as_frac(self, arg, **kwargs):
return arg + frac(-arg)
def _eval_is_positive(self):
return self.args[0].is_positive
def _eval_is_nonpositive(self):
return self.args[0].is_nonpositive
def __lt__(self, other):
other = S(other)
if self.args[0].is_real:
if other.is_integer:
return self.args[0] <= other - 1
if other.is_number and other.is_real:
return self.args[0] <= floor(other)
if self.args[0] == other and other.is_real:
return S.false
if other is S.Infinity and self.is_finite:
return S.true
return Lt(self, other, evaluate=False)
def __gt__(self, other):
other = S(other)
if self.args[0].is_real:
if other.is_integer:
return self.args[0] > other
if other.is_number and other.is_real:
return self.args[0] > floor(other)
if self.args[0] == other and other.is_real and other.is_noninteger:
return S.true
if other is S.NegativeInfinity and self.is_finite:
return S.true
return Gt(self, other, evaluate=False)
def __ge__(self, other):
other = S(other)
if self.args[0].is_real:
if other.is_integer:
return self.args[0] > other - 1
if other.is_number and other.is_real:
return self.args[0] > floor(other)
if self.args[0] == other and other.is_real:
return S.true
if other is S.NegativeInfinity and self.is_finite:
return S.true
return Ge(self, other, evaluate=False)
def __le__(self, other):
other = S(other)
if self.args[0].is_real:
if other.is_integer:
return self.args[0] <= other
if other.is_number and other.is_real:
return self.args[0] <= floor(other)
if self.args[0] == other and other.is_real and other.is_noninteger:
return S.false
if other is S.Infinity and self.is_finite:
return S.true
return Le(self, other, evaluate=False)
@dispatch(ceiling, Basic) # type:ignore
def _eval_is_eq(lhs, rhs): # noqa:F811
return is_eq(lhs.rewrite(floor), rhs) or is_eq(lhs.rewrite(frac),rhs)
|
ceiling
|
python
|
cython__cython
|
Demos/benchmarks/chaos.py
|
{
"start": 4607,
"end": 9903
}
|
class ____(object):
@cython.locals(splines=list, thickness=cython.double, maxlength=cython.double, length=cython.double,
curr=GVector, last=GVector, p=GVector, spl=Spline, t=cython.double, i=int)
def __init__(self, splines, thickness=0.1):
self.splines = splines
self.thickness = thickness
self.minx = min([p.x for spl in splines for p in spl.points])
self.miny = min([p.y for spl in splines for p in spl.points])
self.maxx = max([p.x for spl in splines for p in spl.points])
self.maxy = max([p.y for spl in splines for p in spl.points])
self.height = self.maxy - self.miny
self.width = self.maxx - self.minx
self.num_trafos = []
maxlength = thickness * self.width / self.height
for spl in splines:
length = 0
curr = spl(0)
for i in range(1, 1000):
last = curr
t = 1 / 999 * i
curr = spl(t)
length += curr.dist(last)
self.num_trafos.append(max(1, int(length / maxlength * 1.5)))
self.num_total = reduce(operator.add, self.num_trafos, 0)
def get_random_trafo(self):
r = random.randrange(int(self.num_total) + 1)
l = 0
for i in range(len(self.num_trafos)):
if l <= r < l + self.num_trafos[i]:
return i, random.randrange(self.num_trafos[i])
l += self.num_trafos[i]
return len(self.num_trafos) - 1, random.randrange(self.num_trafos[-1])
@cython.locals(neighbour="GVector", basepoint="GVector", derivative="GVector",
seg_length=cython.double, start=cython.double, end=cython.double,
t=cython.double)
def transform_point(self, point, trafo=None):
x = (point.x - self.minx) / self.width
y = (point.y - self.miny) / self.height
if trafo is None:
trafo = self.get_random_trafo()
start, end = self.splines[trafo[0]].GetDomain()
length = end - start
seg_length = length / self.num_trafos[trafo[0]]
t = start + seg_length * trafo[1] + seg_length * x
basepoint = self.splines[trafo[0]](t)
if t + 1/50000 > end:
neighbour = self.splines[trafo[0]](t - 1/50000)
derivative = neighbour - basepoint
else:
neighbour = self.splines[trafo[0]](t + 1/50000)
derivative = basepoint - neighbour
if derivative.Mag() != 0:
basepoint.x += derivative.y / derivative.Mag() * (y - 0.5) * \
self.thickness
basepoint.y += -derivative.x / derivative.Mag() * (y - 0.5) * \
self.thickness
else:
print("r", end='')
self.truncate(basepoint)
return basepoint
def truncate(self, point):
if point.x >= self.maxx:
point.x = self.maxx
if point.y >= self.maxy:
point.y = self.maxy
if point.x < self.minx:
point.x = self.minx
if point.y < self.miny:
point.y = self.miny
@cython.locals(x=cython.long, y=cython.long)
def create_image_chaos(self, timer, w, h, n):
im = [[1] * h for i in range(w)]
point = GVector((self.maxx + self.minx) / 2,
(self.maxy + self.miny) / 2, 0)
times = []
for _ in range(n):
t1 = timer()
for i in range(5000):
point = self.transform_point(point)
x = int((point.x - self.minx) / self.width * w)
y = int((point.y - self.miny) / self.height * h)
if x == w:
x -= 1
if y == h:
y -= 1
im[x][h - y - 1] = 0
t2 = timer()
times.append(t2 - t1)
return times
def main(n, timer=time.time):
splines = [
Spline([
GVector(1.597350, 3.304460, 0.000000),
GVector(1.575810, 4.123260, 0.000000),
GVector(1.313210, 5.288350, 0.000000),
GVector(1.618900, 5.329910, 0.000000),
GVector(2.889940, 5.502700, 0.000000),
GVector(2.373060, 4.381830, 0.000000),
GVector(1.662000, 4.360280, 0.000000)],
3, [0, 0, 0, 1, 1, 1, 2, 2, 2]),
Spline([
GVector(2.804500, 4.017350, 0.000000),
GVector(2.550500, 3.525230, 0.000000),
GVector(1.979010, 2.620360, 0.000000),
GVector(1.979010, 2.620360, 0.000000)],
3, [0, 0, 0, 1, 1, 1]),
Spline([
GVector(2.001670, 4.011320, 0.000000),
GVector(2.335040, 3.312830, 0.000000),
GVector(2.366800, 3.233460, 0.000000),
GVector(2.366800, 3.233460, 0.000000)],
3, [0, 0, 0, 1, 1, 1])
]
c = Chaosgame(splines, 0.25)
return c.create_image_chaos(timer, 1000, 1200, n)
if __name__ == "__main__":
import util
parser = optparse.OptionParser(
usage="%prog [options]",
description="Test the performance of the Chaos benchmark")
util.add_standard_options_to(parser)
options, args = parser.parse_args()
util.run_benchmark(options, options.num_runs, main)
|
Chaosgame
|
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/source-amazon-ads/unit_tests/integrations/ad_responses/report_check_status_response_builder.py
|
{
"start": 277,
"end": 836
}
|
class ____(HttpResponseBuilder):
@classmethod
def check_status_response(cls) -> "ReportCheckStatusResponseBuilder":
return cls(find_template("report_status_response", __file__), DictTemplatePath(), None)
def with_record(self, record: RecordBuilder) -> HttpResponseBuilder:
self._records = record
return self
def build(self) -> HttpResponse:
self._records_path.update(self._response, self._records.build())
return HttpResponse(json.dumps(self._response), self._status_code)
|
ReportCheckStatusResponseBuilder
|
python
|
tensorflow__tensorflow
|
tensorflow/python/profiler/profiler_v2.py
|
{
"start": 6608,
"end": 7426
}
|
class ____(object):
"""Context-manager profile API.
Profiling will start when entering the scope, and stop and save the results to
the logdir when exits the scope. Open TensorBoard profile tab to view results.
Example usage:
```python
with tf.profiler.experimental.Profile("/path/to/logdir"):
# do some work
```
"""
def __init__(self, logdir, options=None):
"""Creates a context manager object for profiler API.
Args:
logdir: profile data will save to this directory.
options: An optional `tf.profiler.experimental.ProfilerOptions` can be
provided to fine tune the profiler's behavior.
"""
self._logdir = logdir
self._options = options
def __enter__(self):
start(self._logdir, self._options)
def __exit__(self, typ, value, tb):
stop()
|
Profile
|
python
|
run-llama__llama_index
|
llama-index-core/tests/tools/tool_spec/test_base.py
|
{
"start": 296,
"end": 354
}
|
class ____(BaseModel):
arg1: str
arg2: int
|
FooSchema
|
python
|
great-expectations__great_expectations
|
docs/sphinx_api_docs_source/public_api_report.py
|
{
"start": 18977,
"end": 28813
}
|
class ____:
"""Bring together various parsing and filtering tools to build a filtered set of Definitions.
Also adds the capability of filtering and including whole files or entities manually.
"""
DEFAULT_INCLUDES = public_api_includes.DEFAULT_INCLUDES
DEFAULT_EXCLUDES = public_api_excludes.DEFAULT_EXCLUDES
def __init__( # noqa: PLR0913
self,
repo_root: pathlib.Path,
docs_example_parser: DocsExampleParser,
code_parser: CodeParser,
public_api_checker: PublicAPIChecker,
references_from_docs_content: set[str] | None = None,
excludes: Union[List[IncludeExcludeDefinition], None] = None,
includes: Union[List[IncludeExcludeDefinition], None] = None,
) -> None:
"""Create a CodeReferenceFilter.
Args:
repo_root: Repository root directory, for use in creating relative paths.
docs_example_parser: A DocsExampleParser initialized with the file
contents from all docs examples to process.
code_parser: A CodeParser initialized with library code.
public_api_checker: A PublicAPIChecker to aid in filtering.
excludes: Override default excludes by supplying a list of
IncludeExcludeDefinition instances.
includes: Override default includes by supplying a list of
IncludeExcludeDefinition instances. Note: Includes override
excludes if they are conflicting.
"""
self.repo_root = repo_root
self.docs_example_parser = docs_example_parser
self.code_parser = code_parser
self.public_api_checker = public_api_checker
if not references_from_docs_content:
self.references_from_docs_content = set()
else:
self.references_from_docs_content = references_from_docs_content
if not excludes:
self.excludes = self.DEFAULT_EXCLUDES
else:
self.excludes = excludes
if not includes:
self.includes = self.DEFAULT_INCLUDES
else:
self.includes = includes
def filter_definitions(self) -> Set[Definition]:
"""Main method to perform all filtering.
Filters Definitions of entities (class, method and function).
Returned Definitions:
1. Appear in published documentation examples.
2. Are not private.
3. Are included or not excluded by an IncludeExcludeDefinition.
4. Are not marked with the @public_api decorator.
Returns:
Definitions that pass all filters.
"""
usages_in_docs_examples_and_docs_content: Set[str] = (
self._docs_examples_usages() | self.references_from_docs_content
)
gx_definitions_used_in_docs_examples: Set[Definition] = (
self._filter_gx_definitions_from_docs_examples(
gx_usages_in_docs_examples=usages_in_docs_examples_and_docs_content
)
)
non_private_definitions: Set[Definition] = self._filter_private_entities(
definitions=gx_definitions_used_in_docs_examples
)
included_definitions: Set[Definition] = self._filter_or_include(
definitions=non_private_definitions
)
definitions_not_marked_public_api: Set[Definition] = (
self._filter_for_definitions_not_marked_public_api(
definitions=included_definitions
)
)
return definitions_not_marked_public_api
def _docs_examples_usages(self) -> Set[str]:
"""Filter list of classes & methods from docs examples to only those found in
the GX codebase
(e.g. filter out print() or other python or 3rd party classes/methods).
"""
doc_example_usages: Set[str] = (
self.docs_example_parser.get_names_from_usage_in_docs_examples()
)
gx_code_definitions = self.code_parser.get_all_class_method_and_function_names()
doc_example_usages_of_gx_code = doc_example_usages.intersection(
gx_code_definitions
)
return doc_example_usages_of_gx_code
def _filter_gx_definitions_from_docs_examples(
self, gx_usages_in_docs_examples: Set[str]
) -> Set[Definition]:
"""Filter the list of GX definitions except those used in docs examples.
Use the docs examples filtered list against the list of class and method
definitions in the GX codebase to generate the full list with definition
locations in the GX codebase.
Returns:
Set of Definition objects with filepath locations.
"""
gx_code_definitions = (
self.code_parser.get_all_class_method_and_function_definitions()
)
gx_code_definitions_appearing_in_docs_examples = {
d for d in gx_code_definitions if d.name in gx_usages_in_docs_examples
}
return gx_code_definitions_appearing_in_docs_examples
def _filter_private_entities(self, definitions: Set[Definition]) -> Set[Definition]:
"""Filter out private entities (classes, methods and functions with leading underscore)."""
return {d for d in definitions if not self._is_definition_private(definition=d)}
def _filter_or_include(self, definitions: Set[Definition]) -> Set[Definition]:
"""Filter definitions per all IncludeExcludeDefinition directives.
Includes override excludes, and also don't require the included entity
to be used in docs examples.
"""
included_definitions: List[Definition] = []
all_gx_code_definitions = (
self.code_parser.get_all_class_method_and_function_definitions()
)
for definition in definitions:
definition_filepath = self._repo_relative_filepath(
filepath=definition.filepath
)
exclude: bool = self._is_filepath_excluded(
definition_filepath
) or self._is_definition_excluded(definition)
include: bool = self._is_filepath_included(
definition_filepath
) or self._is_definition_included(definition)
if include or not exclude:
included_definitions.append(definition)
for definition in all_gx_code_definitions:
definition_filepath = self._repo_relative_filepath(
filepath=definition.filepath
)
include_from_all_gx_definitions: bool = self._is_filepath_included(
definition_filepath
) or self._is_definition_included(definition)
if (
include_from_all_gx_definitions
and definition not in included_definitions
):
included_definitions.append(definition)
return set(included_definitions)
def _repo_relative_filepath(self, filepath: pathlib.Path) -> pathlib.Path:
if filepath.is_absolute():
return filepath.relative_to(self.repo_root)
else:
return filepath
def _repo_relative_filepath_comparison(
self, filepath_1: pathlib.Path, filepath_2: pathlib.Path
) -> bool:
return str(self._repo_relative_filepath(filepath_1)) == str(
self._repo_relative_filepath(filepath_2)
)
def _filter_for_definitions_not_marked_public_api(
self, definitions: Set[Definition]
) -> Set[Definition]:
"""Return only those Definitions that are not marked with the public api decorator."""
return {
d
for d in definitions
if not self.public_api_checker.is_definition_marked_public_api(d)
}
def _is_filepath_excluded(self, filepath: pathlib.Path) -> bool:
"""Check whether an entire filepath is excluded."""
full_filepaths_excluded = [p.filepath for p in self.excludes if not p.name]
return filepath in full_filepaths_excluded
def _is_definition_excluded(self, definition: Definition) -> bool:
"""Check whether a definition (filepath / name combo) is excluded."""
definitions_excluded = [d for d in self.excludes if d.name and d.filepath]
for definition_excluded in definitions_excluded:
filepath_excluded = self._repo_relative_filepath_comparison(
definition.filepath,
definition_excluded.filepath, # type: ignore[arg-type]
)
name_excluded = definition.name == definition_excluded.name
if filepath_excluded and name_excluded:
return True
return False
def _is_filepath_included(self, filepath: pathlib.Path) -> bool:
"""Check whether an entire filepath is included."""
full_filepaths_included = [p.filepath for p in self.includes if not p.name]
return filepath in full_filepaths_included
def _is_definition_included(self, definition: Definition) -> bool:
"""Check whether a definition (filepath / name combo) is included."""
definitions_included = [d for d in self.includes if d.name and d.filepath]
for definition_included in definitions_included:
filepath_included = self._repo_relative_filepath_comparison(
definition.filepath,
definition_included.filepath, # type: ignore[arg-type]
)
name_included = definition.name == definition_included.name
if filepath_included and name_included:
return True
return False
def _is_definition_private(self, definition: Definition) -> bool:
"""Check whether the name of a definition is for a private method or class."""
return definition.name.startswith("_")
|
CodeReferenceFilter
|
python
|
dagster-io__dagster
|
python_modules/dagster-graphql/dagster_graphql/schema/asset_graph.py
|
{
"start": 57107,
"end": 57342
}
|
class ____(graphene.ObjectType):
id = graphene.NonNull(graphene.String)
groupName = graphene.NonNull(graphene.String)
assetKeys = non_null_list(GrapheneAssetKey)
class Meta:
name = "AssetGroup"
|
GrapheneAssetGroup
|
python
|
scrapy__scrapy
|
tests/test_spidermiddleware_httperror.py
|
{
"start": 4654,
"end": 6220
}
|
class ____:
@pytest.fixture
def mw(self) -> HttpErrorMiddleware:
crawler = get_crawler(DefaultSpider, {"HTTPERROR_ALLOW_ALL": True})
crawler.spider = crawler._create_spider()
return HttpErrorMiddleware.from_crawler(crawler)
def test_process_spider_input(
self,
mw: HttpErrorMiddleware,
res200: Response,
res404: Response,
) -> None:
mw.process_spider_input(res200)
mw.process_spider_input(res404)
def test_meta_overrides_settings(self, mw: HttpErrorMiddleware) -> None:
request = Request(
"http://scrapytest.org", meta={"handle_httpstatus_list": [404]}
)
res404 = _response(request, 404)
res402 = _response(request, 402)
mw.process_spider_input(res404)
with pytest.raises(HttpError):
mw.process_spider_input(res402)
def test_httperror_allow_all_false(self) -> None:
crawler = get_crawler(_HttpErrorSpider)
mw = HttpErrorMiddleware.from_crawler(crawler)
request_httpstatus_false = Request(
"http://scrapytest.org", meta={"handle_httpstatus_all": False}
)
request_httpstatus_true = Request(
"http://scrapytest.org", meta={"handle_httpstatus_all": True}
)
res404 = _response(request_httpstatus_false, 404)
res402 = _response(request_httpstatus_true, 402)
with pytest.raises(HttpError):
mw.process_spider_input(res404)
mw.process_spider_input(res402)
|
TestHttpErrorMiddlewareHandleAll
|
python
|
python-pillow__Pillow
|
src/PIL/ImageFile.py
|
{
"start": 23897,
"end": 25976
}
|
class ____:
fd: IO[bytes] | None
def __init__(self, mode: str, *args: Any) -> None:
self.im: Image.core.ImagingCore | None = None
self.state = PyCodecState()
self.fd = None
self.mode = mode
self.init(args)
def init(self, args: tuple[Any, ...]) -> None:
"""
Override to perform codec specific initialization
:param args: Tuple of arg items from the tile entry
:returns: None
"""
self.args = args
def cleanup(self) -> None:
"""
Override to perform codec specific cleanup
:returns: None
"""
pass
def setfd(self, fd: IO[bytes]) -> None:
"""
Called from ImageFile to set the Python file-like object
:param fd: A Python file-like object
:returns: None
"""
self.fd = fd
def setimage(
self,
im: Image.core.ImagingCore,
extents: tuple[int, int, int, int] | None = None,
) -> None:
"""
Called from ImageFile to set the core output image for the codec
:param im: A core image object
:param extents: a 4 tuple of (x0, y0, x1, y1) defining the rectangle
for this tile
:returns: None
"""
# following c code
self.im = im
if extents:
(x0, y0, x1, y1) = extents
else:
(x0, y0, x1, y1) = (0, 0, 0, 0)
if x0 == 0 and x1 == 0:
self.state.xsize, self.state.ysize = self.im.size
else:
self.state.xoff = x0
self.state.yoff = y0
self.state.xsize = x1 - x0
self.state.ysize = y1 - y0
if self.state.xsize <= 0 or self.state.ysize <= 0:
msg = "Size cannot be negative"
raise ValueError(msg)
if (
self.state.xsize + self.state.xoff > self.im.size[0]
or self.state.ysize + self.state.yoff > self.im.size[1]
):
msg = "Tile cannot extend outside image"
raise ValueError(msg)
|
PyCodec
|
python
|
getsentry__sentry
|
src/sentry/rules/processing/delayed_processing.py
|
{
"start": 4700,
"end": 9003
}
|
class ____:
"""
LogConfig efficiently caches the results of features.has calls; these are project/org
based, should be stable within our task, and caching them helps avoid generating
excessive spans and saves a bit of time.
"""
# Cached value of features.has("projects:num-events-issue-debugging", project)
num_events_issue_debugging: bool
@classmethod
def create(cls, project: Project) -> "LogConfig":
return cls(
num_events_issue_debugging=features.has("projects:num-events-issue-debugging", project),
)
def generate_unique_queries(
condition_data: EventFrequencyConditionData, environment_id: int
) -> list[UniqueConditionQuery]:
"""
Returns a list of all unique condition queries that must be made for the
given condition instance.
Count comparison conditions will only have one unique query, while percent
comparison conditions will have two unique queries.
"""
unique_queries = [
UniqueConditionQuery(
cls_id=condition_data["id"],
interval=condition_data["interval"],
environment_id=environment_id,
)
]
if condition_data.get("comparisonType") == ComparisonType.PERCENT:
# We will later compare the first query results against the second query to calculate
# a percentage for percentage comparison conditions.
comparison_interval = condition_data.get("comparisonInterval", DEFAULT_COMPARISON_INTERVAL)
second_query_data = unique_queries[0]._asdict()
second_query_data["comparison_interval"] = comparison_interval
unique_queries.append(UniqueConditionQuery(**second_query_data))
return unique_queries
def get_condition_query_groups(
alert_rules: list[Rule], rules_to_groups: DefaultDict[int, set[int]]
) -> dict[UniqueConditionQuery, DataAndGroups]:
"""
Map unique condition queries to the group IDs that need to checked for that
query. We also store a pointer to that condition's JSON so we can
instantiate the class later.
"""
condition_groups: dict[UniqueConditionQuery, DataAndGroups] = {}
for rule in alert_rules:
slow_conditions = get_slow_conditions(rule)
for condition_data in slow_conditions:
for condition_query in generate_unique_queries(condition_data, rule.environment_id):
# NOTE: If percent and count comparison conditions are sharing
# the same UniqueConditionQuery, the condition JSON in
# DataAndGroups will be incorrect for one of those types.
# The JSON will either have or be missing a comparisonInterval
# which only applies to percent conditions, and have the incorrect
# comparisonType for one type. This is not a concern because
# when we instantiate the exact condition class with the JSON,
# the class ignores both fields when calling get_rate_bulk.
# Add to set of group_ids if there are already group_ids
# that apply to the unique condition query.
if data_and_groups := condition_groups.get(condition_query):
data_and_groups.group_ids.update(rules_to_groups[rule.id])
else:
condition_groups[condition_query] = DataAndGroups(
condition_data, set(rules_to_groups[rule.id]), rule.id
)
return condition_groups
def bulk_fetch_events(event_ids: list[str], project_id: int) -> dict[str, Event]:
node_id_to_event_id = {
Event.generate_node_id(project_id, event_id=event_id): event_id for event_id in event_ids
}
node_ids = list(node_id_to_event_id.keys())
fetch_retry_policy = ConditionalRetryPolicy(should_retry_fetch, exponential_delay(1.00))
bulk_data = {}
for node_id_chunk in chunked(node_ids, EVENT_LIMIT):
bulk_results = fetch_retry_policy(lambda: nodestore.backend.get_multi(node_id_chunk))
bulk_data.update(bulk_results)
return {
node_id_to_event_id[node_id]: Event(
event_id=node_id_to_event_id[node_id], project_id=project_id, data=data
)
for node_id, data in bulk_data.items()
if data is not None
}
|
LogConfig
|
python
|
spyder-ide__spyder
|
external-deps/qtconsole/qtconsole/inprocess.py
|
{
"start": 1892,
"end": 2240
}
|
class ____(QtKernelClientMixin, InProcessKernelClient):
""" An in-process KernelManager with signals and slots.
"""
iopub_channel_class = Type(QtInProcessChannel)
shell_channel_class = Type(QtInProcessChannel)
stdin_channel_class = Type(QtInProcessChannel)
hb_channel_class = Type(QtInProcessHBChannel)
|
QtInProcessKernelClient
|
python
|
milvus-io__pymilvus
|
tests/test_milvus_lite.py
|
{
"start": 316,
"end": 2649
}
|
class ____:
@pytest.mark.skip("Milvus Lite is now an optional dependency. This test will be fixed later.")
def test_milvus_lite(self):
with TemporaryDirectory(dir="./") as root:
db_file = pathlib.Path(root).joinpath("test.db")
client = MilvusClient(db_file.as_posix())
client.create_collection(collection_name="demo_collection", dimension=3)
# Text strings to search from.
docs = [
"Artificial intelligence was founded as an academic discipline in 1956.",
"Alan Turing was the first person to conduct substantial research in AI.",
"Born in Maida Vale, London, Turing was raised in southern England.",
]
rng = np.random.default_rng(seed=19530)
vectors = [[rng.uniform(-1, 1) for _ in range(3)] for _ in range(len(docs))]
data = [
{"id": i, "vector": vectors[i], "text": docs[i], "subject": "history"}
for i in range(len(vectors))
]
res = client.insert(collection_name="demo_collection", data=data)
assert res["insert_count"] == 3
res = client.search(
collection_name="demo_collection",
data=[vectors[0]],
filter="subject == 'history'",
limit=2,
output_fields=["text", "subject"],
)
assert len(res[0]) == 2
# a query that retrieves all entities matching filter expressions.
res = client.query(
collection_name="demo_collection",
filter="subject == 'history'",
output_fields=["text", "subject"],
)
assert len(res) == 3
# delete
res = client.delete(
collection_name="demo_collection",
filter="subject == 'history'",
)
assert len(res) == 3
def test_illegal_name(self):
with pytest.raises(ConnectionConfigException) as e:
MilvusClient("localhost")
# check the raised exception contained
assert (
e.value.message
== "uri: localhost is illegal, needs start with [unix, http, https, tcp] or a local file endswith [.db]"
)
|
TestMilvusLite
|
python
|
facebookresearch__faiss
|
tests/test_index_accuracy.py
|
{
"start": 18657,
"end": 19682
}
|
class ____(unittest.TestCase):
def test_flat_1d(self):
rs = np.random.RandomState(123545)
k = 10
xb = rs.uniform(size=(100, 1)).astype("float32")
# make sure to test below and above
xq = rs.uniform(size=(1000, 1)).astype("float32") * 1.1 - 0.05
ref = faiss.IndexFlatL2(1)
ref.add(xb)
ref_D, ref_I = ref.search(xq, k)
new = faiss.IndexFlat1D()
new.add(xb)
new_D, new_I = new.search(xq, 10)
ndiff = (np.abs(ref_I - new_I) != 0).sum()
assert ndiff < 100
new_D = new_D ** 2
max_diff_D = np.abs(ref_D - new_D).max()
assert max_diff_D < 1e-5
def test_size_0(self):
# just make sure it does not crash on small nb
index = faiss.IndexFlat1D()
rs = np.random.RandomState(123)
for i in range(3):
x = np.array([[rs.rand()]])
D, I = index.search(x, 10)
self.assertEqual((I == -1).sum(), 10 - i)
index.add(x)
|
TestFlat1D
|
python
|
google__pytype
|
pytype/tests/test_methods2.py
|
{
"start": 3457,
"end": 5422
}
|
class ____(test_base.BaseTest):
"""Test python3-specific method features."""
def test_init_subclass_classmethod(self):
"""__init_subclass__ should be promoted to a classmethod."""
self.Check("""
from typing import Type
_REGISTERED_BUILDERS = {}
class A():
def __init_subclass__(cls, **kwargs):
_REGISTERED_BUILDERS['name'] = cls
def get_builder(name: str) -> Type[A]:
return _REGISTERED_BUILDERS[name]
""")
def test_pass_through_typevar(self):
self.Check("""
from typing import TypeVar
F = TypeVar('F')
def f(x: F) -> F:
return x
class A:
def f(self, x: float) -> float:
return x
g = f(A().f)
assert_type(g(0), float)
""")
def test_dunder_self(self):
self.Check("""
from typing import Type
class A:
def foo(self):
return 42
@classmethod
def bar(cls):
return cls()
a = A().foo.__self__
b = A.bar.__self__
assert_type(a, A)
assert_type(b, Type[A])
""")
def test_signature_inference(self):
ty = self.Infer("""
class C:
def __init__(self, fn1, fn2):
self._fn1 = fn1
self._fn2 = fn2
def f(self, x):
self._fn1(x)
self._fn2(x=x)
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import Any
class C:
def __init__(self, fn1, fn2) -> None: ...
def f(self, x) -> None: ...
def _fn1(self, _1) -> Any: ...
def _fn2(self, x) -> Any: ...
""",
)
def test_func(self):
ty = self.Infer("""
class Foo:
def f(self):
pass
f = Foo().f.__func__
""")
self.assertTypesMatchPytd(
ty,
"""
class Foo:
def f(self) -> None: ...
def f(self: Foo) -> None: ...
""",
)
if __name__ == "__main__":
test_base.main()
|
TestMethodsPy3
|
python
|
faif__python-patterns
|
patterns/behavioral/mediator.py
|
{
"start": 335,
"end": 487
}
|
class ____:
"""Mediator class"""
def display_message(self, user: User, message: str) -> None:
return f"[{user} says]: {message}"
|
ChatRoom
|
python
|
facelessuser__pymdown-extensions
|
pymdownx/blocks/definition.py
|
{
"start": 1407,
"end": 1754
}
|
class ____(BlocksExtension):
"""Definition Blocks Extension."""
def extendMarkdownBlocks(self, md, block_mgr):
"""Extend Markdown blocks."""
block_mgr.register(Definition, self.getConfigs())
def makeExtension(*args, **kwargs):
"""Return extension."""
return DefinitionExtension(*args, **kwargs)
|
DefinitionExtension
|
python
|
getsentry__sentry
|
src/sentry/models/organizationonboardingtask.py
|
{
"start": 905,
"end": 1315
}
|
class ____(enum.IntEnum):
FIRST_PROJECT = 1
FIRST_EVENT = 2
INVITE_MEMBER = 3
SECOND_PLATFORM = 4
RELEASE_TRACKING = 6
SOURCEMAPS = 7
ALERT_RULE = 10
FIRST_TRANSACTION = 11
SESSION_REPLAY = 14
REAL_TIME_NOTIFICATIONS = 15
LINK_SENTRY_TO_SOURCE_CODE = 16
@classmethod
def values(cls) -> list[int]:
return [member.value for member in cls]
|
OnboardingTask
|
python
|
apache__airflow
|
providers/amazon/tests/unit/amazon/aws/waiters/test_bedrock.py
|
{
"start": 1894,
"end": 2112
}
|
class ____:
@pytest.fixture(autouse=True)
def mock_conn(self, monkeypatch):
self.client = boto3.client("bedrock")
monkeypatch.setattr(BedrockHook, "conn", self.client)
|
TestBedrockCustomWaitersBase
|
python
|
pypa__pipenv
|
pipenv/patched/pip/_internal/utils/temp_dir.py
|
{
"start": 2137,
"end": 6612
}
|
class ____:
"""Helper class that owns and cleans up a temporary directory.
This class can be used as a context manager or as an OO representation of a
temporary directory.
Attributes:
path
Location to the created temporary directory
delete
Whether the directory should be deleted when exiting
(when used as a contextmanager)
Methods:
cleanup()
Deletes the temporary directory
When used as a context manager, if the delete attribute is True, on
exiting the context the temporary directory is deleted.
"""
def __init__(
self,
path: Optional[str] = None,
delete: Union[bool, None, _Default] = _default,
kind: str = "temp",
globally_managed: bool = False,
ignore_cleanup_errors: bool = True,
):
super().__init__()
if delete is _default:
if path is not None:
# If we were given an explicit directory, resolve delete option
# now.
delete = False
else:
# Otherwise, we wait until cleanup and see what
# tempdir_registry says.
delete = None
# The only time we specify path is in for editables where it
# is the value of the --src option.
if path is None:
path = self._create(kind)
self._path = path
self._deleted = False
self.delete = delete
self.kind = kind
self.ignore_cleanup_errors = ignore_cleanup_errors
if globally_managed:
assert _tempdir_manager is not None
_tempdir_manager.enter_context(self)
@property
def path(self) -> str:
assert not self._deleted, f"Attempted to access deleted path: {self._path}"
return self._path
def __repr__(self) -> str:
return f"<{self.__class__.__name__} {self.path!r}>"
def __enter__(self: _T) -> _T:
return self
def __exit__(self, exc: Any, value: Any, tb: Any) -> None:
if self.delete is not None:
delete = self.delete
elif _tempdir_registry:
delete = _tempdir_registry.get_delete(self.kind)
else:
delete = True
if delete:
self.cleanup()
def _create(self, kind: str) -> str:
"""Create a temporary directory and store its path in self.path"""
# We realpath here because some systems have their default tmpdir
# symlinked to another directory. This tends to confuse build
# scripts, so we canonicalize the path by traversing potential
# symlinks here.
path = os.path.realpath(tempfile.mkdtemp(prefix=f"pip-{kind}-"))
logger.debug("Created temporary directory: %s", path)
return path
def cleanup(self) -> None:
"""Remove the temporary directory created and reset state"""
self._deleted = True
if not os.path.exists(self._path):
return
errors: List[BaseException] = []
def onerror(
func: Callable[..., Any],
path: Path,
exc_val: BaseException,
) -> None:
"""Log a warning for a `rmtree` error and continue"""
formatted_exc = "\n".join(
traceback.format_exception_only(type(exc_val), exc_val)
)
formatted_exc = formatted_exc.rstrip() # remove trailing new line
if func in (os.unlink, os.remove, os.rmdir):
logger.debug(
"Failed to remove a temporary file '%s' due to %s.\n",
path,
formatted_exc,
)
else:
logger.debug("%s failed with %s.", func.__qualname__, formatted_exc)
errors.append(exc_val)
if self.ignore_cleanup_errors:
try:
# first try with @retry; retrying to handle ephemeral errors
rmtree(self._path, ignore_errors=False)
except OSError:
# last pass ignore/log all errors
rmtree(self._path, onexc=onerror)
if errors:
logger.warning(
"Failed to remove contents in a temporary directory '%s'.\n"
"You can safely remove it manually.",
self._path,
)
else:
rmtree(self._path)
|
TempDirectory
|
python
|
charliermarsh__ruff
|
crates/ty_python_semantic/resources/corpus/92_qual_class_in_class.py
|
{
"start": 0,
"end": 40
}
|
class ____:
class Foo:
pass
|
Bar
|
python
|
readthedocs__readthedocs.org
|
readthedocs/organizations/filters.py
|
{
"start": 1470,
"end": 3088
}
|
class ____(OrderingFilter):
"""Organization list sort ordering django_filters filter."""
SORT_NAME = "name"
SORT_CREATE_DATE = "pub_date"
def __init__(self, *args, **kwargs):
# The default filtering operation will be `name`, so we omit it
# from choices to avoid showing it on the list twice.
kwargs.setdefault("empty_label", _("Name"))
kwargs.setdefault(
"choices",
(
("-" + self.SORT_CREATE_DATE, _("Recently created")),
(self.SORT_CREATE_DATE, _("Least recently created")),
(self.SORT_NAME, _("Name")),
("-" + self.SORT_NAME, _("Name (descending)")),
),
)
super().__init__(*args, **kwargs)
def filter(self, qs, value):
# We use the None value from the custom filter, which django-filters is
# a bit opinionated about. This is an explicit check for ``None``
# instead of setting some default value, purposely to make display of
# the unused/default filter correct.
if not value:
value = [self.SORT_NAME]
order_bys = []
for field_ordered in value:
field = field_ordered.lstrip("-")
if field_ordered == self.SORT_CREATE_DATE:
order_bys.append(F(field).desc(nulls_last=True))
elif field_ordered == "-" + self.SORT_CREATE_DATE:
order_bys.append(F(field).asc(nulls_first=True))
else:
order_bys.append(field_ordered)
return qs.order_by(*order_bys)
|
OrganizationSortOrderingFilter
|
python
|
numpy__numpy
|
numpy/distutils/system_info.py
|
{
"start": 39102,
"end": 40733
}
|
class ____(system_info):
#variables to override
section = 'fftw'
dir_env_var = 'FFTW'
notfounderror = FFTWNotFoundError
ver_info = [{'name':'fftw3',
'libs':['fftw3'],
'includes':['fftw3.h'],
'macros':[('SCIPY_FFTW3_H', None)]},
{'name':'fftw2',
'libs':['rfftw', 'fftw'],
'includes':['fftw.h', 'rfftw.h'],
'macros':[('SCIPY_FFTW_H', None)]}]
def calc_ver_info(self, ver_param):
"""Returns True on successful version detection, else False"""
lib_dirs = self.get_lib_dirs()
incl_dirs = self.get_include_dirs()
opt = self.get_option_single(self.section + '_libs', 'libraries')
libs = self.get_libs(opt, ver_param['libs'])
info = self.check_libs(lib_dirs, libs)
if info is not None:
flag = 0
for d in incl_dirs:
if len(self.combine_paths(d, ver_param['includes'])) \
== len(ver_param['includes']):
dict_append(info, include_dirs=[d])
flag = 1
break
if flag:
dict_append(info, define_macros=ver_param['macros'])
else:
info = None
if info is not None:
self.set_info(**info)
return True
else:
log.info(' %s not found' % (ver_param['name']))
return False
def calc_info(self):
for i in self.ver_info:
if self.calc_ver_info(i):
break
|
fftw_info
|
python
|
Pylons__pyramid
|
tests/test_response.py
|
{
"start": 6010,
"end": 6352
}
|
class ____(unittest.TestCase):
def test_get_factory(self):
from pyramid.registry import Registry
from pyramid.response import Response, _get_response_factory
registry = Registry()
response = _get_response_factory(registry)(None)
self.assertTrue(isinstance(response, Response))
|
TestGetResponseFactory
|
python
|
weaviate__weaviate-python-client
|
weaviate/rbac/models.py
|
{
"start": 1392,
"end": 1470
}
|
class ____(TypedDict):
role: str
scope: NotRequired[str]
|
PermissionRoles
|
python
|
Netflix__metaflow
|
metaflow/plugins/env_escape/configurations/test_lib_impl/test_lib.py
|
{
"start": 148,
"end": 415
}
|
class ____(MyBaseException):
def __init__(self, *args):
super().__init__(*args)
def method_on_exception(self):
return "method_on_exception"
def __str__(self):
return "ExceptionAndClass Str: %s" % super().__str__()
|
ExceptionAndClass
|
python
|
apache__airflow
|
providers/google/src/airflow/providers/google/cloud/transfers/bigquery_to_postgres.py
|
{
"start": 1445,
"end": 5242
}
|
class ____(BigQueryToSqlBaseOperator):
"""
Fetch data from a BigQuery table (alternatively fetch selected columns) and insert into PostgreSQL table.
Due to constraints of the PostgreSQL's ON CONFLICT clause both `selected_fields` and `replace_index`
parameters need to be specified when using the operator with parameter `replace=True`.
In effect this means that in order to run this operator with `replace=True` your target table MUST
already have a unique index column / columns, otherwise the INSERT command will fail with an error.
See more at https://www.postgresql.org/docs/current/sql-insert.html.
Please note that currently most of the clauses that can be used with PostgreSQL's INSERT
command, such as ON CONSTRAINT, WHERE, DEFAULT, etc., are not supported by this operator.
If you need the clauses for your queries, `SQLExecuteQueryOperator` will be a more suitable option.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:BigQueryToPostgresOperator`
:param target_table_name: target Postgres table (templated)
:param postgres_conn_id: Reference to :ref:`postgres connection id <howto/connection:postgres>`.
:param replace: Whether to replace instead of insert
:param selected_fields: List of fields to return (comma-separated). If
unspecified, all fields are returned. Must be specified if `replace` is True
:param replace_index: the column or list of column names to act as
index for the ON CONFLICT clause. Must be specified if `replace` is True
"""
def __init__(
self,
*,
target_table_name: str,
postgres_conn_id: str = "postgres_default",
replace: bool = False,
selected_fields: list[str] | str | None = None,
replace_index: list[str] | str | None = None,
**kwargs,
) -> None:
if replace and not (selected_fields and replace_index):
raise ValueError("PostgreSQL ON CONFLICT upsert syntax requires column names and a unique index.")
super().__init__(
target_table_name=target_table_name, replace=replace, selected_fields=selected_fields, **kwargs
)
self.postgres_conn_id = postgres_conn_id
self.replace_index = replace_index
@cached_property
def postgres_hook(self) -> PostgresHook:
register_adapter(list, Json)
register_adapter(dict, Json)
return PostgresHook(database=self.database, postgres_conn_id=self.postgres_conn_id)
def get_sql_hook(self) -> PostgresHook:
return self.postgres_hook
def execute(self, context: Context) -> None:
if not self.bigquery_hook:
self.bigquery_hook = BigQueryHook(
gcp_conn_id=self.gcp_conn_id,
location=self.location,
impersonation_chain=self.impersonation_chain,
)
# Set source_project_dataset_table here, after hooks are initialized and project_id is available
project_id = self.bigquery_hook.project_id
self.source_project_dataset_table = f"{project_id}.{self.dataset_id}.{self.table_id}"
self.persist_links(context)
for rows in bigquery_get_data(
self.log,
self.dataset_id,
self.table_id,
self.bigquery_hook,
self.batch_size,
self.selected_fields,
):
self.postgres_hook.insert_rows(
table=self.target_table_name,
rows=rows,
target_fields=self.selected_fields,
replace=self.replace,
commit_every=self.batch_size,
replace_index=self.replace_index,
)
|
BigQueryToPostgresOperator
|
python
|
getsentry__sentry
|
tests/sentry/integrations/github/test_issues.py
|
{
"start": 3680,
"end": 26450
}
|
class ____(TestCase, PerformanceIssueTestCase, IntegratedApiTestCase):
@cached_property
def request(self):
return RequestFactory()
def setUp(self) -> None:
self.user = self.create_user()
self.organization = self.create_organization(owner=self.user)
self.integration = self.create_integration(
organization=self.organization,
provider="github",
external_id="github_external_id",
name="getsentry",
)
install = self.integration.get_installation(self.organization.id)
self.install = cast(GitHubIntegration, install)
self.min_ago = before_now(minutes=1).isoformat()
self.repo = "getsentry/sentry"
def _generate_pagination_responses(
self, api_url: str, data: Sequence[Mapping[str, str]], per_page_limit: int
) -> None:
pages = len(data) // per_page_limit + 1
for page in range(1, pages + 1):
params = (
f"per_page={per_page_limit}&page={page}"
if page != 1
else f"per_page={per_page_limit}"
)
link = _get_link_header(api_url, page, per_page_limit, pages)
responses.add(
responses.GET,
f"{api_url}?{params}",
json=_get_page_data(data, page, per_page_limit),
headers={"Link": link} if link else None,
)
@fixture(autouse=True)
def stub_get_jwt(self):
with patch.object(client, "get_jwt", return_value="jwt_token_1"):
yield
def _check_proxying(self) -> None:
assert self.install.org_integration is not None
for call_request in responses.calls:
request = call_request.request
assert request.headers[PROXY_OI_HEADER] == str(self.install.org_integration.id)
assert request.headers[PROXY_BASE_URL_HEADER] == "https://api.github.com"
assert PROXY_SIGNATURE_HEADER in request.headers
@responses.activate
def test_get_allowed_assignees(self) -> None:
responses.add(
responses.GET,
"https://api.github.com/repos/getsentry/sentry/assignees",
json=[{"login": "MeredithAnya"}],
)
assert self.install.get_allowed_assignees(self.repo) == (
("", "Unassigned"),
("MeredithAnya", "MeredithAnya"),
)
if self.should_call_api_without_proxying():
assert len(responses.calls) == 2
request = responses.calls[0].request
assert request.headers["Authorization"] == "Bearer jwt_token_1"
request = responses.calls[1].request
assert request.headers["Authorization"] == "Bearer token_1"
else:
self._check_proxying()
@responses.activate
def test_get_repo_labels(self) -> None:
"""Test that labels are fetched using pagination when the feature flag is enabled."""
responses.add(
responses.POST,
"https://api.github.com/app/installations/github_external_id/access_tokens",
json={"token": "token_1", "expires_at": "2018-10-11T22:14:10Z"},
)
per_page_limit = 5
# An extra label to test pagination
labels = [
{"name": "bug"},
{"name": "enhancement"},
{"name": "duplicate"},
{"name": "1"},
{"name": "10"},
{"name": "2"},
]
api_url = "https://api.github.com/repos/getsentry/sentry/labels"
self._generate_pagination_responses(api_url, labels, per_page_limit)
with patch(
"sentry.integrations.github.client.GitHubBaseClient.page_size", new=len(labels) - 1
):
# results should be sorted alphabetically
assert self.install.get_repo_labels("getsentry", "sentry") == (
("1", "1"),
("2", "2"),
("10", "10"),
("bug", "bug"),
("duplicate", "duplicate"),
("enhancement", "enhancement"),
)
if self.should_call_api_without_proxying():
assert len(responses.calls) == 2
request = responses.calls[0].request
assert request.headers["Authorization"] == "Bearer jwt_token_1"
request = responses.calls[1].request
assert request.headers["Authorization"] == "Bearer token_1"
else:
self._check_proxying()
@responses.activate
def test_create_issue(self) -> None:
responses.add(
responses.POST,
"https://api.github.com/repos/getsentry/sentry/issues",
json={
"number": 321,
"title": "hello",
"body": "This is the description",
"html_url": "https://github.com/getsentry/sentry/issues/231",
},
)
form_data = {
"repo": "getsentry/sentry",
"title": "hello",
"description": "This is the description",
}
assert self.install.create_issue(form_data) == {
"key": 321,
"description": "This is the description",
"title": "hello",
"url": "https://github.com/getsentry/sentry/issues/231",
"repo": "getsentry/sentry",
}
if self.should_call_api_without_proxying():
assert len(responses.calls) == 2
request = responses.calls[0].request
assert request.headers["Authorization"] == "Bearer jwt_token_1"
request = responses.calls[1].request
assert request.headers["Authorization"] == "Bearer token_1"
payload = orjson.loads(request.body)
assert payload == {
"body": "This is the description",
"assignee": None,
"title": "hello",
"labels": None,
}
else:
self._check_proxying()
@mock.patch("sentry.integrations.utils.metrics.EventLifecycle.record_event")
@responses.activate
def test_create_issue_with_invalid_field(self, mock_record: MagicMock) -> None:
responses.add(
responses.POST,
"https://api.github.com/repos/getsentry/sentry/issues",
status=422,
json={
"message": "Validation Failed",
"errors": [
{
"value": "example_username",
"resource": "Issue",
"field": "assignee",
"code": "invalid",
}
],
"documentation_url": "https://docs.github.com/rest/issues/issues#create-an-issue",
"status": "422",
},
)
form_data = {
"repo": "getsentry/sentry",
"title": "hello",
"description": "This is the description",
}
with pytest.raises(IntegrationFormError) as e:
self.install.create_issue(form_data)
assert e.value.field_errors == {
"assignee": "Got invalid value: example_username for field: assignee"
}
@responses.activate
def test_create_issue_with_bad_github_repo(self) -> None:
responses.add(
responses.POST,
"https://api.github.com/repos/getsentry/sentry/issues",
status=410,
json={
"message": "Issues are disabled for this repo",
"documentation_url": "https://docs.github.com/v3/issues/",
"status": "410",
},
)
form_data = {
"repo": "getsentry/sentry",
"title": "hello",
"description": "This is the description",
}
with pytest.raises(IntegrationConfigurationError) as e:
self.install.create_issue(form_data)
assert (
e.value.args[0]
== "Issues are disabled for this repository, please check your repository permissions"
)
@responses.activate
def test_create_issue_with_bad_github_repo_permissions(self) -> None:
responses.add(
responses.POST,
"https://api.github.com/repos/getsentry/sentry/issues",
status=403,
json={
"message": "Repository was archived so is read-only.",
"documentation_url": "https://docs.github.com/rest/issues/issues#create-an-issue",
"status": "403",
},
)
form_data = {
"repo": "getsentry/sentry",
"title": "hello",
"description": "This is the description",
}
with pytest.raises(IntegrationConfigurationError) as e:
self.install.create_issue(form_data)
assert e.value.args[0] == "Repository was archived so is read-only."
@responses.activate
def test_create_issue_raises_integration_error(self) -> None:
responses.add(
responses.POST,
"https://api.github.com/repos/getsentry/sentry/issues",
status=500,
json={
"message": "dang snap!",
"documentation_url": "https://docs.github.com/v3/issues/",
"status": "500",
},
)
form_data = {
"repo": "getsentry/sentry",
"title": "hello",
"description": "This is the description",
}
with pytest.raises(IntegrationError) as e:
self.install.create_issue(form_data)
assert e.value.args[0] == "Error Communicating with GitHub (HTTP 500): dang snap!"
def test_performance_issues_content(self) -> None:
"""Test that a GitHub issue created from a performance issue has the expected title and description"""
event = self.create_performance_issue()
assert event.group is not None
description = self.install.get_group_description(event.group, event)
assert "db - SELECT `books_author`.`id`, `books_author" in description
title = self.install.get_group_title(event.group, event)
assert title == "N+1 Query"
def test_generic_issues_content(self) -> None:
"""Test that a GitHub issue created from a generic issue has the expected title and description"""
occurrence = TEST_ISSUE_OCCURRENCE
event = self.store_event(
data={
"event_id": "a" * 32,
"message": "oh no",
"timestamp": before_now(minutes=1).isoformat(),
},
project_id=self.project.id,
)
group_event = event.for_group(event.groups[0])
group_event.occurrence = occurrence
description = self.install.get_group_description(group_event.group, group_event)
assert occurrence.evidence_display[0].value in description
assert occurrence.evidence_display[1].value in description
assert occurrence.evidence_display[2].value in description
title = self.install.get_group_title(group_event.group, group_event)
assert title == occurrence.issue_title
def test_error_issues_content(self) -> None:
"""Test that a GitHub issue created from an error issue has the expected title and descriptionn"""
event = self.store_event(
data={
"event_id": "a" * 32,
"message": "oh no",
"timestamp": before_now(minutes=1).isoformat(),
},
project_id=self.project.id,
)
assert event.group is not None
description = self.install.get_group_description(event.group, event)
assert "oh no" in description
title = self.install.get_group_title(event.group, event)
assert title == event.title
@responses.activate
def test_link_issue(self) -> None:
issue_id = "321"
responses.add(
responses.GET,
"https://api.github.com/repos/getsentry/sentry/issues/321",
json={
"number": issue_id,
"title": "hello",
"body": "This is the description",
"html_url": "https://github.com/getsentry/sentry/issues/231",
},
)
data = {"repo": "getsentry/sentry", "externalIssue": issue_id, "comment": "hello"}
assert self.install.get_issue(issue_id, data=data) == {
"key": issue_id,
"description": "This is the description",
"title": "hello",
"url": "https://github.com/getsentry/sentry/issues/231",
"repo": "getsentry/sentry",
}
if self.should_call_api_without_proxying():
assert len(responses.calls) == 2
request = responses.calls[0].request
assert request.headers["Authorization"] == "Bearer jwt_token_1"
request = responses.calls[1].request
assert request.headers["Authorization"] == "Bearer token_1"
else:
self._check_proxying()
@responses.activate
def test_repo_dropdown_choices(self) -> None:
event = self.store_event(
data={"event_id": "a" * 32, "timestamp": self.min_ago}, project_id=self.project.id
)
responses.add(
responses.GET,
"https://api.github.com/repos/getsentry/sentry/assignees",
json=[{"login": "MeredithAnya"}],
)
responses.add(
responses.GET,
"https://api.github.com/repos/getsentry/sentry/labels",
json=[{"name": "bug"}, {"name": "enhancement"}],
)
responses.add(
responses.GET,
"https://api.github.com/installation/repositories",
json={
"total_count": 2,
"repositories": [
{"full_name": "getsentry/sentry", "name": "sentry"},
{"full_name": "getsentry/other", "name": "other", "archived": True},
],
},
)
resp = self.install.get_create_issue_config(group=event.group, user=self.user)
assert resp[0]["choices"] == [("getsentry/sentry", "sentry")]
responses.add(
responses.GET,
"https://api.github.com/repos/getsentry/hello/assignees",
json=[{"login": "MeredithAnya"}],
)
responses.add(
responses.GET,
"https://api.github.com/repos/getsentry/hello/labels",
json=[{"name": "bug"}, {"name": "enhancement"}],
)
# create an issue
data = {"params": {"repo": "getsentry/hello"}}
resp = self.install.get_create_issue_config(group=event.group, user=self.user, **data)
assert resp[0]["choices"] == [
("getsentry/hello", "hello"),
("getsentry/sentry", "sentry"),
]
# link an issue
data = {"params": {"repo": "getsentry/hello"}}
assert event.group is not None
resp = self.install.get_link_issue_config(group=event.group, **data)
assert resp[0]["choices"] == [
("getsentry/hello", "hello"),
("getsentry/sentry", "sentry"),
]
@responses.activate
def test_linked_issue_comment(self) -> None:
issue_event = self.store_event(
data={"event_id": "a" * 32, "timestamp": self.min_ago}, project_id=self.project.id
)
feedback_issue = self.create_group(project=self.project, type=FeedbackGroup.type_id)
responses.add(
responses.GET,
"https://api.github.com/installation/repositories",
json={
"total_count": 2,
"repositories": [
{"full_name": "getsentry/sentry", "name": "sentry"},
{"full_name": "getsentry/other", "name": "other", "archived": True},
],
},
)
# link an issue
data = {"params": {"repo": "getsentry/hello"}}
assert issue_event.group is not None
resp = self.install.get_link_issue_config(group=issue_event.group, **data)
# assert comment wording for linked issue is correct
assert "Sentry Issue" in resp[2]["default"]
# link a feedback issue
resp = self.install.get_link_issue_config(group=feedback_issue, **data)
# assert comment wording for linked feedback is correct
assert "Sentry Feedback" in resp[2]["default"]
# ensure linked comment is a string
assert isinstance(resp[1]["default"], str)
assert isinstance(resp[0]["default"], str)
@responses.activate
def after_link_issue(self):
responses.add(
responses.POST,
"https://api.github.com/repos/getsentry/sentry/issues/321/comments",
json={"body": "hello"},
)
data = {"comment": "hello"}
external_issue = ExternalIssue.objects.create(
organization_id=self.organization.id,
integration_id=self.integration.id,
key="hello#321",
)
self.install.after_link_issue(external_issue, data=data)
request = responses.calls[0].request
assert request.headers["Authorization"] == b"Bearer jwt_token_1"
request = responses.calls[1].request
assert request.headers["Authorization"] == "Bearer token_1"
payload = orjson.loads(request.body)
assert payload == {"body": "hello"}
    @responses.activate
    def test_default_repo_link_fields(self) -> None:
        """The link-issue form pre-selects the repo configured as the project's
        issue default in the org-integration config."""
        responses.add(
            responses.GET,
            "https://api.github.com/installation/repositories",
            json={
                "total_count": 1,
                "repositories": [{"name": "sentry", "full_name": "getsentry/sentry"}],
            },
        )
        event = self.store_event(
            data={"event_id": "a" * 32, "timestamp": self.min_ago}, project_id=self.project.id
        )
        assert event.group is not None
        group = event.group
        assert self.install.org_integration is not None
        # Configure a per-project default repo for this project.
        integration_service.update_organization_integration(
            org_integration_id=self.install.org_integration.id,
            config={
                "project_issue_defaults": {str(group.project_id): {"repo": "getsentry/sentry"}}
            },
        )
        fields = self.install.get_link_issue_config(group)
        # Locate the repo field; assumes a "repo" field is always present —
        # ``repo_field`` would be unbound otherwise (NameError at the assert).
        for field in fields:
            if field["name"] == "repo":
                repo_field = field
                break
        assert repo_field["default"] == "getsentry/sentry"
    @responses.activate
    def test_default_repo_create_fields(self) -> None:
        """The create-issue form pre-selects the repo configured as the
        project's issue default in the org-integration config."""
        responses.add(
            responses.GET,
            "https://api.github.com/installation/repositories",
            json={
                "total_count": 1,
                "repositories": [{"name": "sentry", "full_name": "getsentry/sentry"}],
            },
        )
        # Assignee and label lookups are made against the default repo while
        # building the create form, so both endpoints need mocks.
        responses.add(
            responses.GET,
            "https://api.github.com/repos/getsentry/sentry/assignees",
            json=[{"login": "MeredithAnya"}],
        )
        responses.add(
            responses.GET,
            "https://api.github.com/repos/getsentry/sentry/labels",
            json=[{"name": "bug"}, {"name": "enhancement"}],
        )
        event = self.store_event(
            data={"event_id": "a" * 32, "timestamp": self.min_ago}, project_id=self.project.id
        )
        assert event.group is not None
        group = event.group
        assert self.install.org_integration is not None
        # Configure a per-project default repo for this project.
        integration_service.update_organization_integration(
            org_integration_id=self.install.org_integration.id,
            config={
                "project_issue_defaults": {str(group.project_id): {"repo": "getsentry/sentry"}}
            },
        )
        fields = self.install.get_create_issue_config(group, self.user)
        # Locate the repo field; assumes a "repo" field is always present —
        # ``repo_field`` would be unbound otherwise (NameError at the assert).
        for field in fields:
            if field["name"] == "repo":
                repo_field = field
                break
        assert repo_field["default"] == "getsentry/sentry"
    @responses.activate
    def test_default_repo_link_fields_no_repos(self) -> None:
        """With no repositories on the installation, the link form's repo field
        has an empty default and no choices."""
        responses.add(
            responses.GET,
            "https://api.github.com/installation/repositories",
            json={"total_count": 0, "repositories": []},
        )
        event = self.store_event(
            data={"event_id": "a" * 32, "timestamp": self.min_ago}, project_id=self.project.id
        )
        assert event.group is not None
        fields = self.install.get_link_issue_config(event.group)
        repo_field = [field for field in fields if field["name"] == "repo"][0]
        assert repo_field["default"] == ""
        assert repo_field["choices"] == []
@responses.activate
def test_default_repo_create_fields_no_repos(self) -> None:
responses.add(
responses.GET,
"https://api.github.com/installation/repositories",
json={"total_count": 0, "repositories": []},
)
event = self.store_event(
data={"event_id": "a" * 32, "timestamp": self.min_ago}, project_id=self.project.id
)
fields = self.install.get_create_issue_config(event.group, self.user)
repo_field = [field for field in fields if field["name"] == "repo"][0]
assignee_field = [field for field in fields if field["name"] == "assignee"][0]
assert repo_field["default"] == ""
assert repo_field["choices"] == []
assert assignee_field["default"] == ""
assert assignee_field["choices"] == []
def _get_page_data(
data: Sequence[Mapping[str, str]], page: int, per_page_limit: int
) -> Sequence[Mapping[str, str]]:
start = per_page_limit * (page - 1)
end = per_page_limit * page
return data[start:end]
def _get_link_header(api_url: str, page: int, per_page_limit: int, pages: int) -> str:
if pages == 1:
return ""
list_of_links = []
first_link = f'<{api_url}?per_page={per_page_limit}&page=1>; rel="first"'
last_link = f'<{api_url}?per_page={per_page_limit}&page={pages}>; rel="last"'
next_link = f'<{api_url}?per_page={per_page_limit}&page={page + 1}>; rel="next"'
prev_link = f'<{api_url}?per_page={per_page_limit}&page={page - 1}>; rel="prev"'
if page != 1:
list_of_links.append(first_link)
if page == pages:
list_of_links.append(last_link)
if page != pages:
list_of_links.append(next_link)
if page != 1:
list_of_links.append(prev_link)
return ", ".join(list_of_links) if len(list_of_links) > 0 else ""
|
GitHubIssueBasicTest
|
python
|
pydata__xarray
|
xarray/namedarray/_typing.py
|
{
"start": 5833,
"end": 6083
}
|
class ____(
_array[_ShapeType_co, _DType_co], Protocol[_ShapeType_co, _DType_co]
):
"""
Minimal chunked duck array.
Corresponds to np.ndarray.
"""
@property
def chunks(self) -> _Chunks: ...
@runtime_checkable
|
_chunkedarray
|
python
|
getsentry__sentry
|
src/sentry/api/endpoints/relay/project_ids.py
|
{
"start": 431,
"end": 1509
}
|
class ____(Endpoint):
publish_status = {
"POST": ApiPublishStatus.PRIVATE,
}
authentication_classes = (RelayAuthentication,)
permission_classes = (RelayPermission,)
owner = ApiOwner.OWNERS_INGEST
def post(self, request: Request) -> Response:
relay = request.relay
assert relay is not None # should be provided during Authentication
project_ids = {}
for public_key in request.relay_request_data.get("publicKeys") or ():
if not ProjectKey.looks_like_api_key(public_key):
continue
try:
pk = ProjectKey.objects.get_from_cache(public_key=public_key)
except ProjectKey.DoesNotExist:
continue
# NB: Do not validate pk here (is_active or store). Relay should
# also receive a mapping for disabled public keys and then perform
# the full project config fetch.
project_ids[public_key] = pk.project_id
return Response({"projectIds": project_ids}, status=200)
|
RelayProjectIdsEndpoint
|
python
|
kamyu104__LeetCode-Solutions
|
Python/palindrome-permutation-ii.py
|
{
"start": 1164,
"end": 1633
}
|
class ____(object):
def generatePalindromes(self, s):
"""
:type s: str
:rtype: List[str]
"""
cnt = collections.Counter(s)
mid = tuple(k for k, v in cnt.iteritems() if v % 2)
chars = ''.join(k * (v / 2) for k, v in cnt.iteritems())
return [''.join(half_palindrome + mid + half_palindrome[::-1]) \
for half_palindrome in set(itertools.permutations(chars))] if len(mid) < 2 else []
|
Solution2
|
python
|
imageio__imageio
|
imageio/plugins/swf.py
|
{
"start": 1892,
"end": 11755
}
|
class ____(Format):
"""See :mod:`imageio.plugins.swf`"""
def _can_read(self, request):
tmp = request.firstbytes[0:3].decode("ascii", "ignore")
if tmp in ("FWS", "CWS"):
return True
def _can_write(self, request):
if request.extension in self.extensions:
return True
# -- reader
class Reader(Format.Reader):
def _open(self, loop=False):
if not _swf:
load_lib()
self._arg_loop = bool(loop)
self._fp = self.request.get_file()
# Check file ...
tmp = self.request.firstbytes[0:3].decode("ascii", "ignore")
if tmp == "FWS":
pass # OK
elif tmp == "CWS":
# Compressed, we need to decompress
bb = self._fp.read()
bb = bb[:8] + zlib.decompress(bb[8:])
# Wrap up in a file object
self._fp = BytesIO(bb)
else:
raise IOError("This does not look like a valid SWF file")
# Skip first bytes. This also tests support got seeking ...
try:
self._fp.seek(8)
self._streaming_mode = False
except Exception:
self._streaming_mode = True
self._fp_read(8)
# Skip header
# Note that the number of frames is there, which we could
# potentially use, but the number of frames does not necessarily
# correspond to the number of images.
nbits = _swf.bits2int(self._fp_read(1), 5)
nbits = 5 + nbits * 4
Lrect = nbits / 8.0
if Lrect % 1:
Lrect += 1
Lrect = int(Lrect)
self._fp_read(Lrect + 3)
# Now the rest is basically tags ...
self._imlocs = [] # tuple (loc, sze, T, L1)
if not self._streaming_mode:
# Collect locations of frame, while skipping through the data
# This does not read any of the tag *data*.
try:
while True:
isimage, sze, T, L1 = self._read_one_tag()
loc = self._fp.tell()
if isimage:
# Still need to check if the format is right
format = ord(self._fp_read(3)[2:])
if format == 5: # RGB or RGBA lossless
self._imlocs.append((loc, sze, T, L1))
self._fp.seek(loc + sze) # Skip over tag
except IndexError:
pass # done reading
def _fp_read(self, n):
return read_n_bytes(self._fp, n)
def _close(self):
pass
def _get_length(self):
if self._streaming_mode:
return np.inf
else:
return len(self._imlocs)
def _get_data(self, index):
# Check index
if index < 0:
raise IndexError("Index in swf file must be > 0")
if not self._streaming_mode:
if self._arg_loop and self._imlocs:
index = index % len(self._imlocs)
if index >= len(self._imlocs):
raise IndexError("Index out of bounds")
if self._streaming_mode:
# Walk over tags until we find an image
while True:
isimage, sze, T, L1 = self._read_one_tag()
bb = self._fp_read(sze) # always read data
if isimage:
im = _swf.read_pixels(bb, 0, T, L1) # can be None
if im is not None:
return im, {}
else:
# Go to corresponding location, read data, and convert to image
loc, sze, T, L1 = self._imlocs[index]
self._fp.seek(loc)
bb = self._fp_read(sze)
# Read_pixels should return ndarry, since we checked format
im = _swf.read_pixels(bb, 0, T, L1)
return im, {}
def _read_one_tag(self):
"""
Return (True, loc, size, T, L1) if an image that we can read.
Return (False, loc, size, T, L1) if any other tag.
"""
# Get head
head = self._fp_read(6)
if not head: # pragma: no cover
raise IndexError("Reached end of swf movie")
# Determine type and length
T, L1, L2 = _swf.get_type_and_len(head)
if not L2: # pragma: no cover
raise RuntimeError("Invalid tag length, could not proceed")
# Read data
isimage = False
sze = L2 - 6
# bb = self._fp_read(L2 - 6)
# Parse tag
if T == 0:
raise IndexError("Reached end of swf movie")
elif T in [20, 36]:
isimage = True
# im = _swf.read_pixels(bb, 0, T, L1) # can be None
elif T in [6, 21, 35, 90]: # pragma: no cover
logger.warning("Ignoring JPEG image: cannot read JPEG.")
else:
pass # Not an image tag
# Done. Return image. Can be None
# return im
return isimage, sze, T, L1
def _get_meta_data(self, index):
return {} # This format does not support meta data
# -- writer
class Writer(Format.Writer):
def _open(self, fps=12, loop=True, html=False, compress=False):
if not _swf:
load_lib()
self._arg_fps = int(fps)
self._arg_loop = bool(loop)
self._arg_html = bool(html)
self._arg_compress = bool(compress)
self._fp = self.request.get_file()
self._framecounter = 0
self._framesize = (100, 100)
# For compress, we use an in-memory file object
if self._arg_compress:
self._fp_real = self._fp
self._fp = BytesIO()
def _close(self):
self._complete()
# Get size of (uncompressed) file
sze = self._fp.tell()
# set nframes, this is in the potentially compressed region
self._fp.seek(self._location_to_save_nframes)
self._fp.write(_swf.int2uint16(self._framecounter))
# Compress body?
if self._arg_compress:
bb = self._fp.getvalue()
self._fp = self._fp_real
self._fp.write(bb[:8])
self._fp.write(zlib.compress(bb[8:]))
sze = self._fp.tell() # renew sze value
# set size
self._fp.seek(4)
self._fp.write(_swf.int2uint32(sze))
self._fp = None # Disable
# Write html?
if self._arg_html and os.path.isfile(self.request.filename):
dirname, fname = os.path.split(self.request.filename)
filename = os.path.join(dirname, fname[:-4] + ".html")
w, h = self._framesize
html = HTML % (fname, w, h, fname)
with open(filename, "wb") as f:
f.write(html.encode("utf-8"))
def _write_header(self, framesize, fps):
self._framesize = framesize
# Called as soon as we know framesize; when we get first frame
bb = b""
bb += "FC"[self._arg_compress].encode("ascii")
bb += "WS".encode("ascii") # signature bytes
bb += _swf.int2uint8(8) # version
bb += "0000".encode("ascii") # FileLength (leave open for now)
bb += (
_swf.Tag().make_rect_record(0, framesize[0], 0, framesize[1]).tobytes()
)
bb += _swf.int2uint8(0) + _swf.int2uint8(fps) # FrameRate
self._location_to_save_nframes = len(bb)
bb += "00".encode("ascii") # nframes (leave open for now)
self._fp.write(bb)
# Write some initial tags
taglist = _swf.FileAttributesTag(), _swf.SetBackgroundTag(0, 0, 0)
for tag in taglist:
self._fp.write(tag.get_tag())
def _complete(self):
# What if no images were saved?
if not self._framecounter:
self._write_header((10, 10), self._arg_fps)
# Write stop tag if we do not loop
if not self._arg_loop:
self._fp.write(_swf.DoActionTag("stop").get_tag())
# finish with end tag
self._fp.write("\x00\x00".encode("ascii"))
def _append_data(self, im, meta):
# Correct shape and type
if im.ndim == 3 and im.shape[-1] == 1:
im = im[:, :, 0]
im = image_as_uint(im, bitdepth=8)
# Get frame size
wh = im.shape[1], im.shape[0]
# Write header on first frame
isfirstframe = False
if self._framecounter == 0:
isfirstframe = True
self._write_header(wh, self._arg_fps)
# Create tags
bm = _swf.BitmapTag(im)
sh = _swf.ShapeTag(bm.id, (0, 0), wh)
po = _swf.PlaceObjectTag(1, sh.id, move=(not isfirstframe))
sf = _swf.ShowFrameTag()
# Write tags
for tag in [bm, sh, po, sf]:
self._fp.write(tag.get_tag())
self._framecounter += 1
def set_meta_data(self, meta):
pass
HTML = """
<!DOCTYPE html>
<html>
<head>
<title>Show Flash animation %s</title>
</head>
<body>
<embed width="%i" height="%i" src="%s">
</html>
"""
|
SWFFormat
|
python
|
huggingface__transformers
|
tests/models/pop2piano/test_modeling_pop2piano.py
|
{
"start": 25264,
"end": 29613
}
|
class ____(unittest.TestCase):
@slow
def test_mel_conditioner_integration(self):
composer = "composer1"
model = Pop2PianoForConditionalGeneration.from_pretrained("sweetcocoa/pop2piano")
input_embeds = torch.ones([10, 100, 512])
composer_value = model.generation_config.composer_to_feature_token[composer]
composer_value = torch.tensor(composer_value)
composer_value = composer_value.repeat(input_embeds.size(0))
outputs = model.mel_conditioner(
input_embeds, composer_value, min(model.generation_config.composer_to_feature_token.values())
)
# check shape
self.assertEqual(outputs.size(), torch.Size([10, 101, 512]))
# check values
EXPECTED_OUTPUTS = torch.tensor(
[[1.0475305318832397, 0.29052114486694336, -0.47778210043907166], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]]
)
torch.testing.assert_close(outputs[0, :3, :3], EXPECTED_OUTPUTS, rtol=1e-4, atol=1e-4)
@slow
@require_essentia
@require_librosa
@require_scipy
def test_full_model_integration(self):
if is_librosa_available() and is_scipy_available() and is_essentia_available() and is_torch_available():
from transformers import Pop2PianoProcessor
speech_input1 = np.zeros([1_000_000], dtype=np.float32)
sampling_rate = 44_100
processor = Pop2PianoProcessor.from_pretrained("sweetcocoa/pop2piano")
input_features = processor.feature_extractor(
speech_input1, sampling_rate=sampling_rate, return_tensors="pt"
)
model = Pop2PianoForConditionalGeneration.from_pretrained("sweetcocoa/pop2piano")
outputs = model.generate(
input_features=input_features["input_features"], return_dict_in_generate=True
).sequences
# check for shapes
self.assertEqual(outputs.size(0), 70)
# check for values
self.assertEqual(outputs[0, :2].detach().cpu().numpy().tolist(), [0, 1])
# This is the test for a real music from K-Pop genre.
@slow
@require_essentia
@require_librosa
@require_scipy
def test_real_music(self):
if is_librosa_available() and is_scipy_available() and is_essentia_available() and is_torch_available():
from transformers import Pop2PianoFeatureExtractor, Pop2PianoTokenizer
model = Pop2PianoForConditionalGeneration.from_pretrained("sweetcocoa/pop2piano")
model.eval()
feature_extractor = Pop2PianoFeatureExtractor.from_pretrained("sweetcocoa/pop2piano")
tokenizer = Pop2PianoTokenizer.from_pretrained("sweetcocoa/pop2piano")
ds = load_dataset("sweetcocoa/pop2piano_ci", split="test")
output_fe = feature_extractor(
ds["audio"][0]["array"], sampling_rate=ds["audio"][0]["sampling_rate"], return_tensors="pt"
)
output_model = model.generate(input_features=output_fe["input_features"], composer="composer1")
output_tokenizer = tokenizer.batch_decode(token_ids=output_model, feature_extractor_output=output_fe)
pretty_midi_object = output_tokenizer["pretty_midi_objects"][0]
# Checking if no of notes are same
self.assertEqual(len(pretty_midi_object.instruments[0].notes), 59)
predicted_timings = []
for i in pretty_midi_object.instruments[0].notes:
predicted_timings.append(i.start)
# Checking note start timings(first 6)
EXPECTED_START_TIMINGS = [
0.4876190423965454,
0.7314285635948181,
0.9752380847930908,
1.4396371841430664,
1.6718367338180542,
1.904036283493042,
]
np.allclose(EXPECTED_START_TIMINGS, predicted_timings[:6])
# Checking note end timings(last 6)
EXPECTED_END_TIMINGS = [
12.341403007507324,
12.567797183990479,
12.567797183990479,
12.567797183990479,
12.794191360473633,
12.794191360473633,
]
np.allclose(EXPECTED_END_TIMINGS, predicted_timings[-6:])
|
Pop2PianoModelIntegrationTests
|
python
|
gevent__gevent
|
src/gevent/tests/test__makefile_ref.py
|
{
"start": 19065,
"end": 19536
}
|
class ____(Closing):
def __init__(self, task, listener, *other_sockets):
super(CleaningUp, self).__init__(listener, *other_sockets)
self.task = task
self.listener = listener
def __enter__(self):
return self.accept(self.listener)
def __exit__(self, t, v, tb):
try:
Closing.__exit__(self, t, v, tb)
finally:
self.listener = None
if __name__ == '__main__':
greentest.main()
|
CleaningUp
|
python
|
PrefectHQ__prefect
|
src/prefect/client/schemas/filters.py
|
{
"start": 23914,
"end": 24200
}
|
class ____(PrefectBaseModel):
"""Filter by `BlockDocument.is_anonymous`."""
eq_: Optional[bool] = Field(
default=None,
description=(
"Filter block documents for only those that are or are not anonymous."
),
)
|
BlockDocumentFilterIsAnonymous
|
python
|
jazzband__prettytable
|
tests/test_prettytable.py
|
{
"start": 45248,
"end": 45562
}
|
class ____:
def test_default_repr(self, row_prettytable: PrettyTable) -> None:
assert row_prettytable.__str__() == row_prettytable.__repr__()
def test_jupyter_repr(self, row_prettytable: PrettyTable) -> None:
assert row_prettytable._repr_html_() == row_prettytable.get_html_string()
|
TestRepr
|
python
|
pytorch__pytorch
|
test/dynamo/test_modules.py
|
{
"start": 5198,
"end": 5695
}
|
class ____(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.layer1 = BasicModule()
self.layer2 = BasicModule()
self.scale = torch.randn(1, 10)
@classmethod
def call_and_scale(cls, scale, mod, x):
x = mod(x)
return x * scale
def forward(self, x):
x1 = self.call_and_scale(self.scale, self.layer1, x)
x2 = self.call_and_scale(self.scale, self.layer2, x)
return x1 + x2
|
ModuleClassMethodCall
|
python
|
tensorflow__tensorflow
|
tensorflow/python/kernel_tests/distributions/bijector_test.py
|
{
"start": 7468,
"end": 7946
}
|
class ____(bijector.Bijector):
"""Only used for jacobian calculations."""
def __init__(self, forward_min_event_ndims=0):
super().__init__(
validate_args=False,
is_constant_jacobian=True,
forward_min_event_ndims=forward_min_event_ndims,
name="c")
def _inverse_log_det_jacobian(self, y):
return constant_op.constant(2., y.dtype)
def _forward_log_det_jacobian(self, x):
return constant_op.constant(-2., x.dtype)
|
ConstantJacobian
|
python
|
dagster-io__dagster
|
python_modules/dagster/dagster/_core/execution/plan/step.py
|
{
"start": 3033,
"end": 6529
}
|
class ____( # pyright: ignore[reportIncompatibleVariableOverride]
NamedTuple(
"_ExecutionStep",
[
("handle", Union[StepHandle, ResolvedFromDynamicStepHandle]),
("job_name", str),
("step_input_dict", Mapping[str, StepInput]),
("step_output_dict", Mapping[str, StepOutput]),
("tags", Mapping[str, str]),
("logging_tags", Mapping[str, str]),
("key", str),
("pool", Optional[str]),
],
),
IExecutionStep,
):
"""A fully resolved step in the execution graph."""
def __new__(
cls,
handle: Union[StepHandle, ResolvedFromDynamicStepHandle],
job_name: str,
step_inputs: Sequence[StepInput],
step_outputs: Sequence[StepOutput],
tags: Optional[Mapping[str, str]],
pool: Optional[str],
logging_tags: Optional[Mapping[str, str]] = None,
key: Optional[str] = None,
):
return super().__new__(
cls,
handle=check.inst_param(handle, "handle", (StepHandle, ResolvedFromDynamicStepHandle)),
job_name=check.str_param(job_name, "job_name"),
step_input_dict={
si.name: si
for si in check.sequence_param(step_inputs, "step_inputs", of_type=StepInput)
},
step_output_dict={
so.name: so
for so in check.sequence_param(step_outputs, "step_outputs", of_type=StepOutput)
},
tags=tags or {},
pool=check.opt_str_param(pool, "pool"),
logging_tags=merge_dicts(
{
"step_key": handle.to_key(),
"job_name": job_name,
"op_name": handle.node_handle.name,
},
check.opt_mapping_param(logging_tags, "logging_tags"),
),
# mypy can't tell that if default is set, this is guaranteed to be a str
key=cast("str", check.opt_str_param(key, "key", default=handle.to_key())),
)
@property
def node_handle(self) -> "NodeHandle":
return self.handle.node_handle
@property
def op_name(self) -> str:
return self.node_handle.name
@property
def kind(self) -> StepKind:
return StepKind.COMPUTE
@property
def step_outputs(self) -> Sequence[StepOutput]:
return list(self.step_output_dict.values())
@property
def step_inputs(self) -> Sequence[StepInput]:
return list(self.step_input_dict.values())
def has_step_output(self, name: str) -> bool:
check.str_param(name, "name")
return name in self.step_output_dict
def step_output_named(self, name: str) -> StepOutput:
check.str_param(name, "name")
return self.step_output_dict[name]
def has_step_input(self, name: str) -> bool:
check.str_param(name, "name")
return name in self.step_input_dict
def step_input_named(self, name: str) -> StepInput:
check.str_param(name, "name")
return self.step_input_dict[name]
def get_execution_dependency_keys(self) -> set[str]:
deps = set()
for inp in self.step_inputs:
deps.update(inp.dependency_keys)
return deps
def get_mapping_key(self) -> Optional[str]:
if isinstance(self.handle, ResolvedFromDynamicStepHandle):
return self.handle.mapping_key
return None
|
ExecutionStep
|
python
|
getsentry__sentry
|
src/sentry/discover/translation/mep_to_eap.py
|
{
"start": 8725,
"end": 17939
}
|
class ____(NodeVisitor):
def __init__(self):
self.dropped_fields = []
super().__init__()
def visit_field_value(self, node, children):
if node.text in COLUMNS_TO_DROP:
self.dropped_fields.append(node.text)
return node.text
return column_switcheroo(node.text)[0]
def visit_function_value(self, node, children):
new_functions, dropped_functions = translate_columns([node.text])
self.dropped_fields.extend(dropped_functions)
return new_functions[0]
def generic_visit(self, node, children):
return children or node.text
def translate_query(query: str):
flattened_query: list[str] = []
tree = event_search_grammar.parse(query)
parsed = TranslationVisitor().visit(tree)
_flatten(parsed, flattened_query)
return apply_is_segment_condition("".join(flattened_query))
def translate_columns(columns, need_equation=False):
"""
@param columns: list of columns to translate
@param need_equation: whether to translate some of the functions to equation notation (usually if
the function is being used in a field/orderby)
"""
translated_columns = []
# need to drop columns after they have been translated to avoid issues with percentile()
final_columns, dropped_columns = drop_unsupported_columns(columns)
for column in final_columns:
match = fields.is_function(column)
if not match:
translated_columns.append(column_switcheroo(column)[0])
continue
translated_func, did_update = function_switcheroo(column)
if did_update:
translated_func = add_equation_prefix_if_needed(translated_func, need_equation)
translated_columns.append(translated_func)
continue
raw_function = match.group("function")
arguments = fields.parse_arguments(raw_function, match.group("columns"))
translated_arguments = []
for argument in arguments:
translated_arguments.append(column_switcheroo(argument)[0])
new_arg = ",".join(translated_arguments)
new_function = add_equation_prefix_if_needed(f"{raw_function}({new_arg})", need_equation)
translated_columns.append(new_function)
return translated_columns, dropped_columns
def translate_equations(equations):
"""
This is used to translate arithmetic equations to EAP compatible equations.
It ideally takes in equations with equation notation and returns the EAP equation with equation notation.
@param equations: list of equations to translate
@return: (translated_equations, dropped_equations)
"""
if equations is None:
return None, None
translated_equations = []
dropped_equations = []
for equation in equations:
flattened_equation: list[str] = []
# strip equation prefix
if arithmetic.is_equation(equation):
arithmetic_equation = arithmetic.strip_equation(equation)
else:
arithmetic_equation = equation
# case where equation is empty, don't try to parse it
if arithmetic_equation == "":
translated_equations.append(equation)
continue
tree = arithmetic.arithmetic_grammar.parse(arithmetic_equation)
translation_visitor = ArithmeticTranslationVisitor()
parsed = translation_visitor.visit(tree)
_flatten(parsed, flattened_equation)
# record dropped fields and equations and skip these translations
if len(translation_visitor.dropped_fields) > 0:
dropped_equations.append(
{"equation": equation, "reason": translation_visitor.dropped_fields}
)
continue
# translated equations are not returned with the equation prefix
translated_equation = "equation|" + "".join(flattened_equation)
translated_equations.append(translated_equation)
return translated_equations, dropped_equations
def translate_orderbys(orderbys, equations, dropped_equations, new_equations):
"""
This is used to translate orderbys to EAP compatible orderbys.
It ideally takes in orderbys with equation notation, function notation or fields and returns the EAP orderby with the same notation.
@return: (translated_orderbys, dropped_orderbys)
"""
if orderbys is None:
return None, None
translated_orderbys = []
dropped_orderbys = []
for orderby in orderbys:
is_negated = False
if orderby.startswith("-"):
is_negated = True
orderby_without_neg = orderby[1:]
else:
orderby_without_neg = orderby
dropped_orderby_reason = None
decoded_orderby = None
# if orderby is a predefined equation (these are usually in the format equation[index])
if re.match(INDEXED_EQUATIONS_PATTERN, orderby_without_neg):
equation_index = int(orderby_without_neg.split("[")[1].split("]")[0])
# checks if equation index is out of bounds
if len(equations) < equation_index + 1:
dropped_orderby_reason = "equation issue"
# if there are equations
elif len(equations) > 0:
selected_equation = equations[equation_index]
# if equation was dropped, drop the orderby too
if selected_equation in dropped_equations:
dropped_orderby_reason = "dropped"
decoded_orderby = (
selected_equation if not is_negated else f"-{selected_equation}"
)
else:
# check where equation is in list of new equations
translated_equation_list, _ = translate_equations([selected_equation])
try:
translated_equation = translated_equation_list[0]
new_equation_index = new_equations.index(translated_equation)
translated_orderby = [f"equation[{new_equation_index}]"]
except (IndexError, ValueError):
dropped_orderby_reason = "dropped"
decoded_orderby = (
selected_equation if not is_negated else f"-{selected_equation}"
)
else:
dropped_orderby_reason = "no equations"
decoded_orderby = orderby
# if orderby is an equation
elif arithmetic.is_equation(orderby_without_neg):
translated_orderby, dropped_orderby_equation = translate_equations(
[orderby_without_neg]
)
if len(dropped_orderby_equation) > 0:
dropped_orderby_reason = dropped_orderby_equation[0]["reason"]
# if orderby is a field/function
else:
translated_orderby, dropped_orderby = translate_columns(
[orderby_without_neg], need_equation=True
)
if len(dropped_orderby) > 0:
dropped_orderby_reason = dropped_orderby
# add translated orderby to the list and record dropped orderbys
if dropped_orderby_reason is None:
translated_orderbys.append(
translated_orderby[0] if not is_negated else f"-{translated_orderby[0]}"
)
else:
dropped_orderbys.append(
{
"orderby": orderby if decoded_orderby is None else decoded_orderby,
"reason": dropped_orderby_reason,
}
)
continue
return translated_orderbys, dropped_orderbys
def translate_mep_to_eap(query_parts: QueryParts):
"""
This is a utility used to translate transactions/metrics/mep
queries to eap queries. It takes in event query syntax (EQS)
as input and outputs EQS as well. This will allow us to
translate transaction queries from the frontend on the fly
and also allow us to migrate all our Discover/Dashboard/Alert
datamodels to store EAP compatible EQS queries.
"""
new_query = translate_query(query_parts["query"])
new_columns, dropped_columns = translate_columns(
query_parts["selected_columns"], need_equation=True
)
new_equations, dropped_equations = translate_equations(query_parts["equations"])
equations = query_parts["equations"] if query_parts["equations"] is not None else []
dropped_equations_without_reasons = (
[dropped_equation["equation"] for dropped_equation in dropped_equations]
if dropped_equations is not None
else []
)
new_orderbys, dropped_orderbys = translate_orderbys(
query_parts["orderby"], equations, dropped_equations_without_reasons, new_equations
)
eap_query = QueryParts(
query=new_query,
selected_columns=new_columns,
equations=new_equations,
orderby=new_orderbys,
)
dropped_fields = DroppedFields(
selected_columns=dropped_columns,
equations=dropped_equations,
orderby=dropped_orderbys,
)
return eap_query, dropped_fields
|
ArithmeticTranslationVisitor
|
python
|
matplotlib__matplotlib
|
lib/matplotlib/colorbar.py
|
{
"start": 5106,
"end": 5887
}
|
class ____(mspines.Spine):
def __init__(self, axes):
self._ax = axes
super().__init__(axes, 'colorbar', mpath.Path(np.empty((0, 2))))
mpatches.Patch.set_transform(self, axes.transAxes)
def get_window_extent(self, renderer=None):
# This Spine has no Axis associated with it, and doesn't need to adjust
# its location, so we can directly get the window extent from the
# super-super-class.
return mpatches.Patch.get_window_extent(self, renderer=renderer)
def set_xy(self, xy):
self._path = mpath.Path(xy, closed=True)
self._xy = xy
self.stale = True
def draw(self, renderer):
ret = mpatches.Patch.draw(self, renderer)
self.stale = False
return ret
|
_ColorbarSpine
|
python
|
ray-project__ray
|
python/ray/data/_internal/execution/operators/map_transformer.py
|
{
"start": 12349,
"end": 14161
}
|
class ____(MapTransformFn):
"""A block-to-block MapTransformFn."""
def __init__(
self,
block_fn: MapTransformCallable[Block, Block],
*,
is_udf: bool = False,
disable_block_shaping: bool = False,
output_block_size_option: Optional[OutputBlockSizeOption] = None,
):
"""
Initializes the object with a transformation function, accompanying options, and
configuration for handling blocks during processing.
Args:
block_fn: Callable function to apply a transformation to a block.
is_udf: Specifies if the transformation function is a user-defined
function (defaults to ``False``).
disable_block_shaping: Disables block-shaping, making transformer to
produce blocks as is.
output_block_size_option: (Optional) Configure output block sizing.
"""
super().__init__(
input_type=MapTransformFnDataType.Block,
is_udf=is_udf,
output_block_size_option=output_block_size_option,
)
self._block_fn = block_fn
self._disable_block_shaping = disable_block_shaping
def _apply_transform(
self, ctx: TaskContext, blocks: Iterable[Block]
) -> Iterable[Block]:
yield from self._block_fn(blocks, ctx)
def _post_process(self, results: Iterable[MapTransformFnData]) -> Iterable[Block]:
# Short-circuit for block transformations for which no
# block-shaping is required
if self._disable_block_shaping:
return results
return self._shape_blocks(results)
def __repr__(self) -> str:
return (
f"BlockMapTransformFn({self._block_fn=}, {self._output_block_size_option=})"
)
|
BlockMapTransformFn
|
python
|
jupyterlab__jupyterlab
|
jupyterlab/handlers/build_handler.py
|
{
"start": 530,
"end": 4095
}
|
class ____:
building = False
executor = ThreadPoolExecutor(max_workers=5)
canceled = False
_canceling = False
_kill_event = None
_future = None
def __init__(self, core_mode, app_options=None):
app_options = _ensure_options(app_options)
self.log = app_options.logger
self.core_mode = core_mode
self.app_dir = app_options.app_dir
self.core_config = app_options.core_config
self.labextensions_path = app_options.labextensions_path
@gen.coroutine
def get_status(self):
if self.core_mode:
raise gen.Return({"status": "stable", "message": ""})
if self.building:
raise gen.Return({"status": "building", "message": ""})
try:
messages = yield self._run_build_check(
self.app_dir, self.log, self.core_config, self.labextensions_path
)
status = "needed" if messages else "stable"
if messages:
self.log.warning("Build recommended")
[self.log.warning(m) for m in messages]
else:
self.log.info("Build is up to date")
except ValueError:
self.log.warning("Could not determine jupyterlab build status without nodejs")
status = "stable"
messages = []
raise gen.Return({"status": status, "message": "\n".join(messages)})
@gen.coroutine
def build(self):
if self._canceling:
msg = "Cancel in progress"
raise ValueError(msg)
if not self.building:
self.canceled = False
self._future = future = gen.Future()
self.building = True
self._kill_event = evt = Event()
try:
yield self._run_build(
self.app_dir, self.log, evt, self.core_config, self.labextensions_path
)
future.set_result(True)
except Exception as e:
if str(e) == "Aborted":
future.set_result(False)
else:
future.set_exception(e)
finally:
self.building = False
try:
yield self._future
except Exception as e:
raise e
@gen.coroutine
def cancel(self):
if not self.building:
msg = "No current build"
raise ValueError(msg)
self._canceling = True
yield self._future
self._canceling = False
self.canceled = True
@run_on_executor
def _run_build_check(self, app_dir, logger, core_config, labextensions_path):
return build_check(
app_options=AppOptions(
app_dir=app_dir,
logger=logger,
core_config=core_config,
labextensions_path=labextensions_path,
)
)
@run_on_executor
def _run_build(self, app_dir, logger, kill_event, core_config, labextensions_path):
app_options = AppOptions(
app_dir=app_dir,
logger=logger,
kill_event=kill_event,
core_config=core_config,
labextensions_path=labextensions_path,
)
try:
return build(app_options=app_options)
except Exception:
if self._kill_event.is_set():
return
self.log.warning("Build failed, running a clean and rebuild")
clean(app_options=app_options)
return build(app_options=app_options)
|
Builder
|
python
|
charliermarsh__ruff
|
crates/ruff_linter/resources/test/fixtures/pyflakes/F842.py
|
{
"start": 76,
"end": 125
}
|
class ____:
name: str = "Bob"
age: int = 18
|
B
|
python
|
arrow-py__arrow
|
tests/test_locales.py
|
{
"start": 69153,
"end": 69622
}
|
class ____:
def test_format_timeframe(self):
assert self.locale._format_timeframe("hours", 2) == "2 horoj"
assert self.locale._format_timeframe("hour", 0) == "un horo"
assert self.locale._format_timeframe("hours", -2) == "2 horoj"
assert self.locale._format_timeframe("now", 0) == "nun"
def test_ordinal_number(self):
assert self.locale.ordinal_number(1) == "1a"
@pytest.mark.usefixtures("lang_locale")
|
TestEsperantoLocale
|
python
|
bokeh__bokeh
|
tests/unit/bokeh/core/property/test_dataspec.py
|
{
"start": 2666,
"end": 4036
}
|
class ____:
def test_default_value(self) -> None:
class Foo(HasProps):
x = bcpd.AngleSpec(default=14)
a = Foo()
assert a.x == 14
assert a.x_units == 'rad'
def test_setting_dict_sets_units(self) -> None:
class Foo(HasProps):
x = bcpd.AngleSpec(default=14)
a = Foo()
assert a.x == 14
assert a.x_units == 'rad'
a.x = {'value': 180, 'units': 'deg'}
assert a.x == value(180)
assert a.x_units == 'deg'
def test_setting_json_sets_units_keeps_dictness(self) -> None:
class Foo(HasProps):
x = bcpd.AngleSpec(default=14)
a = Foo()
assert a.x == 14
assert a.x_units == 'rad'
a.set_from_json('x', {'value': 180, 'units': 'deg'})
assert a.x == 180
assert a.x_units == 'deg'
def test_setting_dict_does_not_modify_original_dict(self) -> None:
class Foo(HasProps):
x = bcpd.AngleSpec(default=14)
a = Foo()
assert a.x == 14
assert a.x_units == 'rad'
new_value = {'value': 180, 'units': 'deg'}
new_value_copy = copy(new_value)
assert new_value_copy == new_value
a.x = new_value
assert a.x == value(180)
assert a.x_units == 'deg'
assert new_value_copy == new_value
|
Test_AngleSpec
|
python
|
pandas-dev__pandas
|
pandas/core/groupby/ops.py
|
{
"start": 40846,
"end": 41195
}
|
class ____(DataSplitter):
def _chop(self, sdata: DataFrame, slice_obj: slice) -> DataFrame:
# Fastpath equivalent to:
# return sdata.iloc[slice_obj]
mgr = sdata._mgr.get_slice(slice_obj, axis=1)
df = sdata._constructor_from_mgr(mgr, axes=mgr.axes)
return df.__finalize__(sdata, method="groupby")
|
FrameSplitter
|
python
|
doocs__leetcode
|
solution/2900-2999/2915.Length of the Longest Subsequence That Sums to Target/Solution.py
|
{
"start": 0,
"end": 461
}
|
class ____:
def lengthOfLongestSubsequence(self, nums: List[int], target: int) -> int:
n = len(nums)
f = [[-inf] * (target + 1) for _ in range(n + 1)]
f[0][0] = 0
for i, x in enumerate(nums, 1):
for j in range(target + 1):
f[i][j] = f[i - 1][j]
if j >= x:
f[i][j] = max(f[i][j], f[i - 1][j - x] + 1)
return -1 if f[n][target] <= 0 else f[n][target]
|
Solution
|
python
|
lazyprogrammer__machine_learning_examples
|
ab_testing/ucb1.py
|
{
"start": 630,
"end": 2198
}
|
class ____:
def __init__(self, p):
# p: the win rate
self.p = p
self.p_estimate = 0.
self.N = 0. # num samples collected so far
def pull(self):
# draw a 1 with probability p
return np.random.random() < self.p
def update(self, x):
self.N += 1.
self.p_estimate = ((self.N - 1)*self.p_estimate + x) / self.N
def ucb(mean, n, nj):
return mean + np.sqrt(2*np.log(n) / nj)
def run_experiment():
bandits = [Bandit(p) for p in BANDIT_PROBABILITIES]
rewards = np.empty(NUM_TRIALS)
total_plays = 0
# initialization: play each bandit once
for j in range(len(bandits)):
x = bandits[j].pull()
total_plays += 1
bandits[j].update(x)
for i in range(NUM_TRIALS):
j = np.argmax([ucb(b.p_estimate, total_plays, b.N) for b in bandits])
x = bandits[j].pull()
total_plays += 1
bandits[j].update(x)
# for the plot
rewards[i] = x
cumulative_average = np.cumsum(rewards) / (np.arange(NUM_TRIALS) + 1)
# plot moving average ctr
plt.plot(cumulative_average)
plt.plot(np.ones(NUM_TRIALS)*np.max(BANDIT_PROBABILITIES))
plt.xscale('log')
plt.show()
# plot moving average ctr linear
plt.plot(cumulative_average)
plt.plot(np.ones(NUM_TRIALS)*np.max(BANDIT_PROBABILITIES))
plt.show()
for b in bandits:
print(b.p_estimate)
print("total reward earned:", rewards.sum())
print("overall win rate:", rewards.sum() / NUM_TRIALS)
print("num times selected each bandit:", [b.N for b in bandits])
return cumulative_average
if __name__ == '__main__':
run_experiment()
|
Bandit
|
python
|
pytorch__pytorch
|
torch/distributed/_symmetric_memory/__init__.py
|
{
"start": 57158,
"end": 70707
}
|
class ____(_Work):
def __init__(self) -> None:
super().__init__()
self.event = torch.cuda.Event()
self.event.record()
def wait(self, timeout: timedelta = timedelta(seconds=0)) -> bool:
self.event.wait()
return True
"""
NOTE [low-contention collectives]
When a collective is overlapped with abundant compute, it makes sense to
prioritize reducing the contention between the collective and the overlapped
compute, even at the cost of a slightly slower collective.
Common collective implementations (e.g., NCCL without user buffer
registration) optimize for throughput with no ambient compute. However, such
implementations may not be optimal when they are overlapped with compute:
- These implementations typically fuse the entire collective into a single
kernel and reserve SM resources based on the most demanding portion of the
collective, even when a large portion of the collective does not require this
much resource.
- These implementations often use SM-based P2P copy as opposed to copy
engine-based P2P copy. Copy engine-based P2P copy may not have a significant
advantage when there's no ambient compute. However, it may significantly
improve overall resource utilization in the presence of ambient compute.
When overlapped with intensive compute (e.g., persistent matmul kernels), the
SM-usage of a collective can lead to inefficient overlapping.
Low-contention collectives achieve their goals with the following strategies:
- Use copy engine-based copy whenever possible.
- Break down portions of a collective with different resource requirements
into multiple kernels. This improves the overlapping efficiency at the cost
of additional launching overhead.
"""
@torch.library.impl(lib, "_low_contention_all_gather", "Meta")
def _low_contention_all_gather_meta(
tensor: torch.Tensor,
group_name: str,
) -> torch.Tensor:
group_size = c10d._get_group_size_by_name(group_name)
return tensor.new_empty(tensor.shape[0] * group_size, *tensor.shape[1:])
@torch.library.impl(lib, "_low_contention_all_gather", "CUDA")
def _low_contention_all_gather(
tensor: torch.Tensor,
group_name: str,
) -> torch.Tensor:
"""
Performs all-gather with symmetric memory in a low-contention fashion.
When `tensor` is already in symmetric memory:
- The collective is carried out without using SMs.
- No symmetric memory workspace is required.
When `tensor` is not in symmetric memory:
- An extra SM-based copy is performed to copy the input data into the
symmetric memory workspace.
- Symmetric memory workspace size requirement: the size of `tensor`.
"""
symm_mem = rendezvous(tensor, group_name)
if symm_mem is not None:
input_is_symm_mem = True
else:
symm_mem = get_symm_mem_workspace(
group_name, tensor.numel() * tensor.element_size()
)
input_is_symm_mem = False
rank = symm_mem.rank
world_size = symm_mem.world_size
output = tensor.new_empty(tensor.shape[0] * world_size, *tensor.shape[1:])
chunks = output.chunk(world_size)
_get_backend_stream().wait_stream(torch.cuda.current_stream())
with _get_backend_stream():
if not input_is_symm_mem:
local_buf = symm_mem.get_buffer(rank, tensor.shape, tensor.dtype)
local_buf.copy_(tensor)
# pull
symm_mem.barrier()
for step in range(world_size):
remote_rank = (rank - step) % world_size
src_buf = symm_mem.get_buffer(remote_rank, tensor.shape, tensor.dtype)
chunks[remote_rank].copy_(src_buf)
symm_mem.barrier()
torch._C._distributed_c10d._register_work(output, Work())
return output
@torch.library.impl(lib, "_low_contention_reduce_scatter", "Meta")
def _low_contention_reduce_scatter_meta(
tensor: torch.Tensor,
reduce_op: str,
group_name: str,
) -> torch.Tensor:
group_size = c10d._get_group_size_by_name(group_name)
return tensor.unflatten(0, (group_size, -1)).mean(dim=0)
def _low_contention_reduce_scatter_with_symm_mem_input(
tensor: torch.Tensor,
reduce_op: str,
symm_mem: _SymmetricMemory,
) -> torch.Tensor:
rank = symm_mem.rank
world_size = symm_mem.world_size
assert tensor.shape[0] % world_size == 0
a2a_res = torch.empty_like(tensor)
chunks = a2a_res.chunk(world_size)
_get_backend_stream().wait_stream(torch.cuda.current_stream())
with _get_backend_stream():
# pull + offline reduction
symm_mem.barrier()
for step in range(world_size):
remote_rank = (rank - step) % world_size
src_buf = symm_mem.get_buffer(
remote_rank,
chunks[0].shape,
chunks[0].dtype,
chunks[0].numel() * rank,
)
chunks[remote_rank].copy_(src_buf)
symm_mem.barrier()
ret = a2a_res.unflatten(0, (world_size, -1))
if reduce_op == "sum":
ret = ret.sum(dim=0)
elif reduce_op == "avg":
ret = ret.mean(dim=0)
else:
raise ValueError(f"reduce_op ({reduce_op}) is not supported")
torch._C._distributed_c10d._register_work(ret, Work())
return ret
def _low_contention_reduce_scatter_with_workspace(
tensor: torch.Tensor,
reduce_op: str,
workspace: _SymmetricMemory,
) -> torch.Tensor:
rank = workspace.rank
world_size = workspace.world_size
assert tensor.shape[0] % world_size == 0
chunks = tensor.chunk(world_size)
_get_backend_stream().wait_stream(torch.cuda.current_stream())
with _get_backend_stream():
# push + offline reduction
workspace.barrier()
for step in range(world_size):
remote_rank = (rank - step) % world_size
dst_buf = workspace.get_buffer(
remote_rank, chunks[0].shape, chunks[0].dtype, chunks[0].numel() * rank
)
dst_buf.copy_(chunks[remote_rank])
workspace.barrier()
buf = workspace.get_buffer(rank, tensor.shape, tensor.dtype)
ret = buf.unflatten(0, (world_size, -1))
if reduce_op == "sum":
ret = ret.sum(dim=0)
elif reduce_op == "avg":
ret = ret.mean(dim=0)
else:
raise ValueError(f"reduce_op ({reduce_op}) is not supported")
torch._C._distributed_c10d._register_work(ret, Work())
return ret
@torch.library.impl(lib, "_low_contention_reduce_scatter", "CUDA")
def _low_contention_reduce_scatter(
tensor: torch.Tensor,
reduce_op: str,
group_name: str,
) -> torch.Tensor:
"""
Performs reduce-scatter with symmetric memory in a low-contention fashion.
This implementation performs a P2P-based all-to-all followed by an offline
reduction.
When `tensor` is already in symmetric memory:
- Pull-based all-to-all is used.
- No symmetric memory workspace is required.
When `tensor` is not in symmetric memory:
- Push-based all-to-all is used.
- Symmetric memory workspace size requirement: the size of `tensor`.
SM-usage:
- SM-based copy of the rank's own chunk for the all-to-all.
- Reduction on the all-to-all result.
TODO(yifu): the SM-based copy can be avoided with a list-based reduction
kernel.
"""
symm_mem = rendezvous(tensor, group_name)
if symm_mem is not None:
return _low_contention_reduce_scatter_with_symm_mem_input(
tensor, reduce_op, symm_mem
)
else:
workspace = get_symm_mem_workspace(
group_name, tensor.numel() * tensor.element_size()
)
return _low_contention_reduce_scatter_with_workspace(
tensor, reduce_op, workspace
)
@torch.library.impl(lib, "all_to_all_vdev_2d", "Meta")
def _all_to_all_vdev_2d_meta(
input: torch.Tensor,
out: torch.Tensor,
in_splits: torch.Tensor,
out_splits_offsets: torch.Tensor,
group_name: str,
major_align: int | None = None,
) -> None:
return None
@torch.library.impl(lib, "all_to_all_vdev_2d_offset", "Meta")
def _all_to_all_vdev_2d_offset_meta(
input: torch.Tensor,
out: torch.Tensor,
in_splits_offsets: torch.Tensor,
out_splits_offsets: torch.Tensor,
group_name: str,
) -> None:
return None
# =============================================================================
# User-facing APIs
# =============================================================================
from collections.abc import Sequence
from typing import overload, TYPE_CHECKING, Union
if TYPE_CHECKING:
from torch._C._distributed_c10d import ProcessGroup
from torch.types import _device, _dtype, _int
@overload
def empty(
*size: _int, dtype: _dtype | None = None, device: _device | None = None
) -> torch.Tensor: ...
@overload
# pyrefly: ignore [inconsistent-overload]
def empty(
size: Sequence[_int],
*,
dtype: _dtype | None = None,
device: _device | None = None,
) -> torch.Tensor: ...
def empty( # type: ignore[misc]
*size: Any,
dtype: _dtype | None = None,
device: _device | None = None,
) -> torch.Tensor:
r"""
Similar to :func:`torch.empty()`. The returned tensor can be used by
:func:`torch._distributed._symmetric_memory.rendezvous()` to establish a
symmetric memory tensor among participating processes.
Args:
size (int...): a sequence of integers defining the shape of the output tensor.
Can be a variable number of arguments or a collection like a list or tuple.
Keyword args:
dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`).
device (:class:`torch.device`, optional): the desired device of returned tensor.
Default: if ``None``, uses the current device for the default tensor type
(see :func:`torch.set_default_device`). :attr:`device` will be the CPU
for CPU tensor types and the current CUDA device for CUDA tensor types.
"""
if len(size) == 1 and isinstance(size[0], Sequence):
size = tuple(size[0])
else:
size = tuple(size)
if dtype is None:
dtype = torch.get_default_dtype()
if device is None:
device = torch.get_default_device()
return _SymmetricMemory.empty_strided_p2p(
size=size,
stride=torch._prims_common.make_contiguous_strides_for(size),
dtype=dtype,
device=torch.device(device),
)
def rendezvous(
tensor: torch.Tensor, group: Union[str, ProcessGroup]
) -> _SymmetricMemory:
r"""
rendezvous(tensor, group) -> _SymmetricMemory
Establish a symmetric memory tensor among participating processes. This is
a collective operation.
Args:
tensor (:class:`torch.Tensor`): the local tensor used to establish the symmetric memory tensor.
It must be allocated via :func:`torch._distributed._symmetric_memory.empty()`. The shape,
dtype, and device type must be identical across all participating processes.
group (Union[str, :class:`torch.distributed.ProcessGroup`]): The group identifying the
participating processes. This can be either a group name or a process group object.
"""
from torch._C._distributed_c10d import ProcessGroup
if isinstance(group, str):
group_name = group
elif isinstance(group, ProcessGroup):
group_name = group.group_name
else:
raise TypeError(f"rendezvous: unsupported group type: {type(group)}")
enable_symm_mem_for_group(group_name)
return _SymmetricMemory.rendezvous(tensor, group_name)
def is_nvshmem_available() -> bool:
r"""
is_nvshmem_available() -> bool
Check if NVSHMEM is available in current build and on current system.
"""
try:
from torch._C._distributed_c10d import _is_nvshmem_available
except ImportError:
# Not all builds have NVSHMEM support.
return False
# Check if NVSHMEM is available on current system.
return _is_nvshmem_available()
def set_backend(name: Literal["NVSHMEM", "CUDA", "NCCL"]) -> None:
r"""
Set the backend for symmetric memory allocation. This is a global setting
and affects all subsequent calls to
:func:`torch._distributed._symmetric_memory.empty()`. Note that the backend
cannot be changed once a symmetric memory tensor has been allocated.
Args:
backend (str): the backend for symmetric memory allocation. Currently,
only `"NVSHMEM"`, `"CUDA"`, `"NCCL"` are supported.
"""
_SymmetricMemory.set_backend(name)
def get_backend(device: _device) -> str | None:
r"""
Get the backend for symmetric memory allocation for a given device. If not
found, return None.
Args:
device (`torch.device` or str): the device for which to get the backend.
"""
return _SymmetricMemory.get_backend(torch.device(device))
def get_mempool_allocator(device: _device): # type: ignore[no-untyped-def]
r"""
Get the MemPool allocator for symmetric memory for a given device.
Args:
device (`torch.device` or str): the device for which to get the MemPool
allocator.
"""
return _SymmetricMemory.get_mempool_allocator(torch.device(device))
__all__ = ["empty", "rendezvous", "is_nvshmem_available", "set_backend", "get_backend"]
|
Work
|
python
|
facebook__pyre-check
|
source/interprocedural_analyses/taint/test/integration/overrides.py
|
{
"start": 4017,
"end": 4207
}
|
class ____(BaseWithDeclaration):
def return_source(self):
pass
def test_overrides_with_declaration(b: BaseWithDeclaration):
_test_sink(b.return_source())
|
DerivedWithDeclaration
|
python
|
pytorch__pytorch
|
test/distributed/optim/test_named_optimizer.py
|
{
"start": 1153,
"end": 15000
}
|
class ____(unittest.TestCase):
def _compare_state_dict_group(self, group, named_group, assert_equal=True):
for key, val in group.items():
if key != "params":
self.assertTrue(
key in named_group, f"{key} not in named optimizer state dict"
)
err_msg = (
f"{key} state not equal" if assert_equal else f"{key} state equal"
)
if isinstance(val, torch.Tensor):
fn = self.assertTrue if assert_equal else self.assertFalse
fn(torch.allclose(val, named_group[key]), err_msg)
else:
fn = self.assertEqual if assert_equal else self.assertNotEqual
fn(val, named_group[key], err_msg)
def _compare_param_groups(self, param_groups_1, param_groups_2):
self.assertTrue(isinstance(param_groups_1, list))
self.assertTrue(isinstance(param_groups_2, list))
for groups in zip(param_groups_1, param_groups_2):
self._compare_param_group(groups[0], groups[1])
def _compare_param_group(self, group_1, group_2):
self.assertTrue(isinstance(group_1, dict))
self.assertTrue(isinstance(group_2, dict))
for key, val in group_1.items():
self.assertTrue(key in group_2)
if key != "params":
self.assertEqual(val, group_2[key])
else:
for tensors in zip(val, group_2[key]):
self.assertTrue(torch.allclose(tensors[0], tensors[1]))
def test_state_dict(self):
"""Check that NamedOptimizer exposes the expected state dict
interface."""
m = TestDummyModel()
m_dup = TestDummyModel()
optim = torch.optim.SGD(
m.parameters(),
lr=1e-2,
momentum=0.9,
)
named_optim = _NamedOptimizer(
m_dup.named_parameters(),
torch.optim.SGD,
lr=1e-2,
momentum=0.9,
)
self._compare_param_groups(optim.param_groups, named_optim.param_groups)
_run_model_training([(m, [optim]), (m_dup, [named_optim])])
self._compare_param_groups(optim.param_groups, named_optim.param_groups)
sd = optim.state_dict()
named_sd = named_optim.state_dict()
# Compare "state" in optim state dict
self._compare_state_dict_group(
sd["state"][0],
named_sd["state"]["net1.0.weight"],
assert_equal=True,
)
self._compare_state_dict_group(
sd["state"][3],
named_sd["state"]["net2.0.bias"],
assert_equal=True,
)
self._compare_state_dict_group(
sd["state"][4],
named_sd["state"]["net3.weight"],
assert_equal=True,
)
self._compare_state_dict_group(
sd["state"][7],
named_sd["state"]["net4.1.bias"],
assert_equal=True,
)
def test_state_dict_multi_param_group(self):
"""Check that NamedOptimizer exposes the expected state dict
interface when multiple param groups are specified."""
m = TestDummyModel()
m_dup = TestDummyModel()
optim_1 = torch.optim.SGD(
[
{"params": m.net1.parameters()},
{"params": m.net3.parameters(), "lr": 1e-3},
],
lr=1e-2,
momentum=0.9,
)
optim_2 = torch.optim.Adam(
[
{"params": m.net2.parameters()},
{"params": m.net4.parameters(), "lr": 1e-5},
]
)
named_optim_1 = _NamedOptimizer(
m_dup.named_parameters(),
torch.optim.SGD,
[
{"params": m_dup.net1.parameters()},
{"params": m_dup.net3.parameters(), "lr": 1e-3},
],
lr=1e-2,
momentum=0.9,
)
named_optim_2 = _NamedOptimizer(
m_dup.named_parameters(),
torch.optim.Adam,
[
{"params": m_dup.net2.parameters()},
{"params": m_dup.net4.parameters(), "lr": 1e-5},
],
)
self._compare_param_groups(optim_1.param_groups, named_optim_1.param_groups)
self._compare_param_groups(optim_2.param_groups, named_optim_2.param_groups)
_run_model_training(
[(m, [optim_1, optim_2]), (m_dup, [named_optim_1, named_optim_2])]
)
self._compare_param_groups(optim_1.param_groups, named_optim_1.param_groups)
self._compare_param_groups(optim_2.param_groups, named_optim_2.param_groups)
sd_1 = optim_1.state_dict()
sd_2 = optim_2.state_dict()
named_sd_1 = named_optim_1.state_dict()
named_sd_2 = named_optim_2.state_dict()
# Compare "state" in optim state dict
self._compare_state_dict_group(
sd_1["state"][0],
named_sd_1["state"]["net1.0.weight"],
assert_equal=True,
)
self._compare_state_dict_group(
sd_2["state"][1],
named_sd_2["state"]["net2.0.bias"],
assert_equal=True,
)
self._compare_state_dict_group(
sd_1["state"][2],
named_sd_1["state"]["net3.weight"],
assert_equal=True,
)
self._compare_state_dict_group(
sd_2["state"][3],
named_sd_2["state"]["net4.1.bias"],
assert_equal=True,
)
# Compare "param_groups" in optim state dict
self._compare_state_dict_group(
sd_1["param_groups"][0],
named_sd_1["param_groups"][0],
assert_equal=True,
)
self._compare_state_dict_group(
sd_2["param_groups"][1], named_sd_2["param_groups"][1], assert_equal=True
)
def test_load_state_dict(self):
"""Check that NamedOptimizer's load_state_dict works as expected."""
m = TestDummyModel()
named_optim_1 = _NamedOptimizer(
m.named_parameters(),
torch.optim.SGD,
lr=1e-2,
momentum=0.9,
)
_run_model_training([(m, [named_optim_1])])
state_dict_to_load = named_optim_1.state_dict()
named_optim_2 = _NamedOptimizer(
m.named_parameters(),
torch.optim.SGD,
lr=1e-2,
momentum=0.6,
)
_run_model_training([(m, [named_optim_2])])
state_dict_before_load = named_optim_2.state_dict()
# Compare "state" in optim state dict
self._compare_state_dict_group(
state_dict_to_load["state"]["net1.0.weight"],
state_dict_before_load["state"]["net1.0.weight"],
assert_equal=False,
)
self._compare_state_dict_group(
state_dict_to_load["state"]["net2.0.bias"],
state_dict_before_load["state"]["net2.0.bias"],
assert_equal=False,
)
self._compare_state_dict_group(
state_dict_to_load["state"]["net3.weight"],
state_dict_before_load["state"]["net3.weight"],
assert_equal=False,
)
self._compare_state_dict_group(
state_dict_to_load["state"]["net4.1.bias"],
state_dict_before_load["state"]["net4.1.bias"],
assert_equal=False,
)
named_optim_2.load_state_dict(state_dict_to_load)
state_dict_after_load = named_optim_2.state_dict()
# Compare "state" in optim state dict
self._compare_state_dict_group(
state_dict_to_load["state"]["net1.0.weight"],
state_dict_after_load["state"]["net1.0.weight"],
assert_equal=True,
)
self._compare_state_dict_group(
state_dict_to_load["state"]["net2.0.bias"],
state_dict_after_load["state"]["net2.0.bias"],
assert_equal=True,
)
self._compare_state_dict_group(
state_dict_to_load["state"]["net3.weight"],
state_dict_after_load["state"]["net3.weight"],
assert_equal=True,
)
self._compare_state_dict_group(
state_dict_to_load["state"]["net4.1.bias"],
state_dict_after_load["state"]["net4.1.bias"],
assert_equal=True,
)
def test_load_state_dict_conditional_training(self):
"""Check that NamedOptimizer load_state_dict works under conditional training case."""
m = TestDummyModel()
named_optim_1 = _NamedOptimizer(
m.named_parameters(),
torch.optim.SGD,
[
{"params": m.net1.parameters()},
{"params": m.net3.parameters(), "lr": 1e-3},
],
lr=1e-2,
momentum=0.9,
)
_run_model_training([(m, [named_optim_1])])
state_dict_to_load = named_optim_1.state_dict()
named_optim_2 = _NamedOptimizer(
m.named_parameters(),
torch.optim.SGD,
lr=1e-2,
momentum=0.6,
)
_run_model_training([(m, [named_optim_2])])
named_optim_2.load_state_dict(state_dict_to_load)
state_dict_after_load = named_optim_2.state_dict()
# Compare "state" in optim state dict
self._compare_state_dict_group(
state_dict_to_load["state"]["net1.0.weight"],
state_dict_after_load["state"]["net1.0.weight"],
assert_equal=True,
)
self._compare_state_dict_group(
state_dict_to_load["state"]["net3.weight"],
state_dict_after_load["state"]["net3.weight"],
assert_equal=True,
)
def test_load_state_dict_error(self):
m = TestDummyModel()
named_optim_1 = _NamedOptimizer(
m.named_parameters(),
torch.optim.SGD,
lr=1e-2,
momentum=0.9,
)
_run_model_training([(m, [named_optim_1])])
state_dict_to_load = named_optim_1.state_dict()
named_optim_2 = _NamedOptimizer(
m.named_parameters(),
torch.optim.SGD,
lr=1e-2,
momentum=0.6,
)
err_msg = (
"Expects the optim to be initialized before load but found not initialized"
)
with self.assertRaisesRegex(ValueError, err_msg):
named_optim_2.load_state_dict(state_dict_to_load)
def test_add_param_group(self):
m = TestDummyModel()
m_dup = TestDummyModel()
optim = torch.optim.SGD(
[
{"params": m.net1.parameters()},
{"params": m.net3.parameters(), "lr": 1e-3},
],
lr=1e-2,
momentum=0.9,
)
named_optim = _NamedOptimizer(
m_dup.named_parameters(),
torch.optim.SGD,
[
{"params": m_dup.net1.parameters()},
{"params": m_dup.net3.parameters(), "lr": 1e-3},
],
lr=1e-2,
momentum=0.9,
)
_run_model_training([(m, [optim]), (m_dup, [named_optim])])
self._compare_param_groups(optim.param_groups, named_optim.param_groups)
optim.add_param_group({"params": m.net2.parameters(), "lr": 1e-5})
named_optim.add_param_group({"params": m_dup.net2.parameters(), "lr": 1e-5})
_run_model_training([(m, [optim]), (m_dup, [named_optim])])
self._compare_param_groups(optim.param_groups, named_optim.param_groups)
optim.add_param_group({"params": m.net4[1].weight, "lr": 1e-3})
named_optim.add_param_group({"params": m_dup.net4[1].weight, "lr": 1e-3})
_run_model_training([(m, [optim]), (m_dup, [named_optim])])
self._compare_param_groups(optim.param_groups, named_optim.param_groups)
def test_add_param_group_error(self):
m = TestDummyModel()
named_optim = _NamedOptimizer(
m.named_parameters(),
torch.optim.SGD,
[
{"params": m.net1.parameters()},
{"params": m.net3.parameters(), "lr": 1e-3},
],
lr=1e-2,
momentum=0.9,
)
err_msg = "some parameters are not in the module"
with self.assertRaisesRegex(ValueError, err_msg):
named_optim.add_param_group({"params": [torch.ones(8, 1)], "lr": 1e-5})
def test_init_state(self):
m = TestDummyModel()
named_optim = _NamedOptimizer(
m.named_parameters(),
torch.optim.SGD,
[
{"params": m.net1.parameters()},
{"params": m.net3.parameters(), "lr": 1e-3},
],
lr=1e-2,
momentum=0.9,
)
named_sd = named_optim.state_dict()
self.assertTrue(m.net1[0].weight.grad is None)
self.assertTrue(len(named_sd["state"]) == 0)
named_optim.init_state()
named_sd = named_optim.state_dict()
self.assertTrue(m.net1[0].weight.grad is not None)
self.assertTrue("momentum_buffer" in named_sd["state"]["net1.0.weight"])
self.assertFalse(
torch.all(named_sd["state"]["net1.0.weight"]["momentum_buffer"]).item()
)
self.assertFalse(
torch.all(named_sd["state"]["net1.0.bias"]["momentum_buffer"]).item()
)
self.assertTrue(m.net3.bias.grad is not None)
self.assertTrue("momentum_buffer" in named_sd["state"]["net3.bias"])
self.assertFalse(
torch.all(named_sd["state"]["net3.bias"]["momentum_buffer"]).item()
)
self.assertFalse(
torch.all(named_sd["state"]["net3.weight"]["momentum_buffer"]).item()
)
|
NamedOptimizerTest
|
python
|
Textualize__textual
|
src/textual/events.py
|
{
"start": 1943,
"end": 2210
}
|
class ____(Event, bubble=False):
"""Sent when there are no more items in the message queue.
This is a pseudo-event in that it is created by the Textual system and doesn't go
through the usual message queue.
- [ ] Bubbles
- [ ] Verbose
"""
|
Idle
|
python
|
apache__airflow
|
providers/google/tests/unit/google/cloud/utils/gcp_authenticator.py
|
{
"start": 2534,
"end": 8694
}
|
class ____(CommandExecutor):
"""
Initialises the authenticator.
:param gcp_key: name of the key to use for authentication (see GCP_*_KEY values)
:param project_extra: optional extra project parameter passed to google cloud
connection
"""
original_account: str | None = None
def __init__(self, gcp_key: str, project_extra: str | None = None):
super().__init__()
self.gcp_key = gcp_key
self.project_extra = project_extra
self.project_id = self.get_project_id()
self.full_key_path = None
self._set_key_path()
@staticmethod
def get_project_id():
return os.environ.get("GCP_PROJECT_ID")
def set_key_path_in_airflow_connection(self):
"""
Set key path in 'google_cloud_default' connection to point to the full
key path
:return: None
"""
with settings.Session() as session:
conn = session.query(Connection).filter(Connection.conn_id == "google_cloud_default")[0]
extras = conn.extra_dejson
extras[KEYPATH_EXTRA] = self.full_key_path
if extras.get(KEYFILE_DICT_EXTRA):
del extras[KEYFILE_DICT_EXTRA]
extras[SCOPE_EXTRA] = "https://www.googleapis.com/auth/cloud-platform"
extras[PROJECT_EXTRA] = self.project_extra if self.project_extra else self.project_id
conn.extra = json.dumps(extras)
def set_dictionary_in_airflow_connection(self):
"""
Set dictionary in 'google_cloud_default' connection to contain content
of the json service account file.
:return: None
"""
with settings.Session() as session:
conn = session.query(Connection).filter(Connection.conn_id == "google_cloud_default")[0]
extras = conn.extra_dejson
with open(self.full_key_path) as path_file:
content = json.load(path_file)
extras[KEYFILE_DICT_EXTRA] = json.dumps(content)
if extras.get(KEYPATH_EXTRA):
del extras[KEYPATH_EXTRA]
extras[SCOPE_EXTRA] = "https://www.googleapis.com/auth/cloud-platform"
extras[PROJECT_EXTRA] = self.project_extra
conn.extra = json.dumps(extras)
def _set_key_path(self):
"""
Sets full key path - if GCP_CONFIG_DIR points to absolute
directory, it tries to find the key in this directory. Otherwise it assumes
that Airflow is running from the directory where configuration is checked
out next to airflow directory in config directory
it tries to find the key folder in the workspace's config
directory.
:param : name of the key file to find.
"""
if "GCP_CONFIG_DIR" in os.environ:
gcp_config_dir = os.environ["GCP_CONFIG_DIR"]
else:
gcp_config_dir = os.path.join(AIRFLOW_MAIN_FOLDER, os.pardir, "config")
if not os.path.isdir(gcp_config_dir):
self.log.info("The %s is not a directory", gcp_config_dir)
key_dir = os.path.join(gcp_config_dir, "keys")
if not os.path.isdir(key_dir):
self.log.error("The %s is not a directory", key_dir)
return
key_path = os.path.join(key_dir, self.gcp_key)
if not os.path.isfile(key_path):
self.log.error("The %s file is missing", key_path)
self.full_key_path = key_path
def _validate_key_set(self):
if self.full_key_path is None:
raise AirflowException("The gcp_key is not set!")
if not os.path.isfile(self.full_key_path):
raise AirflowException(
f"The key {self.gcp_key} could not be found. Please copy it to the {self.full_key_path} path."
)
def gcp_authenticate(self):
"""
Authenticate with service account specified via key name.
"""
self._validate_key_set()
self.log.info("Setting the Google Cloud key to %s", self.full_key_path)
# Checking if we can authenticate using service account credentials provided
self.execute_cmd(
[
"gcloud",
"auth",
"activate-service-account",
f"--key-file={self.full_key_path}",
f"--project={self.project_id}",
]
)
self.set_key_path_in_airflow_connection()
def gcp_revoke_authentication(self):
"""
Change default authentication to none - which is not existing one.
"""
self._validate_key_set()
self.log.info("Revoking authentication - setting it to none")
self.execute_cmd(["gcloud", "config", "get-value", "account", f"--project={self.project_id}"])
self.execute_cmd(["gcloud", "config", "set", "account", "none", f"--project={self.project_id}"])
def gcp_store_authentication(self):
"""
Store authentication as it was originally so it can be restored and revoke
authentication.
"""
self._validate_key_set()
if not GcpAuthenticator.original_account:
GcpAuthenticator.original_account = self.check_output(
["gcloud", "config", "get-value", "account", f"--project={self.project_id}"]
).decode("utf-8")
self.log.info("Storing account: to restore it later %s", GcpAuthenticator.original_account)
def gcp_restore_authentication(self):
"""
Restore authentication to the original one.
"""
self._validate_key_set()
if GcpAuthenticator.original_account:
self.log.info("Restoring original account stored: %s", GcpAuthenticator.original_account)
subprocess.call(
[
"gcloud",
"config",
"set",
"account",
GcpAuthenticator.original_account,
f"--project={self.project_id}",
]
)
else:
self.log.info("Not restoring the original Google Cloud account: it is not set")
|
GcpAuthenticator
|
python
|
django-haystack__django-haystack
|
test_haystack/test_templatetags.py
|
{
"start": 800,
"end": 4922
}
|
class ____(TemplateTagTestCase):
def setUp(self):
super().setUp()
self.sample_entry = """
Registering indexes in Haystack is very similar to registering models and
ModelAdmin classes in the Django admin site. If you want to override the default
indexing behavior for your model you can specify your own SearchIndex class.
This is useful for ensuring that future-dated or non-live content is not indexed
and searchable.
Every custom SearchIndex requires there be one and only one field with
document=True. This is the primary field that will get passed to the backend
for indexing. For this field, you'll then need to create a template at
search/indexes/myapp/note_text.txt. This allows you to customize the document
that will be passed to the search backend for indexing. A sample template might
look like.
In addition, you may specify other fields to be populated along with the
document. In this case, we also index the user who authored the document as
well as the date the document was published. The variable you assign the
SearchField to should directly map to the field your search backend is
expecting. You instantiate most search fields with a parameter that points to
the attribute of the object to populate that field with.
"""
def test_simple(self):
template = """{% load highlight %}{% highlight entry with query %}"""
context = {"entry": self.sample_entry, "query": "index"}
self.assertEqual(
self.render(template, context),
'...<span class="highlighted">index</span>ing behavior for your model you can specify your own Search<span class="highlighted">Index</span> class.\nThis is useful for ensuring that future-dated or non-live content is not <span class="highlighted">index</span>ed\nand searchable.\n\nEvery custom Search<span class="highlighted">Index</span> ...',
)
template = """{% load highlight %}{% highlight entry with query html_tag "div" css_class "foo" max_length 100 %}"""
context = {"entry": self.sample_entry, "query": "field"}
self.assertEqual(
self.render(template, context),
'...<div class="foo">field</div> with\ndocument=True. This is the primary <div class="foo">field</div> that will get passed to the backend\nfor indexing...',
)
template = """{% load highlight %}{% highlight entry with query html_tag "div" css_class "foo" max_length 100 %}"""
context = {"entry": self.sample_entry, "query": "Haystack"}
self.assertEqual(
self.render(template, context),
'...<div class="foo">Haystack</div> is very similar to registering models and\nModelAdmin classes in the Django admin site. If y...',
)
template = """{% load highlight %}{% highlight "xxxxxxxxxxxxx foo bbxxxxx foo" with "foo" max_length 5 html_tag "span" %}"""
context = {}
self.assertEqual(
self.render(template, context),
'...<span class="highlighted">foo</span> b...',
)
def test_custom(self):
# Stow.
old_custom_highlighter = getattr(settings, "HAYSTACK_CUSTOM_HIGHLIGHTER", None)
settings.HAYSTACK_CUSTOM_HIGHLIGHTER = "not.here.FooHighlighter"
template = """{% load highlight %}{% highlight entry with query %}"""
context = {"entry": self.sample_entry, "query": "index"}
self.assertRaises(ImproperlyConfigured, self.render, template, context)
settings.HAYSTACK_CUSTOM_HIGHLIGHTER = (
"test_haystack.test_templatetags.BorkHighlighter"
)
template = """{% load highlight %}{% highlight entry with query %}"""
context = {"entry": self.sample_entry, "query": "index"}
self.assertEqual(
self.render(template, context),
"Bork!ing behavior for your model you can specify your own SearchIndex class.\nThis is useful for ensuring that future-dated or non-live content is not Bork!ed\nand searchable.\n\nEvery custom SearchIndex ",
)
# Restore.
settings.HAYSTACK_CUSTOM_HIGHLIGHTER = old_custom_highlighter
|
HighlightTestCase
|
python
|
huggingface__transformers
|
src/transformers/models/prompt_depth_anything/modular_prompt_depth_anything.py
|
{
"start": 4833,
"end": 5919
}
|
class ____(DepthAnythingDepthEstimationHead):
def forward(self, hidden_states: list[torch.Tensor], patch_height: int, patch_width: int) -> torch.Tensor:
hidden_states = hidden_states[-1]
predicted_depth = self.conv1(hidden_states)
target_height = torch_int(patch_height * self.patch_size)
target_width = torch_int(patch_width * self.patch_size)
predicted_depth = nn.functional.interpolate(
predicted_depth,
(target_height, target_width),
mode="bilinear",
align_corners=True,
)
predicted_depth = self.conv2(predicted_depth)
predicted_depth = self.activation1(predicted_depth)
predicted_depth = self.conv3(predicted_depth)
predicted_depth = self.activation2(predicted_depth)
# (batch_size, 1, height, width) -> (batch_size, height, width), which
# keeps the same behavior as Depth Anything v1 & v2
predicted_depth = predicted_depth.squeeze(dim=1)
return predicted_depth
@auto_docstring
|
PromptDepthAnythingDepthEstimationHead
|
python
|
qdrant__qdrant-client
|
qdrant_client/http/models/models.py
|
{
"start": 125358,
"end": 125798
}
|
class ____(str, Enum):
"""
Defines source of truth for snapshot recovery: `NoSync` means - restore snapshot without *any* additional synchronization. `Snapshot` means - prefer snapshot data over the current state. `Replica` means - prefer existing data over the snapshot.
"""
def __str__(self) -> str:
return str(self.value)
NO_SYNC = "no_sync"
SNAPSHOT = "snapshot"
REPLICA = "replica"
|
SnapshotPriority
|
python
|
pytorch__pytorch
|
torch/fx/experimental/symbolic_shapes.py
|
{
"start": 105022,
"end": 105122
}
|
class ____(_ShapeGuardsHelper):
source_to_symbol: dict[Source, sympy.Symbol]
|
_CppShapeGuardsHelper
|
python
|
kamyu104__LeetCode-Solutions
|
Python/power-grid-maintenance.py
|
{
"start": 87,
"end": 1424
}
|
class ____(object):
def processQueries(self, c, connections, queries):
"""
:type c: int
:type connections: List[List[int]]
:type queries: List[List[int]]
:rtype: List[int]
"""
def iter_dfs(i):
stk = [i]
while stk:
u = stk.pop()
if lookup[u] != -1:
continue
lookup[u] = i
for v in reversed(adj[u]):
stk.append(v)
adj = [[] for _ in xrange(c)]
for u, v in connections:
adj[u-1].append(v-1)
adj[v-1].append(u-1)
lookup = [-1]*c
for i in xrange(c):
iter_dfs(i)
groups = [[] for _ in xrange(c)]
for i in reversed(xrange(c)):
groups[lookup[i]].append(i)
result = []
online = [True]*c
for t, x in queries:
x -= 1
if t == 1:
if online[x]:
result.append(x+1)
continue
while groups[lookup[x]] and not online[groups[lookup[x]][-1]]:
groups[lookup[x]].pop()
result.append(groups[lookup[x]][-1]+1 if groups[lookup[x]] else -1)
else:
online[x] = False
return result
|
Solution
|
python
|
astropy__astropy
|
astropy/modeling/projections.py
|
{
"start": 4626,
"end": 5853
}
|
class ____(Projection):
"""Base class for all Pix2Sky projections."""
n_inputs = 2
n_outputs = 2
_input_units_strict = True
_input_units_allow_dimensionless = True
def __new__(cls, *args, **kwargs):
long_name = cls.name.split("_")[1]
cls.prj_code = _PROJ_NAME_CODE_MAP[long_name]
return super().__new__(cls)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._prj.code = self.prj_code
self._update_prj()
if not self.param_names:
# force initial call to Prjprm.set() for projections
# with no parameters:
self._prj.set()
self.inputs = ("x", "y")
self.outputs = ("phi", "theta")
@property
def input_units(self):
return {self.inputs[0]: u.deg, self.inputs[1]: u.deg}
@property
def return_units(self):
return {self.outputs[0]: u.deg, self.outputs[1]: u.deg}
def evaluate(self, x, y, *args, **kwargs):
self._update_prj()
return self._prj.prjx2s(x, y)
@property
def inverse(self):
pv = [getattr(self, param).value for param in self.param_names]
return self._inv_cls(*pv)
|
Pix2SkyProjection
|
python
|
apache__airflow
|
airflow-core/tests/unit/api_fastapi/core_api/routes/public/test_task_instances.py
|
{
"start": 20171,
"end": 23913
}
|
class ____(TestTaskInstanceEndpoint):
def test_should_respond_200_mapped_task_instance_with_rtif(self, test_client, session):
"""Verify we don't duplicate rows through join to RTIF"""
tis = self.create_task_instances(session)
old_ti = tis[0]
for idx in (1, 2):
ti = TaskInstance(
task=old_ti.task, run_id=old_ti.run_id, map_index=idx, dag_version_id=old_ti.dag_version_id
)
ti.rendered_task_instance_fields = RTIF(ti, render_templates=False)
for attr in ["duration", "end_date", "pid", "start_date", "state", "queue", "note"]:
setattr(ti, attr, getattr(old_ti, attr))
session.add(ti)
session.commit()
# in each loop, we should get the right mapped TI back
for map_index in (1, 2):
response = test_client.get(
"/dags/example_python_operator/dagRuns/TEST_DAG_RUN_ID/taskInstances"
f"/print_the_context/{map_index}",
)
assert response.status_code == 200
assert response.json() == {
"dag_id": "example_python_operator",
"dag_version": mock.ANY,
"dag_display_name": "example_python_operator",
"duration": 10000.0,
"end_date": "2020-01-03T00:00:00Z",
"logical_date": "2020-01-01T00:00:00Z",
"executor": None,
"executor_config": "{}",
"hostname": "",
"id": mock.ANY,
"map_index": map_index,
"max_tries": 0,
"note": "placeholder-note",
"operator": "PythonOperator",
"operator_name": "PythonOperator",
"pid": 100,
"pool": "default_pool",
"pool_slots": 1,
"priority_weight": 9,
"queue": "default_queue",
"queued_when": None,
"scheduled_when": None,
"start_date": "2020-01-02T00:00:00Z",
"state": "running",
"task_id": "print_the_context",
"task_display_name": "print_the_context",
"try_number": 0,
"unixname": getuser(),
"dag_run_id": "TEST_DAG_RUN_ID",
"rendered_fields": {"op_args": [], "op_kwargs": {}, "templates_dict": None},
"rendered_map_index": str(map_index),
"run_after": "2020-01-01T00:00:00Z",
"trigger": None,
"triggerer_job": None,
}
def test_should_respond_401(self, unauthenticated_test_client):
response = unauthenticated_test_client.get(
"/dags/example_python_operator/dagRuns/TEST_DAG_RUN_ID/taskInstances/print_the_context/1",
)
assert response.status_code == 401
def test_should_respond_403(self, unauthorized_test_client):
response = unauthorized_test_client.get(
"/dags/example_python_operator/dagRuns/TEST_DAG_RUN_ID/taskInstances/print_the_context/1",
)
assert response.status_code == 403
def test_should_respond_404_wrong_map_index(self, test_client, session):
self.create_task_instances(session)
response = test_client.get(
"/dags/example_python_operator/dagRuns/TEST_DAG_RUN_ID/taskInstances/print_the_context/10",
)
assert response.status_code == 404
assert response.json() == {
"detail": "The Mapped Task Instance with dag_id: `example_python_operator`, run_id: `TEST_DAG_RUN_ID`, task_id: `print_the_context`, and map_index: `10` was not found"
}
|
TestGetMappedTaskInstance
|
python
|
PrefectHQ__prefect
|
tests/client/test_prefect_client.py
|
{
"start": 99385,
"end": 105633
}
|
class ____:
@pytest.fixture
async def deployment(self, prefect_client):
foo = flow(lambda: None, name="foo")
flow_id = await prefect_client.create_flow(foo)
schedule = IntervalSchedule(
interval=timedelta(days=1), anchor_date=DateTime(2020, 1, 1)
)
deployment_id = await prefect_client.create_deployment(
flow_id=flow_id,
name="test-deployment",
schedules=[DeploymentScheduleCreate(schedule=schedule)],
parameters={"foo": "bar"},
work_queue_name="wq",
)
deployment = await prefect_client.read_deployment(deployment_id)
return deployment
async def test_create_deployment_schedule(self, prefect_client, deployment):
deployment_id = str(deployment.id)
cron_schedule = CronSchedule(cron="* * * * *")
schedules = [(cron_schedule, True)]
result = await prefect_client.create_deployment_schedules(
deployment_id, schedules
)
assert len(result) == 1
assert result[0].id
assert result[0].schedule == cron_schedule
assert result[0].active is True
async def test_create_multiple_deployment_schedules_success(
self, prefect_client, deployment
):
deployment_id = str(deployment.id)
cron_schedule = CronSchedule(cron="0 12 * * *")
interval_schedule = IntervalSchedule(interval=timedelta(hours=1))
schedules = [(cron_schedule, True), (interval_schedule, False)]
result = await prefect_client.create_deployment_schedules(
deployment_id, schedules
)
assert len(result) == 2
# Assuming the order of results matches the order of input schedules
assert result[0].schedule == cron_schedule
assert result[0].active is True
assert result[1].schedule == interval_schedule
assert result[1].active is False
async def test_read_deployment_schedules_success(self, prefect_client, deployment):
result = await prefect_client.read_deployment_schedules(deployment.id)
assert len(result) == 1
assert result[0].schedule == IntervalSchedule(
interval=timedelta(days=1), anchor_date=DateTime(2020, 1, 1)
)
assert result[0].active is True
async def test_update_deployment_schedule_only_active(
self, deployment, prefect_client
):
result = await prefect_client.read_deployment_schedules(deployment.id)
assert result[0].active is True
await prefect_client.update_deployment_schedule(
deployment.id, deployment.schedules[0].id, active=False
)
result = await prefect_client.read_deployment_schedules(deployment.id)
assert len(result) == 1
assert result[0].active is False
async def test_update_deployment_schedule_only_schedule(
self, deployment, prefect_client
):
result = await prefect_client.read_deployment_schedules(deployment.id)
assert result[0].schedule == IntervalSchedule(
interval=timedelta(days=1), anchor_date=DateTime(2020, 1, 1)
)
await prefect_client.update_deployment_schedule(
deployment.id,
deployment.schedules[0].id,
schedule=IntervalSchedule(interval=timedelta(minutes=15)),
)
result = await prefect_client.read_deployment_schedules(deployment.id)
assert len(result) == 1
assert result[0].schedule.interval == timedelta(minutes=15)
async def test_update_deployment_schedule_all_fields(
self, deployment, prefect_client
):
"""
A regression test for #13243
"""
result = await prefect_client.read_deployment_schedules(deployment.id)
assert result[0].schedule == IntervalSchedule(
interval=timedelta(days=1), anchor_date=DateTime(2020, 1, 1)
)
assert result[0].active is True
await prefect_client.update_deployment_schedule(
deployment.id,
deployment.schedules[0].id,
schedule=IntervalSchedule(interval=timedelta(minutes=15)),
active=False,
)
result = await prefect_client.read_deployment_schedules(deployment.id)
assert len(result) == 1
assert result[0].schedule.interval == timedelta(minutes=15)
assert result[0].active is False
async def test_delete_deployment_schedule_success(self, deployment, prefect_client):
await prefect_client.delete_deployment_schedule(
deployment.id, deployment.schedules[0].id
)
result = await prefect_client.read_deployment_schedules(deployment.id)
assert len(result) == 0
async def test_create_deployment_schedules_with_invalid_schedule(
self, prefect_client, deployment
):
deployment_id = str(deployment.id)
invalid_schedule = (
"not a valid schedule" # Assuming the client validates the schedule format
)
schedules = [(invalid_schedule, True)]
with pytest.raises(pydantic.ValidationError):
await prefect_client.create_deployment_schedules(deployment_id, schedules)
async def test_read_deployment_schedule_nonexistent(self, prefect_client):
nonexistent_deployment_id = str(uuid4())
with pytest.raises(prefect.exceptions.ObjectNotFound):
await prefect_client.read_deployment_schedules(nonexistent_deployment_id)
async def test_update_deployment_schedule_nonexistent(
self, prefect_client, deployment
):
nonexistent_schedule_id = str(uuid4())
with pytest.raises(prefect.exceptions.ObjectNotFound):
await prefect_client.update_deployment_schedule(
deployment.id, nonexistent_schedule_id, active=False
)
async def test_delete_deployment_schedule_nonexistent(
self, prefect_client, deployment
):
nonexistent_schedule_id = str(uuid4())
with pytest.raises(prefect.exceptions.ObjectNotFound):
await prefect_client.delete_deployment_schedule(
deployment.id, nonexistent_schedule_id
)
|
TestPrefectClientDeploymentSchedules
|
python
|
kubernetes-client__python
|
kubernetes/client/models/v1beta2_network_device_data.py
|
{
"start": 383,
"end": 6504
}
|
class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'hardware_address': 'str',
'interface_name': 'str',
'ips': 'list[str]'
}
attribute_map = {
'hardware_address': 'hardwareAddress',
'interface_name': 'interfaceName',
'ips': 'ips'
}
def __init__(self, hardware_address=None, interface_name=None, ips=None, local_vars_configuration=None): # noqa: E501
"""V1beta2NetworkDeviceData - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._hardware_address = None
self._interface_name = None
self._ips = None
self.discriminator = None
if hardware_address is not None:
self.hardware_address = hardware_address
if interface_name is not None:
self.interface_name = interface_name
if ips is not None:
self.ips = ips
@property
def hardware_address(self):
"""Gets the hardware_address of this V1beta2NetworkDeviceData. # noqa: E501
HardwareAddress represents the hardware address (e.g. MAC Address) of the device's network interface. Must not be longer than 128 characters. # noqa: E501
:return: The hardware_address of this V1beta2NetworkDeviceData. # noqa: E501
:rtype: str
"""
return self._hardware_address
@hardware_address.setter
def hardware_address(self, hardware_address):
"""Sets the hardware_address of this V1beta2NetworkDeviceData.
HardwareAddress represents the hardware address (e.g. MAC Address) of the device's network interface. Must not be longer than 128 characters. # noqa: E501
:param hardware_address: The hardware_address of this V1beta2NetworkDeviceData. # noqa: E501
:type: str
"""
self._hardware_address = hardware_address
@property
def interface_name(self):
"""Gets the interface_name of this V1beta2NetworkDeviceData. # noqa: E501
InterfaceName specifies the name of the network interface associated with the allocated device. This might be the name of a physical or virtual network interface being configured in the pod. Must not be longer than 256 characters. # noqa: E501
:return: The interface_name of this V1beta2NetworkDeviceData. # noqa: E501
:rtype: str
"""
return self._interface_name
@interface_name.setter
def interface_name(self, interface_name):
"""Sets the interface_name of this V1beta2NetworkDeviceData.
InterfaceName specifies the name of the network interface associated with the allocated device. This might be the name of a physical or virtual network interface being configured in the pod. Must not be longer than 256 characters. # noqa: E501
:param interface_name: The interface_name of this V1beta2NetworkDeviceData. # noqa: E501
:type: str
"""
self._interface_name = interface_name
@property
def ips(self):
"""Gets the ips of this V1beta2NetworkDeviceData. # noqa: E501
IPs lists the network addresses assigned to the device's network interface. This can include both IPv4 and IPv6 addresses. The IPs are in the CIDR notation, which includes both the address and the associated subnet mask. e.g.: \"192.0.2.5/24\" for IPv4 and \"2001:db8::5/64\" for IPv6. # noqa: E501
:return: The ips of this V1beta2NetworkDeviceData. # noqa: E501
:rtype: list[str]
"""
return self._ips
@ips.setter
def ips(self, ips):
"""Sets the ips of this V1beta2NetworkDeviceData.
IPs lists the network addresses assigned to the device's network interface. This can include both IPv4 and IPv6 addresses. The IPs are in the CIDR notation, which includes both the address and the associated subnet mask. e.g.: \"192.0.2.5/24\" for IPv4 and \"2001:db8::5/64\" for IPv6. # noqa: E501
:param ips: The ips of this V1beta2NetworkDeviceData. # noqa: E501
:type: list[str]
"""
self._ips = ips
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1beta2NetworkDeviceData):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1beta2NetworkDeviceData):
return True
return self.to_dict() != other.to_dict()
|
V1beta2NetworkDeviceData
|
python
|
huggingface__transformers
|
src/transformers/models/oneformer/modeling_oneformer.py
|
{
"start": 110231,
"end": 111372
}
|
class ____(nn.Module):
def __init__(
self,
d_model,
nhead,
dropout=0.1,
layer_norm_eps=1e-05,
):
super().__init__()
self.self_attn = OneFormerTextMapperAttention(d_model, nhead, proj_drop=dropout)
self.cross_attn = OneFormerTextMapperAttention(d_model, nhead, proj_drop=dropout)
self.norm1 = nn.LayerNorm(d_model, eps=layer_norm_eps)
self.norm2 = nn.LayerNorm(d_model, eps=layer_norm_eps)
self.norm3 = nn.LayerNorm(d_model, eps=layer_norm_eps)
self.dropout = nn.Dropout(dropout)
self.mlp = nn.Sequential(
nn.Linear(d_model, d_model * 4), nn.GELU(), nn.Dropout(dropout), nn.Linear(d_model * 4, d_model)
)
def forward(self, hidden_state, mem):
q = k = v = self.norm1(hidden_state)
hidden_state = hidden_state + self.self_attn(q, k, v)
q = self.norm2(hidden_state)
hidden_state = hidden_state + self.cross_attn(q, mem, mem)
hidden_state = hidden_state + self.dropout(self.mlp(self.norm3(hidden_state)))
return hidden_state
|
OneFormerTextTransformerDecoderLayer
|
python
|
getsentry__sentry
|
src/sentry/eventtypes/security.py
|
{
"start": 1142,
"end": 1471
}
|
class ____(SecurityEvent):
key = "csp"
def extract_metadata(self, data):
metadata = SecurityEvent.extract_metadata(self, data)
metadata["uri"] = csp.normalize_value(data["csp"].get("blocked_uri") or "")
metadata["directive"] = data["csp"].get("effective_directive")
return metadata
|
CspEvent
|
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/source-github/source_github/github_schema.py
|
{
"start": 346014,
"end": 347007
}
|
class ____(sgqlc.types.Input):
"""Autogenerated input type of UpdateEnterpriseProfile"""
__schema__ = github_schema
__field_names__ = ("enterprise_id", "name", "description", "website_url", "location", "client_mutation_id")
enterprise_id = sgqlc.types.Field(sgqlc.types.non_null(ID), graphql_name="enterpriseId")
"""The Enterprise ID to update."""
name = sgqlc.types.Field(String, graphql_name="name")
"""The name of the enterprise."""
description = sgqlc.types.Field(String, graphql_name="description")
"""The description of the enterprise."""
website_url = sgqlc.types.Field(String, graphql_name="websiteUrl")
"""The URL of the enterprise's website."""
location = sgqlc.types.Field(String, graphql_name="location")
"""The location of the enterprise."""
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
"""A unique identifier for the client performing the mutation."""
|
UpdateEnterpriseProfileInput
|
python
|
pandas-dev__pandas
|
pandas/tests/indexing/test_indexing.py
|
{
"start": 655,
"end": 20753
}
|
class ____:
"""pure get/set item & fancy indexing"""
def test_setitem_ndarray_1d(self):
# GH5508
# len of indexer vs length of the 1d ndarray
df = DataFrame(index=Index(np.arange(1, 11), dtype=np.int64))
df["foo"] = np.zeros(10, dtype=np.float64)
df["bar"] = np.zeros(10, dtype=complex)
# invalid
msg = "Must have equal len keys and value when setting with an iterable"
with pytest.raises(ValueError, match=msg):
df.loc[df.index[2:5], "bar"] = np.array([2.33j, 1.23 + 0.1j, 2.2, 1.0])
# valid
df.loc[df.index[2:6], "bar"] = np.array([2.33j, 1.23 + 0.1j, 2.2, 1.0])
result = df.loc[df.index[2:6], "bar"]
expected = Series(
[2.33j, 1.23 + 0.1j, 2.2, 1.0], index=[3, 4, 5, 6], name="bar"
)
tm.assert_series_equal(result, expected)
def test_setitem_ndarray_1d_2(self):
# GH5508
# dtype getting changed?
df = DataFrame(index=Index(np.arange(1, 11)))
df["foo"] = np.zeros(10, dtype=np.float64)
df["bar"] = np.zeros(10, dtype=complex)
msg = "Must have equal len keys and value when setting with an iterable"
with pytest.raises(ValueError, match=msg):
df[2:5] = np.arange(1, 4) * 1j
@pytest.mark.filterwarnings(
"ignore:Series.__getitem__ treating keys as positions is deprecated:"
"FutureWarning"
)
def test_getitem_ndarray_3d(self, index, frame_or_series, indexer_sli):
# GH 25567
obj = gen_obj(frame_or_series, index)
idxr = indexer_sli(obj)
nd3 = np.random.default_rng(2).integers(5, size=(2, 2, 2))
msgs = []
if frame_or_series is Series and indexer_sli in [tm.setitem, tm.iloc]:
msgs.append(r"Wrong number of dimensions. values.ndim > ndim \[3 > 1\]")
if frame_or_series is Series or indexer_sli is tm.iloc:
msgs.append(r"Buffer has wrong number of dimensions \(expected 1, got 3\)")
if indexer_sli is tm.loc or (
frame_or_series is Series and indexer_sli is tm.setitem
):
msgs.append("Cannot index with multidimensional key")
if frame_or_series is DataFrame and indexer_sli is tm.setitem:
msgs.append("Index data must be 1-dimensional")
if isinstance(index, pd.IntervalIndex) and indexer_sli is tm.iloc:
msgs.append("Index data must be 1-dimensional")
if isinstance(index, (pd.TimedeltaIndex, pd.DatetimeIndex, pd.PeriodIndex)):
msgs.append("Data must be 1-dimensional")
if len(index) == 0 or isinstance(index, pd.MultiIndex):
msgs.append("positional indexers are out-of-bounds")
if type(index) is Index and not isinstance(index._values, np.ndarray):
# e.g. Int64
msgs.append("values must be a 1D array")
# string[pyarrow]
msgs.append("only handle 1-dimensional arrays")
msg = "|".join(msgs)
potential_errors = (IndexError, ValueError, NotImplementedError)
with pytest.raises(potential_errors, match=msg):
idxr[nd3]
@pytest.mark.filterwarnings(
"ignore:Series.__setitem__ treating keys as positions is deprecated:"
"FutureWarning"
)
def test_setitem_ndarray_3d(self, index, frame_or_series, indexer_sli):
# GH 25567
obj = gen_obj(frame_or_series, index)
idxr = indexer_sli(obj)
nd3 = np.random.default_rng(2).integers(5, size=(2, 2, 2))
if indexer_sli is tm.iloc:
err = ValueError
msg = f"Cannot set values with ndim > {obj.ndim}"
else:
err = ValueError
msg = "|".join(
[
r"Buffer has wrong number of dimensions \(expected 1, got 3\)",
"Cannot set values with ndim > 1",
"Index data must be 1-dimensional",
"Data must be 1-dimensional",
"Array conditional must be same shape as self",
]
)
with pytest.raises(err, match=msg):
idxr[nd3] = 0
def test_getitem_ndarray_0d(self):
# GH#24924
key = np.array(0)
# dataframe __getitem__
df = DataFrame([[1, 2], [3, 4]])
result = df[key]
expected = Series([1, 3], name=0)
tm.assert_series_equal(result, expected)
# series __getitem__
ser = Series([1, 2])
result = ser[key]
assert result == 1
def test_inf_upcast(self):
# GH 16957
# We should be able to use np.inf as a key
# np.inf should cause an index to convert to float
# Test with np.inf in rows
df = DataFrame(columns=[0])
df.loc[1] = 1
df.loc[2] = 2
df.loc[np.inf] = 3
# make sure we can look up the value
assert df.loc[np.inf, 0] == 3
result = df.index
expected = Index([1, 2, np.inf], dtype=np.float64)
tm.assert_index_equal(result, expected)
def test_setitem_dtype_upcast(self):
# GH3216
df = DataFrame([{"a": 1}, {"a": 3, "b": 2}])
df["c"] = np.nan
assert df["c"].dtype == np.float64
with pytest.raises(TypeError, match="Invalid value"):
df.loc[0, "c"] = "foo"
@pytest.mark.parametrize("val", [3.14, "wxyz"])
def test_setitem_dtype_upcast2(self, val):
# GH10280
df = DataFrame(
np.arange(6, dtype="int64").reshape(2, 3),
index=list("ab"),
columns=["foo", "bar", "baz"],
)
left = df.copy()
with pytest.raises(TypeError, match="Invalid value"):
left.loc["a", "bar"] = val
def test_setitem_dtype_upcast3(self):
left = DataFrame(
np.arange(6, dtype="int64").reshape(2, 3) / 10.0,
index=list("ab"),
columns=["foo", "bar", "baz"],
)
with pytest.raises(TypeError, match="Invalid value"):
left.loc["a", "bar"] = "wxyz"
def test_dups_fancy_indexing(self):
# GH 3455
df = DataFrame(np.eye(3), columns=["a", "a", "b"])
result = df[["b", "a"]].columns
expected = Index(["b", "a", "a"])
tm.assert_index_equal(result, expected)
def test_dups_fancy_indexing_across_dtypes(self):
# across dtypes
df = DataFrame([[1, 2, 1.0, 2.0, 3.0, "foo", "bar"]], columns=list("aaaaaaa"))
result = DataFrame([[1, 2, 1.0, 2.0, 3.0, "foo", "bar"]])
result.columns = list("aaaaaaa") # GH#3468
# GH#3509 smoke tests for indexing with duplicate columns
df.iloc[:, 4]
result.iloc[:, 4]
tm.assert_frame_equal(df, result)
def test_dups_fancy_indexing_not_in_order(self):
# GH 3561, dups not in selected order
df = DataFrame(
{"test": [5, 7, 9, 11], "test1": [4.0, 5, 6, 7], "other": list("abcd")},
index=["A", "A", "B", "C"],
)
rows = ["C", "B"]
expected = DataFrame(
{"test": [11, 9], "test1": [7.0, 6], "other": ["d", "c"]}, index=rows
)
result = df.loc[rows]
tm.assert_frame_equal(result, expected)
result = df.loc[Index(rows)]
tm.assert_frame_equal(result, expected)
rows = ["C", "B", "E"]
with pytest.raises(KeyError, match="not in index"):
df.loc[rows]
# see GH5553, make sure we use the right indexer
rows = ["F", "G", "H", "C", "B", "E"]
with pytest.raises(KeyError, match="not in index"):
df.loc[rows]
def test_dups_fancy_indexing_only_missing_label(self, using_infer_string):
    # A list indexer containing only a missing label raises KeyError.
    # The previous if/else branches were identical except for the dtype
    # rendered in the message, so derive the dtype instead of duplicating
    # the whole check.
    dfnu = DataFrame(
        np.random.default_rng(2).standard_normal((5, 3)), index=list("AABCD")
    )
    # With string inference enabled the string index renders as
    # dtype='str'; otherwise it renders as dtype='object'.
    dtype = "str" if using_infer_string else "object"
    msg = f"\"None of [Index(['E'], dtype='{dtype}')] are in the [index]\""
    with pytest.raises(KeyError, match=re.escape(msg)):
        dfnu.loc[["E"]]
@pytest.mark.parametrize("vals", [[0, 1, 2], list("abc")])
def test_dups_fancy_indexing_missing_label(self, vals):
    # GH 4619: a duplicated list indexer that includes a missing label
    # raises KeyError regardless of the column dtype.
    frame = DataFrame({"A": vals})
    with pytest.raises(KeyError, match="not in index"):
        frame.loc[[0, 8, 0]]
def test_dups_fancy_indexing_non_unique(self):
    # Non-unique index combined with a non-unique selector that also
    # contains a missing label: must raise.
    frame = DataFrame({"test": [5, 7, 9, 11]}, index=["A", "A", "B", "C"])
    with pytest.raises(KeyError, match="not in index"):
        frame.loc[["A", "A", "E"]]
def test_dups_fancy_indexing2(self):
    # GH 5835: duplicate column labels plus a missing column label.
    frame = DataFrame(
        np.random.default_rng(2).standard_normal((5, 5)),
        columns=["A", "B", "B", "B", "A"],
    )
    with pytest.raises(KeyError, match="not in index"):
        frame.loc[:, ["A", "B", "C"]]
def test_dups_fancy_indexing3(self):
    # GH 6504: multi-axis indexing on a duplicated integer index.
    frame = DataFrame(
        np.random.default_rng(2).standard_normal((9, 2)),
        index=[1, 1, 1, 2, 2, 2, 3, 3, 3],
        columns=["a", "b"],
    )
    # Row selection by duplicated labels picks up every occurrence.
    tm.assert_frame_equal(frame.loc[[1, 2]], frame.iloc[0:6])
    # Selecting all columns is a no-op.
    tm.assert_frame_equal(frame.loc[:, ["a", "b"]], frame)
    # Combined row and column selection.
    tm.assert_frame_equal(frame.loc[[1, 2], ["a", "b"]], frame.iloc[0:6, :])
def test_duplicate_int_indexing(self, indexer_sl):
    # GH 17347: a list indexer against a duplicated integer index
    # returns every matching row.
    ser = Series(range(3), index=[1, 1, 3])
    result = indexer_sl(ser)[[1]]
    tm.assert_series_equal(result, Series(range(2), index=[1, 1]))
def test_indexing_mixed_frame_bug(self):
# GH3492
df = DataFrame(
{"a": {1: "aaa", 2: "bbb", 3: "ccc"}, "b": {1: 111, 2: 222, 3: 333}}
)
# this works, new column is created correctly
df["test"] = df["a"].apply(lambda x: "_" if x == "aaa" else x)
# this does not work, ie column test is not changed
idx = df["test"] == "_"
temp = df.loc[idx, "a"].apply(lambda x: "-----" if x == "aaa" else x)
df.loc[idx, "test"] = temp
assert df.iloc[0, 2] == "-----"
def test_multitype_list_index_access(self):
    # GH 10610: column labels mixing strings and integers.
    frame = DataFrame(
        np.random.default_rng(2).random((10, 5)), columns=["a"] + [20, 21, 22, 23]
    )
    # A list selection with any missing label raises.
    with pytest.raises(KeyError, match=re.escape("'[26, -8] not in index'")):
        frame[[22, 26, -8]]
    # A present integer label selects the full-length column.
    assert frame[21].shape[0] == frame.shape[0]
def test_set_index_nan(self):
    # GH 3586: set_index/reset_index must round-trip a frame whose
    # prospective index level ("QC") contains NaNs.
    df = DataFrame(
        {
            "PRuid": {
                17: "nonQC",
                18: "nonQC",
                19: "nonQC",
                20: "10",
                21: "11",
                22: "12",
                23: "13",
                24: "24",
                25: "35",
                26: "46",
                27: "47",
                28: "48",
                29: "59",
                30: "10",
            },
            "QC": {
                17: 0.0,
                18: 0.0,
                19: 0.0,
                20: np.nan,
                21: np.nan,
                22: np.nan,
                23: np.nan,
                24: 1.0,
                25: np.nan,
                26: np.nan,
                27: np.nan,
                28: np.nan,
                29: np.nan,
                30: np.nan,
            },
            "data": {
                17: 7.9544899999999998,
                18: 8.0142609999999994,
                19: 7.8591520000000008,
                20: 0.86140349999999999,
                21: 0.87853110000000001,
                22: 0.8427041999999999,
                23: 0.78587700000000005,
                24: 0.73062459999999996,
                25: 0.81668560000000001,
                26: 0.81927080000000008,
                27: 0.80705009999999999,
                28: 0.81440240000000008,
                29: 0.80140849999999997,
                30: 0.81307740000000006,
            },
            "year": {
                17: 2006,
                18: 2007,
                19: 2008,
                20: 1985,
                21: 1985,
                22: 1985,
                23: 1985,
                24: 1985,
                25: 1985,
                26: 1985,
                27: 1985,
                28: 1985,
                29: 1985,
                30: 1986,
            },
        }
    ).reset_index()
    # Round-trip through a MultiIndex containing the NaN level, then
    # restore the original column order before comparing.
    result = (
        df.set_index(["year", "PRuid", "QC"])
        .reset_index()
        .reindex(columns=df.columns)
    )
    tm.assert_frame_equal(result, df)
def test_multi_assign(self):
    # GH 3626, an assignment of a sub-df to a df
    # Assigns a masked selection back into the frame, first with a frame
    # RHS and then with an ndarray RHS (GH 14001).
    # set float64 to avoid upcast when setting nan
    df = DataFrame(
        {
            "FC": ["a", "b", "a", "b", "a", "b"],
            "PF": [0, 0, 0, 0, 1, 1],
            "col1": list(range(6)),
            "col2": list(range(6, 12)),
        }
    ).astype({"col2": "float64"})
    df.iloc[1, 0] = np.nan
    df2 = df.copy()
    # Mask selects every row whose FC value is present (row 1 excluded).
    mask = ~df2.FC.isna()
    cols = ["col1", "col2"]
    dft = df2 * 2
    dft.iloc[3, 3] = np.nan
    expected = DataFrame(
        {
            "FC": ["a", np.nan, "a", "b", "a", "b"],
            "PF": [0, 0, 0, 0, 1, 1],
            "col1": Series([0, 1, 4, 6, 8, 10]),
            "col2": [12, 7, 16, np.nan, 20, 22],
        }
    )
    # frame on rhs
    df2.loc[mask, cols] = dft.loc[mask, cols]
    tm.assert_frame_equal(df2, expected)
    # with an ndarray on rhs
    # coerces to float64 because values has float64 dtype
    # GH 14001
    expected = DataFrame(
        {
            "FC": ["a", np.nan, "a", "b", "a", "b"],
            "PF": [0, 0, 0, 0, 1, 1],
            "col1": [0, 1, 4, 6, 8, 10],
            "col2": [12, 7, 16, np.nan, 20, 22],
        }
    )
    df2 = df.copy()
    df2.loc[mask, cols] = dft.loc[mask, cols].values
    tm.assert_frame_equal(df2, expected)
def test_multi_assign_broadcasting_rhs(self):
    # A single Series RHS must broadcast across several target columns
    # when assigned through a boolean row mask.
    frame = DataFrame(
        {
            "A": [1, 2, 0, 0, 0],
            "B": [0, 0, 0, 10, 11],
            "C": [0, 0, 0, 10, 11],
            "D": [3, 4, 5, 6, 7],
        }
    )
    expected = frame.copy()
    mask = expected["A"] == 0
    # Build the expected result column-by-column.
    for col in ("A", "B"):
        expected.loc[mask, col] = frame["D"]
    frame.loc[frame["A"] == 0, ["A", "B"]] = frame["D"].copy()
    tm.assert_frame_equal(frame, expected)
def test_setitem_list(self):
    # GH 6043: iloc assignment of a list into a single object-dtype cell;
    # a second assignment overwrites the first.
    frame = DataFrame(index=[0, 1], columns=[0])
    frame.iloc[1, 0] = [1, 2, 3]
    frame.iloc[1, 0] = [1, 2]
    expected = DataFrame(index=[0, 1], columns=[0])
    expected.iloc[1, 0] = [1, 2]
    tm.assert_frame_equal(expected, frame)
def test_string_slice(self):
    # GH 14424: partial-date string indexing against an object-dtype
    # index of timestamps must raise KeyError, not slice.
    frame = DataFrame([1], Index([pd.Timestamp("2011-01-01")], dtype=object))
    assert frame.index._is_all_dates
    with pytest.raises(KeyError, match="'2011'"):
        frame["2011"]
    with pytest.raises(KeyError, match="'2011'"):
        frame.loc["2011", 0]
def test_string_slice_empty(self):
    # GH 14424: the same lookups on an empty frame also raise.
    frame = DataFrame()
    assert not frame.index._is_all_dates
    with pytest.raises(KeyError, match="'2011'"):
        frame["2011"]
    with pytest.raises(KeyError, match="^0$"):
        frame.loc["2011", 0]
def test_astype_assignment(self, using_infer_string):
    # GH4312 (iloc): assigning an astype'd selection back into the frame.
    # NOTE(review): the `using_infer_string` fixture is requested but never
    # used in this body — confirm whether it can be dropped.
    df_orig = DataFrame(
        [["1", "2", "3", ".4", 5, 6.0, "foo"]], columns=list("ABCDEFG")
    )
    df_orig[list("ABCDG")] = df_orig[list("ABCDG")].astype(object)
    df = df_orig.copy()
    # with the enforcement of GH#45333 in 2.0, this setting is attempted inplace,
    # so object dtype is retained
    df.iloc[:, 0:2] = df.iloc[:, 0:2].astype(np.int64)
    expected = DataFrame(
        [[1, 2, "3", ".4", 5, 6.0, "foo"]], columns=list("ABCDEFG")
    )
    expected[list("CDG")] = expected[list("CDG")].astype(object)
    expected["A"] = expected["A"].astype(object)
    expected["B"] = expected["B"].astype(object)
    tm.assert_frame_equal(df, expected)
    # GH5702 (loc): single-column variant.
    df = df_orig.copy()
    df.loc[:, "A"] = df.loc[:, "A"].astype(np.int64)
    expected = DataFrame(
        [[1, "2", "3", ".4", 5, 6.0, "foo"]], columns=list("ABCDEFG")
    )
    expected[list("ABCDG")] = expected[list("ABCDG")].astype(object)
    tm.assert_frame_equal(df, expected)
    # Multi-column .loc variant.
    df = df_orig.copy()
    df.loc[:, ["B", "C"]] = df.loc[:, ["B", "C"]].astype(np.int64)
    expected = DataFrame(
        [["1", 2, 3, ".4", 5, 6.0, "foo"]], columns=list("ABCDEFG")
    )
    expected[list("ABCDG")] = expected[list("ABCDG")].astype(object)
    tm.assert_frame_equal(df, expected)
def test_astype_assignment_full_replacements(self):
    # Full-column replacement with no NaNs: since GH#45333 (2.0) the
    # assignment happens in place, so float64 is retained even though
    # the RHS is int64.
    frame = DataFrame({"A": [1.0, 2.0, 3.0, 4.0]})
    frame.iloc[:, 0] = frame["A"].astype(np.int64)
    expected = DataFrame({"A": [1.0, 2.0, 3.0, 4.0]})
    tm.assert_frame_equal(frame, expected)
    # Same behavior through .loc.
    frame = DataFrame({"A": [1.0, 2.0, 3.0, 4.0]})
    frame.loc[:, "A"] = frame["A"].astype(np.int64)
    tm.assert_frame_equal(frame, expected)
@pytest.mark.parametrize("indexer", [tm.getitem, tm.loc])
def test_index_type_coercion(self, indexer):
    # GH 11836
    # if we have an index type and set it with something that looks
    # to numpy like the same, but is actually, not
    # (e.g. setting with a float or string '0')
    # then we need to coerce to object
    # integer indexes
    for s in [Series(range(5)), Series(range(5), index=range(1, 6))]:
        assert is_integer_dtype(s.index)
        s2 = s.copy()
        # A float key coerces the integer index to float64.
        indexer(s2)[0.1] = 0
        assert is_float_dtype(s2.index)
        assert indexer(s2)[0.1] == 0
        s2 = s.copy()
        # 0.0 compares equal to 0: updates the existing label, or appends
        # a new one when 0 is not already present.
        indexer(s2)[0.0] = 0
        exp = s.index
        if 0 not in s:
            exp = Index(s.index.tolist() + [0])
        tm.assert_index_equal(s2.index, exp)
        s2 = s.copy()
        # A string key forces the index to object dtype.
        indexer(s2)["0"] = 0
        assert is_object_dtype(s2.index)
    # float indexes: float keys leave the dtype unchanged
    for s in [Series(range(5), index=np.arange(5.0))]:
        assert is_float_dtype(s.index)
        s2 = s.copy()
        indexer(s2)[0.1] = 0
        assert is_float_dtype(s2.index)
        assert indexer(s2)[0.1] == 0
        s2 = s.copy()
        # 0.0 matches the existing 0.0 label: index unchanged.
        indexer(s2)[0.0] = 0
        tm.assert_index_equal(s2.index, s.index)
        s2 = s.copy()
        indexer(s2)["0"] = 0
        assert is_object_dtype(s2.index)
|
TestFancy
|
python
|
ray-project__ray
|
rllib/models/preprocessors.py
|
{
"start": 12332,
"end": 16616
}
|
class ____(Preprocessor):
    """Pads and batches the variable-length list value.

    Flattens a `Repeated` observation (a variable-length list of child
    observations) into a fixed-size 1-D vector: one leading slot for the
    list length, followed by `max_len` fixed-size child encodings,
    zero-padded past the actual length.
    """

    @override(Preprocessor)
    def _init_shape(self, obs_space: gym.Space, options: dict) -> List[int]:
        # Only Repeated spaces are supported; a child preprocessor is built
        # for the element space and reused for every list element.
        assert isinstance(self._obs_space, Repeated)
        child_space = obs_space.child_space
        self.child_preprocessor = get_preprocessor(child_space)(
            child_space, self._options
        )
        # The first slot encodes the list length.
        size = 1 + self.child_preprocessor.size * obs_space.max_len
        return (size,)

    @override(Preprocessor)
    def transform(self, observation: TensorType) -> np.ndarray:
        # Zero-filled output: unused trailing slots stay zero (padding).
        array = np.zeros(self.shape)
        if isinstance(observation, list):
            for elem in observation:
                self.child_preprocessor.check_shape(elem)
        else:
            pass  # ValueError will be raised in write() below.
        self.write(observation, array, 0)
        return array

    @override(Preprocessor)
    def write(self, observation: TensorType, array: np.ndarray, offset: int) -> None:
        # Validate the container type and length before encoding anything.
        if not isinstance(observation, (list, np.ndarray)):
            raise ValueError(
                "Input for {} must be list type, got {}".format(self, observation)
            )
        elif len(observation) > self._obs_space.max_len:
            raise ValueError(
                "Input {} exceeds max len of space {}".format(
                    observation, self._obs_space.max_len
                )
            )
        # The first slot encodes the list length.
        array[offset] = len(observation)
        # Each element is written by the child preprocessor at its own
        # fixed-size slice after the length slot.
        for i, elem in enumerate(observation):
            offset_i = offset + 1 + i * self.child_preprocessor.size
            self.child_preprocessor.write(elem, array, offset_i)
@OldAPIStack
def get_preprocessor(space: gym.Space, include_multi_binary=False) -> type:
    """Returns an appropriate preprocessor class for the given space.

    Args:
        space: The (single) observation space to map to a preprocessor.
        include_multi_binary: If True, also map MultiBinary spaces to
            MultiBinaryPreprocessor (usually only wanted with RLModules).

    Returns:
        A Preprocessor subclass (not an instance) suited to `space`.
    """
    _legacy_patch_shapes(space)
    obs_shape = space.shape

    if isinstance(space, (gym.spaces.Discrete, gym.spaces.MultiDiscrete)):
        preprocessor = OneHotPreprocessor
    elif obs_shape == ATARI_OBS_SHAPE:
        logger.debug(
            "Defaulting to RLlib's GenericPixelPreprocessor because input "
            "space has the atari-typical shape {}. Turn this behaviour off by setting "
            "`preprocessor_pref=None` or "
            "`preprocessor_pref='deepmind'` or disabling the preprocessing API "
            "altogether with `_disable_preprocessor_api=True`.".format(ATARI_OBS_SHAPE)
        )
        preprocessor = GenericPixelPreprocessor
    elif obs_shape == ATARI_RAM_OBS_SHAPE:
        # BUGFIX: this message previously formatted ATARI_OBS_SHAPE (the
        # pixel shape) instead of the RAM shape that actually matched, and
        # contained a stray "`." fragment with an unbalanced quote.
        logger.debug(
            "Defaulting to RLlib's AtariRamPreprocessor because input "
            "space has the atari-typical shape {}. Turn this behaviour off by setting "
            "`preprocessor_pref=None` or "
            "`preprocessor_pref='deepmind'` or disabling the preprocessing API "
            "altogether with `_disable_preprocessor_api=True`.".format(
                ATARI_RAM_OBS_SHAPE
            )
        )
        preprocessor = AtariRamPreprocessor
    elif isinstance(space, gym.spaces.Tuple):
        preprocessor = TupleFlatteningPreprocessor
    elif isinstance(space, gym.spaces.Dict):
        preprocessor = DictFlatteningPreprocessor
    elif isinstance(space, Repeated):
        preprocessor = RepeatedValuesPreprocessor
    # We usually only want to include this when using RLModules.
    elif isinstance(space, gym.spaces.MultiBinary) and include_multi_binary:
        preprocessor = MultiBinaryPreprocessor
    else:
        preprocessor = NoPreprocessor

    return preprocessor
def _legacy_patch_shapes(space: gym.Space) -> List[int]:
    """Assigns shapes to spaces that don't have shapes.

    This is only needed for older gym versions that don't set shapes properly
    for Tuple and Discrete spaces.
    """
    if not hasattr(space, "shape"):
        if isinstance(space, gym.spaces.Discrete):
            # A scalar space: empty shape tuple.
            space.shape = ()
        elif isinstance(space, gym.spaces.Tuple):
            # Recursively patch each sub-space, collecting their shapes.
            space.shape = tuple(
                _legacy_patch_shapes(sub) for sub in space.spaces
            )
    return space.shape
|
RepeatedValuesPreprocessor
|
python
|
plotly__plotly.py
|
plotly/graph_objs/volume/_colorbar.py
|
{
"start": 233,
"end": 61447
}
|
class ____(_BaseTraceHierarchyType):
_parent_path_str = "volume"
_path_str = "volume.colorbar"
_valid_props = {
"bgcolor",
"bordercolor",
"borderwidth",
"dtick",
"exponentformat",
"labelalias",
"len",
"lenmode",
"minexponent",
"nticks",
"orientation",
"outlinecolor",
"outlinewidth",
"separatethousands",
"showexponent",
"showticklabels",
"showtickprefix",
"showticksuffix",
"thickness",
"thicknessmode",
"tick0",
"tickangle",
"tickcolor",
"tickfont",
"tickformat",
"tickformatstopdefaults",
"tickformatstops",
"ticklabeloverflow",
"ticklabelposition",
"ticklabelstep",
"ticklen",
"tickmode",
"tickprefix",
"ticks",
"ticksuffix",
"ticktext",
"ticktextsrc",
"tickvals",
"tickvalssrc",
"tickwidth",
"title",
"x",
"xanchor",
"xpad",
"xref",
"y",
"yanchor",
"ypad",
"yref",
}
@property
def bgcolor(self):
"""
Sets the color of padded area.
The 'bgcolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["bgcolor"]
@bgcolor.setter
def bgcolor(self, val):
self["bgcolor"] = val
@property
def bordercolor(self):
"""
Sets the axis line color.
The 'bordercolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["bordercolor"]
@bordercolor.setter
def bordercolor(self, val):
self["bordercolor"] = val
@property
def borderwidth(self):
"""
Sets the width (in px) or the border enclosing this color bar.
The 'borderwidth' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["borderwidth"]
@borderwidth.setter
def borderwidth(self, val):
self["borderwidth"] = val
@property
def dtick(self):
"""
Sets the step in-between ticks on this axis. Use with `tick0`.
Must be a positive number, or special strings available to
"log" and "date" axes. If the axis `type` is "log", then ticks
are set every 10^(n*dtick) where n is the tick number. For
example, to set a tick mark at 1, 10, 100, 1000, ... set dtick
to 1. To set tick marks at 1, 100, 10000, ... set dtick to 2.
To set tick marks at 1, 5, 25, 125, 625, 3125, ... set dtick to
log_10(5), or 0.69897000433. "log" has several special values;
"L<f>", where `f` is a positive number, gives ticks linearly
spaced in value (but not position). For example `tick0` = 0.1,
`dtick` = "L0.5" will put ticks at 0.1, 0.6, 1.1, 1.6 etc. To
show powers of 10 plus small digits between, use "D1" (all
digits) or "D2" (only 2 and 5). `tick0` is ignored for "D1" and
"D2". If the axis `type` is "date", then you must convert the
time to milliseconds. For example, to set the interval between
ticks to one day, set `dtick` to 86400000.0. "date" also has
special values "M<n>" gives ticks spaced by a number of months.
`n` must be a positive integer. To set ticks on the 15th of
every third month, set `tick0` to "2000-01-15" and `dtick` to
"M3". To set ticks every 4 years, set `dtick` to "M48"
The 'dtick' property accepts values of any type
Returns
-------
Any
"""
return self["dtick"]
@dtick.setter
def dtick(self, val):
self["dtick"] = val
@property
def exponentformat(self):
"""
Determines a formatting rule for the tick exponents. For
example, consider the number 1,000,000,000. If "none", it
appears as 1,000,000,000. If "e", 1e+9. If "E", 1E+9. If
"power", 1x10^9 (with 9 in a super script). If "SI", 1G. If
"B", 1B. "SI" uses prefixes from "femto" f (10^-15) to "tera" T
(10^12). *SI extended* covers instead the full SI range from
"quecto" q (10^-30) to "quetta" Q (10^30). If "SI" or *SI
extended* is used and the exponent is beyond the above ranges,
the formatting rule will automatically be switched to the power
notation.
The 'exponentformat' property is an enumeration that may be specified as:
- One of the following enumeration values:
['none', 'e', 'E', 'power', 'SI', 'B', 'SI extended']
Returns
-------
Any
"""
return self["exponentformat"]
@exponentformat.setter
def exponentformat(self, val):
self["exponentformat"] = val
@property
def labelalias(self):
"""
Replacement text for specific tick or hover labels. For example
using {US: 'USA', CA: 'Canada'} changes US to USA and CA to
Canada. The labels we would have shown must match the keys
exactly, after adding any tickprefix or ticksuffix. For
negative numbers the minus sign symbol used (U+2212) is wider
than the regular ascii dash. That means you need to use −1
instead of -1. labelalias can be used with any axis type, and
both keys (if needed) and values (if desired) can include html-
like tags or MathJax.
The 'labelalias' property accepts values of any type
Returns
-------
Any
"""
return self["labelalias"]
@labelalias.setter
def labelalias(self, val):
self["labelalias"] = val
@property
def len(self):
"""
Sets the length of the color bar This measure excludes the
padding of both ends. That is, the color bar length is this
length minus the padding on both ends.
The 'len' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["len"]
@len.setter
def len(self, val):
self["len"] = val
@property
def lenmode(self):
"""
Determines whether this color bar's length (i.e. the measure in
the color variation direction) is set in units of plot
"fraction" or in *pixels. Use `len` to set the value.
The 'lenmode' property is an enumeration that may be specified as:
- One of the following enumeration values:
['fraction', 'pixels']
Returns
-------
Any
"""
return self["lenmode"]
@lenmode.setter
def lenmode(self, val):
self["lenmode"] = val
@property
def minexponent(self):
"""
Hide SI prefix for 10^n if |n| is below this number. This only
has an effect when `tickformat` is "SI" or "B".
The 'minexponent' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["minexponent"]
@minexponent.setter
def minexponent(self, val):
self["minexponent"] = val
@property
def nticks(self):
"""
Specifies the maximum number of ticks for the particular axis.
The actual number of ticks will be chosen automatically to be
less than or equal to `nticks`. Has an effect only if
`tickmode` is set to "auto".
The 'nticks' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [0, 9223372036854775807]
Returns
-------
int
"""
return self["nticks"]
@nticks.setter
def nticks(self, val):
self["nticks"] = val
@property
def orientation(self):
"""
Sets the orientation of the colorbar.
The 'orientation' property is an enumeration that may be specified as:
- One of the following enumeration values:
['h', 'v']
Returns
-------
Any
"""
return self["orientation"]
@orientation.setter
def orientation(self, val):
self["orientation"] = val
@property
def outlinecolor(self):
"""
Sets the axis line color.
The 'outlinecolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["outlinecolor"]
@outlinecolor.setter
def outlinecolor(self, val):
self["outlinecolor"] = val
@property
def outlinewidth(self):
"""
Sets the width (in px) of the axis line.
The 'outlinewidth' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["outlinewidth"]
@outlinewidth.setter
def outlinewidth(self, val):
self["outlinewidth"] = val
@property
def separatethousands(self):
"""
If "true", even 4-digit integers are separated
The 'separatethousands' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["separatethousands"]
@separatethousands.setter
def separatethousands(self, val):
self["separatethousands"] = val
@property
def showexponent(self):
"""
If "all", all exponents are shown besides their significands.
If "first", only the exponent of the first tick is shown. If
"last", only the exponent of the last tick is shown. If "none",
no exponents appear.
The 'showexponent' property is an enumeration that may be specified as:
- One of the following enumeration values:
['all', 'first', 'last', 'none']
Returns
-------
Any
"""
return self["showexponent"]
@showexponent.setter
def showexponent(self, val):
self["showexponent"] = val
@property
def showticklabels(self):
"""
Determines whether or not the tick labels are drawn.
The 'showticklabels' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["showticklabels"]
@showticklabels.setter
def showticklabels(self, val):
self["showticklabels"] = val
@property
def showtickprefix(self):
"""
If "all", all tick labels are displayed with a prefix. If
"first", only the first tick is displayed with a prefix. If
"last", only the last tick is displayed with a suffix. If
"none", tick prefixes are hidden.
The 'showtickprefix' property is an enumeration that may be specified as:
- One of the following enumeration values:
['all', 'first', 'last', 'none']
Returns
-------
Any
"""
return self["showtickprefix"]
@showtickprefix.setter
def showtickprefix(self, val):
self["showtickprefix"] = val
@property
def showticksuffix(self):
"""
Same as `showtickprefix` but for tick suffixes.
The 'showticksuffix' property is an enumeration that may be specified as:
- One of the following enumeration values:
['all', 'first', 'last', 'none']
Returns
-------
Any
"""
return self["showticksuffix"]
@showticksuffix.setter
def showticksuffix(self, val):
self["showticksuffix"] = val
@property
def thickness(self):
"""
Sets the thickness of the color bar This measure excludes the
size of the padding, ticks and labels.
The 'thickness' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["thickness"]
@thickness.setter
def thickness(self, val):
self["thickness"] = val
@property
def thicknessmode(self):
"""
Determines whether this color bar's thickness (i.e. the measure
in the constant color direction) is set in units of plot
"fraction" or in "pixels". Use `thickness` to set the value.
The 'thicknessmode' property is an enumeration that may be specified as:
- One of the following enumeration values:
['fraction', 'pixels']
Returns
-------
Any
"""
return self["thicknessmode"]
@thicknessmode.setter
def thicknessmode(self, val):
self["thicknessmode"] = val
@property
def tick0(self):
"""
Sets the placement of the first tick on this axis. Use with
`dtick`. If the axis `type` is "log", then you must take the
log of your starting tick (e.g. to set the starting tick to
100, set the `tick0` to 2) except when `dtick`=*L<f>* (see
`dtick` for more info). If the axis `type` is "date", it should
be a date string, like date data. If the axis `type` is
"category", it should be a number, using the scale where each
category is assigned a serial number from zero in the order it
appears.
The 'tick0' property accepts values of any type
Returns
-------
Any
"""
return self["tick0"]
@tick0.setter
def tick0(self, val):
self["tick0"] = val
@property
def tickangle(self):
"""
Sets the angle of the tick labels with respect to the
horizontal. For example, a `tickangle` of -90 draws the tick
labels vertically.
The 'tickangle' property is a angle (in degrees) that may be
specified as a number between -180 and 180.
Numeric values outside this range are converted to the equivalent value
(e.g. 270 is converted to -90).
Returns
-------
int|float
"""
return self["tickangle"]
@tickangle.setter
def tickangle(self, val):
self["tickangle"] = val
@property
def tickcolor(self):
"""
Sets the tick color.
The 'tickcolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["tickcolor"]
@tickcolor.setter
def tickcolor(self, val):
self["tickcolor"] = val
@property
def tickfont(self):
"""
Sets the color bar's tick label font
The 'tickfont' property is an instance of Tickfont
that may be specified as:
- An instance of :class:`plotly.graph_objs.volume.colorbar.Tickfont`
- A dict of string/value properties that will be passed
to the Tickfont constructor
Returns
-------
plotly.graph_objs.volume.colorbar.Tickfont
"""
return self["tickfont"]
@tickfont.setter
def tickfont(self, val):
self["tickfont"] = val
@property
def tickformat(self):
"""
Sets the tick label formatting rule using d3 formatting mini-
languages which are very similar to those in Python. For
numbers, see:
https://github.com/d3/d3-format/tree/v1.4.5#d3-format. And for
dates see: https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format. We add two items to d3's date
formatter: "%h" for half of the year as a decimal number as
well as "%{n}f" for fractional seconds with n digits. For
example, *2016-10-13 09:15:23.456* with tickformat
"%H~%M~%S.%2f" would display "09~15~23.46"
The 'tickformat' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["tickformat"]
@tickformat.setter
def tickformat(self, val):
self["tickformat"] = val
@property
def tickformatstops(self):
"""
The 'tickformatstops' property is a tuple of instances of
Tickformatstop that may be specified as:
- A list or tuple of instances of plotly.graph_objs.volume.colorbar.Tickformatstop
- A list or tuple of dicts of string/value properties that
will be passed to the Tickformatstop constructor
Returns
-------
tuple[plotly.graph_objs.volume.colorbar.Tickformatstop]
"""
return self["tickformatstops"]
@tickformatstops.setter
def tickformatstops(self, val):
self["tickformatstops"] = val
@property
def tickformatstopdefaults(self):
"""
When used in a template (as
layout.template.data.volume.colorbar.tickformatstopdefaults),
sets the default property values to use for elements of
volume.colorbar.tickformatstops
The 'tickformatstopdefaults' property is an instance of Tickformatstop
that may be specified as:
- An instance of :class:`plotly.graph_objs.volume.colorbar.Tickformatstop`
- A dict of string/value properties that will be passed
to the Tickformatstop constructor
Returns
-------
plotly.graph_objs.volume.colorbar.Tickformatstop
"""
return self["tickformatstopdefaults"]
@tickformatstopdefaults.setter
def tickformatstopdefaults(self, val):
self["tickformatstopdefaults"] = val
@property
def ticklabeloverflow(self):
"""
Determines how we handle tick labels that would overflow either
the graph div or the domain of the axis. The default value for
inside tick labels is *hide past domain*. In other cases the
default is *hide past div*.
The 'ticklabeloverflow' property is an enumeration that may be specified as:
- One of the following enumeration values:
['allow', 'hide past div', 'hide past domain']
Returns
-------
Any
"""
return self["ticklabeloverflow"]
@ticklabeloverflow.setter
def ticklabeloverflow(self, val):
self["ticklabeloverflow"] = val
@property
def ticklabelposition(self):
"""
Determines where tick labels are drawn relative to the ticks.
Left and right options are used when `orientation` is "h", top
and bottom when `orientation` is "v".
The 'ticklabelposition' property is an enumeration that may be specified as:
- One of the following enumeration values:
['outside', 'inside', 'outside top', 'inside top',
'outside left', 'inside left', 'outside right', 'inside
right', 'outside bottom', 'inside bottom']
Returns
-------
Any
"""
return self["ticklabelposition"]
@ticklabelposition.setter
def ticklabelposition(self, val):
self["ticklabelposition"] = val
@property
def ticklabelstep(self):
"""
Sets the spacing between tick labels as compared to the spacing
between ticks. A value of 1 (default) means each tick gets a
label. A value of 2 means shows every 2nd label. A larger value
n means only every nth tick is labeled. `tick0` determines
which labels are shown. Not implemented for axes with `type`
"log" or "multicategory", or when `tickmode` is "array".
The 'ticklabelstep' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [1, 9223372036854775807]
Returns
-------
int
"""
return self["ticklabelstep"]
@ticklabelstep.setter
def ticklabelstep(self, val):
self["ticklabelstep"] = val
@property
def ticklen(self):
"""
Sets the tick length (in px).
The 'ticklen' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["ticklen"]
@ticklen.setter
def ticklen(self, val):
self["ticklen"] = val
@property
def tickmode(self):
"""
Sets the tick mode for this axis. If "auto", the number of
ticks is set via `nticks`. If "linear", the placement of the
ticks is determined by a starting position `tick0` and a tick
step `dtick` ("linear" is the default value if `tick0` and
`dtick` are provided). If "array", the placement of the ticks
is set via `tickvals` and the tick text is `ticktext`. ("array"
is the default value if `tickvals` is provided).
The 'tickmode' property is an enumeration that may be specified as:
- One of the following enumeration values:
['auto', 'linear', 'array']
Returns
-------
Any
"""
return self["tickmode"]
@tickmode.setter
def tickmode(self, val):
self["tickmode"] = val
@property
def tickprefix(self):
"""
Sets a tick label prefix.
The 'tickprefix' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["tickprefix"]
@tickprefix.setter
def tickprefix(self, val):
self["tickprefix"] = val
@property
def ticks(self):
"""
Determines whether ticks are drawn or not. If "", this axis'
ticks are not drawn. If "outside" ("inside"), this axis' are
drawn outside (inside) the axis lines.
The 'ticks' property is an enumeration that may be specified as:
- One of the following enumeration values:
['outside', 'inside', '']
Returns
-------
Any
"""
return self["ticks"]
@ticks.setter
def ticks(self, val):
self["ticks"] = val
@property
def ticksuffix(self):
"""
Sets a tick label suffix.
The 'ticksuffix' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["ticksuffix"]
@ticksuffix.setter
def ticksuffix(self, val):
self["ticksuffix"] = val
@property
def ticktext(self):
"""
Sets the text displayed at the ticks position via `tickvals`.
Only has an effect if `tickmode` is set to "array". Used with
`tickvals`.
The 'ticktext' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["ticktext"]
@ticktext.setter
def ticktext(self, val):
self["ticktext"] = val
@property
def ticktextsrc(self):
    """Chart Studio Cloud source reference for `ticktext`.

    Must be a string or a plotly.grid_objs.Column object.

    Returns
    -------
    str
    """
    return self["ticktextsrc"]

@ticktextsrc.setter
def ticktextsrc(self, val):
    self["ticktextsrc"] = val
@property
def tickvals(self):
    """Axis values at which ticks appear.

    Only takes effect when `tickmode` is "array"; used together with
    `ticktext`. May be given as a tuple, list, numpy array, or pandas
    Series.

    Returns
    -------
    numpy.ndarray
    """
    return self["tickvals"]

@tickvals.setter
def tickvals(self, val):
    self["tickvals"] = val
@property
def tickvalssrc(self):
    """Chart Studio Cloud source reference for `tickvals`.

    Must be a string or a plotly.grid_objs.Column object.

    Returns
    -------
    str
    """
    return self["tickvalssrc"]

@tickvalssrc.setter
def tickvalssrc(self, val):
    self["tickvalssrc"] = val
@property
def tickwidth(self):
    """Tick width in pixels.

    An int or float in the interval [0, inf].

    Returns
    -------
    int|float
    """
    return self["tickwidth"]

@tickwidth.setter
def tickwidth(self, val):
    self["tickwidth"] = val
@property
def title(self):
    """The colorbar's title settings.

    May be given as an instance of
    :class:`plotly.graph_objs.volume.colorbar.Title` or as a dict of
    string/value properties to pass to the Title constructor.

    Returns
    -------
    plotly.graph_objs.volume.colorbar.Title
    """
    return self["title"]

@title.setter
def title(self, val):
    self["title"] = val
@property
def x(self):
    """Horizontal position of the color bar, relative to `xref`.

    Expressed in plot fraction. With `xref` "paper" the default is 1.02
    for vertical bars and 0.5 for horizontal ones; with `xref`
    "container" it is 1 and 0.5 respectively. Must lie in [0, 1] for
    "container" and in [-2, 3] for "paper".

    Returns
    -------
    int|float
    """
    return self["x"]

@x.setter
def x(self, val):
    self["x"] = val
@property
def xanchor(self):
    """Horizontal anchor of the color bar.

    Binds `x` to the "left", "center" or "right" edge of the bar.
    Defaults to "left" for vertical bars and "center" for horizontal
    ones.

    One of: ['left', 'center', 'right']

    Returns
    -------
    Any
    """
    return self["xanchor"]

@xanchor.setter
def xanchor(self, val):
    self["xanchor"] = val
@property
def xpad(self):
    """Padding along the x direction, in pixels.

    An int or float in the interval [0, inf].

    Returns
    -------
    int|float
    """
    return self["xpad"]

@xpad.setter
def xpad(self, val):
    self["xpad"] = val
@property
def xref(self):
    """Reference frame for `x`.

    "container" spans the full `width` of the plot; "paper" spans only
    the plotting area.

    One of: ['container', 'paper']

    Returns
    -------
    Any
    """
    return self["xref"]

@xref.setter
def xref(self, val):
    self["xref"] = val
@property
def y(self):
    """Vertical position of the color bar, relative to `yref`.

    Expressed in plot fraction. With `yref` "paper" the default is 0.5
    for vertical bars and 1.02 for horizontal ones; with `yref`
    "container" it is 0.5 and 1 respectively. Must lie in [0, 1] for
    "container" and in [-2, 3] for "paper".

    Returns
    -------
    int|float
    """
    return self["y"]

@y.setter
def y(self, val):
    self["y"] = val
@property
def yanchor(self):
    """Vertical anchor of the color bar.

    Binds `y` to the "top", "middle" or "bottom" edge of the bar.
    Defaults to "middle" for vertical bars and "bottom" for horizontal
    ones.

    One of: ['top', 'middle', 'bottom']

    Returns
    -------
    Any
    """
    return self["yanchor"]

@yanchor.setter
def yanchor(self, val):
    self["yanchor"] = val
@property
def ypad(self):
    """Padding along the y direction, in pixels.

    An int or float in the interval [0, inf].

    Returns
    -------
    int|float
    """
    return self["ypad"]

@ypad.setter
def ypad(self, val):
    self["ypad"] = val
@property
def yref(self):
    """Reference frame for `y`.

    "container" spans the full `height` of the plot; "paper" spans only
    the plotting area.

    One of: ['container', 'paper']

    Returns
    -------
    Any
    """
    return self["yref"]

@yref.setter
def yref(self, val):
    self["yref"] = val
@property
def _prop_descriptions(self):
    # Machine-generated plain-text description of every ColorBar property,
    # keyed by name. Returned verbatim (the trailing backslash suppresses the
    # leading newline); presumably consumed when building the class/constructor
    # docstrings — confirm against the base plotly type.
    return """\
bgcolor
Sets the color of padded area.
bordercolor
Sets the axis line color.
borderwidth
Sets the width (in px) or the border enclosing this
color bar.
dtick
Sets the step in-between ticks on this axis. Use with
`tick0`. Must be a positive number, or special strings
available to "log" and "date" axes. If the axis `type`
is "log", then ticks are set every 10^(n*dtick) where n
is the tick number. For example, to set a tick mark at
1, 10, 100, 1000, ... set dtick to 1. To set tick marks
at 1, 100, 10000, ... set dtick to 2. To set tick marks
at 1, 5, 25, 125, 625, 3125, ... set dtick to
log_10(5), or 0.69897000433. "log" has several special
values; "L<f>", where `f` is a positive number, gives
ticks linearly spaced in value (but not position). For
example `tick0` = 0.1, `dtick` = "L0.5" will put ticks
at 0.1, 0.6, 1.1, 1.6 etc. To show powers of 10 plus
small digits between, use "D1" (all digits) or "D2"
(only 2 and 5). `tick0` is ignored for "D1" and "D2".
If the axis `type` is "date", then you must convert the
time to milliseconds. For example, to set the interval
between ticks to one day, set `dtick` to 86400000.0.
"date" also has special values "M<n>" gives ticks
spaced by a number of months. `n` must be a positive
integer. To set ticks on the 15th of every third month,
set `tick0` to "2000-01-15" and `dtick` to "M3". To set
ticks every 4 years, set `dtick` to "M48"
exponentformat
Determines a formatting rule for the tick exponents.
For example, consider the number 1,000,000,000. If
"none", it appears as 1,000,000,000. If "e", 1e+9. If
"E", 1E+9. If "power", 1x10^9 (with 9 in a super
script). If "SI", 1G. If "B", 1B. "SI" uses prefixes
from "femto" f (10^-15) to "tera" T (10^12). *SI
extended* covers instead the full SI range from
"quecto" q (10^-30) to "quetta" Q (10^30). If "SI" or
*SI extended* is used and the exponent is beyond the
above ranges, the formatting rule will automatically be
switched to the power notation.
labelalias
Replacement text for specific tick or hover labels. For
example using {US: 'USA', CA: 'Canada'} changes US to
USA and CA to Canada. The labels we would have shown
must match the keys exactly, after adding any
tickprefix or ticksuffix. For negative numbers the
minus sign symbol used (U+2212) is wider than the
regular ascii dash. That means you need to use −1
instead of -1. labelalias can be used with any axis
type, and both keys (if needed) and values (if desired)
can include html-like tags or MathJax.
len
Sets the length of the color bar This measure excludes
the padding of both ends. That is, the color bar length
is this length minus the padding on both ends.
lenmode
Determines whether this color bar's length (i.e. the
measure in the color variation direction) is set in
units of plot "fraction" or in *pixels. Use `len` to
set the value.
minexponent
Hide SI prefix for 10^n if |n| is below this number.
This only has an effect when `tickformat` is "SI" or
"B".
nticks
Specifies the maximum number of ticks for the
particular axis. The actual number of ticks will be
chosen automatically to be less than or equal to
`nticks`. Has an effect only if `tickmode` is set to
"auto".
orientation
Sets the orientation of the colorbar.
outlinecolor
Sets the axis line color.
outlinewidth
Sets the width (in px) of the axis line.
separatethousands
If "true", even 4-digit integers are separated
showexponent
If "all", all exponents are shown besides their
significands. If "first", only the exponent of the
first tick is shown. If "last", only the exponent of
the last tick is shown. If "none", no exponents appear.
showticklabels
Determines whether or not the tick labels are drawn.
showtickprefix
If "all", all tick labels are displayed with a prefix.
If "first", only the first tick is displayed with a
prefix. If "last", only the last tick is displayed with
a suffix. If "none", tick prefixes are hidden.
showticksuffix
Same as `showtickprefix` but for tick suffixes.
thickness
Sets the thickness of the color bar This measure
excludes the size of the padding, ticks and labels.
thicknessmode
Determines whether this color bar's thickness (i.e. the
measure in the constant color direction) is set in
units of plot "fraction" or in "pixels". Use
`thickness` to set the value.
tick0
Sets the placement of the first tick on this axis. Use
with `dtick`. If the axis `type` is "log", then you
must take the log of your starting tick (e.g. to set
the starting tick to 100, set the `tick0` to 2) except
when `dtick`=*L<f>* (see `dtick` for more info). If the
axis `type` is "date", it should be a date string, like
date data. If the axis `type` is "category", it should
be a number, using the scale where each category is
assigned a serial number from zero in the order it
appears.
tickangle
Sets the angle of the tick labels with respect to the
horizontal. For example, a `tickangle` of -90 draws the
tick labels vertically.
tickcolor
Sets the tick color.
tickfont
Sets the color bar's tick label font
tickformat
Sets the tick label formatting rule using d3 formatting
mini-languages which are very similar to those in
Python. For numbers, see:
https://github.com/d3/d3-format/tree/v1.4.5#d3-format.
And for dates see: https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format. We add two items to
d3's date formatter: "%h" for half of the year as a
decimal number as well as "%{n}f" for fractional
seconds with n digits. For example, *2016-10-13
09:15:23.456* with tickformat "%H~%M~%S.%2f" would
display "09~15~23.46"
tickformatstops
A tuple of :class:`plotly.graph_objects.volume.colorbar
.Tickformatstop` instances or dicts with compatible
properties
tickformatstopdefaults
When used in a template (as layout.template.data.volume
.colorbar.tickformatstopdefaults), sets the default
property values to use for elements of
volume.colorbar.tickformatstops
ticklabeloverflow
Determines how we handle tick labels that would
overflow either the graph div or the domain of the
axis. The default value for inside tick labels is *hide
past domain*. In other cases the default is *hide past
div*.
ticklabelposition
Determines where tick labels are drawn relative to the
ticks. Left and right options are used when
`orientation` is "h", top and bottom when `orientation`
is "v".
ticklabelstep
Sets the spacing between tick labels as compared to the
spacing between ticks. A value of 1 (default) means
each tick gets a label. A value of 2 means shows every
2nd label. A larger value n means only every nth tick
is labeled. `tick0` determines which labels are shown.
Not implemented for axes with `type` "log" or
"multicategory", or when `tickmode` is "array".
ticklen
Sets the tick length (in px).
tickmode
Sets the tick mode for this axis. If "auto", the number
of ticks is set via `nticks`. If "linear", the
placement of the ticks is determined by a starting
position `tick0` and a tick step `dtick` ("linear" is
the default value if `tick0` and `dtick` are provided).
If "array", the placement of the ticks is set via
`tickvals` and the tick text is `ticktext`. ("array" is
the default value if `tickvals` is provided).
tickprefix
Sets a tick label prefix.
ticks
Determines whether ticks are drawn or not. If "", this
axis' ticks are not drawn. If "outside" ("inside"),
this axis' are drawn outside (inside) the axis lines.
ticksuffix
Sets a tick label suffix.
ticktext
Sets the text displayed at the ticks position via
`tickvals`. Only has an effect if `tickmode` is set to
"array". Used with `tickvals`.
ticktextsrc
Sets the source reference on Chart Studio Cloud for
`ticktext`.
tickvals
Sets the values at which ticks on this axis appear.
Only has an effect if `tickmode` is set to "array".
Used with `ticktext`.
tickvalssrc
Sets the source reference on Chart Studio Cloud for
`tickvals`.
tickwidth
Sets the tick width (in px).
title
:class:`plotly.graph_objects.volume.colorbar.Title`
instance or dict with compatible properties
x
Sets the x position with respect to `xref` of the color
bar (in plot fraction). When `xref` is "paper",
defaults to 1.02 when `orientation` is "v" and 0.5 when
`orientation` is "h". When `xref` is "container",
defaults to 1 when `orientation` is "v" and 0.5 when
`orientation` is "h". Must be between 0 and 1 if `xref`
is "container" and between "-2" and 3 if `xref` is
"paper".
xanchor
Sets this color bar's horizontal position anchor. This
anchor binds the `x` position to the "left", "center"
or "right" of the color bar. Defaults to "left" when
`orientation` is "v" and "center" when `orientation` is
"h".
xpad
Sets the amount of padding (in px) along the x
direction.
xref
Sets the container `x` refers to. "container" spans the
entire `width` of the plot. "paper" refers to the width
of the plotting area only.
y
Sets the y position with respect to `yref` of the color
bar (in plot fraction). When `yref` is "paper",
defaults to 0.5 when `orientation` is "v" and 1.02 when
`orientation` is "h". When `yref` is "container",
defaults to 0.5 when `orientation` is "v" and 1 when
`orientation` is "h". Must be between 0 and 1 if `yref`
is "container" and between "-2" and 3 if `yref` is
"paper".
yanchor
Sets this color bar's vertical position anchor This
anchor binds the `y` position to the "top", "middle" or
"bottom" of the color bar. Defaults to "middle" when
`orientation` is "v" and "bottom" when `orientation` is
"h".
ypad
Sets the amount of padding (in px) along the y
direction.
yref
Sets the container `y` refers to. "container" spans the
entire `height` of the plot. "paper" refers to the
height of the plotting area only.
"""
def __init__(
    self,
    arg=None,
    bgcolor=None,
    bordercolor=None,
    borderwidth=None,
    dtick=None,
    exponentformat=None,
    labelalias=None,
    len=None,
    lenmode=None,
    minexponent=None,
    nticks=None,
    orientation=None,
    outlinecolor=None,
    outlinewidth=None,
    separatethousands=None,
    showexponent=None,
    showticklabels=None,
    showtickprefix=None,
    showticksuffix=None,
    thickness=None,
    thicknessmode=None,
    tick0=None,
    tickangle=None,
    tickcolor=None,
    tickfont=None,
    tickformat=None,
    tickformatstops=None,
    tickformatstopdefaults=None,
    ticklabeloverflow=None,
    ticklabelposition=None,
    ticklabelstep=None,
    ticklen=None,
    tickmode=None,
    tickprefix=None,
    ticks=None,
    ticksuffix=None,
    ticktext=None,
    ticktextsrc=None,
    tickvals=None,
    tickvalssrc=None,
    tickwidth=None,
    title=None,
    x=None,
    xanchor=None,
    xpad=None,
    xref=None,
    y=None,
    yanchor=None,
    ypad=None,
    yref=None,
    **kwargs,
):
    """Construct a new ColorBar object.

    Each keyword argument corresponds to the like-named colorbar
    property (see the class property docstrings / `_prop_descriptions`
    for per-property details). Values passed explicitly take precedence
    over the same keys in `arg`.

    Parameters
    ----------
    arg
        dict of properties compatible with this constructor or an
        instance of :class:`plotly.graph_objs.volume.ColorBar`

    Returns
    -------
    ColorBar
    """
    super().__init__("colorbar")

    # Internal fast path: adopt an existing parent and skip property setup.
    if "_parent" in kwargs:
        self._parent = kwargs["_parent"]
        return

    # Normalize `arg` into a plain dict of fallback property values.
    if arg is None:
        arg = {}
    elif isinstance(arg, self.__class__):
        arg = arg.to_plotly_json()
    elif isinstance(arg, dict):
        # Shallow-copy so popping/merging below never mutates the caller's dict.
        arg = _copy.copy(arg)
    else:
        raise ValueError("""\
The first argument to the plotly.graph_objs.volume.ColorBar
constructor must be a dict or
an instance of :class:`plotly.graph_objs.volume.ColorBar`""")

    self._skip_invalid = kwargs.pop("skip_invalid", False)
    self._validate = kwargs.pop("_validate", True)

    # Apply each property in a fixed order; `_set_property` falls back to
    # the corresponding entry of `arg` when the explicit value is None.
    for prop_name, prop_value in (
        ("bgcolor", bgcolor),
        ("bordercolor", bordercolor),
        ("borderwidth", borderwidth),
        ("dtick", dtick),
        ("exponentformat", exponentformat),
        ("labelalias", labelalias),
        ("len", len),
        ("lenmode", lenmode),
        ("minexponent", minexponent),
        ("nticks", nticks),
        ("orientation", orientation),
        ("outlinecolor", outlinecolor),
        ("outlinewidth", outlinewidth),
        ("separatethousands", separatethousands),
        ("showexponent", showexponent),
        ("showticklabels", showticklabels),
        ("showtickprefix", showtickprefix),
        ("showticksuffix", showticksuffix),
        ("thickness", thickness),
        ("thicknessmode", thicknessmode),
        ("tick0", tick0),
        ("tickangle", tickangle),
        ("tickcolor", tickcolor),
        ("tickfont", tickfont),
        ("tickformat", tickformat),
        ("tickformatstops", tickformatstops),
        ("tickformatstopdefaults", tickformatstopdefaults),
        ("ticklabeloverflow", ticklabeloverflow),
        ("ticklabelposition", ticklabelposition),
        ("ticklabelstep", ticklabelstep),
        ("ticklen", ticklen),
        ("tickmode", tickmode),
        ("tickprefix", tickprefix),
        ("ticks", ticks),
        ("ticksuffix", ticksuffix),
        ("ticktext", ticktext),
        ("ticktextsrc", ticktextsrc),
        ("tickvals", tickvals),
        ("tickvalssrc", tickvalssrc),
        ("tickwidth", tickwidth),
        ("title", title),
        ("x", x),
        ("xanchor", xanchor),
        ("xpad", xpad),
        ("xref", xref),
        ("y", y),
        ("yanchor", yanchor),
        ("ypad", ypad),
        ("yref", yref),
    ):
        self._set_property(prop_name, arg, prop_value)

    # Forward any remaining entries (arg leftovers plus extra kwargs).
    self._process_kwargs(**dict(arg, **kwargs))
    self._skip_invalid = False
|
ColorBar
|
python
|
sympy__sympy
|
sympy/tensor/array/sparse_ndim_array.py
|
{
"start": 3073,
"end": 4221
}
|
class ____(SparseNDimArray, ImmutableNDimArray): # type: ignore
def __new__(cls, iterable=None, shape=None, **kwargs):
shape, flat_list = cls._handle_ndarray_creation_inputs(iterable, shape, **kwargs)
shape = Tuple(*map(_sympify, shape))
cls._check_special_bounds(flat_list, shape)
loop_size = functools.reduce(lambda x,y: x*y, shape) if shape else len(flat_list)
# Sparse array:
if isinstance(flat_list, (dict, Dict)):
sparse_array = Dict(flat_list)
else:
sparse_array = {}
for i, el in enumerate(flatten(flat_list)):
if el != 0:
sparse_array[i] = _sympify(el)
sparse_array = Dict(sparse_array)
self = Basic.__new__(cls, sparse_array, shape, **kwargs)
self._shape = shape
self._rank = len(shape)
self._loop_size = loop_size
self._sparse_array = sparse_array
return self
def __setitem__(self, index, value):
raise TypeError("immutable N-dim array")
def as_mutable(self):
return MutableSparseNDimArray(self)
|
ImmutableSparseNDimArray
|
python
|
getsentry__sentry
|
src/sentry/api/serializers/models/userrollback.py
|
{
"start": 254,
"end": 427
}
|
class ____(TypedDict):
organization: RollbackOrganizationSerializerResponse
user: RollbackUserSerializerResponse
data: dict # JSON Blob
|
RollbackSerializerResponse
|
python
|
google__jax
|
jax/_src/pallas/mosaic/sc_primitives.py
|
{
"start": 15166,
"end": 28851
}
|
class ____(jax_core.Effect):
pass
effects.control_flow_allowed_effects.add_type(MemoryEffect)
effects.lowerable_effects.add_type(MemoryEffect)
_memory_effect = MemoryEffect()
barrier_p = jax_core.Primitive("barrier")
barrier_p.multiple_results = True
@barrier_p.def_effectful_abstract_eval
def _barrier_abstract_eval():
return (), {_memory_effect}
@sc_lowering.register_lowering_rule(barrier_p)
def _barrier_lowering_rule(ctx: sc_lowering.LoweringRuleContext):
ix = ir.IndexType.get()
tpu.barrier(arith.constant(ix, ir.IntegerAttr.get(ix, 0)))
return ()
def subcore_barrier():
"""Blocks until all subcores on the same core reach this instruction.
The barrier must be used with the vector subcore, either via
:class:jax.experimental.pallas.tpu_sc.VectorSubcoreMesh or by specifying
```
pltpu.CompilerParams(
kernel_type=pltpu.KernelType.SC_VECTOR_SUBCORE,
dimension_semantics[..., "subcore_parallel", ...])
```
to ``pallas_call``.
"""
barrier_p.bind()
scan_count_p = jax_core.Primitive("unique")
scan_count_p.multiple_results = True
@scan_count_p.def_abstract_eval
def _scan_count_abstract_eval(x, mask):
if x.dtype not in (jnp.uint32, jnp.int32, jnp.float32):
raise NotImplementedError(
f"x.dtype={x.dtype} must be uint32, int32 or float32")
if not jnp.issubdtype(mask.dtype, jnp.bool):
raise TypeError(f"mask.dtype={mask.dtype} is not a boolean dtype")
if x.shape != mask.shape:
raise ValueError(f"x.shape={x.shape} != mask.shape={mask.shape}")
return jax_core.ShapedArray(x.shape, jnp.int32), mask
@sc_lowering.register_lowering_rule(scan_count_p)
def _scan_count_lowering_rule(ctx: sc_lowering.LoweringRuleContext, x, mask):
del ctx # Unused.
# Reverse, because the MLIR op returns the mask first.
return tpu.scan_count(mask, x)[::-1]
def scan_count(
x: jax.Array, mask: jax.Array | None = None
) -> tuple[jax.Array, jax.Array]:
"""Computes the running duplicate occurrence count of the array.
Args:
x: An array of integers or floats.
mask: An optional array of booleans, which specifies which elements ``x``
are eligible for counting. If ``None``, all elements are eligible.
Returns:
A tuple of two arrays:
* the running duplicate occurrence count of ``x``;
* the mask indicating the last occurrence of each duplicate that was
counted.
"""
return scan_count_p.bind(x, lax.full(x.shape, True) if mask is None else mask)
masked_cummax_p = jax_core.Primitive("masked_cummax")
masked_cummax_p.multiple_results = False
@masked_cummax_p.def_abstract_eval
def _masked_cummax_abstract_eval(x, mask):
  """Validates operand avals for masked cummax; the output aval matches ``x``."""
  if x.dtype not in (jnp.int32, jnp.float32):
    raise NotImplementedError(f"x.dtype={x.dtype} must be int32 or float32")
  if not jnp.issubdtype(mask.dtype, jnp.bool):
    raise TypeError(f"mask.dtype={mask.dtype} is not a boolean dtype")
  if x.shape != mask.shape:
    raise ValueError(f"x.shape={x.shape} != mask.shape={mask.shape}")
  return x
@sc_lowering.register_lowering_rule(masked_cummax_p)
def _masked_cummax_lowering_rule(ctx: sc_lowering.LoweringRuleContext, x, mask):
  """Lowers masked cummax to a `tpu.scan` op with a max reduction kind."""
  del ctx  # Unused.
  return tpu.scan(
      x.type, x, ir.Attribute.parse("#tpu.reduction_kind<max>"), mask=mask)
def cummax(x: jax.Array, *, mask: jax.Array | None = None) -> jax.Array:
  """Returns the cumulative max of the array along its innermost axis.

  Elements from `x` will pass through directly to the result until the first
  valid value is encountered (`mask[i] == True`). If you would like to specify
  a default value for such elements instead, write
  `x = jnp.where(mask, x, default_value)` before or after calling this function.

  Args:
    x: An array of integers or floats.
    mask: An optional array of booleans, which specifies which elements of `x`
      are eligible for the max. If `None`, all elements are eligible.

  Raises:
    NotImplementedError: If ``x`` is not rank 1.
  """
  if x.ndim != 1:
    # Fix: the message previously said "masked_cummax:", leaking the internal
    # primitive's name; report the public API name instead, consistent with
    # the analogous error raised by `cumsum`.
    raise NotImplementedError(f"cummax: x={x.aval} must be rank 1")
  if mask is None:
    mask = lax.full(x.shape, True)
  return masked_cummax_p.bind(x, mask)
@sc_lowering.register_lowering_rule(
    lax.reduce_max_p, kernel_types=[tpu_core.KernelType.SC_VECTOR_SUBCORE])
def _reduce_max_lowering_rule(ctx: sc_lowering.LoweringRuleContext, x, axes):
  """Lowers a full reduce_max as an all-true masked cummax of the vector."""
  if axes != (0,):
    raise NotImplementedError(
        f"reduce_max requires axes to be (0,) on SparseCore, but got {axes}.")
  vec_dim = ctx.avals_in[0].shape[0]
  # Build an all-ones i1 vector mask so every element participates.
  i1t = ir.IntegerType.get_signless(1)
  c1 = arith.constant(i1t, ir.IntegerAttr.get(i1t, 1))
  c1v = vector.broadcast(ir.VectorType.get(x.type.shape, c1.type), c1)
  # The last element of the cumulative max is the overall max.
  return vector.extract(
      _masked_cummax_lowering_rule(ctx, x, c1v), [], [vec_dim - 1])
# Primitive computing a masked cumulative sum (see `cumsum` below).
masked_cumsum_p = jax_core.Primitive("masked_cumsum")
masked_cumsum_p.multiple_results = False
@masked_cumsum_p.def_abstract_eval
def _masked_cumsum_abstract_eval(x, mask):
  """Validates operand avals for masked cumsum; output has ``x``'s shape/dtype."""
  if x.dtype not in (jnp.int32, jnp.float32):
    raise NotImplementedError(f"x.dtype={x.dtype} must be int32 or float32")
  if not jnp.issubdtype(mask.dtype, jnp.bool):
    raise TypeError(f"mask.dtype={mask.dtype} is not a boolean dtype")
  if x.shape != mask.shape:
    raise ValueError(f"x.shape={x.shape} != mask.shape={mask.shape}")
  return jax_core.ShapedArray(x.shape, x.dtype)
@sc_lowering.register_lowering_rule(masked_cumsum_p)
def _masked_cumsum_lowering_rule(ctx: sc_lowering.LoweringRuleContext, x, mask):
  """Lowers masked cumsum to a `tpu.scan` op with a sum reduction kind."""
  del ctx  # Unused.
  return tpu.scan(
      x.type, x, ir.Attribute.parse("#tpu.reduction_kind<sum>"), mask=mask)
@sc_lowering.register_lowering_rule(lax.cumsum_p)
def _cumsum_lowering_rule(ctx: sc_lowering.LoweringRuleContext, x, axis,
                          reverse):
  """Lowers `lax.cumsum_p` to a `tpu.scan` sum over an all-true mask."""
  if axis != 0:
    raise NotImplementedError(f"SC cumsum: axis={axis} must be 0.")
  if len(ctx.avals_in[0].shape) != 1:
    raise NotImplementedError(f"SC cumsum: x={ctx.avals_in[0]} must be rank 1")
  if reverse:
    raise NotImplementedError("SC cumsum: reverse=True is not yet supported")
  # Build an all-ones i1 vector mask so every element participates in the sum.
  i1t = ir.IntegerType.get_signless(1)
  c1 = arith.constant(i1t, ir.IntegerAttr.get(i1t, 1))
  c1v = vector.broadcast(ir.VectorType.get(x.type.shape, c1.type), c1)
  return tpu.scan(
      x.type, x, ir.Attribute.parse("#tpu.reduction_kind<sum>"), mask=c1v)
def cumsum(x: jax.Array, *, mask: jax.Array | None = None) -> jax.Array:
  """Returns the cumulative sum of the array along its innermost axis.

  Unlike ``jnp.cumsum``, this accepts an additional ``mask`` argument.

  Args:
    x: An array of integers or floats.
    mask: An optional array of booleans selecting which elements of ``x``
      participate in the sum. ``None`` means every element participates.
  """
  if x.ndim != 1:
    raise NotImplementedError(f"cumsum: x={x.aval} must be rank 1")
  effective_mask = lax.full(x.shape, True) if mask is None else mask
  return masked_cumsum_p.bind(x, effective_mask)
@sc_lowering.register_lowering_rule(
    lax.reduce_sum_p, kernel_types=[tpu_core.KernelType.SC_VECTOR_SUBCORE])
def _reduce_sum_lowering_rule(
    ctx: sc_lowering.LoweringRuleContext, x, axes, out_sharding):
  """Lowers a full reduce_sum as a cumulative sum, extracting the last element."""
  del out_sharding  # Unused.
  vec_dim = ctx.avals_in[0].shape[0]
  if axes != (0,):
    raise NotImplementedError(f"SC reduce_sum: axes={axes} must be (0,).")
  # The last element of the cumulative sum is the overall sum.
  return vector.extract(
      _cumsum_lowering_rule(ctx, x, 0, reverse=False), [], [vec_dim - 1])
# Primitive backing `parallel_loop` below.
parallel_loop_p = jax_core.Primitive("parallel_loop")
# The loop is effectful iff its body jaxpr is.
parallel_loop_p.is_effectful = lambda params: bool(params["jaxpr"].effects)  # type: ignore
parallel_loop_p.multiple_results = True
@parallel_loop_p.def_effectful_abstract_eval
def _parallel_loop_abstract_eval(*args, jaxpr, tree, **params):
  """Abstract eval for `parallel_loop_p`: carry avals pass through unchanged."""
  del params  # Unused.
  # The flat args are (lower, upper, step, consts, carries); only the carries
  # contribute to the outputs.
  _, _, _, _, carries = tree.unflatten(args)
  if any(isinstance(c, (Ref, TransformedRef)) for c in carries):
    raise TypeError(f"Carried values may not be refs, but got: {carries}")
  # NOTE(review): effects that are not JaxprInputEffect are not added to
  # `updated_effects` here, i.e. they are dropped — confirm this is intended.
  updated_effects = set()
  for eff in jaxpr.effects:
    if isinstance(eff, effects.JaxprInputEffect):
      # Offset for the parallel_loop eqn to account for start, stop, and step
      # args passed to parallel_loop_p.bind.
      eff = eff.replace(input_index=eff.input_index + 3)
      updated_effects.add(eff)
  return carries, updated_effects
@sc_lowering.register_lowering_rule(parallel_loop_p)
def _parallel_loop_lowering_rule(
    ctx: sc_lowering.LoweringRuleContext,
    *flat_args,
    tree,
    unroll,
    jaxpr,
):
  """Lowers `parallel_loop_p` to an `scf.for` annotated for parallel access."""
  lower, upper, step, consts, carry = tree.unflatten(flat_args)
  for_op = scf.ForOp(
      _ensure_ir_value(lower, pallas_core.index_map_grid_aval),
      _ensure_ir_value(upper, pallas_core.index_map_grid_aval),
      _ensure_ir_value(step, pallas_core.index_map_grid_aval),
      carry,
  )
  # Mark the loop's memory accesses as independent across iterations and
  # attach the requested unroll factor.
  for_op.attributes["sc.parallel_access"] = ir.UnitAttr.get()
  for_op.attributes["sc.loop_unroll_factor"] = ir.IntegerAttr.get(
      ir.IntegerType.get_signless(64), unroll
  )
  with ir.InsertionPoint(for_op.body):
    # Only the consts carry block shapes; the induction variable and the
    # carried values are plain scalars/values (hence the Nones).
    _, _, _, consts_block_shapes, *_ = tree.unflatten(ctx.block_shapes)
    lowering_ctx = ctx.lowering_context.replace(
        block_shapes=[*consts_block_shapes, None] + [None] * len(carry),
    )
    # NOTE(review): reuses tc_lowering.jaxpr_subcomp to lower the body —
    # presumably a helper shared between TC and SC lowerings; confirm.
    carry_out = tc_lowering.jaxpr_subcomp(
        lowering_ctx,
        pe.convert_constvars_jaxpr(jaxpr),
        *consts,
        for_op.induction_variable,
        *for_op.inner_iter_args,
    )
    scf.yield_(carry_out)
  return for_op.results
# Overload: without a carry, the loop body takes only the index and the
# decorator returns None.
@overload
def parallel_loop(
    lower: jax.typing.ArrayLike,
    upper: jax.typing.ArrayLike,
    step: jax.typing.ArrayLike = ...,
    *,
    unroll: int = ...,
    carry: None = None,
) -> Callable[[Callable[[jax.Array], None]], None]:
  ...


# Overload: with a carry, the loop body takes (index, carry) and returns the
# updated carry; the decorator returns the final carry value.
@overload
def parallel_loop(
    lower: jax.typing.ArrayLike,
    upper: jax.typing.ArrayLike,
    step: jax.typing.ArrayLike = ...,
    *,
    unroll: int = ...,
    carry: _T,
) -> Callable[[Callable[[jax.Array, _T], _T]], _T]:
  ...
def parallel_loop(lower, upper, step=1, *, unroll=1, carry=None):
  """A parallel loop decorator.

  The decorated function forms the loop body. It is called with the current
  loop index as the argument and optionally, a single additional carry argument.

  The loop iterations must be independent, meaning that operations in one
  iteration cannot depend on the side effects, especially Ref writes, of any
  other iteration. This allows the compiler to execute instructions from
  different iterations concurrently, potentially reordering them for better
  performance.

  Cross-iteration dependencies traceable via carried values are allowed. Refs
  may not be carried.

  Safe usage of carried value::

    @parallel_loop(0, 64, step=8, carry=jnp.int32(1))
    def body(i, j):
      # Writes are independent across iterations.
      x_ref[pl.ds(i, 8)] = j + jnp.arange(8)
      return j + 1

  Any pytree can be carried. The final value is returned by the decorator::

    def body(i, my_tree: MyTree):
      # Writes are independent across iterations.
      x_ref[pl.ds(i, 8)] = my_tree.transform(jnp.arange(8))
      return my_tree.step(i)

    final_value = parallel_loop(0, 64, step=8, carry=MyTree())(body)

  Undefined result::

    @parallel_loop(0, 64, step=4, carry=jnp.int32(1))
    def body(i, j):
      # Because the step size is 4, the array written is of size 8, and loop
      # iterations may be reordered, the values in indices 4-59 of x_ref are
      # unspecified after the loop. (The values in 0-3 and 60-63 are only
      # written by the first and last iterations, so are well-defined.)
      x_ref[pl.ds(i, 8)] = j + jnp.arange(8)
      return j + 1

  Unsafe read of "previous" iteration's write (don't do this)::

    @parallel_loop(0, 64, 8, carry=jnp.int32(1))
    def body(i, j):
      # Unsafe because it depends on the side-effect of "previous" iterations,
      # which may be executed in parallel or reordered.
      mask = x_ref[pl.ds(0, 8)] < j
      x_ref[pl.ds(0, 8)] += jnp.where(mask, j + jnp.arange(8), 0)
      return j + 1

  Args:
    lower: The starting value of the loop index.
    upper: The exclusive upper bound of the loop index.
    step: The increment of the loop index. Default to 1.
    unroll: The unroll factor of the loop.
    carry: Optional carried state of the loop.

  Returns:
    A decorator that executes the given function in a parallel loop.
  """
  def decorator(body):
    flat_carries, carry_tree = jax.tree.flatten(carry)

    # Flat-signature adapter around the user body: takes the index plus the
    # flattened carry leaves, returns flattened carry leaves.
    def wrapped(idx, *carries):
      if carry is None:
        body(idx)
        return []
      result = body(idx, carry_tree.unflatten(carries))
      result, result_tree = jax.tree.flatten(result)
      if result_tree != carry_tree:
        raise ValueError(
            "parallel_loop: body result should have same structure as carry:"
            f" {result_tree} != {carry_tree}"
        )
      return result

    # Trace the adapted body to a jaxpr: one index aval plus one aval per
    # carry leaf.
    flat_avals = [
        pallas_core.index_map_grid_aval,
        *(c.aval for c in flat_carries),
    ]
    jaxpr, _, consts = pe.trace_to_jaxpr_dynamic(
        lu.wrap_init(
            wrapped,
            debug_info=api_util.debug_info(
                "parallel_loop", body, flat_avals, {}
            ),
        ),
        flat_avals,
    )
    carry_tree.unflatten(jaxpr.outvars)  # Verify same structure.
    disallowed_effects = effects.control_flow_allowed_effects.filter_not_in(
        jaxpr.effects
    )
    if disallowed_effects:
      raise NotImplementedError(
          f"Effects not supported in parallel_loop: {disallowed_effects}"
      )
    flat_args, tree = jax.tree.flatten(
        (lower, upper, step, consts, flat_carries)
    )
    flat_result = parallel_loop_p.bind(
        *flat_args, tree=tree, unroll=unroll, jaxpr=jaxpr
    )
    if carry is None:
      return None
    return carry_tree.unflatten(flat_result)

  return decorator
|
MemoryEffect
|
python
|
django__django
|
django/db/models/lookups.py
|
{
"start": 22597,
"end": 22713
}
|
class ____(PatternLookup):
lookup_name = "startswith"
param_pattern = "%s%%"
@Field.register_lookup
|
StartsWith
|
python
|
google__jax
|
jax/_src/interpreters/mlir.py
|
{
"start": 25829,
"end": 26330
}
|
class ____:
traceback_to_location_cache: Any # jax_mlir_ext.TracebackToLocationCache
canonical_name_cache: dict[str, str]
def __init__(self):
frame_limit = config.traceback_in_locations_limit.value
frame_limit = frame_limit if frame_limit >= 0 else 1000
self.traceback_to_location_cache = jax_mlir_ext.TracebackToLocationCache(
code_to_filename=_code_to_filename, frame_limit=frame_limit)
self.canonical_name_cache = {}
@dataclasses.dataclass(frozen=True)
|
TracebackCaches
|
python
|
facebook__pyre-check
|
tools/upgrade/commands/targets_to_configuration.py
|
{
"start": 898,
"end": 1930
}
|
class ____(libcst.CSTTransformer):
@override
def leave_Call(
self, original_node: libcst.Call, updated_node: libcst.Call
) -> libcst.Call:
check_types = False
uses_pyre = True
updated_fields = []
for field in original_node.args:
name = field.keyword
value = field.value
if not name:
continue
name = name.value
if name == "check_types":
if isinstance(value, libcst.Name):
check_types = check_types or value.value.lower() == "true"
elif name == "check_types_options":
if isinstance(value, libcst.SimpleString):
uses_pyre = uses_pyre and "mypy" not in value.value.lower()
elif name not in ["typing", "typing_options"]:
updated_fields.append(field)
if check_types and uses_pyre:
return updated_node.with_changes(args=updated_fields)
return updated_node
|
TargetPyreRemover
|
python
|
google__jax
|
tests/error_check_test.py
|
{
"start": 1201,
"end": 11591
}
|
class ____(jtu.JaxTestCase):
@parameterized.product(jit=[True, False])
def test_error_check(self, jit):
def f(x):
error_check.set_error_if(x <= 0, "x must be greater than 0")
return x + 1
if jit:
f = jax.jit(f)
x = jnp.full((4,), -1, dtype=jnp.int32)
f(x)
with self.assertRaisesRegex(JaxValueError, "x must be greater than 0"):
error_check.raise_if_error()
@parameterized.product(jit=[True, False])
def test_error_check_no_error(self, jit):
def f(x):
error_check.set_error_if(x <= 0, "x must be greater than 0")
return x + 1
if jit:
f = jax.jit(f)
x = jnp.full((4,), 1, dtype=jnp.int32)
f(x)
error_check.raise_if_error() # should not raise error
@parameterized.product(jit=[True, False])
def test_error_check_should_report_the_first_error(self, jit):
def f(x):
error_check.set_error_if(x >= 1, "x must be less than 1 in f")
return x + 1
def g(x):
error_check.set_error_if(x >= 1, "x must be less than 1 in g")
return x + 1
if jit:
f = jax.jit(f)
g = jax.jit(g)
x = jnp.full((4,), 0, dtype=jnp.int32)
x = f(x) # check passes, so it should not set error
x = g(x) # check fails. so it should set error
_ = f(x) # check fails, but should not override the error
with self.assertRaisesRegex(JaxValueError, "x must be less than 1 in g"):
error_check.raise_if_error()
@parameterized.product(jit=[True, False])
def test_raise_if_error_clears_error(self, jit):
def f(x):
error_check.set_error_if(x <= 0, "x must be greater than 0 in f")
return x + 1
def g(x):
error_check.set_error_if(x <= 0, "x must be greater than 0 in g")
return x + 1
if jit:
f = jax.jit(f)
g = jax.jit(g)
x = jnp.full((4,), -1, dtype=jnp.int32)
f(x)
with self.assertRaisesRegex(JaxValueError, "x must be greater than 0 in f"):
error_check.raise_if_error()
error_check.raise_if_error() # should not raise error
g(x)
with self.assertRaisesRegex(JaxValueError, "x must be greater than 0 in g"):
error_check.raise_if_error()
@parameterized.product(jit=[True, False])
def test_error_includes_traceback(self, jit):
def function_that_triggers_error_for_traceback_test(x):
error_check.set_error_if( # This line must be included in the traceback.
x <= 0, "x must be greater than 0"
)
return x + 1
if jit:
function_that_triggers_error_for_traceback_test = jax.jit(
function_that_triggers_error_for_traceback_test
)
x = jnp.zeros((4,), dtype=jnp.int32)
function_that_triggers_error_for_traceback_test(x)
tb_string = ""
try:
error_check.raise_if_error()
except JaxValueError as e:
tb_string = traceback.format_tb(e.__traceback__)
tb_string = "".join(tb_string)
self.assertIn("function_that_triggers_error_for_traceback_test", tb_string)
self.assertIn("This line must be included in the traceback", tb_string)
@parameterized.product(jit=[True, False])
def test_error_check_works_with_cond(self, jit):
def f(x):
error_check.set_error_if(x == 0, "x must be non-zero in f")
return x + 1
def g(x):
error_check.set_error_if(x == 0, "x must be non-zero in g")
return x + 1
def body(pred, x):
return jax.lax.cond(pred, f, g, x)
if jit:
body = jax.jit(body)
x = jnp.zeros((4,), dtype=jnp.int32)
_ = body(jnp.bool_(True), x)
with self.assertRaisesRegex(JaxValueError, "x must be non-zero in f"):
error_check.raise_if_error()
_ = body(jnp.bool_(False), x)
with self.assertRaisesRegex(JaxValueError, "x must be non-zero in g"):
error_check.raise_if_error()
@parameterized.product(jit=[True, False])
def test_error_check_works_with_while_loop(self, jit):
def f(x):
error_check.set_error_if(x >= 10, "x must be less than 10")
return x + 1
def body(x):
return jax.lax.while_loop(lambda x: (x < 10).any(), f, x)
if jit:
body = jax.jit(body)
x = jnp.arange(4, dtype=jnp.int32)
_ = body(x)
with self.assertRaisesRegex(JaxValueError, "x must be less than 10"):
error_check.raise_if_error()
@parameterized.product(jit=[True, False])
def test_error_check_works_with_scan(self, jit):
def f(carry, x):
error_check.set_error_if(x >= 4, "x must be less than 4")
return carry + x, x + 1
def body(init, xs):
return jax.lax.scan(f, init=init, xs=xs)
if jit:
body = jax.jit(body)
init = jnp.int32(0)
xs = jnp.arange(5, dtype=jnp.int32)
_ = body(init, xs)
with self.assertRaisesRegex(JaxValueError, "x must be less than 4"):
error_check.raise_if_error()
xs = jnp.arange(4, dtype=jnp.int32)
_ = body(init, xs)
error_check.raise_if_error() # should not raise error
@parameterized.product(jit=[True, False])
def test_raise_if_error_fails_in_traced_context(self, jit):
def f(x):
error_check.set_error_if(x <= 0, "x must be greater than 0")
return x + 1
if jit:
f = jax.jit(f)
x = jnp.full((4,), 1, dtype=jnp.int32)
f(x)
with self.assertRaises(
ValueError,
msg=(
"raise_if_error() should not be called within a traced context,"
" such as within a jitted function."
),
):
jax.jit(error_check.raise_if_error)()
@parameterized.product(jit=[True, False])
@jtu.with_explicit_mesh((2, 2), ("x", "y"))
def test_error_check_explicit_mode(self, mesh, jit):
def f(x):
error_check.set_error_if(x <= 0, "x must be greater than 0")
return x + 1
if jit:
f = jax.jit(f)
with error_check.error_checking_context():
x = jnp.full((4, 4), -1, dtype=jnp.int32)
f(x)
with self.assertRaisesRegex(JaxValueError, "x must be greater than 0"):
error_check.raise_if_error()
sharding = NamedSharding(mesh, P("x", "y"))
with error_check.error_checking_context():
y = jnp.full((4, 4), -1, dtype=jnp.int32, device=sharding)
f(y)
with self.assertRaisesRegex(JaxValueError, "x must be greater than 0"):
error_check.raise_if_error()
# The unsharded version of `f` should still be able to check errors after
# exiting the error checking context.
f(x)
with self.assertRaisesRegex(JaxValueError, "x must be greater than 0"):
error_check.raise_if_error()
@parameterized.product(jit=[True, False])
@jtu.with_explicit_mesh(
(2, 2),
("x", "y"),
axis_types=(mesh_lib.AxisType.Auto, mesh_lib.AxisType.Auto),
)
@jtu.ignore_warning(
message=(
"When at least one mesh axis of `pred` is in auto mode, calling"
" `set_error_if` will cause implicit communication between devices."
" To avoid this, consider converting the mesh axis in auto mode to"
" explicit mode."
),
category=RuntimeWarning,
)
def test_error_check_auto_mode(self, jit, mesh):
def f(x):
error_check.set_error_if(x <= 0, "x must be greater than 0")
return x + 1
if jit:
f = jax.jit(f)
with error_check.error_checking_context():
sharding = NamedSharding(mesh, P("x", "y"))
x = jnp.full((4, 4), -1, dtype=jnp.int32, device=sharding)
f(x)
with self.assertRaisesRegex(JaxValueError, "x must be greater than 0"):
error_check.raise_if_error()
def test_error_check_aot(self):
def run_export():
def f(x):
error_check.set_error_if(x <= 0, "x must be greater than 0")
return x + 1
f = jax.jit(error_check.wrap_for_export(jax.jit(f)))
x = jax.ShapeDtypeStruct((), jnp.float32)
serialized = jax.export.export(f)(x).serialize()
return serialized
def run_import(serialized):
f = jax.export.deserialize(serialized).call
f = jax.jit(error_check.unwrap_from_import(jax.jit(f)))
x = jnp.float32(-3.)
_ = f(x)
with self.assertRaisesRegex(JaxValueError, "x must be greater than 0"):
error_check.raise_if_error()
serialized = run_export()
run_import(serialized)
def test_error_check_aot_includes_traceback(self):
def run_export():
def function_that_triggers_error_for_traceback_test(x):
error_check.set_error_if( # This line must be included in the traceback
x <= 0, "x must be greater than 0"
)
return x + 1
f = jax.jit(
error_check.wrap_for_export(
jax.jit(function_that_triggers_error_for_traceback_test)
)
)
x = jax.ShapeDtypeStruct((), jnp.float32)
serialized = jax.export.export(f)(x).serialize()
return serialized
def run_import(serialized):
f = jax.export.deserialize(serialized).call
f = jax.jit(error_check.unwrap_from_import(jax.jit(f)))
x = jnp.float32(-3.0)
_ = f(x)
msg = ""
try:
error_check.raise_if_error()
except JaxValueError as e:
msg = str(e)
self.assertIn("function_that_triggers_error_for_traceback_test", msg)
self.assertIn("This line must be included in the traceback", msg)
serialized = run_export()
run_import(serialized)
def test_error_check_aot_should_not_override_existing_error(self):
def f1(x):
error_check.set_error_if(x <= 0, "x must be greater than 0 in f1")
return x + 1
def run_export():
def f2(x):
error_check.set_error_if(x <= 0, "x must be greater than 0 in f2")
return x + 1
f2 = jax.jit(error_check.wrap_for_export(jax.jit(f2)))
x = jax.ShapeDtypeStruct((), jnp.float32)
serialized = jax.export.export(f2)(x).serialize()
return serialized
def run_import(serialized):
f2 = jax.export.deserialize(serialized).call
f2 = jax.jit(error_check.unwrap_from_import(jax.jit(f2)))
return f2
x = jnp.float32(-3.)
_ = f1(x) # check fails. so it should set error
serialized = run_export()
f2 = run_import(serialized)
_ = f2(x) # check fails, but should not override the error
with self.assertRaisesRegex(
JaxValueError, "x must be greater than 0 in f1"
):
error_check.raise_if_error()
if __name__ == "__main__":
absltest.main(testLoader=jtu.JaxTestLoader())
|
ErrorCheckTests
|
python
|
ray-project__ray
|
python/ray/data/preprocessors/scaler.py
|
{
"start": 564,
"end": 5039
}
|
class ____(SerializablePreprocessorBase):
r"""Translate and scale each column by its mean and standard deviation,
respectively.
The general formula is given by
.. math::
x' = \frac{x - \bar{x}}{s}
where :math:`x` is the column, :math:`x'` is the transformed column,
:math:`\bar{x}` is the column average, and :math:`s` is the column's sample
standard deviation. If :math:`s = 0` (i.e., the column is constant-valued),
then the transformed column will contain zeros.
.. warning::
:class:`StandardScaler` works best when your data is normal. If your data isn't
approximately normal, then the transformed features won't be meaningful.
Examples:
>>> import pandas as pd
>>> import ray
>>> from ray.data.preprocessors import StandardScaler
>>>
>>> df = pd.DataFrame({"X1": [-2, 0, 2], "X2": [-3, -3, 3], "X3": [1, 1, 1]})
>>> ds = ray.data.from_pandas(df) # doctest: +SKIP
>>> ds.to_pandas() # doctest: +SKIP
X1 X2 X3
0 -2 -3 1
1 0 -3 1
2 2 3 1
Columns are scaled separately.
>>> preprocessor = StandardScaler(columns=["X1", "X2"])
>>> preprocessor.fit_transform(ds).to_pandas() # doctest: +SKIP
X1 X2 X3
0 -1.224745 -0.707107 1
1 0.000000 -0.707107 1
2 1.224745 1.414214 1
Constant-valued columns get filled with zeros.
>>> preprocessor = StandardScaler(columns=["X3"])
>>> preprocessor.fit_transform(ds).to_pandas() # doctest: +SKIP
X1 X2 X3
0 -2 -3 0.0
1 0 -3 0.0
2 2 3 0.0
>>> preprocessor = StandardScaler(
... columns=["X1", "X2"],
... output_columns=["X1_scaled", "X2_scaled"]
... )
>>> preprocessor.fit_transform(ds).to_pandas() # doctest: +SKIP
X1 X2 X3 X1_scaled X2_scaled
0 -2 -3 1 -1.224745 -0.707107
1 0 -3 1 0.000000 -0.707107
2 2 3 1 1.224745 1.414214
Args:
columns: The columns to separately scale.
output_columns: The names of the transformed columns. If None, the transformed
columns will be the same as the input columns. If not None, the length of
``output_columns`` must match the length of ``columns``, othwerwise an error
will be raised.
"""
def __init__(self, columns: List[str], output_columns: Optional[List[str]] = None):
super().__init__()
self.columns = columns
self.output_columns = Preprocessor._derive_and_validate_output_columns(
columns, output_columns
)
def _fit(self, dataset: "Dataset") -> Preprocessor:
self.stat_computation_plan.add_aggregator(
aggregator_fn=Mean,
columns=self.columns,
)
self.stat_computation_plan.add_aggregator(
aggregator_fn=lambda col: Std(col, ddof=0),
columns=self.columns,
)
return self
def _transform_pandas(self, df: pd.DataFrame):
def column_standard_scaler(s: pd.Series):
s_mean = self.stats_[f"mean({s.name})"]
s_std = self.stats_[f"std({s.name})"]
if s_std is None or s_mean is None:
s[:] = np.nan
return s
# Handle division by zero.
# TODO: extend this to handle near-zero values.
if s_std == 0:
s_std = 1
return (s - s_mean) / s_std
df[self.output_columns] = df[self.columns].transform(column_standard_scaler)
return df
def _get_serializable_fields(self) -> Dict[str, Any]:
return {
"columns": self.columns,
"output_columns": self.output_columns,
"_fitted": getattr(self, "_fitted", None),
}
def _set_serializable_fields(self, fields: Dict[str, Any], version: int):
# required fields
self.columns = fields["columns"]
self.output_columns = fields["output_columns"]
# optional fields
self._fitted = fields.get("_fitted")
def __repr__(self):
return f"{self.__class__.__name__}(columns={self.columns!r}, output_columns={self.output_columns!r})"
@PublicAPI(stability="alpha")
@SerializablePreprocessor(version=1, identifier="io.ray.preprocessors.min_max_scaler")
|
StandardScaler
|
python
|
jmcnamara__XlsxWriter
|
xlsxwriter/test/comparison/test_hyperlink46.py
|
{
"start": 306,
"end": 1401
}
|
class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("hyperlink46.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file with hyperlinks."""
workbook = Workbook(self.got_filename, {"max_url_length": 255})
worksheet = workbook.add_worksheet()
worksheet.write("A1", "Foo")
worksheet.write("A3", "Bar")
# Ignore the warnings raised in the following code.
import warnings
warnings.filterwarnings("ignore")
# This link is too long and should be ignored, with a warning.
worksheet.write_url(
"A2",
"http://foo.com/this_is_a_long_hyperlink_that_exceeds_a_limit_of_255_characters_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx",
)
workbook.close()
self.assertExcelEqual()
|
TestCompareXLSXFiles
|
python
|
HypothesisWorks__hypothesis
|
hypothesis-python/tests/cover/test_statistical_events.py
|
{
"start": 1618,
"end": 7841
}
|
class ____:
def __eq__(self, other):
return True
def __ne__(self, other):
return False
def __hash__(self):
return 0
def __str__(self):
seen.append(self)
global counter
counter += 1
return f"COUNTER {counter}"
def test_formats_are_evaluated_only_once():
global counter
counter = 0
@given(st.integers())
def test(i):
event(Foo())
stats = call_for_statistics(test)
assert "COUNTER 1" in unique_events(stats)
assert "COUNTER 2" not in unique_events(stats)
def test_does_not_report_on_examples():
@example("hi")
@given(st.integers())
def test(i):
if isinstance(i, str):
event("boo")
stats = call_for_statistics(test)
assert not unique_events(stats)
def test_exact_timing():
@settings(suppress_health_check=[HealthCheck.too_slow], deadline=None)
@given(st.integers())
def test(i):
time.sleep(0.5)
stats = describe_statistics(call_for_statistics(test))
assert "~ 500ms" in stats
def test_apparently_instantaneous_tests():
time.freeze()
@given(st.integers())
def test(i):
pass
stats = describe_statistics(call_for_statistics(test))
assert "< 1ms" in stats
@xfail_on_crosshair(Why.other) # crosshair re-executes for flakiness itself
def test_flaky_exit():
first = True
@settings(derandomize=True)
@given(st.integers())
def test(i):
nonlocal first
if i > 1001:
if first:
first = False
raise AssertionError
stats = call_for_statistics(test)
assert stats["stopped-because"] == "test was flaky"
@pytest.mark.parametrize("draw_delay", [False, True])
@pytest.mark.parametrize("test_delay", [False, True])
def test_draw_timing(draw_delay, test_delay):
time.freeze()
@st.composite
def s(draw):
if draw_delay:
time.sleep(0.05)
draw(st.integers())
@given(s())
def test(_):
if test_delay:
time.sleep(0.05)
stats = describe_statistics(call_for_statistics(test))
if not draw_delay:
assert "< 1ms" in stats
else:
match = re.search(r"of which ~ (?P<gentime>\d+)", stats)
assert 49 <= int(match.group("gentime")) <= 51
def test_has_lambdas_in_output():
@settings(max_examples=100, database=None)
@given(st.integers().filter(lambda x: x % 2 == 0))
def test(i):
pass
stats = call_for_statistics(test)
assert any("lambda x: x % 2 == 0" in e for e in unique_events(stats))
def test_stops_after_x_shrinks(monkeypatch):
# the max_shrinks argument is deprecated, but we still stop after some
# number - which we can reduce to zero to check that this works.
from hypothesis.internal.conjecture import engine
monkeypatch.setattr(engine, "MAX_SHRINKS", 0)
@given(st.integers(min_value=0))
def test(n):
assert n < 10
stats = call_for_statistics(test)
assert "shrunk example" in stats["stopped-because"]
def test_stateful_states_are_deduped():
class DemoStateMachine(stateful.RuleBasedStateMachine):
Stuff = stateful.Bundle("stuff")
@stateful.rule(target=Stuff, name=st.text())
def create_stuff(self, name):
return name
@stateful.rule(item=Stuff)
def do(self, item):
return
stats = call_for_statistics(DemoStateMachine.TestCase().runTest)
stats = unique_events(stats)
stats = [s for s in stats if not s.startswith("invalid because: (internal)")]
assert len(stats) <= 2
def test_stateful_with_one_of_bundles_states_are_deduped():
class DemoStateMachine(stateful.RuleBasedStateMachine):
Things = stateful.Bundle("things")
Stuff = stateful.Bundle("stuff")
StuffAndThings = Things | Stuff
@stateful.rule(target=Things, name=st.text())
def create_thing(self, name):
return name
@stateful.rule(target=Stuff, name=st.text())
def create_stuff(self, name):
return name
@stateful.rule(item=StuffAndThings)
def do(self, item):
return
stats = call_for_statistics(DemoStateMachine.TestCase().runTest)
stats = unique_events(stats)
stats = [s for s in stats if not s.startswith("invalid because: (internal)")]
assert len(stats) <= 4
def test_statistics_for_threshold_problem():
@settings(max_examples=100, database=None)
@given(st.floats(min_value=0, allow_infinity=False))
def threshold(error):
target(error, label="error")
assert error <= 10
target(0.0, label="never in failing example")
stats = call_for_statistics(threshold)
assert " - Highest target scores:" in describe_statistics(stats)
assert "never in failing example" in describe_statistics(stats)
# Check that we report far-from-threshold failing examples
assert stats["targets"]["error"] > 10
# describe_statistics causes not-deterministic crosshair errors for some reason?
@xfail_on_crosshair(Why.other)
def test_statistics_with_events_and_target():
@given(st.integers(0, 10_000))
def test(value):
event(value)
target(float(value), label="a target")
stats = describe_statistics(call_for_statistics(test))
assert "- Events:" in stats
assert "- Highest target score: " in stats
@given(st.booleans())
def test_event_with_non_weakrefable_keys(b):
event((b,))
def test_assume_adds_event_with_function_origin():
@given(st.integers())
def very_distinguishable_name(n):
assume(n > 100)
stats = call_for_statistics(very_distinguishable_name)
for tc in stats["generate-phase"]["test-cases"]:
for e in tc["events"]:
assert "failed to satisfy assume() in very_distinguishable_name" in e
def test_reject_adds_event_with_function_origin():
@given(st.integers())
def very_distinguishable_name(n):
if n > 100:
reject()
stats = call_for_statistics(very_distinguishable_name)
for tc in stats["generate-phase"]["test-cases"]:
for e in tc["events"]:
assert "reject() in very_distinguishable_name" in e
|
Foo
|
python
|
keras-team__keras
|
keras/src/ops/numpy.py
|
{
"start": 117870,
"end": 118458
}
|
class ____(Operation):
def call(self, x):
return backend.numpy.imag(x)
def compute_output_spec(self, x):
sparse = getattr(x, "sparse", False)
return KerasTensor(x.shape, dtype=x.dtype, sparse=sparse)
@keras_export(["keras.ops.imag", "keras.ops.numpy.imag"])
def imag(x):
"""Return the imaginary part of the complex argument.
Args:
x: Input tensor.
Returns:
The imaginary component of the complex argument.
"""
if any_symbolic_tensors((x,)):
return Imag().symbolic_call(x)
return backend.numpy.imag(x)
|
Imag
|
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/source-shopify/source_shopify/streams/streams.py
|
{
"start": 5576,
"end": 5681
}
|
class ____(MetafieldShopifySubstream):
parent_stream_class = SmartCollections
|
MetafieldSmartCollections
|
python
|
apache__airflow
|
providers/google/src/airflow/providers/google/cloud/links/compute.py
|
{
"start": 1884,
"end": 2171
}
|
class ____(BaseGoogleLink):
"""Helper class for constructing Compute Instance Group Manager details Link."""
name = "Compute Instance Group Manager"
key = "compute_instance_group_manager_details"
format_str = COMPUTE_GROUP_MANAGER_LINK
|
ComputeInstanceGroupManagerDetailsLink
|
python
|
microsoft__pyright
|
packages/pyright-internal/src/tests/samples/override1.py
|
{
"start": 534,
"end": 1551
}
|
class ____(ClassA, ClassB):
@property
@override
# This should generate an error because prop_a doesn't
# override anything in its base class.
def prop_a(self) -> int:
raise NotImplementedError
@override
def method1(self) -> None:
pass
def method2(self) -> None:
pass
@override
def method3(self) -> None:
pass
@override
# This should generate an error because method3 does not
# override anything in a base class.
def method4(self) -> None:
pass
@overload
def method5(self, x: int) -> int: ...
@overload
def method5(self, x: str) -> str: ...
@override
def method5(self, x: int | str) -> int | str: ...
@overload
def method6(self, x: int) -> int: ...
@overload
def method6(self, x: str) -> str: ...
@override
# This should generate an error because method6 does not
# override anything in a base class.
def method6(self, x: int | str) -> int | str: ...
|
ClassC
|
python
|
FactoryBoy__factory_boy
|
factory/faker.py
|
{
"start": 341,
"end": 2006
}
|
class ____(declarations.BaseDeclaration):
"""Wrapper for 'faker' values.
Args:
provider (str): the name of the Faker field
locale (str): the locale to use for the faker
All other kwargs will be passed to the underlying provider
(e.g ``factory.Faker('ean', length=10)``
calls ``faker.Faker.ean(length=10)``)
Usage:
>>> foo = factory.Faker('name')
"""
def __init__(self, provider, **kwargs):
locale = kwargs.pop('locale', None)
self.provider = provider
super().__init__(
locale=locale,
**kwargs)
def evaluate(self, instance, step, extra):
locale = extra.pop('locale')
subfaker = self._get_faker(locale)
return subfaker.format(self.provider, **extra)
_FAKER_REGISTRY: Dict[str, faker.Faker] = {}
_DEFAULT_LOCALE = faker.config.DEFAULT_LOCALE
@classmethod
@contextlib.contextmanager
def override_default_locale(cls, locale):
old_locale = cls._DEFAULT_LOCALE
cls._DEFAULT_LOCALE = locale
try:
yield
finally:
cls._DEFAULT_LOCALE = old_locale
@classmethod
def _get_faker(cls, locale=None):
if locale is None:
locale = cls._DEFAULT_LOCALE
if locale not in cls._FAKER_REGISTRY:
subfaker = faker.Faker(locale=locale)
cls._FAKER_REGISTRY[locale] = subfaker
return cls._FAKER_REGISTRY[locale]
@classmethod
def add_provider(cls, provider, locale=None):
"""Add a new Faker provider for the specified locale"""
cls._get_faker(locale).add_provider(provider)
|
Faker
|
python
|
jina-ai__jina
|
tests/unit/orchestrate/flow/flow-construct/test_flow_except.py
|
{
"start": 5333,
"end": 6918
}
|
class ____(Executor):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
raise Exception
@pytest.mark.repeat(10)
@pytest.mark.timeout(10)
@pytest.mark.parametrize('protocol', ['websocket', 'grpc', 'http'])
def test_flow_startup_exception_not_hanging2(protocol):
f = Flow(protocol=protocol).add(uses=ExceptionExecutor2)
from jina.excepts import RuntimeFailToStart
with pytest.raises(RuntimeFailToStart):
with f:
pass
@pytest.mark.timeout(10)
@pytest.mark.parametrize('protocol', ['websocket', 'grpc', 'http'])
def test_flow_startup_exception_not_hanging_filenotfound(protocol):
f = Flow(protocol=protocol).add(uses='doesntexist.yml')
from jina.excepts import RuntimeFailToStart
with pytest.raises(RuntimeFailToStart):
with f:
pass
@pytest.mark.timeout(10)
@pytest.mark.parametrize('protocol', ['websocket', 'grpc', 'http'])
def test_flow_startup_exception_not_hanging_invalid_config(protocol):
this_file = os.path.dirname(os.path.abspath(__file__))
f = Flow(protocol=protocol).add(
name='importErrorExecutor',
uses=this_file,
)
with pytest.raises(RuntimeFailToStart):
with f:
pass
def test_flow_does_not_import_exec_dependencies():
cur_dir = os.path.dirname(os.path.abspath(__file__))
f = Flow().add(
name='importErrorExecutor',
uses=os.path.join(cur_dir, 'executor-invalid-import/config.yml'),
)
with pytest.raises(RuntimeFailToStart):
with f:
pass
|
ExceptionExecutor2
|
python
|
matplotlib__matplotlib
|
lib/matplotlib/widgets.py
|
{
"start": 113262,
"end": 113516
}
|
class ____(enum.Enum):
ROTATE = enum.auto()
MOVE = enum.auto()
RESIZE = enum.auto()
CREATE = enum.auto()
@_docstring.Substitution(_RECTANGLESELECTOR_PARAMETERS_DOCSTRING.replace(
'__ARTIST_NAME__', 'rectangle'))
|
_RectangleSelectorAction
|
python
|
django__django
|
tests/settings_tests/tests.py
|
{
"start": 18191,
"end": 22187
}
|
class ____(SimpleTestCase):
"""
The override_settings context manager restore settings if one of the
receivers of "setting_changed" signal fails. Check the three cases of
receiver failure detailed in receiver(). In each case, ALL receivers are
called when exiting the context manager.
"""
def setUp(self):
signals.setting_changed.connect(self.receiver)
self.addCleanup(signals.setting_changed.disconnect, self.receiver)
# Create a spy that's connected to the `setting_changed` signal and
# executed AFTER `self.receiver`.
self.spy_receiver = mock.Mock()
signals.setting_changed.connect(self.spy_receiver)
self.addCleanup(signals.setting_changed.disconnect, self.spy_receiver)
def receiver(self, **kwargs):
"""
A receiver that fails while certain settings are being changed.
- SETTING_BOTH raises an error while receiving the signal
on both entering and exiting the context manager.
- SETTING_ENTER raises an error only on enter.
- SETTING_EXIT raises an error only on exit.
"""
setting = kwargs["setting"]
enter = kwargs["enter"]
if setting in ("SETTING_BOTH", "SETTING_ENTER") and enter:
raise SettingChangeEnterException
if setting in ("SETTING_BOTH", "SETTING_EXIT") and not enter:
raise SettingChangeExitException
def check_settings(self):
"""Assert that settings for these tests aren't present."""
self.assertFalse(hasattr(settings, "SETTING_BOTH"))
self.assertFalse(hasattr(settings, "SETTING_ENTER"))
self.assertFalse(hasattr(settings, "SETTING_EXIT"))
self.assertFalse(hasattr(settings, "SETTING_PASS"))
def check_spy_receiver_exit_calls(self, call_count):
"""
Assert that `self.spy_receiver` was called exactly `call_count` times
with the ``enter=False`` keyword argument.
"""
kwargs_with_exit = [
kwargs
for args, kwargs in self.spy_receiver.call_args_list
if ("enter", False) in kwargs.items()
]
self.assertEqual(len(kwargs_with_exit), call_count)
def test_override_settings_both(self):
"""Receiver fails on both enter and exit."""
with self.assertRaises(SettingChangeEnterException):
with override_settings(SETTING_PASS="BOTH", SETTING_BOTH="BOTH"):
pass
self.check_settings()
# Two settings were touched, so expect two calls of `spy_receiver`.
self.check_spy_receiver_exit_calls(call_count=2)
def test_override_settings_enter(self):
"""Receiver fails on enter only."""
with self.assertRaises(SettingChangeEnterException):
with override_settings(SETTING_PASS="ENTER", SETTING_ENTER="ENTER"):
pass
self.check_settings()
# Two settings were touched, so expect two calls of `spy_receiver`.
self.check_spy_receiver_exit_calls(call_count=2)
def test_override_settings_exit(self):
"""Receiver fails on exit only."""
with self.assertRaises(SettingChangeExitException):
with override_settings(SETTING_PASS="EXIT", SETTING_EXIT="EXIT"):
pass
self.check_settings()
# Two settings were touched, so expect two calls of `spy_receiver`.
self.check_spy_receiver_exit_calls(call_count=2)
def test_override_settings_reusable_on_enter(self):
"""
Error is raised correctly when reusing the same override_settings
instance.
"""
@override_settings(SETTING_ENTER="ENTER")
def decorated_function():
pass
with self.assertRaises(SettingChangeEnterException):
decorated_function()
signals.setting_changed.disconnect(self.receiver)
# This call shouldn't raise any errors.
decorated_function()
|
OverrideSettingsIsolationOnExceptionTests
|
python
|
ansible__ansible
|
test/support/windows-integration/collections/ansible_collections/ansible/windows/plugins/action/win_reboot.py
|
{
"start": 717,
"end": 3875
}
|
class ____(ActionBase):
TRANSFERS_FILES = False
_VALID_ARGS = frozenset((
'boot_time_command',
'connect_timeout',
'connect_timeout_sec',
'msg',
'post_reboot_delay',
'post_reboot_delay_sec',
'pre_reboot_delay',
'pre_reboot_delay_sec',
'reboot_timeout',
'reboot_timeout_sec',
'shutdown_timeout',
'shutdown_timeout_sec',
'test_command',
))
def run(self, tmp=None, task_vars=None):
self._supports_check_mode = True
self._supports_async = True
if self._play_context.check_mode:
return {'changed': True, 'elapsed': 0, 'rebooted': True}
if task_vars is None:
task_vars = {}
super(ActionModule, self).run(tmp, task_vars)
parameters = {}
for names, check_func in [
(['boot_time_command'], check_type_str),
(['connect_timeout', 'connect_timeout_sec'], _positive_float),
(['msg'], check_type_str),
(['post_reboot_delay', 'post_reboot_delay_sec'], _positive_float),
(['pre_reboot_delay', 'pre_reboot_delay_sec'], _positive_float),
(['reboot_timeout', 'reboot_timeout_sec'], _positive_float),
(['test_command'], check_type_str),
]:
for name in names:
value = self._task.args.get(name, None)
if value:
break
else:
value = None
# Defaults are applied in reboot_action so skip adding to kwargs if the input wasn't set (None)
if value is not None:
try:
value = check_func(value)
except TypeError as e:
raise AnsibleError("Invalid value given for '%s': %s." % (names[0], to_native(e)))
# Setting a lower value and kill PowerShell when sending the shutdown command. Just use the defaults
# if this is the case.
if names[0] == 'pre_reboot_delay' and value < 2:
continue
parameters[names[0]] = value
result = reboot_host(self._task.action, self._connection, **parameters)
# Not needed for testing and collection_name kwargs causes sanity error
# Historical behaviour had ignore_errors=True being able to ignore unreachable hosts and not just task errors.
# This snippet will allow that to continue but state that it will be removed in a future version and to use
# ignore_unreachable to ignore unreachable hosts.
# if result['unreachable'] and self._task.ignore_errors and not self._task.ignore_unreachable:
# dep_msg = "Host was unreachable but is being skipped because ignore_errors=True is set. In the future " \
# "only ignore_unreachable will be able to ignore an unreachable host for %s" % self._task.action
# display.deprecated(dep_msg, date="2023-05-01", collection_name="ansible.windows")
# result['unreachable'] = False
# result['failed'] = True
return result
|
ActionModule
|
python
|
pytorch__pytorch
|
torch/_inductor/fx_passes/group_batch_fusion.py
|
{
"start": 50483,
"end": 59332
}
|
class ____:
def __init__(self, param=None) -> None:
if param:
self.rep = OrderedDict(dict.fromkeys(param))
else:
self.rep = OrderedDict()
def __contains__(self, o) -> bool:
return o in self.rep
def __len__(self) -> int:
return self.rep.__len__()
def append(self, o):
self.rep[o] = None
def __iter__(self):
return self.rep.keys().__iter__()
def find_independent_subset_greedy(
node_list: Iterable[torch.fx.Node],
graph_search_options: dict[str, Any],
) -> Iterator[Iterable[torch.fx.Node]]:
"""
Yields a list of subsets of `node_list` where no element in the subset
depends on any other element in the subset. This results in a set of
independent nodes which can be fused together.
The order of `node_list` is preserved within each subset so we can benefit
from split-cat elimination in later passes.
During iteration it is only safe to mutate the graph by changing the nodes
that have been returned.
graph_search_options:
- min_fuse_set_size: Minimum size of the subset to consider. Subsets below
this size will be ignored.
- max_fuse_set_size: Maximum size of the subset to consider. Subsets will
be broken to be at most this size.
"""
# Compute all the children of `node` which are members of
# `interesting_nodes`.
def find_dependent_nodes(node, interesting_nodes):
visited_node_set = OrderedSet[torch.fx.Node]()
dep_set = OrderedSet[torch.fx.Node]()
work = [node]
while work:
node = work.pop()
for input_node in node.all_input_nodes:
if input_node in interesting_nodes:
dep_set.add(input_node)
if input_node not in visited_node_set:
visited_node_set.add(input_node)
work.append(input_node)
return dep_set
min_fuse_set_size = graph_search_options["min_fuse_set_size"]
max_fuse_set_size = graph_search_options["max_fuse_set_size"]
# node_list needs to be a set because we only track the nodes that are left
# in it (and we want to do the `in` on a set, not a list). But we want to
# keep the correct order.
node_list = _OrderedSet(node_list)
cache: dict[torch.fx.Node, OrderedSet[torch.fx.Node]] = {}
while node_list:
subset: list[torch.fx.Node] = []
subset_deps = OrderedSet[torch.fx.Node]()
next_round_node_list = _OrderedSet()
for node in node_list:
if len(subset) >= max_fuse_set_size or node in subset_deps:
next_round_node_list.append(node)
continue
dep_set = cache.pop(node, None)
if dep_set is None:
dep_set = find_dependent_nodes(node, node_list)
if not dep_set.intersection(subset):
subset.append(node)
subset_deps.update(dep_set)
else:
next_round_node_list.append(node)
cache[node] = dep_set
if len(subset) >= min_fuse_set_size:
# Careful here - the caller uses the subsets to fuse nodes together
# so we need to clear any cache entry that contains one of the
# returned nodes because the dependency list could be different
# (larger) after the merge.
cache = {k: v for k, v in cache.items() if v.isdisjoint(subset)}
yield subset
node_list = next_round_node_list
def get_fusion_candidates(
rule: GroupBatchFusionBase,
root_node: torch.fx.Node,
fused_set: OrderedSet[torch.fx.Node],
) -> collections.defaultdict[Any, list[torch.fx.Node]]:
"""
Search fusion candidates for a specific rule using BFS starting from the root node.
We only search the subgraph within graph_search_options["max_fuse_search_depth"].
"""
q: collections.deque[tuple[int, torch.fx.Node]] = collections.deque()
candidate_dict: collections.defaultdict[Any, list[torch.fx.Node]] = (
collections.defaultdict(list)
)
if root_node.target in SEARCH_EXCLUSIONS:
return candidate_dict
visited_set = OrderedSet[torch.fx.Node]()
for next_node in root_node.all_input_nodes:
q.append((1, next_node))
visited_set.add(next_node)
while len(q) > 0:
depth, node = q.popleft()
if node in fused_set:
continue
key = rule.match(node)
if key is not None:
candidate_nodes = candidate_dict[key]
if node not in candidate_nodes:
candidate_nodes.append(node)
else:
if depth < rule.graph_search_options["max_fuse_search_depth"]:
for next_node in node.all_input_nodes:
if next_node not in visited_set:
visited_set.add(next_node)
q.append((depth + 1, next_node))
return candidate_dict
def apply_group_batch_fusion(graph: torch.fx.GraphModule, rule: GroupBatchFusionBase):
stable_topological_sort(graph) # type: ignore[arg-type]
fused_set = OrderedSet[torch.fx.Node]()
log_to_scuba = False
for node in reversed(graph.nodes): # type: ignore[arg-type]
candidates = get_fusion_candidates(rule, node, fused_set)
for key, candidate_nodes in candidates.items():
if len(candidate_nodes) < rule.graph_search_options["min_fuse_set_size"]:
continue
for subset in find_independent_subset_greedy(
candidate_nodes, rule.graph_search_options
):
rule.fuse(graph, subset)
fused_set.update(subset)
log.debug(
f"{rule.__class__.__name__}: key = {key}; subset size = {len(list(subset))}" # noqa: G004
)
log_to_scuba = True
if log_to_scuba:
from torch.fx._lazy_graph_module import _LazyGraphModule
# Force graph to re-compile otherwise the output python code may be broken
gm = graph._owning_module
if isinstance(gm, _LazyGraphModule):
_LazyGraphModule.recompile()
else:
assert isinstance(gm, torch.fx.GraphModule)
gm.recompile()
graph_str = gm.print_readable(
print_output=False, include_stride=True, include_device=True
)
name = f"optimus_{str(rule.__class__.__name__)}"
if "MTIA" in name:
name = f"cff_{str(rule.__class__.__name__)}"
trace_structured(
"artifact",
metadata_fn=lambda: {
"name": name,
"encoding": "string",
},
payload_fn=lambda: graph_str,
)
def generate_fusion_from_config(config_options: dict[str, Any], pre_grad=True):
fusions: list[GroupBatchFusionBase] = []
for name, options in config_options.items():
# we skip all patterns from pattern_matcher passes (e.g., split_cat)
if name not in PRE_GRAD_FUSIONS and name not in POST_GRAD_FUSIONS:
continue
fusion_cls = PRE_GRAD_FUSIONS[name] if pre_grad else POST_GRAD_FUSIONS[name]
_options = graph_search_options.copy()
_options.update(options)
fusions.append(fusion_cls(graph_search_options=_options)) # type: ignore[operator]
return fusions
def group_batch_fusion_passes(graph: torch.fx.Graph, pre_grad=True):
fusions: list[GroupBatchFusionBase] = []
# we keep all current pre grad fusions to keep
# current implementation, will remove this later
if pre_grad:
fusions += generate_fusion_from_config(
config.pre_grad_fusion_options, pre_grad=True
)
else:
fbgemm_fusion_keys = [
x
for x in config.post_grad_fusion_options
if (
x not in OPTIMUS_EXCLUDE_POST_GRAD
and config.post_grad_fusion_options[x].get("require_fbgemm", False)
)
]
fbgemm_fusions = {
fusion: config.post_grad_fusion_options[fusion]
for fusion in fbgemm_fusion_keys
}
non_fbgemm_fusions = {
fusion: config.post_grad_fusion_options[fusion]
for fusion in config.post_grad_fusion_options
if fusion not in fbgemm_fusion_keys
}
fusions += generate_fusion_from_config(non_fbgemm_fusions, pre_grad=False)
if has_fbgemm:
fusions += generate_fusion_from_config(fbgemm_fusions, pre_grad=False)
for i, rule in enumerate(fusions):
with GraphTransformObserver(
graph.owning_module,
f"group_batch_fusion_{i}",
):
apply_group_batch_fusion(graph, rule) # type: ignore[arg-type]
|
_OrderedSet
|
python
|
dask__distributed
|
distributed/scheduler.py
|
{
"start": 336882,
"end": 338096
}
|
class ____(Exception):
def __init__(
self,
task: Key,
host_restrictions: set[str],
worker_restrictions: set[str],
resource_restrictions: dict[str, float],
timeout: float,
):
super().__init__(
task, host_restrictions, worker_restrictions, resource_restrictions, timeout
)
@property
def task(self) -> Key:
return self.args[0]
@property
def host_restrictions(self) -> Any:
return self.args[1]
@property
def worker_restrictions(self) -> Any:
return self.args[2]
@property
def resource_restrictions(self) -> Any:
return self.args[3]
@property
def timeout(self) -> float:
return self.args[4]
def __str__(self) -> str:
return (
f"Attempted to run task {self.task!r} but timed out after {format_time(self.timeout)} "
"waiting for a valid worker matching all restrictions.\n\nRestrictions:\n"
f"host_restrictions={self.host_restrictions!s}\n"
f"worker_restrictions={self.worker_restrictions!s}\n"
f"resource_restrictions={self.resource_restrictions!s}\n"
)
|
NoValidWorkerError
|
python
|
huggingface__transformers
|
src/transformers/models/hunyuan_v1_dense/modular_hunyuan_v1_dense.py
|
{
"start": 2004,
"end": 4368
}
|
class ____(LlamaAttention):
def __init__(self, config: HunYuanDenseV1Config, layer_idx: int):
super().__init__(config, layer_idx)
self.query_layernorm = HunYuanDenseV1RMSNorm(self.head_dim, eps=config.rms_norm_eps)
self.key_layernorm = HunYuanDenseV1RMSNorm(self.head_dim, eps=config.rms_norm_eps)
def forward(
self,
hidden_states: torch.Tensor,
position_embeddings: tuple[torch.Tensor, torch.Tensor],
attention_mask: Optional[torch.Tensor],
past_key_values: Optional[Cache] = None,
cache_position: Optional[torch.LongTensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> tuple[torch.Tensor, torch.Tensor]:
input_shape = hidden_states.shape[:-1]
hidden_shape = (*input_shape, -1, self.head_dim)
query_states = self.q_proj(hidden_states).view(hidden_shape).transpose(1, 2)
key_states = self.k_proj(hidden_states).view(hidden_shape).transpose(1, 2)
value_states = self.v_proj(hidden_states).view(hidden_shape).transpose(1, 2)
cos, sin = position_embeddings
query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
query_states = self.query_layernorm(query_states)
key_states = self.key_layernorm(key_states)
if past_key_values is not None:
# sin and cos are specific to RoPE models; cache_position needed for the static cache
cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs)
attention_interface: Callable = eager_attention_forward
if self.config._attn_implementation != "eager":
attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
attn_output, attn_weights = attention_interface(
self,
query_states,
key_states,
value_states,
attention_mask,
dropout=0.0 if not self.training else self.attention_dropout,
scaling=self.scaling,
**kwargs,
)
attn_output = attn_output.reshape(*input_shape, -1).contiguous()
attn_output = self.o_proj(attn_output)
return attn_output, attn_weights
|
HunYuanDenseV1Attention
|
python
|
django__django
|
tests/backends/base/test_schema.py
|
{
"start": 138,
"end": 672
}
|
class ____(SimpleTestCase):
def test_effective_default_callable(self):
"""
SchemaEditor.effective_default() shouldn't call callable defaults.
"""
class MyStr(str):
def __call__(self):
return self
class MyCharField(models.CharField):
def _get_default(self):
return self.default
field = MyCharField(max_length=1, default=MyStr)
self.assertEqual(BaseDatabaseSchemaEditor._effective_default(field), MyStr)
|
SchemaEditorTests
|
python
|
pypa__warehouse
|
warehouse/email/services.py
|
{
"start": 4589,
"end": 4969
}
|
class ____(SMTPEmailSender):
def send(self, recipient, message):
super().send(recipient=recipient, message=message)
print(
f"""Email sent
Subject: {message.subject}
From: {self.sender if message.sender is None else message.sender}
To: {recipient}
HTML: Visualize at http://localhost:1080
Text: {message.body_text}"""
)
|
ConsoleAndSMTPEmailSender
|
python
|
openai__openai-python
|
src/openai/resources/beta/realtime/realtime.py
|
{
"start": 8110,
"end": 8617
}
|
class ____:
def __init__(self, realtime: AsyncRealtime) -> None:
self._realtime = realtime
@cached_property
def sessions(self) -> AsyncSessionsWithStreamingResponse:
return AsyncSessionsWithStreamingResponse(self._realtime.sessions)
@cached_property
def transcription_sessions(self) -> AsyncTranscriptionSessionsWithStreamingResponse:
return AsyncTranscriptionSessionsWithStreamingResponse(self._realtime.transcription_sessions)
|
AsyncRealtimeWithStreamingResponse
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.