language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | huggingface__transformers | src/transformers/models/clap/modeling_clap.py | {
"start": 10352,
"end": 14843
} | class ____(nn.Module):
"""
This module converts the hidden states reshaped as an image to patch embeddings ready to be passed to the
Transformer block.
"""
def __init__(self, config: ClapAudioConfig):
super().__init__()
img_size = (config.spec_size, config.spec_size) if isinstance(config.spec_size, int) else config.spec_size
patch_size = (
(config.patch_size, config.patch_size) if isinstance(config.patch_size, int) else config.patch_size
)
patch_stride = (
(config.patch_stride, config.patch_stride) if isinstance(config.patch_stride, int) else config.patch_stride
)
self.img_size = img_size
self.patch_stride = patch_stride
self.grid_size = (img_size[0] // patch_stride[0], img_size[1] // patch_stride[1])
self.num_patches = self.grid_size[0] * self.grid_size[1]
self.flatten = config.flatten_patch_embeds
self.enable_fusion = config.enable_fusion
padding = ((patch_size[0] - patch_stride[0]) // 2, (patch_size[1] - patch_stride[1]) // 2)
scale_factor = 4 if (self.enable_fusion) and (config.fusion_type == "channel_map") else 1
self.proj = nn.Conv2d(
config.patch_embed_input_channels * scale_factor,
config.patch_embeds_hidden_size,
kernel_size=patch_size,
stride=patch_stride,
padding=padding,
)
self.norm = nn.LayerNorm(config.patch_embeds_hidden_size) if config.enable_patch_layer_norm else nn.Identity()
if self.enable_fusion:
self.fusion_model = ClapAudioAFFBlock(config)
self.mel_conv2d = nn.Conv2d(
config.patch_embed_input_channels,
config.patch_embeds_hidden_size,
kernel_size=(patch_size[0], patch_size[1] * 3),
stride=(patch_stride[0], patch_stride[1] * 3),
padding=padding,
)
def forward(self, hidden_states, is_longer_idx=None):
if self.enable_fusion:
# retrieve the last mel as we have transposed the input
global_hidden_states = hidden_states[:, 0:1, :, :]
# global processing
batch_size, num_channels, height, width = global_hidden_states.shape
if height != self.img_size[0] or width != self.img_size[1]:
raise ValueError(
f"Input audio size ({height}*{width}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})."
)
global_hidden_states = self.proj(global_hidden_states)
output_width = global_hidden_states.size(-1)
if len(is_longer_idx) > 0:
# local processing
local_hidden_states = hidden_states[is_longer_idx, 1:, :, :].contiguous()
batch_size, num_channels, height, width = local_hidden_states.shape
local_hidden_states = local_hidden_states.view(batch_size * num_channels, 1, height, width)
local_hidden_states = self.mel_conv2d(local_hidden_states)
_, features, height, width = local_hidden_states.shape
local_hidden_states = local_hidden_states.view(batch_size, num_channels, features, height, width)
local_hidden_states = local_hidden_states.permute((0, 2, 3, 1, 4)).contiguous().flatten(3)
local_width = local_hidden_states.size(-1)
local_hidden_states = torch.nn.functional.pad(
local_hidden_states, (0, output_width - local_width), "constant", 0
)
global_hidden_states[is_longer_idx] = self.fusion_model(
global_hidden_states[is_longer_idx], local_hidden_states
)
hidden_states = global_hidden_states
else:
_, _, height, width = hidden_states.shape
if height != self.img_size[0] or width != self.img_size[1]:
raise ValueError(
f"Input audio size ({height}*{width}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})."
)
hidden_states = self.proj(hidden_states)
if self.flatten:
hidden_states = hidden_states.flatten(2).transpose(1, 2)
hidden_states = self.norm(hidden_states)
return hidden_states
# Copied from transformers.models.swin.modeling_swin.SwinSelfAttention with Swin->ClapAudio
| ClapAudioPatchEmbed |
python | numpy__numpy | benchmarks/benchmarks/bench_function_base.py | {
"start": 6769,
"end": 7687
} | class ____(Benchmark):
params = [
['float64', 'int64', 'float32', 'int32', 'int16', 'float16'],
[
('random',),
('ordered',),
('reversed',),
('uniform',),
('sorted_block', 10),
('sorted_block', 100),
('sorted_block', 1000),
],
[10, 100, 1000],
]
param_names = ['dtype', 'array_type', 'k']
# The size of the benchmarked arrays.
ARRAY_SIZE = 100000
def setup(self, dtype, array_type, k):
rnd = np.random.seed(2136297818)
array_class = array_type[0]
self.arr = getattr(SortGenerator, array_class)(
self.ARRAY_SIZE, dtype, *array_type[1:], rnd)
def time_partition(self, dtype, array_type, k):
temp = np.partition(self.arr, k)
def time_argpartition(self, dtype, array_type, k):
temp = np.argpartition(self.arr, k)
| Partition |
python | ansible__ansible | test/integration/targets/connection_delegation/action_plugins/delegation_action.py | {
"start": 84,
"end": 268
} | class ____(ActionBase):
def run(self, tmp=None, task_vars=None):
return {
'remote_password': self._connection.get_option('remote_password'),
}
| ActionModule |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 826428,
"end": 827166
} | class ____(sgqlc.types.relay.Connection):
"""The connection type for User."""
__schema__ = github_schema
__field_names__ = ("edges", "nodes", "page_info", "total_count")
edges = sgqlc.types.Field(sgqlc.types.list_of("OrganizationMemberEdge"), graphql_name="edges")
"""A list of edges."""
nodes = sgqlc.types.Field(sgqlc.types.list_of("User"), graphql_name="nodes")
"""A list of nodes."""
page_info = sgqlc.types.Field(sgqlc.types.non_null("PageInfo"), graphql_name="pageInfo")
"""Information to aid in pagination."""
total_count = sgqlc.types.Field(sgqlc.types.non_null(Int), graphql_name="totalCount")
"""Identifies the total count of items in the connection."""
| OrganizationMemberConnection |
python | huggingface__transformers | examples/modular-transformers/modeling_my_new_model2.py | {
"start": 11485,
"end": 11600
} | class ____(GenericForSequenceClassification, MyNewModel2PreTrainedModel):
pass
| MyNewModel2ForSequenceClassification |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/mro3.py | {
"start": 357,
"end": 419
} | class ____(QualifiedObject, SubclassableObject):
pass
| Source |
python | altair-viz__altair | altair/vegalite/v6/schema/_config.py | {
"start": 96610,
"end": 99890
} | class ____(TypedDict, total=False):
"""
:class:`altair.FormatConfig` ``TypedDict`` wrapper.
Parameters
----------
normalizedNumberFormat
If normalizedNumberFormatType is not specified, D3 number format for axis labels,
text marks, and tooltips of normalized stacked fields (fields with ``stack:
"normalize"``). For example ``"s"`` for SI units. Use `D3's number format pattern
<https://github.com/d3/d3-format#locale_format>`__.
If ``config.normalizedNumberFormatType`` is specified and
``config.customFormatTypes`` is ``true``, this value will be passed as ``format``
alongside ``datum.value`` to the ``config.numberFormatType`` function. **Default
value:** ``%``
normalizedNumberFormatType
`Custom format type
<https://vega.github.io/vega-lite/docs/config.html#custom-format-type>`__ for
``config.normalizedNumberFormat``.
**Default value:** ``undefined`` -- This is equilvalent to call D3-format, which is
exposed as `format in Vega-Expression
<https://vega.github.io/vega/docs/expressions/#format>`__. **Note:** You must also
set ``customFormatTypes`` to ``true`` to use this feature.
numberFormat
If numberFormatType is not specified, D3 number format for guide labels, text marks,
and tooltips of non-normalized fields (fields *without* ``stack: "normalize"``). For
example ``"s"`` for SI units. Use `D3's number format pattern
<https://github.com/d3/d3-format#locale_format>`__.
If ``config.numberFormatType`` is specified and ``config.customFormatTypes`` is
``true``, this value will be passed as ``format`` alongside ``datum.value`` to the
``config.numberFormatType`` function.
numberFormatType
`Custom format type
<https://vega.github.io/vega-lite/docs/config.html#custom-format-type>`__ for
``config.numberFormat``.
**Default value:** ``undefined`` -- This is equilvalent to call D3-format, which is
exposed as `format in Vega-Expression
<https://vega.github.io/vega/docs/expressions/#format>`__. **Note:** You must also
set ``customFormatTypes`` to ``true`` to use this feature.
timeFormat
Default time format for raw time values (without time units) in text marks, legend
labels and header labels.
**Default value:** ``"%b %d, %Y"`` **Note:** Axes automatically determine the format
for each label automatically so this config does not affect axes.
timeFormatType
`Custom format type
<https://vega.github.io/vega-lite/docs/config.html#custom-format-type>`__ for
``config.timeFormat``.
**Default value:** ``undefined`` -- This is equilvalent to call D3-time-format,
which is exposed as `timeFormat in Vega-Expression
<https://vega.github.io/vega/docs/expressions/#timeFormat>`__. **Note:** You must
also set ``customFormatTypes`` to ``true`` and there must *not* be a ``timeUnit``
defined to use this feature.
"""
normalizedNumberFormat: str
normalizedNumberFormatType: str
numberFormat: str
numberFormatType: str
timeFormat: str
timeFormatType: str
| FormatConfigKwds |
python | viewflow__viewflow | viewflow/workflow/flow/nodes.py | {
"start": 8445,
"end": 10097
} | class ____(
mixins.NodeDetailMixin,
mixins.NodeExecuteMixin,
mixins.NodeUndoMixin,
mixins.NodeReviveMixin,
nodes.Split,
):
"""
Represents a parallel split gateway in a workflow, allowing branching into multiple parallel paths.
Methods:
- `Next(node, case=None, data_source=None)`: Defines the subsequent node in the workflow.
* `node`: The next node to execute.
* `case` (optional): A callable that takes an activation and returns `True` if the node should be activated.
* `data_source` (optional): A callable that takes an activation and returns a list of data items, creating an instance of the node for each item, with `task.data` set to the item.
- `Always(node)`: A shortcut to define a subsequent node that is always executed.
Example:
.. code-block:: python
flow.Split()
.Next(
this.approve,
case=act.process.approved,
data_source=lambda activation: [{"sample": "test task 1"}, {"sample": "test task 2"}],
)
.Always(this.required)
In this example:
- The `approve` node is executed multiple times based on the `data_source` list.
- The `required` node is always executed unconditionally in parallel.
Notes:
- If `case` is not provided, the node is always activated.
- If `data_source` is not provided, the node is created only once.
"""
index_view_class = views.IndexTaskView
detail_view_class = views.DetailTaskView
undo_view_class = views.UndoTaskView
revive_view_class = views.ReviveTaskView
| Split |
python | spack__spack | var/spack/test_repos/spack_repo/builtin_mock/packages/module_path_separator/package.py | {
"start": 217,
"end": 887
} | class ____(Package):
homepage = "http://www.spack.llnl.gov"
url = "http://www.spack.llnl.gov/module-path-separator-1.0.tar.gz"
version("1.0", md5="0123456789abcdef0123456789abcdef")
def setup_run_environment(self, env: EnvironmentModifications) -> None:
env.append_path("COLON", "foo")
env.prepend_path("COLON", "foo")
env.remove_path("COLON", "foo")
env.append_path("SEMICOLON", "bar", separator=";")
env.prepend_path("SEMICOLON", "bar", separator=";")
env.remove_path("SEMICOLON", "bar", separator=";")
env.append_flags("SPACE", "qux")
env.remove_flags("SPACE", "qux")
| ModulePathSeparator |
python | jazzband__django-simple-history | simple_history/tests/models.py | {
"start": 19930,
"end": 20130
} | class ____(models.Model):
id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
history = HistoricalRecords(history_id_field=models.UUIDField(default=uuid.uuid4))
| UUIDModel |
python | python-pillow__Pillow | src/PIL/ImageTransform.py | {
"start": 370,
"end": 1058
} | class ____(Image.ImageTransformHandler):
"""Base class for other transforms defined in :py:mod:`~PIL.ImageTransform`."""
method: Image.Transform
def __init__(self, data: Sequence[Any]) -> None:
self.data = data
def getdata(self) -> tuple[Image.Transform, Sequence[int]]:
return self.method, self.data
def transform(
self,
size: tuple[int, int],
image: Image.Image,
**options: Any,
) -> Image.Image:
"""Perform the transform. Called from :py:meth:`.Image.transform`."""
# can be overridden
method, data = self.getdata()
return image.transform(size, method, data, **options)
| Transform |
python | ray-project__ray | python/ray/data/_internal/logical/operators/all_to_all_operator.py | {
"start": 2819,
"end": 4217
} | class ____(AbstractAllToAll, LogicalOperatorSupportsPredicatePassThrough):
"""Logical operator for random_shuffle."""
def __init__(
self,
input_op: LogicalOperator,
name: str = "RandomShuffle",
seed: Optional[int] = None,
ray_remote_args: Optional[Dict[str, Any]] = None,
):
super().__init__(
name,
input_op,
sub_progress_bar_names=[
ExchangeTaskSpec.MAP_SUB_PROGRESS_BAR_NAME,
ExchangeTaskSpec.REDUCE_SUB_PROGRESS_BAR_NAME,
],
ray_remote_args=ray_remote_args,
)
self._seed = seed
def infer_metadata(self) -> "BlockMetadata":
assert len(self._input_dependencies) == 1, len(self._input_dependencies)
assert isinstance(self._input_dependencies[0], LogicalOperator)
return self._input_dependencies[0].infer_metadata()
def infer_schema(
self,
) -> Optional["Schema"]:
assert len(self._input_dependencies) == 1, len(self._input_dependencies)
assert isinstance(self._input_dependencies[0], LogicalOperator)
return self._input_dependencies[0].infer_schema()
def predicate_passthrough_behavior(self) -> PredicatePassThroughBehavior:
# Random shuffle doesn't affect filtering correctness
return PredicatePassThroughBehavior.PASSTHROUGH
| RandomShuffle |
python | sphinx-doc__sphinx | tests/roots/test-ext-autodoc/target/__init__.py | {
"start": 3510,
"end": 3622
} | class ____(str): # NoQA: FURB189,SLOT000
"""docstring"""
def __repr__(self):
return self
| StrRepr |
python | huggingface__transformers | src/transformers/models/glm/modeling_glm.py | {
"start": 13651,
"end": 15421
} | class ____(GradientCheckpointingLayer):
def __init__(self, config: GlmConfig, layer_idx: int):
super().__init__()
self.hidden_size = config.hidden_size
self.self_attn = GlmAttention(config=config, layer_idx=layer_idx)
self.mlp = GlmMLP(config)
self.input_layernorm = GlmRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
self.post_attention_layernorm = GlmRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
use_cache: Optional[bool] = False,
cache_position: Optional[torch.LongTensor] = None,
position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]] = None,
**kwargs: Unpack[TransformersKwargs],
) -> torch.Tensor:
residual = hidden_states
hidden_states = self.input_layernorm(hidden_states)
# Self Attention
hidden_states, _ = self.self_attn(
hidden_states=hidden_states,
attention_mask=attention_mask,
position_ids=position_ids,
past_key_values=past_key_values,
use_cache=use_cache,
cache_position=cache_position,
position_embeddings=position_embeddings,
**kwargs,
)
hidden_states = residual + hidden_states
# Fully Connected
residual = hidden_states
hidden_states = self.post_attention_layernorm(hidden_states)
hidden_states = self.mlp(hidden_states)
hidden_states = residual + hidden_states
return hidden_states
@auto_docstring
| GlmDecoderLayer |
python | walkccc__LeetCode | solutions/2702. Minimum Operations to Make Numbers Non-positive/2702.py | {
"start": 0,
"end": 542
} | class ____:
def minOperations(self, nums: list[int], x: int, y: int) -> int:
def isPossible(m: int) -> bool:
"""
Returns True if it's possible to make all `nums` <= 0 using m operations.
"""
# If we want m operations, first decrease all the numbers by y * m. Then
# we have m operations to select indices to decrease them by x - y.
return sum(max(0, math.ceil((num - y * m) / (x - y)))
for num in nums) <= m
return bisect.bisect_left(range(max(nums)), True, key=isPossible)
| Solution |
python | scipy__scipy | scipy/integrate/tests/test_cubature.py | {
"start": 32601,
"end": 34716
} | class ____:
"""
Tests underlying quadrature rules (ndim == 1).
"""
@pytest.mark.parametrize(("rule", "rule_args"), [
(GaussLegendreQuadrature, (3,)),
(GaussLegendreQuadrature, (5,)),
(GaussLegendreQuadrature, (10,)),
(GaussKronrodQuadrature, (15,)),
(GaussKronrodQuadrature, (21,)),
])
def test_base_1d_quadratures_simple(self, rule, rule_args, xp):
quadrature = rule(*rule_args, xp=xp)
n = xp.arange(5, dtype=xp.float64)
def f(x):
x_reshaped = xp.reshape(x, (-1, 1, 1))
n_reshaped = xp.reshape(n, (1, -1, 1))
return x_reshaped**n_reshaped
a = xp.asarray([0], dtype=xp.float64)
b = xp.asarray([2], dtype=xp.float64)
exact = xp.reshape(2**(n+1)/(n+1), (-1, 1))
estimate = quadrature.estimate(f, a, b)
xp_assert_close(
estimate,
exact,
rtol=1e-8,
atol=0,
)
@pytest.mark.parametrize(("rule_pair", "rule_pair_args"), [
((GaussLegendreQuadrature, GaussLegendreQuadrature), (10, 5)),
])
def test_base_1d_quadratures_error_from_difference(self, rule_pair, rule_pair_args,
xp):
n = xp.arange(5, dtype=xp.float64)
a = xp.asarray([0], dtype=xp.float64)
b = xp.asarray([2], dtype=xp.float64)
higher = rule_pair[0](rule_pair_args[0], xp=xp)
lower = rule_pair[1](rule_pair_args[1], xp=xp)
rule = NestedFixedRule(higher, lower)
res = cubature(
basic_1d_integrand,
a, b,
rule=rule,
rtol=1e-8,
args=(n, xp),
)
xp_assert_close(
res.estimate,
basic_1d_integrand_exact(n, xp),
rtol=1e-8,
atol=0,
)
@pytest.mark.parametrize("quadrature", [
GaussLegendreQuadrature
])
def test_one_point_fixed_quad_impossible(self, quadrature, xp):
with pytest.raises(Exception):
quadrature(1, xp=xp)
| TestRulesQuadrature |
python | python-openxml__python-docx | src/docx/opc/phys_pkg.py | {
"start": 3375,
"end": 4005
} | class ____(PhysPkgWriter):
"""Implements |PhysPkgWriter| interface for a zip file OPC package."""
def __init__(self, pkg_file):
super(_ZipPkgWriter, self).__init__()
self._zipf = ZipFile(pkg_file, "w", compression=ZIP_DEFLATED)
def close(self):
"""Close the zip archive, flushing any pending physical writes and releasing any
resources it's using."""
self._zipf.close()
def write(self, pack_uri, blob):
"""Write `blob` to this zip package with the membername corresponding to
`pack_uri`."""
self._zipf.writestr(pack_uri.membername, blob)
| _ZipPkgWriter |
python | tensorflow__tensorflow | third_party/xla/xla/python_api/xla_literal_test.py | {
"start": 2619,
"end": 9341
} | class ____(absltest.TestCase):
def assertShape(self, shape, expected_dimensions, expected_element_type):
self.assertEqual(shape.element_type, expected_element_type)
self.assertEqual(shape.dimensions, expected_dimensions)
def assertLayout(self, layout, expected_minor_to_major):
self.assertEqual(layout.minor_to_major, expected_minor_to_major)
def assertTupleShape(self, shape, expected):
self.assertEqual(shape.element_type, xla_data_pb2.TUPLE)
for sub_shape, sub_expected in zip(
shape.tuple_shapes, expected):
if sub_shape.element_type == xla_data_pb2.TUPLE:
self.assertTupleShape(sub_shape, sub_expected)
else:
expected_dimensions, expected_element_types = sub_expected
self.assertShape(
sub_shape, expected_dimensions, expected_element_types)
def testConvertNumpyScalar1DToLiteral(self):
for array, etype, pbfield in float_arrays:
literal = xla_literal.ConvertNumpyArrayToLiteral(array(1.1))
self.assertShape(literal.shape, [], etype)
self.assertLayout(literal.shape.layout, [])
np.testing.assert_allclose(pbfield(literal), [1.1])
def testConvertNumpyArray1DToLiteral(self):
for array, etype, pbfield in float_arrays:
literal = xla_literal.ConvertNumpyArrayToLiteral(
array([1.1, 2.2, 3.3]))
self.assertShape(literal.shape, [3], etype)
self.assertLayout(literal.shape.layout, [0])
np.testing.assert_allclose(pbfield(literal), [1.1, 2.2, 3.3])
def testConvertNumpyArray2DToLiteral(self):
for array, etype, pbfield in float_arrays:
literal = xla_literal.ConvertNumpyArrayToLiteral(
array([[1, 2, 3], [4, 5, 6]]))
self.assertShape(literal.shape, [2, 3], etype)
# By default the layout is row-major ('C' order).
self.assertLayout(literal.shape.layout, [1, 0])
np.testing.assert_allclose(pbfield(literal), [1, 2, 3, 4, 5, 6])
def testConvertNumpyArray2DToLiteralColumnMajor(self):
for array, etype, pbfield in float_arrays:
literal = xla_literal.ConvertNumpyArrayToLiteral(
array(
[[1, 2, 3], [4, 5, 6]], order='F'))
self.assertShape(literal.shape, [2, 3], etype)
self.assertLayout(literal.shape.layout, [0, 1])
np.testing.assert_allclose(pbfield(literal), [1, 4, 2, 5, 3, 6])
def testConvertNumpyArray3DToLiteral(self):
for array, etype, pbfield in float_arrays:
literal = xla_literal.ConvertNumpyArrayToLiteral(
array([[[1, 2, 3], [4, 5, 6]], [[10, 20, 30], [40, 50, 60]], [[
100, 200, 300
], [400, 500, 600]], [[1000, 2000, 3000], [4000, 5000, 6000]]]))
self.assertShape(literal.shape, [4, 2, 3], etype)
self.assertLayout(literal.shape.layout, [2, 1, 0])
np.testing.assert_allclose(pbfield(literal), [
1, 2, 3, 4, 5, 6, 10, 20, 30, 40, 50, 60, 100, 200, 300, 400, 500,
600, 1000, 2000, 3000, 4000, 5000, 6000
])
def testConvertTupleOfNumpyArray3DToLiteral(self):
for array, etype, pbfield in float_arrays:
inner_array = array([
[[1, 2, 3], [4, 5, 6]],
[[10, 20, 30], [40, 50, 60]],
[[100, 200, 300], [400, 500, 600]],
[[1000, 2000, 3000], [4000, 5000, 6000]]])
inner_spec = ([4, 2, 3], etype)
inner_flat = [
1, 2, 3, 4, 5, 6, 10, 20, 30, 40, 50, 60, 100, 200, 300, 400, 500,
600, 1000, 2000, 3000, 4000, 5000, 6000
]
literal = xla_literal.ConvertNumpyArrayToLiteral(
(inner_array, inner_array, inner_array))
self.assertTupleShape(
literal.shape,
(inner_spec, inner_spec, inner_spec))
for subliteral in literal.tuple_literals:
self.assertLayout(subliteral.shape.layout, [2, 1, 0])
np.testing.assert_allclose(pbfield(subliteral), inner_flat)
def testConvertNestedTupleOfNumpyArray3DToLiteral(self):
for array, etype, pbfield in float_arrays:
inner_array = array([
[[1, 2, 3], [4, 5, 6]],
[[10, 20, 30], [40, 50, 60]],
[[100, 200, 300], [400, 500, 600]],
[[1000, 2000, 3000], [4000, 5000, 6000]]])
inner_spec = ([4, 2, 3], etype)
inner_flat = [
1, 2, 3, 4, 5, 6, 10, 20, 30, 40, 50, 60, 100, 200, 300, 400, 500,
600, 1000, 2000, 3000, 4000, 5000, 6000
]
literal = xla_literal.ConvertNumpyArrayToLiteral(
(inner_array, (inner_array, inner_array), inner_array))
self.assertTupleShape(
literal.shape,
(inner_spec, (inner_spec, inner_spec), inner_spec))
leaf_literals = (
literal.tuple_literals[0],
literal.tuple_literals[1].tuple_literals[0],
literal.tuple_literals[1].tuple_literals[1],
literal.tuple_literals[2])
for leaf_literal in leaf_literals:
self.assertLayout(leaf_literal.shape.layout, [2, 1, 0])
np.testing.assert_allclose(pbfield(leaf_literal), inner_flat)
def testConvertNumpyArray3DToLiteralColumnMajor(self):
for array, etype, pbfield in float_arrays:
literal = xla_literal.ConvertNumpyArrayToLiteral(
array(
[[[1, 2, 3], [4, 5, 6]], [[10, 20, 30], [40, 50, 60]], [[
100, 200, 300
], [400, 500, 600]], [[1000, 2000, 3000], [4000, 5000, 6000]]],
order='F'))
self.assertShape(literal.shape, [4, 2, 3], etype)
self.assertLayout(literal.shape.layout, [0, 1, 2])
np.testing.assert_allclose(pbfield(literal), [
1, 10, 100, 1000, 4, 40, 400, 4000, 2, 20, 200, 2000, 5, 50, 500,
5000, 3, 30, 300, 3000, 6, 60, 600, 6000
])
def testNumpyToLiteralToNumpyRoundtrip(self):
def _DoRoundtripTest(ndarray_in):
literal = xla_literal.ConvertNumpyArrayToLiteral(ndarray_in)
ndarray_out = xla_literal.ConvertLiteralToNumpyArray(literal)
np.testing.assert_allclose(ndarray_in, ndarray_out)
_DoRoundtripTest(NumpyArrayBool([False, True, True, False]))
for array, _, _ in itertools.chain(float_arrays, int_arrays):
## Scalars
_DoRoundtripTest(array(42))
_DoRoundtripTest(array(42, order='F'))
## 1D
_DoRoundtripTest(array([42, 52]))
_DoRoundtripTest(array([42, 52], order='F'))
## 2D
_DoRoundtripTest(array([[1, 2, 3], [10, 20, 30]]))
_DoRoundtripTest(array([[1, 2, 3], [10, 20, 30]], order='F'))
## 3D
_DoRoundtripTest(array([[[1, 2, 3], [4, 5, 6]],
[[7, 8, 9], [10, 11, 12]]]))
_DoRoundtripTest(array([[[1, 2, 3], [4, 5, 6]],
[[7, 8, 9], [10, 11, 12]]], order='F'))
if __name__ == '__main__':
absltest.main()
| XlaLiteralTest |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-shopify/source_shopify/source.py | {
"start": 4690,
"end": 8517
} | class ____(AbstractSource):
@property
def continue_sync_on_stream_failure(self) -> bool:
return True
@staticmethod
def get_shop_name(config) -> str:
split_pattern = ".myshopify.com"
shop_name = config.get("shop")
return shop_name.split(split_pattern)[0] if split_pattern in shop_name else shop_name
@staticmethod
def format_stream_name(name) -> str:
return "".join(x.capitalize() for x in name.split("_"))
def check_connection(self, logger: logging.Logger, config: Mapping[str, Any]) -> Tuple[bool, any]:
"""
Testing connection availability for the connector.
"""
config["shop"] = self.get_shop_name(config)
config["authenticator"] = ShopifyAuthenticator(config)
return ConnectionCheckTest(config).test_connection()
def select_transactions_stream(self, config: Mapping[str, Any]) -> Stream:
"""
Allow the Customer to decide which API type to use when it comes to the `Transactions` stream.
"""
should_fetch_user_id = config.get("fetch_transactions_user_id")
if should_fetch_user_id:
return Transactions(config)
else:
return TransactionsGraphql(config)
def streams(self, config: Mapping[str, Any]) -> List[Stream]:
"""
Mapping a input config of the user input configuration as defined in the connector spec.
Defining streams to run.
"""
config["shop"] = self.get_shop_name(config)
config["authenticator"] = ShopifyAuthenticator(config)
# add `shop_id` int value
config["shop_id"] = ConnectionCheckTest(config).get_shop_id()
# define scopes checker
scopes_manager: ShopifyScopes = ShopifyScopes(config)
# get the list of the permitted streams, based on the authenticated user scopes
permitted_streams = scopes_manager.get_permitted_streams()
stream_instances = [
AbandonedCheckouts(config),
Articles(config),
BalanceTransactions(config),
Blogs(config),
Collections(config),
Collects(config),
CustomCollections(config),
CustomerJourneySummary(config),
Customers(config),
DiscountCodes(config),
Disputes(config),
DraftOrders(config),
FulfillmentOrders(config),
Fulfillments(config),
InventoryItems(config),
InventoryLevels(config),
Locations(config),
MetafieldArticles(config),
MetafieldBlogs(config),
MetafieldCollections(config),
MetafieldCustomers(config),
MetafieldDraftOrders(config),
MetafieldLocations(config),
MetafieldOrders(config),
MetafieldPages(config),
MetafieldProductImages(config),
MetafieldProducts(config),
MetafieldProductVariants(config),
MetafieldShops(config),
MetafieldSmartCollections(config),
OrderAgreements(config),
OrderRefunds(config),
OrderRisks(config),
Orders(config),
Pages(config),
PriceRules(config),
ProductImages(config),
Products(config),
ProductVariants(config),
Shop(config),
SmartCollections(config),
TenderTransactions(config),
self.select_transactions_stream(config),
CustomerAddress(config),
Countries(config=config, parent=ProfileLocationGroups(config)),
]
return [
stream_instance for stream_instance in stream_instances if self.format_stream_name(stream_instance.name) in permitted_streams
]
| SourceShopify |
python | kamyu104__LeetCode-Solutions | Python/best-team-with-no-conflicts.py | {
"start": 3185,
"end": 3884
} | class ____(object):
def bestTeamScore(self, scores, ages):
"""
:type scores: List[int]
:type ages: List[int]
:rtype: int
"""
players = sorted(zip(ages, scores))
sorted_scores = sorted(set(scores))
lookup = {score:i for i, score in enumerate(sorted_scores)} # coordinate compression
segment_tree = SegmentTree(len(lookup))
result = 0
for age, score in players:
segment_tree.update(lookup[score], lookup[score], segment_tree.query(0, lookup[score])+score)
return segment_tree.query(0, len(lookup)-1)
# Time: O(n * a)
# Space: O(n)
import collections
# optimized from Solution5
| Solution2 |
python | keras-team__keras | keras/src/ops/nn.py | {
"start": 95654,
"end": 98664
} | class ____(Operation):
def __init__(
self, kernel_size, dilation=1, padding=0, stride=1, *, name=None
):
super().__init__(name=name)
self.kernel_size = kernel_size
self.dilation = dilation
self.padding = padding
self.stride = stride
def compute_output_spec(self, x):
N, C, H, W = x.shape
def _pair(x):
return (x, x) if isinstance(x, int) else x
kH, kW = _pair(self.kernel_size)
dH, dW = _pair(self.dilation)
pH, pW = _pair(self.padding)
sH, sW = _pair(self.stride)
def out_size(L, k, d, p, s):
return (L + 2 * p - d * (k - 1) - 1) // s + 1
outH = out_size(H, kH, dH, pH, sH)
outW = out_size(W, kW, dW, pW, sW)
return KerasTensor(shape=(N, C * kH * kW, outH * outW), dtype=x.dtype)
def call(self, x):
return _unfold(
x, self.kernel_size, self.dilation, self.padding, self.stride
)
@keras_export(["keras.ops.unfold", "keras.ops.nn.unfold"])
def unfold(x, kernel_size, dilation=1, padding=0, stride=1):
"""Extract sliding local blocks from a 4-D input (batched image).
This operation is known as **im2col** when used with convolution.
It rearranges the image into overlapping or non-overlapping patches
and returns a tensor whose *depth* (last axis) contains the flattened
patches.
Args:
x: A 4-D tensor of shape `(N, C, H, W)` (**channels-first** format).
kernel_size: int or tuple of two ints, the size of the sliding window
`(kH, kW)`. If a single int is given, it is used for both
dimensions.
dilation: int or tuple of two ints, the spacing between kernel points
(a.k.a. **dilation** or **atrous** convolution). Default: 1.
padding: int or tuple of two ints, the amount of zero-padding to apply
to both spatial dimensions. Default: 0.
stride: int or tuple of two ints, the step size of the sliding window.
Default: 1.
Returns:
A 3-D tensor of shape `(N, C * kH * kW, L)` where
`L = num_patches_H * num_patches_W` is the total number of patches
extracted.
Example:
>>> x = keras.ops.ones((1, 2, 4, 4))
>>> patches = keras.ops.unfold(x, kernel_size=2, stride=2)
>>> patches.shape
(1, 8, 4)
"""
input_shape = x.shape
ndims = len(input_shape)
if ndims != 4:
raise ValueError(
f"Input must be a 4D tensor. Received: input.shape={input_shape}"
)
if any_symbolic_tensors((x,)):
return Unfold(kernel_size, dilation, padding, stride).symbolic_call(x)
return _unfold(x, kernel_size, dilation, padding, stride)
def _unfold(x, kernel_size, dilation=1, padding=0, stride=1):
"""Internal implementation of unfold."""
return backend.nn.unfold(
x,
kernel_size=kernel_size,
dilation=dilation,
padding=padding,
stride=stride,
)
| Unfold |
python | scipy__scipy | scipy/cluster/tests/test_hierarchy.py | {
"start": 11152,
"end": 11901
} | class ____:
@make_xp_test_case(is_isomorphic)
@pytest.mark.parametrize("criterion,t",
[("inconsistent", t) for t in hierarchy_test_data.fcluster_inconsistent]
+ [("distance", t) for t in hierarchy_test_data.fcluster_distance]
+ [("maxclust", t) for t in hierarchy_test_data.fcluster_maxclust]
)
def test_fclusterdata(self, t, criterion, xp):
# Tests fclusterdata(X, criterion=criterion, t=t) on a random 3-cluster data set
expectedT = xp.asarray(getattr(hierarchy_test_data, 'fcluster_' + criterion)[t])
X = xp.asarray(hierarchy_test_data.Q_X)
T = fclusterdata(X, criterion=criterion, t=t)
assert is_isomorphic(T, expectedT)
@make_xp_test_case(fcluster)
| TestFclusterData |
python | ApeWorX__ape | src/ape/cli/choices.py | {
"start": 10796,
"end": 13273
} | class ____(click.Choice):
"""
A ``click.Choice`` to provide network choice defaults for the active project.
Optionally provide a list of ecosystem names, network names, or provider names
to filter the results by.
This is used in :meth:`~ape.cli.options.network_option`.
"""
CUSTOM_NETWORK_PATTERN = re.compile(r"\w*:\w*:(https?|wss?)://\w*.*|.*\.ipc")
def __init__(
self,
case_sensitive=True,
ecosystem: _NETWORK_FILTER = None,
network: _NETWORK_FILTER = None,
provider: _NETWORK_FILTER = None,
base_type: Optional[Union[type, str]] = None,
callback: Optional[Callable] = None,
):
self._base_type = base_type
self.callback = callback
self.case_sensitive = case_sensitive
self.ecosystem = ecosystem
self.network = network
self.provider = provider
# NOTE: Purposely avoid super().init for performance reasons.
@property
def base_type(self) -> Union[type["ProviderAPI"], str]:
if self._base_type is not None:
return self._base_type
# perf: Keep base-type as a forward-ref when only using the default.
# so things load faster.
self._base_type = "ProviderAPI"
return self._base_type
@base_type.setter
def base_type(self, value):
self._base_type = value
@cached_property
def choices(self) -> Sequence[Any]: # type: ignore[override]
return get_networks(ecosystem=self.ecosystem, network=self.network, provider=self.provider)
def get_metavar(self, param):
return "[ecosystem-name][:[network-name][:[provider-name]]]"
def convert(self, value: Any, param: Optional[Parameter], ctx: Optional[Context]) -> Any:
if not value or value.lower() in ("none", "null"):
return self.callback(ctx, param, _NONE_NETWORK) if self.callback else _NONE_NETWORK
if self.base_type == "ProviderAPI" or isinstance(self.base_type, type):
# Return the provider.
from ape.utils.basemodel import ManagerAccessMixin as access
networks = access.network_manager
try:
value = networks.get_provider_from_choice(network_choice=value)
except (EcosystemNotFoundError, NetworkNotFoundError, ProviderNotFoundError) as err:
self.fail(str(err))
return self.callback(ctx, param, value) if self.callback else value
| NetworkChoice |
python | chroma-core__chroma | chromadb/logservice/logservice.py | {
"start": 859,
"end": 5870
} | class ____(Producer, Consumer):
"""
Distributed Chroma Log Service
"""
_log_service_stub: LogServiceStub
_request_timeout_seconds: int
_channel: grpc.Channel
_log_service_url: str
_log_service_port: int
def __init__(self, system: System):
self._log_service_url = system.settings.require("chroma_logservice_host")
self._log_service_port = system.settings.require("chroma_logservice_port")
self._request_timeout_seconds = system.settings.require(
"chroma_logservice_request_timeout_seconds"
)
self._opentelemetry_client = system.require(OpenTelemetryClient)
super().__init__(system)
@trace_method("LogService.start", OpenTelemetryGranularity.ALL)
@override
def start(self) -> None:
self._channel = grpc.insecure_channel(
f"{self._log_service_url}:{self._log_service_port}",
)
interceptors = [OtelInterceptor(), RetryOnRpcErrorClientInterceptor()]
self._channel = grpc.intercept_channel(self._channel, *interceptors)
self._log_service_stub = LogServiceStub(self._channel) # type: ignore
super().start()
@trace_method("LogService.stop", OpenTelemetryGranularity.ALL)
@override
def stop(self) -> None:
self._channel.close()
super().stop()
@trace_method("LogService.reset_state", OpenTelemetryGranularity.ALL)
@override
def reset_state(self) -> None:
super().reset_state()
@trace_method("LogService.delete_log", OpenTelemetryGranularity.ALL)
@override
def delete_log(self, collection_id: UUID) -> None:
raise NotImplementedError("Not implemented")
@trace_method("LogService.purge_log", OpenTelemetryGranularity.ALL)
@override
def purge_log(self, collection_id: UUID) -> None:
raise NotImplementedError("Not implemented")
@trace_method("LogService.submit_embedding", OpenTelemetryGranularity.ALL)
@override
def submit_embedding(
self, collection_id: UUID, embedding: OperationRecord
) -> SeqId:
if not self._running:
raise RuntimeError("Component not running")
return self.submit_embeddings(collection_id, [embedding])[0]
@trace_method("LogService.submit_embeddings", OpenTelemetryGranularity.ALL)
@override
def submit_embeddings(
self, collection_id: UUID, embeddings: Sequence[OperationRecord]
) -> Sequence[SeqId]:
logger.info(
f"Submitting {len(embeddings)} embeddings to log for collection {collection_id}"
)
add_attributes_to_current_span(
{
"records_count": len(embeddings),
}
)
if not self._running:
raise RuntimeError("Component not running")
if len(embeddings) == 0:
return []
# push records to the log service
counts = []
protos_to_submit = [to_proto_submit(record) for record in embeddings]
counts.append(
self.push_logs(
collection_id,
cast(Sequence[OperationRecord], protos_to_submit),
)
)
# This returns counts, which is completely incorrect
# TODO: Fix this
return counts
@trace_method("LogService.subscribe", OpenTelemetryGranularity.ALL)
@override
def subscribe(
self,
collection_id: UUID,
consume_fn: ConsumerCallbackFn,
start: Optional[SeqId] = None,
end: Optional[SeqId] = None,
id: Optional[UUID] = None,
) -> UUID:
logger.info(f"Subscribing to log for {collection_id}, noop for logservice")
return UUID(int=0)
@trace_method("LogService.unsubscribe", OpenTelemetryGranularity.ALL)
@override
def unsubscribe(self, subscription_id: UUID) -> None:
logger.info(f"Unsubscribing from {subscription_id}, noop for logservice")
@override
def min_seqid(self) -> SeqId:
return 0
@override
def max_seqid(self) -> SeqId:
return sys.maxsize
@property
@override
def max_batch_size(self) -> int:
return 100
def push_logs(self, collection_id: UUID, records: Sequence[OperationRecord]) -> int:
request = PushLogsRequest(collection_id=str(collection_id), records=records)
response = self._log_service_stub.PushLogs(
request, timeout=self._request_timeout_seconds
)
return response.record_count # type: ignore
def pull_logs(
self, collection_id: UUID, start_offset: int, batch_size: int
) -> Sequence[LogRecord]:
request = PullLogsRequest(
collection_id=str(collection_id),
start_from_offset=start_offset,
batch_size=batch_size,
end_timestamp=time.time_ns(),
)
response = self._log_service_stub.PullLogs(
request, timeout=self._request_timeout_seconds
)
return response.records # type: ignore
| LogService |
python | mwaskom__seaborn | tests/test_base.py | {
"start": 19667,
"end": 49072
} | class ____:
def test_flat_variables(self, flat_data):
p = VectorPlotter()
p.assign_variables(data=flat_data)
assert p.input_format == "wide"
assert list(p.variables) == ["x", "y"]
assert len(p.plot_data) == len(flat_data)
try:
expected_x = flat_data.index
expected_x_name = flat_data.index.name
except AttributeError:
expected_x = np.arange(len(flat_data))
expected_x_name = None
x = p.plot_data["x"]
assert_array_equal(x, expected_x)
expected_y = flat_data
expected_y_name = getattr(flat_data, "name", None)
y = p.plot_data["y"]
assert_array_equal(y, expected_y)
assert p.variables["x"] == expected_x_name
assert p.variables["y"] == expected_y_name
def test_long_df(self, long_df, long_variables):
p = VectorPlotter()
p.assign_variables(data=long_df, variables=long_variables)
assert p.input_format == "long"
assert p.variables == long_variables
for key, val in long_variables.items():
assert_array_equal(p.plot_data[key], long_df[val])
def test_long_df_with_index(self, long_df, long_variables):
p = VectorPlotter()
p.assign_variables(
data=long_df.set_index("a"),
variables=long_variables,
)
assert p.input_format == "long"
assert p.variables == long_variables
for key, val in long_variables.items():
assert_array_equal(p.plot_data[key], long_df[val])
def test_long_df_with_multiindex(self, long_df, long_variables):
p = VectorPlotter()
p.assign_variables(
data=long_df.set_index(["a", "x"]),
variables=long_variables,
)
assert p.input_format == "long"
assert p.variables == long_variables
for key, val in long_variables.items():
assert_array_equal(p.plot_data[key], long_df[val])
def test_long_dict(self, long_dict, long_variables):
p = VectorPlotter()
p.assign_variables(
data=long_dict,
variables=long_variables,
)
assert p.input_format == "long"
assert p.variables == long_variables
for key, val in long_variables.items():
assert_array_equal(p.plot_data[key], pd.Series(long_dict[val]))
@pytest.mark.parametrize(
"vector_type",
["series", "numpy", "list"],
)
def test_long_vectors(self, long_df, long_variables, vector_type):
variables = {key: long_df[val] for key, val in long_variables.items()}
if vector_type == "numpy":
variables = {key: val.to_numpy() for key, val in variables.items()}
elif vector_type == "list":
variables = {key: val.to_list() for key, val in variables.items()}
p = VectorPlotter()
p.assign_variables(variables=variables)
assert p.input_format == "long"
assert list(p.variables) == list(long_variables)
if vector_type == "series":
assert p.variables == long_variables
for key, val in long_variables.items():
assert_array_equal(p.plot_data[key], long_df[val])
def test_long_undefined_variables(self, long_df):
p = VectorPlotter()
with pytest.raises(ValueError):
p.assign_variables(
data=long_df, variables=dict(x="not_in_df"),
)
with pytest.raises(ValueError):
p.assign_variables(
data=long_df, variables=dict(x="x", y="not_in_df"),
)
with pytest.raises(ValueError):
p.assign_variables(
data=long_df, variables=dict(x="x", y="y", hue="not_in_df"),
)
@pytest.mark.parametrize(
"arg", [[], np.array([]), pd.DataFrame()],
)
def test_empty_data_input(self, arg):
p = VectorPlotter()
p.assign_variables(data=arg)
assert not p.variables
if not isinstance(arg, pd.DataFrame):
p = VectorPlotter()
p.assign_variables(variables=dict(x=arg, y=arg))
assert not p.variables
def test_units(self, repeated_df):
p = VectorPlotter()
p.assign_variables(
data=repeated_df,
variables=dict(x="x", y="y", units="u"),
)
assert_array_equal(p.plot_data["units"], repeated_df["u"])
@pytest.mark.parametrize("name", [3, 4.5])
def test_long_numeric_name(self, long_df, name):
long_df[name] = long_df["x"]
p = VectorPlotter()
p.assign_variables(data=long_df, variables={"x": name})
assert_array_equal(p.plot_data["x"], long_df[name])
assert p.variables["x"] == str(name)
def test_long_hierarchical_index(self, rng):
cols = pd.MultiIndex.from_product([["a"], ["x", "y"]])
data = rng.uniform(size=(50, 2))
df = pd.DataFrame(data, columns=cols)
name = ("a", "y")
var = "y"
p = VectorPlotter()
p.assign_variables(data=df, variables={var: name})
assert_array_equal(p.plot_data[var], df[name])
assert p.variables[var] == str(name)
def test_long_scalar_and_data(self, long_df):
val = 22
p = VectorPlotter(data=long_df, variables={"x": "x", "y": val})
assert (p.plot_data["y"] == val).all()
assert p.variables["y"] is None
def test_wide_semantic_error(self, wide_df):
err = "The following variable cannot be assigned with wide-form data: `hue`"
with pytest.raises(ValueError, match=err):
VectorPlotter(data=wide_df, variables={"hue": "a"})
def test_long_unknown_error(self, long_df):
err = "Could not interpret value `what` for `hue`"
with pytest.raises(ValueError, match=err):
VectorPlotter(data=long_df, variables={"x": "x", "hue": "what"})
def test_long_unmatched_size_error(self, long_df, flat_array):
err = "Length of ndarray vectors must match length of `data`"
with pytest.raises(ValueError, match=err):
VectorPlotter(data=long_df, variables={"x": "x", "hue": flat_array})
def test_wide_categorical_columns(self, wide_df):
wide_df.columns = pd.CategoricalIndex(wide_df.columns)
p = VectorPlotter(data=wide_df)
assert_array_equal(p.plot_data["hue"].unique(), ["a", "b", "c"])
def test_iter_data_quantitites(self, long_df):
p = VectorPlotter(
data=long_df,
variables=dict(x="x", y="y"),
)
out = p.iter_data("hue")
assert len(list(out)) == 1
var = "a"
n_subsets = len(long_df[var].unique())
semantics = ["hue", "size", "style"]
for semantic in semantics:
p = VectorPlotter(
data=long_df,
variables={"x": "x", "y": "y", semantic: var},
)
getattr(p, f"map_{semantic}")()
out = p.iter_data(semantics)
assert len(list(out)) == n_subsets
var = "a"
n_subsets = len(long_df[var].unique())
p = VectorPlotter(
data=long_df,
variables=dict(x="x", y="y", hue=var, style=var),
)
p.map_hue()
p.map_style()
out = p.iter_data(semantics)
assert len(list(out)) == n_subsets
# --
out = p.iter_data(semantics, reverse=True)
assert len(list(out)) == n_subsets
# --
var1, var2 = "a", "s"
n_subsets = len(long_df[var1].unique())
p = VectorPlotter(
data=long_df,
variables=dict(x="x", y="y", hue=var1, style=var2),
)
p.map_hue()
p.map_style()
out = p.iter_data(["hue"])
assert len(list(out)) == n_subsets
n_subsets = len(set(list(map(tuple, long_df[[var1, var2]].values))))
p = VectorPlotter(
data=long_df,
variables=dict(x="x", y="y", hue=var1, style=var2),
)
p.map_hue()
p.map_style()
out = p.iter_data(semantics)
assert len(list(out)) == n_subsets
p = VectorPlotter(
data=long_df,
variables=dict(x="x", y="y", hue=var1, size=var2, style=var1),
)
p.map_hue()
p.map_size()
p.map_style()
out = p.iter_data(semantics)
assert len(list(out)) == n_subsets
# --
var1, var2, var3 = "a", "s", "b"
cols = [var1, var2, var3]
n_subsets = len(set(list(map(tuple, long_df[cols].values))))
p = VectorPlotter(
data=long_df,
variables=dict(x="x", y="y", hue=var1, size=var2, style=var3),
)
p.map_hue()
p.map_size()
p.map_style()
out = p.iter_data(semantics)
assert len(list(out)) == n_subsets
def test_iter_data_keys(self, long_df):
semantics = ["hue", "size", "style"]
p = VectorPlotter(
data=long_df,
variables=dict(x="x", y="y"),
)
for sub_vars, _ in p.iter_data("hue"):
assert sub_vars == {}
# --
var = "a"
p = VectorPlotter(
data=long_df,
variables=dict(x="x", y="y", hue=var),
)
for sub_vars, _ in p.iter_data("hue"):
assert list(sub_vars) == ["hue"]
assert sub_vars["hue"] in long_df[var].values
p = VectorPlotter(
data=long_df,
variables=dict(x="x", y="y", size=var),
)
for sub_vars, _ in p.iter_data("size"):
assert list(sub_vars) == ["size"]
assert sub_vars["size"] in long_df[var].values
p = VectorPlotter(
data=long_df,
variables=dict(x="x", y="y", hue=var, style=var),
)
for sub_vars, _ in p.iter_data(semantics):
assert list(sub_vars) == ["hue", "style"]
assert sub_vars["hue"] in long_df[var].values
assert sub_vars["style"] in long_df[var].values
assert sub_vars["hue"] == sub_vars["style"]
var1, var2 = "a", "s"
p = VectorPlotter(
data=long_df,
variables=dict(x="x", y="y", hue=var1, size=var2),
)
for sub_vars, _ in p.iter_data(semantics):
assert list(sub_vars) == ["hue", "size"]
assert sub_vars["hue"] in long_df[var1].values
assert sub_vars["size"] in long_df[var2].values
semantics = ["hue", "col", "row"]
p = VectorPlotter(
data=long_df,
variables=dict(x="x", y="y", hue=var1, col=var2),
)
for sub_vars, _ in p.iter_data("hue"):
assert list(sub_vars) == ["hue", "col"]
assert sub_vars["hue"] in long_df[var1].values
assert sub_vars["col"] in long_df[var2].values
def test_iter_data_values(self, long_df):
p = VectorPlotter(
data=long_df,
variables=dict(x="x", y="y"),
)
p.sort = True
_, sub_data = next(p.iter_data("hue"))
assert_frame_equal(sub_data, p.plot_data)
p = VectorPlotter(
data=long_df,
variables=dict(x="x", y="y", hue="a"),
)
for sub_vars, sub_data in p.iter_data("hue"):
rows = p.plot_data["hue"] == sub_vars["hue"]
assert_frame_equal(sub_data, p.plot_data[rows])
p = VectorPlotter(
data=long_df,
variables=dict(x="x", y="y", hue="a", size="s"),
)
for sub_vars, sub_data in p.iter_data(["hue", "size"]):
rows = p.plot_data["hue"] == sub_vars["hue"]
rows &= p.plot_data["size"] == sub_vars["size"]
assert_frame_equal(sub_data, p.plot_data[rows])
def test_iter_data_reverse(self, long_df):
reversed_order = categorical_order(long_df["a"])[::-1]
p = VectorPlotter(
data=long_df,
variables=dict(x="x", y="y", hue="a")
)
iterator = p.iter_data("hue", reverse=True)
for i, (sub_vars, _) in enumerate(iterator):
assert sub_vars["hue"] == reversed_order[i]
def test_iter_data_dropna(self, null_df):
p = VectorPlotter(
data=null_df,
variables=dict(x="x", y="y", hue="a")
)
p.map_hue()
for _, sub_df in p.iter_data("hue"):
assert not sub_df.isna().any().any()
some_missing = False
for _, sub_df in p.iter_data("hue", dropna=False):
some_missing |= sub_df.isna().any().any()
assert some_missing
def test_axis_labels(self, long_df):
f, ax = plt.subplots()
p = VectorPlotter(data=long_df, variables=dict(x="a"))
p._add_axis_labels(ax)
assert ax.get_xlabel() == "a"
assert ax.get_ylabel() == ""
ax.clear()
p = VectorPlotter(data=long_df, variables=dict(y="a"))
p._add_axis_labels(ax)
assert ax.get_xlabel() == ""
assert ax.get_ylabel() == "a"
ax.clear()
p = VectorPlotter(data=long_df, variables=dict(x="a"))
p._add_axis_labels(ax, default_y="default")
assert ax.get_xlabel() == "a"
assert ax.get_ylabel() == "default"
ax.clear()
p = VectorPlotter(data=long_df, variables=dict(y="a"))
p._add_axis_labels(ax, default_x="default", default_y="default")
assert ax.get_xlabel() == "default"
assert ax.get_ylabel() == "a"
ax.clear()
p = VectorPlotter(data=long_df, variables=dict(x="x", y="a"))
ax.set(xlabel="existing", ylabel="also existing")
p._add_axis_labels(ax)
assert ax.get_xlabel() == "existing"
assert ax.get_ylabel() == "also existing"
f, (ax1, ax2) = plt.subplots(1, 2, sharey=True)
p = VectorPlotter(data=long_df, variables=dict(x="x", y="y"))
p._add_axis_labels(ax1)
p._add_axis_labels(ax2)
assert ax1.get_xlabel() == "x"
assert ax1.get_ylabel() == "y"
assert ax1.yaxis.label.get_visible()
assert ax2.get_xlabel() == "x"
assert ax2.get_ylabel() == "y"
assert not ax2.yaxis.label.get_visible()
@pytest.mark.parametrize(
"variables",
[
dict(x="x", y="y"),
dict(x="x"),
dict(y="y"),
dict(x="t", y="y"),
dict(x="x", y="a"),
]
)
def test_attach_basics(self, long_df, variables):
_, ax = plt.subplots()
p = VectorPlotter(data=long_df, variables=variables)
p._attach(ax)
assert p.ax is ax
def test_attach_disallowed(self, long_df):
_, ax = plt.subplots()
p = VectorPlotter(data=long_df, variables={"x": "a"})
with pytest.raises(TypeError):
p._attach(ax, allowed_types="numeric")
with pytest.raises(TypeError):
p._attach(ax, allowed_types=["datetime", "numeric"])
_, ax = plt.subplots()
p = VectorPlotter(data=long_df, variables={"x": "x"})
with pytest.raises(TypeError):
p._attach(ax, allowed_types="categorical")
_, ax = plt.subplots()
p = VectorPlotter(data=long_df, variables={"x": "x", "y": "t"})
with pytest.raises(TypeError):
p._attach(ax, allowed_types=["numeric", "categorical"])
def test_attach_log_scale(self, long_df):
_, ax = plt.subplots()
p = VectorPlotter(data=long_df, variables={"x": "x"})
p._attach(ax, log_scale=True)
assert ax.xaxis.get_scale() == "log"
assert ax.yaxis.get_scale() == "linear"
_, ax = plt.subplots()
p = VectorPlotter(data=long_df, variables={"x": "x"})
p._attach(ax, log_scale=2)
assert ax.xaxis.get_scale() == "log"
assert ax.yaxis.get_scale() == "linear"
_, ax = plt.subplots()
p = VectorPlotter(data=long_df, variables={"y": "y"})
p._attach(ax, log_scale=True)
assert ax.xaxis.get_scale() == "linear"
assert ax.yaxis.get_scale() == "log"
_, ax = plt.subplots()
p = VectorPlotter(data=long_df, variables={"x": "x", "y": "y"})
p._attach(ax, log_scale=True)
assert ax.xaxis.get_scale() == "log"
assert ax.yaxis.get_scale() == "log"
_, ax = plt.subplots()
p = VectorPlotter(data=long_df, variables={"x": "x", "y": "y"})
p._attach(ax, log_scale=(True, False))
assert ax.xaxis.get_scale() == "log"
assert ax.yaxis.get_scale() == "linear"
_, ax = plt.subplots()
p = VectorPlotter(data=long_df, variables={"x": "x", "y": "y"})
p._attach(ax, log_scale=(False, 2))
assert ax.xaxis.get_scale() == "linear"
assert ax.yaxis.get_scale() == "log"
_, ax = plt.subplots()
p = VectorPlotter(data=long_df, variables={"x": "a", "y": "y"})
p._attach(ax, log_scale=True)
assert ax.xaxis.get_scale() == "linear"
assert ax.yaxis.get_scale() == "log"
_, ax = plt.subplots()
p = VectorPlotter(data=long_df, variables={"x": "x", "y": "t"})
p._attach(ax, log_scale=True)
assert ax.xaxis.get_scale() == "log"
assert ax.yaxis.get_scale() == "linear"
_, ax = plt.subplots()
p = VectorPlotter(data=long_df, variables={"x": "a", "y": "b"})
p._attach(ax, log_scale=True)
assert ax.xaxis.get_scale() == "linear"
assert ax.yaxis.get_scale() == "linear"
def test_attach_converters(self, long_df):
_, ax = plt.subplots()
p = VectorPlotter(data=long_df, variables={"x": "x", "y": "t"})
p._attach(ax)
assert get_converter(ax.xaxis) is None
assert "Date" in get_converter(ax.yaxis).__class__.__name__
_, ax = plt.subplots()
p = VectorPlotter(data=long_df, variables={"x": "a", "y": "y"})
p._attach(ax)
assert "CategoryConverter" in get_converter(ax.xaxis).__class__.__name__
assert get_converter(ax.yaxis) is None
def test_attach_facets(self, long_df):
g = FacetGrid(long_df, col="a")
p = VectorPlotter(data=long_df, variables={"x": "x", "col": "a"})
p._attach(g)
assert p.ax is None
assert p.facets == g
def test_scale_transform_identity(self, long_df):
_, ax = plt.subplots()
p = VectorPlotter(data=long_df, variables={"x": "x"})
p._attach(ax)
fwd, inv = p._get_scale_transforms("x")
x = np.arange(1, 10)
assert_array_equal(fwd(x), x)
assert_array_equal(inv(x), x)
def test_scale_transform_identity_facets(self, long_df):
g = FacetGrid(long_df, col="a")
p = VectorPlotter(data=long_df, variables={"x": "x", "col": "a"})
p._attach(g)
fwd, inv = p._get_scale_transforms("x")
x = np.arange(1, 10)
assert_array_equal(fwd(x), x)
assert_array_equal(inv(x), x)
def test_scale_transform_log(self, long_df):
_, ax = plt.subplots()
ax.set_xscale("log")
p = VectorPlotter(data=long_df, variables={"x": "x"})
p._attach(ax)
fwd, inv = p._get_scale_transforms("x")
x = np.arange(1, 4)
assert_array_almost_equal(fwd(x), np.log10(x))
assert_array_almost_equal(inv(x), 10 ** x)
def test_scale_transform_facets(self, long_df):
g = FacetGrid(long_df, col="a")
p = VectorPlotter(data=long_df, variables={"x": "x", "col": "a"})
p._attach(g)
fwd, inv = p._get_scale_transforms("x")
x = np.arange(4)
assert_array_equal(inv(fwd(x)), x)
def test_scale_transform_mixed_facets(self, long_df):
g = FacetGrid(long_df, col="a", sharex=False)
g.axes.flat[0].set_xscale("log")
p = VectorPlotter(data=long_df, variables={"x": "x", "col": "a"})
p._attach(g)
err = "Cannot determine transform with mixed scales on faceted axes"
with pytest.raises(RuntimeError, match=err):
p._get_scale_transforms("x")
def test_attach_shared_axes(self, long_df):
g = FacetGrid(long_df)
p = VectorPlotter(data=long_df, variables={"x": "x", "y": "y"})
p._attach(g)
assert p.converters["x"].nunique() == 1
g = FacetGrid(long_df, col="a")
p = VectorPlotter(data=long_df, variables={"x": "x", "y": "y", "col": "a"})
p._attach(g)
assert p.converters["x"].nunique() == 1
assert p.converters["y"].nunique() == 1
g = FacetGrid(long_df, col="a", sharex=False)
p = VectorPlotter(data=long_df, variables={"x": "x", "y": "y", "col": "a"})
p._attach(g)
assert p.converters["x"].nunique() == p.plot_data["col"].nunique()
assert p.converters["x"].groupby(p.plot_data["col"]).nunique().max() == 1
assert p.converters["y"].nunique() == 1
g = FacetGrid(long_df, col="a", sharex=False, col_wrap=2)
p = VectorPlotter(data=long_df, variables={"x": "x", "y": "y", "col": "a"})
p._attach(g)
assert p.converters["x"].nunique() == p.plot_data["col"].nunique()
assert p.converters["x"].groupby(p.plot_data["col"]).nunique().max() == 1
assert p.converters["y"].nunique() == 1
g = FacetGrid(long_df, col="a", row="b")
p = VectorPlotter(
data=long_df, variables={"x": "x", "y": "y", "col": "a", "row": "b"},
)
p._attach(g)
assert p.converters["x"].nunique() == 1
assert p.converters["y"].nunique() == 1
g = FacetGrid(long_df, col="a", row="b", sharex=False)
p = VectorPlotter(
data=long_df, variables={"x": "x", "y": "y", "col": "a", "row": "b"},
)
p._attach(g)
assert p.converters["x"].nunique() == len(g.axes.flat)
assert p.converters["y"].nunique() == 1
g = FacetGrid(long_df, col="a", row="b", sharex="col")
p = VectorPlotter(
data=long_df, variables={"x": "x", "y": "y", "col": "a", "row": "b"},
)
p._attach(g)
assert p.converters["x"].nunique() == p.plot_data["col"].nunique()
assert p.converters["x"].groupby(p.plot_data["col"]).nunique().max() == 1
assert p.converters["y"].nunique() == 1
g = FacetGrid(long_df, col="a", row="b", sharey="row")
p = VectorPlotter(
data=long_df, variables={"x": "x", "y": "y", "col": "a", "row": "b"},
)
p._attach(g)
assert p.converters["x"].nunique() == 1
assert p.converters["y"].nunique() == p.plot_data["row"].nunique()
assert p.converters["y"].groupby(p.plot_data["row"]).nunique().max() == 1
def test_get_axes_single(self, long_df):
ax = plt.figure().subplots()
p = VectorPlotter(data=long_df, variables={"x": "x", "hue": "a"})
p._attach(ax)
assert p._get_axes({"hue": "a"}) is ax
def test_get_axes_facets(self, long_df):
g = FacetGrid(long_df, col="a")
p = VectorPlotter(data=long_df, variables={"x": "x", "col": "a"})
p._attach(g)
assert p._get_axes({"col": "b"}) is g.axes_dict["b"]
g = FacetGrid(long_df, col="a", row="c")
p = VectorPlotter(
data=long_df, variables={"x": "x", "col": "a", "row": "c"}
)
p._attach(g)
assert p._get_axes({"row": 1, "col": "b"}) is g.axes_dict[(1, "b")]
def test_comp_data(self, long_df):
p = VectorPlotter(data=long_df, variables={"x": "x", "y": "t"})
# We have disabled this check for now, while it remains part of
# the internal API, because it will require updating a number of tests
# with pytest.raises(AttributeError):
# p.comp_data
_, ax = plt.subplots()
p._attach(ax)
assert_array_equal(p.comp_data["x"], p.plot_data["x"])
assert_array_equal(
p.comp_data["y"], ax.yaxis.convert_units(p.plot_data["y"])
)
p = VectorPlotter(data=long_df, variables={"x": "a"})
_, ax = plt.subplots()
p._attach(ax)
assert_array_equal(
p.comp_data["x"], ax.xaxis.convert_units(p.plot_data["x"])
)
def test_comp_data_log(self, long_df):
p = VectorPlotter(data=long_df, variables={"x": "z", "y": "y"})
_, ax = plt.subplots()
p._attach(ax, log_scale=(True, False))
assert_array_equal(
p.comp_data["x"], np.log10(p.plot_data["x"])
)
assert_array_equal(p.comp_data["y"], p.plot_data["y"])
def test_comp_data_category_order(self):
s = (pd.Series(["a", "b", "c", "a"], dtype="category")
.cat.set_categories(["b", "c", "a"], ordered=True))
p = VectorPlotter(variables={"x": s})
_, ax = plt.subplots()
p._attach(ax)
assert_array_equal(
p.comp_data["x"],
[2, 0, 1, 2],
)
@pytest.fixture(
params=itertools.product(
[None, np.nan, pd.NA],
["numeric", "category", "datetime"],
)
)
def comp_data_missing_fixture(self, request):
# This fixture holds the logic for parameterizing
# the following test (test_comp_data_missing)
NA, var_type = request.param
comp_data = [0, 1, np.nan, 2, np.nan, 1]
if var_type == "numeric":
orig_data = [0, 1, NA, 2, np.inf, 1]
elif var_type == "category":
orig_data = ["a", "b", NA, "c", pd.NA, "b"]
elif var_type == "datetime":
# Use 1-based numbers to avoid issue on matplotlib<3.2
# Could simplify the test a bit when we roll off that version
comp_data = [1, 2, np.nan, 3, np.nan, 2]
numbers = [1, 2, 3, 2]
orig_data = mpl.dates.num2date(numbers)
orig_data.insert(2, NA)
orig_data.insert(4, np.inf)
return orig_data, comp_data
def test_comp_data_missing(self, comp_data_missing_fixture):
orig_data, comp_data = comp_data_missing_fixture
p = VectorPlotter(variables={"x": orig_data})
ax = plt.figure().subplots()
p._attach(ax)
assert_array_equal(p.comp_data["x"], comp_data)
assert p.comp_data["x"].dtype == "float"
def test_comp_data_duplicate_index(self):
x = pd.Series([1, 2, 3, 4, 5], [1, 1, 1, 2, 2])
p = VectorPlotter(variables={"x": x})
ax = plt.figure().subplots()
p._attach(ax)
assert_array_equal(p.comp_data["x"], x)
def test_comp_data_nullable_dtype(self):
x = pd.Series([1, 2, 3, 4], dtype="Int64")
p = VectorPlotter(variables={"x": x})
ax = plt.figure().subplots()
p._attach(ax)
assert_array_equal(p.comp_data["x"], x)
assert p.comp_data["x"].dtype == "float"
def test_var_order(self, long_df):
order = ["c", "b", "a"]
for var in ["hue", "size", "style"]:
p = VectorPlotter(data=long_df, variables={"x": "x", var: "a"})
mapper = getattr(p, f"map_{var}")
mapper(order=order)
assert p.var_levels[var] == order
def test_scale_native(self, long_df):
p = VectorPlotter(data=long_df, variables={"x": "x"})
with pytest.raises(NotImplementedError):
p.scale_native("x")
def test_scale_numeric(self, long_df):
p = VectorPlotter(data=long_df, variables={"y": "y"})
with pytest.raises(NotImplementedError):
p.scale_numeric("y")
def test_scale_datetime(self, long_df):
p = VectorPlotter(data=long_df, variables={"x": "t"})
with pytest.raises(NotImplementedError):
p.scale_datetime("x")
def test_scale_categorical(self, long_df):
p = VectorPlotter(data=long_df, variables={"x": "x"})
p.scale_categorical("y")
assert p.variables["y"] is None
assert p.var_types["y"] == "categorical"
assert (p.plot_data["y"] == "").all()
p = VectorPlotter(data=long_df, variables={"x": "s"})
p.scale_categorical("x")
assert p.var_types["x"] == "categorical"
assert hasattr(p.plot_data["x"], "str")
assert not p._var_ordered["x"]
assert p.plot_data["x"].is_monotonic_increasing
assert_array_equal(p.var_levels["x"], p.plot_data["x"].unique())
p = VectorPlotter(data=long_df, variables={"x": "a"})
p.scale_categorical("x")
assert not p._var_ordered["x"]
assert_array_equal(p.var_levels["x"], categorical_order(long_df["a"]))
p = VectorPlotter(data=long_df, variables={"x": "a_cat"})
p.scale_categorical("x")
assert p._var_ordered["x"]
assert_array_equal(p.var_levels["x"], categorical_order(long_df["a_cat"]))
p = VectorPlotter(data=long_df, variables={"x": "a"})
order = np.roll(long_df["a"].unique(), 1)
p.scale_categorical("x", order=order)
assert p._var_ordered["x"]
assert_array_equal(p.var_levels["x"], order)
p = VectorPlotter(data=long_df, variables={"x": "s"})
p.scale_categorical("x", formatter=lambda x: f"{x:%}")
assert p.plot_data["x"].str.endswith("%").all()
assert all(s.endswith("%") for s in p.var_levels["x"])
| TestVectorPlotter |
python | pytorch__pytorch | torch/_export/db/examples/cond_predicate.py | {
"start": 95,
"end": 663
} | class ____(torch.nn.Module):
"""
The conditional statement (aka predicate) passed to cond() must be one of the following:
- torch.Tensor with a single element
- boolean expression
NOTE: If the `pred` is test on a dim with batch size < 2, it will be specialized.
"""
def forward(self, x):
pred = x.dim() > 2 and x.shape[2] > 10
return cond(pred, lambda x: x.cos(), lambda y: y.sin(), [x])
example_args = (torch.randn(6, 4, 3),)
tags = {
"torch.cond",
"torch.dynamic-shape",
}
model = CondPredicate()
| CondPredicate |
python | celery__celery | celery/worker/consumer/events.py | {
"start": 226,
"end": 2054
} | class ____(bootsteps.StartStopStep):
"""Service used for sending monitoring events."""
requires = (Connection,)
def __init__(self, c,
task_events=True,
without_heartbeat=False,
without_gossip=False,
**kwargs):
self.groups = None if task_events else ['worker']
self.send_events = (
task_events or
not without_gossip or
not without_heartbeat
)
self.enabled = self.send_events
c.event_dispatcher = None
super().__init__(c, **kwargs)
def start(self, c):
# flush events sent while connection was down.
prev = self._close(c)
dis = c.event_dispatcher = c.app.events.Dispatcher(
c.connection_for_write(),
hostname=c.hostname,
enabled=self.send_events,
groups=self.groups,
# we currently only buffer events when the event loop is enabled
# XXX This excludes eventlet/gevent, which should actually buffer.
buffer_group=['task'] if c.hub else None,
on_send_buffered=c.on_send_event_buffered if c.hub else None,
)
if prev:
dis.extend_buffer(prev)
dis.flush()
def stop(self, c):
pass
def _close(self, c):
if c.event_dispatcher:
dispatcher = c.event_dispatcher
# remember changes from remote control commands:
self.groups = dispatcher.groups
# close custom connection
if dispatcher.connection:
ignore_errors(c, dispatcher.connection.close)
ignore_errors(c, dispatcher.close)
c.event_dispatcher = None
return dispatcher
def shutdown(self, c):
self._close(c)
| Events |
python | Pylons__pyramid | tests/test_router.py | {
"start": 61341,
"end": 61481
} | class ____:
def __init__(self, root):
self.root = root
def __call__(self, environ):
return self.root
| DummyRootFactory |
python | scipy__scipy | scipy/special/tests/test_basic.py | {
"start": 46902,
"end": 47879
} | class ____:
"""
Test beta and betaln.
"""
def test_beta(self):
assert_equal(special.beta(1, 1), 1.0)
assert_allclose(special.beta(-100.3, 1e-200), special.gamma(1e-200))
assert_allclose(special.beta(0.0342, 171), 24.070498359873497,
rtol=1e-13, atol=0)
bet = special.beta(2, 4)
betg = (special.gamma(2)*special.gamma(4))/special.gamma(6)
assert_allclose(bet, betg, rtol=1e-13)
def test_beta_inf(self):
assert_(np.isinf(special.beta(-1, 2)))
def test_betaln(self):
assert_equal(special.betaln(1, 1), 0.0)
assert_allclose(special.betaln(-100.3, 1e-200),
special.gammaln(1e-200))
assert_allclose(special.betaln(0.0342, 170), 3.1811881124242447,
rtol=1e-14, atol=0)
betln = special.betaln(2, 4)
bet = log(abs(special.beta(2, 4)))
assert_allclose(betln, bet, rtol=1e-13)
| TestBeta |
python | pytorch__pytorch | torch/autograd/graph.py | {
"start": 25039,
"end": 27216
} | class ____(TorchDispatchMode):
def __init__(self, ctx: "_AllowMutationOnSavedContext") -> None:
self.ctx = ctx
def __torch_dispatch__(
self,
func: "OpOverload",
types: Iterable[type],
args: tuple[Any, ...] = (),
kwargs: Optional[dict[Any, Any]] = None,
) -> Any:
kwargs = kwargs or {}
def maybe_clone(t: torch.Tensor) -> None:
tid = _get_tid(t)
sid = _get_sid(t)
ctx = self.ctx
if sid in ctx.sid_to_tid:
for tid in ctx.sid_to_tid[sid]:
if tid not in ctx.tid_to_weakhandle:
# We know that if tid is in sid_to_tid, then it must also be in
# tid_to_weakhandle. However, it is possible for the tensor to be
# saved at one point, but cleared by backward before it is modified
# in-place. Consider the following example:
#
# >>> a = torch.randn(2, 3, requires_grad=True).clone()
# >>> out = (a**2).sum()
# >>> out.backward()
# >>> a.sin_()
continue
handle = ctx.tid_to_weakhandle[tid]
if handle in ctx.cloned:
# The same exact tensor has been cloned already
continue
ctx.cloned[handle] = ctx.original[handle].clone()
del ctx.original[handle]
for idx, arg in enumerate(func._schema.arguments):
if arg.alias_info is not None and arg.alias_info.is_write:
if arg.is_out:
maybe_clone(kwargs["out"])
elif isinstance(args[idx], list):
# Foreach case. (Possible optimization: if most of the
# tensors need to be cloned, use a for each clone?)
for t in args[idx]:
maybe_clone(t)
else:
maybe_clone(args[idx])
return func(*args, **kwargs)
| _CloneArgBeforeMutateMode |
python | allegroai__clearml | clearml/backend_api/services/v2_9/tasks.py | {
"start": 261628,
"end": 264649
} | class ____(Response):
"""
Response of tasks.publish endpoint.
:param committed_versions_results: Committed versions results
:type committed_versions_results: Sequence[dict]
:param updated: Number of tasks updated (0 or 1)
:type updated: int
:param fields: Updated fields names and values
:type fields: dict
"""
_service = "tasks"
_action = "publish"
_version = "2.9"
_schema = {
"definitions": {},
"properties": {
"committed_versions_results": {
"description": "Committed versions results",
"items": {"additionalProperties": True, "type": "object"},
"type": ["array", "null"],
},
"fields": {
"additionalProperties": True,
"description": "Updated fields names and values",
"type": ["object", "null"],
},
"updated": {
"description": "Number of tasks updated (0 or 1)",
"enum": [0, 1],
"type": ["integer", "null"],
},
},
"type": "object",
}
def __init__(
self,
committed_versions_results: Optional[List[dict]] = None,
updated: Optional[int] = None,
fields: Optional[dict] = None,
**kwargs: Any
) -> None:
super(PublishResponse, self).__init__(**kwargs)
self.committed_versions_results = committed_versions_results
self.updated = updated
self.fields = fields
@schema_property("committed_versions_results")
def committed_versions_results(self) -> Optional[List[dict]]:
return self._property_committed_versions_results
@committed_versions_results.setter
def committed_versions_results(self, value: Optional[List[dict]]) -> None:
if value is None:
self._property_committed_versions_results = None
return
self.assert_isinstance(value, "committed_versions_results", (list, tuple))
self.assert_isinstance(value, "committed_versions_results", (dict,), is_array=True)
self._property_committed_versions_results = value
@schema_property("updated")
def updated(self) -> Optional[int]:
return self._property_updated
@updated.setter
def updated(self, value: Optional[int]) -> None:
if value is None:
self._property_updated = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "updated", six.integer_types)
self._property_updated = value
@schema_property("fields")
def fields(self) -> Optional[dict]:
return self._property_fields
@fields.setter
def fields(self, value: Optional[dict]) -> None:
if value is None:
self._property_fields = None
return
self.assert_isinstance(value, "fields", (dict,))
self._property_fields = value
| PublishResponse |
python | anthropics__anthropic-sdk-python | src/anthropic/lib/bedrock/_beta_messages.py | {
"start": 2510,
"end": 2749
} | class ____:
def __init__(self, messages: AsyncMessages) -> None:
self._messages = messages
self.create = _legacy_response.async_to_raw_response_wrapper(
messages.create,
)
| AsyncMessagesWithRawResponse |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-jira/integration_tests/fixtures/data_generator/streams.py | {
"start": 10467,
"end": 11335
} | class ____(Projects, GeneratorMixin):
"""
https://developer.atlassian.com/cloud/jira/platform/rest/v3/api-group-projects/#api-rest-api-3-project-post
"""
def path(self, **kwargs) -> str:
return "project"
def generate(self):
for index in range(1, 51):
payload = json.dumps(
{
"key": f"TESTKEY{index}",
"name": f"Test project {index}",
"projectTypeKey": "software",
"projectTemplateKey": "com.pyxis.greenhopper.jira:gh-simplified-scrum-classic",
"description": f"Test project {index} description",
"leadAccountId": "5fc9e78d2730d800760becc4",
"assigneeType": "PROJECT_LEAD",
}
)
self.generate_record(payload)
| ProjectsGenerator |
python | wandb__wandb | wandb/sdk/artifacts/_generated/fragments.py | {
"start": 3647,
"end": 3861
} | class ____(GQLResult):
typename__: Typename[Literal["ArtifactType"]] = "ArtifactType"
id: GQLId
name: str
description: Optional[str]
created_at: str = Field(alias="createdAt")
| ArtifactTypeFragment |
python | pytorch__pytorch | test/dynamo/test_ctx_manager.py | {
"start": 49687,
"end": 66162
} | class ____(torch.nn.Module):
def forward(self, L_y_: "f32[]"):
l_y_ = L_y_
_saved_tensors_hooks_disable = torch._C._autograd._saved_tensors_hooks_disable('This is not supported'); _saved_tensors_hooks_disable = None
mul: "f32[]" = l_y_ * 2; l_y_ = None
_saved_tensors_hooks_enable = torch._C._autograd._saved_tensors_hooks_enable(); _saved_tensors_hooks_enable = None
return (mul,)
""", # NOQA: B950
)
def test_context_wrapping_grad_mode_decorator(self):
ctx_wrappers = [(torch.enable_grad, True), (torch.no_grad, False)]
for call in [True, False]:
for i in range(2):
torch._dynamo.reset()
ctx_wrapper, _ = ctx_wrappers[i]
ctx_wrapper_inverse, mode_inverse = ctx_wrappers[(i + 1) % 2]
def fn(x):
def inner_func(x):
return x.sin()
with ctx_wrapper_inverse():
if call:
inner_func = ctx_wrapper()(inner_func)
else:
inner_func = ctx_wrapper(inner_func)
# Calling no_grad or enabled_grad should not mutate global state
assert torch.is_grad_enabled() == mode_inverse
with ctx_wrapper_inverse():
return inner_func(x)
x = torch.zeros(10, requires_grad=True)
opt_fn = torch.compile(fn, backend="eager", fullgraph=True)
self.assertEqual(fn(x), opt_fn(x))
self.assertEqual(fn(x).requires_grad, opt_fn(x).requires_grad)
def test_context_wrapping_grad_mode_nested_function_decorator(self):
ctx_wrappers = [(torch.enable_grad, True), (torch.no_grad, False)]
for call in [True, False]:
for i in range(2):
torch._dynamo.reset()
ctx_wrapper, _ = ctx_wrappers[i]
ctx_wrapper_inverse, mode_inverse = ctx_wrappers[(i + 1) % 2]
def fn(x):
with ctx_wrapper_inverse():
if call:
@ctx_wrapper()
def inner_func(x):
return x.sin()
else:
@ctx_wrapper
def inner_func(x):
return x.sin()
# Calling no_grad or enabled_grad should not mutate global state
assert torch.is_grad_enabled() == mode_inverse
with ctx_wrapper_inverse():
return inner_func(x)
x = torch.zeros(10, requires_grad=True)
opt_fn = torch.compile(fn, backend="eager", fullgraph=True)
self.assertEqual(fn(x), opt_fn(x))
self.assertEqual(fn(x).requires_grad, opt_fn(x).requires_grad)
def test_context_wrapping_set_grad_enabled_nested_function(self):
modes = [True, False]
for decorator in [True, False]:
for i in range(2):
torch._dynamo.reset()
mode = modes[i]
mode_inverse = modes[(i + 1) % 2]
def fn(x):
with torch.set_grad_enabled(mode_inverse):
if decorator:
@torch.set_grad_enabled(mode)
def inner_func(x):
return x.sin()
else:
def inner_func(x):
return x.sin()
inner_func = torch.set_grad_enabled(mode)(inner_func)
# Consuming set_grad_enabled by calling it on a function
# should not mutate global state
assert torch.is_grad_enabled() == mode_inverse
with torch.set_grad_enabled(mode_inverse):
return inner_func(x)
x = torch.zeros(10, requires_grad=True)
opt_fn = torch.compile(fn, backend="eager", fullgraph=True)
self.assertEqual(fn(x), opt_fn(x))
self.assertEqual(fn(x).requires_grad, opt_fn(x).requires_grad)
def test_inactive_context_graph_break_local(self):
def fn(x):
x = x + 1
ctx = torch.set_grad_enabled(True)
torch._dynamo.graph_break()
with ctx:
x = x + 1
return x
x = torch.zeros(10, requires_grad=False)
cnts = torch._dynamo.testing.CompileCounter()
opt_fn = torch.compile(fn, backend=cnts)
self.assertEqual(fn(x), opt_fn(x))
self.assertEqual(fn(x).requires_grad, opt_fn(x).requires_grad)
self.assertEqual(cnts.frame_count, 2)
def test_inactive_context_graph_break_local_nullctx(self):
import contextlib
# test with context manager that results in None target_values
def fn(x):
x = x + 1
ctx = contextlib.nullcontext()
torch._dynamo.graph_break()
with ctx:
x = x + 1
return x
x = torch.zeros(10, requires_grad=False)
cnts = torch._dynamo.testing.CompileCounter()
opt_fn = torch.compile(fn, backend=cnts)
self.assertEqual(fn(x), opt_fn(x))
self.assertEqual(fn(x).requires_grad, opt_fn(x).requires_grad)
self.assertEqual(cnts.frame_count, 2)
def test_inactive_context_graph_break_local_nullctx2(self):
import contextlib
# test with nullcontext where graph break happens
# in an inlined function that returns something
def gn():
torch._dynamo.graph_break()
return [0, 1, 2]
def fn(x):
x = x + 1
ctx = contextlib.nullcontext()
lst = gn()
with ctx:
x = x + lst[1]
return x
x = torch.zeros(10, requires_grad=False)
cnts = torch._dynamo.testing.CompileCounter()
opt_fn = torch.compile(fn, backend=cnts)
self.assertEqual(fn(x), opt_fn(x))
self.assertEqual(fn(x).requires_grad, opt_fn(x).requires_grad)
self.assertEqual(cnts.frame_count, 2)
def test_inactive_context_graph_break_stack(self):
def gn(ctx):
torch._dynamo.graph_break()
return ctx
def fn(x):
x = x + 1
ctx = gn(torch.set_grad_enabled(True))
# we expect a graph break on next line as well
with ctx:
x = x + 1
return x
x = torch.zeros(10, requires_grad=False)
cnts = torch._dynamo.testing.CompileCounter()
opt_fn = torch.compile(fn, backend=cnts)
self.assertEqual(fn(x), opt_fn(x))
self.assertEqual(fn(x).requires_grad, opt_fn(x).requires_grad)
def test_inactive_context_graph_break_stack2(self):
def gn(x, ctx, y, z, dummy):
with ctx:
return x * y * z
def fn(x):
x = x + 1
x = gn(x, torch.set_grad_enabled(True), 2, 3, torch._dynamo.graph_break())
return x
x = torch.zeros(10, requires_grad=False)
cnts = torch._dynamo.testing.CompileCounter()
opt_fn = torch.compile(fn, backend=cnts)
self.assertEqual(fn(x), opt_fn(x))
self.assertEqual(fn(x).requires_grad, opt_fn(x).requires_grad)
self.assertEqual(cnts.frame_count, 2)
def test_sdpa_kernel_ctx_manager1(self):
modified_backend_state = [torch.nn.attention.SDPBackend.MATH]
@torch._dynamo.allow_in_graph
def check_backend_state_is_modified():
self.assertEqual(
torch.nn.attention._cur_sdpa_kernel_backends(), modified_backend_state
)
def f(x):
with torch.nn.attention.sdpa_kernel(
# pyre-fixme[16]: Module `torch.nn.attention` has no attribute `SDPBackend`.
[torch.nn.attention.SDPBackend.MATH]
):
output = torch.nn.functional.scaled_dot_product_attention(x, x, x).to(
torch.float32
)
check_backend_state_is_modified()
return output
opt_f = torch.compile(f, backend="eager", fullgraph=True)
opt_f(torch.randn(2, 2, 2, 2).to(dtype=torch.float16))
def test_sdpa_kernel_ctx_manager2(self):
original_backend_state = set(torch.nn.attention._cur_sdpa_kernel_backends())
modified_backend_state = [torch.nn.attention.SDPBackend.MATH]
@torch._dynamo.allow_in_graph
def check_backend_state_is_original():
self.assertEqual(
set(torch.nn.attention._cur_sdpa_kernel_backends()),
original_backend_state,
)
@torch._dynamo.allow_in_graph
def check_backend_state_is_modified():
self.assertEqual(
torch.nn.attention._cur_sdpa_kernel_backends(), modified_backend_state
)
def g(x):
torch._dynamo.graph_break()
output = torch.nn.functional.scaled_dot_product_attention(x, x, x).to(
torch.float32
)
check_backend_state_is_modified()
return output
def f(x):
check_backend_state_is_original()
with torch.nn.attention.sdpa_kernel(
# pyre-fixme[16]: Module `torch.nn.attention` has no attribute `SDPBackend`.
[torch.nn.attention.SDPBackend.MATH]
):
output1 = torch.nn.functional.scaled_dot_product_attention(x, x, x).to(
torch.float32
)
check_backend_state_is_modified()
# graph break
output2 = g(x)
output3 = torch.nn.functional.scaled_dot_product_attention(x, x, x).to(
torch.float32
)
check_backend_state_is_modified()
check_backend_state_is_original()
return output1 + output2 + output3
cnts = torch._dynamo.testing.CompileCounter()
opt_f = torch.compile(f, backend=cnts)
opt_f(torch.randn(2, 2, 2, 2).to(dtype=torch.float16))
self.assertEqual(cnts.frame_count, 2)
# test sdpa_kernel graph break with 2 arguments
def test_sdpa_kernel_ctx_manager3(self):
modified_backend_state = {
torch.nn.attention.SDPBackend.MATH,
torch.nn.attention.SDPBackend.FLASH_ATTENTION,
}
@torch._dynamo.allow_in_graph
def check_backend_state_is_modified():
self.assertEqual(
set(torch.nn.attention._cur_sdpa_kernel_backends()),
modified_backend_state,
)
def f(x):
with torch.nn.attention.sdpa_kernel(
# pyre-fixme[16]: Module `torch.nn.attention` has no attribute `SDPBackend`.
[
torch.nn.attention.SDPBackend.MATH,
torch.nn.attention.SDPBackend.FLASH_ATTENTION,
]
):
# FLASH_ATTENTION may not be supported, but we're not actually
# doing any sdpa
x = x + 1
torch._dynamo.graph_break()
check_backend_state_is_modified()
x = x + 1
return x
opt_f = torch.compile(f, backend="eager")
opt_f(torch.randn(2, 2))
# Regression test to make sure dynamo won't crash on these kwargs.
def test_sdpa_kernel_ctx_manager_kwargs(self):
backends = [torch.nn.attention.SDPBackend.MATH]
@torch._dynamo.allow_in_graph
def check_backend_state_is_modified():
self.assertEqual(
set(torch.nn.attention._cur_sdpa_kernel_backends()),
set(backends),
)
def f(x):
with torch.nn.attention.sdpa_kernel(backends=backends, set_priority=True):
x = x + 1
check_backend_state_is_modified()
x = x + 1
return x
opt_f = torch.compile(f, backend="eager")
opt_f(torch.randn(2, 2))
# Regression test to make sure dynamo won't graph break on calling functions
# decorated with special context manager.
def test_sdpa_kernel_ctx_manager_as_decorator(self):
SDPA_BACKEND_PRIORITY = [
torch.nn.attention.SDPBackend.MATH,
torch.nn.attention.SDPBackend.EFFICIENT_ATTENTION,
torch.nn.attention.SDPBackend.FLASH_ATTENTION,
]
@torch.nn.attention.sdpa_kernel(
backends=SDPA_BACKEND_PRIORITY, set_priority=True
)
def scaled_dot_product_attention(q, k, v, *args, **kwargs):
return torch.nn.functional.scaled_dot_product_attention(
q, k, v, *args, **kwargs
)
def f(x):
return scaled_dot_product_attention(x, x, x)
opt_f = torch.compile(f, backend="eager", fullgraph=True)
x = torch.rand(16, 16, 64, 256, dtype=torch.float16)
ref = f(x)
res = opt_f(x)
self.assertEqual(ref, res)
# Regression test to make sure the value of set_priority is used correctly.
def test_sdpa_kernel_ctx_manager_set_priority(self):
backends = [torch.nn.attention.SDPBackend.MATH]
default_priority = torch._C._get_sdp_priority_order()
@torch._dynamo.allow_in_graph
def check_backend_priority(changed: bool):
self.assertEqual(
changed,
torch._C._get_sdp_priority_order() != default_priority,
)
def f(x):
with torch.nn.attention.sdpa_kernel(backends=backends, set_priority=True):
x = x + 1
check_backend_priority(changed=True)
x = x + 1
with torch.nn.attention.sdpa_kernel(backends=backends, set_priority=False):
x = x + 1
check_backend_priority(changed=False)
x = x + 1
return x
opt_f = torch.compile(f, backend="eager")
opt_f(torch.randn(2, 2))
def test_torch_profiler_use_after_with_block(self):
counters.clear()
def fn(x):
with torch.profiler.profile() as p:
pass
p.profiler.kineto_results.experimental_event_tree()
return x + 1
opt_fn = torch.compile(fn, backend="eager")
x = torch.ones(1)
ref = fn(x)
res = opt_fn(x)
self.assertEqual(ref, res)
self.assertEqual(len(counters["graph_break"]), 1)
def test_311_resume_block_keyerror(self):
# https://github.com/pytorch/pytorch/issues/162313
flag = True
def fn(x):
x = x + 1
torch._dynamo.graph_break()
x = x + 2
if flag:
with torch.no_grad():
torch._dynamo.graph_break()
x = x + 4
else:
with torch.no_grad():
torch._dynamo.graph_break()
x = x + 8
return x + 16
inp = torch.ones(3)
opt_fn = torch.compile(fn, backend="eager")
self.assertEqual(fn(inp), opt_fn(inp))
flag = False
self.assertEqual(fn(inp), opt_fn(inp))
def test_311_resume_block_keyerror2(self):
# https://github.com/pytorch/pytorch/issues/166176
def fn(x):
torch._dynamo.graph_break()
with torch.no_grad():
with torch.no_grad():
torch._dynamo.graph_break()
return x + 1
inp = torch.ones(3)
opt_fn = torch.compile(fn, backend="eager")
self.assertEqual(fn(inp), opt_fn(inp))
def test_store_attr_graph_break_key_error(self):
# STORE_ATTR on dummy should result in graph break
def dummy():
pass
def fn(x):
x = x + 2
with torch.no_grad():
dummy.attr1 = x
return x + 4
inp = torch.ones(3)
opt_fn = torch.compile(fn, backend="eager")
self.assertEqual(fn(inp), opt_fn(inp))
self.assertGreater(len(counters["graph_break"]), 0)
| GraphModule |
python | pytorch__pytorch | torch/distributed/fsdp/_common_utils.py | {
"start": 3548,
"end": 3778
} | class ____(_FSDPDeviceHandle):
def __init__(self) -> None:
pass
def __getattribute__(self, name: str, /) -> Any:
raise RuntimeError("Trying to use an uninitialized device handle.")
| _UninitializedDeviceHandle |
python | pandas-dev__pandas | pandas/tests/indexes/test_indexing.py | {
"start": 2464,
"end": 5353
} | class ____:
@pytest.mark.parametrize(
"index,val",
[
([0, 1, 2], 2),
([0, 1, "2"], "2"),
([0, 1, 2, np.inf, 4], 4),
([0, 1, 2, np.nan, 4], 4),
([0, 1, 2, np.inf], np.inf),
([0, 1, 2, np.nan], np.nan),
],
)
def test_index_contains(self, index, val):
index = Index(index)
assert val in index
@pytest.mark.parametrize(
"index,val",
[
(Index([0, 1, 2]), "2"),
(Index([0, 1, "2"]), 2),
(Index([0, 1, 2, np.inf]), 4),
(Index([0, 1, 2, np.nan]), 4),
(Index([0, 1, 2, np.inf]), np.nan),
(Index([0, 1, 2, np.nan]), np.inf),
# Checking if np.inf in int64 Index should not cause an OverflowError
# Related to GH 16957
(Index([0, 1, 2], dtype=np.int64), np.inf),
(Index([0, 1, 2], dtype=np.int64), np.nan),
(Index([0, 1, 2], dtype=np.uint64), np.inf),
(Index([0, 1, 2], dtype=np.uint64), np.nan),
],
)
def test_index_not_contains(self, index, val):
assert val not in index
@pytest.mark.parametrize("val", [0, "2"])
def test_mixed_index_contains(self, val):
# GH#19860
index = Index([0, 1, "2"])
assert val in index
@pytest.mark.parametrize("val", ["1", 2])
def test_mixed_index_not_contains(self, index, val):
# GH#19860
index = Index([0, 1, "2"])
assert val not in index
def test_contains_with_float_index(self, any_real_numpy_dtype):
# GH#22085
dtype = any_real_numpy_dtype
data = [0, 1, 2, 3] if not is_float_dtype(dtype) else [0.1, 1.1, 2.2, 3.3]
index = Index(data, dtype=dtype)
if not is_float_dtype(index.dtype):
assert 1.1 not in index
assert 1.0 in index
assert 1 in index
else:
assert 1.1 in index
assert 1.0 not in index
assert 1 not in index
def test_contains_requires_hashable_raises(self, index):
if isinstance(index, MultiIndex):
return # TODO: do we want this to raise?
msg = "unhashable type: 'list'"
with pytest.raises(TypeError, match=msg):
[] in index
if PY314:
container_or_iterable = "a container or iterable"
else:
container_or_iterable = "iterable"
msg = "|".join(
[
r"unhashable type: 'dict'",
r"must be real number, not dict",
r"an integer is required",
r"\{\}",
r"pandas\._libs\.interval\.IntervalTree' is not "
f"{container_or_iterable}",
]
)
with pytest.raises(TypeError, match=msg):
{} in index._engine
| TestContains |
python | mlflow__mlflow | examples/demos/mlflow-3/deep_learning.py | {
"start": 1006,
"end": 4573
} | class ____(nn.Module):
def __init__(self, input_size, hidden_size, output_size):
super().__init__()
self.fc1 = nn.Linear(input_size, hidden_size)
self.relu = nn.ReLU()
self.fc2 = nn.Linear(hidden_size, output_size)
def forward(self, x):
x = self.fc1(x)
x = self.relu(x)
x = self.fc2(x)
return x
# Load Iris dataset and prepare the DataFrame
iris = load_iris()
iris_df = pd.DataFrame(data=iris.data, columns=iris.feature_names)
iris_df["target"] = iris.target
# Split into training and testing datasets
train_df, test_df = train_test_split(iris_df, test_size=0.2, random_state=42)
# Prepare training data
train_dataset = mlflow.data.from_pandas(train_df, name="train")
X_train, y_train = prepare_data(train_dataset.df)
# Define the PyTorch model and move it to the device
input_size = X_train.shape[1]
hidden_size = 16
output_size = len(iris.target_names)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
scripted_model = IrisClassifier(input_size, hidden_size, output_size).to(device)
scripted_model = torch.jit.script(scripted_model)
# Start a run to represent the training job
with mlflow.start_run():
# Load the training dataset with MLflow. We will link training metrics to this dataset.
train_dataset: Dataset = mlflow.data.from_pandas(train_df, name="train")
X_train, y_train = prepare_data(train_dataset.df)
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(scripted_model.parameters(), lr=0.01)
for epoch in range(51):
X_train = X_train.to(device)
y_train = y_train.to(device)
out = scripted_model(X_train)
loss = criterion(out, y_train)
optimizer.zero_grad()
loss.backward()
optimizer.step()
# Log a checkpoint with metrics every 10 epochs
if epoch % 10 == 0:
# Each newly created LoggedModel checkpoint is linked with its
# name, params, and step
model_info = mlflow.pytorch.log_model(
pytorch_model=scripted_model,
name=f"torch-iris-{epoch}",
params={
"n_layers": 3,
"activation": "ReLU",
"criterion": "CrossEntropyLoss",
"optimizer": "Adam",
},
step=epoch,
input_example=X_train.numpy(),
)
# Log metric on training dataset at step and link to LoggedModel
mlflow.log_metric(
key="accuracy",
value=compute_accuracy(scripted_model, X_train, y_train),
step=epoch,
model_id=model_info.model_id,
dataset=train_dataset,
)
# This example produced one MLflow Run (training_run) and 6 MLflow Logged Models,
# one for each checkpoint (at steps 0, 10, …, 50). Using MLflow's UI or search API,
# we can get the checkpoints and rank them by their accuracy.
ranked_checkpoints = mlflow.search_logged_models(
output_format="list", order_by=[{"field_name": "metrics.accuracy", "ascending": False}]
)
best_checkpoint: mlflow.entities.LoggedModel = ranked_checkpoints[0]
print(best_checkpoint.metrics[0])
print(best_checkpoint)
worst_checkpoint: mlflow.entities.LoggedModel = ranked_checkpoints[-1]
print(worst_checkpoint.metrics)
# Once the best checkpoint is selected, that model can be registered to the model registry.
mlflow.register_model(f"models:/{best_checkpoint.model_id}", name="my_dl_model")
| IrisClassifier |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/typeNarrowingIsinstance16.py | {
"start": 99,
"end": 636
} | class ____:
@classmethod
def bar(cls, other: type):
if issubclass(other, cls):
reveal_type(other, expected_text="type[Self@ClassA]")
if issubclass(other, (int, cls)):
reveal_type(other, expected_text="type[Self@ClassA] | type[int]")
def baz(self, other: object):
if isinstance(other, type(self)):
reveal_type(other, expected_text="Self@ClassA")
if isinstance(other, (int, type(self))):
reveal_type(other, expected_text="Self@ClassA | int")
| ClassA |
python | ray-project__ray | python/ray/llm/_internal/batch/stages/http_request_stage.py | {
"start": 6691,
"end": 7103
} | class ____(StatefulStage):
"""
A stage that sends HTTP requests.
"""
fn: Type[StatefulStageUDF] = HttpRequestUDF
def get_required_input_keys(self) -> Dict[str, str]:
"""The required input keys of the stage and their descriptions."""
return {
"payload": "The payload to send to the HTTP request. "
"It should be in JSON format."
}
| HttpRequestStage |
python | OmkarPathak__pygorithm | tests/test_data_structure.py | {
"start": 2502,
"end": 3357
} | class ____(unittest.TestCase):
def test_queue(self):
myQueue = queue.Queue() # create a queue with default queue size 10
myQueue.enqueue(2)
myQueue.enqueue(10)
myQueue.enqueue(12)
myQueue.enqueue(3)
self.assertEqual(myQueue.dequeue(), 2)
self.assertEqual(myQueue.dequeue(), 10)
self.assertFalse(myQueue.is_empty())
self.assertEqual(myQueue.dequeue(), 12)
self.assertEqual(myQueue.dequeue(), 3)
self.assertTrue(myQueue.is_empty())
def test_deque(self):
myDeque = queue.Deque()
myDeque.insert_front(1) # 1
myDeque.insert_rear(2) # 2 1
myDeque.insert_front(3) # 2 1 3
myDeque.insert_rear(10) # 10 2 1 3
self.assertEqual(myDeque.delete_rear(), 10)
self.assertEqual(myDeque.delete_front(), 3)
| TestQueue |
python | apache__airflow | providers/google/tests/unit/google/cloud/operators/test_vision.py | {
"start": 14203,
"end": 15105
} | class ____:
@mock.patch("airflow.providers.google.cloud.operators.vision.CloudVisionHook")
def test_minimal_green_path(self, mock_hook):
op = CloudVisionRemoveProductFromProductSetOperator(
location=LOCATION_TEST,
product_set_id=PRODUCTSET_ID_TEST,
product_id=PRODUCT_ID_TEST,
task_id="id",
)
op.execute(context=None)
mock_hook.assert_called_once_with(
gcp_conn_id=GCP_CONN_ID,
impersonation_chain=None,
)
mock_hook.return_value.remove_product_from_product_set.assert_called_once_with(
product_set_id=PRODUCTSET_ID_TEST,
product_id=PRODUCT_ID_TEST,
location=LOCATION_TEST,
project_id=None,
retry=DEFAULT,
timeout=None,
metadata=(),
)
| TestCloudVisionRemoveProductFromProductSetOperator |
python | pola-rs__polars | py-polars/src/polars/_typing.py | {
"start": 9588,
"end": 9857
} | class ____(TypedDict):
"""Underlying buffers of a Series."""
values: Series
validity: Series | None
offsets: Series | None
# minimal protocol definitions that can reasonably represent
# an executable connection, cursor, or equivalent object
| SeriesBuffers |
python | doocs__leetcode | solution/3400-3499/3408.Design Task Manager/Solution.py | {
"start": 0,
"end": 1120
} | class ____:
def __init__(self, tasks: List[List[int]]):
self.d = {}
self.st = SortedList()
for task in tasks:
self.add(*task)
def add(self, userId: int, taskId: int, priority: int) -> None:
self.d[taskId] = (userId, priority)
self.st.add((-priority, -taskId))
def edit(self, taskId: int, newPriority: int) -> None:
userId, priority = self.d[taskId]
self.st.discard((-priority, -taskId))
self.d[taskId] = (userId, newPriority)
self.st.add((-newPriority, -taskId))
def rmv(self, taskId: int) -> None:
_, priority = self.d[taskId]
self.d.pop(taskId)
self.st.remove((-priority, -taskId))
def execTop(self) -> int:
if not self.st:
return -1
taskId = -self.st.pop(0)[1]
userId, _ = self.d[taskId]
self.d.pop(taskId)
return userId
# Your TaskManager object will be instantiated and called as such:
# obj = TaskManager(tasks)
# obj.add(userId,taskId,priority)
# obj.edit(taskId,newPriority)
# obj.rmv(taskId)
# param_4 = obj.execTop()
| TaskManager |
python | dask__distributed | distributed/worker.py | {
"start": 4672,
"end": 4887
} | class ____(ErrorMessage):
op: Literal["task-erred"]
result: object
nbytes: int
type: type
start: float
stop: float
thread: int
actual_exception: BaseException | Exception
| RunTaskFailure |
python | apache__airflow | providers/google/tests/unit/google/marketing_platform/operators/test_analytics_admin.py | {
"start": 7884,
"end": 8843
} | class ____:
@mock.patch(f"{ANALYTICS_PATH}.GoogleAnalyticsAdminHook")
def test_execute(self, hook_mock):
mock_retry, mock_timeout, mock_metadata = (mock.MagicMock() for _ in range(3))
GoogleAnalyticsAdminDeleteDataStreamOperator(
task_id="test_task",
gcp_conn_id=GCP_CONN_ID,
impersonation_chain=IMPERSONATION_CHAIN,
property_id=TEST_PROPERTY_ID,
data_stream_id=TEST_DATASTREAM_ID,
retry=mock_retry,
timeout=mock_timeout,
metadata=mock_metadata,
).execute(context=None)
hook_mock.assert_called_once()
hook_mock.return_value.delete_data_stream.assert_called_once_with(
property_id=TEST_PROPERTY_ID,
data_stream_id=TEST_DATASTREAM_ID,
retry=mock_retry,
timeout=mock_timeout,
metadata=mock_metadata,
)
| TestGoogleAnalyticsAdminDeleteDataStreamOperator |
python | plotly__plotly.py | plotly/graph_objs/_streamtube.py | {
"start": 215,
"end": 79407
} | class ____(_BaseTraceType):
_parent_path_str = ""
_path_str = "streamtube"
_valid_props = {
"autocolorscale",
"cauto",
"cmax",
"cmid",
"cmin",
"coloraxis",
"colorbar",
"colorscale",
"customdata",
"customdatasrc",
"hoverinfo",
"hoverinfosrc",
"hoverlabel",
"hovertemplate",
"hovertemplatefallback",
"hovertemplatesrc",
"hovertext",
"ids",
"idssrc",
"legend",
"legendgroup",
"legendgrouptitle",
"legendrank",
"legendwidth",
"lighting",
"lightposition",
"maxdisplayed",
"meta",
"metasrc",
"name",
"opacity",
"reversescale",
"scene",
"showlegend",
"showscale",
"sizeref",
"starts",
"stream",
"text",
"type",
"u",
"uhoverformat",
"uid",
"uirevision",
"usrc",
"v",
"vhoverformat",
"visible",
"vsrc",
"w",
"whoverformat",
"wsrc",
"x",
"xhoverformat",
"xsrc",
"y",
"yhoverformat",
"ysrc",
"z",
"zhoverformat",
"zsrc",
}
@property
def autocolorscale(self):
"""
Determines whether the colorscale is a default palette
(`autocolorscale: true`) or the palette determined by
`colorscale`. In case `colorscale` is unspecified or
`autocolorscale` is true, the default palette will be chosen
according to whether numbers in the `color` array are all
positive, all negative or mixed.
The 'autocolorscale' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["autocolorscale"]
@autocolorscale.setter
def autocolorscale(self, val):
self["autocolorscale"] = val
@property
def cauto(self):
"""
Determines whether or not the color domain is computed with
respect to the input data (here u/v/w norm) or the bounds set
in `cmin` and `cmax` Defaults to `false` when `cmin` and `cmax`
are set by the user.
The 'cauto' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["cauto"]
@cauto.setter
def cauto(self, val):
self["cauto"] = val
@property
def cmax(self):
"""
Sets the upper bound of the color domain. Value should have the
same units as u/v/w norm and if set, `cmin` must be set as
well.
The 'cmax' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["cmax"]
@cmax.setter
def cmax(self, val):
self["cmax"] = val
@property
def cmid(self):
"""
Sets the mid-point of the color domain by scaling `cmin` and/or
`cmax` to be equidistant to this point. Value should have the
same units as u/v/w norm. Has no effect when `cauto` is
`false`.
The 'cmid' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["cmid"]
@cmid.setter
def cmid(self, val):
self["cmid"] = val
@property
def cmin(self):
    """
    Sets the lower bound of the color domain. The value should have
    the same units as the u/v/w norm; if set, `cmax` must be set as
    well.

    The 'cmin' property is a number and may be specified as:
      - An int or float

    Returns
    -------
    int|float
    """
    return self["cmin"]

@cmin.setter
def cmin(self, val):
    self["cmin"] = val
@property
def coloraxis(self):
    """
    Sets a reference to a shared color axis. References to these
    shared color axes are "coloraxis", "coloraxis2", "coloraxis3",
    etc. Settings for the shared color axes live in the layout,
    under `layout.coloraxis`, `layout.coloraxis2`, etc. Multiple
    color scales can be linked to the same color axis.

    The 'coloraxis' property is an identifier of a particular
    subplot, of type 'coloraxis', that may be specified as the string 'coloraxis'
    optionally followed by an integer >= 1
    (e.g. 'coloraxis', 'coloraxis1', 'coloraxis2', 'coloraxis3', etc.)

    Returns
    -------
    str
    """
    return self["coloraxis"]

@coloraxis.setter
def coloraxis(self, val):
    self["coloraxis"] = val
@property
def colorbar(self):
    """
    The 'colorbar' property is an instance of ColorBar
    and may be specified as:
      - An instance of :class:`plotly.graph_objs.streamtube.ColorBar`
      - A dict of string/value properties that will be passed
        to the ColorBar constructor

    Returns
    -------
    plotly.graph_objs.streamtube.ColorBar
    """
    return self["colorbar"]

@colorbar.setter
def colorbar(self, val):
    self["colorbar"] = val
@property
def colorscale(self):
    """
    Sets the colorscale. The colorscale must be an array containing
    arrays that map a normalized value to an rgb, rgba, hex, hsl,
    hsv, or named color string. At minimum, mappings for the lowest
    (0) and highest (1) values are required. For example,
    `[[0, 'rgb(0,0,255)'], [1, 'rgb(255,0,0)']]`. To control the
    bounds of the colorscale in color space, use `cmin` and `cmax`.
    Alternatively, `colorscale` may be a palette name string from
    the following list: Blackbody,Bluered,Blues,Cividis,Earth,
    Electric,Greens,Greys,Hot,Jet,Picnic,Portland,Rainbow,RdBu,
    Reds,Viridis,YlGnBu,YlOrRd.

    The 'colorscale' property is a colorscale and may be
    specified as:
      - A list of colors that will be spaced evenly to create the colorscale.
        Many predefined colorscale lists are included in the sequential, diverging,
        and cyclical modules in the plotly.colors package.
      - A list of 2-element lists where the first element is the
        normalized color level value (starting at 0 and ending at 1),
        and the second item is a valid color string.
        (e.g. [[0, 'green'], [0.5, 'red'], [1.0, 'rgb(0, 0, 255)']])
      - One of the following named colorscales:
        ['aggrnyl', 'agsunset', 'algae', 'amp', 'armyrose', 'balance',
         'blackbody', 'bluered', 'blues', 'blugrn', 'bluyl', 'brbg',
         'brwnyl', 'bugn', 'bupu', 'burg', 'burgyl', 'cividis', 'curl',
         'darkmint', 'deep', 'delta', 'dense', 'earth', 'edge', 'electric',
         'emrld', 'fall', 'geyser', 'gnbu', 'gray', 'greens', 'greys',
         'haline', 'hot', 'hsv', 'ice', 'icefire', 'inferno', 'jet',
         'magenta', 'magma', 'matter', 'mint', 'mrybm', 'mygbm', 'oranges',
         'orrd', 'oryel', 'oxy', 'peach', 'phase', 'picnic', 'pinkyl',
         'piyg', 'plasma', 'plotly3', 'portland', 'prgn', 'pubu', 'pubugn',
         'puor', 'purd', 'purp', 'purples', 'purpor', 'rainbow', 'rdbu',
         'rdgy', 'rdpu', 'rdylbu', 'rdylgn', 'redor', 'reds', 'solar',
         'spectral', 'speed', 'sunset', 'sunsetdark', 'teal', 'tealgrn',
         'tealrose', 'tempo', 'temps', 'thermal', 'tropic', 'turbid',
         'turbo', 'twilight', 'viridis', 'ylgn', 'ylgnbu', 'ylorbr',
         'ylorrd'].
        Appending '_r' to a named colorscale reverses it.

    Returns
    -------
    str
    """
    return self["colorscale"]

@colorscale.setter
def colorscale(self, val):
    self["colorscale"] = val
@property
def customdata(self):
    """
    Assigns extra data to each datum. This may be useful when
    listening to hover, click and selection events. Note that
    "scatter" traces also append customdata items in the markers
    DOM elements.

    The 'customdata' property is an array that may be specified as a tuple,
    list, numpy array, or pandas Series

    Returns
    -------
    numpy.ndarray
    """
    return self["customdata"]

@customdata.setter
def customdata(self, val):
    self["customdata"] = val
@property
def customdatasrc(self):
    """
    Sets the source reference on Chart Studio Cloud for
    `customdata`.

    The 'customdatasrc' property must be specified as a string or
    as a plotly.grid_objs.Column object

    Returns
    -------
    str
    """
    return self["customdatasrc"]

@customdatasrc.setter
def customdatasrc(self, val):
    self["customdatasrc"] = val
@property
def hoverinfo(self):
    """
    Determines which trace information appears on hover. If `none`
    or `skip` is set, no information is displayed upon hovering;
    with `none`, click and hover events are still fired.

    The 'hoverinfo' property is a flaglist and may be specified
    as a string containing:
      - Any combination of ['x', 'y', 'z', 'u', 'v', 'w', 'norm', 'divergence', 'text', 'name'] joined with '+' characters
        (e.g. 'x+y')
        OR exactly one of ['all', 'none', 'skip'] (e.g. 'skip')
      - A list or array of the above

    Returns
    -------
    Any|numpy.ndarray
    """
    return self["hoverinfo"]

@hoverinfo.setter
def hoverinfo(self, val):
    self["hoverinfo"] = val
@property
def hoverinfosrc(self):
    """
    Sets the source reference on Chart Studio Cloud for
    `hoverinfo`.

    The 'hoverinfosrc' property must be specified as a string or
    as a plotly.grid_objs.Column object

    Returns
    -------
    str
    """
    return self["hoverinfosrc"]

@hoverinfosrc.setter
def hoverinfosrc(self, val):
    self["hoverinfosrc"] = val
@property
def hoverlabel(self):
    """
    The 'hoverlabel' property is an instance of Hoverlabel
    and may be specified as:
      - An instance of :class:`plotly.graph_objs.streamtube.Hoverlabel`
      - A dict of string/value properties that will be passed
        to the Hoverlabel constructor

    Returns
    -------
    plotly.graph_objs.streamtube.Hoverlabel
    """
    return self["hoverlabel"]

@hoverlabel.setter
def hoverlabel(self, val):
    self["hoverlabel"] = val
@property
def hovertemplate(self):
    """
    Template string used for rendering the information that appears
    on the hover box. Note that this overrides `hoverinfo`.
    Variables are inserted using %{variable}, for example "y: %{y}"
    as well as %{xother}, {%_xother}, {%_xother_}, {%xother_}. When
    showing info for several points, "xother" is added to those
    with different x positions from the first point; an underscore
    before or after "(x|y)other" adds a space on that side, only
    when the field is shown. Numbers are formatted with d3-format
    syntax %{variable:d3-format}, e.g. "Price: %{y:$.2f}" — see
    https://github.com/d3/d3-format/tree/v1.4.5#d3-format. Dates
    are formatted with d3-time-format syntax
    %{variable|d3-time-format}, e.g. "Day: %{2019-01-01|%A}" — see
    https://github.com/d3/d3-time-format/tree/v2.2.3#locale_format.
    Variables that can't be found are replaced with the specifier:
    a template of "data: %{x}, %{y}" yields "data: 1, %{y}" when x
    is 1 and y is missing. Variables with an undefined value are
    replaced with the fallback value. The variables available in
    `hovertemplate` are the ones emitted as event data described at
    https://plotly.com/javascript/plotlyjs-events/#event-data.
    Additionally, all per-point attributes (those with
    `arrayOk: true`) are available. Finally, the template string
    has access to variables `tubex`, `tubey`, `tubez`, `tubeu`,
    `tubev`, `tubew`, `norm` and `divergence`. Anything contained
    in tag `<extra>` is displayed in the secondary box, for example
    `<extra>%{fullData.name}</extra>`. To hide the secondary box
    completely, use an empty tag `<extra></extra>`.

    The 'hovertemplate' property is a string and must be specified as:
      - A string
      - A number that will be converted to a string
      - A tuple, list, or one-dimensional numpy array of the above

    Returns
    -------
    str|numpy.ndarray
    """
    return self["hovertemplate"]

@hovertemplate.setter
def hovertemplate(self, val):
    self["hovertemplate"] = val
@property
def hovertemplatefallback(self):
    """
    Fallback string displayed when a variable referenced in a
    template is missing. If the boolean value 'false' is passed in,
    the specifier with the missing variable is displayed instead.

    The 'hovertemplatefallback' property accepts values of any type

    Returns
    -------
    Any
    """
    return self["hovertemplatefallback"]

@hovertemplatefallback.setter
def hovertemplatefallback(self, val):
    self["hovertemplatefallback"] = val
@property
def hovertemplatesrc(self):
    """
    Sets the source reference on Chart Studio Cloud for
    `hovertemplate`.

    The 'hovertemplatesrc' property must be specified as a string or
    as a plotly.grid_objs.Column object

    Returns
    -------
    str
    """
    return self["hovertemplatesrc"]

@hovertemplatesrc.setter
def hovertemplatesrc(self, val):
    self["hovertemplatesrc"] = val
@property
def hovertext(self):
    """
    Same as `text`.

    The 'hovertext' property is a string and must be specified as:
      - A string
      - A number that will be converted to a string

    Returns
    -------
    str
    """
    return self["hovertext"]

@hovertext.setter
def hovertext(self, val):
    self["hovertext"] = val
@property
def ids(self):
    """
    Assigns id labels to each datum. These ids provide object
    constancy of data points during animation. Should be an array
    of strings, not numbers or any other type.

    The 'ids' property is an array that may be specified as a tuple,
    list, numpy array, or pandas Series

    Returns
    -------
    numpy.ndarray
    """
    return self["ids"]

@ids.setter
def ids(self, val):
    self["ids"] = val
@property
def idssrc(self):
    """
    Sets the source reference on Chart Studio Cloud for `ids`.

    The 'idssrc' property must be specified as a string or
    as a plotly.grid_objs.Column object

    Returns
    -------
    str
    """
    return self["idssrc"]

@idssrc.setter
def idssrc(self, val):
    self["idssrc"] = val
@property
def legend(self):
    """
    Sets the reference to a legend to show this trace in.
    References to these legends are "legend", "legend2", "legend3",
    etc. Settings for these legends are set in the layout, under
    `layout.legend`, `layout.legend2`, etc.

    The 'legend' property is an identifier of a particular
    subplot, of type 'legend', that may be specified as the string 'legend'
    optionally followed by an integer >= 1
    (e.g. 'legend', 'legend1', 'legend2', 'legend3', etc.)

    Returns
    -------
    str
    """
    return self["legend"]

@legend.setter
def legend(self, val):
    self["legend"] = val
@property
def legendgroup(self):
    """
    Sets the legend group for this trace. Traces and shapes that
    are part of the same legend group hide/show at the same time
    when toggling legend items.

    The 'legendgroup' property is a string and must be specified as:
      - A string
      - A number that will be converted to a string

    Returns
    -------
    str
    """
    return self["legendgroup"]

@legendgroup.setter
def legendgroup(self, val):
    self["legendgroup"] = val
@property
def legendgrouptitle(self):
    """
    The 'legendgrouptitle' property is an instance of Legendgrouptitle
    and may be specified as:
      - An instance of :class:`plotly.graph_objs.streamtube.Legendgrouptitle`
      - A dict of string/value properties that will be passed
        to the Legendgrouptitle constructor

    Returns
    -------
    plotly.graph_objs.streamtube.Legendgrouptitle
    """
    return self["legendgrouptitle"]

@legendgrouptitle.setter
def legendgrouptitle(self, val):
    self["legendgrouptitle"] = val
@property
def legendrank(self):
    """
    Sets the legend rank for this trace. Items and groups with
    smaller ranks are presented on the top/left side, while with
    "reversed" `legend.traceorder` they are on the bottom/right
    side. The default legendrank is 1000, so ranks less than 1000
    place items before all unranked items and ranks greater than
    1000 place them after all unranked items. With unranked or
    equal-rank items, shapes are displayed after traces, i.e.
    according to their order in data and layout.

    The 'legendrank' property is a number and may be specified as:
      - An int or float

    Returns
    -------
    int|float
    """
    return self["legendrank"]

@legendrank.setter
def legendrank(self, val):
    self["legendrank"] = val
@property
def legendwidth(self):
    """
    Sets the width (in px or fraction) of the legend for this
    trace.

    The 'legendwidth' property is a number and may be specified as:
      - An int or float in the interval [0, inf]

    Returns
    -------
    int|float
    """
    return self["legendwidth"]

@legendwidth.setter
def legendwidth(self, val):
    self["legendwidth"] = val
@property
def lighting(self):
    """
    The 'lighting' property is an instance of Lighting
    and may be specified as:
      - An instance of :class:`plotly.graph_objs.streamtube.Lighting`
      - A dict of string/value properties that will be passed
        to the Lighting constructor

    Returns
    -------
    plotly.graph_objs.streamtube.Lighting
    """
    return self["lighting"]

@lighting.setter
def lighting(self, val):
    self["lighting"] = val
@property
def lightposition(self):
    """
    The 'lightposition' property is an instance of Lightposition
    and may be specified as:
      - An instance of :class:`plotly.graph_objs.streamtube.Lightposition`
      - A dict of string/value properties that will be passed
        to the Lightposition constructor

    Returns
    -------
    plotly.graph_objs.streamtube.Lightposition
    """
    return self["lightposition"]

@lightposition.setter
def lightposition(self, val):
    self["lightposition"] = val
@property
def maxdisplayed(self):
    """
    The maximum number of displayed segments in a streamtube.

    The 'maxdisplayed' property is a integer and may be specified as:
      - An int (or float that will be cast to an int)
        in the interval [0, 9223372036854775807]

    Returns
    -------
    int
    """
    return self["maxdisplayed"]

@maxdisplayed.setter
def maxdisplayed(self, val):
    self["maxdisplayed"] = val
@property
def meta(self):
    """
    Assigns extra meta information associated with this trace that
    can be used in various text attributes. Attributes such as
    trace `name`, graph, axis and colorbar `title.text`, annotation
    `text`, `rangeselector`, `updatemenus` and `sliders` `label`
    text all support `meta`. To access the trace `meta` values in
    an attribute of the same trace, simply use `%{meta[i]}` where
    `i` is the index or key of the `meta` item in question. To
    access trace `meta` in layout attributes, use
    `%{data[n].meta[i]}` where `i` is the index or key of the
    `meta` and `n` is the trace index.

    The 'meta' property accepts values of any type

    Returns
    -------
    Any|numpy.ndarray
    """
    return self["meta"]

@meta.setter
def meta(self, val):
    self["meta"] = val
@property
def metasrc(self):
    """
    Sets the source reference on Chart Studio Cloud for `meta`.

    The 'metasrc' property must be specified as a string or
    as a plotly.grid_objs.Column object

    Returns
    -------
    str
    """
    return self["metasrc"]

@metasrc.setter
def metasrc(self, val):
    self["metasrc"] = val
@property
def name(self):
    """
    Sets the trace name. The trace name appears as the legend item
    and on hover.

    The 'name' property is a string and must be specified as:
      - A string
      - A number that will be converted to a string

    Returns
    -------
    str
    """
    return self["name"]

@name.setter
def name(self, val):
    self["name"] = val
@property
def opacity(self):
    """
    Sets the opacity of the surface. Please note that when using
    high `opacity` values — for example a value greater than or
    equal to 0.5 on two surfaces (and 0.25 with four surfaces) —
    an overlay of multiple transparent surfaces may not be
    perfectly sorted in depth by the webgl API. This behavior may
    be improved in the near future and is subject to change.

    The 'opacity' property is a number and may be specified as:
      - An int or float in the interval [0, 1]

    Returns
    -------
    int|float
    """
    return self["opacity"]

@opacity.setter
def opacity(self, val):
    self["opacity"] = val
@property
def reversescale(self):
    """
    Reverses the color mapping if true: `cmin` then corresponds to
    the last color in the array and `cmax` to the first color.

    The 'reversescale' property must be specified as a bool
    (either True, or False)

    Returns
    -------
    bool
    """
    return self["reversescale"]

@reversescale.setter
def reversescale(self, val):
    self["reversescale"] = val
@property
def scene(self):
    """
    Sets a reference between this trace's 3D coordinate system and
    a 3D scene. If "scene" (the default value), the (x,y,z)
    coordinates refer to `layout.scene`. If "scene2", the (x,y,z)
    coordinates refer to `layout.scene2`, and so on.

    The 'scene' property is an identifier of a particular
    subplot, of type 'scene', that may be specified as the string 'scene'
    optionally followed by an integer >= 1
    (e.g. 'scene', 'scene1', 'scene2', 'scene3', etc.)

    Returns
    -------
    str
    """
    return self["scene"]

@scene.setter
def scene(self, val):
    self["scene"] = val
@property
def showlegend(self):
    """
    Determines whether or not an item corresponding to this trace
    is shown in the legend.

    The 'showlegend' property must be specified as a bool
    (either True, or False)

    Returns
    -------
    bool
    """
    return self["showlegend"]

@showlegend.setter
def showlegend(self, val):
    self["showlegend"] = val
@property
def showscale(self):
    """
    Determines whether or not a colorbar is displayed for this
    trace.

    The 'showscale' property must be specified as a bool
    (either True, or False)

    Returns
    -------
    bool
    """
    return self["showscale"]

@showscale.setter
def showscale(self, val):
    self["showscale"] = val
@property
def sizeref(self):
    """
    The scaling factor for the streamtubes. The default is 1,
    which avoids two max-divergence tubes touching at adjacent
    starting positions.

    The 'sizeref' property is a number and may be specified as:
      - An int or float in the interval [0, inf]

    Returns
    -------
    int|float
    """
    return self["sizeref"]

@sizeref.setter
def sizeref(self, val):
    self["sizeref"] = val
@property
def starts(self):
    """
    The 'starts' property is an instance of Starts
    and may be specified as:
      - An instance of :class:`plotly.graph_objs.streamtube.Starts`
      - A dict of string/value properties that will be passed
        to the Starts constructor

    Returns
    -------
    plotly.graph_objs.streamtube.Starts
    """
    return self["starts"]

@starts.setter
def starts(self, val):
    self["starts"] = val
@property
def stream(self):
    """
    The 'stream' property is an instance of Stream
    and may be specified as:
      - An instance of :class:`plotly.graph_objs.streamtube.Stream`
      - A dict of string/value properties that will be passed
        to the Stream constructor

    Returns
    -------
    plotly.graph_objs.streamtube.Stream
    """
    return self["stream"]

@stream.setter
def stream(self, val):
    self["stream"] = val
@property
def text(self):
    """
    Sets a text element associated with this trace. If the trace
    `hoverinfo` contains a "text" flag, this text element is seen
    in all hover labels. Note that streamtube traces do not support
    array `text` values.

    The 'text' property is a string and must be specified as:
      - A string
      - A number that will be converted to a string

    Returns
    -------
    str
    """
    return self["text"]

@text.setter
def text(self, val):
    self["text"] = val
@property
def u(self):
    """
    Sets the x components of the vector field.

    The 'u' property is an array that may be specified as a tuple,
    list, numpy array, or pandas Series

    Returns
    -------
    numpy.ndarray
    """
    return self["u"]

@u.setter
def u(self, val):
    self["u"] = val
@property
def uhoverformat(self):
    """
    Sets the hover text formatting rule for `u` using d3 formatting
    mini-languages, which are very similar to those in Python. For
    numbers, see:
    https://github.com/d3/d3-format/tree/v1.4.5#d3-format. By
    default the values are formatted using the generic number
    format.

    The 'uhoverformat' property is a string and must be specified as:
      - A string
      - A number that will be converted to a string

    Returns
    -------
    str
    """
    return self["uhoverformat"]

@uhoverformat.setter
def uhoverformat(self, val):
    self["uhoverformat"] = val
@property
def uid(self):
    """
    Assign an id to this trace. Use this to provide object
    constancy between traces during animations and transitions.

    The 'uid' property is a string and must be specified as:
      - A string
      - A number that will be converted to a string

    Returns
    -------
    str
    """
    return self["uid"]

@uid.setter
def uid(self, val):
    self["uid"] = val
@property
def uirevision(self):
    """
    Controls persistence of some user-driven changes to the trace:
    `constraintrange` in `parcoords` traces, as well as some
    `editable: true` modifications such as `name` and
    `colorbar.title`. Defaults to `layout.uirevision`. Note that
    other user-driven trace attribute changes are controlled by
    `layout` attributes: `trace.visible` is controlled by
    `layout.legend.uirevision`, `selectedpoints` is controlled by
    `layout.selectionrevision`, and `colorbar.(x|y)` (accessible
    with `config: {editable: true}`) is controlled by
    `layout.editrevision`. Trace changes are tracked by `uid`,
    which only falls back on the trace index if no `uid` is
    provided. So if your app can add/remove traces before the end
    of the `data` array, such that the same trace has a different
    index, you can still preserve user-driven changes by giving
    each trace a `uid` that stays with it as it moves.

    The 'uirevision' property accepts values of any type

    Returns
    -------
    Any
    """
    return self["uirevision"]

@uirevision.setter
def uirevision(self, val):
    self["uirevision"] = val
@property
def usrc(self):
    """
    Sets the source reference on Chart Studio Cloud for `u`.

    The 'usrc' property must be specified as a string or
    as a plotly.grid_objs.Column object

    Returns
    -------
    str
    """
    return self["usrc"]

@usrc.setter
def usrc(self, val):
    self["usrc"] = val
@property
def v(self):
    """
    Sets the y components of the vector field.

    The 'v' property is an array that may be specified as a tuple,
    list, numpy array, or pandas Series

    Returns
    -------
    numpy.ndarray
    """
    return self["v"]

@v.setter
def v(self, val):
    self["v"] = val
@property
def vhoverformat(self):
    """
    Sets the hover text formatting rule for `v` using d3 formatting
    mini-languages, which are very similar to those in Python. For
    numbers, see:
    https://github.com/d3/d3-format/tree/v1.4.5#d3-format. By
    default the values are formatted using the generic number
    format.

    The 'vhoverformat' property is a string and must be specified as:
      - A string
      - A number that will be converted to a string

    Returns
    -------
    str
    """
    return self["vhoverformat"]

@vhoverformat.setter
def vhoverformat(self, val):
    self["vhoverformat"] = val
@property
def visible(self):
    """
    Determines whether or not this trace is visible. If
    "legendonly", the trace is not drawn, but can appear as a
    legend item (provided that the legend itself is visible).

    The 'visible' property is an enumeration that may be specified as:
      - One of the following enumeration values:
        [True, False, 'legendonly']

    Returns
    -------
    Any
    """
    return self["visible"]

@visible.setter
def visible(self, val):
    self["visible"] = val
@property
def vsrc(self):
    """
    Sets the source reference on Chart Studio Cloud for `v`.

    The 'vsrc' property must be specified as a string or
    as a plotly.grid_objs.Column object

    Returns
    -------
    str
    """
    return self["vsrc"]

@vsrc.setter
def vsrc(self, val):
    self["vsrc"] = val
@property
def w(self):
    """
    Sets the z components of the vector field.

    The 'w' property is an array that may be specified as a tuple,
    list, numpy array, or pandas Series

    Returns
    -------
    numpy.ndarray
    """
    return self["w"]

@w.setter
def w(self, val):
    self["w"] = val
@property
def whoverformat(self):
    """
    Sets the hover text formatting rule for `w` using d3 formatting
    mini-languages, which are very similar to those in Python. For
    numbers, see:
    https://github.com/d3/d3-format/tree/v1.4.5#d3-format. By
    default the values are formatted using the generic number
    format.

    The 'whoverformat' property is a string and must be specified as:
      - A string
      - A number that will be converted to a string

    Returns
    -------
    str
    """
    return self["whoverformat"]

@whoverformat.setter
def whoverformat(self, val):
    self["whoverformat"] = val
@property
def wsrc(self):
    """
    Sets the source reference on Chart Studio Cloud for `w`.

    The 'wsrc' property must be specified as a string or
    as a plotly.grid_objs.Column object

    Returns
    -------
    str
    """
    return self["wsrc"]

@wsrc.setter
def wsrc(self, val):
    self["wsrc"] = val
@property
def x(self):
    """
    Sets the x coordinates of the vector field.

    The 'x' property is an array that may be specified as a tuple,
    list, numpy array, or pandas Series

    Returns
    -------
    numpy.ndarray
    """
    return self["x"]

@x.setter
def x(self, val):
    self["x"] = val
@property
def xhoverformat(self):
    """
    Sets the hover text formatting rule for `x` using d3 formatting
    mini-languages, which are very similar to those in Python. For
    numbers, see:
    https://github.com/d3/d3-format/tree/v1.4.5#d3-format. And for
    dates see: https://github.com/d3/d3-time-
    format/tree/v2.2.3#locale_format. We add two items to d3's date
    formatter: "%h" for half of the year as a decimal number, as
    well as "%{n}f" for fractional seconds with n digits. For
    example, *2016-10-13 09:15:23.456* with tickformat
    "%H~%M~%S.%2f" would display *09~15~23.46*. By default the
    values are formatted using `xaxis.hoverformat`.

    The 'xhoverformat' property is a string and must be specified as:
      - A string
      - A number that will be converted to a string

    Returns
    -------
    str
    """
    return self["xhoverformat"]

@xhoverformat.setter
def xhoverformat(self, val):
    self["xhoverformat"] = val
@property
def xsrc(self):
    """
    Sets the source reference on Chart Studio Cloud for `x`.

    The 'xsrc' property must be specified as a string or
    as a plotly.grid_objs.Column object

    Returns
    -------
    str
    """
    return self["xsrc"]

@xsrc.setter
def xsrc(self, val):
    self["xsrc"] = val
@property
def y(self):
    """
    Sets the y coordinates of the vector field.

    The 'y' property is an array that may be specified as a tuple,
    list, numpy array, or pandas Series

    Returns
    -------
    numpy.ndarray
    """
    return self["y"]

@y.setter
def y(self, val):
    self["y"] = val
@property
def yhoverformat(self):
    """
    Sets the hover text formatting rule for `y` using d3 formatting
    mini-languages, which are very similar to those in Python. For
    numbers, see:
    https://github.com/d3/d3-format/tree/v1.4.5#d3-format. And for
    dates see: https://github.com/d3/d3-time-
    format/tree/v2.2.3#locale_format. We add two items to d3's date
    formatter: "%h" for half of the year as a decimal number, as
    well as "%{n}f" for fractional seconds with n digits. For
    example, *2016-10-13 09:15:23.456* with tickformat
    "%H~%M~%S.%2f" would display *09~15~23.46*. By default the
    values are formatted using `yaxis.hoverformat`.

    The 'yhoverformat' property is a string and must be specified as:
      - A string
      - A number that will be converted to a string

    Returns
    -------
    str
    """
    return self["yhoverformat"]

@yhoverformat.setter
def yhoverformat(self, val):
    self["yhoverformat"] = val
@property
def ysrc(self):
    """
    Sets the source reference on Chart Studio Cloud for `y`.

    The 'ysrc' property must be specified as a string or
    as a plotly.grid_objs.Column object

    Returns
    -------
    str
    """
    return self["ysrc"]

@ysrc.setter
def ysrc(self, val):
    self["ysrc"] = val
@property
def z(self):
    """
    Sets the z coordinates of the vector field.

    The 'z' property is an array that may be specified as a tuple,
    list, numpy array, or pandas Series

    Returns
    -------
    numpy.ndarray
    """
    return self["z"]

@z.setter
def z(self, val):
    self["z"] = val
@property
def zhoverformat(self):
    """
    Sets the hover text formatting rule for `z` using d3 formatting
    mini-languages, which are very similar to those in Python. For
    numbers, see:
    https://github.com/d3/d3-format/tree/v1.4.5#d3-format. And for
    dates see: https://github.com/d3/d3-time-
    format/tree/v2.2.3#locale_format. We add two items to d3's date
    formatter: "%h" for half of the year as a decimal number, as
    well as "%{n}f" for fractional seconds with n digits. For
    example, *2016-10-13 09:15:23.456* with tickformat
    "%H~%M~%S.%2f" would display *09~15~23.46*. By default the
    values are formatted using `zaxis.hoverformat`.

    The 'zhoverformat' property is a string and must be specified as:
      - A string
      - A number that will be converted to a string

    Returns
    -------
    str
    """
    return self["zhoverformat"]

@zhoverformat.setter
def zhoverformat(self, val):
    self["zhoverformat"] = val
@property
def zsrc(self):
    """
    Sets the source reference on Chart Studio Cloud for `z`.

    The 'zsrc' property must be specified as a string or
    as a plotly.grid_objs.Column object

    Returns
    -------
    str
    """
    return self["zsrc"]

@zsrc.setter
def zsrc(self, val):
    self["zsrc"] = val
@property
def type(self):
    # Read-only trace-type string taken directly from the underlying
    # property dict (not settable via item assignment like the other
    # attributes on this trace).
    return self._props["type"]
@property
def _prop_descriptions(self):
return """\
autocolorscale
Determines whether the colorscale is a default palette
(`autocolorscale: true`) or the palette determined by
`colorscale`. In case `colorscale` is unspecified or
`autocolorscale` is true, the default palette will be
chosen according to whether numbers in the `color`
array are all positive, all negative or mixed.
cauto
Determines whether or not the color domain is computed
with respect to the input data (here u/v/w norm) or the
bounds set in `cmin` and `cmax` Defaults to `false`
when `cmin` and `cmax` are set by the user.
cmax
Sets the upper bound of the color domain. Value should
have the same units as u/v/w norm and if set, `cmin`
must be set as well.
cmid
Sets the mid-point of the color domain by scaling
`cmin` and/or `cmax` to be equidistant to this point.
Value should have the same units as u/v/w norm. Has no
effect when `cauto` is `false`.
cmin
Sets the lower bound of the color domain. Value should
have the same units as u/v/w norm and if set, `cmax`
must be set as well.
coloraxis
Sets a reference to a shared color axis. References to
these shared color axes are "coloraxis", "coloraxis2",
"coloraxis3", etc. Settings for these shared color axes
are set in the layout, under `layout.coloraxis`,
`layout.coloraxis2`, etc. Note that multiple color
scales can be linked to the same color axis.
colorbar
:class:`plotly.graph_objects.streamtube.ColorBar`
instance or dict with compatible properties
colorscale
Sets the colorscale. The colorscale must be an array
containing arrays mapping a normalized value to an rgb,
rgba, hex, hsl, hsv, or named color string. At minimum,
a mapping for the lowest (0) and highest (1) values are
required. For example, `[[0, 'rgb(0,0,255)'], [1,
'rgb(255,0,0)']]`. To control the bounds of the
colorscale in color space, use `cmin` and `cmax`.
Alternatively, `colorscale` may be a palette name
string of the following list: Blackbody,Bluered,Blues,C
ividis,Earth,Electric,Greens,Greys,Hot,Jet,Picnic,Portl
and,Rainbow,RdBu,Reds,Viridis,YlGnBu,YlOrRd.
customdata
Assigns extra data each datum. This may be useful when
listening to hover, click and selection events. Note
that, "scatter" traces also appends customdata items in
the markers DOM elements
customdatasrc
Sets the source reference on Chart Studio Cloud for
`customdata`.
hoverinfo
Determines which trace information appear on hover. If
`none` or `skip` are set, no information is displayed
upon hovering. But, if `none` is set, click and hover
events are still fired.
hoverinfosrc
Sets the source reference on Chart Studio Cloud for
`hoverinfo`.
hoverlabel
:class:`plotly.graph_objects.streamtube.Hoverlabel`
instance or dict with compatible properties
hovertemplate
Template string used for rendering the information that
appear on hover box. Note that this will override
`hoverinfo`. Variables are inserted using %{variable},
for example "y: %{y}" as well as %{xother}, {%_xother},
{%_xother_}, {%xother_}. When showing info for several
points, "xother" will be added to those with different
x positions from the first point. An underscore before
or after "(x|y)other" will add a space on that side,
only when this field is shown. Numbers are formatted
using d3-format's syntax %{variable:d3-format}, for
example "Price: %{y:$.2f}".
https://github.com/d3/d3-format/tree/v1.4.5#d3-format
for details on the formatting syntax. Dates are
formatted using d3-time-format's syntax
%{variable|d3-time-format}, for example "Day:
%{2019-01-01|%A}". https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format for details on the
date formatting syntax. Variables that can't be found
will be replaced with the specifier. For example, a
template of "data: %{x}, %{y}" will result in a value
of "data: 1, %{y}" if x is 1 and y is missing.
Variables with an undefined value will be replaced with
the fallback value. The variables available in
`hovertemplate` are the ones emitted as event data
described at this link
https://plotly.com/javascript/plotlyjs-events/#event-
data. Additionally, all attributes that can be
specified per-point (the ones that are `arrayOk: true`)
are available. Finally, the template string has access
to variables `tubex`, `tubey`, `tubez`, `tubeu`,
`tubev`, `tubew`, `norm` and `divergence`. Anything
contained in tag `<extra>` is displayed in the
secondary box, for example
`<extra>%{fullData.name}</extra>`. To hide the
secondary box completely, use an empty tag
`<extra></extra>`.
hovertemplatefallback
Fallback string that's displayed when a variable
referenced in a template is missing. If the boolean
value 'false' is passed in, the specifier with the
missing variable will be displayed.
hovertemplatesrc
Sets the source reference on Chart Studio Cloud for
`hovertemplate`.
hovertext
Same as `text`.
ids
Assigns id labels to each datum. These ids for object
constancy of data points during animation. Should be an
array of strings, not numbers or any other type.
idssrc
Sets the source reference on Chart Studio Cloud for
`ids`.
legend
Sets the reference to a legend to show this trace in.
References to these legends are "legend", "legend2",
"legend3", etc. Settings for these legends are set in
the layout, under `layout.legend`, `layout.legend2`,
etc.
legendgroup
Sets the legend group for this trace. Traces and shapes
part of the same legend group hide/show at the same
time when toggling legend items.
legendgrouptitle
:class:`plotly.graph_objects.streamtube.Legendgrouptitl
e` instance or dict with compatible properties
legendrank
Sets the legend rank for this trace. Items and groups
with smaller ranks are presented on top/left side while
with "reversed" `legend.traceorder` they are on
bottom/right side. The default legendrank is 1000, so
that you can use ranks less than 1000 to place certain
items before all unranked items, and ranks greater than
1000 to go after all unranked items. When having
unranked or equal rank items shapes would be displayed
after traces i.e. according to their order in data and
layout.
legendwidth
Sets the width (in px or fraction) of the legend for
this trace.
lighting
:class:`plotly.graph_objects.streamtube.Lighting`
instance or dict with compatible properties
lightposition
:class:`plotly.graph_objects.streamtube.Lightposition`
instance or dict with compatible properties
maxdisplayed
The maximum number of displayed segments in a
streamtube.
meta
Assigns extra meta information associated with this
trace that can be used in various text attributes.
Attributes such as trace `name`, graph, axis and
colorbar `title.text`, annotation `text`
`rangeselector`, `updatemenues` and `sliders` `label`
text all support `meta`. To access the trace `meta`
values in an attribute in the same trace, simply use
`%{meta[i]}` where `i` is the index or key of the
`meta` item in question. To access trace `meta` in
layout attributes, use `%{data[n[.meta[i]}` where `i`
is the index or key of the `meta` and `n` is the trace
index.
metasrc
Sets the source reference on Chart Studio Cloud for
`meta`.
name
Sets the trace name. The trace name appears as the
legend item and on hover.
opacity
Sets the opacity of the surface. Please note that in
the case of using high `opacity` values for example a
value greater than or equal to 0.5 on two surfaces (and
0.25 with four surfaces), an overlay of multiple
transparent surfaces may not perfectly be sorted in
depth by the webgl API. This behavior may be improved
in the near future and is subject to change.
reversescale
Reverses the color mapping if true. If true, `cmin`
will correspond to the last color in the array and
`cmax` will correspond to the first color.
scene
Sets a reference between this trace's 3D coordinate
system and a 3D scene. If "scene" (the default value),
the (x,y,z) coordinates refer to `layout.scene`. If
"scene2", the (x,y,z) coordinates refer to
`layout.scene2`, and so on.
showlegend
Determines whether or not an item corresponding to this
trace is shown in the legend.
showscale
Determines whether or not a colorbar is displayed for
this trace.
sizeref
The scaling factor for the streamtubes. The default is
1, which avoids two max divergence tubes from touching
at adjacent starting positions.
starts
:class:`plotly.graph_objects.streamtube.Starts`
instance or dict with compatible properties
stream
:class:`plotly.graph_objects.streamtube.Stream`
instance or dict with compatible properties
text
Sets a text element associated with this trace. If
trace `hoverinfo` contains a "text" flag, this text
element will be seen in all hover labels. Note that
streamtube traces do not support array `text` values.
u
Sets the x components of the vector field.
uhoverformat
Sets the hover text formatting rulefor `u` using d3
formatting mini-languages which are very similar to
those in Python. For numbers, see: https://github.com/d
3/d3-format/tree/v1.4.5#d3-format.By default the values
are formatted using generic number format.
uid
Assign an id to this trace, Use this to provide object
constancy between traces during animations and
transitions.
uirevision
Controls persistence of some user-driven changes to the
trace: `constraintrange` in `parcoords` traces, as well
as some `editable: true` modifications such as `name`
and `colorbar.title`. Defaults to `layout.uirevision`.
Note that other user-driven trace attribute changes are
controlled by `layout` attributes: `trace.visible` is
controlled by `layout.legend.uirevision`,
`selectedpoints` is controlled by
`layout.selectionrevision`, and `colorbar.(x|y)`
(accessible with `config: {editable: true}`) is
controlled by `layout.editrevision`. Trace changes are
tracked by `uid`, which only falls back on trace index
if no `uid` is provided. So if your app can add/remove
traces before the end of the `data` array, such that
the same trace has a different index, you can still
preserve user-driven changes if you give each trace a
`uid` that stays with it as it moves.
usrc
Sets the source reference on Chart Studio Cloud for
`u`.
v
Sets the y components of the vector field.
vhoverformat
Sets the hover text formatting rulefor `v` using d3
formatting mini-languages which are very similar to
those in Python. For numbers, see: https://github.com/d
3/d3-format/tree/v1.4.5#d3-format.By default the values
are formatted using generic number format.
visible
Determines whether or not this trace is visible. If
"legendonly", the trace is not drawn, but can appear as
a legend item (provided that the legend itself is
visible).
vsrc
Sets the source reference on Chart Studio Cloud for
`v`.
w
Sets the z components of the vector field.
whoverformat
Sets the hover text formatting rulefor `w` using d3
formatting mini-languages which are very similar to
those in Python. For numbers, see: https://github.com/d
3/d3-format/tree/v1.4.5#d3-format.By default the values
are formatted using generic number format.
wsrc
Sets the source reference on Chart Studio Cloud for
`w`.
x
Sets the x coordinates of the vector field.
xhoverformat
Sets the hover text formatting rulefor `x` using d3
formatting mini-languages which are very similar to
those in Python. For numbers, see:
https://github.com/d3/d3-format/tree/v1.4.5#d3-format.
And for dates see: https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format. We add two items to
d3's date formatter: "%h" for half of the year as a
decimal number as well as "%{n}f" for fractional
seconds with n digits. For example, *2016-10-13
09:15:23.456* with tickformat "%H~%M~%S.%2f" would
display *09~15~23.46*By default the values are
formatted using `xaxis.hoverformat`.
xsrc
Sets the source reference on Chart Studio Cloud for
`x`.
y
Sets the y coordinates of the vector field.
yhoverformat
Sets the hover text formatting rulefor `y` using d3
formatting mini-languages which are very similar to
those in Python. For numbers, see:
https://github.com/d3/d3-format/tree/v1.4.5#d3-format.
And for dates see: https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format. We add two items to
d3's date formatter: "%h" for half of the year as a
decimal number as well as "%{n}f" for fractional
seconds with n digits. For example, *2016-10-13
09:15:23.456* with tickformat "%H~%M~%S.%2f" would
display *09~15~23.46*By default the values are
formatted using `yaxis.hoverformat`.
ysrc
Sets the source reference on Chart Studio Cloud for
`y`.
z
Sets the z coordinates of the vector field.
zhoverformat
Sets the hover text formatting rulefor `z` using d3
formatting mini-languages which are very similar to
those in Python. For numbers, see:
https://github.com/d3/d3-format/tree/v1.4.5#d3-format.
And for dates see: https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format. We add two items to
d3's date formatter: "%h" for half of the year as a
decimal number as well as "%{n}f" for fractional
seconds with n digits. For example, *2016-10-13
09:15:23.456* with tickformat "%H~%M~%S.%2f" would
display *09~15~23.46*By default the values are
formatted using `zaxis.hoverformat`.
zsrc
Sets the source reference on Chart Studio Cloud for
`z`.
"""
def __init__(
    self,
    arg=None,
    autocolorscale=None,
    cauto=None,
    cmax=None,
    cmid=None,
    cmin=None,
    coloraxis=None,
    colorbar=None,
    colorscale=None,
    customdata=None,
    customdatasrc=None,
    hoverinfo=None,
    hoverinfosrc=None,
    hoverlabel=None,
    hovertemplate=None,
    hovertemplatefallback=None,
    hovertemplatesrc=None,
    hovertext=None,
    ids=None,
    idssrc=None,
    legend=None,
    legendgroup=None,
    legendgrouptitle=None,
    legendrank=None,
    legendwidth=None,
    lighting=None,
    lightposition=None,
    maxdisplayed=None,
    meta=None,
    metasrc=None,
    name=None,
    opacity=None,
    reversescale=None,
    scene=None,
    showlegend=None,
    showscale=None,
    sizeref=None,
    starts=None,
    stream=None,
    text=None,
    u=None,
    uhoverformat=None,
    uid=None,
    uirevision=None,
    usrc=None,
    v=None,
    vhoverformat=None,
    visible=None,
    vsrc=None,
    w=None,
    whoverformat=None,
    wsrc=None,
    x=None,
    xhoverformat=None,
    xsrc=None,
    y=None,
    yhoverformat=None,
    ysrc=None,
    z=None,
    zhoverformat=None,
    zsrc=None,
    **kwargs,
):
    """
    Construct a new Streamtube object

    A streamtube trace visualizes flow in a vector field. The field
    is given by six 1D arrays of equal length: positions `x`, `y`,
    `z` and vector components `u`, `v`, `w`. By default the tubes'
    starting positions are cut from the field's x-z plane at its
    minimum y value; use `starts.x`, `starts.y` and `starts.z` to
    choose your own. Tube color encodes the norm of (u, v, w) and
    local tube radius encodes its divergence.

    Parameters
    ----------
    arg
        dict of properties compatible with this constructor or an
        instance of :class:`plotly.graph_objs.Streamtube`
    x, y, z
        Coordinates of the vector field.
    u, v, w
        x/y/z components of the vector field.
    starts
        :class:`plotly.graph_objects.streamtube.Starts` instance or
        dict; custom tube starting positions.
    sizeref, maxdisplayed
        Tube scaling factor and maximum number of displayed
        segments.
    autocolorscale, cauto, cmax, cmid, cmin, coloraxis, colorbar,
    colorscale, reversescale, showscale
        Color-mapping controls: color-domain bounds (in u/v/w-norm
        units), palette selection, shared color axes and colorbar
        display.
    hoverinfo, hoverinfosrc, hoverlabel, hovertemplate,
    hovertemplatefallback, hovertemplatesrc, hovertext
        Hover behavior and hover-label formatting. Templates may use
        the extra variables `tubex`, `tubey`, `tubez`, `tubeu`,
        `tubev`, `tubew`, `norm` and `divergence`.
    uhoverformat, vhoverformat, whoverformat, xhoverformat,
    yhoverformat, zhoverformat
        d3-style hover-value format rules for the corresponding
        arrays.
    legend, legendgroup, legendgrouptitle, legendrank, legendwidth,
    showlegend, name
        Legend placement, grouping, ranking and trace naming.
    lighting, lightposition, opacity, scene
        3D rendering controls and the target `layout.scene`.
    customdata, ids, meta, text, uid, uirevision, visible, stream
        Auxiliary per-trace data, identity/persistence controls and
        Chart Studio streaming settings.
    customdatasrc, idssrc, metasrc, usrc, vsrc, wsrc, xsrc, ysrc,
    zsrc
        Chart Studio Cloud source references for the corresponding
        array properties.

    Returns
    -------
    Streamtube
    """
    super().__init__("streamtube")

    # Internal construction path: when created as a child of an
    # existing figure, only record the parent and do no property
    # processing at all.
    if "_parent" in kwargs:
        self._parent = kwargs["_parent"]
        return

    # Normalize `arg` into a plain dict we can safely mutate.
    if arg is None:
        arg = {}
    elif isinstance(arg, self.__class__):
        arg = arg.to_plotly_json()
    elif isinstance(arg, dict):
        arg = _copy.copy(arg)
    else:
        raise ValueError("""\
The first argument to the plotly.graph_objs.Streamtube
constructor must be a dict or
an instance of :class:`plotly.graph_objs.Streamtube`""")

    self._skip_invalid = kwargs.pop("skip_invalid", False)
    self._validate = kwargs.pop("_validate", True)

    # Apply every explicit constructor argument (falling back to the
    # corresponding entry of `arg`) in one data-driven pass. Dict
    # insertion order preserves the original per-property call order.
    _explicit = dict(
        autocolorscale=autocolorscale,
        cauto=cauto,
        cmax=cmax,
        cmid=cmid,
        cmin=cmin,
        coloraxis=coloraxis,
        colorbar=colorbar,
        colorscale=colorscale,
        customdata=customdata,
        customdatasrc=customdatasrc,
        hoverinfo=hoverinfo,
        hoverinfosrc=hoverinfosrc,
        hoverlabel=hoverlabel,
        hovertemplate=hovertemplate,
        hovertemplatefallback=hovertemplatefallback,
        hovertemplatesrc=hovertemplatesrc,
        hovertext=hovertext,
        ids=ids,
        idssrc=idssrc,
        legend=legend,
        legendgroup=legendgroup,
        legendgrouptitle=legendgrouptitle,
        legendrank=legendrank,
        legendwidth=legendwidth,
        lighting=lighting,
        lightposition=lightposition,
        maxdisplayed=maxdisplayed,
        meta=meta,
        metasrc=metasrc,
        name=name,
        opacity=opacity,
        reversescale=reversescale,
        scene=scene,
        showlegend=showlegend,
        showscale=showscale,
        sizeref=sizeref,
        starts=starts,
        stream=stream,
        text=text,
        u=u,
        uhoverformat=uhoverformat,
        uid=uid,
        uirevision=uirevision,
        usrc=usrc,
        v=v,
        vhoverformat=vhoverformat,
        visible=visible,
        vsrc=vsrc,
        w=w,
        whoverformat=whoverformat,
        wsrc=wsrc,
        x=x,
        xhoverformat=xhoverformat,
        xsrc=xsrc,
        y=y,
        yhoverformat=yhoverformat,
        ysrc=ysrc,
        z=z,
        zhoverformat=zhoverformat,
        zsrc=zsrc,
    )
    for _prop_name, _prop_val in _explicit.items():
        self._set_property(_prop_name, arg, _prop_val)

    # The trace type is fixed; never let `arg` override it.
    self._props["type"] = "streamtube"
    arg.pop("type", None)

    # Remaining entries of `arg` plus any extra keyword arguments are
    # processed generically by the base class.
    self._process_kwargs(**dict(arg, **kwargs))
    self._skip_invalid = False
| Streamtube |
python | plotly__plotly.py | plotly/graph_objs/choroplethmapbox/_hoverlabel.py | {
"start": 233,
"end": 11304
} | class ____(_BaseTraceHierarchyType):
# Dotted path of the parent object within the figure hierarchy.
_parent_path_str = "choroplethmapbox"
# Full dotted path of this object within the figure hierarchy.
_path_str = "choroplethmapbox.hoverlabel"
# Names of the properties this hover-label object accepts.
# NOTE(review): presumably consumed by base-class validation when
# setting properties — confirm against _BaseTraceHierarchyType.
_valid_props = {
    "align",
    "alignsrc",
    "bgcolor",
    "bgcolorsrc",
    "bordercolor",
    "bordercolorsrc",
    "font",
    "namelength",
    "namelengthsrc",
    "showarrow",
}
@property
def align(self):
    """
    Horizontal alignment of the text content inside the hover label
    box. Only has an effect when the label text spans two or more
    lines.

    The 'align' property is an enumeration that may be specified as:
      - One of the following enumeration values:
            ['left', 'right', 'auto']
      - A tuple, list, or one-dimensional numpy array of the above

    Returns
    -------
    Any|numpy.ndarray
    """
    return self["align"]

@align.setter
def align(self, val):
    self["align"] = val
@property
def alignsrc(self):
    """
    Chart Studio Cloud source reference for `align`.

    The 'alignsrc' property must be specified as a string or
    as a plotly.grid_objs.Column object

    Returns
    -------
    str
    """
    return self["alignsrc"]

@alignsrc.setter
def alignsrc(self, val):
    self["alignsrc"] = val
@property
def bgcolor(self):
    """
    Background color of this trace's hover labels.

    The 'bgcolor' property is a color and may be specified as:
      - A hex string (e.g. '#ff0000')
      - An rgb/rgba string (e.g. 'rgb(255,0,0)')
      - An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
      - An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
      - A named CSS color: see https://plotly.com/python/css-colors/ for a list
      - A list or array of any of the above

    Returns
    -------
    str|numpy.ndarray
    """
    return self["bgcolor"]

@bgcolor.setter
def bgcolor(self, val):
    self["bgcolor"] = val
@property
def bgcolorsrc(self):
    """
    Chart Studio Cloud source reference for `bgcolor`.

    The 'bgcolorsrc' property must be specified as a string or
    as a plotly.grid_objs.Column object

    Returns
    -------
    str
    """
    return self["bgcolorsrc"]

@bgcolorsrc.setter
def bgcolorsrc(self, val):
    self["bgcolorsrc"] = val
@property
def bordercolor(self):
    """
    Border color of this trace's hover labels.

    The 'bordercolor' property is a color and may be specified as:
      - A hex string (e.g. '#ff0000')
      - An rgb/rgba string (e.g. 'rgb(255,0,0)')
      - An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
      - An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
      - A named CSS color: see https://plotly.com/python/css-colors/ for a list
      - A list or array of any of the above

    Returns
    -------
    str|numpy.ndarray
    """
    return self["bordercolor"]

@bordercolor.setter
def bordercolor(self, val):
    self["bordercolor"] = val
@property
def bordercolorsrc(self):
    """
    Chart Studio Cloud source reference for `bordercolor`.

    The 'bordercolorsrc' property must be specified as a string or
    as a plotly.grid_objs.Column object

    Returns
    -------
    str
    """
    return self["bordercolorsrc"]

@bordercolorsrc.setter
def bordercolorsrc(self, val):
    self["bordercolorsrc"] = val
@property
def font(self):
    """
    Font used in this trace's hover labels.

    The 'font' property is an instance of Font
    that may be specified as:
      - An instance of :class:`plotly.graph_objs.choroplethmapbox.hoverlabel.Font`
      - A dict of string/value properties that will be passed
        to the Font constructor

    Returns
    -------
    plotly.graph_objs.choroplethmapbox.hoverlabel.Font
    """
    return self["font"]

@font.setter
def font(self, val):
    self["font"] = val
@property
def namelength(self):
    """
    Default length, in characters, of the trace name shown in hover
    labels. -1 shows the whole name regardless of length; 0-3 show
    the first 0-3 characters; an integer > 3 shows the whole name
    when it fits, otherwise truncates to `namelength - 3` characters
    and appends an ellipsis.

    The 'namelength' property is a integer and may be specified as:
      - An int (or float that will be cast to an int)
        in the interval [-1, 9223372036854775807]
      - A tuple, list, or one-dimensional numpy array of the above

    Returns
    -------
    int|numpy.ndarray
    """
    return self["namelength"]

@namelength.setter
def namelength(self, val):
    self["namelength"] = val
@property
def namelengthsrc(self):
    """
    Chart Studio Cloud source reference for `namelength`.

    The 'namelengthsrc' property must be specified as a string or
    as a plotly.grid_objs.Column object

    Returns
    -------
    str
    """
    return self["namelengthsrc"]

@namelengthsrc.setter
def namelengthsrc(self, val):
    self["namelengthsrc"] = val
@property
def showarrow(self):
    """
    Whether to show the hover label's arrow/triangle pointing at the
    data point.

    The 'showarrow' property must be specified as a bool
    (either True, or False)

    Returns
    -------
    bool
    """
    return self["showarrow"]

@showarrow.setter
def showarrow(self, val):
    self["showarrow"] = val
@property
def _prop_descriptions(self):
return """\
align
Sets the horizontal alignment of the text content
within hover label box. Has an effect only if the hover
label text spans more two or more lines
alignsrc
Sets the source reference on Chart Studio Cloud for
`align`.
bgcolor
Sets the background color of the hover labels for this
trace
bgcolorsrc
Sets the source reference on Chart Studio Cloud for
`bgcolor`.
bordercolor
Sets the border color of the hover labels for this
trace.
bordercolorsrc
Sets the source reference on Chart Studio Cloud for
`bordercolor`.
font
Sets the font used in hover labels.
namelength
Sets the default length (in number of characters) of
the trace name in the hover labels for all traces. -1
shows the whole name regardless of length. 0-3 shows
the first 0-3 characters, and an integer >3 will show
the whole name if it is less than that many characters,
but if it is longer, will truncate to `namelength - 3`
characters and add an ellipsis.
namelengthsrc
Sets the source reference on Chart Studio Cloud for
`namelength`.
showarrow
Sets whether or not to show the hover label
arrow/triangle pointing to the data point.
"""
def __init__(
self,
arg=None,
align=None,
alignsrc=None,
bgcolor=None,
bgcolorsrc=None,
bordercolor=None,
bordercolorsrc=None,
font=None,
namelength=None,
namelengthsrc=None,
showarrow=None,
**kwargs,
):
"""
Construct a new Hoverlabel object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.choroplethmapbox.Hoverlabel`
align
Sets the horizontal alignment of the text content
within hover label box. Has an effect only if the hover
label text spans more two or more lines
alignsrc
Sets the source reference on Chart Studio Cloud for
`align`.
bgcolor
Sets the background color of the hover labels for this
trace
bgcolorsrc
Sets the source reference on Chart Studio Cloud for
`bgcolor`.
bordercolor
Sets the border color of the hover labels for this
trace.
bordercolorsrc
Sets the source reference on Chart Studio Cloud for
`bordercolor`.
font
Sets the font used in hover labels.
namelength
Sets the default length (in number of characters) of
the trace name in the hover labels for all traces. -1
shows the whole name regardless of length. 0-3 shows
the first 0-3 characters, and an integer >3 will show
the whole name if it is less than that many characters,
but if it is longer, will truncate to `namelength - 3`
characters and add an ellipsis.
namelengthsrc
Sets the source reference on Chart Studio Cloud for
`namelength`.
showarrow
Sets whether or not to show the hover label
arrow/triangle pointing to the data point.
Returns
-------
Hoverlabel
"""
super().__init__("hoverlabel")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.choroplethmapbox.Hoverlabel
constructor must be a dict or
an instance of :class:`plotly.graph_objs.choroplethmapbox.Hoverlabel`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("align", arg, align)
self._set_property("alignsrc", arg, alignsrc)
self._set_property("bgcolor", arg, bgcolor)
self._set_property("bgcolorsrc", arg, bgcolorsrc)
self._set_property("bordercolor", arg, bordercolor)
self._set_property("bordercolorsrc", arg, bordercolorsrc)
self._set_property("font", arg, font)
self._set_property("namelength", arg, namelength)
self._set_property("namelengthsrc", arg, namelengthsrc)
self._set_property("showarrow", arg, showarrow)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Hoverlabel |
python | huggingface__transformers | src/transformers/models/unispeech_sat/modeling_unispeech_sat.py | {
"start": 2459,
"end": 4140
} | class ____(ModelOutput):
r"""
loss (*optional*, returned when model is in train mode, `torch.FloatTensor` of shape `(1,)`):
Total loss as the sum of the contrastive loss (L_m) and the diversity loss (L_d) as stated in the [official
paper](https://huggingface.co/papers/2006.11477).
logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.proj_codevector_dim)`, *optional*):
Prediction scores of the contrastive loss model, i.e. the output of the model before the final softmax.
projected_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.proj_codevector_dim)`):
Hidden-states of the model projected to *config.proj_codevector_dim* that can be used to predict the masked
projected quantized states.
projected_quantized_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.proj_codevector_dim)`):
Quantized extracted feature vectors projected to *config.proj_codevector_dim* representing the positive
target vectors for contrastive loss.
codevector_perplexity (`torch.FloatTensor` of shape `(1,)`):
The perplexity of the codevector distribution, used to measure the diversity of the codebook.
"""
loss: Optional[torch.FloatTensor] = None
logits: Optional[torch.FloatTensor] = None
projected_states: Optional[torch.FloatTensor] = None
projected_quantized_states: Optional[torch.FloatTensor] = None
codevector_perplexity: Optional[torch.FloatTensor] = None
hidden_states: Optional[tuple[torch.FloatTensor]] = None
attentions: Optional[tuple[torch.FloatTensor]] = None
| UniSpeechSatForPreTrainingOutput |
python | huggingface__transformers | src/transformers/models/bark/modeling_bark.py | {
"start": 37294,
"end": 57216
} | class ____(BarkPreTrainedModel):
base_model_prefix = "fine_acoustics"
config: BarkFineConfig
main_input_name = "codebook_idx"
def __init__(self, config):
# non-causal gpt-like model with one embedding layer and one lm_head for each codebook of Encodec
super().__init__(config)
self.config = config
self._tied_weights_keys = {}
for i in range(self.config.n_codes_total - self.config.n_codes_given):
self._tied_weights_keys[f"lm_heads.{i}.weight"] = f"input_embeds_layers.{i + 1}.weight"
# initialize a modified non causal GPT-like model
# note that for there is one embedding layer and one lm_head for each codebook of Encodec
self.input_embeds_layers = nn.ModuleList(
[nn.Embedding(config.input_vocab_size, config.hidden_size) for _ in range(config.n_codes_total)]
)
self.position_embeds_layer = nn.Embedding(config.block_size, config.hidden_size)
self.drop = nn.Dropout(config.dropout)
self.layers = nn.ModuleList(
[BarkBlock(config, is_causal=False, layer_idx=i) for i in range(config.num_layers)]
)
self.layernorm_final = nn.LayerNorm(config.hidden_size)
self.lm_heads = nn.ModuleList(
[
nn.Linear(config.hidden_size, config.output_vocab_size, bias=False)
for _ in range(config.n_codes_given, config.n_codes_total)
]
)
self.gradient_checkpointing = False
self.n_codes_total = config.n_codes_total
# Initialize weights and apply final processing
self.post_init()
def get_input_embeddings(self):
# one embedding layers for each codebook
return self.input_embeds_layers
def set_input_embeddings(self, new_embeddings):
# one embedding layers for each codebook
self.input_embeds_layers = new_embeddings
def get_output_embeddings(self):
# one lm_head for each codebook
return self.lm_heads
def set_output_embeddings(self, new_output_embeddings):
# one lm_head for each codebook
self.lm_heads = new_output_embeddings
def _resize_token_embeddings(self, new_num_tokens, pad_to_multiple_of=None, mean_resizing=True):
old_embeddings_list = self.get_input_embeddings()
new_embeddings_list = nn.ModuleList(
[
self._get_resized_embeddings(old_embeddings, new_num_tokens, pad_to_multiple_of, mean_resizing)
for old_embeddings in old_embeddings_list
]
)
self.set_input_embeddings(new_embeddings_list)
new_num_tokens = new_embeddings_list[0].weight.shape[0]
# if word embeddings are not tied, make sure that lm head is resized as well
if self.get_output_embeddings() is not None and not self.config.tie_word_embeddings:
old_lm_head_list = self.get_output_embeddings()
new_lm_head_list = nn.ModuleList(
[self._get_resized_lm_head(old_lm_head, new_num_tokens) for old_lm_head in old_lm_head_list]
)
self.set_output_embeddings(new_lm_head_list)
return self.get_input_embeddings()
def resize_token_embeddings(
self,
new_num_tokens: Optional[int] = None,
pad_to_multiple_of: Optional[int] = None,
mean_resizing: bool = True,
) -> nn.Embedding:
"""
Resizes input token embeddings matrix of the model if `new_num_tokens != config.vocab_size`.
Takes care of tying weights embeddings afterwards if the model class has a `tie_weights()` method.
Arguments:
new_num_tokens (`int`, *optional*):
The number of new tokens in the embedding matrix. Increasing the size will add newly initialized
vectors at the end. Reducing the size will remove vectors from the end. If not provided or `None`, just
returns a pointer to the input tokens `torch.nn.Embedding` module of the model without doing anything.
pad_to_multiple_of (`int`, *optional*):
If set will pad the embedding matrix to a multiple of the provided value.
This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability
`>= 7.5` (Volta), or on TPUs which benefit from having sequence lengths be a multiple of 128. For more
details about this, or help on choosing the correct value for resizing, refer to this guide:
https://docs.nvidia.com/deeplearning/performance/dl-performance-matrix-multiplication/index.html#requirements-tc
mean_resizing (`bool`):
Whether to initialize the added embeddings from a multivariate normal distribution that has old embeddings' mean and
covariance or to initialize them with a normal distribution that has a mean of zero and std equals `config.initializer_range`.
Setting `mean_resizing` to `True` is useful when increasing the size of the embeddings of causal language models,
where the generated tokens' probabilities won't be affected by the added embeddings because initializing the new embeddings with the
old embeddings' mean will reduce the kl-divergence between the next token probability before and after adding the new embeddings.
Refer to this article for more information: https://nlp.stanford.edu/~johnhew/vocab-expansion.html
Return:
`torch.nn.Embedding`: Pointer to the input tokens Embeddings Module of the model.
"""
model_embeds = self._resize_token_embeddings(new_num_tokens, pad_to_multiple_of, mean_resizing)
if new_num_tokens is None and pad_to_multiple_of is None:
return model_embeds
# Update base model and current model config
self.config.output_vocab_size = model_embeds[0].weight.shape[0]
self.config.vocab_size = model_embeds[0].weight.shape[0]
self.output_vocab_size = model_embeds[0].weight.shape[0]
self.vocab_size = model_embeds[0].weight.shape[0]
# Tie weights again if needed
self.tie_weights()
return model_embeds
@auto_docstring
def forward(
self,
codebook_idx: int, # an additional idx corresponding to the id of the codebook that will be predicted
input_ids: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.Tensor] = None,
labels: Optional[torch.LongTensor] = None,
input_embeds: Optional[torch.Tensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[tuple[torch.Tensor], MaskedLMOutput]:
r"""
codebook_idx (`int`):
Index of the codebook that will be predicted.
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
NOT IMPLEMENTED YET.
input_embeds (`torch.FloatTensor` of shape `(batch_size, input_sequence_length, hidden_size)`, *optional*):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. If
`past_key_values` is used, optionally only the last `input_embeds` have to be input (see
`past_key_values`). This is useful if you want more control over how to convert `input_ids` indices into
associated vectors than the model's internal embedding lookup matrix.
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
loss = None
if labels is not None:
raise NotImplementedError("Training is not implemented yet")
if codebook_idx == 0:
raise ValueError("Cannot predict 0th codebook - 0th codebook should be predicted by the coarse model")
if input_ids is not None and input_embeds is not None:
raise ValueError("You cannot specify both input_ids and input_embeds at the same time")
if input_ids is None and input_embeds is None:
raise ValueError("You have to specify either input_ids or input_embeds")
if input_ids is not None:
# the input_embeddings are the sum of the j previous codebooks embeddings before
# the current codebook_idx codebook
# forward the GPT model itself
input_embeds = [
input_embeds_layer(input_ids[:, :, i]).unsqueeze(-1)
for i, input_embeds_layer in enumerate(self.input_embeds_layers)
] # token embeddings of shape (b, t, n_embd)
input_embeds = torch.cat(input_embeds, dim=-1)
input_embeds = input_embeds[:, :, :, : codebook_idx + 1].sum(dim=-1)
input_shape = input_embeds.size()[:-1]
batch_size = input_embeds.shape[0]
seq_length = input_shape[1]
device = input_ids.device if input_ids is not None else input_embeds.device
if position_ids is None:
position_ids = torch.arange(0, seq_length, dtype=torch.long, device=device)
position_ids = position_ids.unsqueeze(0) # shape (1, seq_length)
position_embeds = self.position_embeds_layer(position_ids) # position embeddings of shape (1, t, n_embd)
# Attention mask.
if attention_mask is not None:
if batch_size <= 0:
raise ValueError("batch_size has to be defined and > 0")
if self.config._attn_implementation == "flash_attention_2":
attention_mask = attention_mask if 0 in attention_mask else None
else:
# [bsz, to_seq_length] -> [bsz, 1, 1, to_seq_length]
# from_seq_length is 1 to easily broadcast
attention_mask = _prepare_4d_attention_mask(attention_mask, input_embeds.dtype, tgt_len=1)
hidden_states = self.drop(input_embeds + position_embeds)
output_shape = input_shape + (hidden_states.size(-1),)
all_self_attentions = () if output_attentions else None
all_hidden_states = () if output_hidden_states else None
for i, block in enumerate(self.layers):
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
outputs = block(
hidden_states,
attention_mask=attention_mask,
output_attentions=output_attentions,
)
hidden_states = outputs[0]
if output_attentions:
all_self_attentions = all_self_attentions + (outputs[1],)
hidden_states = self.layernorm_final(hidden_states)
hidden_states = hidden_states.view(output_shape)
# Add last hidden state
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
logits = self.lm_heads[codebook_idx - self.config.n_codes_given](hidden_states)
if not return_dict:
return tuple(v for v in [None, logits, all_hidden_states, all_self_attentions] if v is not None)
return MaskedLMOutput(
loss=loss,
logits=logits,
hidden_states=all_hidden_states,
attentions=all_self_attentions,
)
@torch.no_grad()
def generate(
self,
coarse_output: torch.Tensor,
semantic_generation_config: Optional[BarkSemanticGenerationConfig] = None,
coarse_generation_config: Optional[BarkCoarseGenerationConfig] = None,
fine_generation_config: BarkFineGenerationConfig = None,
codebook_size: int = 1024,
history_prompt: Optional[dict[str, torch.Tensor]] = None,
**kwargs,
) -> torch.LongTensor:
"""
Generates fine acoustics tokens from input coarse acoustics tokens and an additional optional `Bark` speaker
prompt.
Args:
coarse_output (`torch.Tensor` of shape (batch_size, seq_len)):
Input coarse acoustics ids, i.e the output of `BarkCoarseModel.generate`.
semantic_generation_config (`BarkSemanticGenerationConfig`):
Generation config indicating how to generate the semantic tokens.
coarse_generation_config (`BarkCoarseGenerationConfig`):
Generation config indicating how to generate the coarse tokens.
fine_generation_config (`BarkFineGenerationConfig`):
Generation config indicating how to generate the fine tokens.
codebook_size (`int`, *optional*, defaults to 1024):
Codebook channel size, i.e. the size of the output vocabulary per codebook channel.
history_prompt (`Optional[dict[str,torch.Tensor]]`, *optional*):
Optional `Bark` speaker prompt.
Returns:
torch.LongTensor: Output fine acoustics tokens.
"""
if semantic_generation_config is None:
raise ValueError("`semantic_generation_config` has to be provided")
if coarse_generation_config is None:
raise ValueError("`coarse_generation_config` has to be provided")
if fine_generation_config is None:
raise ValueError("`fine_generation_config` has to be provided")
# since we don't really use GenerationConfig through the fine model (autoencoder)
# and since only temperature is used from the classic GenerationConfig parameters
# manually impose the kwargs priority over the generation config
temperature = kwargs.get("temperature", fine_generation_config.temperature)
max_fine_history_length = fine_generation_config.max_fine_history_length
max_fine_input_length = fine_generation_config.max_fine_input_length
# shape: (batch, n_coarse_codebooks * seq_len)
# new_shape: (batch, seq_len, n_coarse_codebooks)
coarse_output = coarse_output.view(coarse_output.shape[0], -1, coarse_generation_config.n_coarse_codebooks)
# brings ids into the range [0, codebook_size -1]
coarse_output = torch.remainder(coarse_output - semantic_generation_config.semantic_vocab_size, codebook_size)
batch_size = coarse_output.shape[0]
if history_prompt is not None:
x_fine_history = torch.repeat_interleave(history_prompt["fine_prompt"].T[None], batch_size, dim=0)
# transpose to get to shape (seq_len, n_fine_codebooks)
else:
x_fine_history = None
n_coarse = coarse_generation_config.n_coarse_codebooks
# pad the last 6th codebooks
fine_input = F.pad(
coarse_output,
(0, fine_generation_config.n_fine_codebooks - n_coarse),
"constant",
codebook_size,
)
# prepend history if available (max max_fine_history_length)
if x_fine_history is not None:
fine_input = torch.cat([x_fine_history[:, -max_fine_history_length:, :], fine_input], dim=1)
# len of the fine_history that has been added to fine_input
n_history = x_fine_history[:, -max_fine_history_length:, :].shape[1]
else:
n_history = 0
n_remove_from_end = 0
# need to pad if too short (since non-causal model)
if fine_input.shape[1] < max_fine_input_length:
n_remove_from_end = max_fine_input_length - fine_input.shape[1]
fine_input = F.pad(fine_input, (0, 0, 0, n_remove_from_end), mode="constant", value=codebook_size)
# we can be lazy about fractional loop and just keep overwriting codebooks.
# seems that coarse_output.shape[1] - (max_fine_input_length - n_history) is equal to minus n_remove_from_end
# So if we needed to pad because too short, n_loops is always 1 (because n_remove_from_end > 0)
# If not, we loop over at least twice.
n_loops = (coarse_output.shape[1] - (max_fine_input_length - n_history)) / max_fine_history_length
n_loops = int(np.ceil(n_loops))
n_loops = max(0, n_loops) + 1
for n_outer in range(n_loops):
start_idx = min([n_outer * max_fine_history_length, fine_input.shape[1] - max_fine_input_length])
start_fill_idx = min(
[n_history + n_outer * max_fine_history_length, fine_input.shape[1] - max_fine_history_length]
)
rel_start_fill_idx = start_fill_idx - start_idx
input_buffer = fine_input[:, start_idx : start_idx + max_fine_input_length, :]
for n_inner in range(n_coarse, fine_generation_config.n_fine_codebooks):
logits = self.forward(n_inner, input_buffer).logits
if temperature is None or temperature == 1.0:
relevant_logits = logits[:, rel_start_fill_idx:, :codebook_size]
codebook_preds = torch.argmax(relevant_logits, -1)
else:
relevant_logits = logits[:, :, :codebook_size] / temperature
# apply softmax
probs = F.softmax(relevant_logits, dim=-1)[:, rel_start_fill_idx:max_fine_input_length]
# reshape to 2D: (batch_size, seq_len, codebook_size) -> (batch_size*seq_len, codebook_size)
probs = probs.reshape((-1, codebook_size))
# multinomial then reshape : (batch_size*seq_len)-> (batch_size,seq_len)
codebook_preds = torch.multinomial(probs, num_samples=1).view(batch_size, -1)
codebook_preds = codebook_preds.to(torch.int32)
input_buffer[:, rel_start_fill_idx:, n_inner] = codebook_preds
del logits, codebook_preds
# transfer into fine_input
for n_inner in range(n_coarse, fine_generation_config.n_fine_codebooks):
fine_input[
:, start_fill_idx : start_fill_idx + (max_fine_input_length - rel_start_fill_idx), n_inner
] = input_buffer[:, rel_start_fill_idx:, n_inner]
del input_buffer
fine_input = fine_input.transpose(1, 2)[:, :, n_history:]
if n_remove_from_end > 0:
fine_input = fine_input[:, :, :-n_remove_from_end]
if fine_input.shape[-1] != coarse_output.shape[-2]:
raise ValueError("input and output should have the same seq_len")
return fine_input
@auto_docstring(
custom_intro="""
The full Bark model, a text-to-speech model composed of 4 sub-models:
- [`BarkSemanticModel`] (also referred to as the 'text' model): a causal auto-regressive transformer model that
takes
as input tokenized text, and predicts semantic text tokens that capture the meaning of the text.
- [`BarkCoarseModel`] (also referred to as the 'coarse acoustics' model), also a causal autoregressive transformer,
that takes into input the results of the last model. It aims at regressing the first two audio codebooks necessary
to `encodec`.
- [`BarkFineModel`] (the 'fine acoustics' model), this time a non-causal autoencoder transformer, which iteratively
predicts the last codebooks based on the sum of the previous codebooks embeddings.
- having predicted all the codebook channels from the [`EncodecModel`], Bark uses it to decode the output audio
array.
It should be noted that each of the first three modules can support conditional speaker embeddings to condition the
output sound according to specific predefined voice.
"""
)
| BarkFineModel |
python | run-llama__llama_index | llama-index-integrations/storage/chat_store/llama-index-storage-chat-store-tablestore/llama_index/storage/chat_store/tablestore/base.py | {
"start": 690,
"end": 9095
} | class ____(BaseChatStore):
"""
Tablestore Chat Store.
Args:
tablestore_client (OTSClient, optional): External tablestore(ots) client.
If this parameter is set, the following endpoint/instance_name/access_key_id/access_key_secret will be ignored.
endpoint (str, optional): Tablestore instance endpoint.
instance_name (str, optional): Tablestore instance name.
access_key_id (str, optional): Aliyun access key id.
access_key_secret (str, optional): Aliyun access key secret.
table_name (str, optional): Tablestore table name.
Returns:
TablestoreChatStore: A Tablestore chat store object.
"""
table_name: str
_primary_key: str = "session_id"
_history_column: str = "history"
_tablestore_client: tablestore.OTSClient
def __init__(
self,
tablestore_client: Optional[tablestore.OTSClient] = None,
endpoint: Optional[str] = None,
instance_name: Optional[str] = None,
access_key_id: Optional[str] = None,
access_key_secret: Optional[str] = None,
table_name: str = "llama_index_chat_store_v1",
**kwargs: Any,
) -> None:
super().__init__(
table_name=table_name,
)
if not tablestore_client:
self._tablestore_client = tablestore.OTSClient(
endpoint,
access_key_id,
access_key_secret,
instance_name,
retry_policy=tablestore.WriteRetryPolicy(),
**kwargs, # pass additional arguments
)
else:
self._tablestore_client = tablestore_client
def create_table_if_not_exist(self) -> None:
"""Create table if not exist."""
table_list = self._tablestore_client.list_table()
if self.table_name in table_list:
logger.info(
f"Tablestore chat store table[{self.table_name}] already exists"
)
return
logger.info(
f"Tablestore chat store table[{self.table_name}] does not exist, try to create the table."
)
table_meta = tablestore.TableMeta(
self.table_name, [(self._primary_key, "STRING")]
)
reserved_throughput = tablestore.ReservedThroughput(
tablestore.CapacityUnit(0, 0)
)
self._tablestore_client.create_table(
table_meta, tablestore.TableOptions(), reserved_throughput
)
logger.info(
f"Tablestore create chat store table[{self.table_name}] successfully."
)
def clear_store(self):
"""Delete all messages."""
keys = self.get_keys()
for key in keys:
self.delete_messages(key)
@classmethod
def class_name(self) -> str:
return "TablestoreChatStore"
def set_messages(self, key: str, messages: List[ChatMessage]) -> None:
"""
Assign all provided messages to the row with the given key.
Any pre-existing messages for that key will be overwritten.
Args:
key (str): The key specifying a row.
messages (List[ChatMessage]): The messages to assign to the key.
Returns:
None
"""
primary_key = [(self._primary_key, key)]
attribute_columns = [
(
self._history_column,
json.dumps(_messages_to_dict(messages), ensure_ascii=False),
),
]
row = tablestore.Row(primary_key, attribute_columns)
self._tablestore_client.put_row(self.table_name, row)
def get_messages(self, key: str) -> List[ChatMessage]:
"""
Retrieve all messages for the given key.
Args:
key (str): The key specifying a row.
Returns:
List[ChatMessage]: The messages associated with the key.
"""
primary_key = [(self._primary_key, key)]
_, row, _ = self._tablestore_client.get_row(
self.table_name, primary_key, None, None, 1
)
history = {}
if row is not None:
for col in row.attribute_columns:
key = col[0]
val = col[1]
if key == self._history_column:
history = json.loads(val)
continue
return [_dict_to_message(message) for message in history]
def add_message(self, key: str, message: ChatMessage) -> None:
"""
Add a message to the end of the chat history for the given key.
Creates a new row if the key does not exist.
Args:
key (str): The key specifying a row.
message (ChatMessage): The message to add to the chat history.
Returns:
None
"""
current_messages = self.get_messages(key)
current_messages.append(message)
self.set_messages(key, current_messages)
def delete_messages(self, key: str) -> Optional[List[ChatMessage]]:
"""
Deletes the entire chat history for the given key (i.e. the row).
Args:
key (str): The key specifying a row.
Returns:
Optional[List[ChatMessage]]: The messages that were deleted. None if the
deletion failed.
"""
messages_to_delete = self.get_messages(key)
primary_key = [(self._primary_key, key)]
self._tablestore_client.delete_row(self.table_name, primary_key, None)
return messages_to_delete
def delete_message(self, key: str, idx: int) -> Optional[ChatMessage]:
"""
Deletes the message at the given index for the given key.
Args:
key (str): The key specifying a row.
idx (int): The index of the message to delete.
Returns:
Optional[ChatMessage]: The message that was deleted. None if the index
did not exist.
"""
current_messages = self.get_messages(key)
try:
message_to_delete = current_messages[idx]
del current_messages[idx]
self.set_messages(key, current_messages)
return message_to_delete
except IndexError:
logger.error(
IndexError(f"No message exists at index, {idx}, for key {key}")
)
return None
def delete_last_message(self, key: str) -> Optional[ChatMessage]:
"""
Deletes the last message in the chat history for the given key.
Args:
key (str): The key specifying a row.
Returns:
Optional[ChatMessage]: The message that was deleted. None if the chat history
was empty.
"""
return self.delete_message(key, -1)
def get_keys(self) -> List[str]:
"""
Retrieve all keys in the table.
Returns:
List[str]: The keys in the table.
"""
keys = []
inclusive_start_primary_key = [(self._primary_key, tablestore.INF_MIN)]
exclusive_end_primary_key = [(self._primary_key, tablestore.INF_MAX)]
limit = 5000
columns_to_get = []
(
consumed,
next_start_primary_key,
row_list,
next_token,
) = self._tablestore_client.get_range(
self.table_name,
tablestore.Direction.FORWARD,
inclusive_start_primary_key,
exclusive_end_primary_key,
columns_to_get,
limit,
max_version=1,
)
if row_list:
for row in row_list:
keys.append(row.primary_key[0][1])
while next_start_primary_key is not None:
inclusive_start_primary_key = next_start_primary_key
(
consumed,
next_start_primary_key,
row_list,
next_token,
) = self._tablestore_client.get_range(
self.table_name,
tablestore.Direction.FORWARD,
inclusive_start_primary_key,
exclusive_end_primary_key,
columns_to_get,
limit,
max_version=1,
)
if row_list:
for row in row_list:
keys.append(row.primary_key[0][1])
return keys
| TablestoreChatStore |
python | aio-libs__aiohttp | aiohttp/abc.py | {
"start": 682,
"end": 1327
} | class ____(ABC):
def __init__(self) -> None:
self._frozen = False
def post_init(self, app: Application) -> None:
"""Post init stage.
Not an abstract method for sake of backward compatibility,
but if the router wants to be aware of the application
it can override this.
"""
@property
def frozen(self) -> bool:
return self._frozen
def freeze(self) -> None:
"""Freeze router."""
self._frozen = True
@abstractmethod
async def resolve(self, request: Request) -> "AbstractMatchInfo":
"""Return MATCH_INFO for given request"""
| AbstractRouter |
python | sympy__sympy | sympy/polys/orderings.py | {
"start": 4157,
"end": 8051
} | class ____(MonomialOrder):
"""
The "inverse" of another monomial order.
If O is any monomial order, we can construct another monomial order iO
such that `A >_{iO} B` if and only if `B >_O A`. This is useful for
constructing local orders.
Note that many algorithms only work with *global* orders.
For example, in the inverse lexicographic order on a single variable `x`,
high powers of `x` count as small:
>>> from sympy.polys.orderings import lex, InverseOrder
>>> ilex = InverseOrder(lex)
>>> ilex((5,)) < ilex((0,))
True
"""
def __init__(self, O):
self.O = O
def __str__(self):
return "i" + str(self.O)
def __call__(self, monomial):
def inv(l):
if iterable(l):
return tuple(inv(x) for x in l)
return -l
return inv(self.O(monomial))
@property
def is_global(self):
if self.O.is_global is True:
return False
if self.O.is_global is False:
return True
return None
def __eq__(self, other):
return isinstance(other, InverseOrder) and other.O == self.O
def __hash__(self):
return hash((self.__class__, self.O))
lex = LexOrder()
grlex = GradedLexOrder()
grevlex = ReversedGradedLexOrder()
ilex = InverseOrder(lex)
igrlex = InverseOrder(grlex)
igrevlex = InverseOrder(grevlex)
_monomial_key: dict[str, MonomialOrder] = {
'lex': lex,
'grlex': grlex,
'grevlex': grevlex,
'ilex': ilex,
'igrlex': igrlex,
'igrevlex': igrevlex
}
@overload
def monomial_key(
order: str | Symbol | None = None, gens: None = None
) -> MonomialOrder: ...
@overload
def monomial_key(order: MonomKey, gens: None = None) -> MonomKey: ...
@overload
def monomial_key(
order: str | Symbol | MonomKey | None = None, *, gens: Sequence[Symbol]
) -> Callable[[Expr], Any]: ...
@overload
def monomial_key(
order: str | Symbol | MonomKey | None, gens: Sequence[Symbol]
) -> Callable[[Expr], Any]: ...
def monomial_key(
order: str | Symbol | MonomKey | None = None, gens: Sequence[Symbol] | None = None
) -> MonomKey | MonomialOrder | Callable[[Expr], Any]:
"""
Return a function defining admissible order on monomials.
The result of a call to :func:`monomial_key` is a function which should
be used as a key to :func:`sorted` built-in function, to provide order
in a set of monomials of the same length.
Currently supported monomial orderings are:
1. lex - lexicographic order (default)
2. grlex - graded lexicographic order
3. grevlex - reversed graded lexicographic order
4. ilex, igrlex, igrevlex - the corresponding inverse orders
If the ``order`` input argument is not a string but has ``__call__``
attribute, then it will pass through with an assumption that the
callable object defines an admissible order on monomials.
If the ``gens`` input argument contains a list of generators, the
resulting key function can be used to sort SymPy ``Expr`` objects.
"""
func: Callable[[tuple[int, ...]], Any]
if order is None:
func = lex
elif isinstance(order, (str, Symbol)):
order = str(order)
try:
func = _monomial_key[order]
except KeyError:
raise ValueError("supported monomial orderings are 'lex', 'grlex' and 'grevlex', got %r" % order)
elif hasattr(order, '__call__'):
func = order
else:
raise ValueError("monomial ordering specification must be a string or a callable, got %s" % order)
if gens is not None:
# XXX: Remove this. It should be defined somewhere else and not part
# of the monomial_key function.
def _func(expr: Expr):
return func(expr.as_poly(*gens).degree_list()) # type: ignore
return _func
return func
| InverseOrder |
python | django__django | django/contrib/admin/utils.py | {
"start": 6214,
"end": 17193
} | class ____(Collector):
def __init__(self, *args, force_collection=True, **kwargs):
super().__init__(*args, force_collection=force_collection, **kwargs)
self.edges = {} # {from_instance: [to_instances]}
self.protected = set()
self.model_objs = defaultdict(set)
def add_edge(self, source, target):
self.edges.setdefault(source, []).append(target)
def collect(self, objs, source=None, source_attr=None, **kwargs):
for obj in objs:
if source_attr and not source_attr.endswith("+"):
related_name = source_attr % {
"class": source._meta.model_name,
"app_label": source._meta.app_label,
}
self.add_edge(getattr(obj, related_name), obj)
else:
self.add_edge(None, obj)
self.model_objs[obj._meta.model].add(obj)
try:
return super().collect(objs, source_attr=source_attr, **kwargs)
except models.ProtectedError as e:
self.protected.update(e.protected_objects)
except models.RestrictedError as e:
self.protected.update(e.restricted_objects)
def related_objects(self, related_model, related_fields, objs):
qs = super().related_objects(related_model, related_fields, objs)
return qs.select_related(
*[related_field.name for related_field in related_fields]
)
def _nested(self, obj, seen, format_callback):
if obj in seen:
return []
seen.add(obj)
children = []
for child in self.edges.get(obj, ()):
children.extend(self._nested(child, seen, format_callback))
if format_callback:
ret = [format_callback(obj)]
else:
ret = [obj]
if children:
ret.append(children)
return ret
def nested(self, format_callback=None):
"""
Return the graph as a nested list.
"""
seen = set()
roots = []
for root in self.edges.get(None, ()):
roots.extend(self._nested(root, seen, format_callback))
return roots
def model_format_dict(obj):
"""
Return a `dict` with keys 'verbose_name' and 'verbose_name_plural',
typically for use with string formatting.
`obj` may be a `Model` instance, `Model` subclass, or `QuerySet` instance.
"""
if isinstance(obj, (models.Model, models.base.ModelBase)):
opts = obj._meta
elif isinstance(obj, models.query.QuerySet):
opts = obj.model._meta
else:
opts = obj
return {
"verbose_name": opts.verbose_name,
"verbose_name_plural": opts.verbose_name_plural,
}
def model_ngettext(obj, n=None):
"""
Return the appropriate `verbose_name` or `verbose_name_plural` value for
`obj` depending on the count `n`.
`obj` may be a `Model` instance, `Model` subclass, or `QuerySet` instance.
If `obj` is a `QuerySet` instance, `n` is optional and the length of the
`QuerySet` is used.
"""
if isinstance(obj, models.query.QuerySet):
if n is None:
n = obj.count()
obj = obj.model
d = model_format_dict(obj)
singular, plural = d["verbose_name"], d["verbose_name_plural"]
return ngettext(singular, plural, n or 0)
def lookup_field(name, obj, model_admin=None):
opts = obj._meta
try:
f = _get_non_gfk_field(opts, name)
except (FieldDoesNotExist, FieldIsAForeignKeyColumnName):
# For non-regular field values, the value is either a method,
# property, related field, or returned via a callable.
if callable(name):
attr = name
value = attr(obj)
elif hasattr(model_admin, name) and name != "__str__":
attr = getattr(model_admin, name)
value = attr(obj)
else:
sentinel = object()
attr = getattr(obj, name, sentinel)
if callable(attr):
value = attr()
else:
if attr is sentinel:
attr = obj
for part in name.split(LOOKUP_SEP):
attr = getattr(attr, part, sentinel)
if attr is sentinel:
return None, None, None
value = attr
if hasattr(model_admin, "model") and hasattr(model_admin.model, name):
attr = getattr(model_admin.model, name)
f = None
else:
attr = None
value = getattr(obj, name)
return f, attr, value
def _get_non_gfk_field(opts, name):
"""
For historical reasons, the admin app relies on GenericForeignKeys as being
"not found" by get_field(). This could likely be cleaned up.
Reverse relations should also be excluded as these aren't attributes of the
model (rather something like `foo_set`).
"""
field = opts.get_field(name)
if (
field.is_relation
and
# Generic foreign keys OR reverse relations
((field.many_to_one and not field.related_model) or field.one_to_many)
):
raise FieldDoesNotExist()
# Avoid coercing <FK>_id fields to FK
if (
field.is_relation
and not field.many_to_many
and hasattr(field, "attname")
and field.attname == name
):
raise FieldIsAForeignKeyColumnName()
return field
def label_for_field(name, model, model_admin=None, return_attr=False, form=None):
"""
Return a sensible label for a field name. The name can be a callable,
property (but not created with @property decorator), or the name of an
object's attribute, as well as a model field, including across related
objects. If return_attr is True, also return the resolved attribute
(which could be a callable). This will be None if (and only if) the name
refers to a field.
"""
attr = None
try:
field = _get_non_gfk_field(model._meta, name)
try:
label = field.verbose_name
except AttributeError:
# field is likely a ForeignObjectRel
label = field.related_model._meta.verbose_name
except FieldDoesNotExist:
if name == "__str__":
label = str(model._meta.verbose_name)
attr = str
else:
if callable(name):
attr = name
elif hasattr(model_admin, name):
attr = getattr(model_admin, name)
elif hasattr(model, name):
attr = getattr(model, name)
elif form and name in form.fields:
attr = form.fields[name]
else:
try:
attr = get_fields_from_path(model, name)[-1]
except (FieldDoesNotExist, NotRelationField):
message = f"Unable to lookup '{name}' on {model._meta.object_name}"
if model_admin:
message += f" or {model_admin.__class__.__name__}"
if form:
message += f" or {form.__class__.__name__}"
raise AttributeError(message)
if hasattr(attr, "short_description"):
label = attr.short_description
elif (
isinstance(attr, property)
and hasattr(attr, "fget")
and hasattr(attr.fget, "short_description")
):
label = attr.fget.short_description
elif callable(attr):
if attr.__name__ == "<lambda>":
label = "--"
else:
label = pretty_name(attr.__name__)
else:
label = pretty_name(name)
except FieldIsAForeignKeyColumnName:
label = pretty_name(name)
attr = name
if return_attr:
return (label, attr)
else:
return label
def help_text_for_field(name, model):
help_text = ""
try:
field = _get_non_gfk_field(model._meta, name)
except (FieldDoesNotExist, FieldIsAForeignKeyColumnName):
pass
else:
if hasattr(field, "help_text"):
help_text = field.help_text
return help_text
def display_for_field(value, field, empty_value_display, avoid_link=False):
from django.contrib.admin.templatetags.admin_list import _boolean_icon
if field.name == "password" and field.model == get_user_model():
return render_password_as_hash(value)
elif getattr(field, "flatchoices", None):
try:
return dict(field.flatchoices).get(value, empty_value_display)
except TypeError:
# Allow list-like choices.
flatchoices = make_hashable(field.flatchoices)
value = make_hashable(value)
return dict(flatchoices).get(value, empty_value_display)
# BooleanField needs special-case null-handling, so it comes before the
# general null test.
elif isinstance(field, models.BooleanField):
return _boolean_icon(value)
elif value in field.empty_values:
return empty_value_display
elif isinstance(field, models.DateTimeField):
return formats.localize(timezone.template_localtime(value))
elif isinstance(field, (models.DateField, models.TimeField)):
return formats.localize(value)
elif isinstance(field, models.DecimalField):
return formats.number_format(value, field.decimal_places)
elif isinstance(field, (models.IntegerField, models.FloatField)):
return formats.number_format(value)
elif isinstance(field, models.FileField) and value and not avoid_link:
return format_html('<a href="{}">{}</a>', value.url, value)
elif isinstance(field, models.URLField) and value and not avoid_link:
return format_html('<a href="{}">{}</a>', value, value)
elif isinstance(field, models.JSONField) and value:
try:
return json.dumps(value, ensure_ascii=False, cls=field.encoder)
except TypeError:
return display_for_value(value, empty_value_display)
else:
return display_for_value(value, empty_value_display)
def display_for_value(value, empty_value_display, boolean=False):
from django.contrib.admin.templatetags.admin_list import _boolean_icon
if boolean:
return _boolean_icon(value)
elif value in EMPTY_VALUES:
return empty_value_display
elif isinstance(value, bool):
return str(value)
elif isinstance(value, datetime.datetime):
return formats.localize(timezone.template_localtime(value))
elif isinstance(value, (datetime.date, datetime.time)):
return formats.localize(value)
elif isinstance(value, (int, decimal.Decimal, float)):
return formats.number_format(value)
elif isinstance(value, (list, tuple)):
return ", ".join(str(v) for v in value)
else:
return str(value)
| NestedObjects |
python | pallets__click | src/click/_winconsole.py | {
"start": 3671,
"end": 4695
} | class ____(_WindowsConsoleRawIOBase):
def readable(self) -> t.Literal[True]:
return True
def readinto(self, b: Buffer) -> int:
bytes_to_be_read = len(b)
if not bytes_to_be_read:
return 0
elif bytes_to_be_read % 2:
raise ValueError(
"cannot read odd number of bytes from UTF-16-LE encoded console"
)
buffer = get_buffer(b, writable=True)
code_units_to_be_read = bytes_to_be_read // 2
code_units_read = c_ulong()
rv = ReadConsoleW(
HANDLE(self.handle),
buffer,
code_units_to_be_read,
byref(code_units_read),
None,
)
if GetLastError() == ERROR_OPERATION_ABORTED:
# wait for KeyboardInterrupt
time.sleep(0.1)
if not rv:
raise OSError(f"Windows error: {GetLastError()}")
if buffer[0] == EOF:
return 0
return 2 * code_units_read.value
| _WindowsConsoleReader |
python | crytic__slither | slither/printers/summary/cheatcodes.py | {
"start": 241,
"end": 2593
} | class ____(AbstractPrinter):
ARGUMENT = "cheatcode"
HELP = """
Print the usage of (Foundry) cheatcodes in the code.
For the complete list of Cheatcodes, see https://book.getfoundry.sh/cheatcodes/
"""
WIKI = "https://github.com/trailofbits/slither/wiki/Printer-documentation#cheatcode"
def output(self, filename: str) -> output.Output:
info: str = ""
try:
vm = self.slither.get_contract_from_name("Vm").pop()
except IndexError:
return output.Output("No contract named VM found")
for contract in self.slither.contracts_derived:
# Check that the IS_TEST variable is set. (Only works for Foundry)
is_test_var = contract.variables_as_dict.get("IS_TEST", None)
is_test = False
if is_test_var is not None:
try:
is_test = is_test_var.expression.value == "true"
except AttributeError:
pass
if not is_test:
continue
found_contract: bool = False
contract_info: str = ""
for func in contract.functions_declared:
function_info = f"\t{func}\n"
found_function: bool = False
for node in func.nodes:
for op in node.all_slithir_operations():
if (
isinstance(op, HighLevelCall)
and op.function.contract == vm
and op.function.visibility == "external"
):
found_function = True
function_info += (
f"\t\t{op.function.name} - ({node.source_mapping.to_detailed_str()})\n"
f"\t\t{node.expression}\n\n"
)
if found_function:
if found_contract is False:
contract_info = f"{contract} ({contract.source_mapping.filename.short})\n"
found_contract = True
contract_info += function_info
if found_contract:
info += contract_info
self.info(info)
res = output.Output(info)
return res
| CheatcodePrinter |
python | fluentpython__example-code-2e | 24-class-metaprog/qualname/models.py | {
"start": 56,
"end": 262
} | class ____(models.Model):
horn_length = models.IntegerField()
class Meta:
ordering = ['horn_length']
verbose_name_plural = 'oxen'
print(Ox.Meta.__name__)
print(Ox.Meta.__qualname__)
| Ox |
python | kamyu104__LeetCode-Solutions | Python/maximum-width-of-binary-tree.py | {
"start": 29,
"end": 582
} | class ____(object):
def widthOfBinaryTree(self, root):
"""
:type root: TreeNode
:rtype: int
"""
def dfs(node, i, depth, leftmosts):
if not node:
return 0
if depth >= len(leftmosts):
leftmosts.append(i)
return max(i-leftmosts[depth]+1, \
dfs(node.left, i*2, depth+1, leftmosts), \
dfs(node.right, i*2+1, depth+1, leftmosts))
leftmosts = []
return dfs(root, 1, 0, leftmosts)
| Solution |
python | cython__cython | Cython/Debugger/Tests/test_libcython_in_gdb.py | {
"start": 1143,
"end": 2789
} | class ____(unittest.TestCase, metaclass=TraceMethodCallMeta):
"""
Base class for test cases. On teardown it kills the inferior and unsets
all breakpoints.
"""
def __init__(self, name):
super().__init__(name)
self.cy = libcython.cy
self.module = libcython.cy.cython_namespace['codefile']
self.spam_func, self.spam_meth = libcython.cy.functions_by_name['spam']
self.ham_func = libcython.cy.functions_by_qualified_name[
'codefile.ham']
self.eggs_func = libcython.cy.functions_by_qualified_name[
'codefile.eggs']
def read_var(self, varname, cast_to=None):
result = gdb.parse_and_eval('$cy_cvalue("%s")' % varname)
if cast_to:
result = cast_to(result)
return result
def local_info(self):
return gdb.execute('info locals', to_string=True)
def lineno_equals(self, source_line=None, lineno=None):
if source_line is not None:
lineno = test_libcython.source_to_lineno[source_line]
frame = gdb.selected_frame()
self.assertEqual(libcython.cython_info.lineno(frame), lineno)
def break_and_run(self, source_line):
break_lineno = test_libcython.source_to_lineno[source_line]
gdb.execute('cy break codefile:%d' % break_lineno, to_string=True)
gdb.execute('run', to_string=True)
def tearDown(self):
gdb.execute('delete breakpoints', to_string=True)
try:
gdb.execute('kill inferior 1', to_string=True)
except RuntimeError:
pass
gdb.execute('set args -c "import codefile"')
| DebugTestCase |
python | python-excel__xlrd | xlrd/formatting.py | {
"start": 43593,
"end": 45573
} | class ____(BaseObject):
"""
eXtended Formatting information for cells, rows, columns and styles.
Each of the 6 flags below describes the validity of
a specific group of attributes.
In cell XFs:
- ``flag==0`` means the attributes of the parent style ``XF`` are
used, (but only if the attributes are valid there);
- ``flag==1`` means the attributes of this ``XF`` are used.
In style XFs:
- ``flag==0`` means the attribute setting is valid;
- ``flag==1`` means the attribute should be ignored.
.. note::
the API provides both "raw" XFs and "computed" XFs. In the latter case,
cell XFs have had the above inheritance mechanism applied.
.. versionadded:: 0.6.1
"""
#: 0 = cell XF, 1 = style XF
is_style = 0
#: cell XF: Index into Book.xf_list of this XF's style XF
#:
#: style XF: 0xFFF
parent_style_index = 0
#
_format_flag = 0
#
_font_flag = 0
#
_alignment_flag = 0
#
_border_flag = 0
#
_background_flag = 0
_protection_flag = 0
#: Index into :attr:`~xlrd.book.Book.xf_list`
xf_index = 0
#: Index into :attr:`~xlrd.book.Book.font_list`
font_index = 0
#: Key into :attr:`~xlrd.book.Book.format_map`
#:
#: .. warning::
#: OOo docs on the XF record call this "Index to FORMAT record".
#: It is not an index in the Python sense. It is a key to a map.
#: It is true *only* for Excel 4.0 and earlier files
#: that the key into format_map from an XF instance
#: is the same as the index into format_list, and *only*
#: if the index is less than 164.
format_key = 0
#: An instance of an :class:`XFProtection` object.
protection = None
#: An instance of an :class:`XFBackground` object.
background = None
#: An instance of an :class:`XFAlignment` object.
alignment = None
#: An instance of an :class:`XFBorder` object.
border = None
| XF |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 1597421,
"end": 1597593
} | class ____(sgqlc.types.Union):
"""Types that can be assigned to reactions."""
__schema__ = github_schema
__types__ = (Bot, Mannequin, Organization, User)
| Reactor |
python | celery__celery | t/unit/backends/test_cache.py | {
"start": 6621,
"end": 8245
} | class ____(MockCacheMixin):
def test_pylibmc(self):
with self.mock_pylibmc():
with conftest.reset_modules('celery.backends.cache'):
from celery.backends import cache
cache._imp = [None]
assert cache.get_best_memcache()[0].__module__ == 'pylibmc'
@pytest.mark.masked_modules('pylibmc')
def test_memcache(self, mask_modules):
with self.mock_memcache():
with conftest.reset_modules('celery.backends.cache'):
from celery.backends import cache
cache._imp = [None]
assert (cache.get_best_memcache()[0]().__module__ ==
'memcache')
@pytest.mark.masked_modules('pylibmc', 'memcache')
def test_no_implementations(self, mask_modules):
with conftest.reset_modules('celery.backends.cache'):
from celery.backends import cache
cache._imp = [None]
with pytest.raises(ImproperlyConfigured):
cache.get_best_memcache()
def test_cached(self):
with self.mock_pylibmc():
with conftest.reset_modules('celery.backends.cache'):
from celery.backends import cache
cache._imp = [None]
cache.get_best_memcache()[0](behaviors={'foo': 'bar'})
assert cache._imp[0]
cache.get_best_memcache()[0]()
def test_backends(self):
from celery.backends.cache import backends
with self.mock_memcache():
for name, fun in backends.items():
assert fun()
| test_get_best_memcache |
python | ansible__ansible | test/units/parsing/vault/test_vault.py | {
"start": 1883,
"end": 2794
} | class ____(unittest.TestCase):
def test(self):
b_plain_data = b'some text to hexlify'
b_data = hexlify(b_plain_data)
res = vault._unhexlify(b_data)
self.assertEqual(res, b_plain_data)
def test_odd_length(self):
b_data = b'123456789abcdefghijklmnopqrstuvwxyz'
self.assertRaisesRegex(vault.AnsibleVaultFormatError,
'.*Vault format unhexlify error.*',
vault._unhexlify,
b_data)
def test_nonhex(self):
b_data = b'6z36316566653264333665333637623064303639353237620a636366633565663263336335656532'
self.assertRaisesRegex(vault.AnsibleVaultFormatError,
'.*Vault format unhexlify error.*Non-hexadecimal digit found',
vault._unhexlify,
b_data)
| TestUnhexlify |
python | protocolbuffers__protobuf | python/google/protobuf/json_format.py | {
"start": 16285,
"end": 37303
} | class ____(object):
"""JSON format parser for protocol message."""
def __init__(
self, ignore_unknown_fields, descriptor_pool, max_recursion_depth
):
self.ignore_unknown_fields = ignore_unknown_fields
self.descriptor_pool = descriptor_pool
self.max_recursion_depth = max_recursion_depth
self.recursion_depth = 0
def ConvertMessage(self, value, message, path):
"""Convert a JSON object into a message.
Args:
value: A JSON object.
message: A WKT or regular protocol message to record the data.
path: parent path to log parse error info.
Raises:
ParseError: In case of convert problems.
"""
self.recursion_depth += 1
if self.recursion_depth > self.max_recursion_depth:
raise ParseError(
'Message too deep. Max recursion depth is {0}'.format(
self.max_recursion_depth
)
)
message_descriptor = message.DESCRIPTOR
full_name = message_descriptor.full_name
if not path:
path = message_descriptor.name
if _IsWrapperMessage(message_descriptor):
self._ConvertWrapperMessage(value, message, path)
elif full_name in _WKTJSONMETHODS:
methodcaller(_WKTJSONMETHODS[full_name][1], value, message, path)(self)
else:
self._ConvertFieldValuePair(value, message, path)
self.recursion_depth -= 1
def _ConvertFieldValuePair(self, js, message, path):
"""Convert field value pairs into regular message.
Args:
js: A JSON object to convert the field value pairs.
message: A regular protocol message to record the data.
path: parent path to log parse error info.
Raises:
ParseError: In case of problems converting.
"""
names = []
message_descriptor = message.DESCRIPTOR
fields_by_json_name = dict(
(f.json_name, f) for f in message_descriptor.fields
)
def _ClearFieldOrExtension(message, field):
if field.is_extension:
message.ClearExtension(field)
else:
message.ClearField(field.name)
def _GetFieldOrExtension(message, field):
if field.is_extension:
return message.Extensions[field]
else:
return getattr(message, field.name)
def _SetFieldOrExtension(message, field, value):
if field.is_extension:
message.Extensions[field] = value
else:
setattr(message, field.name, value)
for name in js:
try:
field = fields_by_json_name.get(name, None)
if not field:
field = message_descriptor.fields_by_name.get(name, None)
if not field and _VALID_EXTENSION_NAME.match(name):
if not message_descriptor.is_extendable:
raise ParseError(
'Message type {0} does not have extensions at {1}'.format(
message_descriptor.full_name, path
)
)
identifier = name[1:-1] # strip [] brackets
# pylint: disable=protected-access
field = message.Extensions._FindExtensionByName(identifier)
# pylint: enable=protected-access
if not field:
# Try looking for extension by the message type name, dropping the
# field name following the final . separator in full_name.
identifier = '.'.join(identifier.split('.')[:-1])
# pylint: disable=protected-access
field = message.Extensions._FindExtensionByName(identifier)
# pylint: enable=protected-access
if not field:
if self.ignore_unknown_fields:
continue
raise ParseError(
(
'Message type "{0}" has no field named "{1}" at "{2}".\n'
' Available Fields(except extensions): "{3}"'
).format(
message_descriptor.full_name,
name,
path,
[f.json_name for f in message_descriptor.fields],
)
)
if name in names:
raise ParseError(
'Message type "{0}" should not have multiple '
'"{1}" fields at "{2}".'.format(
message.DESCRIPTOR.full_name, name, path
)
)
names.append(name)
value = js[name]
# Check no other oneof field is parsed.
if field.containing_oneof is not None and value is not None:
oneof_name = field.containing_oneof.name
if oneof_name in names:
raise ParseError(
'Message type "{0}" should not have multiple '
'"{1}" oneof fields at "{2}".'.format(
message.DESCRIPTOR.full_name, oneof_name, path
)
)
names.append(oneof_name)
if value is None:
if (
field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_MESSAGE
and field.message_type.full_name == 'google.protobuf.Value'
):
sub_message = _GetFieldOrExtension(message, field)
sub_message.null_value = 0
elif (
field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_ENUM
and field.enum_type.full_name == 'google.protobuf.NullValue'
):
_SetFieldOrExtension(message, field, 0)
else:
_ClearFieldOrExtension(message, field)
continue
# Parse field value.
if _IsMapEntry(field):
_ClearFieldOrExtension(message, field)
self._ConvertMapFieldValue(
value, message, field, '{0}.{1}'.format(path, name)
)
elif field.is_repeated:
_ClearFieldOrExtension(message, field)
if not isinstance(value, _LIST_LIKE):
raise ParseError(
'repeated field {0} must be in [] which is {1} at {2}'.format(
name, value, path
)
)
if field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_MESSAGE:
# Repeated message field.
for index, item in enumerate(value):
sub_message = _GetFieldOrExtension(message, field).add()
# None is a null_value in Value.
if (
item is None
and sub_message.DESCRIPTOR.full_name
!= 'google.protobuf.Value'
):
raise ParseError(
'null is not allowed to be used as an element'
' in a repeated field at {0}.{1}[{2}]'.format(
path, name, index
)
)
self.ConvertMessage(
item, sub_message, '{0}.{1}[{2}]'.format(path, name, index)
)
else:
# Repeated scalar field.
for index, item in enumerate(value):
if item is None:
raise ParseError(
'null is not allowed to be used as an element'
' in a repeated field at {0}.{1}[{2}]'.format(
path, name, index
)
)
self._ConvertAndAppendScalar(
message, field, item, '{0}.{1}[{2}]'.format(path, name, index)
)
elif field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_MESSAGE:
sub_message = _GetFieldOrExtension(message, field)
sub_message.SetInParent()
self.ConvertMessage(value, sub_message, '{0}.{1}'.format(path, name))
else:
self._ConvertAndSetScalar(
message, field, value, '{0}.{1}'.format(path, name)
)
except ParseError as e:
if field and field.containing_oneof is None:
raise ParseError(
'Failed to parse {0} field: {1}.'.format(name, e)
) from e
else:
raise ParseError(str(e)) from e
except ValueError as e:
raise ParseError(
'Failed to parse {0} field: {1}.'.format(name, e)
) from e
except TypeError as e:
raise ParseError(
'Failed to parse {0} field: {1}.'.format(name, e)
) from e
def _ConvertAnyMessage(self, value, message, path):
"""Convert a JSON representation into Any message."""
if isinstance(value, dict) and not value:
return
try:
type_url = value['@type']
except KeyError as e:
raise ParseError(
'@type is missing when parsing any message at {0}'.format(path)
) from e
try:
sub_message = _CreateMessageFromTypeUrl(type_url, self.descriptor_pool)
except TypeError as e:
raise ParseError('{0} at {1}'.format(e, path)) from e
message_descriptor = sub_message.DESCRIPTOR
full_name = message_descriptor.full_name
if _IsWrapperMessage(message_descriptor):
self._ConvertWrapperMessage(
value['value'], sub_message, '{0}.value'.format(path)
)
elif full_name in _WKTJSONMETHODS:
methodcaller(
_WKTJSONMETHODS[full_name][1],
value['value'],
sub_message,
'{0}.value'.format(path),
)(self)
else:
del value['@type']
try:
self._ConvertFieldValuePair(value, sub_message, path)
finally:
value['@type'] = type_url
# Sets Any message
message.value = sub_message.SerializeToString()
message.type_url = type_url
def _ConvertGenericMessage(self, value, message, path):
"""Convert a JSON representation into message with FromJsonString."""
# Duration, Timestamp, FieldMask have a FromJsonString method to do the
# conversion. Users can also call the method directly.
try:
message.FromJsonString(value)
except ValueError as e:
raise ParseError('{0} at {1}'.format(e, path)) from e
def _ConvertValueMessage(self, value, message, path):
"""Convert a JSON representation into Value message."""
if isinstance(value, dict):
self._ConvertStructMessage(value, message.struct_value, path)
elif isinstance(value, _LIST_LIKE):
self._ConvertListOrTupleValueMessage(value, message.list_value, path)
elif value is None:
message.null_value = 0
elif isinstance(value, bool):
message.bool_value = value
elif isinstance(value, str):
message.string_value = value
elif isinstance(value, _INT_OR_FLOAT):
message.number_value = value
else:
raise ParseError(
'Value {0} has unexpected type {1} at {2}'.format(
value, type(value), path
)
)
def _ConvertListOrTupleValueMessage(self, value, message, path):
"""Convert a JSON representation into ListValue message."""
if not isinstance(value, _LIST_LIKE):
raise ParseError(
'ListValue must be in [] which is {0} at {1}'.format(value, path)
)
message.ClearField('values')
for index, item in enumerate(value):
self._ConvertValueMessage(
item, message.values.add(), '{0}[{1}]'.format(path, index)
)
def _ConvertStructMessage(self, value, message, path):
"""Convert a JSON representation into Struct message."""
if not isinstance(value, dict):
raise ParseError(
'Struct must be in a dict which is {0} at {1}'.format(value, path)
)
# Clear will mark the struct as modified so it will be created even if
# there are no values.
message.Clear()
for key in value:
self._ConvertValueMessage(
value[key], message.fields[key], '{0}.{1}'.format(path, key)
)
return
def _ConvertWrapperMessage(self, value, message, path):
"""Convert a JSON representation into Wrapper message."""
field = message.DESCRIPTOR.fields_by_name['value']
self._ConvertAndSetScalar(
message, field, value, path='{0}.value'.format(path)
)
def _ConvertMapFieldValue(self, value, message, field, path):
"""Convert map field value for a message map field.
Args:
value: A JSON object to convert the map field value.
message: A protocol message to record the converted data.
field: The descriptor of the map field to be converted.
path: parent path to log parse error info.
Raises:
ParseError: In case of convert problems.
"""
if not isinstance(value, dict):
raise ParseError(
'Map field {0} must be in a dict which is {1} at {2}'.format(
field.name, value, path
)
)
key_field = field.message_type.fields_by_name['key']
value_field = field.message_type.fields_by_name['value']
for key in value:
key_value = _ConvertScalarFieldValue(
key, key_field, '{0}.key'.format(path), True
)
if value_field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_MESSAGE:
self.ConvertMessage(
value[key],
getattr(message, field.name)[key_value],
'{0}[{1}]'.format(path, key_value),
)
else:
self._ConvertAndSetScalarToMapKey(
message,
field,
key_value,
value[key],
path='{0}[{1}]'.format(path, key_value),
)
def _ConvertAndSetScalar(self, message, field, js_value, path):
"""Convert scalar from js_value and assign it to message.field."""
try:
value = _ConvertScalarFieldValue(js_value, field, path)
if field.is_extension:
message.Extensions[field] = value
else:
setattr(message, field.name, value)
except EnumStringValueParseError:
if not self.ignore_unknown_fields:
raise
def _ConvertAndAppendScalar(self, message, repeated_field, js_value, path):
"""Convert scalar from js_value and append it to message.repeated_field."""
try:
if repeated_field.is_extension:
repeated = message.Extensions[repeated_field]
else:
repeated = getattr(message, repeated_field.name)
value = _ConvertScalarFieldValue(js_value, repeated_field, path)
repeated.append(value)
except EnumStringValueParseError:
if not self.ignore_unknown_fields:
raise
def _ConvertAndSetScalarToMapKey(
self, message, map_field, converted_key, js_value, path
):
"""Convert scalar from 'js_value' and add it to message.map_field[converted_key]."""
try:
getattr(message, map_field.name)[converted_key] = (
_ConvertScalarFieldValue(
js_value,
map_field.message_type.fields_by_name['value'],
path,
)
)
except EnumStringValueParseError:
if not self.ignore_unknown_fields:
raise
def _ConvertScalarFieldValue(value, field, path, require_str=False):
"""Convert a single scalar field value.
Args:
value: A scalar value to convert the scalar field value.
field: The descriptor of the field to convert.
path: parent path to log parse error info.
require_str: If True, the field value must be a str.
Returns:
The converted scalar field value
Raises:
ParseError: In case of convert problems.
EnumStringValueParseError: In case of unknown enum string value.
"""
try:
if field.cpp_type in _INT_TYPES:
return _ConvertInteger(value)
elif field.cpp_type in _FLOAT_TYPES:
return _ConvertFloat(value, field)
elif field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_BOOL:
return _ConvertBool(value, require_str)
elif field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_STRING:
if field.type == descriptor.FieldDescriptor.TYPE_BYTES:
if isinstance(value, str):
encoded = value.encode('utf-8')
else:
encoded = value
# Add extra padding '='
padded_value = encoded + b'=' * (4 - len(encoded) % 4)
return base64.urlsafe_b64decode(padded_value)
else:
# Checking for unpaired surrogates appears to be unreliable,
# depending on the specific Python version, so we check manually.
if _UNPAIRED_SURROGATE_PATTERN.search(value):
raise ParseError('Unpaired surrogate')
return value
elif field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_ENUM:
# Convert an enum value.
enum_value = field.enum_type.values_by_name.get(value, None)
if enum_value is None:
try:
number = int(value)
enum_value = field.enum_type.values_by_number.get(number, None)
except ValueError as e:
# Since parsing to integer failed and lookup in values_by_name didn't
# find this name, we have an enum string value which is unknown.
raise EnumStringValueParseError(
'Invalid enum value {0} for enum type {1}'.format(
value, field.enum_type.full_name
)
) from e
if enum_value is None:
if field.enum_type.is_closed:
raise ParseError(
'Invalid enum value {0} for enum type {1}'.format(
value, field.enum_type.full_name
)
)
else:
return number
return enum_value.number
except EnumStringValueParseError as e:
raise EnumStringValueParseError('{0} at {1}'.format(e, path)) from e
except ParseError as e:
raise ParseError('{0} at {1}'.format(e, path)) from e
def _ConvertInteger(value):
"""Convert an integer.
Args:
value: A scalar value to convert.
Returns:
The integer value.
Raises:
ParseError: If an integer couldn't be consumed.
"""
if isinstance(value, float) and not value.is_integer():
raise ParseError("Couldn't parse integer: {0}".format(value))
if isinstance(value, str) and value.find(' ') != -1:
raise ParseError('Couldn\'t parse integer: "{0}"'.format(value))
if isinstance(value, bool):
raise ParseError(
'Bool value {0} is not acceptable for integer field'.format(value)
)
try:
return int(value)
except ValueError as e:
# Attempt to parse as an integer-valued float.
try:
f = float(value)
except ValueError:
# Raise the original exception for the int parse.
raise e # pylint: disable=raise-missing-from
if not f.is_integer():
raise ParseError(
'Couldn\'t parse non-integer string: "{0}"'.format(value)
) from e
return int(f)
def _ConvertFloat(value, field):
"""Convert an floating point number."""
if isinstance(value, float):
if math.isnan(value):
raise ParseError('Couldn\'t parse NaN, use quoted "NaN" instead')
if math.isinf(value):
if value > 0:
raise ParseError(
"Couldn't parse Infinity or value too large, "
'use quoted "Infinity" instead'
)
else:
raise ParseError(
"Couldn't parse -Infinity or value too small, "
'use quoted "-Infinity" instead'
)
if field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_FLOAT:
# pylint: disable=protected-access
if value > type_checkers._FLOAT_MAX:
raise ParseError('Float value too large')
# pylint: disable=protected-access
if value < type_checkers._FLOAT_MIN:
raise ParseError('Float value too small')
if value == 'nan':
raise ParseError('Couldn\'t parse float "nan", use "NaN" instead')
try:
# Assume Python compatible syntax.
return float(value)
except ValueError as e:
# Check alternative spellings.
if value == _NEG_INFINITY:
return float('-inf')
elif value == _INFINITY:
return float('inf')
elif value == _NAN:
return float('nan')
else:
raise ParseError("Couldn't parse float: {0}".format(value)) from e
def _ConvertBool(value, require_str):
"""Convert a boolean value.
Args:
value: A scalar value to convert.
require_str: If True, value must be a str.
Returns:
The bool parsed.
Raises:
ParseError: If a boolean value couldn't be consumed.
"""
if require_str:
if value == 'true':
return True
elif value == 'false':
return False
else:
raise ParseError('Expected "true" or "false", not {0}'.format(value))
if not isinstance(value, bool):
raise ParseError('Expected true or false without quotes')
return value
# Maps a well-known type's full protobuf name to a pair of helper-method
# names (as strings): the first prints the message to a JSON object, the
# second parses it back.
_WKTJSONMETHODS = {
    'google.protobuf.Any': ['_AnyMessageToJsonObject', '_ConvertAnyMessage'],
    'google.protobuf.Duration': [
        '_GenericMessageToJsonObject',
        '_ConvertGenericMessage',
    ],
    'google.protobuf.FieldMask': [
        '_GenericMessageToJsonObject',
        '_ConvertGenericMessage',
    ],
    'google.protobuf.ListValue': [
        '_ListValueMessageToJsonObject',
        '_ConvertListOrTupleValueMessage',
    ],
    'google.protobuf.Struct': [
        '_StructMessageToJsonObject',
        '_ConvertStructMessage',
    ],
    'google.protobuf.Timestamp': [
        '_GenericMessageToJsonObject',
        '_ConvertGenericMessage',
    ],
    'google.protobuf.Value': [
        '_ValueMessageToJsonObject',
        '_ConvertValueMessage',
    ],
}
| _Parser |
python | allegroai__clearml | clearml/backend_api/services/v2_9/tasks.py | {
"start": 174002,
"end": 177308
class ____(Request):
    """
    Adds a task into a queue.
    Fails if task state is not 'created'.
    Fails if the following parameters in the task were not filled:
    * execution.script.repository
    * execution.script.entrypoint
    :param queue: Queue id. If not provided, task is added to the default queue.
    :type queue: str
    :param task: Task ID
    :type task: str
    :param status_reason: Reason for status change
    :type status_reason: str
    :param status_message: Extra information regarding status change
    :type status_message: str
    """
    # Endpoint identity consumed by the Request base class.
    _service = "tasks"
    _action = "enqueue"
    _version = "2.9"
    # JSON schema of the request payload; "task" is the only required field.
    _schema = {
        "definitions": {},
        "properties": {
            "queue": {
                "description": "Queue id. If not provided, task is added to the default queue.",
                "type": ["string", "null"],
            },
            "status_message": {
                "description": "Extra information regarding status change",
                "type": "string",
            },
            "status_reason": {
                "description": "Reason for status change",
                "type": "string",
            },
            "task": {"description": "Task ID", "type": "string"},
        },
        "required": ["task"],
        "type": "object",
    }
    def __init__(
        self,
        task: str,
        queue: Optional[str] = None,
        status_reason: Optional[str] = None,
        status_message: Optional[str] = None,
        **kwargs: Any
    ) -> None:
        super(EnqueueRequest, self).__init__(**kwargs)
        self.queue = queue
        self.task = task
        self.status_reason = status_reason
        self.status_message = status_message
    # Each setter below accepts None to clear the property and otherwise
    # validates that the assigned value is a string.
    @schema_property("queue")
    def queue(self) -> Optional[str]:
        return self._property_queue
    @queue.setter
    def queue(self, value: Optional[str]) -> None:
        if value is None:
            self._property_queue = None
            return
        self.assert_isinstance(value, "queue", six.string_types)
        self._property_queue = value
    @schema_property("task")
    def task(self) -> str:
        return self._property_task
    @task.setter
    def task(self, value: str) -> None:
        if value is None:
            self._property_task = None
            return
        self.assert_isinstance(value, "task", six.string_types)
        self._property_task = value
    @schema_property("status_reason")
    def status_reason(self) -> Optional[str]:
        return self._property_status_reason
    @status_reason.setter
    def status_reason(self, value: Optional[str]) -> None:
        if value is None:
            self._property_status_reason = None
            return
        self.assert_isinstance(value, "status_reason", six.string_types)
        self._property_status_reason = value
    @schema_property("status_message")
    def status_message(self) -> Optional[str]:
        return self._property_status_message
    @status_message.setter
    def status_message(self, value: Optional[str]) -> None:
        if value is None:
            self._property_status_message = None
            return
        self.assert_isinstance(value, "status_message", six.string_types)
        self._property_status_message = value
| EnqueueRequest |
python | redis__redis-py | redis/asyncio/cluster.py | {
"start": 54353,
"end": 67252
class ____:
    """Tracks Redis Cluster topology for the async client: the known nodes,
    the slot -> node assignments, the startup nodes, and the default node
    used for non-keyed commands.
    """
    __slots__ = (
        "_dynamic_startup_nodes",
        "_moved_exception",
        "_event_dispatcher",
        "connection_kwargs",
        "default_node",
        "nodes_cache",
        "read_load_balancer",
        "require_full_coverage",
        "slots_cache",
        "startup_nodes",
        "address_remap",
    )
    def __init__(
        self,
        startup_nodes: List["ClusterNode"],
        require_full_coverage: bool,
        connection_kwargs: Dict[str, Any],
        dynamic_startup_nodes: bool = True,
        address_remap: Optional[Callable[[Tuple[str, int]], Tuple[str, int]]] = None,
        event_dispatcher: Optional[EventDispatcher] = None,
    ) -> None:
        self.startup_nodes = {node.name: node for node in startup_nodes}
        self.require_full_coverage = require_full_coverage
        self.connection_kwargs = connection_kwargs
        self.address_remap = address_remap
        self.default_node: "ClusterNode" = None
        self.nodes_cache: Dict[str, "ClusterNode"] = {}
        self.slots_cache: Dict[int, List["ClusterNode"]] = {}
        self.read_load_balancer = LoadBalancer()
        self._dynamic_startup_nodes: bool = dynamic_startup_nodes
        # Last MOVED redirection seen; consumed lazily in get_node_from_slot.
        self._moved_exception: MovedError = None
        if event_dispatcher is None:
            self._event_dispatcher = EventDispatcher()
        else:
            self._event_dispatcher = event_dispatcher
    def get_node(
        self,
        host: Optional[str] = None,
        port: Optional[int] = None,
        node_name: Optional[str] = None,
    ) -> Optional["ClusterNode"]:
        """Look up a cached node either by (host, port) or by node name."""
        if host and port:
            # the user passed host and port
            if host == "localhost":
                host = socket.gethostbyname(host)
            return self.nodes_cache.get(get_node_name(host=host, port=port))
        elif node_name:
            return self.nodes_cache.get(node_name)
        else:
            raise DataError(
                "get_node requires one of the following: 1. node name 2. host and port"
            )
    def set_nodes(
        self,
        old: Dict[str, "ClusterNode"],
        new: Dict[str, "ClusterNode"],
        remove_old: bool = False,
    ) -> None:
        """Merge ``new`` into ``old`` in place, scheduling disconnects for
        nodes that are replaced (and, if ``remove_old``, for nodes dropped).
        """
        if remove_old:
            for name in list(old.keys()):
                if name not in new:
                    task = asyncio.create_task(old.pop(name).disconnect())  # noqa
        for name, node in new.items():
            if name in old:
                if old[name] is node:
                    continue
                task = asyncio.create_task(old[name].disconnect())  # noqa
            old[name] = node
    def update_moved_exception(self, exception):
        # Record the MOVED error; slot remapping happens on the next lookup.
        self._moved_exception = exception
    def _update_moved_slots(self) -> None:
        """Apply the pending MOVED redirection to the slot cache."""
        e = self._moved_exception
        redirected_node = self.get_node(host=e.host, port=e.port)
        if redirected_node:
            # The node already exists
            if redirected_node.server_type != PRIMARY:
                # Update the node's server type
                redirected_node.server_type = PRIMARY
        else:
            # This is a new node, we will add it to the nodes cache
            redirected_node = ClusterNode(
                e.host, e.port, PRIMARY, **self.connection_kwargs
            )
            self.set_nodes(self.nodes_cache, {redirected_node.name: redirected_node})
        if redirected_node in self.slots_cache[e.slot_id]:
            # The MOVED error resulted from a failover, and the new slot owner
            # had previously been a replica.
            old_primary = self.slots_cache[e.slot_id][0]
            # Update the old primary to be a replica and add it to the end of
            # the slot's node list
            old_primary.server_type = REPLICA
            self.slots_cache[e.slot_id].append(old_primary)
            # Remove the old replica, which is now a primary, from the slot's
            # node list
            self.slots_cache[e.slot_id].remove(redirected_node)
            # Override the old primary with the new one
            self.slots_cache[e.slot_id][0] = redirected_node
            if self.default_node == old_primary:
                # Update the default node with the new primary
                self.default_node = redirected_node
        else:
            # The new slot owner is a new server, or a server from a different
            # shard. We need to remove all current nodes from the slot's list
            # (including replications) and add just the new node.
            self.slots_cache[e.slot_id] = [redirected_node]
        # Reset moved_exception
        self._moved_exception = None
    def get_node_from_slot(
        self,
        slot: int,
        read_from_replicas: bool = False,
        load_balancing_strategy=None,
    ) -> "ClusterNode":
        """Return the node that should serve ``slot``, optionally balancing
        reads across the slot's replicas.
        """
        if self._moved_exception:
            self._update_moved_slots()
        if read_from_replicas is True and load_balancing_strategy is None:
            load_balancing_strategy = LoadBalancingStrategy.ROUND_ROBIN
        try:
            if len(self.slots_cache[slot]) > 1 and load_balancing_strategy:
                # get the server index using the strategy defined in load_balancing_strategy
                primary_name = self.slots_cache[slot][0].name
                node_idx = self.read_load_balancer.get_server_index(
                    primary_name, len(self.slots_cache[slot]), load_balancing_strategy
                )
                return self.slots_cache[slot][node_idx]
            return self.slots_cache[slot][0]
        except (IndexError, TypeError):
            raise SlotNotCoveredError(
                f'Slot "{slot}" not covered by the cluster. '
                f'"require_full_coverage={self.require_full_coverage}"'
            )
    def get_nodes_by_server_type(self, server_type: str) -> List["ClusterNode"]:
        """Return all cached nodes of the given server type (primary/replica)."""
        return [
            node
            for node in self.nodes_cache.values()
            if node.server_type == server_type
        ]
    async def initialize(self) -> None:
        """(Re)build the node and slot caches by querying CLUSTER SLOTS on
        the startup nodes until one is reachable.
        """
        self.read_load_balancer.reset()
        tmp_nodes_cache: Dict[str, "ClusterNode"] = {}
        tmp_slots: Dict[int, List["ClusterNode"]] = {}
        disagreements = []
        startup_nodes_reachable = False
        fully_covered = False
        exception = None
        # Convert to tuple to prevent RuntimeError if self.startup_nodes
        # is modified during iteration
        for startup_node in tuple(self.startup_nodes.values()):
            try:
                # Make sure cluster mode is enabled on this node
                try:
                    self._event_dispatcher.dispatch(
                        AfterAsyncClusterInstantiationEvent(
                            self.nodes_cache,
                            self.connection_kwargs.get("credential_provider", None),
                        )
                    )
                    cluster_slots = await startup_node.execute_command("CLUSTER SLOTS")
                except ResponseError:
                    raise RedisClusterException(
                        "Cluster mode is not enabled on this node"
                    )
                startup_nodes_reachable = True
            except Exception as e:
                # Try the next startup node.
                # The exception is saved and raised only if we have no more nodes.
                exception = e
                continue
            # CLUSTER SLOTS command results in the following output:
            # [[slot_section[from_slot,to_slot,master,replica1,...,replicaN]]]
            # where each node contains the following list: [IP, port, node_id]
            # Therefore, cluster_slots[0][2][0] will be the IP address of the
            # primary node of the first slot section.
            # If there's only one server in the cluster, its ``host`` is ''
            # Fix it to the host in startup_nodes
            if (
                len(cluster_slots) == 1
                and not cluster_slots[0][2][0]
                and len(self.startup_nodes) == 1
            ):
                cluster_slots[0][2][0] = startup_node.host
            for slot in cluster_slots:
                for i in range(2, len(slot)):
                    slot[i] = [str_if_bytes(val) for val in slot[i]]
                primary_node = slot[2]
                host = primary_node[0]
                if host == "":
                    host = startup_node.host
                port = int(primary_node[1])
                host, port = self.remap_host_port(host, port)
                nodes_for_slot = []
                target_node = tmp_nodes_cache.get(get_node_name(host, port))
                if not target_node:
                    target_node = ClusterNode(
                        host, port, PRIMARY, **self.connection_kwargs
                    )
                # add this node to the nodes cache
                tmp_nodes_cache[target_node.name] = target_node
                nodes_for_slot.append(target_node)
                replica_nodes = slot[3:]
                for replica_node in replica_nodes:
                    host = replica_node[0]
                    port = replica_node[1]
                    host, port = self.remap_host_port(host, port)
                    target_replica_node = tmp_nodes_cache.get(get_node_name(host, port))
                    if not target_replica_node:
                        target_replica_node = ClusterNode(
                            host, port, REPLICA, **self.connection_kwargs
                        )
                    # add this node to the nodes cache
                    tmp_nodes_cache[target_replica_node.name] = target_replica_node
                    nodes_for_slot.append(target_replica_node)
                for i in range(int(slot[0]), int(slot[1]) + 1):
                    if i not in tmp_slots:
                        tmp_slots[i] = nodes_for_slot
                    else:
                        # Validate that 2 nodes want to use the same slot cache
                        # setup
                        tmp_slot = tmp_slots[i][0]
                        if tmp_slot.name != target_node.name:
                            disagreements.append(
                                f"{tmp_slot.name} vs {target_node.name} on slot: {i}"
                            )
                            if len(disagreements) > 5:
                                raise RedisClusterException(
                                    f"startup_nodes could not agree on a valid "
                                    f"slots cache: {', '.join(disagreements)}"
                                )
            # Validate if all slots are covered or if we should try next startup node
            fully_covered = True
            for i in range(REDIS_CLUSTER_HASH_SLOTS):
                if i not in tmp_slots:
                    fully_covered = False
                    break
            if fully_covered:
                break
        if not startup_nodes_reachable:
            raise RedisClusterException(
                f"Redis Cluster cannot be connected. Please provide at least "
                f"one reachable node: {str(exception)}"
            ) from exception
        # Check if the slots are not fully covered
        if not fully_covered and self.require_full_coverage:
            # Despite the requirement that the slots be covered, there
            # isn't a full coverage
            raise RedisClusterException(
                f"All slots are not covered after query all startup_nodes. "
                f"{len(tmp_slots)} of {REDIS_CLUSTER_HASH_SLOTS} "
                f"covered..."
            )
        # Set the tmp variables to the real variables
        self.slots_cache = tmp_slots
        self.set_nodes(self.nodes_cache, tmp_nodes_cache, remove_old=True)
        if self._dynamic_startup_nodes:
            # Populate the startup nodes with all discovered nodes
            self.set_nodes(self.startup_nodes, self.nodes_cache, remove_old=True)
        # Set the default node
        self.default_node = self.get_nodes_by_server_type(PRIMARY)[0]
        # If initialize was called after a MovedError, clear it
        self._moved_exception = None
    async def aclose(self, attr: str = "nodes_cache") -> None:
        """Disconnect every node stored under ``attr`` and clear the default node."""
        self.default_node = None
        await asyncio.gather(
            *(
                asyncio.create_task(node.disconnect())
                for node in getattr(self, attr).values()
            )
        )
    def remap_host_port(self, host: str, port: int) -> Tuple[str, int]:
        """
        Remap the host and port returned from the cluster to a different
        internal value. Useful if the client is not connecting directly
        to the cluster.
        """
        if self.address_remap:
            return self.address_remap((host, port))
        return host, port
| NodesManager |
python | doocs__leetcode | solution/2700-2799/2768.Number of Black Blocks/Solution.py | {
"start": 0,
"end": 509
} | class ____:
def countBlackBlocks(
self, m: int, n: int, coordinates: List[List[int]]
) -> List[int]:
cnt = Counter()
for x, y in coordinates:
for a, b in pairwise((0, 0, -1, -1, 0)):
i, j = x + a, y + b
if 0 <= i < m - 1 and 0 <= j < n - 1:
cnt[(i, j)] += 1
ans = [0] * 5
for x in cnt.values():
ans[x] += 1
ans[0] = (m - 1) * (n - 1) - len(cnt.values())
return ans
| Solution |
python | huggingface__transformers | src/transformers/models/instructblip/modeling_instructblip.py | {
"start": 27398,
"end": 28542
class ____(nn.Module):
    """Stack of Q-Former layers applied sequentially to the hidden states,
    optionally cross-attending to image encoder outputs.
    """
    def __init__(self, config):
        super().__init__()
        self.config = config
        # One layer per configured hidden layer; layer_idx tells each layer
        # its depth (presumably used to decide where cross-attention is
        # inserted — confirm against InstructBlipQFormerLayer).
        self.layer = nn.ModuleList(
            [InstructBlipQFormerLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
        )
        self.gradient_checkpointing = False
    @can_return_tuple
    def forward(
        self,
        hidden_states,
        attention_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        query_length=0,
        **kwargs: Unpack[TransformersKwargs],
    ):
        # Feed the hidden states through each layer in order; every layer
        # returns the updated hidden states for the next one.
        for i in range(self.config.num_hidden_layers):
            layer_module = self.layer[i]
            hidden_states = layer_module(
                hidden_states,
                attention_mask,
                encoder_hidden_states,  # as a positional argument for gradient checkpointing
                encoder_attention_mask=encoder_attention_mask,
                query_length=query_length,
                **kwargs,
            )
        return BaseModelOutputWithPastAndCrossAttentions(
            last_hidden_state=hidden_states,
        )
| InstructBlipQFormerEncoder |
python | apache__thrift | test/py/SerializationTest.py | {
"start": 13336,
"end": 13470
class ____(AbstractTest):
    # Runs the shared AbstractTest suite against the C-accelerated binary
    # protocol.  fallback=False appears to forbid silently falling back to
    # the pure-Python implementation — confirm against the
    # TBinaryProtocolAcceleratedFactory docs.
    protocol_factory = TBinaryProtocol.TBinaryProtocolAcceleratedFactory(fallback=False)
| AcceleratedBinaryTest |
python | sqlalchemy__sqlalchemy | test/orm/test_events.py | {
"start": 35379,
"end": 57272
class ____(RemoveORMEventsGlobally, _fixtures.FixtureTest):
    """Exercises mapper- and instance-level ORM event hooks: listener
    targets and propagation to subclasses, listener insertion order,
    lifecycle event sequencing, and the configure-time events
    (instrument_class, before/after_configured).
    """
    run_inserts = None
    @classmethod
    def define_tables(cls, metadata):
        super().define_tables(metadata)
        metadata.tables["users"].append_column(
            Column("extra", Integer, default=5, onupdate=10)
        )
    def test_instance_event_listen(self):
        """test listen targets for instance events"""
        users, addresses = self.tables.users, self.tables.addresses
        canary = []
        class A:
            pass
        class B(A):
            pass
        self.mapper_registry.map_imperatively(A, users)
        self.mapper_registry.map_imperatively(
            B, addresses, inherits=A, properties={"address_id": addresses.c.id}
        )
        def init_a(target, args, kwargs):
            canary.append(("init_a", target))
        def init_b(target, args, kwargs):
            canary.append(("init_b", target))
        def init_c(target, args, kwargs):
            canary.append(("init_c", target))
        def init_d(target, args, kwargs):
            canary.append(("init_d", target))
        def init_e(target, args, kwargs):
            canary.append(("init_e", target))
        event.listen(Mapper, "init", init_a)
        event.listen(class_mapper(A), "init", init_b)
        event.listen(A, "init", init_c)
        event.listen(A, "init", init_d, propagate=True)
        a = A()
        eq_(
            canary,
            [
                ("init_a", a),
                ("init_b", a),
                ("init_c", a),
                ("init_d", a),
            ],
        )
        # test propagate flag
        canary[:] = []
        b = B()
        eq_(canary, [("init_a", b), ("init_d", b)])
    def listen_all(self, mapper, **kw):
        # Registers a recorder for every lifecycle event and returns the
        # list that accumulates fired event names, in order.
        canary = []
        def evt(meth):
            def go(*args, **kwargs):
                canary.append(meth)
            return go
        for meth in [
            "init",
            "init_failure",
            "load",
            "refresh",
            "refresh_flush",
            "expire",
            "before_insert",
            "after_insert",
            "before_update",
            "after_update",
            "before_delete",
            "after_delete",
        ]:
            event.listen(mapper, meth, evt(meth), **kw)
        return canary
    def test_init_allow_kw_modify(self):
        User, users = self.classes.User, self.tables.users
        self.mapper_registry.map_imperatively(User, users)
        @event.listens_for(User, "init")
        def add_name(obj, args, kwargs):
            kwargs["name"] = "ed"
        u1 = User()
        eq_(u1.name, "ed")
    def test_init_failure_hook(self):
        users = self.tables.users
        class Thing:
            def __init__(self, **kw):
                if kw.get("fail"):
                    raise Exception("failure")
        self.mapper_registry.map_imperatively(Thing, users)
        canary = Mock()
        event.listen(Thing, "init_failure", canary)
        Thing()
        eq_(canary.mock_calls, [])
        assert_raises_message(Exception, "failure", Thing, fail=True)
        eq_(canary.mock_calls, [call(ANY, (), {"fail": True})])
    def test_listen_doesnt_force_compile(self):
        User, users = self.classes.User, self.tables.users
        m = self.mapper_registry.map_imperatively(
            User,
            users,
            properties={
                # intentionally non-existent class to ensure
                # the lambda is not called, simulates a class from
                # a not-yet-imported module
                "addresses": relationship(lambda: ImNotAClass)  # noqa
            },
        )
        event.listen(User, "before_insert", lambda *a, **kw: None)
        assert not m.configured
    def test_basic(self):
        User, users = self.classes.User, self.tables.users
        self.mapper_registry.map_imperatively(User, users)
        canary = self.listen_all(User)
        named_canary = self.listen_all(User, named=True)
        sess = fixture_session()
        u = User(name="u1")
        sess.add(u)
        sess.flush()
        sess.expire(u)
        u = sess.get(User, u.id)
        sess.expunge_all()
        u = sess.get(User, u.id)
        u.name = "u1 changed"
        sess.flush()
        sess.delete(u)
        sess.flush()
        expected = [
            "init",
            "before_insert",
            "refresh_flush",
            "after_insert",
            "expire",
            "refresh",
            "load",
            "before_update",
            "refresh_flush",
            "after_update",
            "before_delete",
            "after_delete",
        ]
        eq_(canary, expected)
        eq_(named_canary, expected)
    def test_insert_before_configured(self):
        users, User = self.tables.users, self.classes.User
        self.mapper_registry.map_imperatively(User, users)
        canary = Mock()
        event.listen(Mapper, "before_configured", canary.listen1)
        event.listen(Mapper, "before_configured", canary.listen2, insert=True)
        event.listen(Mapper, "before_configured", canary.listen3)
        event.listen(Mapper, "before_configured", canary.listen4, insert=True)
        configure_mappers()
        eq_(
            canary.mock_calls,
            [call.listen4(), call.listen2(), call.listen1(), call.listen3()],
        )
    def test_insert_flags(self):
        users, User = self.tables.users, self.classes.User
        m = self.mapper_registry.map_imperatively(User, users)
        canary = Mock()
        arg = Mock()
        event.listen(m, "before_insert", canary.listen1)
        event.listen(m, "before_insert", canary.listen2, insert=True)
        event.listen(
            m, "before_insert", canary.listen3, propagate=True, insert=True
        )
        event.listen(m, "load", canary.listen4)
        event.listen(m, "load", canary.listen5, insert=True)
        event.listen(m, "load", canary.listen6, propagate=True, insert=True)
        User()
        m.dispatch.before_insert(arg, arg, arg)
        m.class_manager.dispatch.load(arg, arg)
        eq_(
            canary.mock_calls,
            [
                call.listen3(arg, arg, arg.obj()),
                call.listen2(arg, arg, arg.obj()),
                call.listen1(arg, arg, arg.obj()),
                call.listen6(arg.obj(), arg),
                call.listen5(arg.obj(), arg),
                call.listen4(arg.obj(), arg),
            ],
        )
    def test_merge(self):
        users, User = self.tables.users, self.classes.User
        self.mapper_registry.map_imperatively(User, users)
        canary = []
        def load(obj, ctx):
            canary.append("load")
        event.listen(Mapper, "load", load)
        s = fixture_session()
        u = User(name="u1")
        s.add(u)
        s.commit()
        s = fixture_session()
        u2 = s.merge(u)
        s = fixture_session()
        u2 = s.merge(User(name="u2"))  # noqa
        s.commit()
        s.query(User).order_by(User.id).first()
        eq_(canary, ["load", "load", "load"])
    def test_inheritance(self):
        users, addresses, User = (
            self.tables.users,
            self.tables.addresses,
            self.classes.User,
        )
        class AdminUser(User):
            pass
        self.mapper_registry.map_imperatively(User, users)
        self.mapper_registry.map_imperatively(
            AdminUser,
            addresses,
            inherits=User,
            properties={"address_id": addresses.c.id},
        )
        canary1 = self.listen_all(User, propagate=True)
        canary2 = self.listen_all(User)
        canary3 = self.listen_all(AdminUser)
        sess = fixture_session()
        am = AdminUser(name="au1", email_address="au1@e1")
        sess.add(am)
        sess.flush()
        am = sess.get(AdminUser, am.id, populate_existing=True)
        sess.expunge_all()
        am = sess.get(AdminUser, am.id)
        am.name = "au1 changed"
        sess.flush()
        sess.delete(am)
        sess.flush()
        eq_(
            canary1,
            [
                "init",
                "before_insert",
                "refresh_flush",
                "after_insert",
                "refresh",
                "load",
                "before_update",
                "refresh_flush",
                "after_update",
                "before_delete",
                "after_delete",
            ],
        )
        eq_(canary2, [])
        eq_(
            canary3,
            [
                "init",
                "before_insert",
                "refresh_flush",
                "after_insert",
                "refresh",
                "load",
                "before_update",
                "refresh_flush",
                "after_update",
                "before_delete",
                "after_delete",
            ],
        )
    def test_inheritance_subclass_deferred(self):
        users, addresses, User = (
            self.tables.users,
            self.tables.addresses,
            self.classes.User,
        )
        self.mapper_registry.map_imperatively(User, users)
        canary1 = self.listen_all(User, propagate=True)
        canary2 = self.listen_all(User)
        class AdminUser(User):
            pass
        self.mapper_registry.map_imperatively(
            AdminUser,
            addresses,
            inherits=User,
            properties={"address_id": addresses.c.id},
        )
        canary3 = self.listen_all(AdminUser)
        sess = fixture_session()
        am = AdminUser(name="au1", email_address="au1@e1")
        sess.add(am)
        sess.flush()
        am = sess.get(AdminUser, am.id, populate_existing=True)
        sess.expunge_all()
        am = sess.get(AdminUser, am.id)
        am.name = "au1 changed"
        sess.flush()
        sess.delete(am)
        sess.flush()
        eq_(
            canary1,
            [
                "init",
                "before_insert",
                "refresh_flush",
                "after_insert",
                "refresh",
                "load",
                "before_update",
                "refresh_flush",
                "after_update",
                "before_delete",
                "after_delete",
            ],
        )
        eq_(canary2, [])
        eq_(
            canary3,
            [
                "init",
                "before_insert",
                "refresh_flush",
                "after_insert",
                "refresh",
                "load",
                "before_update",
                "refresh_flush",
                "after_update",
                "before_delete",
                "after_delete",
            ],
        )
    def test_before_after_only_collection(self):
        """before_update is called on parent for collection modifications,
        after_update is called even if no columns were updated.
        """
        keywords, items, item_keywords, Keyword, Item = (
            self.tables.keywords,
            self.tables.items,
            self.tables.item_keywords,
            self.classes.Keyword,
            self.classes.Item,
        )
        self.mapper_registry.map_imperatively(
            Item,
            items,
            properties={
                "keywords": relationship(Keyword, secondary=item_keywords)
            },
        )
        self.mapper_registry.map_imperatively(Keyword, keywords)
        canary1 = self.listen_all(Item)
        canary2 = self.listen_all(Keyword)
        sess = fixture_session()
        i1 = Item(description="i1")
        k1 = Keyword(name="k1")
        sess.add(i1)
        sess.add(k1)
        sess.flush()
        eq_(canary1, ["init", "before_insert", "after_insert"])
        eq_(canary2, ["init", "before_insert", "after_insert"])
        canary1[:] = []
        canary2[:] = []
        i1.keywords.append(k1)
        sess.flush()
        eq_(canary1, ["before_update", "after_update"])
        eq_(canary2, [])
    @testing.combinations(
        ("before_configured",), ("after_configured",), argnames="event_name"
    )
    @testing.variation(
        "target_type",
        [
            "mappercls",
            "mapperinstance",
            "registry",
            "explicit_base",
            "imperative_class",
            "declarative_class",
        ],
    )
    def test_before_after_configured_only_on_mappercls_or_registry(
        self, event_name, target_type: testing.Variation
    ):
        User, users = self.classes.User, self.tables.users
        reg = registry()
        expect_success = (
            target_type.mappercls
            or target_type.registry
            or target_type.explicit_base
        )
        if target_type.mappercls:
            target = Mapper
        elif target_type.mapperinstance:
            reg.map_imperatively(User, users)
            target = inspect(User)
        elif target_type.registry:
            target = reg
        elif target_type.imperative_class:
            reg.map_imperatively(User, users)
            target = User
        elif target_type.explicit_base:
            class Base(DeclarativeBase):
                registry = reg
            target = Base
        elif target_type.declarative_class:
            class Base(DeclarativeBase):
                registry = reg
            class User(Base):
                __table__ = users
            target = User
        else:
            target_type.fail()
        m1 = Mock()
        if expect_success:
            event.listen(target, event_name, m1)
        else:
            with expect_raises_message(
                sa_exc.InvalidRequestError,
                re.escape(
                    f"No such event {event_name!r} for target '{target}'"
                ),
            ):
                event.listen(target, event_name, m1)
    def test_before_after_configured(self):
        User, users = self.classes.User, self.tables.users
        m1 = Mock()
        m2 = Mock()
        self.mapper_registry.map_imperatively(User, users)
        event.listen(Mapper, "before_configured", m1)
        event.listen(Mapper, "after_configured", m2)
        inspect(User)._post_inspect
        eq_(m1.mock_calls, [call()])
        eq_(m2.mock_calls, [call()])
    def test_instrument_event(self):
        Address, addresses, users, User = (
            self.classes.Address,
            self.tables.addresses,
            self.tables.users,
            self.classes.User,
        )
        canary = []
        def instrument_class(mapper, cls):
            canary.append(cls)
        event.listen(Mapper, "instrument_class", instrument_class)
        self.mapper_registry.map_imperatively(User, users)
        eq_(canary, [User])
        self.mapper_registry.map_imperatively(Address, addresses)
        eq_(canary, [User, Address])
    def test_instrument_class_precedes_class_instrumentation(self):
        users = self.tables.users
        class MyClass:
            pass
        class MySubClass(MyClass):
            pass
        canary = Mock()
        def my_init(self):
            canary.init()
        # mapper level event
        @event.listens_for(Mapper, "instrument_class")
        def instrument_class(mp, class_):
            canary.instrument_class(class_)
            class_.__init__ = my_init
        # instrumentationmanager event
        @event.listens_for(object, "class_instrument")
        def class_instrument(class_):
            canary.class_instrument(class_)
        self.mapper_registry.map_imperatively(MyClass, users)
        self.mapper_registry.map_imperatively(MySubClass, inherits=MyClass)
        m1 = MyClass()
        assert attributes.instance_state(m1)
        m2 = MySubClass()
        assert attributes.instance_state(m2)
        eq_(
            [
                call.instrument_class(MyClass),
                call.class_instrument(MyClass),
                call.instrument_class(MySubClass),
                call.class_instrument(MySubClass),
                call.init(),
                call.init(),
            ],
            canary.mock_calls,
        )
    @testing.variation(
        "listen_type",
        ["listen_on_mapper", "listen_on_base", "listen_on_mixin"],
    )
    def test_mapper_config_sequence(self, decl_base, listen_type):
        canary = Mock()
        if listen_type.listen_on_mapper:
            event.listen(Mapper, "instrument_class", canary.instrument_class)
            event.listen(
                Mapper,
                "after_mapper_constructed",
                canary.after_mapper_constructed,
            )
        elif listen_type.listen_on_base:
            event.listen(
                decl_base,
                "instrument_class",
                canary.instrument_class,
                propagate=True,
            )
            event.listen(
                decl_base,
                "after_mapper_constructed",
                canary.after_mapper_constructed,
                propagate=True,
            )
        elif listen_type.listen_on_mixin:
            class Mixin:
                pass
            event.listen(
                Mixin,
                "instrument_class",
                canary.instrument_class,
                propagate=True,
            )
            event.listen(
                Mixin,
                "after_mapper_constructed",
                canary.after_mapper_constructed,
                propagate=True,
            )
        else:
            listen_type.fail()
        event.listen(object, "class_instrument", canary.class_instrument)
        event.listen(Mapper, "before_configured", canary.before_configured)
        event.listen(
            Mapper, "before_mapper_configured", canary.before_mapper_configured
        )
        event.listen(Mapper, "after_configured", canary.after_configured)
        if listen_type.listen_on_mixin:
            class Thing(Mixin, decl_base):
                __tablename__ = "thing"
                id = Column(Integer, primary_key=True)
        else:
            class Thing(decl_base):
                __tablename__ = "thing"
                id = Column(Integer, primary_key=True)
        mp = inspect(Thing)
        eq_(
            canary.mock_calls,
            [
                call.instrument_class(mp, Thing),
                call.class_instrument(Thing),
                call.after_mapper_constructed(mp, Thing),
            ],
        )
        decl_base.registry.configure()
        eq_(
            canary.mock_calls,
            [
                call.instrument_class(mp, Thing),
                call.class_instrument(Thing),
                call.after_mapper_constructed(mp, Thing),
                call.before_configured(),
                call.before_mapper_configured(mp, Thing),
                call.after_configured(),
            ],
        )
    @testing.combinations((True,), (False,), argnames="create_dependency")
    @testing.combinations((True,), (False,), argnames="configure_at_once")
    def test_before_mapper_configured_event(
        self, create_dependency, configure_at_once
    ):
        """Test [ticket:4397].
        This event is intended to allow a specific mapper to be skipped during
        the configure step, by returning a value of
        :attr:`.orm.interfaces.EXT_SKIP` which means the mapper will be skipped
        within this configure run. The "new mappers" flag will remain set in
        this case and the configure operation will occur again.
        This event, and its return value, make it possible to query one base
        while a different one still needs configuration, which cannot be
        completed at this time.
        """
        User, users = self.classes.User, self.tables.users
        ump = self.mapper_registry.map_imperatively(User, users)
        AnotherBase = declarative_base()
        class Animal(AnotherBase):
            __tablename__ = "animal"
            species = Column(String(30), primary_key=True)
            __mapper_args__ = dict(
                polymorphic_on="species", polymorphic_identity="Animal"
            )
            if create_dependency:
                user_id = Column("user_id", ForeignKey(users.c.id))
        if not configure_at_once:
            # Register the first classes and create their Mappers:
            configure_mappers()
            unconfigured = list(mapperlib._unconfigured_mappers())
            eq_(0, len(unconfigured))
        if create_dependency:
            ump.add_property("animal", relationship(Animal))
        # Declare a subclass, table and mapper, which refers to one that has
        # not been loaded yet (Employer), and therefore cannot be configured:
        class Mammal(Animal):
            nonexistent = relationship("Nonexistent")
            __mapper_args__ = {"polymorphic_identity": "mammal"}
        # These new classes should not be configured at this point:
        unconfigured = list(mapperlib._unconfigured_mappers())
        if configure_at_once:
            eq_(3, len(unconfigured))
        else:
            eq_(1, len(unconfigured))
        # Now try to query User, which is internally consistent. This query
        # fails by default because Mammal needs to be configured, and cannot
        # be:
        def probe():
            inspect(User)._post_inspect
        if create_dependency:
            assert_raises(sa.exc.InvalidRequestError, probe)
        else:
            probe()
        # If we disable configuring mappers while querying, then it succeeds:
        @event.listens_for(
            AnotherBase,
            "before_mapper_configured",
            propagate=True,
            retval=True,
        )
        def disable_configure_mappers(mapper, cls):
            return EXT_SKIP
        probe()
python | kubernetes-client__python | kubernetes/client/models/v1beta1_param_kind.py | {
"start": 383,
"end": 4438
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'api_version': 'str',
'kind': 'str'
}
attribute_map = {
'api_version': 'apiVersion',
'kind': 'kind'
}
def __init__(self, api_version=None, kind=None, local_vars_configuration=None): # noqa: E501
"""V1beta1ParamKind - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._api_version = None
self._kind = None
self.discriminator = None
if api_version is not None:
self.api_version = api_version
if kind is not None:
self.kind = kind
@property
def api_version(self):
"""Gets the api_version of this V1beta1ParamKind. # noqa: E501
APIVersion is the API group version the resources belong to. In format of \"group/version\". Required. # noqa: E501
:return: The api_version of this V1beta1ParamKind. # noqa: E501
:rtype: str
"""
return self._api_version
@api_version.setter
def api_version(self, api_version):
"""Sets the api_version of this V1beta1ParamKind.
APIVersion is the API group version the resources belong to. In format of \"group/version\". Required. # noqa: E501
:param api_version: The api_version of this V1beta1ParamKind. # noqa: E501
:type: str
"""
self._api_version = api_version
@property
def kind(self):
"""Gets the kind of this V1beta1ParamKind. # noqa: E501
Kind is the API kind the resources belong to. Required. # noqa: E501
:return: The kind of this V1beta1ParamKind. # noqa: E501
:rtype: str
"""
return self._kind
@kind.setter
def kind(self, kind):
"""Sets the kind of this V1beta1ParamKind.
Kind is the API kind the resources belong to. Required. # noqa: E501
:param kind: The kind of this V1beta1ParamKind. # noqa: E501
:type: str
"""
self._kind = kind
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1beta1ParamKind):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1beta1ParamKind):
return True
return self.to_dict() != other.to_dict()
| V1beta1ParamKind |
python | django__django | django/middleware/common.py | {
"start": 373,
"end": 5101
} | class ____(MiddlewareMixin):
"""
"Common" middleware for taking care of some basic operations:
- Forbid access to User-Agents in settings.DISALLOWED_USER_AGENTS
- URL rewriting: Based on the APPEND_SLASH and PREPEND_WWW settings,
append missing slashes and/or prepends missing "www."s.
- If APPEND_SLASH is set and the initial URL doesn't end with a
slash, and it is not found in urlpatterns, form a new URL by
appending a slash at the end. If this new URL is found in
urlpatterns, return an HTTP redirect to this new URL; otherwise
process the initial URL as usual.
This behavior can be customized by subclassing CommonMiddleware and
overriding the response_redirect_class attribute.
"""
response_redirect_class = HttpResponsePermanentRedirect
def process_request(self, request):
"""
Check for denied User-Agents and rewrite the URL based on
settings.APPEND_SLASH and settings.PREPEND_WWW
"""
# Check for denied User-Agents
user_agent = request.META.get("HTTP_USER_AGENT")
if user_agent is not None:
for user_agent_regex in settings.DISALLOWED_USER_AGENTS:
if user_agent_regex.search(user_agent):
raise PermissionDenied("Forbidden user agent")
# Check for a redirect based on settings.PREPEND_WWW
host = request.get_host()
if settings.PREPEND_WWW and host and not host.startswith("www."):
# Check if we also need to append a slash so we can do it all
# with a single redirect. (This check may be somewhat expensive,
# so we only do it if we already know we're sending a redirect,
# or in process_response if we get a 404.)
if self.should_redirect_with_slash(request):
path = self.get_full_path_with_slash(request)
else:
path = request.get_full_path()
return self.response_redirect_class(f"{request.scheme}://www.{host}{path}")
def should_redirect_with_slash(self, request):
"""
Return True if settings.APPEND_SLASH is True and appending a slash to
the request path turns an invalid path into a valid one.
"""
if settings.APPEND_SLASH and not request.path_info.endswith("/"):
urlconf = getattr(request, "urlconf", None)
if not is_valid_path(request.path_info, urlconf):
match = is_valid_path("%s/" % request.path_info, urlconf)
if match:
view = match.func
return getattr(view, "should_append_slash", True)
return False
def get_full_path_with_slash(self, request):
"""
Return the full path of the request with a trailing slash appended.
Raise a RuntimeError if settings.DEBUG is True and request.method is
DELETE, POST, PUT, or PATCH.
"""
new_path = request.get_full_path(force_append_slash=True)
# Prevent construction of scheme relative urls.
new_path = escape_leading_slashes(new_path)
if settings.DEBUG and request.method in ("DELETE", "POST", "PUT", "PATCH"):
raise RuntimeError(
"You called this URL via %(method)s, but the URL doesn't end "
"in a slash and you have APPEND_SLASH set. Django can't "
"redirect to the slash URL while maintaining %(method)s data. "
"Change your form to point to %(url)s (note the trailing "
"slash), or set APPEND_SLASH=False in your Django settings."
% {
"method": request.method,
"url": request.get_host() + new_path,
}
)
return new_path
def process_response(self, request, response):
"""
When the status code of the response is 404, it may redirect to a path
with an appended slash if should_redirect_with_slash() returns True.
"""
# If the given URL is "Not Found", then check if we should redirect to
# a path with a slash appended.
if response.status_code == 404 and self.should_redirect_with_slash(request):
response = self.response_redirect_class(
self.get_full_path_with_slash(request)
)
# Add the Content-Length header to non-streaming responses if not
# already set.
if not response.streaming and not response.has_header("Content-Length"):
response.headers["Content-Length"] = str(len(response.content))
return response
| CommonMiddleware |
python | walkccc__LeetCode | solutions/1419. Minimum Number of Frogs Croaking/1419.py | {
"start": 0,
"end": 421
} | class ____:
def minNumberOfFrogs(self, croakOfFrogs: str) -> int:
CROAK = 'croak'
ans = 0
frogs = 0
count = [0] * 5
for c in croakOfFrogs:
count[CROAK.index(c)] += 1
if any(count[i] > count[i - 1] for i in range(1, 5)):
return -1
if c == 'c':
frogs += 1
elif c == 'k':
frogs -= 1
ans = max(ans, frogs)
return ans if frogs == 0 else -1
| Solution |
python | Pylons__pyramid | tests/test_traversal.py | {
"start": 27702,
"end": 30159
} | class ____(unittest.TestCase):
def _callFUT(self, resource, *elements):
from pyramid.traversal import resource_path
return resource_path(resource, *elements)
def test_it(self):
baz = DummyContext()
bar = DummyContext(baz)
foo = DummyContext(bar)
root = DummyContext(foo)
root.__parent__ = None
root.__name__ = None
foo.__parent__ = root
foo.__name__ = 'foo '
bar.__parent__ = foo
bar.__name__ = 'bar'
baz.__parent__ = bar
baz.__name__ = 'baz'
result = self._callFUT(baz, 'this/theotherthing', 'that')
self.assertEqual(result, '/foo%20/bar/baz/this%2Ftheotherthing/that')
def test_root_default(self):
root = DummyContext()
root.__parent__ = None
root.__name__ = None
result = self._callFUT(root)
self.assertEqual(result, '/')
def test_root_default_emptystring(self):
root = DummyContext()
root.__parent__ = None
root.__name__ = ''
result = self._callFUT(root)
self.assertEqual(result, '/')
def test_root_object_nonnull_name_direct(self):
root = DummyContext()
root.__parent__ = None
root.__name__ = 'flubadub'
result = self._callFUT(root)
self.assertEqual(result, 'flubadub') # insane case
def test_root_object_nonnull_name_indirect(self):
root = DummyContext()
root.__parent__ = None
root.__name__ = 'flubadub'
other = DummyContext()
other.__parent__ = root
other.__name__ = 'barker'
result = self._callFUT(other)
self.assertEqual(result, 'flubadub/barker') # insane case
def test_nonroot_default(self):
root = DummyContext()
root.__parent__ = None
root.__name__ = None
other = DummyContext()
other.__parent__ = root
other.__name__ = 'other'
result = self._callFUT(other)
self.assertEqual(result, '/other')
def test_path_with_None_itermediate_names(self):
root = DummyContext()
root.__parent__ = None
root.__name__ = None
other = DummyContext()
other.__parent__ = root
other.__name__ = None
other2 = DummyContext()
other2.__parent__ = other
other2.__name__ = 'other2'
result = self._callFUT(other2)
self.assertEqual(result, '//other2')
| ResourcePathTests |
python | pypa__pip | src/pip/_vendor/resolvelib/reporters.py | {
"start": 212,
"end": 2037
} | class ____(Generic[RT, CT, KT]):
"""Delegate class to provide progress reporting for the resolver."""
def starting(self) -> None:
"""Called before the resolution actually starts."""
def starting_round(self, index: int) -> None:
"""Called before each round of resolution starts.
The index is zero-based.
"""
def ending_round(self, index: int, state: State[RT, CT, KT]) -> None:
"""Called before each round of resolution ends.
This is NOT called if the resolution ends at this round. Use `ending`
if you want to report finalization. The index is zero-based.
"""
def ending(self, state: State[RT, CT, KT]) -> None:
"""Called before the resolution ends successfully."""
def adding_requirement(self, requirement: RT, parent: CT | None) -> None:
"""Called when adding a new requirement into the resolve criteria.
:param requirement: The additional requirement to be applied to filter
the available candidaites.
:param parent: The candidate that requires ``requirement`` as a
dependency, or None if ``requirement`` is one of the root
requirements passed in from ``Resolver.resolve()``.
"""
def resolving_conflicts(
self, causes: Collection[RequirementInformation[RT, CT]]
) -> None:
"""Called when starting to attempt requirement conflict resolution.
:param causes: The information on the collision that caused the backtracking.
"""
def rejecting_candidate(self, criterion: Criterion[RT, CT], candidate: CT) -> None:
"""Called when rejecting a candidate during backtracking."""
def pinning(self, candidate: CT) -> None:
"""Called when adding a candidate to the potential solution."""
| BaseReporter |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/testing/plugin/plugin_base.py | {
"start": 22248,
"end": 23101
} | class ____(abc.ABC):
@abc.abstractmethod
def skip_test_exception(self, *arg, **kw):
raise NotImplementedError()
@abc.abstractmethod
def combinations(self, *args, **kw):
raise NotImplementedError()
@abc.abstractmethod
def param_ident(self, *args, **kw):
raise NotImplementedError()
@abc.abstractmethod
def fixture(self, *arg, **kw):
raise NotImplementedError()
def get_current_test_name(self):
raise NotImplementedError()
@abc.abstractmethod
def mark_base_test_class(self) -> Any:
raise NotImplementedError()
@abc.abstractproperty
def add_to_marker(self):
raise NotImplementedError()
_fixture_fn_class = None
def set_fixture_functions(fixture_fn_class):
global _fixture_fn_class
_fixture_fn_class = fixture_fn_class
| FixtureFunctions |
python | doocs__leetcode | solution/2000-2099/2081.Sum of k-Mirror Numbers/Solution.py | {
"start": 0,
"end": 708
} | class ____:
def kMirror(self, k: int, n: int) -> int:
def check(x: int, k: int) -> bool:
s = []
while x:
s.append(x % k)
x //= k
return s == s[::-1]
ans = 0
for l in count(1):
x = 10 ** ((l - 1) // 2)
y = 10 ** ((l + 1) // 2)
for i in range(x, y):
v = i
j = i if l % 2 == 0 else i // 10
while j > 0:
v = v * 10 + j % 10
j //= 10
if check(v, k):
ans += v
n -= 1
if n == 0:
return ans
| Solution |
python | getsentry__sentry | tests/sentry/db/postgres/schema/safe_migrations/integration/test_migrations.py | {
"start": 12351,
"end": 12753
} | class ____(BaseSafeMigrationTest):
app = "bad_flow_delete_field_without_pending_app"
migrate_from = "0001"
migrate_to = "0002"
def test(self) -> None:
with pytest.raises(
UnsafeOperationException,
match="Field must be in the pending deletion state before full deletion",
):
self.run_migration()
| DeletionFieldBadDeleteWithoutPendingTest |
python | matplotlib__matplotlib | lib/matplotlib/backends/_backend_tk.py | {
"start": 25999,
"end": 40595
} | class ____(NavigationToolbar2, tk.Frame):
def __init__(self, canvas, window=None, *, pack_toolbar=True):
"""
Parameters
----------
canvas : `FigureCanvas`
The figure canvas on which to operate.
window : tk.Window
The tk.Window which owns this toolbar.
pack_toolbar : bool, default: True
If True, add the toolbar to the parent's pack manager's packing
list during initialization with ``side="bottom"`` and ``fill="x"``.
If you want to use the toolbar with a different layout manager, use
``pack_toolbar=False``.
"""
if window is None:
window = canvas.get_tk_widget().master
tk.Frame.__init__(self, master=window, borderwidth=2,
width=int(canvas.figure.bbox.width), height=50)
# Avoid message_label expanding the toolbar size, and in turn expanding the
# canvas size.
# Without pack_propagate(False), when the user defines a small figure size
# (e.g. 2x2):
# 1. Figure size that is bigger than the user's expectation.
# 2. When message_label is refreshed by mouse enter/leave, the canvas
# size will also be changed.
self.pack_propagate(False)
self._buttons = {}
for text, tooltip_text, image_file, callback in self.toolitems:
if text is None:
# Add a spacer; return value is unused.
self._Spacer()
else:
self._buttons[text] = button = self._Button(
text,
str(cbook._get_data_path(f"images/{image_file}.png")),
toggle=callback in ["zoom", "pan"],
command=getattr(self, callback),
)
if tooltip_text is not None:
add_tooltip(button, tooltip_text)
self._label_font = tkinter.font.Font(root=window, size=10)
# This filler item ensures the toolbar is always at least two text
# lines high. Otherwise the canvas gets redrawn as the mouse hovers
# over images because those use two-line messages which resize the
# toolbar.
label = tk.Label(master=self, font=self._label_font,
text='\N{NO-BREAK SPACE}\n\N{NO-BREAK SPACE}')
label.pack(side=tk.RIGHT)
self.message = tk.StringVar(master=self)
self._message_label = tk.Label(master=self, font=self._label_font,
textvariable=self.message,
justify=tk.RIGHT)
self._message_label.pack(side=tk.RIGHT)
NavigationToolbar2.__init__(self, canvas)
if pack_toolbar:
self.pack(side=tk.BOTTOM, fill=tk.X)
def _rescale(self):
"""
Scale all children of the toolbar to current DPI setting.
Before this is called, the Tk scaling setting will have been updated to
match the new DPI. Tk widgets do not update for changes to scaling, but
all measurements made after the change will match the new scaling. Thus
this function re-applies all the same sizes in points, which Tk will
scale correctly to pixels.
"""
for widget in self.winfo_children():
if isinstance(widget, (tk.Button, tk.Checkbutton)):
if hasattr(widget, '_image_file'):
# Explicit class because ToolbarTk calls _rescale.
NavigationToolbar2Tk._set_image_for_button(self, widget)
else:
# Text-only button is handled by the font setting instead.
pass
elif isinstance(widget, tk.Frame):
widget.configure(height='18p')
widget.pack_configure(padx='3p')
elif isinstance(widget, tk.Label):
pass # Text is handled by the font setting instead.
else:
_log.warning('Unknown child class %s', widget.winfo_class)
self._label_font.configure(size=10)
def _update_buttons_checked(self):
# sync button checkstates to match active mode
for text, mode in [('Zoom', _Mode.ZOOM), ('Pan', _Mode.PAN)]:
if text in self._buttons:
if self.mode == mode:
self._buttons[text].select() # NOT .invoke()
else:
self._buttons[text].deselect()
def pan(self, *args):
super().pan(*args)
self._update_buttons_checked()
def zoom(self, *args):
super().zoom(*args)
self._update_buttons_checked()
def set_message(self, s):
self.message.set(s)
def draw_rubberband(self, event, x0, y0, x1, y1):
# Block copied from remove_rubberband for backend_tools convenience.
if self.canvas._rubberband_rect_white:
self.canvas._tkcanvas.delete(self.canvas._rubberband_rect_white)
if self.canvas._rubberband_rect_black:
self.canvas._tkcanvas.delete(self.canvas._rubberband_rect_black)
height = self.canvas.figure.bbox.height
y0 = height - y0
y1 = height - y1
self.canvas._rubberband_rect_black = (
self.canvas._tkcanvas.create_rectangle(
x0, y0, x1, y1, outline='black'))
self.canvas._rubberband_rect_white = (
self.canvas._tkcanvas.create_rectangle(
x0, y0, x1, y1, outline='white', dash=(3, 3)))
def remove_rubberband(self):
if self.canvas._rubberband_rect_white:
self.canvas._tkcanvas.delete(self.canvas._rubberband_rect_white)
self.canvas._rubberband_rect_white = None
if self.canvas._rubberband_rect_black:
self.canvas._tkcanvas.delete(self.canvas._rubberband_rect_black)
self.canvas._rubberband_rect_black = None
def _set_image_for_button(self, button):
"""
Set the image for a button based on its pixel size.
The pixel size is determined by the DPI scaling of the window.
"""
if button._image_file is None:
return
# Allow _image_file to be relative to Matplotlib's "images" data
# directory.
path_regular = cbook._get_data_path('images', button._image_file)
path_large = path_regular.with_name(
path_regular.name.replace('.png', '_large.png'))
size = button.winfo_pixels('18p')
# Nested functions because ToolbarTk calls _Button.
def _get_color(color_name):
# `winfo_rgb` returns an (r, g, b) tuple in the range 0-65535
return button.winfo_rgb(button.cget(color_name))
def _is_dark(color):
if isinstance(color, str):
color = _get_color(color)
return max(color) < 65535 / 2
def _recolor_icon(image, color):
image_data = np.asarray(image).copy()
black_mask = (image_data[..., :3] == 0).all(axis=-1)
image_data[black_mask, :3] = color
return Image.fromarray(image_data)
# Use the high-resolution (48x48 px) icon if it exists and is needed
with Image.open(path_large if (size > 24 and path_large.exists())
else path_regular) as im:
# assure a RGBA image as foreground color is RGB
im = im.convert("RGBA")
image = ImageTk.PhotoImage(im.resize((size, size)), master=self)
button._ntimage = image
# create a version of the icon with the button's text color
foreground = (255 / 65535) * np.array(
button.winfo_rgb(button.cget("foreground")))
im_alt = _recolor_icon(im, foreground)
image_alt = ImageTk.PhotoImage(
im_alt.resize((size, size)), master=self)
button._ntimage_alt = image_alt
if _is_dark("background"):
# For Checkbuttons, we need to set `image` and `selectimage` at
# the same time. Otherwise, when updating the `image` option
# (such as when changing DPI), if the old `selectimage` has
# just been overwritten, Tk will throw an error.
image_kwargs = {"image": image_alt}
else:
image_kwargs = {"image": image}
# Checkbuttons may switch the background to `selectcolor` in the
# checked state, so check separately which image it needs to use in
# that state to still ensure enough contrast with the background.
if (
isinstance(button, tk.Checkbutton)
and button.cget("selectcolor") != ""
):
if self._windowingsystem != "x11":
selectcolor = "selectcolor"
else:
# On X11, selectcolor isn't used directly for indicator-less
# buttons. See `::tk::CheckEnter` in the Tk button.tcl source
# code for details.
r1, g1, b1 = _get_color("selectcolor")
r2, g2, b2 = _get_color("activebackground")
selectcolor = ((r1+r2)/2, (g1+g2)/2, (b1+b2)/2)
if _is_dark(selectcolor):
image_kwargs["selectimage"] = image_alt
else:
image_kwargs["selectimage"] = image
button.configure(**image_kwargs, height='18p', width='18p')
def _Button(self, text, image_file, toggle, command):
if not toggle:
b = tk.Button(
master=self, text=text, command=command,
relief="flat", overrelief="groove", borderwidth=1,
)
else:
# There is a bug in tkinter included in some python 3.6 versions
# that without this variable, produces a "visual" toggling of
# other near checkbuttons
# https://bugs.python.org/issue29402
# https://bugs.python.org/issue25684
var = tk.IntVar(master=self)
b = tk.Checkbutton(
master=self, text=text, command=command, indicatoron=False,
variable=var, offrelief="flat", overrelief="groove",
borderwidth=1
)
b.var = var
b._image_file = image_file
if image_file is not None:
# Explicit class because ToolbarTk calls _Button.
NavigationToolbar2Tk._set_image_for_button(self, b)
else:
b.configure(font=self._label_font)
b.pack(side=tk.LEFT)
return b
def _Spacer(self):
# Buttons are also 18pt high.
s = tk.Frame(master=self, height='18p', relief=tk.RIDGE, bg='DarkGray')
s.pack(side=tk.LEFT, padx='3p')
return s
def save_figure(self, *args):
filetypes = self.canvas.get_supported_filetypes_grouped()
tk_filetypes = [
(name, " ".join(f"*.{ext}" for ext in exts))
for name, exts in sorted(filetypes.items())
]
default_extension = self.canvas.get_default_filetype()
default_filetype = self.canvas.get_supported_filetypes()[default_extension]
filetype_variable = tk.StringVar(self.canvas.get_tk_widget(), default_filetype)
# adding a default extension seems to break the
# asksaveasfilename dialog when you choose various save types
# from the dropdown. Passing in the empty string seems to
# work - JDH!
# defaultextension = self.canvas.get_default_filetype()
defaultextension = ''
initialdir = os.path.expanduser(mpl.rcParams['savefig.directory'])
# get_default_filename() contains the default extension. On some platforms,
# choosing a different extension from the dropdown does not overwrite it,
# so we need to remove it to make the dropdown functional.
initialfile = pathlib.Path(self.canvas.get_default_filename()).stem
fname = tkinter.filedialog.asksaveasfilename(
master=self.canvas.get_tk_widget().master,
title='Save the figure',
filetypes=tk_filetypes,
defaultextension=defaultextension,
initialdir=initialdir,
initialfile=initialfile,
typevariable=filetype_variable
)
if fname in ["", ()]:
return None
# Save dir for next time, unless empty str (i.e., use cwd).
if initialdir != "":
mpl.rcParams['savefig.directory'] = (
os.path.dirname(str(fname)))
# If the filename contains an extension, let savefig() infer the file
# format from that. If it does not, use the selected dropdown option.
if pathlib.Path(fname).suffix[1:] != "":
extension = None
else:
extension = filetypes[filetype_variable.get()][0]
try:
self.canvas.figure.savefig(fname, format=extension)
return fname
except Exception as e:
tkinter.messagebox.showerror("Error saving file", str(e))
def set_history_buttons(self):
state_map = {True: tk.NORMAL, False: tk.DISABLED}
can_back = self._nav_stack._pos > 0
can_forward = self._nav_stack._pos < len(self._nav_stack) - 1
if "Back" in self._buttons:
self._buttons['Back']['state'] = state_map[can_back]
if "Forward" in self._buttons:
self._buttons['Forward']['state'] = state_map[can_forward]
def add_tooltip(widget, text):
tipwindow = None
def showtip(event):
"""Display text in tooltip window."""
nonlocal tipwindow
if tipwindow or not text:
return
x, y, _, _ = widget.bbox("insert")
x = x + widget.winfo_rootx() + widget.winfo_width()
y = y + widget.winfo_rooty()
tipwindow = tk.Toplevel(widget)
tipwindow.overrideredirect(1)
tipwindow.geometry(f"+{x}+{y}")
try: # For Mac OS
tipwindow.tk.call("::tk::unsupported::MacWindowStyle",
"style", tipwindow._w,
"help", "noActivates")
except tk.TclError:
pass
label = tk.Label(tipwindow, text=text, justify=tk.LEFT,
relief=tk.SOLID, borderwidth=1)
label.pack(ipadx=1)
def hidetip(event):
nonlocal tipwindow
if tipwindow:
tipwindow.destroy()
tipwindow = None
widget.bind("<Enter>", showtip)
widget.bind("<Leave>", hidetip)
@backend_tools._register_tool_class(FigureCanvasTk)
| NavigationToolbar2Tk |
python | falconry__falcon | falcon/errors.py | {
"start": 6507,
"end": 8831
} | class ____(HTTPError):
"""400 Bad Request.
The server cannot or will not process the request due to something
that is perceived to be a client error (e.g., malformed request
syntax, invalid request message framing, or deceptive request
routing).
(See also: RFC 7231, Section 6.5.1)
All the arguments are defined as keyword-only.
Keyword Args:
title (str): Error title (default '400 Bad Request').
description (str): Human-friendly description of the error, along with
a helpful suggestion or two.
headers (dict or list): A ``dict`` of header names and values
to set, or a ``list`` of (*name*, *value*) tuples. Both *name* and
*value* must be of type ``str`` or ``StringType``, and only
character values 0x00 through 0xFF may be used on platforms that
use wide characters.
Note:
The Content-Type header, if present, will be overridden. If
you wish to return custom error messages, you can create
your own HTTP error class, and install an error handler
to convert it into an appropriate HTTP response for the
client
Note:
Falcon can process a list of ``tuple`` slightly faster
than a ``dict``.
href (str): A URL someone can visit to find out more information
(default ``None``). Unicode characters are percent-encoded.
href_text (str): If href is given, use this as the friendly
title/description for the link (default 'API documentation
for this error').
code (int): An internal code that customers can reference in their
support request or to help them when searching for knowledge
base articles related to this error (default ``None``).
"""
def __init__(
self,
*,
title: str | None = None,
description: str | None = None,
headers: HeaderArg | None = None,
**kwargs: HTTPErrorKeywordArguments,
) -> None:
super().__init__(
status.HTTP_400,
title=title,
description=description,
headers=headers,
**kwargs, # type: ignore[arg-type]
)
| HTTPBadRequest |
python | django__django | tests/gis_tests/test_spatialrefsys.py | {
"start": 2306,
"end": 6136
} | class ____(TestCase):
@cached_property
def SpatialRefSys(self):
return connection.ops.connection.ops.spatial_ref_sys()
def test_get_units(self):
epsg_4326 = next(f for f in test_srs if f["srid"] == 4326)
unit, unit_name = self.SpatialRefSys().get_units(epsg_4326["wkt"])
self.assertEqual(unit_name, "degree")
self.assertAlmostEqual(unit, 0.01745329251994328)
def test_retrieve(self):
"""
Test retrieval of SpatialRefSys model objects.
"""
for sd in test_srs:
srs = self.SpatialRefSys.objects.get(srid=sd["srid"])
self.assertEqual(sd["srid"], srs.srid)
# Some of the authority names are borked on Oracle, e.g.,
# SRID=32140. Also, Oracle Spatial seems to add extraneous info to
# fields, hence the testing with the 'startswith' flag.
auth_name, oracle_flag = sd["auth_name"]
# Compare case-insensitively because srs.auth_name is lowercase
# ("epsg") on Spatialite.
if not connection.ops.oracle or oracle_flag:
self.assertIs(srs.auth_name.upper().startswith(auth_name), True)
self.assertEqual(sd["auth_srid"], srs.auth_srid)
# No PROJ and different srtext on Oracle.
if not connection.ops.oracle:
self.assertTrue(srs.wkt.startswith(sd["srtext"]))
self.assertRegex(srs.proj4text, sd["proj_re"])
def test_osr(self):
"""
Test getting OSR objects from SpatialRefSys model objects.
"""
for sd in test_srs:
sr = self.SpatialRefSys.objects.get(srid=sd["srid"])
self.assertTrue(sr.spheroid.startswith(sd["spheroid"]))
self.assertEqual(sd["geographic"], sr.geographic)
self.assertEqual(sd["projected"], sr.projected)
self.assertIs(sr.name.startswith(sd["name"]), True)
# Testing the SpatialReference object directly.
if not connection.ops.oracle:
srs = sr.srs
self.assertRegex(srs.proj, sd["proj_re"])
self.assertTrue(srs.wkt.startswith(sd["srtext"]))
def test_ellipsoid(self):
"""
Test the ellipsoid property.
"""
for sd in test_srs:
# Getting the ellipsoid and precision parameters.
ellps1 = sd["ellipsoid"]
prec = sd["eprec"]
# Getting our spatial reference and its ellipsoid
srs = self.SpatialRefSys.objects.get(srid=sd["srid"])
ellps2 = srs.ellipsoid
for i in range(3):
self.assertAlmostEqual(ellps1[i], ellps2[i], prec[i])
@skipUnlessDBFeature("supports_add_srs_entry")
def test_add_entry(self):
"""
Test adding a new entry in the SpatialRefSys model using the
add_srs_entry utility.
"""
from django.contrib.gis.utils import add_srs_entry
add_srs_entry(3857)
self.assertTrue(self.SpatialRefSys.objects.filter(srid=3857).exists())
srs = self.SpatialRefSys.objects.get(srid=3857)
self.assertTrue(
self.SpatialRefSys.get_spheroid(srs.wkt).startswith("SPHEROID[")
)
def test_srs_with_invalid_wkt_and_proj4(self):
class MockSpatialRefSys(SpatialRefSysMixin):
def __init__(self, wkt=None, proj4text=None):
self.wkt = wkt
self.proj4text = proj4text
with self.assertRaisesMessage(
Exception,
"Could not get OSR SpatialReference.\n"
"Error for WKT 'INVALID_WKT': Corrupt data.\n"
"Error for PROJ.4 '+proj=invalid': Corrupt data.",
):
MockSpatialRefSys(wkt="INVALID_WKT", proj4text="+proj=invalid").srs
| SpatialRefSysTest |
python | eventlet__eventlet | tests/greenthread_test.py | {
"start": 277,
"end": 464
} | class ____:
def assert_dead(self, gt):
if hasattr(gt, 'wait'):
self.assertRaises(greenlet.GreenletExit, gt.wait)
assert gt.dead
assert not gt
| Asserts |
python | nedbat__coveragepy | coverage/bytecode.py | {
"start": 1464,
"end": 6666
} | class ____:
"""Utility to step through trails of instructions.
We have two reasons to need sequences of instructions from a code object:
First, in strict sequence to visit all the instructions in the object.
This is `walk(follow_jumps=False)`. Second, we want to follow jumps to
understand how execution will flow: `walk(follow_jumps=True)`.
"""
def __init__(self, code: CodeType) -> None:
self.code = code
self.insts: dict[TOffset, dis.Instruction] = {}
inst = None
for inst in dis.get_instructions(code):
self.insts[inst.offset] = inst
assert inst is not None
self.max_offset = inst.offset
def walk(
self, *, start_at: TOffset = 0, follow_jumps: bool = True
) -> Iterable[dis.Instruction]:
"""
Yield instructions starting from `start_at`. Follow unconditional
jumps if `follow_jumps` is true.
"""
seen = set()
offset = start_at
while offset < self.max_offset + 1:
if offset in seen:
break
seen.add(offset)
if inst := self.insts.get(offset):
yield inst
if follow_jumps and inst.opcode in ALWAYS_JUMPS:
offset = inst.jump_target
continue
offset += 2
TBranchTrailsOneSource = dict[Optional[TArc], set[TOffset]]
TBranchTrails = dict[TOffset, TBranchTrailsOneSource]
def branch_trails(
code: CodeType,
multiline_map: Mapping[TLineNo, TLineNo],
) -> TBranchTrails:
"""
Calculate branch trails for `code`.
`multiline_map` maps line numbers to the first line number of a
multi-line statement.
Instructions can have a jump_target, where they might jump to next. Some
instructions with a jump_target are unconditional jumps (ALWAYS_JUMPS), so
they aren't interesting to us, since they aren't the start of a branch
possibility.
Instructions that might or might not jump somewhere else are branch
possibilities. For each of those, we track a trail of instructions. These
are lists of instruction offsets, the next instructions that can execute.
We follow the trail until we get to a new source line. That gives us the
arc from the original instruction's line to the new source line.
"""
the_trails: TBranchTrails = collections.defaultdict(lambda: collections.defaultdict(set))
iwalker = InstructionWalker(code)
for inst in iwalker.walk(follow_jumps=False):
if not inst.jump_target:
# We only care about instructions with jump targets.
continue
if inst.opcode in ALWAYS_JUMPS:
# We don't care about unconditional jumps.
continue
from_line = inst.line_number
if from_line is None:
continue
from_line = multiline_map.get(from_line, from_line)
def add_one_branch_trail(
trails: TBranchTrailsOneSource,
start_at: TOffset,
) -> None:
# pylint: disable=cell-var-from-loop
inst_offsets: set[TOffset] = set()
to_line = None
for inst2 in iwalker.walk(start_at=start_at, follow_jumps=True):
inst_offsets.add(inst2.offset)
l2 = inst2.line_number
if l2 is not None:
l2 = multiline_map.get(l2, l2)
if l2 and l2 != from_line:
to_line = l2
break
elif inst2.jump_target and (inst2.opcode not in ALWAYS_JUMPS):
break
elif inst2.opcode in RETURNS:
to_line = -code.co_firstlineno
break
if to_line is not None:
trails[(from_line, to_line)].update(inst_offsets)
else:
trails[None] = set()
# Calculate two trails: one from the next instruction, and one from the
# jump_target instruction.
trails: TBranchTrailsOneSource = collections.defaultdict(set)
add_one_branch_trail(trails, start_at=inst.offset + 2)
add_one_branch_trail(trails, start_at=inst.jump_target)
the_trails[inst.offset] = trails
# Sometimes we get BRANCH_RIGHT or BRANCH_LEFT events from instructions
# other than the original jump possibility instruction. Register each
# trail under all of their offsets so we can pick up in the middle of a
# trail if need be.
for arc, offsets in trails.items():
for offset in offsets:
the_trails[offset][arc].update(offsets)
return the_trails
def always_jumps(code: CodeType) -> dict[TOffset, TOffset]:
"""Make a map of unconditional bytecodes jumping to others.
Only include bytecodes that do no work and go to another bytecode.
"""
jumps = {}
iwalker = InstructionWalker(code)
for inst in iwalker.walk(follow_jumps=False):
if inst.opcode in ALWAYS_JUMPS:
jumps[inst.offset] = inst.jump_target
elif inst.opcode in NOPS:
jumps[inst.offset] = inst.offset + 2
return jumps
| InstructionWalker |
python | keras-team__keras | keras/src/backend/openvino/core.py | {
"start": 4481,
"end": 20481
} | class ____:
def __init__(self, x, data=None):
x_shape = x.get_partial_shape()
if x_shape.rank.is_dynamic:
x_keras_shape = None
else:
x_keras_shape = [
None if dim.is_dynamic else dim.get_length()
for dim in list(x_shape)
]
x_type = x.get_element_type()
x_keras_type = ov_to_keras_type(x_type)
self.output = x
self.shape = tuple(x_keras_shape)
self.dtype = x_keras_type
self.ndim = None
self.data = data
if x.get_partial_shape().rank.is_static:
self.ndim = x.get_partial_shape().rank.get_length()
def __add__(self, other):
first = self.output
other = get_ov_output(other)
first, other = align_operand_types(
first, other, "OpenVINOKerasTensor::__add__"
)
return OpenVINOKerasTensor(ov_opset.add(first, other).output(0))
def __radd__(self, other):
first = self.output
other = get_ov_output(other)
first, other = align_operand_types(
first, other, "OpenVINOKerasTensor::__radd__"
)
return OpenVINOKerasTensor(ov_opset.add(first, other).output(0))
def __sub__(self, other):
first = self.output
other = get_ov_output(other)
first, other = align_operand_types(
first, other, "OpenVINOKerasTensor::__sub__"
)
if first.get_element_type() == Type.boolean:
return OpenVINOKerasTensor(
ov_opset.logical_xor(first, other).output(0)
)
return OpenVINOKerasTensor(ov_opset.subtract(first, other).output(0))
def __rsub__(self, other):
first = self.output
other = get_ov_output(other)
first, other = align_operand_types(
first, other, "OpenVINOKerasTensor::__rsub__"
)
return OpenVINOKerasTensor(ov_opset.subtract(other, first).output(0))
def __mul__(self, other):
first = self.output
other = get_ov_output(other)
first, other = align_operand_types(
first, other, "OpenVINOKerasTensor::__mul__"
)
if first.get_element_type() == Type.boolean:
return OpenVINOKerasTensor(
ov_opset.logical_and(first, other).output(0)
)
return OpenVINOKerasTensor(ov_opset.multiply(first, other).output(0))
def __rmul__(self, other):
first = self.output
other = get_ov_output(other)
first, other = align_operand_types(
first, other, "OpenVINOKerasTensor::__rmul__"
)
if first.get_element_type() == Type.boolean:
return OpenVINOKerasTensor(
ov_opset.logical_and(first, other).output(0)
)
return OpenVINOKerasTensor(ov_opset.multiply(first, other).output(0))
def __truediv__(self, other):
first = self.output
other = get_ov_output(other)
first, other = align_operand_types(
first, other, "OpenVINOKerasTensor::__truediv__"
)
return OpenVINOKerasTensor(ov_opset.divide(first, other).output(0))
def __rtruediv__(self, other):
first = self.output
other = get_ov_output(other)
first, other = align_operand_types(
first, other, "OpenVINOKerasTensor::__rtruediv__"
)
return OpenVINOKerasTensor(ov_opset.divide(other, first).output(0))
def __floordiv__(self, other):
first = self.output
other = get_ov_output(other)
first, other = align_operand_types(
first, other, "OpenVINOKerasTensor::__floordiv__"
)
return OpenVINOKerasTensor(ov_opset.divide(first, other).output(0))
def __rfloordiv__(self, other):
first = self.output
other = get_ov_output(other)
first, other = align_operand_types(
first, other, "OpenVINOKerasTensor::__rfloordiv__"
)
return OpenVINOKerasTensor(ov_opset.divide(other, first).output(0))
def __neg__(self):
first = self.output
return OpenVINOKerasTensor(ov_opset.negative(first).output(0))
def __abs__(self):
first = self.output
return OpenVINOKerasTensor(ov_opset.absolute(first).output(0))
def __invert__(self):
first = self.output
return OpenVINOKerasTensor(ov_opset.logical_not(first).output(0))
def __pow__(self, other):
first = self.output
other = get_ov_output(other)
first, other = align_operand_types(
first, other, "OpenVINOKerasTensor::__pow__"
)
return OpenVINOKerasTensor(ov_opset.power(first, other).output(0))
def __rpow__(self, other):
other = get_ov_output(other)
first = self.output
first, other = align_operand_types(
first, other, "OpenVINOKerasTensor::__rpow__"
)
return OpenVINOKerasTensor(ov_opset.power(other, first).output(0))
def __lt__(self, other):
first = self.output
other = get_ov_output(other)
first, other = align_operand_types(
first, other, "OpenVINOKerasTensor::__lt__"
)
return OpenVINOKerasTensor(ov_opset.less(first, other).output(0))
def __gt__(self, other):
first = self.output
other = get_ov_output(other)
first, other = align_operand_types(
first, other, "OpenVINOKerasTensor::__gt__"
)
return OpenVINOKerasTensor(ov_opset.greater(first, other).output(0))
def __le__(self, other):
first = self.output
other = get_ov_output(other)
first, other = align_operand_types(
first, other, "OpenVINOKerasTensor::__le__"
)
return OpenVINOKerasTensor(ov_opset.less_equal(first, other).output(0))
def __ge__(self, other):
first = self.output
other = get_ov_output(other)
first, other = align_operand_types(
first, other, "OpenVINOKerasTensor::__ge__"
)
return OpenVINOKerasTensor(
ov_opset.greater_equal(first, other).output(0)
)
def __eq__(self, other):
first = self.output
other = get_ov_output(other)
first, other = align_operand_types(
first, other, "OpenVINOKerasTensor::__eq__"
)
return OpenVINOKerasTensor(ov_opset.equal(first, other).output(0))
def __ne__(self, other):
first = self.output
other = get_ov_output(other)
first, other = align_operand_types(
first, other, "OpenVINOKerasTensor::__ne__"
)
return OpenVINOKerasTensor(ov_opset.not_equal(first, other).output(0))
def __getitem__(self, indices):
data = self.output
rank = len(data.get_partial_shape())
axes, gather_indices_nodes = [], []
slice_axes, slice_starts, slice_ends, slice_steps = [], [], [], []
unsqueeze_axes = []
if not isinstance(indices, tuple):
indices = (indices,)
if any(i is Ellipsis for i in indices):
ellipsis_pos = indices.index(Ellipsis)
num_specified = sum(
i is not Ellipsis and i is not None for i in indices
)
num_missing = rank - num_specified
indices = (
indices[:ellipsis_pos]
+ (builtins.slice(None),) * num_missing
+ indices[ellipsis_pos + 1 :]
)
def count_unsqueeze_before(dim):
return sum(1 for i in range(dim) if indices[i] is None)
partial_shape = ov_opset.shape_of(data, Type.i32)
zero_const = ov_opset.constant(0, Type.i32)
for dim, index in enumerate(indices):
if isinstance(index, bool):
raise ValueError(
"OpenVINO backend does not support boolean indexing"
)
elif isinstance(index, (int, np.integer, np.ndarray)):
if isinstance(index, (np.ndarray, np.integer)):
if isinstance(index, np.ndarray) and len(index.shape) != 0:
raise ValueError(
"OpenVINO backend does not support"
"multi-dimensional indexing"
)
index = int(index)
actual_dim = dim - count_unsqueeze_before(dim)
if not (0 <= actual_dim < rank):
raise IndexError(
f"Index {index} is out of bounds for "
f"axis {dim} with rank {rank}"
)
length = ov_opset.gather(
partial_shape,
ov_opset.constant([actual_dim], Type.i32),
zero_const,
)
if index >= 0:
idx_value = ov_opset.constant([index], Type.i32)
else:
idx_value = ov_opset.add(
ov_opset.constant([index], Type.i32), length
)
axes.append(dim)
gather_indices_nodes.append(idx_value.output(0))
elif isinstance(index, builtins.slice):
if index == builtins.slice(None):
continue
if index.step is not None and index.step < 0:
raise ValueError("OpenVINO doesn't support negative steps")
slice_axes.append(dim)
slice_starts.append(0 if index.start is None else index.start)
slice_ends.append(
2**31 - 1 if index.stop is None else index.stop
)
slice_steps.append(1 if index.step is None else index.step)
elif index is None:
unsqueeze_axes.append(dim)
elif isinstance(index, OpenVINOKerasTensor):
index = get_ov_output(index)
index_type = index.get_element_type()
index_shape = index.get_partial_shape()
if index_type == Type.boolean or not index_type.is_integral():
raise ValueError(
"OpenVINO backend does not "
f"support {index_type} indexing"
)
axes.append(dim)
if len(index_shape) > 1:
raise ValueError(
"OpenVINO backend does not "
"support multi-dimensional indexing"
)
if len(index_shape) == 0:
index = ov_opset.unsqueeze(index, zero_const).output(0)
if index_type != Type.i32:
index = ov_opset.convert(index, Type.i32).output(0)
shape_tensor = ov_opset.shape_of(data, Type.i32)
axis_i32 = ov_opset.constant([dim], dtype=Type.i32)
dim_size = ov_opset.gather(shape_tensor, axis_i32, zero_const)
is_negative = ov_opset.less(index, zero_const)
adjusted_index = ov_opset.add(index, dim_size)
index = ov_opset.select(
is_negative, adjusted_index, index
).output(0)
gather_indices_nodes.append(index)
else:
raise ValueError(
f"Unsupported index type {type(index)} "
"in OpenVINOKerasTensor.__getitem__"
)
if slice_axes:
step = ov_opset.constant(slice_steps, Type.i32).output(0)
start = ov_opset.constant(slice_starts, Type.i32).output(0)
stop = ov_opset.constant(slice_ends, Type.i32).output(0)
adjusted_slice_axes = [
ax - sum(1 for unsq in unsqueeze_axes if unsq <= ax)
for ax in slice_axes
]
axes_const = ov_opset.constant(
adjusted_slice_axes, Type.i32
).output(0)
data = ov_opset.slice(data, start, stop, step, axes_const).output(0)
if axes:
gather_indices_const = (
gather_indices_nodes[0]
if len(gather_indices_nodes) == 1
else ov_opset.concat(gather_indices_nodes, axis=0).output(0)
)
adjusted_axes = [
ax - sum(1 for unsq in unsqueeze_axes if unsq <= ax)
for ax in axes
]
if len(axes) == 1:
data = ov_opset.gather(
data, gather_indices_const, adjusted_axes[0]
).output(0)
data = ov_opset.squeeze(data, adjusted_axes[0]).output(0)
else:
rank = len(data.get_partial_shape())
remaining_axes = [
i for i in range(rank) if i not in adjusted_axes
]
perm = ov_opset.constant(
adjusted_axes + remaining_axes, Type.i32
)
data = ov_opset.transpose(data, perm).output(0)
data = ov_opset.gather_nd(data, gather_indices_const).output(0)
if unsqueeze_axes:
adjusted_unsqueeze = []
for ax in unsqueeze_axes:
ax -= sum(1 for s in axes if s < ax)
ax -= sum(1 for s in slice_axes if s < ax)
adjusted_unsqueeze.append(ax)
unsqueeze_const = ov_opset.constant(
adjusted_unsqueeze, Type.i32
).output(0)
data = ov_opset.unsqueeze(data, unsqueeze_const).output(0)
return OpenVINOKerasTensor(data)
def __len__(self):
ov_output = self.output
ov_shape = ov_output.get_partial_shape()
assert ov_shape.rank.is_static and ov_shape.rank.get_length() > 0, (
"rank must be static and greater than zero"
)
assert ov_shape[0].is_static, "the first dimension must be static"
return ov_shape[0].get_length()
def __mod__(self, other):
first = self.output
other = get_ov_output(other)
first, other = align_operand_types(
first, other, "OpenVINOKerasTensor::__mod__"
)
return OpenVINOKerasTensor(ov_opset.mod(first, other).output(0))
def __array__(self, dtype=None):
try:
tensor = cast(self, dtype=dtype) if dtype is not None else self
return convert_to_numpy(tensor)
except Exception as e:
raise RuntimeError(
"An OpenVINOKerasTensor is symbolic: it's a placeholder "
"for a shape and a dtype.\n"
"It doesn't have any actual numerical value.\n"
"You cannot convert it to a NumPy array."
) from e
def numpy(self):
return self.__array__()
def ov_to_keras_type(ov_type):
for _keras_type, _ov_type in OPENVINO_DTYPES.items():
if ov_type == _ov_type:
return _keras_type
raise ValueError(
f"Requested OpenVINO type has no keras analogue '{ov_type.to_string()}'"
)
@contextlib.contextmanager
def device_scope(device_name):
current_device = _parse_device_input(device_name)
global_state.set_global_attribute("openvino_device", current_device)
def get_device():
device = global_state.get_global_attribute("openvino_device", None)
if device is None:
return "CPU"
return device
def _parse_device_input(device_name):
if isinstance(device_name, str):
# We support string value like "cpu:0", "gpu:1", and need to convert
# "gpu" to "cuda"
device_name = device_name.upper()
device_type, _ = device_name.split(":")
return device_type
else:
raise ValueError(
"Invalid value for argument `device_name`. "
"Expected a string like 'gpu:0' or 'cpu'. "
f"Received: device_name='{device_name}'"
)
return device_name
| OpenVINOKerasTensor |
python | run-llama__llama_index | llama-index-integrations/readers/llama-index-readers-web/llama_index/readers/web/whole_site/base.py | {
"start": 417,
"end": 5234
} | class ____(BaseReader):
"""
BFS Web Scraper for websites.
This class provides functionality to scrape entire websites using a breadth-first search algorithm.
It navigates web pages from a given base URL, following links that match a specified prefix.
Attributes:
prefix (str): URL prefix to focus the scraping.
max_depth (int): Maximum depth for BFS algorithm.
Args:
prefix (str): URL prefix for scraping.
max_depth (int, optional): Maximum depth for BFS. Defaults to 10.
uri_as_id (bool, optional): Whether to use the URI as the document ID. Defaults to False.
"""
def __init__(
self,
prefix: str,
max_depth: int = 10,
uri_as_id: bool = False,
driver: Optional[webdriver.Chrome] = None,
) -> None:
"""
Initialize the WholeSiteReader with the provided prefix and maximum depth.
"""
self.prefix = prefix
self.max_depth = max_depth
self.uri_as_id = uri_as_id
self.driver = driver if driver else self.setup_driver()
def setup_driver(self):
"""
Sets up the Selenium WebDriver for Chrome.
Returns:
WebDriver: An instance of Chrome WebDriver.
"""
try:
import chromedriver_autoinstaller
except ImportError:
raise ImportError("Please install chromedriver_autoinstaller")
opt = webdriver.ChromeOptions()
opt.add_argument("--start-maximized")
chromedriver_autoinstaller.install()
return webdriver.Chrome(options=opt)
def clean_url(self, url):
return url.split("#")[0]
def restart_driver(self):
self.driver.quit()
self.driver = self.setup_driver()
def extract_content(self):
WebDriverWait(self.driver, 10).until(
EC.presence_of_element_located((By.TAG_NAME, "body"))
)
body_element = self.driver.find_element(By.TAG_NAME, "body")
return body_element.text.strip()
def extract_links(self):
js_script = """
var links = [];
var elements = document.getElementsByTagName('a');
for (var i = 0; i < elements.length; i++) {
var href = elements[i].href;
if (href) {
links.push(href);
}
}
return links;
"""
return self.driver.execute_script(js_script)
def load_data(self, base_url: str) -> List[Document]:
"""
Load data from the base URL using BFS algorithm.
Args:
base_url (str): Base URL to start scraping.
Returns:
List[Document]: List of scraped documents.
"""
added_urls = set()
urls_to_visit = [(base_url, 0)]
documents = []
while urls_to_visit:
current_url, depth = urls_to_visit.pop(0)
print(f"Visiting: {current_url}, {len(urls_to_visit)} left")
try:
self.driver.get(current_url)
page_content = self.extract_content()
added_urls.add(current_url)
next_depth = depth + 1
if next_depth <= self.max_depth:
# links = self.driver.find_elements(By.TAG_NAME, 'a')
links = self.extract_links()
# clean all urls
links = [self.clean_url(link) for link in links]
# extract new links
links = [link for link in links if link not in added_urls]
print(f"Found {len(links)} new potential links")
for href in links:
try:
if href.startswith(self.prefix) and href not in added_urls:
urls_to_visit.append((href, next_depth))
added_urls.add(href)
except Exception:
continue
doc = Document(text=page_content, extra_info={"URL": current_url})
if self.uri_as_id:
warnings.warn(
"Setting the URI as the id of the document might break the code execution downstream and should be avoided."
)
doc.id_ = current_url
documents.append(doc)
time.sleep(1)
except WebDriverException:
print("WebDriverException encountered, restarting driver...")
self.restart_driver()
except Exception as e:
print(f"An unexpected exception occurred: {e}, skipping URL...")
continue
self.driver.quit()
return documents
| WholeSiteReader |
python | scikit-image__scikit-image | benchmarks/benchmark_transform_warp.py | {
"start": 268,
"end": 2073
} | class ____:
params = (
[np.uint8, np.uint16, np.float32, np.float64],
[128, 1024, 4096],
[0, 1, 3],
# [np.float32, np.float64]
)
# param_names = ['dtype_in', 'N', 'order', 'dtype_tform']
param_names = ['dtype_in', 'N', 'order']
# def setup(self, dtype_in, N, order, dtype_tform):
def setup(self, dtype_in, N, order):
with warnings.catch_warnings():
warnings.filterwarnings("ignore", "Possible precision loss")
self.image = convert(np.random.random((N, N)), dtype=dtype_in)
self.tform = SimilarityTransform(
scale=1, rotation=np.pi / 10, translation=(0, 4)
)
self.tform.params = self.tform.params.astype('float32')
self.order = order
if 'dtype' in inspect.signature(warp).parameters:
self.warp = functools.partial(warp, dtype=self.image.dtype)
else:
# Keep a call to functools to have the same number of python
# function calls
self.warp = functools.partial(warp)
# def time_same_type(self, dtype_in, N, order, dtype_tform):
def time_same_type(self, dtype_in, N, order):
"""Test the case where the users wants to preserve their same low
precision data type."""
result = self.warp(
self.image, self.tform, order=self.order, preserve_range=True
)
# convert back to input type, no-op if same type
result = result.astype(dtype_in, copy=False)
# def time_to_float64(self, dtype_in, N, order, dtype_form):
def time_to_float64(self, dtype_in, N, order):
"""Test the case where want to upvert to float64 for continued
transformations."""
warp(self.image, self.tform, order=self.order, preserve_range=True)
| WarpSuite |
python | numpy__numpy | tools/swig/test/testTensor.py | {
"start": 13459,
"end": 13761
} | class ____(TensorTestCase):
def __init__(self, methodName="runTest"):
TensorTestCase.__init__(self, methodName)
self.typeStr = "long"
self.typeCode = "l"
self.result = int(self.result)
######################################################################
| longTestCase |
python | cookiecutter__cookiecutter | tests/test_prompt.py | {
"start": 555,
"end": 2428
} | class ____:
"""Class to unite simple and complex tests for render_variable function."""
@pytest.mark.parametrize(
'raw_var, rendered_var',
[
(1, '1'),
(True, True),
('foo', 'foo'),
('{{cookiecutter.project}}', 'foobar'),
(None, None),
],
)
def test_convert_to_str(self, mocker, raw_var, rendered_var) -> None:
"""Verify simple items correctly rendered to strings."""
env = environment.StrictEnvironment()
from_string = mocker.patch(
'cookiecutter.utils.StrictEnvironment.from_string', wraps=env.from_string
)
context = {'project': 'foobar'}
result = prompt.render_variable(env, raw_var, context)
assert result == rendered_var
# Make sure that non None non str variables are converted beforehand
if raw_var is not None and not isinstance(raw_var, bool):
if not isinstance(raw_var, str):
raw_var = str(raw_var)
from_string.assert_called_once_with(raw_var)
else:
assert not from_string.called
@pytest.mark.parametrize(
'raw_var, rendered_var',
[
({1: True, 'foo': False}, {'1': True, 'foo': False}),
(
{'{{cookiecutter.project}}': ['foo', 1], 'bar': False},
{'foobar': ['foo', '1'], 'bar': False},
),
(['foo', '{{cookiecutter.project}}', None], ['foo', 'foobar', None]),
],
)
def test_convert_to_str_complex_variables(self, raw_var, rendered_var) -> None:
"""Verify tree items correctly rendered."""
env = environment.StrictEnvironment()
context = {'project': 'foobar'}
result = prompt.render_variable(env, raw_var, context)
assert result == rendered_var
| TestRenderVariable |
python | pypa__virtualenv | src/virtualenv/create/via_global_ref/builtin/cpython/common.py | {
"start": 1151,
"end": 2653
} | class ____(CPython, WindowsSupports, ABC):
@classmethod
def _executables(cls, interpreter):
# symlink of the python executables does not work reliably, copy always instead
# - https://bugs.python.org/issue42013
# - venv
host = cls.host_python(interpreter)
names = {"python.exe", host.name}
if interpreter.version_info.major == 3: # noqa: PLR2004
names.update({"python3.exe", "python3"})
for path in (host.parent / n for n in names):
yield host, [path.name], RefMust.COPY, RefWhen.ANY
# for more info on pythonw.exe see https://stackoverflow.com/a/30313091
python_w = host.parent / "pythonw.exe"
yield python_w, [python_w.name], RefMust.COPY, RefWhen.ANY
@classmethod
def host_python(cls, interpreter):
return Path(interpreter.system_executable)
def is_mac_os_framework(interpreter):
if interpreter.platform == "darwin":
return interpreter.sysconfig_vars.get("PYTHONFRAMEWORK") == "Python3"
return False
def is_macos_brew(interpreter):
return interpreter.platform == "darwin" and _BREW.fullmatch(interpreter.system_prefix) is not None
_BREW = re.compile(
r"/(usr/local|opt/homebrew)/(opt/python@3\.\d{1,2}|Cellar/python@3\.\d{1,2}/3\.\d{1,2}\.\d{1,2})/Frameworks/"
r"Python\.framework/Versions/3\.\d{1,2}",
)
__all__ = [
"CPython",
"CPythonPosix",
"CPythonWindows",
"is_mac_os_framework",
"is_macos_brew",
]
| CPythonWindows |
python | yaml__pyyaml | lib/yaml/constructor.py | {
"start": 28592,
"end": 28639
} | class ____(UnsafeConstructor):
pass
| Constructor |
python | keras-team__keras | keras/src/initializers/random_initializers.py | {
"start": 2880,
"end": 4855
} | class ____(RandomInitializer):
"""Initializer that generates a truncated normal distribution.
The values generated are similar to values from a
`RandomNormal` initializer, except that values more
than two standard deviations from the mean are
discarded and re-drawn.
Examples:
>>> # Standalone usage:
>>> initializer = TruncatedNormal(mean=0., stddev=1.)
>>> values = initializer(shape=(2, 2))
>>> # Usage in a Keras layer:
>>> initializer = TruncatedNormal(mean=0., stddev=1.)
>>> layer = Dense(3, kernel_initializer=initializer)
Args:
mean: A python scalar or a scalar keras tensor. Mean of the random
values to generate.
stddev: A python scalar or a scalar keras tensor. Standard deviation of
the random values to generate.
seed: A Python integer or instance of
`keras.backend.SeedGenerator`.
Used to make the behavior of the initializer
deterministic. Note that an initializer seeded with an integer
or `None` (unseeded) will produce the same random values
across multiple calls. To get different random values
across multiple calls, use as seed an instance
of `keras.backend.SeedGenerator`.
"""
def __init__(self, mean=0.0, stddev=0.05, seed=None):
self.mean = mean
self.stddev = stddev
super().__init__(seed=seed)
def __call__(self, shape, dtype=None):
return random.truncated_normal(
shape=shape,
mean=self.mean,
stddev=self.stddev,
seed=self.seed,
dtype=dtype,
)
def get_config(self):
base_config = super().get_config()
config = {"mean": self.mean, "stddev": self.stddev}
return {**base_config, **config}
@keras_export(
[
"keras.initializers.RandomUniform",
"keras.initializers.random_uniform",
]
)
| TruncatedNormal |
python | pypa__pipenv | pipenv/vendor/click/core.py | {
"start": 31503,
"end": 44709
} | class ____:
"""The base command implements the minimal API contract of commands.
Most code will never use this as it does not implement a lot of useful
functionality but it can act as the direct subclass of alternative
parsing methods that do not depend on the Click parser.
For instance, this can be used to bridge Click and other systems like
argparse or docopt.
Because base commands do not implement a lot of the API that other
parts of Click take for granted, they are not supported for all
operations. For instance, they cannot be used with the decorators
usually and they have no built-in callback system.
.. versionchanged:: 2.0
Added the `context_settings` parameter.
:param name: the name of the command to use unless a group overrides it.
:param context_settings: an optional dictionary with defaults that are
passed to the context object.
"""
#: The context class to create with :meth:`make_context`.
#:
#: .. versionadded:: 8.0
context_class: t.Type[Context] = Context
#: the default for the :attr:`Context.allow_extra_args` flag.
allow_extra_args = False
#: the default for the :attr:`Context.allow_interspersed_args` flag.
allow_interspersed_args = True
#: the default for the :attr:`Context.ignore_unknown_options` flag.
ignore_unknown_options = False
def __init__(
self,
name: t.Optional[str],
context_settings: t.Optional[t.MutableMapping[str, t.Any]] = None,
) -> None:
#: the name the command thinks it has. Upon registering a command
#: on a :class:`Group` the group will default the command name
#: with this information. You should instead use the
#: :class:`Context`\'s :attr:`~Context.info_name` attribute.
self.name = name
if context_settings is None:
context_settings = {}
#: an optional dictionary with defaults passed to the context.
self.context_settings: t.MutableMapping[str, t.Any] = context_settings
def to_info_dict(self, ctx: Context) -> t.Dict[str, t.Any]:
"""Gather information that could be useful for a tool generating
user-facing documentation. This traverses the entire structure
below this command.
Use :meth:`click.Context.to_info_dict` to traverse the entire
CLI structure.
:param ctx: A :class:`Context` representing this command.
.. versionadded:: 8.0
"""
return {"name": self.name}
def __repr__(self) -> str:
return f"<{self.__class__.__name__} {self.name}>"
def get_usage(self, ctx: Context) -> str:
raise NotImplementedError("Base commands cannot get usage")
def get_help(self, ctx: Context) -> str:
raise NotImplementedError("Base commands cannot get help")
def make_context(
self,
info_name: t.Optional[str],
args: t.List[str],
parent: t.Optional[Context] = None,
**extra: t.Any,
) -> Context:
"""This function when given an info name and arguments will kick
off the parsing and create a new :class:`Context`. It does not
invoke the actual command callback though.
To quickly customize the context class used without overriding
this method, set the :attr:`context_class` attribute.
:param info_name: the info name for this invocation. Generally this
is the most descriptive name for the script or
command. For the toplevel script it's usually
the name of the script, for commands below it's
the name of the command.
:param args: the arguments to parse as list of strings.
:param parent: the parent context if available.
:param extra: extra keyword arguments forwarded to the context
constructor.
.. versionchanged:: 8.0
Added the :attr:`context_class` attribute.
"""
for key, value in self.context_settings.items():
if key not in extra:
extra[key] = value
ctx = self.context_class(
self, info_name=info_name, parent=parent, **extra # type: ignore
)
with ctx.scope(cleanup=False):
self.parse_args(ctx, args)
return ctx
def parse_args(self, ctx: Context, args: t.List[str]) -> t.List[str]:
"""Given a context and a list of arguments this creates the parser
and parses the arguments, then modifies the context as necessary.
This is automatically invoked by :meth:`make_context`.
"""
raise NotImplementedError("Base commands do not know how to parse arguments.")
def invoke(self, ctx: Context) -> t.Any:
"""Given a context, this invokes the command. The default
implementation is raising a not implemented error.
"""
raise NotImplementedError("Base commands are not invocable by default")
def shell_complete(self, ctx: Context, incomplete: str) -> t.List["CompletionItem"]:
"""Return a list of completions for the incomplete value. Looks
at the names of chained multi-commands.
Any command could be part of a chained multi-command, so sibling
commands are valid at any point during command completion. Other
command classes will return more completions.
:param ctx: Invocation context for this command.
:param incomplete: Value being completed. May be empty.
.. versionadded:: 8.0
"""
from pipenv.vendor.click.shell_completion import CompletionItem
results: t.List["CompletionItem"] = []
while ctx.parent is not None:
ctx = ctx.parent
if isinstance(ctx.command, MultiCommand) and ctx.command.chain:
results.extend(
CompletionItem(name, help=command.get_short_help_str())
for name, command in _complete_visible_commands(ctx, incomplete)
if name not in ctx.protected_args
)
return results
@t.overload
def main(
self,
args: t.Optional[t.Sequence[str]] = None,
prog_name: t.Optional[str] = None,
complete_var: t.Optional[str] = None,
standalone_mode: "te.Literal[True]" = True,
**extra: t.Any,
) -> "te.NoReturn":
...
@t.overload
def main(
self,
args: t.Optional[t.Sequence[str]] = None,
prog_name: t.Optional[str] = None,
complete_var: t.Optional[str] = None,
standalone_mode: bool = ...,
**extra: t.Any,
) -> t.Any:
...
def main(
self,
args: t.Optional[t.Sequence[str]] = None,
prog_name: t.Optional[str] = None,
complete_var: t.Optional[str] = None,
standalone_mode: bool = True,
windows_expand_args: bool = True,
**extra: t.Any,
) -> t.Any:
"""This is the way to invoke a script with all the bells and
whistles as a command line application. This will always terminate
the application after a call. If this is not wanted, ``SystemExit``
needs to be caught.
This method is also available by directly calling the instance of
a :class:`Command`.
:param args: the arguments that should be used for parsing. If not
provided, ``sys.argv[1:]`` is used.
:param prog_name: the program name that should be used. By default
the program name is constructed by taking the file
name from ``sys.argv[0]``.
:param complete_var: the environment variable that controls the
bash completion support. The default is
``"_<prog_name>_COMPLETE"`` with prog_name in
uppercase.
:param standalone_mode: the default behavior is to invoke the script
in standalone mode. Click will then
handle exceptions and convert them into
error messages and the function will never
return but shut down the interpreter. If
this is set to `False` they will be
propagated to the caller and the return
value of this function is the return value
of :meth:`invoke`.
:param windows_expand_args: Expand glob patterns, user dir, and
env vars in command line args on Windows.
:param extra: extra keyword arguments are forwarded to the context
constructor. See :class:`Context` for more information.
.. versionchanged:: 8.0.1
Added the ``windows_expand_args`` parameter to allow
disabling command line arg expansion on Windows.
.. versionchanged:: 8.0
When taking arguments from ``sys.argv`` on Windows, glob
patterns, user dir, and env vars are expanded.
.. versionchanged:: 3.0
Added the ``standalone_mode`` parameter.
"""
if args is None:
args = sys.argv[1:]
if os.name == "nt" and windows_expand_args:
args = _expand_args(args)
else:
args = list(args)
if prog_name is None:
prog_name = _detect_program_name()
# Process shell completion requests and exit early.
self._main_shell_completion(extra, prog_name, complete_var)
try:
try:
with self.make_context(prog_name, args, **extra) as ctx:
rv = self.invoke(ctx)
if not standalone_mode:
return rv
# it's not safe to `ctx.exit(rv)` here!
# note that `rv` may actually contain data like "1" which
# has obvious effects
# more subtle case: `rv=[None, None]` can come out of
# chained commands which all returned `None` -- so it's not
# even always obvious that `rv` indicates success/failure
# by its truthiness/falsiness
ctx.exit()
except (EOFError, KeyboardInterrupt) as e:
echo(file=sys.stderr)
raise Abort() from e
except ClickException as e:
if not standalone_mode:
raise
e.show()
sys.exit(e.exit_code)
except OSError as e:
if e.errno == errno.EPIPE:
sys.stdout = t.cast(t.TextIO, PacifyFlushWrapper(sys.stdout))
sys.stderr = t.cast(t.TextIO, PacifyFlushWrapper(sys.stderr))
sys.exit(1)
else:
raise
except Exit as e:
if standalone_mode:
sys.exit(e.exit_code)
else:
# in non-standalone mode, return the exit code
# note that this is only reached if `self.invoke` above raises
# an Exit explicitly -- thus bypassing the check there which
# would return its result
# the results of non-standalone execution may therefore be
# somewhat ambiguous: if there are codepaths which lead to
# `ctx.exit(1)` and to `return 1`, the caller won't be able to
# tell the difference between the two
return e.exit_code
except Abort:
if not standalone_mode:
raise
echo(_("Aborted!"), file=sys.stderr)
sys.exit(1)
def _main_shell_completion(
self,
ctx_args: t.MutableMapping[str, t.Any],
prog_name: str,
complete_var: t.Optional[str] = None,
) -> None:
"""Check if the shell is asking for tab completion, process
that, then exit early. Called from :meth:`main` before the
program is invoked.
:param prog_name: Name of the executable in the shell.
:param complete_var: Name of the environment variable that holds
the completion instruction. Defaults to
``_{PROG_NAME}_COMPLETE``.
.. versionchanged:: 8.2.0
Dots (``.``) in ``prog_name`` are replaced with underscores (``_``).
"""
if complete_var is None:
complete_name = prog_name.replace("-", "_").replace(".", "_")
complete_var = f"_{complete_name}_COMPLETE".upper()
instruction = os.environ.get(complete_var)
if not instruction:
return
from .shell_completion import shell_complete
rv = shell_complete(self, ctx_args, prog_name, complete_var, instruction)
sys.exit(rv)
def __call__(self, *args: t.Any, **kwargs: t.Any) -> t.Any:
"""Alias for :meth:`main`."""
return self.main(*args, **kwargs)
| BaseCommand |
python | milvus-io__pymilvus | pymilvus/client/types.py | {
"start": 23301,
"end": 23869
} | class ____:
def __init__(self, role_name: str, entities: List[milvus_types.UserEntity]):
self._role_name = role_name
users = []
for entity in entities:
if isinstance(entity, milvus_types.UserEntity):
users.append(entity.name)
self._users = tuple(users)
def __repr__(self) -> str:
return f"RoleItem: <role_name:{self.role_name}>, <users:{self.users}>"
@property
def role_name(self):
return self._role_name
@property
def users(self):
return self._users
| RoleItem |
python | django__django | tests/requests_tests/tests.py | {
"start": 753,
"end": 940
} | class ____(MemoryFileUploadHandler):
def handle_raw_input(
self, input_data, META, content_length, boundary, encoding=None
):
raise ValueError
| ErrorFileUploadHandler |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-google-ads/source_google_ads/components.py | {
"start": 12654,
"end": 13333
} | class ____(GoogleAdsHttpRequester):
"""
Custom HTTP requester for ClickView stream.
"""
schema_loader: InlineSchemaLoader = None
def get_request_body_json(
self,
*,
stream_state: Optional[StreamState] = None,
stream_slice: Optional[StreamSlice] = None,
next_page_token: Optional[Mapping[str, Any]] = None,
) -> MutableMapping[str, Any]:
schema = self.schema_loader.get_json_schema()["properties"]
fields = [field for field in schema.keys()]
return {"query": f"SELECT {', '.join(fields)} FROM click_view WHERE segments.date = '{stream_slice['start_time']}'"}
@dataclass
| ClickViewHttpRequester |
python | django__django | tests/queries/tests.py | {
"start": 123452,
"end": 125698
} | class ____(unittest.TestCase):
"""
Tests for the union of two querysets. Bug #12252.
"""
@classmethod
def setUpTestData(cls):
objectas = []
objectbs = []
objectcs = []
a_info = ["one", "two", "three"]
for name in a_info:
o = ObjectA(name=name)
o.save()
objectas.append(o)
b_info = [
("un", 1, objectas[0]),
("deux", 2, objectas[0]),
("trois", 3, objectas[2]),
]
for name, number, objecta in b_info:
o = ObjectB(name=name, num=number, objecta=objecta)
o.save()
objectbs.append(o)
c_info = [("ein", objectas[2], objectbs[2]), ("zwei", objectas[1], objectbs[1])]
for name, objecta, objectb in c_info:
o = ObjectC(name=name, objecta=objecta, objectb=objectb)
o.save()
objectcs.append(o)
def check_union(self, model, Q1, Q2):
filter = model.objects.filter
self.assertEqual(set(filter(Q1) | filter(Q2)), set(filter(Q1 | Q2)))
self.assertEqual(set(filter(Q2) | filter(Q1)), set(filter(Q1 | Q2)))
def test_A_AB(self):
Q1 = Q(name="two")
Q2 = Q(objectb__name="deux")
self.check_union(ObjectA, Q1, Q2)
def test_A_AB2(self):
Q1 = Q(name="two")
Q2 = Q(objectb__name="deux", objectb__num=2)
self.check_union(ObjectA, Q1, Q2)
def test_AB_ACB(self):
Q1 = Q(objectb__name="deux")
Q2 = Q(objectc__objectb__name="deux")
self.check_union(ObjectA, Q1, Q2)
def test_BAB_BAC(self):
Q1 = Q(objecta__objectb__name="deux")
Q2 = Q(objecta__objectc__name="ein")
self.check_union(ObjectB, Q1, Q2)
def test_BAB_BACB(self):
Q1 = Q(objecta__objectb__name="deux")
Q2 = Q(objecta__objectc__objectb__name="trois")
self.check_union(ObjectB, Q1, Q2)
def test_BA_BCA__BAB_BAC_BCA(self):
Q1 = Q(objecta__name="one", objectc__objecta__name="two")
Q2 = Q(
objecta__objectc__name="ein",
objectc__objecta__name="three",
objecta__objectb__name="trois",
)
self.check_union(ObjectB, Q1, Q2)
| UnionTests |
python | encode__django-rest-framework | rest_framework/response.py | {
"start": 388,
"end": 3543
} | class ____(SimpleTemplateResponse):
"""
An HttpResponse that allows its data to be rendered into
arbitrary media types.
"""
def __init__(self, data=None, status=None,
template_name=None, headers=None,
exception=False, content_type=None):
"""
Alters the init arguments slightly.
For example, drop 'template_name', and instead use 'data'.
Setting 'renderer' and 'media_type' will typically be deferred,
For example being set automatically by the `APIView`.
"""
super().__init__(None, status=status)
if isinstance(data, Serializer):
msg = (
'You passed a Serializer instance as data, but '
'probably meant to pass serialized `.data` or '
'`.error`. representation.'
)
raise AssertionError(msg)
self.data = data
self.template_name = template_name
self.exception = exception
self.content_type = content_type
if headers:
for name, value in headers.items():
self[name] = value
# Allow generic typing checking for responses.
def __class_getitem__(cls, *args, **kwargs):
return cls
@property
def rendered_content(self):
renderer = getattr(self, 'accepted_renderer', None)
accepted_media_type = getattr(self, 'accepted_media_type', None)
context = getattr(self, 'renderer_context', None)
assert renderer, ".accepted_renderer not set on Response"
assert accepted_media_type, ".accepted_media_type not set on Response"
assert context is not None, ".renderer_context not set on Response"
context['response'] = self
media_type = renderer.media_type
charset = renderer.charset
content_type = self.content_type
if content_type is None and charset is not None:
content_type = f"{media_type}; charset={charset}"
elif content_type is None:
content_type = media_type
self['Content-Type'] = content_type
ret = renderer.render(self.data, accepted_media_type, context)
if isinstance(ret, str):
assert charset, (
'renderer returned unicode, and did not specify '
'a charset value.'
)
return ret.encode(charset)
if not ret:
del self['Content-Type']
return ret
@property
def status_text(self):
"""
Returns reason text corresponding to our HTTP response status code.
Provided for convenience.
"""
return responses.get(self.status_code, '')
def __getstate__(self):
"""
Remove attributes from the response that shouldn't be cached.
"""
state = super().__getstate__()
for key in (
'accepted_renderer', 'renderer_context', 'resolver_match',
'client', 'request', 'json', 'wsgi_request'
):
if key in state:
del state[key]
state['_closable_objects'] = []
return state
| Response |
python | walkccc__LeetCode | solutions/71. Simplify Path/71.py | {
"start": 0,
"end": 286
} | class ____:
def simplifyPath(self, path: str) -> str:
stack = []
for str in path.split('/'):
if str in ('', '.'):
continue
if str == '..':
if stack:
stack.pop()
else:
stack.append(str)
return '/' + '/'.join(stack)
| Solution |
python | pydantic__pydantic | pydantic/json_schema.py | {
"start": 114939,
"end": 116494
} | class ____:
"""!!! abstract "Usage Documentation"
[`WithJsonSchema` Annotation](../concepts/json_schema.md#withjsonschema-annotation)
Add this as an annotation on a field to override the (base) JSON schema that would be generated for that field.
This provides a way to set a JSON schema for types that would otherwise raise errors when producing a JSON schema,
such as Callable, or types that have an is-instance core schema, without needing to go so far as creating a
custom subclass of pydantic.json_schema.GenerateJsonSchema.
Note that any _modifications_ to the schema that would normally be made (such as setting the title for model fields)
will still be performed.
If `mode` is set this will only apply to that schema generation mode, allowing you
to set different json schemas for validation and serialization.
"""
json_schema: JsonSchemaValue | None
mode: Literal['validation', 'serialization'] | None = None
def __get_pydantic_json_schema__(
self, core_schema: core_schema.CoreSchema, handler: GetJsonSchemaHandler
) -> JsonSchemaValue:
mode = self.mode or handler.mode
if mode != handler.mode:
return handler(core_schema)
if self.json_schema is None:
# This exception is handled in pydantic.json_schema.GenerateJsonSchema._named_required_fields_schema
raise PydanticOmit
else:
return self.json_schema.copy()
def __hash__(self) -> int:
return hash(type(self.mode))
| WithJsonSchema |
python | python-pillow__Pillow | Tests/test_color_lut.py | {
"start": 10622,
"end": 16319
} | class ____:
def test_wrong_args(self) -> None:
with pytest.raises(ValueError, match="should be either an integer"):
ImageFilter.Color3DLUT("small", [1]) # type: ignore[arg-type]
with pytest.raises(ValueError, match="should be either an integer"):
ImageFilter.Color3DLUT((11, 11), [1]) # type: ignore[arg-type]
with pytest.raises(ValueError, match=r"in \[2, 65\] range"):
ImageFilter.Color3DLUT((11, 11, 1), [1])
with pytest.raises(ValueError, match=r"in \[2, 65\] range"):
ImageFilter.Color3DLUT((11, 11, 66), [1])
with pytest.raises(ValueError, match="table should have .+ items"):
ImageFilter.Color3DLUT((3, 3, 3), [1, 1, 1])
with pytest.raises(ValueError, match="table should have .+ items"):
ImageFilter.Color3DLUT((3, 3, 3), [[1, 1, 1]] * 2)
with pytest.raises(ValueError, match="should have a length of 4"):
ImageFilter.Color3DLUT((3, 3, 3), [[1, 1, 1]] * 27, channels=4)
with pytest.raises(ValueError, match="should have a length of 3"):
ImageFilter.Color3DLUT((2, 2, 2), [[1, 1]] * 8)
with pytest.raises(ValueError, match="Only 3 or 4 output"):
ImageFilter.Color3DLUT((2, 2, 2), [[1, 1]] * 8, channels=2)
def test_convert_table(self) -> None:
lut = ImageFilter.Color3DLUT(2, [0, 1, 2] * 8)
assert tuple(lut.size) == (2, 2, 2)
assert lut.name == "Color 3D LUT"
# fmt: off
lut = ImageFilter.Color3DLUT((2, 2, 2), [
(0, 1, 2), (3, 4, 5), (6, 7, 8), (9, 10, 11),
(12, 13, 14), (15, 16, 17), (18, 19, 20), (21, 22, 23)])
# fmt: on
assert tuple(lut.size) == (2, 2, 2)
assert lut.table == list(range(24))
lut = ImageFilter.Color3DLUT((2, 2, 2), [(0, 1, 2, 3)] * 8, channels=4)
assert tuple(lut.size) == (2, 2, 2)
assert lut.table == list(range(4)) * 8
@pytest.mark.skipif(numpy is None, reason="NumPy not installed")
def test_numpy_sources(self) -> None:
assert numpy is not None
table = numpy.ones((5, 6, 7, 3), dtype=numpy.float16)
with pytest.raises(ValueError, match="should have either channels"):
lut = ImageFilter.Color3DLUT((5, 6, 7), table)
table = numpy.ones((7, 6, 5, 3), dtype=numpy.float16)
lut = ImageFilter.Color3DLUT((5, 6, 7), table)
assert isinstance(lut.table, numpy.ndarray)
assert lut.table.dtype == table.dtype
assert lut.table.shape == (table.size,)
table = numpy.ones((7 * 6 * 5, 3), dtype=numpy.float16)
lut = ImageFilter.Color3DLUT((5, 6, 7), table)
assert isinstance(lut.table, numpy.ndarray)
assert lut.table.shape == (table.size,)
table = numpy.ones((7 * 6 * 5 * 3), dtype=numpy.float16)
lut = ImageFilter.Color3DLUT((5, 6, 7), table)
assert isinstance(lut.table, numpy.ndarray)
assert lut.table.shape == (table.size,)
# Check application
Image.new("RGB", (10, 10), 0).filter(lut)
# Check copy
table[0] = 33
assert lut.table[0] == 1
# Check not copy
table = numpy.ones((7 * 6 * 5 * 3), dtype=numpy.float16)
lut = ImageFilter.Color3DLUT((5, 6, 7), table, _copy_table=False)
table[0] = 33
assert lut.table[0] == 33
@pytest.mark.skipif(numpy is None, reason="NumPy not installed")
def test_numpy_formats(self) -> None:
assert numpy is not None
g = Image.linear_gradient("L")
im = Image.merge(
"RGB",
[
g,
g.transpose(Image.Transpose.ROTATE_90),
g.transpose(Image.Transpose.ROTATE_180),
],
)
lut = ImageFilter.Color3DLUT.generate((7, 9, 11), lambda r, g, b: (r, g, b))
lut.table = numpy.array(lut.table, dtype=numpy.float32)[:-1]
with pytest.raises(ValueError, match="should have table_channels"):
im.filter(lut)
lut = ImageFilter.Color3DLUT.generate((7, 9, 11), lambda r, g, b: (r, g, b))
lut.table = numpy.array(lut.table, dtype=numpy.float32).reshape((7 * 9 * 11), 3)
with pytest.raises(ValueError, match="should have table_channels"):
im.filter(lut)
lut = ImageFilter.Color3DLUT.generate((7, 9, 11), lambda r, g, b: (r, g, b))
lut.table = numpy.array(lut.table, dtype=numpy.float16)
assert_image_equal(im, im.filter(lut))
lut = ImageFilter.Color3DLUT.generate((7, 9, 11), lambda r, g, b: (r, g, b))
lut.table = numpy.array(lut.table, dtype=numpy.float32)
assert_image_equal(im, im.filter(lut))
lut = ImageFilter.Color3DLUT.generate((7, 9, 11), lambda r, g, b: (r, g, b))
lut.table = numpy.array(lut.table, dtype=numpy.float64)
assert_image_equal(im, im.filter(lut))
lut = ImageFilter.Color3DLUT.generate((7, 9, 11), lambda r, g, b: (r, g, b))
lut.table = numpy.array(lut.table, dtype=numpy.int32)
im.filter(lut)
lut.table = numpy.array(lut.table, dtype=numpy.int8)
im.filter(lut)
def test_repr(self) -> None:
lut = ImageFilter.Color3DLUT(2, [0, 1, 2] * 8)
assert repr(lut) == "<Color3DLUT from list size=2x2x2 channels=3>"
lut = ImageFilter.Color3DLUT(
(3, 4, 5),
array("f", [0, 0, 0, 0] * (3 * 4 * 5)),
channels=4,
target_mode="YCbCr",
_copy_table=False,
)
assert (
repr(lut)
== "<Color3DLUT from array size=3x4x5 channels=4 target_mode=YCbCr>"
)
| TestColorLut3DFilter |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.