language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | py-pdf__pypdf | pypdf/annotations/_markup_annotations.py | {
"start": 8064,
"end": 8738
} | class ____(MarkupAnnotation):
def __init__(
self,
rect: Union[RectangleObject, tuple[float, float, float, float]],
*,
interior_color: Optional[str] = None,
**kwargs: Any,
) -> None:
super().__init__(**kwargs)
self.update(
{
NameObject("/Type"): NameObject("/Annot"),
NameObject("/Subtype"): NameObject("/Circle"),
NameObject("/Rect"): RectangleObject(rect),
}
)
if interior_color:
self[NameObject("/IC")] = ArrayObject(
[FloatObject(n) for n in hex_to_rgb(interior_color)]
)
| Ellipse |
python | openai__openai-python | src/openai/_exceptions.py | {
"start": 4076,
"end": 4616
} | class ____(OpenAIError):
completion: ChatCompletion
"""The completion that caused this error.
Note: this will *not* be a complete `ChatCompletion` object when streaming as `usage`
will not be included.
"""
def __init__(self, *, completion: ChatCompletion) -> None:
msg = "Could not parse response content as the length limit was reached"
if completion.usage:
msg += f" - {completion.usage}"
super().__init__(msg)
self.completion = completion
| LengthFinishReasonError |
python | conda__conda | conda/gateways/connection/adapters/localfs.py | {
"start": 532,
"end": 1821
} | class ____(BaseAdapter):
def send(
self, request, stream=None, timeout=None, verify=None, cert=None, proxies=None
):
pathname = url_to_path(request.url)
resp = Response()
resp.status_code = 200
resp.url = request.url
try:
stats = stat(pathname)
except OSError as exc:
resp.status_code = 404
message = {
"error": "file does not exist",
"path": pathname,
"exception": repr(exc),
}
fh = SpooledTemporaryFile()
fh.write(ensure_binary(json.dumps(message)))
fh.seek(0)
resp.raw = fh
resp.close = resp.raw.close
else:
modified = formatdate(stats.st_mtime, usegmt=True)
content_type = guess_type(pathname)[0] or "text/plain"
resp.headers = CaseInsensitiveDict(
{
"Content-Type": content_type,
"Content-Length": stats.st_size,
"Last-Modified": modified,
}
)
resp.raw = open(pathname, "rb")
resp.close = resp.raw.close
return resp
def close(self):
pass # pragma: no cover
| LocalFSAdapter |
python | coleifer__peewee | tests/regressions.py | {
"start": 31460,
"end": 32134
} | class ____(ModelTestCase):
requires = [User]
def tearDown(self):
try:
self.execute('drop view user_testview_fm')
except Exception as exc:
pass
super(TestViewFieldMapping, self).tearDown()
def test_view_field_mapping(self):
user = User.create(username='huey')
self.execute('create view user_testview_fm as '
'select id, username from users')
class View(User):
class Meta:
table_name = 'user_testview_fm'
self.assertEqual([(v.id, v.username) for v in View.select()],
[(user.id, 'huey')])
| TestViewFieldMapping |
python | getsentry__sentry | src/sentry/api/endpoints/organization_events_spans_performance.py | {
"start": 15943,
"end": 16913
} | class ____:
op: str
group: str
description: str | None
frequency: int | None
count: int | None
avg_occurrences: float | None
sum_exclusive_time: float | None
p50_exclusive_time: float | None
p75_exclusive_time: float | None
p95_exclusive_time: float | None
p99_exclusive_time: float | None
def serialize(self) -> Any:
return {
"op": self.op,
"group": self.group.rjust(16, "0"),
"description": self.description,
"frequency": self.frequency,
"count": self.count,
"avgOccurrences": self.avg_occurrences,
"sumExclusiveTime": self.sum_exclusive_time,
"p50ExclusiveTime": self.p50_exclusive_time,
"p75ExclusiveTime": self.p75_exclusive_time,
"p95ExclusiveTime": self.p95_exclusive_time,
"p99ExclusiveTime": self.p99_exclusive_time,
}
@dataclasses.dataclass(frozen=True)
| SuspectSpan |
python | google__pytype | pytype/pyc/opcodes.py | {
"start": 10117,
"end": 10228
} | class ____(OpcodeWithArg): # Arg: Number of tuple items
_FLAGS = HAS_ARGUMENT
__slots__ = ()
| UNPACK_SEQUENCE |
python | gevent__gevent | src/greentest/3.10/test_socket.py | {
"start": 168748,
"end": 168922
} | class ____(SendrecvmsgConnectedBase,
ConnectedStreamTestMixin, TCPTestBase):
pass
@requireAttrs(socket.socket, "sendmsg")
| SendrecvmsgTCPTestBase |
python | facebook__pyre-check | tools/upgrade/commands/expand_target_coverage.py | {
"start": 676,
"end": 4357
} | class ____(ErrorSuppressingCommand):
def __init__(
self,
command_arguments: CommandArguments,
*,
repository: Repository,
local_configuration: Optional[str],
fixme_threshold: bool,
target_prefix: str,
) -> None:
super().__init__(command_arguments, repository)
self._local_configuration: Final[Optional[str]] = local_configuration
self._fixme_threshold: bool = fixme_threshold
self._target_prefix: str = target_prefix
@staticmethod
def from_arguments(
arguments: argparse.Namespace, repository: Repository
) -> "ExpandTargetCoverage":
command_arguments = CommandArguments.from_arguments(arguments)
return ExpandTargetCoverage(
command_arguments,
repository=repository,
local_configuration=arguments.local_configuration,
fixme_threshold=arguments.fixme_threshold,
target_prefix=arguments.target_prefix,
)
@classmethod
def add_arguments(cls, parser: argparse.ArgumentParser) -> None:
super(ExpandTargetCoverage, cls).add_arguments(parser)
parser.set_defaults(command=cls.from_arguments)
parser.add_argument(
"-l",
"--local-configuration",
type=path_exists,
help="Path to project root with local configuration",
)
parser.add_argument(
"--fixme-threshold",
type=int,
help="Ignore all errors in a file if fixme count exceeds threshold.",
)
parser.add_argument(
"--target-prefix",
type=str,
help="The prefix to include in the expanded target.",
)
@override
def run(self) -> None:
local_root = self._local_configuration
local_root = Path(local_root) if local_root else Path.cwd()
# Do not change if configurations exist below given root
existing_configurations = find_files(local_root, ".pyre_configuration.local")
if existing_configurations and not existing_configurations == [
str(local_root / ".pyre_configuration.local")
]:
LOG.warning(
"Cannot expand targets because nested configurations exist:\n%s",
"\n".join(existing_configurations),
)
return
# Expand coverage
local_configuration = Configuration.find_local_configuration(local_root)
if not local_configuration:
LOG.warning("Could not find a local configuration to codemod.")
return
LOG.info("Expanding typecheck targets in `%s`", local_configuration)
configuration = Configuration(local_configuration)
existing_targets = configuration.targets
glob_target = "{}//{}/...".format(self._target_prefix, str(local_root))
if existing_targets == [glob_target]:
LOG.info("Configuration is already fully expanded.")
return
configuration.add_targets([glob_target])
configuration.deduplicate_targets()
configuration.write()
# Suppress errors
self._get_and_suppress_errors(
configuration,
error_source=ErrorSource.GENERATE,
fixme_threshold=self._fixme_threshold,
fixme_threshold_fallback_mode=LocalMode.IGNORE,
)
self._repository.commit_changes(
commit=(not self._no_commit),
title=f"Expand target type coverage in {local_root}",
summary="Expanding type coverage of targets in configuration.",
set_dependencies=False,
)
| ExpandTargetCoverage |
python | getsentry__sentry | src/sentry/hybridcloud/models/webhookpayload.py | {
"start": 571,
"end": 689
} | class ____(TextChoices):
SENTRY_REGION = "sentry_region"
CODECOV = "codecov"
@control_silo_model
| DestinationType |
python | huggingface__transformers | src/transformers/models/grounding_dino/modeling_grounding_dino.py | {
"start": 1598,
"end": 4462
} | class ____(nn.Module):
def forward(
self,
value: Tensor,
value_spatial_shapes: Tensor,
value_spatial_shapes_list: list[tuple],
level_start_index: Tensor,
sampling_locations: Tensor,
attention_weights: Tensor,
im2col_step: int,
):
batch_size, _, num_heads, hidden_dim = value.shape
_, num_queries, num_heads, num_levels, num_points, _ = sampling_locations.shape
value_list = value.split([height * width for height, width in value_spatial_shapes_list], dim=1)
sampling_grids = 2 * sampling_locations - 1
sampling_value_list = []
for level_id, (height, width) in enumerate(value_spatial_shapes_list):
# batch_size, height*width, num_heads, hidden_dim
# -> batch_size, height*width, num_heads*hidden_dim
# -> batch_size, num_heads*hidden_dim, height*width
# -> batch_size*num_heads, hidden_dim, height, width
value_l_ = (
value_list[level_id]
.flatten(2)
.transpose(1, 2)
.reshape(batch_size * num_heads, hidden_dim, height, width)
)
# batch_size, num_queries, num_heads, num_points, 2
# -> batch_size, num_heads, num_queries, num_points, 2
# -> batch_size*num_heads, num_queries, num_points, 2
sampling_grid_l_ = sampling_grids[:, :, :, level_id].transpose(1, 2).flatten(0, 1)
# batch_size*num_heads, hidden_dim, num_queries, num_points
sampling_value_l_ = nn.functional.grid_sample(
value_l_,
sampling_grid_l_,
mode="bilinear",
padding_mode="zeros",
align_corners=False,
)
sampling_value_list.append(sampling_value_l_)
# (batch_size, num_queries, num_heads, num_levels, num_points)
# -> (batch_size, num_heads, num_queries, num_levels, num_points)
# -> (batch_size, num_heads, 1, num_queries, num_levels*num_points)
attention_weights = attention_weights.transpose(1, 2).reshape(
batch_size * num_heads, 1, num_queries, num_levels * num_points
)
output = (
(torch.stack(sampling_value_list, dim=-2).flatten(-2) * attention_weights)
.sum(-1)
.view(batch_size, num_heads * hidden_dim, num_queries)
)
return output.transpose(1, 2).contiguous()
@dataclass
@auto_docstring(
custom_intro="""
Base class for outputs of the GroundingDinoDecoder. This class adds two attributes to
BaseModelOutputWithCrossAttentions, namely:
- a stacked tensor of intermediate decoder hidden states (i.e. the output of each decoder layer)
- a stacked tensor of intermediate reference points.
"""
)
| MultiScaleDeformableAttention |
python | tensorflow__tensorflow | tensorflow/python/framework/tensor.py | {
"start": 30812,
"end": 42689
} | class ____(DenseSpec, type_spec.BatchableTypeSpec,
trace_type.Serializable, internal.TensorSpec):
"""Describes the type of a tf.Tensor.
>>> t = tf.constant([[1,2,3],[4,5,6]])
>>> tf.TensorSpec.from_tensor(t)
TensorSpec(shape=(2, 3), dtype=tf.int32, name=None)
Contains metadata for describing the nature of `tf.Tensor` objects
accepted or returned by some TensorFlow APIs.
For example, it can be used to constrain the type of inputs accepted by
a tf.function:
>>> @tf.function(input_signature=[tf.TensorSpec([1, None])])
... def constrained_foo(t):
... print("tracing...")
... return t
Now the `tf.function` is able to assume that `t` is always of the type
`tf.TensorSpec([1, None])` which will avoid retracing as well as enforce the
type restriction on inputs.
As a result, the following call with tensor of type `tf.TensorSpec([1, 2])`
triggers a trace and succeeds:
>>> constrained_foo(tf.constant([[1., 2]])).numpy()
tracing...
array([[1., 2.]], dtype=float32)
The following subsequent call with tensor of type `tf.TensorSpec([1, 4])`
does not trigger a trace and succeeds:
>>> constrained_foo(tf.constant([[1., 2, 3, 4]])).numpy()
array([[1., 2., 3., 4.], dtype=float32)
But the following call with tensor of type `tf.TensorSpec([2, 2])` fails:
>>> constrained_foo(tf.constant([[1., 2], [3, 4]])).numpy()
Traceback (most recent call last):
...
TypeError: Binding inputs to tf.function `constrained_foo` failed ...
"""
__slots__ = []
@classmethod
def experimental_type_proto(cls) -> Type[struct_pb2.TensorSpecProto]:
"""Returns the type of proto associated with TensorSpec serialization."""
return struct_pb2.TensorSpecProto
@classmethod
def experimental_from_proto(
cls, proto: struct_pb2.TensorSpecProto) -> "TensorSpec":
"""Returns a TensorSpec instance based on the serialized proto."""
return TensorSpec(
shape=tensor_shape.TensorShape.experimental_from_proto(proto.shape),
dtype=proto.dtype,
name=proto.name if proto.name else None)
def experimental_as_proto(self) -> struct_pb2.TensorSpecProto:
"""Returns a proto representation of the TensorSpec instance."""
return struct_pb2.TensorSpecProto(
shape=self.shape.experimental_as_proto(),
dtype=self.dtype.experimental_as_proto().datatype,
name=self.name)
def is_compatible_with(self, spec_or_tensor): # pylint:disable=useless-super-delegation,arguments-renamed
"""Returns True if spec_or_tensor is compatible with this TensorSpec.
Two tensors are considered compatible if they have the same dtype
and their shapes are compatible (see `tf.TensorShape.is_compatible_with`).
Args:
spec_or_tensor: A tf.TensorSpec or a tf.Tensor
Returns:
True if spec_or_tensor is compatible with self.
"""
return super(TensorSpec, self).is_compatible_with(spec_or_tensor)
def is_subtype_of(self, other):
if not isinstance(other, TensorSpec):
return False
return (
(not self.name or self.name == other.name)
and self.shape.is_subtype_of(other.shape)
and self.dtype.is_subtype_of(other.dtype)
)
def placeholder_value(self, placeholder_context):
"""Generates a graph placeholder with the given TensorSpec information."""
if placeholder_context.unnest_only:
return self
name = self.name or placeholder_context.naming_scope
context_graph = placeholder_context.context_graph
if placeholder_context.with_none_control_dependencies:
# Note: setting ops.control_dependencies(None) ensures we always put
# capturing placeholders outside of any control flow context.
with context_graph.control_dependencies(None):
placeholder = self._graph_placeholder(context_graph, name=name)
else:
placeholder = self._graph_placeholder(context_graph, name=name)
if name is not None:
# Record the requested/user-specified name in case it's different than
# the uniquified name, for validation when exporting signatures.
placeholder.op._set_attr( # pylint: disable=protected-access
"_user_specified_name",
attr_value_pb2.AttrValue(s=compat.as_bytes(name)))
handle_data = self.dtype._handle_data # pylint: disable=protected-access
if (
handle_data is not None
and handle_data.shape_inference.is_set
and handle_data.shape_inference.shape_and_type
):
handle_data_util.set_handle_data(placeholder, handle_data.shape_inference)
# Record the composite device as an attribute to the placeholder.
# This attribute would be propagated into the arg_attr of the FunctionDef.
# Currently, a packed eager tensor is always placed on a CompositeDevice.
if placeholder_context.composite_device_name is not None:
placeholder.op._set_attr( # pylint: disable=protected-access
"_composite_device",
attr_value_pb2.AttrValue(s=compat.as_bytes(
placeholder_context.composite_device_name)))
return placeholder
def _graph_placeholder(self, graph, name=None):
"""Graph-only version of tf.compat.v1.placeholder(), for internal use only."""
dtype = self.dtype.base_dtype
shape = self.shape
dtype_value = attr_value_pb2.AttrValue(type=dtype.as_datatype_enum)
if isinstance(shape, (list, tuple)):
shape = tensor_shape.TensorShape(shape)
shape = attr_value_pb2.AttrValue(shape=shape.as_proto())
attrs = {"dtype": dtype_value, "shape": shape}
try:
op = graph._create_op_internal( # pylint: disable=protected-access
"Placeholder", [], [dtype], input_types=[],
attrs=attrs, name=name)
except ValueError as e:
# TODO(b/262413656) Sometimes parameter names are not valid op names, in
# which case an unnamed placeholder is created instead. Update this logic
# to sanitize the name instead of falling back on unnamed placeholders.
logging.warning(e)
op = graph._create_op_internal( # pylint: disable=protected-access
"Placeholder", [], [dtype], input_types=[], attrs=attrs)
(result,) = op.outputs
if op_callbacks.should_invoke_op_callbacks():
# TODO(b/147670703): Once the special-op creation code paths
# are unified. Remove this `if` block.
callback_outputs = op_callbacks.invoke_op_callbacks(
"Placeholder", tuple(), attrs, tuple(op.outputs),
op_name=name, graph=graph)
if callback_outputs is not None:
(result,) = callback_outputs
return result
def to_tensors(self, value):
value = self.cast(value, trace_type.InternalCastContext())
if not value.shape.is_subtype_of(self.shape):
raise TypeError(
f"Received tensor of shape {value.shape} instead of {self.shape}"
)
return [value]
def from_tensors(self, tensors):
tensor = next(tensors)
handle_data = self.dtype._handle_data # pylint: disable=protected-access
if handle_data:
handle_data_util.set_handle_data(tensor, handle_data.shape_inference)
return tensor
def flatten(self):
return [self]
def cast(self, value, casting_context):
"""Cast value to a tensor that is a subtype of this TensorSpec."""
# This method is mainly used to cast Python primitives to tensor.
# Currently, cast tensor to tensor with different types are not supported.
# For example, casting int32 to float32 would raise a ValueError.
if casting_context.allow_specs and isinstance(value, TensorSpec):
assert value.is_subtype_of(self), f"Can not cast {value!r} to {self!r}"
return self
if not isinstance(value, Tensor):
value = tensor_conversion_registry.convert(value, self.dtype)
value_spec = TensorSpec(value.shape, value.dtype, self.name)
if not value_spec.is_subtype_of(self):
if self.is_subtype_of(value_spec):
value.set_shape(self.shape)
else:
raise TypeError(f"Can not cast {value_spec!r} to {self!r}")
return value
def _alias_id(self):
"""Returns an id specifying identical tensors to avoid duplication."""
alias_id = None
if self.dtype._handle_data: # pylint: disable=protected-access
alias_id = self.dtype._handle_data.alias_id # pylint: disable=protected-access
return alias_id
@classmethod
def from_spec(cls, spec, name=None):
"""Returns a `TensorSpec` with the same shape and dtype as `spec`.
>>> spec = tf.TensorSpec(shape=[8, 3], dtype=tf.int32, name="OriginalName")
>>> tf.TensorSpec.from_spec(spec, "NewName")
TensorSpec(shape=(8, 3), dtype=tf.int32, name='NewName')
Args:
spec: The `TypeSpec` used to create the new `TensorSpec`.
name: The name for the new `TensorSpec`. Defaults to `spec.name`.
"""
return cls(spec.shape, spec.dtype, name or spec.name)
@classmethod
def from_tensor(cls, tensor, name=None):
"""Returns a `TensorSpec` that describes `tensor`.
>>> tf.TensorSpec.from_tensor(tf.constant([1, 2, 3]))
TensorSpec(shape=(3,), dtype=tf.int32, name=None)
Args:
tensor: The `tf.Tensor` that should be described.
name: A name for the `TensorSpec`. Defaults to `tensor.op.name`.
Returns:
A `TensorSpec` that describes `tensor`.
"""
if isinstance(tensor, core_tf_types.Value):
return TensorSpec(tensor.shape, tensor.dtype, name)
elif isinstance(tensor, core_tf_types.Symbol):
# TODO(b/249802365): Return a sanitized version of op name or no name.
return TensorSpec(tensor.shape, tensor.dtype, name or tensor.op.name)
else:
raise ValueError(
f"`tensor` should be a tf.Tensor, but got type {type(tensor)}.")
@property
def value_type(self):
"""The Python type for values that are compatible with this TypeSpec."""
return Tensor
def _to_components(self, value):
assert isinstance(value, core_tf_types.Tensor)
return value
def _from_components(self, components):
return components
def _from_compatible_tensor_list(self, tensor_list):
# TODO(b/112266545): It would be cleaner to create a new `ensure_shape()`
# op here and return that, instead of mutating the input's shape using
# `Tensor.set_shape()`. However, that would add extra ops, which could
# impact performance. When this bug is resolved, we should be able to add
# the `ensure_shape()` ops and optimize them away using contextual shape
# information.
assert len(tensor_list) == 1
tensor_list[0].set_shape(self._shape)
return tensor_list[0]
def _to_batchable_tensor_list(self, value, batched=False):
if batched and self._shape.merge_with(value.shape).ndims == 0:
raise ValueError("Unbatching a tensor is only supported for rank >= 1")
return self._to_components(value)
def _batch(self, batch_size):
return TensorSpec(
tensor_shape.TensorShape([batch_size]).concatenate(self._shape),
self._dtype)
def _unbatch(self):
if self._shape.ndims == 0:
raise ValueError("Unbatching a tensor is only supported for rank >= 1")
return TensorSpec(self._shape[1:], self._dtype)
@property
def _flat_tensor_specs(self):
return [self]
def _to_tensor_list(self, value):
return [self._to_components(value)]
def _to_batched_tensor_list(self, value):
return self._to_tensor_list(value)
# TODO(b/206014848): Helper function to support logic that does not consider
# Tensor name. Will be removed once load-bearing usages of Tensor name are
# fixed.
def _without_tensor_names(self) -> "TensorSpec":
"""Returns a version of `TensorSpec` with the name removed."""
if self.name is None:
return self
else:
return TensorSpec(self.shape, self.dtype)
trace_type.register_serializable(TensorSpec)
trace_type.register_tensor_type(TensorSpec)
| TensorSpec |
python | apache__airflow | providers/microsoft/azure/tests/unit/microsoft/azure/triggers/test_message_bus.py | {
"start": 9021,
"end": 12178
} | class ____:
"""Test integration scenarios and edge cases."""
@pytest.mark.asyncio
async def test_multiple_messages_processing(self):
"""Test processing multiple messages in sequence."""
with patch("airflow.providers.microsoft.azure.triggers.message_bus.MessageHook"):
trigger = AzureServiceBusQueueTrigger(
queues=["test_queue"],
poll_interval=0.01, # Very short for testing
)
messages_as_str = ["msg1", "msg2", "msg3"]
mock_messages = [Mock(body=msg.encode("utf-8")) for msg in messages_as_str]
trigger.message_hook.read_message = Mock(side_effect=mock_messages + [None])
# Collect events
events = []
async for event in trigger.run():
events.append(event)
if len(events) >= 3:
break
assert len(events) == 3
received_messages = [event.payload["message"] for event in events]
assert received_messages == messages_as_str
def test_queue_trigger_with_empty_queues_list(self):
"""Test queue trigger with empty queues list."""
with patch("airflow.providers.microsoft.azure.triggers.message_bus.MessageHook"):
trigger = AzureServiceBusQueueTrigger(queues=[])
assert trigger.queues == []
def test_subscription_trigger_with_empty_topics_list(self):
"""Test subscription trigger with empty topics list."""
with patch("airflow.providers.microsoft.azure.triggers.message_bus.MessageHook"):
trigger = AzureServiceBusSubscriptionTrigger(
topics=[], subscription_name="test-sub", azure_service_bus_conn_id="test_conn"
)
assert trigger.topics == []
def test_message_hook_initialization(self):
"""Test that MessageHook is properly initialized."""
conn_id = "test_connection"
with patch("airflow.providers.microsoft.azure.triggers.message_bus.MessageHook") as mock_hook_class:
trigger = AzureServiceBusQueueTrigger(queues=["test"], azure_service_bus_conn_id=conn_id)
# Verify the hook was initialized with the correct connection ID
mock_hook_class.assert_called_once_with(azure_service_bus_conn_id=conn_id)
# Also verify the trigger has the message_hook attribute
assert hasattr(trigger, "message_hook")
def test_message_hook_properly_configured(self):
"""Test that MessageHook is properly configured with connection."""
conn_id = "test_connection"
with patch("airflow.providers.microsoft.azure.triggers.message_bus.MessageHook") as mock_hook_class:
trigger = AzureServiceBusQueueTrigger(queues=["test"], azure_service_bus_conn_id=conn_id)
# Verify the hook was called with the correct parameters
mock_hook_class.assert_called_once_with(azure_service_bus_conn_id=conn_id)
assert hasattr(trigger, "message_hook")
# Verify the connection_id is set correctly
assert trigger.connection_id == conn_id
| TestIntegrationScenarios |
python | catalyst-team__catalyst | catalyst/loggers/wandb.py | {
"start": 301,
"end": 6295
} | class ____(ILogger):
"""Wandb logger for parameters, metrics, images and other artifacts.
W&B documentation: https://docs.wandb.com
Args:
Project: Name of the project in W&B to log to.
name: Name of the run in W&B to log to.
config: Configuration Dictionary for the experiment.
entity: Name of W&B entity(team) to log to.
log_batch_metrics: boolean flag to log batch metrics
(default: SETTINGS.log_batch_metrics or False).
log_epoch_metrics: boolean flag to log epoch metrics
(default: SETTINGS.log_epoch_metrics or True).
kwargs: Optional,
additional keyword arguments to be passed directly to the wandb.init
Python API examples:
.. code-block:: python
from catalyst import dl
runner = dl.SupervisedRunner()
runner.train(
...,
loggers={"wandb": dl.WandbLogger(project="wandb_test", name="expeirment_1")}
)
.. code-block:: python
from catalyst import dl
class CustomRunner(dl.IRunner):
# ...
def get_loggers(self):
return {
"console": dl.ConsoleLogger(),
"wandb": dl.WandbLogger(project="wandb_test", name="experiment_1")
}
# ...
runner = CustomRunner().run()
"""
def __init__(
self,
project: str,
name: Optional[str] = None,
entity: Optional[str] = None,
log_batch_metrics: bool = SETTINGS.log_batch_metrics,
log_epoch_metrics: bool = SETTINGS.log_epoch_metrics,
**kwargs,
) -> None:
super().__init__(
log_batch_metrics=log_batch_metrics, log_epoch_metrics=log_epoch_metrics
)
if self.log_batch_metrics:
warnings.warn(
"Wandb does NOT support several x-axes for logging."
"For this reason, everything has to be logged in the batch-based regime."
)
self.project = project
self.name = name
self.entity = entity
self.run = wandb.init(
project=self.project,
name=self.name,
entity=self.entity,
allow_val_change=True,
**kwargs,
)
@property
def logger(self):
"""Internal logger/experiment/etc. from the monitoring system."""
return self.run
def _log_metrics(
self, metrics: Dict[str, float], step: int, loader_key: str, prefix=""
):
for key, value in metrics.items():
self.run.log({f"{key}_{prefix}/{loader_key}": value}, step=step)
def log_artifact(
self,
tag: str,
runner: "IRunner",
artifact: object = None,
path_to_artifact: str = None,
scope: str = None,
) -> None:
"""Logs artifact (arbitrary file like audio, video, weights) to the logger."""
if artifact is None and path_to_artifact is None:
ValueError("Both artifact and path_to_artifact cannot be None")
artifact = wandb.Artifact(
name=self.run.id + "_aritfacts",
type="artifact",
metadata={"loader_key": runner.loader_key, "scope": scope},
)
if artifact:
art_file_dir = os.path.join("wandb", self.run.id, "artifact_dumps")
os.makedirs(art_file_dir, exist_ok=True)
art_file = open(os.path.join(art_file_dir, tag), "wb")
pickle.dump(artifact, art_file)
art_file.close()
artifact.add_file(str(os.path.join(art_file_dir, tag)))
else:
artifact.add_file(path_to_artifact)
self.run.log_artifact(artifact)
def log_image(
self,
tag: str,
image: np.ndarray,
runner: "IRunner",
scope: str = None,
) -> None:
"""Logs image to the logger."""
if scope == "batch" or scope == "loader":
log_path = "_".join(
[tag, f"epoch-{runner.epoch_step:04d}", f"loader-{runner.loader}"]
)
elif scope == "epoch":
log_path = "_".join([tag, f"epoch-{runner.epoch_step:04d}"])
elif scope == "experiment" or scope is None:
log_path = tag
step = runner.sample_step if self.log_batch_metrics else runner.epoch_step
self.run.log({f"{log_path}.png": wandb.Image(image)}, step=step)
def log_hparams(self, hparams: Dict, runner: "IRunner" = None) -> None:
"""Logs hyperparameters to the logger."""
self.run.config.update(hparams)
def log_metrics(
self,
metrics: Dict[str, float],
scope: str,
runner: "IRunner",
) -> None:
"""Logs batch and epoch metrics to wandb."""
step = runner.sample_step if self.log_batch_metrics else runner.epoch_step
if scope == "batch" and self.log_batch_metrics:
metrics = {k: float(v) for k, v in metrics.items()}
self._log_metrics(
metrics=metrics,
step=step,
loader_key=runner.loader_key,
prefix="batch",
)
elif scope == "loader" and self.log_epoch_metrics:
self._log_metrics(
metrics=metrics,
step=step,
loader_key=runner.loader_key,
prefix="epoch",
)
elif scope == "epoch" and self.log_epoch_metrics:
loader_key = "_epoch_"
per_loader_metrics = metrics[loader_key]
self._log_metrics(
metrics=per_loader_metrics,
step=step,
loader_key=loader_key,
prefix="epoch",
)
def flush_log(self) -> None:
"""Flushes the logger."""
pass
def close_log(self, scope: str = None) -> None:
"""Closes the logger."""
self.run.finish()
__all__ = ["WandbLogger"]
| WandbLogger |
python | scipy__scipy | scipy/io/_fortran.py | {
"start": 561,
"end": 737
} | class ____(TypeError, OSError):
"""Indicates that the file ended mid-record.
Descends from TypeError for backward compatibility.
"""
pass
| FortranFormattingError |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/ruff/RUF066.py | {
"start": 510,
"end": 1185
} | class ____(metaclass=abc.ABCMeta): # Test properies inside of an ABC class
@property
@abc.abstractmethod
def abstr_prop1(self): ... # OK: Abstract methods doesn't need to return anything
@property
@abc.abstractmethod
def abstr_prop2(self): # OK: Abstract methods doesn't need to return anything
"""
A cool docstring
"""
@property
def prop1(self): # OK: Returning a value
return 1
@property
def prop2(self): # ERROR: Not returning something (even when we are inside an ABC)
50
def method(self): # OK: Not a property
x = 1
def func(): # OK: Not a property
x = 1
| UserMeta |
python | streamlit__streamlit | lib/streamlit/elements/widgets/radio.py | {
"start": 2623,
"end": 16250
} | class ____:
@overload
def radio(
self,
label: str,
options: Sequence[Never],
index: int = 0,
format_func: Callable[[Any], Any] = str,
key: Key | None = None,
help: str | None = None,
on_change: WidgetCallback | None = None,
args: WidgetArgs | None = None,
kwargs: WidgetKwargs | None = None,
*, # keyword-only args:
disabled: bool = False,
horizontal: bool = False,
captions: Sequence[str] | None = None,
label_visibility: LabelVisibility = "visible",
width: Width = "content",
) -> None: ...
@overload
def radio(
self,
label: str,
options: OptionSequence[T],
index: int = 0,
format_func: Callable[[Any], Any] = str,
key: Key | None = None,
help: str | None = None,
on_change: WidgetCallback | None = None,
args: WidgetArgs | None = None,
kwargs: WidgetKwargs | None = None,
*, # keyword-only args:
disabled: bool = False,
horizontal: bool = False,
captions: Sequence[str] | None = None,
label_visibility: LabelVisibility = "visible",
width: Width = "content",
) -> T: ...
@overload
def radio(
self,
label: str,
options: OptionSequence[T],
index: None,
format_func: Callable[[Any], Any] = str,
key: Key | None = None,
help: str | None = None,
on_change: WidgetCallback | None = None,
args: WidgetArgs | None = None,
kwargs: WidgetKwargs | None = None,
*, # keyword-only args:
disabled: bool = False,
horizontal: bool = False,
captions: Sequence[str] | None = None,
label_visibility: LabelVisibility = "visible",
width: Width = "content",
) -> T | None: ...
@gather_metrics("radio")
def radio(
self,
label: str,
options: OptionSequence[T],
index: int | None = 0,
format_func: Callable[[Any], Any] = str,
key: Key | None = None,
help: str | None = None,
on_change: WidgetCallback | None = None,
args: WidgetArgs | None = None,
kwargs: WidgetKwargs | None = None,
*, # keyword-only args:
disabled: bool = False,
horizontal: bool = False,
captions: Sequence[str] | None = None,
label_visibility: LabelVisibility = "visible",
width: Width = "content",
) -> T | None:
r"""Display a radio button widget.
Parameters
----------
label : str
A short label explaining to the user what this radio group is for.
The label can optionally contain GitHub-flavored Markdown of the
following types: Bold, Italics, Strikethroughs, Inline Code, Links,
and Images. Images display like icons, with a max height equal to
the font height.
Unsupported Markdown elements are unwrapped so only their children
(text contents) render. Display unsupported elements as literal
characters by backslash-escaping them. E.g.,
``"1\. Not an ordered list"``.
See the ``body`` parameter of |st.markdown|_ for additional,
supported Markdown directives.
For accessibility reasons, you should never set an empty label, but
you can hide it with ``label_visibility`` if needed. In the future,
we may disallow empty labels by raising an exception.
.. |st.markdown| replace:: ``st.markdown``
.. _st.markdown: https://docs.streamlit.io/develop/api-reference/text/st.markdown
options : Iterable
Labels for the select options in an ``Iterable``. This can be a
``list``, ``set``, or anything supported by ``st.dataframe``. If
``options`` is dataframe-like, the first column will be used. Each
label will be cast to ``str`` internally by default.
Labels can include markdown as described in the ``label`` parameter
and will be cast to str internally by default.
index : int or None
The index of the preselected option on first render. If ``None``,
will initialize empty and return ``None`` until the user selects an option.
Defaults to 0 (the first option).
format_func : function
Function to modify the display of radio options. It receives
the raw option as an argument and should output the label to be
shown for that option. This has no impact on the return value of
the radio.
key : str or int
An optional string or integer to use as the unique key for the widget.
If this is omitted, a key will be generated for the widget
based on its content. No two widgets may have the same key.
help : str or None
A tooltip that gets displayed next to the widget label. Streamlit
only displays the tooltip when ``label_visibility="visible"``. If
this is ``None`` (default), no tooltip is displayed.
The tooltip can optionally contain GitHub-flavored Markdown,
including the Markdown directives described in the ``body``
parameter of ``st.markdown``.
on_change : callable
An optional callback invoked when this radio's value changes.
args : list or tuple
An optional list or tuple of args to pass to the callback.
kwargs : dict
An optional dict of kwargs to pass to the callback.
disabled : bool
An optional boolean that disables the radio button if set to
``True``. The default is ``False``.
horizontal : bool
An optional boolean, which orients the radio group horizontally.
The default is false (vertical buttons).
captions : iterable of str or None
A list of captions to show below each radio button. If None (default),
no captions are shown.
label_visibility : "visible", "hidden", or "collapsed"
The visibility of the label. The default is ``"visible"``. If this
is ``"hidden"``, Streamlit displays an empty spacer instead of the
label, which can help keep the widget aligned with other widgets.
If this is ``"collapsed"``, Streamlit displays no label or spacer.
width : "content", "stretch", or int
The width of the radio button widget. This can be one of the
following:
- ``"content"`` (default): The width of the widget matches the
width of its content, but doesn't exceed the width of the parent
container.
- ``"stretch"``: The width of the widget matches the width of the
parent container.
- An integer specifying the width in pixels: The widget has a
fixed width. If the specified width is greater than the width of
the parent container, the width of the widget matches the width
of the parent container.
Returns
-------
any
The selected option or ``None`` if no option is selected.
This is a copy of the selected option, not the original.
Example
-------
>>> import streamlit as st
>>>
>>> genre = st.radio(
... "What's your favorite movie genre",
... [":rainbow[Comedy]", "***Drama***", "Documentary :movie_camera:"],
... captions=[
... "Laugh out loud.",
... "Get the popcorn.",
... "Never stop learning.",
... ],
... )
>>>
>>> if genre == ":rainbow[Comedy]":
... st.write("You selected comedy.")
... else:
... st.write("You didn't select comedy.")
.. output::
https://doc-radio.streamlit.app/
height: 300px
To initialize an empty radio widget, use ``None`` as the index value:
>>> import streamlit as st
>>>
>>> genre = st.radio(
... "What's your favorite movie genre",
... [":rainbow[Comedy]", "***Drama***", "Documentary :movie_camera:"],
... index=None,
... )
>>>
>>> st.write("You selected:", genre)
.. output::
https://doc-radio-empty.streamlit.app/
height: 300px
"""
ctx = get_script_run_ctx()
return self._radio(
label=label,
options=options,
index=index,
format_func=format_func,
key=key,
help=help,
on_change=on_change,
args=args,
kwargs=kwargs,
disabled=disabled,
horizontal=horizontal,
captions=captions,
label_visibility=label_visibility,
ctx=ctx,
width=width,
)
def _radio(
self,
label: str,
options: OptionSequence[T],
index: int | None = 0,
format_func: Callable[[Any], Any] = str,
key: Key | None = None,
help: str | None = None,
on_change: WidgetCallback | None = None,
args: WidgetArgs | None = None,
kwargs: WidgetKwargs | None = None,
*, # keyword-only args:
disabled: bool = False,
horizontal: bool = False,
label_visibility: LabelVisibility = "visible",
captions: Sequence[str] | None = None,
ctx: ScriptRunContext | None,
width: Width = "content",
) -> T | None:
key = to_key(key)
check_widget_policies(
self.dg,
key,
on_change,
default_value=None if index == 0 else index,
)
maybe_raise_label_warnings(label, label_visibility)
validate_width(width, allow_content=True)
layout_config = LayoutConfig(width=width)
opt = convert_anything_to_list(options)
check_python_comparable(opt)
element_id = compute_and_register_element_id(
"radio",
user_key=key,
# Treat provided key as the main widget identity. Only include the
# following parameters in the identity computation since they can
# invalidate the current selection mapping.
# Changes to format_func also invalidate the current selection,
# but this is already handled via the `options` parameter below:
key_as_main_identity={"options"},
dg=self.dg,
label=label,
options=[str(format_func(option)) for option in opt],
index=index,
help=help,
horizontal=horizontal,
captions=captions,
width=width,
)
if not isinstance(index, int) and index is not None:
raise StreamlitAPIException(
f"Radio Value has invalid type: {type(index).__name__}"
)
if index is not None and len(opt) > 0 and not 0 <= index < len(opt):
raise StreamlitAPIException(
"Radio index must be between 0 and length of options"
)
def handle_captions(caption: str | None) -> str:
if caption is None:
return ""
if isinstance(caption, str):
return caption
raise StreamlitAPIException(
f"Radio captions must be strings. Passed type: {type(caption).__name__}"
)
session_state = get_session_state().filtered_state
if key is not None and key in session_state and session_state[key] is None:
index = None
radio_proto = RadioProto()
radio_proto.id = element_id
radio_proto.label = label
if index is not None:
radio_proto.default = index
radio_proto.options[:] = [str(format_func(option)) for option in opt]
radio_proto.form_id = current_form_id(self.dg)
radio_proto.horizontal = horizontal
radio_proto.disabled = disabled
radio_proto.label_visibility.value = get_label_visibility_proto_value(
label_visibility
)
if captions is not None:
radio_proto.captions[:] = map(handle_captions, captions)
if help is not None:
radio_proto.help = dedent(help)
serde = RadioSerde(opt, index)
widget_state = register_widget(
radio_proto.id,
on_change_handler=on_change,
args=args,
kwargs=kwargs,
deserializer=serde.deserialize,
serializer=serde.serialize,
ctx=ctx,
value_type="int_value",
)
widget_state = maybe_coerce_enum(widget_state, options, opt)
if widget_state.value_changed:
if widget_state.value is not None:
serialized_value = serde.serialize(widget_state.value)
if serialized_value is not None:
radio_proto.value = serialized_value
radio_proto.set_value = True
if ctx:
save_for_app_testing(ctx, element_id, format_func)
self.dg._enqueue("radio", radio_proto, layout_config=layout_config)
return widget_state.value
@property
def dg(self) -> DeltaGenerator:
"""Get our DeltaGenerator."""
return cast("DeltaGenerator", self)
| RadioMixin |
python | Lightning-AI__lightning | tests/tests_pytorch/loops/test_fetchers.py | {
"start": 9106,
"end": 9246
} | class ____:
def __init__(self, val: Any) -> None:
self.val = val
def wait(self) -> Any:
return self.val
| DummyWaitable |
python | modin-project__modin | modin/config/envvars.py | {
"start": 26892,
"end": 28051
} | class ____(EnvironmentVariable, type=int):
"""How many partitions to use for a Modin DataFrame (along each axis)."""
varname = "MODIN_NPARTITIONS"
@classmethod
def _put(cls, value: int) -> None:
"""
Put specific value if NPartitions wasn't set by a user yet.
Parameters
----------
value : int
Config value to set.
Notes
-----
This method is used to set NPartitions from cluster resources internally
and should not be called by a user.
"""
if cls.get_value_source() == ValueSource.DEFAULT:
cls.put(value)
@classmethod
def _get_default(cls) -> int:
"""
Get default value of the config.
Returns
-------
int
"""
return CpuCount.get()
@classmethod
def get(cls) -> int:
"""
Get ``NPartitions`` with extra checks.
Returns
-------
int
"""
nparts = super().get()
if nparts <= 0:
raise ValueError(f"`NPartitions` should be > 0; current value: {nparts}")
return nparts
| NPartitions |
python | huggingface__transformers | src/transformers/models/xglm/modeling_xglm.py | {
"start": 24373,
"end": 28972
} | class ____(XGLMPreTrainedModel, GenerationMixin):
base_model_prefix = "model"
_tied_weights_keys = {"lm_head.weight": "model.embed_tokens.weight"}
def __init__(self, config):
super().__init__(config)
self.model = XGLMModel(config)
self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
# Initialize weights and apply final processing
self.post_init()
@auto_docstring
def forward(
self,
input_ids: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.Tensor] = None,
encoder_hidden_states: Optional[torch.Tensor] = None,
encoder_attention_mask: Optional[torch.Tensor] = None,
past_key_values: Optional[Cache] = None,
inputs_embeds: Optional[torch.Tensor] = None,
labels: Optional[torch.Tensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
cache_position: Optional[torch.Tensor] = None,
logits_to_keep: Union[int, torch.Tensor] = 0,
**kwargs,
) -> Union[tuple[torch.Tensor], CausalLMOutputWithCrossAttentions]:
r"""
encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*):
Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of
the decoder.
encoder_attention_mask (`torch.LongTensor` of shape `(batch_size, encoder_sequence_length)`, *optional*):
Mask to avoid performing cross-attention on padding tokens indices of encoder input_ids. Mask values
selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
(masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
# decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
outputs = self.model(
input_ids=input_ids,
attention_mask=attention_mask,
position_ids=position_ids,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
cache_position=cache_position,
)
hidden_states = outputs[0]
# Only compute necessary logits, and do not upcast them to float if we are not computing the loss
slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
logits = self.lm_head(hidden_states[:, slice_indices, :])
loss = None
if labels is not None:
loss = self.loss_function(
logits,
labels,
vocab_size=self.config.vocab_size,
pad_token_id=self.config.pad_token_id,
**kwargs,
)
if not return_dict:
output = (logits,) + outputs[1:]
return (loss,) + output if loss is not None else output
return CausalLMOutputWithCrossAttentions(
loss=loss,
logits=logits,
past_key_values=outputs.past_key_values,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
cross_attentions=outputs.cross_attentions,
)
__all__ = ["XGLMForCausalLM", "XGLMModel", "XGLMPreTrainedModel"]
| XGLMForCausalLM |
python | apache__airflow | providers/google/src/airflow/providers/google/marketing_platform/operators/analytics_admin.py | {
"start": 10988,
"end": 14077
} | class ____(GoogleCloudBaseOperator):
"""
Creates Data stream.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:GoogleAnalyticsAdminCreateDataStreamOperator`
:param property_id: ID of the parent property for the data stream.
:param data_stream: The data stream to create.
For more details see: https://developers.google.com/analytics/devguides/config/admin/v1/rest/v1beta/properties.dataStreams#DataStream
:param retry: Optional, a retry object used to retry requests. If `None` is specified, requests
will not be retried.
:param timeout: Optional. The timeout for this request.
:param metadata: Optional. Strings which should be sent along with the request as metadata.
:param gcp_conn_id: The connection ID to use when fetching connection info.
:param impersonation_chain: Optional. Service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
"gcp_conn_id",
"impersonation_chain",
"property_id",
"data_stream",
)
def __init__(
self,
*,
property_id: str,
data_stream: DataStream | dict,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.property_id = property_id
self.data_stream = data_stream
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(
self,
context: Context,
) -> Message:
hook = GoogleAnalyticsAdminHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
self.log.info("Creating a Google Analytics data stream.")
data_stream = hook.create_data_stream(
property_id=self.property_id,
data_stream=self.data_stream,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
self.log.info("The Google Analytics data stream %s was created successfully.", data_stream.name)
return DataStream.to_dict(data_stream)
| GoogleAnalyticsAdminCreateDataStreamOperator |
python | scipy__scipy | scipy/integrate/tests/test_integrate.py | {
"start": 13317,
"end": 13628
} | class ____(ODE):
r"""Integrate 1/(t + 1j) from t=-10 to t=10"""
stop_t = 20
z0 = [0]
cmplx = True
def f(self, z, t):
return array([1./(t - 10 + 1j)])
def verify(self, zs, t):
u = -2j * np.arctan(10)
return allclose(u, zs[-1, :], atol=self.atol, rtol=self.rtol)
| Pi |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/protocol32.py | {
"start": 492,
"end": 841
} | class ____(Generic[Arg, Value]):
def method1(self, default: Value) -> Value:
return default
def method2(self, default: Value) -> Value:
return default
def another(self, arg: Arg) -> None:
return
def func1(arg: Arg, value: Value) -> Interface[Arg, Value]:
return Implementation1[Arg, Value]()
| Implementation1 |
python | mlflow__mlflow | mlflow/server/graphql/graphql_errors.py | {
"start": 18,
"end": 221
} | class ____(graphene.ObjectType):
# NOTE: This is not an exhaustive list, might need to add more things in the future if needed.
field = graphene.String()
message = graphene.String()
| ErrorDetail |
python | huggingface__transformers | tests/models/colpali/test_processing_colpali.py | {
"start": 1105,
"end": 12379
} | class ____(ProcessorTesterMixin, unittest.TestCase):
processor_class = ColPaliProcessor
@classmethod
def _setup_tokenizer(cls):
return GemmaTokenizer(SAMPLE_VOCAB, keep_accents=True)
@classmethod
def _setup_image_processor(cls):
image_processor_class = cls._get_component_class_from_processor("image_processor")
image_processor = image_processor_class.from_pretrained("google/siglip-so400m-patch14-384")
image_processor.image_seq_length = 0
return image_processor
@unittest.skip("ColpaliProcessor can only process one of text or images at a time")
def test_processor_with_multiple_inputs(self):
pass
@unittest.skip("ColpaliProcessor adds a prefix and suffix to the text")
def test_tokenizer_defaults(self):
pass
def test_get_num_vision_tokens(self):
"Tests general functionality of the helper used internally in vLLM"
processor = self.get_processor()
output = processor._get_num_multimodal_tokens(image_sizes=[(100, 100), (300, 100), (500, 30)])
self.assertTrue("num_image_tokens" in output)
self.assertEqual(len(output["num_image_tokens"]), 3)
self.assertTrue("num_image_patches" in output)
self.assertEqual(len(output["num_image_patches"]), 3)
@require_torch
@require_vision
def test_process_images(self):
# Processor configuration
image_input = self.prepare_image_inputs()
image_processor = self.get_component("image_processor")
tokenizer = self.get_component("tokenizer", max_length=112, padding="max_length")
image_processor.image_seq_length = 14
# Get the processor
processor = self.processor_class(
tokenizer=tokenizer,
image_processor=image_processor,
)
# Process the image
batch_feature = processor.process_images(images=image_input, return_tensors="pt")
# Assertions
self.assertIn("pixel_values", batch_feature)
self.assertEqual(batch_feature["pixel_values"].shape, torch.Size([1, 3, 384, 384]))
@require_torch
@require_vision
def test_process_queries(self):
# Inputs
queries = [
"Is attention really all you need?",
"Are Benjamin, Antoine, Merve, and Jo best friends?",
]
# Processor configuration
image_processor = self.get_component("image_processor")
tokenizer = self.get_component("tokenizer", max_length=112, padding="max_length")
image_processor.image_seq_length = 14
# Get the processor
processor = self.processor_class(
tokenizer=tokenizer,
image_processor=image_processor,
)
# Process the image
batch_feature = processor.process_queries(text=queries, return_tensors="pt")
# Assertions
self.assertIn("input_ids", batch_feature)
self.assertIsInstance(batch_feature["input_ids"], torch.Tensor)
self.assertEqual(batch_feature["input_ids"].shape[0], len(queries))
# The following tests override the parent tests because ColPaliProcessor can only take one of images or text as input at a time.
def test_tokenizer_defaults_preserved_by_kwargs(self):
if "image_processor" not in self.processor_class.get_attributes():
self.skipTest(f"image_processor attribute not present in {self.processor_class}")
processor_components = self.prepare_components()
processor_components["tokenizer"] = self.get_component("tokenizer", max_length=117, padding="max_length")
processor = self.processor_class(**processor_components)
self.skip_processor_without_typed_kwargs(processor)
input_str = self.prepare_text_inputs()
inputs = processor(text=input_str, return_tensors="pt")
self.assertEqual(inputs[self.text_input_name].shape[-1], 117)
def test_image_processor_defaults_preserved_by_image_kwargs(self):
"""
We use do_rescale=True, rescale_factor=-1.0 to ensure that image_processor kwargs are preserved in the processor.
We then check that the mean of the pixel_values is less than or equal to 0 after processing.
Since the original pixel_values are in [0, 255], this is a good indicator that the rescale_factor is indeed applied.
"""
if "image_processor" not in self.processor_class.get_attributes():
self.skipTest(f"image_processor attribute not present in {self.processor_class}")
processor_components = self.prepare_components()
processor_components["image_processor"] = self.get_component(
"image_processor", do_rescale=True, rescale_factor=-1.0
)
processor_components["tokenizer"] = self.get_component("tokenizer", max_length=117, padding="max_length")
processor = self.processor_class(**processor_components)
self.skip_processor_without_typed_kwargs(processor)
image_input = self.prepare_image_inputs()
inputs = processor(images=image_input, return_tensors="pt")
self.assertLessEqual(inputs[self.images_input_name][0][0].mean(), 0)
def test_kwargs_overrides_default_tokenizer_kwargs(self):
if "image_processor" not in self.processor_class.get_attributes():
self.skipTest(f"image_processor attribute not present in {self.processor_class}")
processor_components = self.prepare_components()
processor_components["tokenizer"] = self.get_component("tokenizer", padding="longest")
processor = self.processor_class(**processor_components)
self.skip_processor_without_typed_kwargs(processor)
input_str = self.prepare_text_inputs()
inputs = processor(text=input_str, return_tensors="pt", max_length=112, padding="max_length")
self.assertEqual(inputs[self.text_input_name].shape[-1], 112)
def test_kwargs_overrides_default_image_processor_kwargs(self):
if "image_processor" not in self.processor_class.get_attributes():
self.skipTest(f"image_processor attribute not present in {self.processor_class}")
processor_components = self.prepare_components()
processor_components["image_processor"] = self.get_component(
"image_processor", do_rescale=True, rescale_factor=1
)
processor_components["tokenizer"] = self.get_component("tokenizer", max_length=117, padding="max_length")
processor = self.processor_class(**processor_components)
self.skip_processor_without_typed_kwargs(processor)
image_input = self.prepare_image_inputs()
inputs = processor(images=image_input, do_rescale=True, rescale_factor=-1.0, return_tensors="pt")
self.assertLessEqual(inputs[self.images_input_name][0][0].mean(), 0)
def test_unstructured_kwargs(self):
if "image_processor" not in self.processor_class.get_attributes():
self.skipTest(f"image_processor attribute not present in {self.processor_class}")
processor_components = self.prepare_components()
processor = self.processor_class(**processor_components)
self.skip_processor_without_typed_kwargs(processor)
input_str = self.prepare_text_inputs()
inputs = processor(
text=input_str,
return_tensors="pt",
do_rescale=True,
rescale_factor=-1.0,
padding="max_length",
max_length=76,
)
self.assertEqual(inputs[self.text_input_name].shape[-1], 76)
def test_unstructured_kwargs_batched(self):
if "image_processor" not in self.processor_class.get_attributes():
self.skipTest(f"image_processor attribute not present in {self.processor_class}")
processor_components = self.prepare_components()
processor = self.processor_class(**processor_components)
self.skip_processor_without_typed_kwargs(processor)
image_input = self.prepare_image_inputs(batch_size=2)
inputs = processor(
images=image_input,
return_tensors="pt",
do_rescale=True,
rescale_factor=-1.0,
padding="longest",
max_length=76,
)
self.assertLessEqual(inputs[self.images_input_name][0][0].mean(), 0)
def test_doubly_passed_kwargs(self):
if "image_processor" not in self.processor_class.get_attributes():
self.skipTest(f"image_processor attribute not present in {self.processor_class}")
processor_components = self.prepare_components()
processor = self.processor_class(**processor_components)
self.skip_processor_without_typed_kwargs(processor)
image_input = self.prepare_image_inputs()
with self.assertRaises(ValueError):
_ = processor(
images=image_input,
images_kwargs={"do_rescale": True, "rescale_factor": -1.0},
do_rescale=True,
return_tensors="pt",
)
def test_structured_kwargs_nested(self):
if "image_processor" not in self.processor_class.get_attributes():
self.skipTest(f"image_processor attribute not present in {self.processor_class}")
processor_components = self.prepare_components()
processor = self.processor_class(**processor_components)
self.skip_processor_without_typed_kwargs(processor)
input_str = self.prepare_text_inputs()
# Define the kwargs for each modality
all_kwargs = {
"common_kwargs": {"return_tensors": "pt"},
"images_kwargs": {"do_rescale": True, "rescale_factor": -1.0},
"text_kwargs": {"padding": "max_length", "max_length": 76},
}
inputs = processor(text=input_str, **all_kwargs)
self.skip_processor_without_typed_kwargs(processor)
self.assertEqual(inputs[self.text_input_name].shape[-1], 76)
def test_structured_kwargs_nested_from_dict(self):
if "image_processor" not in self.processor_class.get_attributes():
self.skipTest(f"image_processor attribute not present in {self.processor_class}")
processor_components = self.prepare_components()
processor = self.processor_class(**processor_components)
self.skip_processor_without_typed_kwargs(processor)
image_input = self.prepare_image_inputs()
# Define the kwargs for each modality
all_kwargs = {
"common_kwargs": {"return_tensors": "pt"},
"images_kwargs": {"do_rescale": True, "rescale_factor": -1.0},
"text_kwargs": {"padding": "max_length", "max_length": 76},
}
inputs = processor(images=image_input, **all_kwargs)
self.assertEqual(inputs[self.text_input_name].shape[-1], 76)
# Can process only text or images at a time
def test_model_input_names(self):
processor = self.get_processor()
image_input = self.prepare_image_inputs()
inputs = processor(images=image_input)
self.assertSetEqual(set(inputs.keys()), set(processor.model_input_names))
@unittest.skip("ColPali can't process text+image inputs at the same time")
def test_processor_text_has_no_visual(self):
pass
| ColPaliProcessorTest |
python | geekcomputers__Python | brickout-game/brickout-game.py | {
"start": 779,
"end": 2643
} | class ____(object):
def __init__(self, screen, radius, x, y):
self.__screen = screen
self._radius = radius
self._xLoc = x
self._yLoc = y
self.__xVel = 7
self.__yVel = 2
w, h = pygame.display.get_surface().get_size()
self.__width = w
self.__height = h
def getXVel(self):
return self.__xVel
def getYVel(self):
return self.__yVel
def draw(self):
"""
draws the ball onto screen.
"""
pygame.draw.circle(screen, (255, 0, 0), (self._xLoc, self._yLoc), self._radius)
def update(self, paddle, brickwall):
"""
moves the ball at the screen.
contains some collision detection.
"""
self._xLoc += self.__xVel
self._yLoc += self.__yVel
# left screen wall bounce
if self._xLoc <= self._radius:
self.__xVel *= -1
# right screen wall bounce
elif self._xLoc >= self.__width - self._radius:
self.__xVel *= -1
# top wall bounce
if self._yLoc <= self._radius:
self.__yVel *= -1
# bottom drop out
elif self._yLoc >= self.__width - self._radius:
return True
# for bouncing off the bricks.
if brickwall.collide(self):
self.__yVel *= -1
# collision detection between ball and paddle
paddleY = paddle._yLoc
paddleW = paddle._width
paddleH = paddle._height
paddleX = paddle._xLoc
ballX = self._xLoc
ballY = self._yLoc
if ((ballX + self._radius) >= paddleX and ballX <= (paddleX + paddleW)) and (
(ballY + self._radius) >= paddleY and ballY <= (paddleY + paddleH)
):
self.__yVel *= -1
return False
"""
Simple class for representing a paddle
"""
| Ball |
python | PyCQA__pylint | tests/functional/a/alternative/alternative_union_syntax.py | {
"start": 1338,
"end": 1533
} | class ____(typing.TypedDict):
my_var: int | str
# Check dataclasses
def my_decorator(*args, **kwargs):
def wraps(*args, **kwargs):
pass
return wraps
@dataclass
| CustomTypedDict4 |
python | pypa__warehouse | warehouse/packaging/models.py | {
"start": 31470,
"end": 31737
} | class ____(str, enum.Enum):
bdist_dmg = "bdist_dmg"
bdist_dumb = "bdist_dumb"
bdist_egg = "bdist_egg"
bdist_msi = "bdist_msi"
bdist_rpm = "bdist_rpm"
bdist_wheel = "bdist_wheel"
bdist_wininst = "bdist_wininst"
sdist = "sdist"
| PackageType |
python | anthropics__anthropic-sdk-python | src/anthropic/types/beta/beta_code_execution_tool_result_block_param.py | {
"start": 466,
"end": 827
} | class ____(TypedDict, total=False):
content: Required[BetaCodeExecutionToolResultBlockParamContentParam]
tool_use_id: Required[str]
type: Required[Literal["code_execution_tool_result"]]
cache_control: Optional[BetaCacheControlEphemeralParam]
"""Create a cache control breakpoint at this content block."""
| BetaCodeExecutionToolResultBlockParam |
python | Farama-Foundation__Gymnasium | gymnasium/vector/async_vector_env.py | {
"start": 1102,
"end": 1307
} | class ____(Enum):
"""The AsyncVectorEnv possible states given the different actions."""
DEFAULT = "default"
WAITING_RESET = "reset"
WAITING_STEP = "step"
WAITING_CALL = "call"
| AsyncState |
python | realpython__materials | python-self-type/accounts.py | {
"start": 618,
"end": 1605
} | class ____(BankAccount):
interest_rate: float
@classmethod
def from_application(
cls, deposit: float = 0, interest_rate: float = 1
) -> Self:
# Generate a random seven-digit bank account number
account_number = random.randint(1000000, 9999999)
return cls(account_number, deposit, interest_rate)
def calculate_interest(self) -> float:
return self.balance * self.interest_rate / 100
def add_interest(self) -> Self:
self.deposit(self.calculate_interest())
return self
account = BankAccount(account_number=1534899324, balance=50)
(
account.display_balance()
.deposit(50)
.display_balance()
.withdraw(30)
.display_balance()
)
savings = SavingsAccount.from_application(deposit=100, interest_rate=5)
(
savings.display_balance()
.add_interest()
.display_balance()
.deposit(50)
.display_balance()
.withdraw(30)
.add_interest()
.display_balance()
)
| SavingsAccount |
python | pdm-project__pdm | src/pdm/cli/hooks.py | {
"start": 164,
"end": 1469
} | class ____:
def __init__(self, project: Project, skip: list[str] | None = None):
self.project = project
self.skip = skip or []
@contextlib.contextmanager
def skipping(self, *names: str) -> Generator[None]:
"""
Temporarily skip some hooks.
"""
old_skip = self.skip[:]
self.skip.extend(names)
yield
self.skip = old_skip
@property
def skip_all(self) -> bool:
return ":all" in self.skip
@property
def skip_pre(self) -> bool:
return ":pre" in self.skip
@property
def skip_post(self) -> bool:
return ":post" in self.skip
def should_run(self, name: str) -> bool:
"""
Tells whether a task given its name should run or not
according to the current skipping rules.
"""
return (
not self.skip_all
and name not in self.skip
and not (self.skip_pre and name.startswith("pre_"))
and not (self.skip_post and name.startswith("post_"))
)
def try_emit(self, name: str, **kwargs: Any) -> None:
"""
Emit a hook signal if rules allow it.
"""
if self.should_run(name):
pdm_signals.signal(name).send(self.project, hooks=self, **kwargs)
| HookManager |
python | tiangolo__fastapi | fastapi/openapi/models.py | {
"start": 8597,
"end": 8912
} | class ____(TypedDict, total=False):
summary: Optional[str]
description: Optional[str]
value: Optional[Any]
externalValue: Optional[AnyUrl]
if PYDANTIC_V2: # type: ignore [misc]
__pydantic_config__ = {"extra": "allow"}
else:
class Config:
extra = "allow"
| Example |
python | numba__llvmlite | versioneer.py | {
"start": 36877,
"end": 40346
} | class ____(Command):
description = ("install/upgrade Versioneer files: "
"__init__.py SRC/_version.py")
user_options = []
boolean_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
print(" creating %s" % versionfile_source)
with open(versionfile_source, "w") as f:
assert VCS is not None, "please set versioneer.VCS"
LONG = LONG_VERSION_PY[VCS]
f.write(LONG % {"DOLLAR": "$",
"TAG_PREFIX": tag_prefix,
"PARENTDIR_PREFIX": parentdir_prefix,
"VERSIONFILE_SOURCE": versionfile_source,
})
ipy = os.path.join(os.path.dirname(versionfile_source), "__init__.py")
if os.path.exists(ipy):
try:
with open(ipy, "r") as f:
old = f.read()
except EnvironmentError:
old = ""
if INIT_PY_SNIPPET not in old:
print(" appending to %s" % ipy)
with open(ipy, "a") as f:
f.write(INIT_PY_SNIPPET)
else:
print(" %s unmodified" % ipy)
else:
print(" %s doesn't exist, ok" % ipy)
ipy = None
# Make sure both the top-level "versioneer.py" and versionfile_source
# (PKG/_version.py, used by runtime code) are in MANIFEST.in, so
# they'll be copied into source distributions. Pip won't be able to
# install the package without this.
manifest_in = os.path.join(get_root(), "MANIFEST.in")
simple_includes = set()
try:
with open(manifest_in, "r") as f:
for line in f:
if line.startswith("include "):
for include in line.split()[1:]:
simple_includes.add(include)
except EnvironmentError:
pass
# That doesn't cover everything MANIFEST.in can do
# (http://docs.python.org/2/distutils/sourcedist.html#commands), so
# it might give some false negatives. Appending redundant 'include'
# lines is safe, though.
if "versioneer.py" not in simple_includes:
print(" appending 'versioneer.py' to MANIFEST.in")
with open(manifest_in, "a") as f:
f.write("include versioneer.py\n")
else:
print(" 'versioneer.py' already in MANIFEST.in")
if versionfile_source not in simple_includes:
print(" appending versionfile_source ('%s') to MANIFEST.in" %
versionfile_source)
with open(manifest_in, "a") as f:
f.write("include %s\n" % versionfile_source)
else:
print(" versionfile_source already in MANIFEST.in")
# Make VCS-specific changes. For git, this means creating/changing
# .gitattributes to mark _version.py for export-time keyword
# substitution.
do_vcs_install(manifest_in, versionfile_source, ipy)
def get_cmdclass():
cmds = {'version': cmd_version,
'versioneer': cmd_update_files,
'build': cmd_build,
'sdist': cmd_sdist,
}
if 'cx_Freeze' in sys.modules: # cx_freeze enabled?
cmds['build_exe'] = cmd_build_exe
del cmds['build']
return cmds
| cmd_update_files |
python | tiangolo__fastapi | docs_src/body_nested_models/tutorial008.py | {
"start": 112,
"end": 273
} | class ____(BaseModel):
url: HttpUrl
name: str
@app.post("/images/multiple/")
async def create_multiple_images(images: List[Image]):
return images
| Image |
python | doocs__leetcode | solution/2800-2899/2852.Sum of Remoteness of All Cells/Solution.py | {
"start": 0,
"end": 756
} | class ____:
def sumRemoteness(self, grid: List[List[int]]) -> int:
def dfs(i: int, j: int) -> (int, int):
s, t = grid[i][j], 1
grid[i][j] = 0
for a, b in pairwise(dirs):
x, y = i + a, j + b
if 0 <= x < n and 0 <= y < n and grid[x][y] > 0:
s1, t1 = dfs(x, y)
s, t = s + s1, t + t1
return s, t
n = len(grid)
dirs = (-1, 0, 1, 0, -1)
cnt = sum(x > 0 for row in grid for x in row)
ans = 0
for i, row in enumerate(grid):
for j, x in enumerate(row):
if x > 0:
s, t = dfs(i, j)
ans += (cnt - t) * s
return ans
| Solution |
python | sympy__sympy | sympy/core/symbol.py | {
"start": 14086,
"end": 16132
} | class ____(Symbol):
"""Dummy symbols are each unique, even if they have the same name:
Examples
========
>>> from sympy import Dummy
>>> Dummy("x") == Dummy("x")
False
If a name is not supplied then a string value of an internal count will be
used. This is useful when a temporary variable is needed and the name
of the variable used in the expression is not important.
>>> Dummy() #doctest: +SKIP
_Dummy_10
"""
# In the rare event that a Dummy object needs to be recreated, both the
# `name` and `dummy_index` should be passed. This is used by `srepr` for
# example:
# >>> d1 = Dummy()
# >>> d2 = eval(srepr(d1))
# >>> d2 == d1
# True
#
# If a new session is started between `srepr` and `eval`, there is a very
# small chance that `d2` will be equal to a previously-created Dummy.
_count = 0
_prng = random.Random()
_base_dummy_index = _prng.randint(10**6, 9*10**6)
__slots__ = ('dummy_index',)
is_Dummy = True
def __new__(cls, name: str | None = None,
dummy_index: int | None = None,
**assumptions: bool | None) -> Self:
if dummy_index is not None:
assert name is not None, "If you specify a dummy_index, you must also provide a name"
if name is None:
name = "Dummy_" + str(Dummy._count)
if dummy_index is None:
dummy_index = Dummy._base_dummy_index + Dummy._count
Dummy._count += 1
cls._sanitize(assumptions, cls)
obj = Symbol.__xnew__(cls, name, **assumptions)
obj.dummy_index = dummy_index
return obj
def __getnewargs_ex__(self):
return ((self.name, self.dummy_index), self._assumptions_orig)
@cacheit
def sort_key(self, order=None):
return self.class_key(), (
2, (self.name, self.dummy_index)), S.One.sort_key(), S.One
def _hashable_content(self):
return Symbol._hashable_content(self) + (self.dummy_index,)
| Dummy |
python | gevent__gevent | src/gevent/select.py | {
"start": 7973,
"end": 13582
} | class ____(object):
"""
An implementation of :obj:`select.poll` that blocks only the current greenlet.
With only one exception, the interface is the same as the standard library interface.
.. caution:: ``POLLPRI`` data is not supported.
.. versionadded:: 1.1b1
.. versionchanged:: 1.5
This is now always defined, regardless of whether the standard library
defines :func:`select.poll` or not. Note that it may have different performance
characteristics.
"""
def __init__(self):
# {int -> flags}
# We can't keep watcher objects in here because people commonly
# just drop the poll object when they're done, without calling
# unregister(). dnspython does this.
self.fds = {}
self.loop = get_hub().loop
def register(self, fd, eventmask=_NONE):
"""
Register a file descriptor *fd* with the polling object.
Future calls to the :meth:`poll`` method will then check
whether the file descriptor has any pending I/O events. *fd* can
be either an integer, or an object with a ``fileno()`` method that
returns an integer. File objects implement ``fileno()``, so they
can also be used as the argument (but remember that regular
files are usually always ready).
*eventmask* is an optional bitmask describing the type of events
you want to check for, and can be a combination of the
constants ``POLLIN``, and ``POLLOUT`` (``POLLPRI`` is not supported).
"""
if eventmask is _NONE:
flags = _EV_READ | _EV_WRITE
else:
flags = 0
if eventmask & POLLIN:
flags = _EV_READ
if eventmask & POLLOUT:
flags |= _EV_WRITE
# If they ask for POLLPRI, we can't support
# that. Should we raise an error?
fileno = get_fileno(fd)
self.fds[fileno] = flags
def modify(self, fd, eventmask):
"""
Change the set of events being watched on *fd*.
"""
self.register(fd, eventmask)
def _get_started_watchers(self, poll_result):
watchers = []
io = self.loop.io
MAXPRI = self.loop.MAXPRI
watcher_cb = poll_result.add_event
try:
for fd, flags in self.fds.items():
try:
watcher = io(fd, flags)
except OSError as ex:
if ex.errno != EBADF:
raise
poll_result.add_error_before_io(fd)
continue
watchers.append(watcher)
watcher.priority = MAXPRI
watcher.start(watcher_cb, fd, pass_events=True)
except:
for awatcher in watchers:
awatcher.stop()
awatcher.close()
raise
return watchers
def poll(self, timeout=None):
"""
poll the registered fds.
.. versionchanged:: 1.2a1
File descriptors that are closed are reported with POLLNVAL.
.. versionchanged:: 1.3a2
Under libuv, interpret *timeout* values less than 0 the same as *None*,
i.e., block. This was always the case with libev.
"""
result = PollResult()
watchers = self._get_started_watchers(result)
try:
if timeout is not None:
if timeout < 0:
# The docs for python say that an omitted timeout,
# a negative timeout and a timeout of None are all
# supposed to block forever. Many, but not all
# OS's accept any negative number to mean that. Some
# OS's raise errors for anything negative but not -1.
# Python 3.7 changes to always pass exactly -1 in that
# case from selectors.
# Our Timeout class currently does not have a defined behaviour
# for negative values. On libuv, it uses a check watcher and effectively
# doesn't block. On libev, it seems to block. In either case, we
# *want* to block, so turn this into the sure fire block request.
timeout = None
elif timeout:
# The docs for poll.poll say timeout is in
# milliseconds. Our result objects work in
# seconds, so this should be *=, shouldn't it?
timeout /= 1000.0
result.event.wait(timeout=timeout)
return list(result.events)
finally:
for awatcher in watchers:
awatcher.stop()
awatcher.close()
def unregister(self, fd):
"""
Unregister the *fd*.
.. versionchanged:: 1.2a1
Raise a `KeyError` if *fd* was not registered, like the standard
library. Previously gevent did nothing.
"""
fileno = get_fileno(fd)
del self.fds[fileno]
def _gevent_do_monkey_patch(patch_request):
aggressive = patch_request.patch_kwargs['aggressive']
patch_request.default_patch_items()
if aggressive:
# since these are blocking we're removing them here. This makes some other
# modules (e.g. asyncore) non-blocking, as they use select that we provide
# when none of these are available.
patch_request.remove_item(
'epoll',
'kqueue',
'kevent',
'devpoll',
)
| poll |
python | openai__openai-python | src/openai/types/beta/realtime/conversation_item_with_reference_param.py | {
"start": 263,
"end": 953
} | class ____(TypedDict, total=False):
id: str
"""
ID of a previous conversation item to reference (for `item_reference` content
types in `response.create` events). These can reference both client and server
created items.
"""
audio: str
"""Base64-encoded audio bytes, used for `input_audio` content type."""
text: str
"""The text content, used for `input_text` and `text` content types."""
transcript: str
"""The transcript of the audio, used for `input_audio` content type."""
type: Literal["input_text", "input_audio", "item_reference", "text"]
"""The content type (`input_text`, `input_audio`, `item_reference`, `text`)."""
| Content |
python | readthedocs__readthedocs.org | readthedocs/organizations/tests/test_access.py | {
"start": 6993,
"end": 7302
} | class ____(OrganizationAccessMixin, TestCase):
"""Test organization paths with authed org owner."""
def login(self):
return self.client.login(username="eric", password="test")
def is_admin(self):
return True
@override_settings(RTD_ALLOW_ORGANIZATIONS=True)
| OrganizationOwnerAccess |
python | Lightning-AI__lightning | examples/pytorch/servable_module/production.py | {
"start": 2832,
"end": 4212
} | class ____(LitModule, ServableModule):
def configure_payload(self):
# 1: Access the train dataloader and load a single sample.
image, _ = self.trainer.train_dataloader.dataset[0]
# 2: Convert the image into a PIL Image to bytes and encode it with base64
pil_image = T.ToPILImage()(image)
buffered = BytesIO()
pil_image.save(buffered, format="JPEG")
img_str = base64.b64encode(buffered.getvalue()).decode("UTF-8")
return {"body": {"x": img_str}}
def configure_serialization(self):
return {"x": Image(224, 224).deserialize}, {"output": Top1().serialize}
def serve_step(self, x: torch.Tensor) -> dict[str, torch.Tensor]:
return {"output": self.model(x)}
def configure_response(self):
return {"output": 7}
def cli_main():
cli = LightningCLI(
ProductionReadyModel,
CIFAR10DataModule,
seed_everything_default=42,
save_config_kwargs={"overwrite": True},
run=False,
trainer_defaults={
"accelerator": "cpu",
"callbacks": [ServableModuleValidator()],
"max_epochs": 1,
"limit_train_batches": 5,
"limit_val_batches": 5,
},
)
cli.trainer.fit(cli.model, cli.datamodule)
if __name__ == "__main__":
cli_lightning_logo()
cli_main()
| ProductionReadyModel |
python | huggingface__transformers | src/transformers/models/ovis2/modular_ovis2.py | {
"start": 12783,
"end": 17176
} | class ____(LlavaForConditionalGeneration, GenerationMixin):
_checkpoint_conversion_mapping = {}
def __init__(self, config: Ovis2Config):
super().__init__(config)
self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
def get_image_features(self, pixel_values: torch.FloatTensor):
return self.model.get_image_features(pixel_values=pixel_values)
@can_return_tuple
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
pixel_values: Optional[torch.FloatTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
cache_position: Optional[torch.LongTensor] = None,
logits_to_keep: Union[int, torch.Tensor] = 0,
**kwargs,
) -> Union[tuple, Ovis2CausalLMOutputWithPast]:
r"""
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
(masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
Example:
```python
>>> from PIL import Image
>>> import requests
>>> from transformers import AutoProcessor, Ovis2ForConditionalGeneration
>>> model = Ovis2ForConditionalGeneration.from_pretrained("thisisiron/Ovis2-2B-hf")
>>> processor = AutoProcessor.from_pretrained("thisisiron/Ovis2-2B-hf")
>>> prompt = "<|im_start|>user\n<image>\nDescribe the image.<|im_end|>\n<|im_start|>assistant\n"
>>> url = "http://images.cocodataset.org/val2014/COCO_val2014_000000537955.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> inputs = processor(images=image, text=prompt, return_tensors="pt")
>>> # Generate
>>> generate_ids = model.generate(**inputs, max_new_tokens=15)
>>> processor.batch_decode(generate_ids, skip_special_tokens=True)[0]
"user\n\nDescribe the image.\nassistant\nThe image features a brown dog standing on a wooden floor, looking up with"
```"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
outputs = self.model(
input_ids=input_ids,
pixel_values=pixel_values,
attention_mask=attention_mask,
position_ids=position_ids,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=True,
cache_position=cache_position,
**kwargs,
)
hidden_states = outputs[0]
# Only compute necessary logits, and do not upcast them to float if we are not computing the loss
slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
logits = self.lm_head(hidden_states[:, slice_indices, :])
loss = None
if labels is not None:
loss = self.loss_function(
logits=logits, labels=labels, vocab_size=self.config.text_config.vocab_size, **kwargs
)
return Ovis2CausalLMOutputWithPast(
loss=loss,
logits=logits,
past_key_values=outputs.past_key_values,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
image_hidden_states=outputs.image_hidden_states,
)
__all__ = ["Ovis2PreTrainedModel", "Ovis2Model", "Ovis2ForConditionalGeneration"]
| Ovis2ForConditionalGeneration |
python | jazzband__django-polymorphic | src/polymorphic/admin/helpers.py | {
"start": 420,
"end": 727
} | class ____(InlineAdminForm):
"""
Expose the admin configuration for a form
"""
def polymorphic_ctype_field(self):
return AdminField(self.form, "polymorphic_ctype", False)
@property
def is_empty(self):
return "__prefix__" in self.form.prefix
| PolymorphicInlineAdminForm |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 49283,
"end": 49789
} | class ____(sgqlc.types.Enum):
"""The default permission a repository can have in an Organization.
Enumeration Choices:
* `ADMIN`: Can read, clone, push, and add collaborators to
repositories.
* `NONE`: No default permission value.
* `READ`: Can read and clone repositories.
* `WRITE`: Can read, clone and push to repositories.
"""
__schema__ = github_schema
__choices__ = ("ADMIN", "NONE", "READ", "WRITE")
| OrgUpdateDefaultRepositoryPermissionAuditEntryPermission |
python | pypa__hatch | backend/src/hatchling/plugin/manager.py | {
"start": 1669,
"end": 3396
} | class ____:
def __init__(self, registration_method: Callable, identifier: str, third_party_plugins: ThirdPartyPlugins) -> None:
self.registration_method = registration_method
self.identifier = identifier
self.third_party_plugins = third_party_plugins
def collect(self, *, include_third_party: bool = True) -> dict:
if include_third_party and not self.third_party_plugins.loaded:
self.third_party_plugins.load()
classes: dict[str, type] = {}
for raw_registered_classes in self.registration_method():
registered_classes = (
raw_registered_classes if isinstance(raw_registered_classes, list) else [raw_registered_classes]
)
for registered_class in registered_classes:
name = getattr(registered_class, self.identifier, None)
if not name: # no cov
message = f"Class `{registered_class.__name__}` does not have a {name} attribute."
raise ValueError(message)
if name in classes: # no cov
message = (
f"Class `{registered_class.__name__}` defines its name as `{name}` but "
f"that name is already used by `{classes[name].__name__}`."
)
raise ValueError(message)
classes[name] = registered_class
return classes
def get(self, name: str) -> type | None:
if not self.third_party_plugins.loaded:
classes = self.collect(include_third_party=False)
if name in classes:
return classes[name]
return self.collect().get(name)
| ClassRegister |
python | walkccc__LeetCode | solutions/144. Binary Tree Preorder Traversal/144.py | {
"start": 0,
"end": 296
} | class ____:
def preorderTraversal(self, root: TreeNode | None) -> list[int]:
ans = []
def preorder(root: TreeNode | None) -> None:
if not root:
return
ans.append(root.val)
preorder(root.left)
preorder(root.right)
preorder(root)
return ans
| Solution |
python | django__django | django/contrib/postgres/operations.py | {
"start": 3577,
"end": 3707
} | class ____(CreateExtension):
def __init__(self, hints=None):
super().__init__("unaccent", hints=hints)
| UnaccentExtension |
python | xlwings__xlwings | xlwings/_xlwindows.py | {
"start": 14559,
"end": 15581
} | class ____:
@property
def apps(self):
return Apps()
@property
def name(self):
return "excel"
@property
def type(self):
return "desktop"
@staticmethod
def prepare_xl_data_element(x, date_format):
if isinstance(x, time_types):
return _datetime_to_com_time(x)
elif pd and pd.isna(x):
return ""
elif np and isinstance(x, (np.floating, float)) and np.isnan(x):
return ""
elif np and isinstance(x, np.number):
return float(x)
elif x is None:
return ""
else:
return x
@staticmethod
def clean_value_data(data, datetime_builder, empty_as, number_builder, err_to_str):
return [
[
_clean_value_data_element(
c, datetime_builder, empty_as, number_builder, err_to_str
)
for c in row
]
for row in data
]
engine = Engine()
| Engine |
python | getsentry__sentry | tests/sentry/search/eap/test_uptime_results.py | {
"start": 542,
"end": 5867
} | class ____(TestCase):
def setUp(self) -> None:
self.resolver = SearchResolver(
params=SnubaParams(),
config=SearchResolverConfig(),
definitions=UPTIME_RESULT_DEFINITIONS,
)
def test_simple_query(self) -> None:
where, having, _ = self.resolver.resolve_query("check_status:error")
assert where == TraceItemFilter(
comparison_filter=ComparisonFilter(
key=AttributeKey(name="check_status", type=AttributeKey.Type.TYPE_STRING),
op=ComparisonFilter.OP_EQUALS,
value=AttributeValue(val_str="error"),
)
)
assert having is None
def test_negation(self) -> None:
where, having, _ = self.resolver.resolve_query("!check_status:success")
assert where == TraceItemFilter(
comparison_filter=ComparisonFilter(
key=AttributeKey(name="check_status", type=AttributeKey.Type.TYPE_STRING),
op=ComparisonFilter.OP_NOT_EQUALS,
value=AttributeValue(val_str="success"),
)
)
assert having is None
def test_in_filter(self) -> None:
where, having, _ = self.resolver.resolve_query("region:[us-east-1,us-west-1,eu-west-1]")
assert where == TraceItemFilter(
comparison_filter=ComparisonFilter(
key=AttributeKey(name="region", type=AttributeKey.Type.TYPE_STRING),
op=ComparisonFilter.OP_IN,
value=AttributeValue(
val_str_array=StrArray(values=["us-east-1", "us-west-1", "eu-west-1"])
),
)
)
assert having is None
def test_numeric_comparison(self) -> None:
where, having, _ = self.resolver.resolve_query("request_duration_us:>1000")
assert where == TraceItemFilter(
comparison_filter=ComparisonFilter(
key=AttributeKey(name="request_duration_us", type=AttributeKey.Type.TYPE_INT),
op=ComparisonFilter.OP_GREATER_THAN,
value=AttributeValue(val_int=1000),
)
)
assert having is None
def test_http_status_code_filter(self) -> None:
where, having, _ = self.resolver.resolve_query("http_status_code:[200,404,500]")
assert where == TraceItemFilter(
comparison_filter=ComparisonFilter(
key=AttributeKey(name="http_status_code", type=AttributeKey.Type.TYPE_INT),
op=ComparisonFilter.OP_IN,
value=AttributeValue(val_int_array=IntArray(values=[200, 404, 500])),
)
)
assert having is None
def test_query_with_and(self) -> None:
where, having, _ = self.resolver.resolve_query("check_status:error region:us-east-1")
assert where == TraceItemFilter(
and_filter=AndFilter(
filters=[
TraceItemFilter(
comparison_filter=ComparisonFilter(
key=AttributeKey(
name="check_status", type=AttributeKey.Type.TYPE_STRING
),
op=ComparisonFilter.OP_EQUALS,
value=AttributeValue(val_str="error"),
)
),
TraceItemFilter(
comparison_filter=ComparisonFilter(
key=AttributeKey(name="region", type=AttributeKey.Type.TYPE_STRING),
op=ComparisonFilter.OP_EQUALS,
value=AttributeValue(val_str="us-east-1"),
)
),
]
)
)
assert having is None
def test_query_with_or(self) -> None:
where, having, _ = self.resolver.resolve_query("check_status:error or http_status_code:500")
assert where == TraceItemFilter(
or_filter=OrFilter(
filters=[
TraceItemFilter(
comparison_filter=ComparisonFilter(
key=AttributeKey(
name="check_status", type=AttributeKey.Type.TYPE_STRING
),
op=ComparisonFilter.OP_EQUALS,
value=AttributeValue(val_str="error"),
)
),
TraceItemFilter(
comparison_filter=ComparisonFilter(
key=AttributeKey(
name="http_status_code", type=AttributeKey.Type.TYPE_INT
),
op=ComparisonFilter.OP_EQUALS,
value=AttributeValue(val_int=500),
)
),
]
)
)
assert having is None
def test_empty_query(self) -> None:
where, having, _ = self.resolver.resolve_query("")
assert where is None
assert having is None
def test_none_query(self) -> None:
where, having, _ = self.resolver.resolve_query(None)
assert where is None
assert having is None
| SearchResolverQueryTest |
python | getsentry__sentry | src/sentry/backup/comparators.py | {
"start": 883,
"end": 6845
} | class ____(ABC):
"""An abstract class that compares and then scrubs some set of fields that, by a more nuanced
definition than mere strict byte-for-byte equality, are expected to maintain some relation on
otherwise equivalent JSON instances of the same model.
Each class inheriting from `JSONScrubbingComparator` should override the abstract `compare`
method with its own comparison logic. The `scrub` method merely moves the compared fields from
the `fields` dictionary to the non-diffed `scrubbed` dictionary, and may optionally be wrapped
if extra scrubbing logic is necessary.
If multiple comparators are used sequentially on a single model (see the `SCRUBBING_COMPARATORS`
dict below for specific mappings), all of the `compare(...)` methods are called before any of
the `scrub(...)` methods are. This ensures that comparators that touch the same fields do not
have their inputs mangled by one another."""
def __init__(self, *fields: str):
self.fields = set(fields)
def check(self, side: Side, data: Any) -> None:
"""Ensure that we have received valid JSON data at runtime."""
if "model" not in data or not isinstance(data["model"], str):
raise RuntimeError(f"The {side.name} input must have a `model` string assigned to it.")
if "ordinal" not in data or not isinstance(data["ordinal"], int):
raise RuntimeError(f"The {side.name} input must have a numerical `ordinal` entry.")
if "fields" not in data or not isinstance(data["fields"], dict):
raise RuntimeError(f"The {side.name} input must have a `fields` dictionary.")
@abstractmethod
def compare(self, on: InstanceID, left: Any, right: Any) -> list[ComparatorFinding]:
"""An abstract method signature, to be implemented by inheriting classes with their own
comparison logic. Implementations of this method MUST take care not to mutate the method's
inputs!"""
def existence(self, on: InstanceID, left: Any, right: Any) -> list[ComparatorFinding]:
"""Ensure that all tracked fields on either both models or neither."""
findings = []
for f in self.fields:
missing_on_left = f not in left["fields"] or left["fields"][f] is None
missing_on_right = f not in right["fields"] or right["fields"][f] is None
if missing_on_left and missing_on_right:
continue
if missing_on_left:
findings.append(
ComparatorFinding(
kind=self.get_kind_existence_check(),
on=on,
left_pk=left["pk"],
right_pk=right["pk"],
reason=f"the left `{f}` value was missing",
)
)
if missing_on_right:
findings.append(
ComparatorFinding(
kind=self.get_kind_existence_check(),
on=on,
left_pk=left["pk"],
right_pk=right["pk"],
reason=f"the right `{f}` value was missing",
)
)
return findings
def __scrub__(
self,
left: Any,
right: Any,
f: (
Callable[[list[str]], list[str]] | Callable[[list[str]], ScrubbedData]
) = lambda _: ScrubbedData.SCRUBBED_DATA,
) -> None:
"""Removes all of the fields compared by this comparator from the `fields` dict, so that the
remaining fields may be compared for equality. Public callers should use the inheritance-safe wrapper, `scrub`, rather than using this internal method directly.
Parameters:
- on: An `InstanceID` that must be shared by both versions of the JSON model being compared.
- left: One of the models being compared (usually the "before") version.
- right: The other model it is being compared against (usually the "after" or
post-processed version).
- f: Optional helper method that populates the RHS of the scrubbed entry. If this is
omitted, the scrubbed entry defaults to `True`.
"""
self.check(Side.left, left)
self.check(Side.right, right)
if "scrubbed" not in left:
left["scrubbed"] = {}
if "scrubbed" not in right:
right["scrubbed"] = {}
for field in self.fields:
for side in [left, right]:
if side["fields"].get(field) is None:
# Normalize fields that are literally `None` vs those that are totally absent.
if field in side["fields"]:
del side["fields"][field]
side["scrubbed"][f"{self.get_kind().name}::{field}"] = None
continue
value = side["fields"][field]
value = [value] if not isinstance(value, list) else value
del side["fields"][field]
side["scrubbed"][f"{self.get_kind().name}::{field}"] = f(value)
def scrub(
self,
left: Any,
right: Any,
) -> None:
self.__scrub__(left, right)
def get_kind(self) -> ComparatorFindingKind:
"""A unique identifier for this particular derivation of JSONScrubbingComparator, which will
be bubbled up in ComparatorFindings when they are generated."""
return ComparatorFindingKind.__members__[self.__class__.__name__]
def get_kind_existence_check(self) -> ComparatorFindingKind:
"""A unique identifier for the existence check of this particular derivation of
JSONScrubbingComparator, which will be bubbled up in ComparatorFindings when they are
generated."""
return ComparatorFindingKind.__members__[self.__class__.__name__ + "ExistenceCheck"]
| JSONScrubbingComparator |
python | sphinx-doc__sphinx | sphinx/ext/inheritance_diagram.py | {
"start": 12530,
"end": 12658
} | class ____(graphviz):
"""A docutils node to use as a placeholder for the inheritance diagram."""
pass
| inheritance_diagram |
python | numba__numba | numba/core/ir.py | {
"start": 32236,
"end": 33834
} | class ____(EqualityCheckMixin, AbstractRHS):
"""
Attributes
-----------
- scope: Scope
- name: str
- loc: Loc
Definition location
"""
def __init__(self, scope, name, loc):
# NOTE: Use of scope=None should be removed.
assert scope is None or isinstance(scope, Scope)
assert isinstance(name, str)
assert isinstance(loc, Loc)
self.scope = scope
self.name = name
self.loc = loc
def __repr__(self):
return 'Var(%s, %s)' % (self.name, self.loc.short())
def __str__(self):
return self.name
@property
def is_temp(self):
return self.name.startswith("$")
@property
def unversioned_name(self):
"""The unversioned name of this variable, i.e. SSA renaming removed
"""
for k, redef_set in self.scope.var_redefinitions.items():
if self.name in redef_set:
return k
return self.name
@property
def versioned_names(self):
"""Known versioned names for this variable, i.e. known variable names in
the scope that have been formed from applying SSA to this variable
"""
return self.scope.get_versions_of(self.unversioned_name)
@property
def all_names(self):
"""All known versioned and unversioned names for this variable
"""
return self.versioned_names | {self.unversioned_name,}
def __deepcopy__(self, memo):
out = Var(copy.deepcopy(self.scope, memo), self.name, self.loc)
memo[id(self)] = out
return out
| Var |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/pylint/yield_in_init.py | {
"start": 96,
"end": 195
} | class ____:
def __init__(self):
yield from self.gen()
def gen(self):
yield 5
| B |
python | tensorflow__tensorflow | tensorflow/python/autograph/pyct/templates.py | {
"start": 1001,
"end": 3260
} | class ____(gast.NodeTransformer):
"""Adjusts the ctx field of nodes to ensure consistency.
This transformer can change the ctx fields of a variable, tuple and other
AST elements that allow one, based on whether the element is being read or
written.
"""
def __init__(self, override_value):
self._ctx_override = override_value
def visit(self, node):
original_override = self._ctx_override
node = super(ContextAdjuster, self).visit(node)
if hasattr(node, 'ctx'):
assert node.ctx is not None, 'node {} has ctx unset'.format(node)
self._ctx_override = original_override
return node
def _apply_override(self, node):
if self._ctx_override is not None:
node.ctx = self._ctx_override()
def visit_Attribute(self, node):
self._apply_override(node)
self._ctx_override = gast.Load
node = self.generic_visit(node)
return node
def visit_Tuple(self, node):
self._apply_override(node)
return self.generic_visit(node)
def visit_List(self, node):
self._apply_override(node)
return self.generic_visit(node)
def visit_Name(self, node):
self._apply_override(node)
return self.generic_visit(node)
def visit_Call(self, node):
self._apply_override(node)
# We may be able to override these to Load(), but for now it's simpler
# to just assert that they're set.
self._ctx_override = None
return self.generic_visit(node)
def visit_Dict(self, node):
# We may be able to override these to Load(), but for now it's simpler
# to just assert that they're set.
self._ctx_override = None
return self.generic_visit(node)
def visit_Subscript(self, node):
self._apply_override(node)
self._ctx_override = gast.Load
node.value = self.visit(node.value)
return self.generic_visit(node)
def visit_comprehension(self, node):
# We may be able to override some of these, but for now it's simpler
# to just assert that they're set.
self._ctx_override = None
return self.generic_visit(node)
def visit_Lambda(self, node):
# We may be able to override some of these, but for now it's simpler
# to just assert that they're set.
self._ctx_override = None
return self.generic_visit(node)
| ContextAdjuster |
python | anthropics__anthropic-sdk-python | src/anthropic/types/cache_control_ephemeral_param.py | {
"start": 226,
"end": 529
} | class ____(TypedDict, total=False):
type: Required[Literal["ephemeral"]]
ttl: Literal["5m", "1h"]
"""The time-to-live for the cache control breakpoint.
This may be one the following values:
- `5m`: 5 minutes
- `1h`: 1 hour
Defaults to `5m`.
"""
| CacheControlEphemeralParam |
python | automl__auto-sklearn | autosklearn/util/dask.py | {
"start": 2106,
"end": 3972
} | class ____(Dask):
def __init__(self, n_jobs: int | None = None) -> None:
self.n_jobs = n_jobs
self._client: Client | None = None
self._cluster: LocalCluster | None = None
def client(self) -> Client:
"""Creates a usable dask client or returns an existing one
If there is not current client, because it has been closed, create
a new one.
* If ``n_jobs == 1``, create a ``SingleThreadedClient``
* Else create a ``Client`` with a ``LocalCluster``
"""
if self._client is not None:
return self._client
if self.n_jobs == 1:
cluster = None
client = SingleThreadedClient()
else:
cluster = LocalCluster(
n_workers=self.n_jobs,
processes=False,
threads_per_worker=1,
# We use tmpdir to save the workers as deleting workers takes
# more time than deleting backend directories.
# This prevent an error saying that the worker file was deleted,
# so the client could not close the worker properly
local_directory=tempfile.gettempdir(),
# Memory is handled by the pynisher, not by the dask worker/nanny
memory_limit=0,
)
client = Client(cluster, heartbeat_interval=10000) # 10s
self._client = client
self._cluster = cluster
return self._client
def close(self) -> None:
"""Closes any open dask client"""
if self._client is None:
return
self._client.close()
if self._cluster is not None:
self._cluster.close()
self._client = None
self._cluster = None
def __repr__(self) -> str:
return f"LocalDask(n_jobs = {self.n_jobs})"
| LocalDask |
python | huggingface__transformers | src/transformers/models/llava_next/processing_llava_next.py | {
"start": 1403,
"end": 12615
} | class ____(ProcessorMixin):
r"""
Constructs a LLaVa-NeXT processor which wraps a LLaVa-NeXT image processor and a LLaMa tokenizer into a single processor.
[`LlavaNextProcessor`] offers all the functionalities of [`LlavaNextImageProcessor`] and [`LlamaTokenizerFast`]. See the
[`~LlavaNextProcessor.__call__`] and [`~LlavaNextProcessor.decode`] for more information.
Args:
image_processor ([`LlavaNextImageProcessor`], *optional*):
The image processor is a required input.
tokenizer ([`LlamaTokenizerFast`], *optional*):
The tokenizer is a required input.
patch_size (`int`, *optional*):
Patch size from the vision tower.
vision_feature_select_strategy (`str`, *optional*):
The feature selection strategy used to select the vision feature from the vision backbone.
Should be same as in model's config
chat_template (`str`, *optional*): A Jinja template which will be used to convert lists of messages
in a chat into a tokenizable string.
image_token (`str`, *optional*, defaults to `"<image>"`):
Special token used to denote image location.
num_additional_image_tokens (`int`, *optional*, defaults to 0):
Number of additional tokens added to the image embeddings, such as CLS (+1). If the backbone has no CLS or other
extra tokens appended, no need to set this arg.
"""
def __init__(
self,
image_processor=None,
tokenizer=None,
patch_size=None,
vision_feature_select_strategy=None,
chat_template=None,
image_token="<image>", # set the default and let users change if they have peculiar special tokens in rare cases
num_additional_image_tokens=0,
**kwargs,
):
self.patch_size = patch_size
self.num_additional_image_tokens = num_additional_image_tokens
self.vision_feature_select_strategy = vision_feature_select_strategy
self.image_token = tokenizer.image_token if hasattr(tokenizer, "image_token") else image_token
self.image_token_id = (
tokenizer.image_token_id
if getattr(tokenizer, "image_token_id", None)
else tokenizer.convert_tokens_to_ids(self.image_token)
)
super().__init__(image_processor, tokenizer, chat_template=chat_template)
def __call__(
self,
images: Optional[ImageInput] = None,
text: Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]] = None,
**kwargs: Unpack[LlavaNextProcessorKwargs],
) -> BatchFeature:
"""
Main method to prepare for the model one or several sequences(s) and image(s). This method forwards the `text`
and `kwargs` arguments to LlamaTokenizerFast's [`~LlamaTokenizerFast.__call__`] if `text` is not `None` to encode
the text. To prepare the image(s), this method forwards the `images` and `kwargs` arguments to
LlavaNextImageProcessor's [`~LlavaNextImageProcessor.__call__`] if `images` is not `None`. Please refer to the docstring
of the above two methods for more information.
Args:
images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `list[PIL.Image.Image]`, `list[np.ndarray]`, `list[torch.Tensor]`):
The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch
tensor. Both channels-first and channels-last formats are supported.
text (`str`, `list[str]`, `list[list[str]]`):
The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings
(pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set
`is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
Returns:
[`BatchFeature`]: A [`BatchFeature`] with the following fields:
- **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None`.
- **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when
`return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not
`None`).
- **pixel_values** -- Pixel values to be fed to a model. Returned when `images` is not `None`.
"""
if images is None and text is None:
raise ValueError("You have to specify at least images or text.")
output_kwargs = self._merge_kwargs(
LlavaNextProcessorKwargs,
tokenizer_init_kwargs=self.tokenizer.init_kwargs,
**kwargs,
)
if images is not None:
image_inputs = self.image_processor(images, **output_kwargs["images_kwargs"])
else:
image_inputs = {}
if isinstance(text, str):
text = [text]
elif not isinstance(text, list) and not isinstance(text[0], str):
raise TypeError("Invalid input text. Please provide a string, or a list of strings")
prompt_strings = text
if image_inputs:
image_sizes = iter(image_inputs["image_sizes"])
height, width = get_image_size(to_numpy_array(image_inputs["pixel_values"][0][0]))
prompt_strings = []
for sample in text:
while self.image_token in sample:
image_size = next(image_sizes)
if not isinstance(image_size, (list, tuple)):
# cast to list to avoid numerical precision errors when calculating unpadding
image_size = image_size.tolist()
orig_height, orig_width = image_size
num_image_tokens = self._get_number_of_features(orig_height, orig_width, height, width)
if self.vision_feature_select_strategy == "default":
num_image_tokens -= 1
sample = sample.replace(self.image_token, "<placeholder>" * num_image_tokens, 1)
prompt_strings.append(sample)
prompt_strings = [sample.replace("<placeholder>", self.image_token) for sample in prompt_strings]
return_tensors = output_kwargs["text_kwargs"].pop("return_tensors", None)
return_mm_token_type_ids = output_kwargs["text_kwargs"].pop("return_mm_token_type_ids", None)
text_inputs = self.tokenizer(prompt_strings, **output_kwargs["text_kwargs"])
self._check_special_mm_tokens(prompt_strings, text_inputs, modalities=["image"])
if return_mm_token_type_ids:
array_ids = np.array(text_inputs["input_ids"])
mm_token_type_ids = np.zeros_like(text_inputs["input_ids"])
mm_token_type_ids[array_ids == self.image_token_id] = 1
text_inputs["mm_token_type_ids"] = mm_token_type_ids.tolist()
return BatchFeature(data={**text_inputs, **image_inputs}, tensor_type=return_tensors)
def _get_number_of_features(self, orig_height: int, orig_width: int, height: int, width: int) -> int:
image_grid_pinpoints = self.image_processor.image_grid_pinpoints
height_best_resolution, width_best_resolution = select_best_resolution(
[orig_height, orig_width], image_grid_pinpoints
)
scale_height, scale_width = height_best_resolution // height, width_best_resolution // width
patches_height = height // self.patch_size
patches_width = width // self.patch_size
unpadded_features, newline_features = self._get_unpadded_features(
orig_height, orig_width, patches_height, patches_width, scale_height, scale_width
)
# The base patch covers the entire image (+1 for the CLS)
base_features = patches_height * patches_width + self.num_additional_image_tokens
num_image_tokens = unpadded_features + newline_features + base_features
return num_image_tokens
def _get_unpadded_features(self, height, width, patches_height, patches_width, scale_height, scale_width):
"""
Get number of features for a given image with height/width. LLaVA-NeXT is different from LLaVA
because it divided each image into patches depending on its resolution. Therefore we need to calculate how many
patches an image is divided into and get the number of features from that.
"""
current_height = patches_height * scale_height
current_width = patches_width * scale_width
original_aspect_ratio = width / height
current_aspect_ratio = current_width / current_height
if original_aspect_ratio > current_aspect_ratio:
new_height = int(round(height * (current_width / width), 7))
padding = (current_height - new_height) // 2
current_height -= padding * 2
else:
new_width = int(round(width * (current_height / height), 7))
padding = (current_width - new_width) // 2
current_width -= padding * 2
unpadded_features = current_height * current_width
newline_features = current_height
return (unpadded_features, newline_features)
def _get_num_multimodal_tokens(self, image_sizes=None, **kwargs):
"""
Computes the number of placeholder tokens needed for multimodal inputs with the given sizes.
Args:
image_sizes (list[list[str]], *optional*):
The input sizes formatted as (height, width) per each image.
Returns:
`MultiModalData`: A `MultiModalData` object holding number of tokens per each of the provided
input modalities, along with other useful data.
"""
vision_data = {}
if image_sizes is not None:
images_kwargs = LlavaNextProcessorKwargs._defaults.get("images_kwargs", {})
images_kwargs.update(kwargs)
size = images_kwargs.get("size", None) or self.image_processor.size
size = (
(size["shortest_edge"], size["shortest_edge"])
if "shortest_edge" in size
else (min(size["height"], size["width"]), min(size["height"], size["width"]))
)
processed_height, processed_width = size
batch_num_image_tokens = []
num_image_patches = [1] * len(image_sizes) # llava-next doesn't batch pixels as Idefics, thus `1` patch`
for image_size in image_sizes:
orig_height, orig_width = image_size
num_image_tokens = self._get_number_of_features(
orig_height, orig_width, processed_height, processed_width
)
if self.vision_feature_select_strategy == "default":
num_image_tokens -= 1
batch_num_image_tokens.append(num_image_tokens)
vision_data.update({"num_image_tokens": batch_num_image_tokens, "num_image_patches": num_image_patches})
return MultiModalData(**vision_data)
__all__ = ["LlavaNextProcessor"]
| LlavaNextProcessor |
python | pytorch__pytorch | test/distributed/checkpoint/test_file_system_checkpoint.py | {
"start": 5141,
"end": 7188
} | class ____(ShardedTensorTestBase):
@property
def world_size(self) -> int:
return 2
@with_comms(init_rpc=False)
@skip_if_lt_x_gpu(2)
@requires_accelerator_dist_backend()
@parametrize("extensions", [None, [Rot13Example()], [ZStandard()]])
def test_read_write_shard_tensor(self, extensions) -> None:
paths = [tempfile.mkdtemp()]
dist.broadcast_object_list(paths)
path = paths[0]
# pyre-fixme [28]: Unexpected keyword argument `dim` to call `dist._sharding_spec.api.ChunkShardingSpec.__init__`.
spec = ChunkShardingSpec(
dim=0,
placements=[
f"rank:0/{device_type}:0",
f"rank:1/{device_type}:1",
],
)
model_to_save = MyShardedModel1(spec, init_rrefs=False)
# Test save
model_to_save._register_state_dict_hook(state_dict_hook)
state_dict_to_save = model_to_save.state_dict()
fs_writer = FileSystemWriter(path=path, _extensions=extensions)
save_state_dict(state_dict=state_dict_to_save, storage_writer=fs_writer)
dist.barrier()
# Create a new model
model_to_load = MyShardedModel1(spec, init_rrefs=False)
# This is not the correct hook for loading the state dict
# model_to_load._register_load_state_dict_pre_hook(pre_load_state_dict_hook, True)
model_to_load._register_state_dict_hook(state_dict_hook)
state_dict_to_load_to = model_to_load.state_dict()
dist.barrier()
with self.assertRaises(AssertionError):
assert_state_dict_equal(self, state_dict_to_load_to, state_dict_to_save)
# Test load.
fs_reader = FileSystemReader(
path=path, _extension_registry=get_test_extension_registry()
)
load_state_dict(state_dict=state_dict_to_load_to, storage_reader=fs_reader)
assert_state_dict_equal(self, state_dict_to_load_to, state_dict_to_save)
dist.barrier()
| TestDistributedStateDictSaveLoadWithSharedTensor |
python | Pylons__pyramid | tests/test_testing.py | {
"start": 139,
"end": 473
} | class ____(unittest.TestCase):
def _makeOne(self, environ):
from pyramid.testing import DummyRootFactory
return DummyRootFactory(environ)
def test_it(self):
environ = {'bfg.routes.matchdict': {'a': 1}}
factory = self._makeOne(environ)
self.assertEqual(factory.a, 1)
| TestDummyRootFactory |
python | getsentry__sentry | src/sentry/users/api/endpoints/user_index.py | {
"start": 631,
"end": 3029
} | class ____(Endpoint):
publish_status = {
"GET": ApiPublishStatus.PRIVATE,
}
permission_classes = (SuperuserOrStaffFeatureFlaggedPermission,)
def get(self, request: Request) -> Response:
queryset = User.objects.distinct()
query = request.GET.get("query")
if query:
tokens = tokenize_query(query)
for key, value in tokens.items():
if key == "query":
joined = " ".join(value)
queryset = queryset.filter(
Q(name__icontains=joined)
| Q(username__icontains=joined)
| Q(email__icontains=joined)
| Q(emails__email__icontains=joined)
)
elif key == "id":
queryset = queryset.filter(
id__in=[request.user.id if v == "me" else v for v in value]
)
elif key == "name":
queryset = queryset.filter(in_iexact("name", value))
elif key == "email":
queryset = queryset.filter(in_iexact("email", value))
elif key == "username":
queryset = queryset.filter(in_iexact("username", value))
elif key == "is":
for v in value:
if v == "superuser":
queryset = queryset.filter(is_superuser=True)
else:
queryset = queryset.none()
elif key == "permission":
queryset = queryset.filter(
userpermission__permission__in=[v.lower() for v in value]
)
else:
queryset = queryset.none()
status = request.GET.get("status")
if status == "active":
queryset = queryset.filter(is_active=True)
elif status == "disabled":
queryset = queryset.filter(is_active=False)
order_by = "-date_joined"
paginator_cls = DateTimePaginator
return self.paginate(
request=request,
queryset=queryset,
order_by=order_by,
on_results=lambda x: serialize(x, request.user, UserSerializer()),
paginator_cls=paginator_cls,
)
| UserIndexEndpoint |
python | getsentry__sentry | src/sentry/api/serializers/models/apiapplication.py | {
"start": 207,
"end": 934
} | class ____(Serializer):
def serialize(self, obj, attrs, user, **kwargs):
is_secret_visible = obj.date_added > timezone.now() - timedelta(minutes=5)
return {
"id": obj.client_id,
"clientID": obj.client_id,
"clientSecret": obj.client_secret if is_secret_visible else None,
"name": obj.name,
"homepageUrl": obj.homepage_url,
"privacyUrl": obj.privacy_url,
"termsUrl": obj.terms_url,
"allowedOrigins": obj.get_allowed_origins(),
"redirectUris": obj.get_redirect_uris(),
"scopes": obj.scopes,
"requiresOrgLevelAccess": obj.requires_org_level_access,
}
| ApiApplicationSerializer |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/remote_representation/external_data.py | {
"start": 56287,
"end": 56385
} | class ____:
key: str
full_type_name: str
@whitelist_for_serdes
@record
| ComponentInstanceSnap |
python | doocs__leetcode | solution/0500-0599/0593.Valid Square/Solution.py | {
"start": 0,
"end": 792
} | class ____:
def validSquare(
self, p1: List[int], p2: List[int], p3: List[int], p4: List[int]
) -> bool:
def check(a, b, c):
(x1, y1), (x2, y2), (x3, y3) = a, b, c
d1 = (x1 - x2) * (x1 - x2) + (y1 - y2) * (y1 - y2)
d2 = (x1 - x3) * (x1 - x3) + (y1 - y3) * (y1 - y3)
d3 = (x2 - x3) * (x2 - x3) + (y2 - y3) * (y2 - y3)
return any(
[
d1 == d2 and d1 + d2 == d3 and d1,
d2 == d3 and d2 + d3 == d1 and d2,
d1 == d3 and d1 + d3 == d2 and d1,
]
)
return (
check(p1, p2, p3)
and check(p2, p3, p4)
and check(p1, p3, p4)
and check(p1, p2, p4)
)
| Solution |
python | geekcomputers__Python | BlackJack_game/blackjack_simulate.py | {
"start": 2799,
"end": 5378
} | class ____:
def __init__(self, amount):
"""
:param amount: the chips you own
"""
self._amount = amount
self._bet_amount = 0
self._insurance = 0
self.is_insurance = False
self.is_double = False
def __bool__(self):
return self.amount > 0
@staticmethod
def get_tips(content):
fmt_tips = "{color}** TIPS: {content}! **{end}"
return fmt_tips.format(
color=COLOR.get("YELLOW"), content=content, end=COLOR.get("END")
)
@property
def amount(self):
return self._amount
@amount.setter
def amount(self, value):
if not isinstance(value, int):
type_tips = "Please give a integer"
raise ValueError(Chips.get_tips(type_tips))
if value < 0:
amount_tips = "Your integer should bigger than 0"
raise ValueError(Chips.get_tips(amount_tips))
self._amount = value
@property
def bet_amount(self):
return self._bet_amount
@bet_amount.setter
def bet_amount(self, value):
type_tips = "Please give a integer"
amount_tips = "Your chips should between 1 - " + str(self.amount) + " "
try:
value = int(value)
except ValueError:
raise ValueError(Chips.get_tips(type_tips))
else:
if not isinstance(value, int):
raise ValueError(Chips.get_tips(type_tips))
if (value <= 0) or (value > self.amount):
raise ValueError(Chips.get_tips(amount_tips))
self._bet_amount = value
def double_bet(self):
if self.can_double():
self._bet_amount *= 2
self.is_double = True
else:
over_tips = "Not enough chips || "
cannot_double = "CAN'T DO DOUBLE"
raise ValueError(Chips.get_tips(over_tips + cannot_double))
@property
def insurance(self):
return self._insurance
@insurance.setter
def insurance(self, value):
if self.amount - value < 0:
over_tips = "Not enough chips"
raise ValueError(Chips.get_tips(over_tips))
self._insurance = value
self.is_insurance = True
def current_amount(self):
return self.amount - self.bet_amount - self.insurance
def reset_chip(self):
self._bet_amount = 0
self._insurance = 0
self.is_double = False
self.is_insurance = False
def can_double(self):
return self.current_amount() - self.bet_amount >= 0
| Chips |
python | ansible__ansible | test/units/module_utils/facts/test_ansible_collector.py | {
"start": 17390,
"end": 17631
} | class ____(TestCollectedFacts):
gather_subset = ['!all']
min_fact_count = 1
max_fact_count = 10
expected_facts = ['gather_subset',
'module_setup']
not_expected_facts = ['lsb']
| TestMinimalCollectedFacts |
python | plotly__plotly.py | plotly/graph_objs/bar/marker/_colorbar.py | {
"start": 233,
"end": 61532
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "bar.marker"
_path_str = "bar.marker.colorbar"
_valid_props = {
"bgcolor",
"bordercolor",
"borderwidth",
"dtick",
"exponentformat",
"labelalias",
"len",
"lenmode",
"minexponent",
"nticks",
"orientation",
"outlinecolor",
"outlinewidth",
"separatethousands",
"showexponent",
"showticklabels",
"showtickprefix",
"showticksuffix",
"thickness",
"thicknessmode",
"tick0",
"tickangle",
"tickcolor",
"tickfont",
"tickformat",
"tickformatstopdefaults",
"tickformatstops",
"ticklabeloverflow",
"ticklabelposition",
"ticklabelstep",
"ticklen",
"tickmode",
"tickprefix",
"ticks",
"ticksuffix",
"ticktext",
"ticktextsrc",
"tickvals",
"tickvalssrc",
"tickwidth",
"title",
"x",
"xanchor",
"xpad",
"xref",
"y",
"yanchor",
"ypad",
"yref",
}
@property
def bgcolor(self):
"""
Sets the color of padded area.
The 'bgcolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["bgcolor"]
@bgcolor.setter
def bgcolor(self, val):
self["bgcolor"] = val
@property
def bordercolor(self):
"""
Sets the axis line color.
The 'bordercolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["bordercolor"]
@bordercolor.setter
def bordercolor(self, val):
self["bordercolor"] = val
@property
def borderwidth(self):
"""
Sets the width (in px) or the border enclosing this color bar.
The 'borderwidth' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["borderwidth"]
@borderwidth.setter
def borderwidth(self, val):
self["borderwidth"] = val
@property
def dtick(self):
"""
Sets the step in-between ticks on this axis. Use with `tick0`.
Must be a positive number, or special strings available to
"log" and "date" axes. If the axis `type` is "log", then ticks
are set every 10^(n*dtick) where n is the tick number. For
example, to set a tick mark at 1, 10, 100, 1000, ... set dtick
to 1. To set tick marks at 1, 100, 10000, ... set dtick to 2.
To set tick marks at 1, 5, 25, 125, 625, 3125, ... set dtick to
log_10(5), or 0.69897000433. "log" has several special values;
"L<f>", where `f` is a positive number, gives ticks linearly
spaced in value (but not position). For example `tick0` = 0.1,
`dtick` = "L0.5" will put ticks at 0.1, 0.6, 1.1, 1.6 etc. To
show powers of 10 plus small digits between, use "D1" (all
digits) or "D2" (only 2 and 5). `tick0` is ignored for "D1" and
"D2". If the axis `type` is "date", then you must convert the
time to milliseconds. For example, to set the interval between
ticks to one day, set `dtick` to 86400000.0. "date" also has
special values "M<n>" gives ticks spaced by a number of months.
`n` must be a positive integer. To set ticks on the 15th of
every third month, set `tick0` to "2000-01-15" and `dtick` to
"M3". To set ticks every 4 years, set `dtick` to "M48"
The 'dtick' property accepts values of any type
Returns
-------
Any
"""
return self["dtick"]
@dtick.setter
def dtick(self, val):
self["dtick"] = val
@property
def exponentformat(self):
"""
Determines a formatting rule for the tick exponents. For
example, consider the number 1,000,000,000. If "none", it
appears as 1,000,000,000. If "e", 1e+9. If "E", 1E+9. If
"power", 1x10^9 (with 9 in a super script). If "SI", 1G. If
"B", 1B. "SI" uses prefixes from "femto" f (10^-15) to "tera" T
(10^12). *SI extended* covers instead the full SI range from
"quecto" q (10^-30) to "quetta" Q (10^30). If "SI" or *SI
extended* is used and the exponent is beyond the above ranges,
the formatting rule will automatically be switched to the power
notation.
The 'exponentformat' property is an enumeration that may be specified as:
- One of the following enumeration values:
['none', 'e', 'E', 'power', 'SI', 'B', 'SI extended']
Returns
-------
Any
"""
return self["exponentformat"]
@exponentformat.setter
def exponentformat(self, val):
self["exponentformat"] = val
@property
def labelalias(self):
"""
Replacement text for specific tick or hover labels. For example
using {US: 'USA', CA: 'Canada'} changes US to USA and CA to
Canada. The labels we would have shown must match the keys
exactly, after adding any tickprefix or ticksuffix. For
negative numbers the minus sign symbol used (U+2212) is wider
than the regular ascii dash. That means you need to use −1
instead of -1. labelalias can be used with any axis type, and
both keys (if needed) and values (if desired) can include html-
like tags or MathJax.
The 'labelalias' property accepts values of any type
Returns
-------
Any
"""
return self["labelalias"]
@labelalias.setter
def labelalias(self, val):
self["labelalias"] = val
@property
def len(self):
"""
Sets the length of the color bar This measure excludes the
padding of both ends. That is, the color bar length is this
length minus the padding on both ends.
The 'len' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["len"]
@len.setter
def len(self, val):
self["len"] = val
@property
def lenmode(self):
"""
Determines whether this color bar's length (i.e. the measure in
the color variation direction) is set in units of plot
"fraction" or in *pixels. Use `len` to set the value.
The 'lenmode' property is an enumeration that may be specified as:
- One of the following enumeration values:
['fraction', 'pixels']
Returns
-------
Any
"""
return self["lenmode"]
@lenmode.setter
def lenmode(self, val):
self["lenmode"] = val
@property
def minexponent(self):
"""
Hide SI prefix for 10^n if |n| is below this number. This only
has an effect when `tickformat` is "SI" or "B".
The 'minexponent' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["minexponent"]
@minexponent.setter
def minexponent(self, val):
self["minexponent"] = val
@property
def nticks(self):
"""
Specifies the maximum number of ticks for the particular axis.
The actual number of ticks will be chosen automatically to be
less than or equal to `nticks`. Has an effect only if
`tickmode` is set to "auto".
The 'nticks' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [0, 9223372036854775807]
Returns
-------
int
"""
return self["nticks"]
@nticks.setter
def nticks(self, val):
self["nticks"] = val
@property
def orientation(self):
"""
Sets the orientation of the colorbar.
The 'orientation' property is an enumeration that may be specified as:
- One of the following enumeration values:
['h', 'v']
Returns
-------
Any
"""
return self["orientation"]
@orientation.setter
def orientation(self, val):
self["orientation"] = val
@property
def outlinecolor(self):
"""
Sets the axis line color.
The 'outlinecolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["outlinecolor"]
@outlinecolor.setter
def outlinecolor(self, val):
self["outlinecolor"] = val
@property
def outlinewidth(self):
"""
Sets the width (in px) of the axis line.
The 'outlinewidth' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["outlinewidth"]
@outlinewidth.setter
def outlinewidth(self, val):
self["outlinewidth"] = val
@property
def separatethousands(self):
"""
If "true", even 4-digit integers are separated
The 'separatethousands' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["separatethousands"]
@separatethousands.setter
def separatethousands(self, val):
self["separatethousands"] = val
@property
def showexponent(self):
"""
If "all", all exponents are shown besides their significands.
If "first", only the exponent of the first tick is shown. If
"last", only the exponent of the last tick is shown. If "none",
no exponents appear.
The 'showexponent' property is an enumeration that may be specified as:
- One of the following enumeration values:
['all', 'first', 'last', 'none']
Returns
-------
Any
"""
return self["showexponent"]
@showexponent.setter
def showexponent(self, val):
self["showexponent"] = val
@property
def showticklabels(self):
"""
Determines whether or not the tick labels are drawn.
The 'showticklabels' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["showticklabels"]
@showticklabels.setter
def showticklabels(self, val):
self["showticklabels"] = val
@property
def showtickprefix(self):
"""
If "all", all tick labels are displayed with a prefix. If
"first", only the first tick is displayed with a prefix. If
"last", only the last tick is displayed with a suffix. If
"none", tick prefixes are hidden.
The 'showtickprefix' property is an enumeration that may be specified as:
- One of the following enumeration values:
['all', 'first', 'last', 'none']
Returns
-------
Any
"""
return self["showtickprefix"]
@showtickprefix.setter
def showtickprefix(self, val):
self["showtickprefix"] = val
@property
def showticksuffix(self):
"""
Same as `showtickprefix` but for tick suffixes.
The 'showticksuffix' property is an enumeration that may be specified as:
- One of the following enumeration values:
['all', 'first', 'last', 'none']
Returns
-------
Any
"""
return self["showticksuffix"]
@showticksuffix.setter
def showticksuffix(self, val):
self["showticksuffix"] = val
@property
def thickness(self):
"""
Sets the thickness of the color bar This measure excludes the
size of the padding, ticks and labels.
The 'thickness' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["thickness"]
@thickness.setter
def thickness(self, val):
self["thickness"] = val
@property
def thicknessmode(self):
"""
Determines whether this color bar's thickness (i.e. the measure
in the constant color direction) is set in units of plot
"fraction" or in "pixels". Use `thickness` to set the value.
The 'thicknessmode' property is an enumeration that may be specified as:
- One of the following enumeration values:
['fraction', 'pixels']
Returns
-------
Any
"""
return self["thicknessmode"]
@thicknessmode.setter
def thicknessmode(self, val):
self["thicknessmode"] = val
@property
def tick0(self):
"""
Sets the placement of the first tick on this axis. Use with
`dtick`. If the axis `type` is "log", then you must take the
log of your starting tick (e.g. to set the starting tick to
100, set the `tick0` to 2) except when `dtick`=*L<f>* (see
`dtick` for more info). If the axis `type` is "date", it should
be a date string, like date data. If the axis `type` is
"category", it should be a number, using the scale where each
category is assigned a serial number from zero in the order it
appears.
The 'tick0' property accepts values of any type
Returns
-------
Any
"""
return self["tick0"]
@tick0.setter
def tick0(self, val):
self["tick0"] = val
@property
def tickangle(self):
"""
Sets the angle of the tick labels with respect to the
horizontal. For example, a `tickangle` of -90 draws the tick
labels vertically.
The 'tickangle' property is a angle (in degrees) that may be
specified as a number between -180 and 180.
Numeric values outside this range are converted to the equivalent value
(e.g. 270 is converted to -90).
Returns
-------
int|float
"""
return self["tickangle"]
@tickangle.setter
def tickangle(self, val):
self["tickangle"] = val
@property
def tickcolor(self):
"""
Sets the tick color.
The 'tickcolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["tickcolor"]
@tickcolor.setter
def tickcolor(self, val):
self["tickcolor"] = val
@property
def tickfont(self):
"""
Sets the color bar's tick label font
The 'tickfont' property is an instance of Tickfont
that may be specified as:
- An instance of :class:`plotly.graph_objs.bar.marker.colorbar.Tickfont`
- A dict of string/value properties that will be passed
to the Tickfont constructor
Returns
-------
plotly.graph_objs.bar.marker.colorbar.Tickfont
"""
return self["tickfont"]
@tickfont.setter
def tickfont(self, val):
self["tickfont"] = val
@property
def tickformat(self):
"""
Sets the tick label formatting rule using d3 formatting mini-
languages which are very similar to those in Python. For
numbers, see:
https://github.com/d3/d3-format/tree/v1.4.5#d3-format. And for
dates see: https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format. We add two items to d3's date
formatter: "%h" for half of the year as a decimal number as
well as "%{n}f" for fractional seconds with n digits. For
example, *2016-10-13 09:15:23.456* with tickformat
"%H~%M~%S.%2f" would display "09~15~23.46"
The 'tickformat' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["tickformat"]
@tickformat.setter
def tickformat(self, val):
self["tickformat"] = val
@property
def tickformatstops(self):
"""
The 'tickformatstops' property is a tuple of instances of
Tickformatstop that may be specified as:
- A list or tuple of instances of plotly.graph_objs.bar.marker.colorbar.Tickformatstop
- A list or tuple of dicts of string/value properties that
will be passed to the Tickformatstop constructor
Returns
-------
tuple[plotly.graph_objs.bar.marker.colorbar.Tickformatstop]
"""
return self["tickformatstops"]
@tickformatstops.setter
def tickformatstops(self, val):
self["tickformatstops"] = val
@property
def tickformatstopdefaults(self):
"""
When used in a template (as layout.template.data.bar.marker.col
orbar.tickformatstopdefaults), sets the default property values
to use for elements of bar.marker.colorbar.tickformatstops
The 'tickformatstopdefaults' property is an instance of Tickformatstop
that may be specified as:
- An instance of :class:`plotly.graph_objs.bar.marker.colorbar.Tickformatstop`
- A dict of string/value properties that will be passed
to the Tickformatstop constructor
Returns
-------
plotly.graph_objs.bar.marker.colorbar.Tickformatstop
"""
return self["tickformatstopdefaults"]
@tickformatstopdefaults.setter
def tickformatstopdefaults(self, val):
self["tickformatstopdefaults"] = val
@property
def ticklabeloverflow(self):
"""
Determines how we handle tick labels that would overflow either
the graph div or the domain of the axis. The default value for
inside tick labels is *hide past domain*. In other cases the
default is *hide past div*.
The 'ticklabeloverflow' property is an enumeration that may be specified as:
- One of the following enumeration values:
['allow', 'hide past div', 'hide past domain']
Returns
-------
Any
"""
return self["ticklabeloverflow"]
@ticklabeloverflow.setter
def ticklabeloverflow(self, val):
self["ticklabeloverflow"] = val
@property
def ticklabelposition(self):
"""
Determines where tick labels are drawn relative to the ticks.
Left and right options are used when `orientation` is "h", top
and bottom when `orientation` is "v".
The 'ticklabelposition' property is an enumeration that may be specified as:
- One of the following enumeration values:
['outside', 'inside', 'outside top', 'inside top',
'outside left', 'inside left', 'outside right', 'inside
right', 'outside bottom', 'inside bottom']
Returns
-------
Any
"""
return self["ticklabelposition"]
@ticklabelposition.setter
def ticklabelposition(self, val):
self["ticklabelposition"] = val
@property
def ticklabelstep(self):
"""
Sets the spacing between tick labels as compared to the spacing
between ticks. A value of 1 (default) means each tick gets a
label. A value of 2 means shows every 2nd label. A larger value
n means only every nth tick is labeled. `tick0` determines
which labels are shown. Not implemented for axes with `type`
"log" or "multicategory", or when `tickmode` is "array".
The 'ticklabelstep' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [1, 9223372036854775807]
Returns
-------
int
"""
return self["ticklabelstep"]
@ticklabelstep.setter
def ticklabelstep(self, val):
self["ticklabelstep"] = val
@property
def ticklen(self):
"""
Sets the tick length (in px).
The 'ticklen' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["ticklen"]
@ticklen.setter
def ticklen(self, val):
self["ticklen"] = val
@property
def tickmode(self):
"""
Sets the tick mode for this axis. If "auto", the number of
ticks is set via `nticks`. If "linear", the placement of the
ticks is determined by a starting position `tick0` and a tick
step `dtick` ("linear" is the default value if `tick0` and
`dtick` are provided). If "array", the placement of the ticks
is set via `tickvals` and the tick text is `ticktext`. ("array"
is the default value if `tickvals` is provided).
The 'tickmode' property is an enumeration that may be specified as:
- One of the following enumeration values:
['auto', 'linear', 'array']
Returns
-------
Any
"""
return self["tickmode"]
@tickmode.setter
def tickmode(self, val):
self["tickmode"] = val
@property
def tickprefix(self):
"""
Sets a tick label prefix.
The 'tickprefix' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["tickprefix"]
@tickprefix.setter
def tickprefix(self, val):
self["tickprefix"] = val
@property
def ticks(self):
"""
Determines whether ticks are drawn or not. If "", this axis'
ticks are not drawn. If "outside" ("inside"), this axis' are
drawn outside (inside) the axis lines.
The 'ticks' property is an enumeration that may be specified as:
- One of the following enumeration values:
['outside', 'inside', '']
Returns
-------
Any
"""
return self["ticks"]
@ticks.setter
def ticks(self, val):
self["ticks"] = val
@property
def ticksuffix(self):
"""
Sets a tick label suffix.
The 'ticksuffix' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["ticksuffix"]
@ticksuffix.setter
def ticksuffix(self, val):
self["ticksuffix"] = val
@property
def ticktext(self):
"""
Sets the text displayed at the ticks position via `tickvals`.
Only has an effect if `tickmode` is set to "array". Used with
`tickvals`.
The 'ticktext' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["ticktext"]
@ticktext.setter
def ticktext(self, val):
self["ticktext"] = val
@property
def ticktextsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `ticktext`.
The 'ticktextsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["ticktextsrc"]
@ticktextsrc.setter
def ticktextsrc(self, val):
self["ticktextsrc"] = val
@property
def tickvals(self):
"""
Sets the values at which ticks on this axis appear. Only has an
effect if `tickmode` is set to "array". Used with `ticktext`.
The 'tickvals' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["tickvals"]
@tickvals.setter
def tickvals(self, val):
self["tickvals"] = val
@property
def tickvalssrc(self):
"""
Sets the source reference on Chart Studio Cloud for `tickvals`.
The 'tickvalssrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["tickvalssrc"]
@tickvalssrc.setter
def tickvalssrc(self, val):
self["tickvalssrc"] = val
@property
def tickwidth(self):
"""
Sets the tick width (in px).
The 'tickwidth' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["tickwidth"]
@tickwidth.setter
def tickwidth(self, val):
self["tickwidth"] = val
@property
def title(self):
"""
The 'title' property is an instance of Title
that may be specified as:
- An instance of :class:`plotly.graph_objs.bar.marker.colorbar.Title`
- A dict of string/value properties that will be passed
to the Title constructor
Returns
-------
plotly.graph_objs.bar.marker.colorbar.Title
"""
return self["title"]
@title.setter
def title(self, val):
self["title"] = val
@property
def x(self):
"""
Sets the x position with respect to `xref` of the color bar (in
plot fraction). When `xref` is "paper", defaults to 1.02 when
`orientation` is "v" and 0.5 when `orientation` is "h". When
`xref` is "container", defaults to 1 when `orientation` is "v"
and 0.5 when `orientation` is "h". Must be between 0 and 1 if
`xref` is "container" and between "-2" and 3 if `xref` is
"paper".
The 'x' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["x"]
@x.setter
def x(self, val):
self["x"] = val
@property
def xanchor(self):
"""
Sets this color bar's horizontal position anchor. This anchor
binds the `x` position to the "left", "center" or "right" of
the color bar. Defaults to "left" when `orientation` is "v" and
"center" when `orientation` is "h".
The 'xanchor' property is an enumeration that may be specified as:
- One of the following enumeration values:
['left', 'center', 'right']
Returns
-------
Any
"""
return self["xanchor"]
@xanchor.setter
def xanchor(self, val):
self["xanchor"] = val
@property
def xpad(self):
"""
Sets the amount of padding (in px) along the x direction.
The 'xpad' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["xpad"]
@xpad.setter
def xpad(self, val):
self["xpad"] = val
@property
def xref(self):
"""
Sets the container `x` refers to. "container" spans the entire
`width` of the plot. "paper" refers to the width of the
plotting area only.
The 'xref' property is an enumeration that may be specified as:
- One of the following enumeration values:
['container', 'paper']
Returns
-------
Any
"""
return self["xref"]
@xref.setter
def xref(self, val):
self["xref"] = val
@property
def y(self):
"""
Sets the y position with respect to `yref` of the color bar (in
plot fraction). When `yref` is "paper", defaults to 0.5 when
`orientation` is "v" and 1.02 when `orientation` is "h". When
`yref` is "container", defaults to 0.5 when `orientation` is
"v" and 1 when `orientation` is "h". Must be between 0 and 1 if
`yref` is "container" and between "-2" and 3 if `yref` is
"paper".
The 'y' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["y"]
@y.setter
def y(self, val):
self["y"] = val
@property
def yanchor(self):
"""
Sets this color bar's vertical position anchor This anchor
binds the `y` position to the "top", "middle" or "bottom" of
the color bar. Defaults to "middle" when `orientation` is "v"
and "bottom" when `orientation` is "h".
The 'yanchor' property is an enumeration that may be specified as:
- One of the following enumeration values:
['top', 'middle', 'bottom']
Returns
-------
Any
"""
return self["yanchor"]
@yanchor.setter
def yanchor(self, val):
self["yanchor"] = val
@property
def ypad(self):
"""
Sets the amount of padding (in px) along the y direction.
The 'ypad' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["ypad"]
@ypad.setter
def ypad(self, val):
self["ypad"] = val
@property
def yref(self):
"""
Sets the container `y` refers to. "container" spans the entire
`height` of the plot. "paper" refers to the height of the
plotting area only.
The 'yref' property is an enumeration that may be specified as:
- One of the following enumeration values:
['container', 'paper']
Returns
-------
Any
"""
return self["yref"]
@yref.setter
def yref(self, val):
self["yref"] = val
    @property
    def _prop_descriptions(self):
        # Human-readable property summaries consumed verbatim by the
        # generated constructor docstring / figure help machinery; the
        # literal below is runtime data and must not be reformatted.
        return """\
        bgcolor
            Sets the color of padded area.
        bordercolor
            Sets the axis line color.
        borderwidth
            Sets the width (in px) or the border enclosing this
            color bar.
        dtick
            Sets the step in-between ticks on this axis. Use with
            `tick0`. Must be a positive number, or special strings
            available to "log" and "date" axes. If the axis `type`
            is "log", then ticks are set every 10^(n*dtick) where n
            is the tick number. For example, to set a tick mark at
            1, 10, 100, 1000, ... set dtick to 1. To set tick marks
            at 1, 100, 10000, ... set dtick to 2. To set tick marks
            at 1, 5, 25, 125, 625, 3125, ... set dtick to
            log_10(5), or 0.69897000433. "log" has several special
            values; "L<f>", where `f` is a positive number, gives
            ticks linearly spaced in value (but not position). For
            example `tick0` = 0.1, `dtick` = "L0.5" will put ticks
            at 0.1, 0.6, 1.1, 1.6 etc. To show powers of 10 plus
            small digits between, use "D1" (all digits) or "D2"
            (only 2 and 5). `tick0` is ignored for "D1" and "D2".
            If the axis `type` is "date", then you must convert the
            time to milliseconds. For example, to set the interval
            between ticks to one day, set `dtick` to 86400000.0.
            "date" also has special values "M<n>" gives ticks
            spaced by a number of months. `n` must be a positive
            integer. To set ticks on the 15th of every third month,
            set `tick0` to "2000-01-15" and `dtick` to "M3". To set
            ticks every 4 years, set `dtick` to "M48"
        exponentformat
            Determines a formatting rule for the tick exponents.
            For example, consider the number 1,000,000,000. If
            "none", it appears as 1,000,000,000. If "e", 1e+9. If
            "E", 1E+9. If "power", 1x10^9 (with 9 in a super
            script). If "SI", 1G. If "B", 1B. "SI" uses prefixes
            from "femto" f (10^-15) to "tera" T (10^12). *SI
            extended* covers instead the full SI range from
            "quecto" q (10^-30) to "quetta" Q (10^30). If "SI" or
            *SI extended* is used and the exponent is beyond the
            above ranges, the formatting rule will automatically be
            switched to the power notation.
        labelalias
            Replacement text for specific tick or hover labels. For
            example using {US: 'USA', CA: 'Canada'} changes US to
            USA and CA to Canada. The labels we would have shown
            must match the keys exactly, after adding any
            tickprefix or ticksuffix. For negative numbers the
            minus sign symbol used (U+2212) is wider than the
            regular ascii dash. That means you need to use −1
            instead of -1. labelalias can be used with any axis
            type, and both keys (if needed) and values (if desired)
            can include html-like tags or MathJax.
        len
            Sets the length of the color bar This measure excludes
            the padding of both ends. That is, the color bar length
            is this length minus the padding on both ends.
        lenmode
            Determines whether this color bar's length (i.e. the
            measure in the color variation direction) is set in
            units of plot "fraction" or in *pixels. Use `len` to
            set the value.
        minexponent
            Hide SI prefix for 10^n if |n| is below this number.
            This only has an effect when `tickformat` is "SI" or
            "B".
        nticks
            Specifies the maximum number of ticks for the
            particular axis. The actual number of ticks will be
            chosen automatically to be less than or equal to
            `nticks`. Has an effect only if `tickmode` is set to
            "auto".
        orientation
            Sets the orientation of the colorbar.
        outlinecolor
            Sets the axis line color.
        outlinewidth
            Sets the width (in px) of the axis line.
        separatethousands
            If "true", even 4-digit integers are separated
        showexponent
            If "all", all exponents are shown besides their
            significands. If "first", only the exponent of the
            first tick is shown. If "last", only the exponent of
            the last tick is shown. If "none", no exponents appear.
        showticklabels
            Determines whether or not the tick labels are drawn.
        showtickprefix
            If "all", all tick labels are displayed with a prefix.
            If "first", only the first tick is displayed with a
            prefix. If "last", only the last tick is displayed with
            a suffix. If "none", tick prefixes are hidden.
        showticksuffix
            Same as `showtickprefix` but for tick suffixes.
        thickness
            Sets the thickness of the color bar This measure
            excludes the size of the padding, ticks and labels.
        thicknessmode
            Determines whether this color bar's thickness (i.e. the
            measure in the constant color direction) is set in
            units of plot "fraction" or in "pixels". Use
            `thickness` to set the value.
        tick0
            Sets the placement of the first tick on this axis. Use
            with `dtick`. If the axis `type` is "log", then you
            must take the log of your starting tick (e.g. to set
            the starting tick to 100, set the `tick0` to 2) except
            when `dtick`=*L<f>* (see `dtick` for more info). If the
            axis `type` is "date", it should be a date string, like
            date data. If the axis `type` is "category", it should
            be a number, using the scale where each category is
            assigned a serial number from zero in the order it
            appears.
        tickangle
            Sets the angle of the tick labels with respect to the
            horizontal. For example, a `tickangle` of -90 draws the
            tick labels vertically.
        tickcolor
            Sets the tick color.
        tickfont
            Sets the color bar's tick label font
        tickformat
            Sets the tick label formatting rule using d3 formatting
            mini-languages which are very similar to those in
            Python. For numbers, see:
            https://github.com/d3/d3-format/tree/v1.4.5#d3-format.
            And for dates see: https://github.com/d3/d3-time-
            format/tree/v2.2.3#locale_format. We add two items to
            d3's date formatter: "%h" for half of the year as a
            decimal number as well as "%{n}f" for fractional
            seconds with n digits. For example, *2016-10-13
            09:15:23.456* with tickformat "%H~%M~%S.%2f" would
            display "09~15~23.46"
        tickformatstops
            A tuple of :class:`plotly.graph_objects.bar.marker.colo
            rbar.Tickformatstop` instances or dicts with compatible
            properties
        tickformatstopdefaults
            When used in a template (as layout.template.data.bar.ma
            rker.colorbar.tickformatstopdefaults), sets the default
            property values to use for elements of
            bar.marker.colorbar.tickformatstops
        ticklabeloverflow
            Determines how we handle tick labels that would
            overflow either the graph div or the domain of the
            axis. The default value for inside tick labels is *hide
            past domain*. In other cases the default is *hide past
            div*.
        ticklabelposition
            Determines where tick labels are drawn relative to the
            ticks. Left and right options are used when
            `orientation` is "h", top and bottom when `orientation`
            is "v".
        ticklabelstep
            Sets the spacing between tick labels as compared to the
            spacing between ticks. A value of 1 (default) means
            each tick gets a label. A value of 2 means shows every
            2nd label. A larger value n means only every nth tick
            is labeled. `tick0` determines which labels are shown.
            Not implemented for axes with `type` "log" or
            "multicategory", or when `tickmode` is "array".
        ticklen
            Sets the tick length (in px).
        tickmode
            Sets the tick mode for this axis. If "auto", the number
            of ticks is set via `nticks`. If "linear", the
            placement of the ticks is determined by a starting
            position `tick0` and a tick step `dtick` ("linear" is
            the default value if `tick0` and `dtick` are provided).
            If "array", the placement of the ticks is set via
            `tickvals` and the tick text is `ticktext`. ("array" is
            the default value if `tickvals` is provided).
        tickprefix
            Sets a tick label prefix.
        ticks
            Determines whether ticks are drawn or not. If "", this
            axis' ticks are not drawn. If "outside" ("inside"),
            this axis' are drawn outside (inside) the axis lines.
        ticksuffix
            Sets a tick label suffix.
        ticktext
            Sets the text displayed at the ticks position via
            `tickvals`. Only has an effect if `tickmode` is set to
            "array". Used with `tickvals`.
        ticktextsrc
            Sets the source reference on Chart Studio Cloud for
            `ticktext`.
        tickvals
            Sets the values at which ticks on this axis appear.
            Only has an effect if `tickmode` is set to "array".
            Used with `ticktext`.
        tickvalssrc
            Sets the source reference on Chart Studio Cloud for
            `tickvals`.
        tickwidth
            Sets the tick width (in px).
        title
            :class:`plotly.graph_objects.bar.marker.colorbar.Title`
            instance or dict with compatible properties
        x
            Sets the x position with respect to `xref` of the color
            bar (in plot fraction). When `xref` is "paper",
            defaults to 1.02 when `orientation` is "v" and 0.5 when
            `orientation` is "h". When `xref` is "container",
            defaults to 1 when `orientation` is "v" and 0.5 when
            `orientation` is "h". Must be between 0 and 1 if `xref`
            is "container" and between "-2" and 3 if `xref` is
            "paper".
        xanchor
            Sets this color bar's horizontal position anchor. This
            anchor binds the `x` position to the "left", "center"
            or "right" of the color bar. Defaults to "left" when
            `orientation` is "v" and "center" when `orientation` is
            "h".
        xpad
            Sets the amount of padding (in px) along the x
            direction.
        xref
            Sets the container `x` refers to. "container" spans the
            entire `width` of the plot. "paper" refers to the width
            of the plotting area only.
        y
            Sets the y position with respect to `yref` of the color
            bar (in plot fraction). When `yref` is "paper",
            defaults to 0.5 when `orientation` is "v" and 1.02 when
            `orientation` is "h". When `yref` is "container",
            defaults to 0.5 when `orientation` is "v" and 1 when
            `orientation` is "h". Must be between 0 and 1 if `yref`
            is "container" and between "-2" and 3 if `yref` is
            "paper".
        yanchor
            Sets this color bar's vertical position anchor This
            anchor binds the `y` position to the "top", "middle" or
            "bottom" of the color bar. Defaults to "middle" when
            `orientation` is "v" and "bottom" when `orientation` is
            "h".
        ypad
            Sets the amount of padding (in px) along the y
            direction.
        yref
            Sets the container `y` refers to. "container" spans the
            entire `height` of the plot. "paper" refers to the
            height of the plotting area only.
        """
def __init__(
self,
arg=None,
bgcolor=None,
bordercolor=None,
borderwidth=None,
dtick=None,
exponentformat=None,
labelalias=None,
len=None,
lenmode=None,
minexponent=None,
nticks=None,
orientation=None,
outlinecolor=None,
outlinewidth=None,
separatethousands=None,
showexponent=None,
showticklabels=None,
showtickprefix=None,
showticksuffix=None,
thickness=None,
thicknessmode=None,
tick0=None,
tickangle=None,
tickcolor=None,
tickfont=None,
tickformat=None,
tickformatstops=None,
tickformatstopdefaults=None,
ticklabeloverflow=None,
ticklabelposition=None,
ticklabelstep=None,
ticklen=None,
tickmode=None,
tickprefix=None,
ticks=None,
ticksuffix=None,
ticktext=None,
ticktextsrc=None,
tickvals=None,
tickvalssrc=None,
tickwidth=None,
title=None,
x=None,
xanchor=None,
xpad=None,
xref=None,
y=None,
yanchor=None,
ypad=None,
yref=None,
**kwargs,
):
"""
Construct a new ColorBar object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.bar.marker.ColorBar`
bgcolor
Sets the color of padded area.
bordercolor
Sets the axis line color.
borderwidth
Sets the width (in px) or the border enclosing this
color bar.
dtick
Sets the step in-between ticks on this axis. Use with
`tick0`. Must be a positive number, or special strings
available to "log" and "date" axes. If the axis `type`
is "log", then ticks are set every 10^(n*dtick) where n
is the tick number. For example, to set a tick mark at
1, 10, 100, 1000, ... set dtick to 1. To set tick marks
at 1, 100, 10000, ... set dtick to 2. To set tick marks
at 1, 5, 25, 125, 625, 3125, ... set dtick to
log_10(5), or 0.69897000433. "log" has several special
values; "L<f>", where `f` is a positive number, gives
ticks linearly spaced in value (but not position). For
example `tick0` = 0.1, `dtick` = "L0.5" will put ticks
at 0.1, 0.6, 1.1, 1.6 etc. To show powers of 10 plus
small digits between, use "D1" (all digits) or "D2"
(only 2 and 5). `tick0` is ignored for "D1" and "D2".
If the axis `type` is "date", then you must convert the
time to milliseconds. For example, to set the interval
between ticks to one day, set `dtick` to 86400000.0.
"date" also has special values "M<n>" gives ticks
spaced by a number of months. `n` must be a positive
integer. To set ticks on the 15th of every third month,
set `tick0` to "2000-01-15" and `dtick` to "M3". To set
ticks every 4 years, set `dtick` to "M48"
exponentformat
Determines a formatting rule for the tick exponents.
For example, consider the number 1,000,000,000. If
"none", it appears as 1,000,000,000. If "e", 1e+9. If
"E", 1E+9. If "power", 1x10^9 (with 9 in a super
script). If "SI", 1G. If "B", 1B. "SI" uses prefixes
from "femto" f (10^-15) to "tera" T (10^12). *SI
extended* covers instead the full SI range from
"quecto" q (10^-30) to "quetta" Q (10^30). If "SI" or
*SI extended* is used and the exponent is beyond the
above ranges, the formatting rule will automatically be
switched to the power notation.
labelalias
Replacement text for specific tick or hover labels. For
example using {US: 'USA', CA: 'Canada'} changes US to
USA and CA to Canada. The labels we would have shown
must match the keys exactly, after adding any
tickprefix or ticksuffix. For negative numbers the
minus sign symbol used (U+2212) is wider than the
regular ascii dash. That means you need to use −1
instead of -1. labelalias can be used with any axis
type, and both keys (if needed) and values (if desired)
can include html-like tags or MathJax.
len
Sets the length of the color bar This measure excludes
the padding of both ends. That is, the color bar length
is this length minus the padding on both ends.
lenmode
Determines whether this color bar's length (i.e. the
measure in the color variation direction) is set in
units of plot "fraction" or in *pixels. Use `len` to
set the value.
minexponent
Hide SI prefix for 10^n if |n| is below this number.
This only has an effect when `tickformat` is "SI" or
"B".
nticks
Specifies the maximum number of ticks for the
particular axis. The actual number of ticks will be
chosen automatically to be less than or equal to
`nticks`. Has an effect only if `tickmode` is set to
"auto".
orientation
Sets the orientation of the colorbar.
outlinecolor
Sets the axis line color.
outlinewidth
Sets the width (in px) of the axis line.
separatethousands
If "true", even 4-digit integers are separated
showexponent
If "all", all exponents are shown besides their
significands. If "first", only the exponent of the
first tick is shown. If "last", only the exponent of
the last tick is shown. If "none", no exponents appear.
showticklabels
Determines whether or not the tick labels are drawn.
showtickprefix
If "all", all tick labels are displayed with a prefix.
If "first", only the first tick is displayed with a
prefix. If "last", only the last tick is displayed with
a suffix. If "none", tick prefixes are hidden.
showticksuffix
Same as `showtickprefix` but for tick suffixes.
thickness
Sets the thickness of the color bar This measure
excludes the size of the padding, ticks and labels.
thicknessmode
Determines whether this color bar's thickness (i.e. the
measure in the constant color direction) is set in
units of plot "fraction" or in "pixels". Use
`thickness` to set the value.
tick0
Sets the placement of the first tick on this axis. Use
with `dtick`. If the axis `type` is "log", then you
must take the log of your starting tick (e.g. to set
the starting tick to 100, set the `tick0` to 2) except
when `dtick`=*L<f>* (see `dtick` for more info). If the
axis `type` is "date", it should be a date string, like
date data. If the axis `type` is "category", it should
be a number, using the scale where each category is
assigned a serial number from zero in the order it
appears.
tickangle
Sets the angle of the tick labels with respect to the
horizontal. For example, a `tickangle` of -90 draws the
tick labels vertically.
tickcolor
Sets the tick color.
tickfont
Sets the color bar's tick label font
tickformat
Sets the tick label formatting rule using d3 formatting
mini-languages which are very similar to those in
Python. For numbers, see:
https://github.com/d3/d3-format/tree/v1.4.5#d3-format.
And for dates see: https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format. We add two items to
d3's date formatter: "%h" for half of the year as a
decimal number as well as "%{n}f" for fractional
seconds with n digits. For example, *2016-10-13
09:15:23.456* with tickformat "%H~%M~%S.%2f" would
display "09~15~23.46"
tickformatstops
A tuple of :class:`plotly.graph_objects.bar.marker.colo
rbar.Tickformatstop` instances or dicts with compatible
properties
tickformatstopdefaults
When used in a template (as layout.template.data.bar.ma
rker.colorbar.tickformatstopdefaults), sets the default
property values to use for elements of
bar.marker.colorbar.tickformatstops
ticklabeloverflow
Determines how we handle tick labels that would
overflow either the graph div or the domain of the
axis. The default value for inside tick labels is *hide
past domain*. In other cases the default is *hide past
div*.
ticklabelposition
Determines where tick labels are drawn relative to the
ticks. Left and right options are used when
`orientation` is "h", top and bottom when `orientation`
is "v".
ticklabelstep
Sets the spacing between tick labels as compared to the
spacing between ticks. A value of 1 (default) means
each tick gets a label. A value of 2 means shows every
2nd label. A larger value n means only every nth tick
is labeled. `tick0` determines which labels are shown.
Not implemented for axes with `type` "log" or
"multicategory", or when `tickmode` is "array".
ticklen
Sets the tick length (in px).
tickmode
Sets the tick mode for this axis. If "auto", the number
of ticks is set via `nticks`. If "linear", the
placement of the ticks is determined by a starting
position `tick0` and a tick step `dtick` ("linear" is
the default value if `tick0` and `dtick` are provided).
If "array", the placement of the ticks is set via
`tickvals` and the tick text is `ticktext`. ("array" is
the default value if `tickvals` is provided).
tickprefix
Sets a tick label prefix.
ticks
Determines whether ticks are drawn or not. If "", this
axis' ticks are not drawn. If "outside" ("inside"),
this axis' are drawn outside (inside) the axis lines.
ticksuffix
Sets a tick label suffix.
ticktext
Sets the text displayed at the ticks position via
`tickvals`. Only has an effect if `tickmode` is set to
"array". Used with `tickvals`.
ticktextsrc
Sets the source reference on Chart Studio Cloud for
`ticktext`.
tickvals
Sets the values at which ticks on this axis appear.
Only has an effect if `tickmode` is set to "array".
Used with `ticktext`.
tickvalssrc
Sets the source reference on Chart Studio Cloud for
`tickvals`.
tickwidth
Sets the tick width (in px).
title
:class:`plotly.graph_objects.bar.marker.colorbar.Title`
instance or dict with compatible properties
x
Sets the x position with respect to `xref` of the color
bar (in plot fraction). When `xref` is "paper",
defaults to 1.02 when `orientation` is "v" and 0.5 when
`orientation` is "h". When `xref` is "container",
defaults to 1 when `orientation` is "v" and 0.5 when
`orientation` is "h". Must be between 0 and 1 if `xref`
is "container" and between "-2" and 3 if `xref` is
"paper".
xanchor
Sets this color bar's horizontal position anchor. This
anchor binds the `x` position to the "left", "center"
or "right" of the color bar. Defaults to "left" when
`orientation` is "v" and "center" when `orientation` is
"h".
xpad
Sets the amount of padding (in px) along the x
direction.
xref
Sets the container `x` refers to. "container" spans the
entire `width` of the plot. "paper" refers to the width
of the plotting area only.
y
Sets the y position with respect to `yref` of the color
bar (in plot fraction). When `yref` is "paper",
defaults to 0.5 when `orientation` is "v" and 1.02 when
`orientation` is "h". When `yref` is "container",
defaults to 0.5 when `orientation` is "v" and 1 when
`orientation` is "h". Must be between 0 and 1 if `yref`
is "container" and between "-2" and 3 if `yref` is
"paper".
yanchor
Sets this color bar's vertical position anchor This
anchor binds the `y` position to the "top", "middle" or
"bottom" of the color bar. Defaults to "middle" when
`orientation` is "v" and "bottom" when `orientation` is
"h".
ypad
Sets the amount of padding (in px) along the y
direction.
yref
Sets the container `y` refers to. "container" spans the
entire `height` of the plot. "paper" refers to the
height of the plotting area only.
Returns
-------
ColorBar
"""
super().__init__("colorbar")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.bar.marker.ColorBar
constructor must be a dict or
an instance of :class:`plotly.graph_objs.bar.marker.ColorBar`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("bgcolor", arg, bgcolor)
self._set_property("bordercolor", arg, bordercolor)
self._set_property("borderwidth", arg, borderwidth)
self._set_property("dtick", arg, dtick)
self._set_property("exponentformat", arg, exponentformat)
self._set_property("labelalias", arg, labelalias)
self._set_property("len", arg, len)
self._set_property("lenmode", arg, lenmode)
self._set_property("minexponent", arg, minexponent)
self._set_property("nticks", arg, nticks)
self._set_property("orientation", arg, orientation)
self._set_property("outlinecolor", arg, outlinecolor)
self._set_property("outlinewidth", arg, outlinewidth)
self._set_property("separatethousands", arg, separatethousands)
self._set_property("showexponent", arg, showexponent)
self._set_property("showticklabels", arg, showticklabels)
self._set_property("showtickprefix", arg, showtickprefix)
self._set_property("showticksuffix", arg, showticksuffix)
self._set_property("thickness", arg, thickness)
self._set_property("thicknessmode", arg, thicknessmode)
self._set_property("tick0", arg, tick0)
self._set_property("tickangle", arg, tickangle)
self._set_property("tickcolor", arg, tickcolor)
self._set_property("tickfont", arg, tickfont)
self._set_property("tickformat", arg, tickformat)
self._set_property("tickformatstops", arg, tickformatstops)
self._set_property("tickformatstopdefaults", arg, tickformatstopdefaults)
self._set_property("ticklabeloverflow", arg, ticklabeloverflow)
self._set_property("ticklabelposition", arg, ticklabelposition)
self._set_property("ticklabelstep", arg, ticklabelstep)
self._set_property("ticklen", arg, ticklen)
self._set_property("tickmode", arg, tickmode)
self._set_property("tickprefix", arg, tickprefix)
self._set_property("ticks", arg, ticks)
self._set_property("ticksuffix", arg, ticksuffix)
self._set_property("ticktext", arg, ticktext)
self._set_property("ticktextsrc", arg, ticktextsrc)
self._set_property("tickvals", arg, tickvals)
self._set_property("tickvalssrc", arg, tickvalssrc)
self._set_property("tickwidth", arg, tickwidth)
self._set_property("title", arg, title)
self._set_property("x", arg, x)
self._set_property("xanchor", arg, xanchor)
self._set_property("xpad", arg, xpad)
self._set_property("xref", arg, xref)
self._set_property("y", arg, y)
self._set_property("yanchor", arg, yanchor)
self._set_property("ypad", arg, ypad)
self._set_property("yref", arg, yref)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| ColorBar |
python | huggingface__transformers | src/transformers/models/clipseg/modeling_clipseg.py | {
"start": 16096,
"end": 18089
} | class ____(GradientCheckpointingLayer):
def __init__(self, config: CLIPSegConfig):
super().__init__()
self.embed_dim = config.hidden_size
self.self_attn = CLIPSegAttention(config)
self.layer_norm1 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
self.mlp = CLIPSegMLP(config)
self.layer_norm2 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: torch.Tensor,
causal_attention_mask: torch.Tensor,
output_attentions: Optional[bool] = False,
) -> tuple[torch.FloatTensor]:
"""
Args:
hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
attention_mask (`torch.FloatTensor`): attention mask of size
`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
`(config.encoder_attention_heads,)`.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
"""
residual = hidden_states
hidden_states = self.layer_norm1(hidden_states)
hidden_states, attn_weights = self.self_attn(
hidden_states=hidden_states,
attention_mask=attention_mask,
causal_attention_mask=causal_attention_mask,
output_attentions=output_attentions,
)
hidden_states = residual + hidden_states
residual = hidden_states
hidden_states = self.layer_norm2(hidden_states)
hidden_states = self.mlp(hidden_states)
hidden_states = residual + hidden_states
outputs = (hidden_states,)
if output_attentions:
outputs += (attn_weights,)
return outputs
@auto_docstring
| CLIPSegEncoderLayer |
python | tornadoweb__tornado | tornado/test/web_test.py | {
"start": 66142,
"end": 66970
} | class ____(SimpleHandlerTestCase):
class Handler(RequestHandler):
def get(self):
self.set_header("Content-Language", "en_US")
self.write("hello")
def test_304_headers(self):
response1 = self.fetch("/")
self.assertEqual(response1.headers["Content-Length"], "5")
self.assertEqual(response1.headers["Content-Language"], "en_US")
response2 = self.fetch(
"/", headers={"If-None-Match": response1.headers["Etag"]}
)
self.assertEqual(response2.code, 304)
self.assertNotIn("Content-Length", response2.headers)
self.assertNotIn("Content-Language", response2.headers)
# Not an entity header, but should not be added to 304s by chunking
self.assertNotIn("Transfer-Encoding", response2.headers)
| Header304Test |
python | pandas-dev__pandas | pandas/io/stata.py | {
"start": 116051,
"end": 122284
} | class ____:
"""
Converter for Stata StrLs
Stata StrLs map 8 byte values to strings which are stored using a
dictionary-like format where strings are keyed to two values.
Parameters
----------
df : DataFrame
DataFrame to convert
columns : Sequence[str]
List of columns names to convert to StrL
version : int, optional
dta version. Currently supports 117, 118 and 119
byteorder : str, optional
Can be ">", "<", "little", or "big". default is `sys.byteorder`
Notes
-----
Supports creation of the StrL block of a dta file for dta versions
117, 118 and 119. These differ in how the GSO is stored. 118 and
119 store the GSO lookup value as a uint32 and a uint64, while 117
uses two uint32s. 118 and 119 also encode all strings as unicode
which is required by the format. 117 uses 'latin-1' a fixed width
encoding that extends the 7-bit ascii table with an additional 128
characters.
"""
def __init__(
self,
df: DataFrame,
columns: Sequence[str],
version: int = 117,
byteorder: str | None = None,
) -> None:
if version not in (117, 118, 119):
raise ValueError("Only dta versions 117, 118 and 119 supported")
self._dta_ver = version
self.df = df
self.columns = columns
self._gso_table = {"": (0, 0)}
if byteorder is None:
byteorder = sys.byteorder
self._byteorder = _set_endianness(byteorder)
# Flag whether chosen byteorder matches the system on which we're running
self._native_byteorder = self._byteorder == _set_endianness(sys.byteorder)
gso_v_type = "I" # uint32
gso_o_type = "Q" # uint64
self._encoding = "utf-8"
if version == 117:
o_size = 4
gso_o_type = "I" # 117 used uint32
self._encoding = "latin-1"
elif version == 118:
o_size = 6
else: # version == 119
o_size = 5
if self._native_byteorder:
self._o_offet = 2 ** (8 * (8 - o_size))
else:
self._o_offet = 2 ** (8 * o_size)
self._gso_o_type = gso_o_type
self._gso_v_type = gso_v_type
def _convert_key(self, key: tuple[int, int]) -> int:
v, o = key
if self._native_byteorder:
return v + self._o_offet * o
else:
# v, o will be swapped when applying byteorder
return o + self._o_offet * v
def generate_table(self) -> tuple[dict[str, tuple[int, int]], DataFrame]:
"""
Generates the GSO lookup table for the DataFrame
Returns
-------
gso_table : dict
Ordered dictionary using the string found as keys
and their lookup position (v,o) as values
gso_df : DataFrame
DataFrame where strl columns have been converted to
(v,o) values
Notes
-----
Modifies the DataFrame in-place.
The DataFrame returned encodes the (v,o) values as uint64s. The
encoding depends on the dta version, and can be expressed as
enc = v + o * 2 ** (o_size * 8)
so that v is stored in the lower bits and o is in the upper
bits. o_size is
* 117: 4
* 118: 6
* 119: 5
"""
gso_table = self._gso_table
gso_df = self.df
columns = list(gso_df.columns)
selected = gso_df[self.columns]
col_index = [(col, columns.index(col)) for col in self.columns]
keys = np.empty(selected.shape, dtype=np.uint64)
for o, (idx, row) in enumerate(selected.iterrows()):
for j, (col, v) in enumerate(col_index):
val = row[col]
# Allow columns with mixed str and None or pd.NA (GH 23633)
val = "" if isna(val) else val
key = gso_table.get(val, None)
if key is None:
# Stata prefers human numbers
key = (v + 1, o + 1)
gso_table[val] = key
keys[o, j] = self._convert_key(key)
for i, col in enumerate(self.columns):
gso_df[col] = keys[:, i]
return gso_table, gso_df
def generate_blob(self, gso_table: dict[str, tuple[int, int]]) -> bytes:
"""
Generates the binary blob of GSOs that is written to the dta file.
Parameters
----------
gso_table : dict
Ordered dictionary (str, vo)
Returns
-------
gso : bytes
Binary content of dta file to be placed between strl tags
Notes
-----
Output format depends on dta version. 117 uses two uint32s to
express v and o while 118+ uses a uint32 for v and a uint64 for o.
"""
# Format information
# Length includes null term
# 117
# GSOvvvvooootllllxxxxxxxxxxxxxxx...x
# 3 u4 u4 u1 u4 string + null term
#
# 118, 119
# GSOvvvvooooooootllllxxxxxxxxxxxxxxx...x
# 3 u4 u8 u1 u4 string + null term
bio = BytesIO()
gso = bytes("GSO", "ascii")
gso_type = struct.pack(self._byteorder + "B", 130)
null = struct.pack(self._byteorder + "B", 0)
v_type = self._byteorder + self._gso_v_type
o_type = self._byteorder + self._gso_o_type
len_type = self._byteorder + "I"
for strl, vo in gso_table.items():
if vo == (0, 0):
continue
v, o = vo
# GSO
bio.write(gso)
# vvvv
bio.write(struct.pack(v_type, v))
# oooo / oooooooo
bio.write(struct.pack(o_type, o))
# t
bio.write(gso_type)
# llll
if isinstance(strl, str):
strl_convert = bytes(strl, "utf-8")
else:
strl_convert = strl
bio.write(struct.pack(len_type, len(strl_convert) + 1))
# xxx...xxx
bio.write(strl_convert)
bio.write(null)
return bio.getvalue()
| StataStrLWriter |
python | marshmallow-code__apispec | tests/test_ext_marshmallow_common.py | {
"start": 1160,
"end": 2017
} | class ____:
def test_unique_name(self, spec):
properties = {
"id": {"type": "integer", "format": "int64"},
"name": {"type": "string", "example": "doggie"},
}
name = get_unique_schema_name(spec.components, "Pet")
assert name == "Pet"
spec.components.schema("Pet", properties=properties)
with pytest.warns(
UserWarning, match="Multiple schemas resolved to the name Pet"
):
name_1 = get_unique_schema_name(spec.components, "Pet")
assert name_1 == "Pet1"
spec.components.schema("Pet1", properties=properties)
with pytest.warns(
UserWarning, match="Multiple schemas resolved to the name Pet"
):
name_2 = get_unique_schema_name(spec.components, "Pet")
assert name_2 == "Pet2"
| TestUniqueName |
python | scipy__scipy | scipy/io/tests/test_paths.py | {
"start": 230,
"end": 3190
} | class ____:
data = np.arange(5).astype(np.int64)
def test_savemat(self):
with tempdir() as temp_dir:
path = Path(temp_dir) / 'data.mat'
scipy.io.savemat(path, {'data': self.data})
assert path.is_file()
def test_loadmat(self):
# Save data with string path, load with pathlib.Path
with tempdir() as temp_dir:
path = Path(temp_dir) / 'data.mat'
scipy.io.savemat(str(path), {'data': self.data})
mat_contents = scipy.io.loadmat(path)
assert (mat_contents['data'] == self.data).all()
def test_whosmat(self):
# Save data with string path, load with pathlib.Path
with tempdir() as temp_dir:
path = Path(temp_dir) / 'data.mat'
scipy.io.savemat(str(path), {'data': self.data})
contents = scipy.io.whosmat(path)
assert contents[0] == ('data', (1, 5), 'int64')
def test_readsav(self):
path = Path(__file__).parent / 'data/scalar_string.sav'
scipy.io.readsav(path)
def test_hb_read(self):
# Save data with string path, load with pathlib.Path
with tempdir() as temp_dir:
data = scipy.sparse.eye_array(3, format='csr')
path = Path(temp_dir) / 'data.hb'
scipy.io.hb_write(str(path), data)
data_new = scipy.io.hb_read(path, spmatrix=False)
assert (data_new != data).nnz == 0
def test_hb_write(self):
with tempdir() as temp_dir:
data = scipy.sparse.eye_array(3, format='csr')
path = Path(temp_dir) / 'data.hb'
scipy.io.hb_write(path, data)
assert path.is_file()
def test_mmio_read(self):
# Save data with string path, load with pathlib.Path
with tempdir() as temp_dir:
data = scipy.sparse.eye_array(3, format='csr')
path = Path(temp_dir) / 'data.mtx'
scipy.io.mmwrite(str(path), data)
data_new = scipy.io.mmread(path, spmatrix=False)
assert (data_new != data).nnz == 0
def test_mmio_write(self):
with tempdir() as temp_dir:
data = scipy.sparse.eye_array(3, format='csr')
path = Path(temp_dir) / 'data.mtx'
scipy.io.mmwrite(path, data)
def test_netcdf_file(self):
path = Path(__file__).parent / 'data/example_1.nc'
scipy.io.netcdf_file(path)
def test_wavfile_read(self):
path = Path(__file__).parent / 'data/test-8000Hz-le-2ch-1byteu.wav'
scipy.io.wavfile.read(path)
def test_wavfile_write(self):
# Read from str path, write to Path
input_path = Path(__file__).parent / 'data/test-8000Hz-le-2ch-1byteu.wav'
rate, data = scipy.io.wavfile.read(str(input_path))
with tempdir() as temp_dir:
output_path = Path(temp_dir) / input_path.name
scipy.io.wavfile.write(output_path, rate, data)
| TestPaths |
python | python__mypy | mypyc/ir/ops.py | {
"start": 24974,
"end": 25900
} | class ____(RegisterOp):
"""Load an error value.
Each type has one reserved value that signals an error (exception). This
loads the error value for a specific type.
"""
error_kind = ERR_NEVER
def __init__(
self, rtype: RType, line: int = -1, is_borrowed: bool = False, undefines: bool = False
) -> None:
super().__init__(line)
self.type = rtype
self.is_borrowed = is_borrowed
# Undefines is true if this should viewed by the definedness
# analysis pass as making the register it is assigned to
# undefined (and thus checks should be added on uses).
self.undefines = undefines
def sources(self) -> list[Value]:
return []
def set_sources(self, new: list[Value]) -> None:
assert not new
def accept(self, visitor: OpVisitor[T]) -> T:
return visitor.visit_load_error_value(self)
@final
| LoadErrorValue |
python | apache__airflow | task-sdk/src/airflow/sdk/exceptions.py | {
"start": 8424,
"end": 8891
} | class ____(AirflowException):
"""
Signal by an operator to skip its downstream tasks.
Special exception raised to signal that the operator it was raised from wishes to skip
downstream tasks. This is used in the ShortCircuitOperator.
:param tasks: List of task_ids to skip or a list of tuples with task_id and map_index to skip.
"""
def __init__(self, *, tasks):
super().__init__()
self.tasks = tasks
| DownstreamTasksSkipped |
python | PyCQA__pylint | tests/functional/u/used/used_before_assignment_py310.py | {
"start": 602,
"end": 2809
} | class ____(Enum):
FOO = 1
BAR = 2
def check_value_if_then_match_return(example: Example, should_check: bool) -> str | None:
if should_check:
result = None
else:
match example:
case Example.FOO:
result = "foo"
case Example.BAR:
result = "bar"
case _:
return None
return result
def check_value_if_then_match_raise(example: Example, should_check: bool) -> str | None:
if should_check:
result = None
else:
match example:
case Example.FOO:
result = "foo"
case Example.BAR:
result = "bar"
case _:
raise ValueError("Not a valid enum")
return result
def check_value_if_then_match_assert_never(example: Example, should_check: bool) -> str | None:
if should_check:
result = None
else:
match example:
case Example.FOO:
result = "foo"
case Example.BAR:
result = "bar"
case _:
assert_never(example)
return result
def g(x):
if x is None:
y = 0
else:
match x:
case int():
y = x
case _:
raise TypeError(type(x))
return y
def check_value_if_then_match_nested(
example: Example, example_inner: Example, should_check: bool
) -> str | None:
if should_check:
result = None
else:
match example:
case Example.FOO:
match example_inner:
case Example.BAR:
result = "bar"
case _:
return None
case _:
return None
return result
def check_value_if_then_match_non_exhaustive(example: Example, should_check: bool) -> str | None:
if should_check:
result = None
else:
match example:
case Example.FOO:
result = "foo"
case Example.BAR:
pass
case _:
return None
return result # [possibly-used-before-assignment]
| Example |
python | pydantic__pydantic | tests/test_forward_ref.py | {
"start": 31400,
"end": 32861
} | class ____(Bar):
pass
"""
)
assert module_2.Foo.__pydantic_core_schema__['schema']['extras_schema'] == {'type': 'int'}
# TODO remove when we drop support for Python 3.10, in 3.11+ string annotations are properly evaluated
# in PEP 585 generics.
def test_pydantic_extra_forward_ref_evaluated_pep585() -> None:
class Bar(BaseModel, extra='allow'):
__pydantic_extra__: dict['str', int]
# This is a way to test that `'str'` is properly evaluated (for Python <3.11, see comments in
# `GenerateSchema._get_args_resolving_forward_refs()`) and as such `extra_keys_schema` isn't
# set because `str` is the default.
assert 'extras_keys_schema' not in Bar.__pydantic_core_schema__['schema']
@pytest.mark.xfail(
reason='While `get_cls_type_hints` uses the correct module ns for each base, `collect_model_fields` '
'will still use the `FieldInfo` instances from each base (see the `parent_fields_lookup` logic). '
'This means that `f` is still a forward ref in `Foo.model_fields`, and it gets evaluated in '
'`GenerateSchema._model_schema`, where only the module of `Foo` is considered.'
)
def test_uses_the_correct_globals_to_resolve_model_forward_refs(create_module):
@create_module
def module_1():
from pydantic import BaseModel
class Bar(BaseModel):
f: 'A'
A = int
module_2 = create_module(
f"""
from {module_1.__name__} import Bar
A = str
| Foo |
python | sympy__sympy | sympy/geometry/polygon.py | {
"start": 45281,
"end": 60494
} | class ____(Polygon):
"""
A regular polygon.
Such a polygon has all internal angles equal and all sides the same length.
Parameters
==========
center : Point
radius : number or Basic instance
The distance from the center to a vertex
n : int
The number of sides
Attributes
==========
vertices
center
radius
rotation
apothem
interior_angle
exterior_angle
circumcircle
incircle
angles
Raises
======
GeometryError
If the `center` is not a Point, or the `radius` is not a number or Basic
instance, or the number of sides, `n`, is less than three.
Notes
=====
A RegularPolygon can be instantiated with Polygon with the kwarg n.
Regular polygons are instantiated with a center, radius, number of sides
and a rotation angle. Whereas the arguments of a Polygon are vertices, the
vertices of the RegularPolygon must be obtained with the vertices method.
See Also
========
sympy.geometry.point.Point, Polygon
Examples
========
>>> from sympy import RegularPolygon, Point
>>> r = RegularPolygon(Point(0, 0), 5, 3)
>>> r
RegularPolygon(Point2D(0, 0), 5, 3, 0)
>>> r.vertices[0]
Point2D(5, 0)
"""
__slots__ = ('_n', '_center', '_radius', '_rot')
def __new__(self, c, r, n, rot=0, **kwargs):
r, n, rot = map(sympify, (r, n, rot))
c = Point(c, dim=2, **kwargs)
if not isinstance(r, Expr):
raise GeometryError("r must be an Expr object, not %s" % r)
if n.is_Number:
as_int(n) # let an error raise if necessary
if n < 3:
raise GeometryError("n must be a >= 3, not %s" % n)
obj = GeometryEntity.__new__(self, c, r, n, **kwargs)
obj._n = n
obj._center = c
obj._radius = r
obj._rot = rot % (2*S.Pi/n) if rot.is_number else rot
return obj
def _eval_evalf(self, prec=15, **options):
c, r, n, a = self.args
dps = prec_to_dps(prec)
c, r, a = [i.evalf(n=dps, **options) for i in (c, r, a)]
return self.func(c, r, n, a)
@property
def args(self):
"""
Returns the center point, the radius,
the number of sides, and the orientation angle.
Examples
========
>>> from sympy import RegularPolygon, Point
>>> r = RegularPolygon(Point(0, 0), 5, 3)
>>> r.args
(Point2D(0, 0), 5, 3, 0)
"""
return self._center, self._radius, self._n, self._rot
def __str__(self):
return 'RegularPolygon(%s, %s, %s, %s)' % tuple(self.args)
def __repr__(self):
return 'RegularPolygon(%s, %s, %s, %s)' % tuple(self.args)
@property
def area(self):
"""Returns the area.
Examples
========
>>> from sympy import RegularPolygon
>>> square = RegularPolygon((0, 0), 1, 4)
>>> square.area
2
>>> _ == square.length**2
True
"""
c, r, n, rot = self.args
return sign(r)*n*self.length**2/(4*tan(pi/n))
@property
def length(self):
"""Returns the length of the sides.
The half-length of the side and the apothem form two legs
of a right triangle whose hypotenuse is the radius of the
regular polygon.
Examples
========
>>> from sympy import RegularPolygon
>>> from sympy import sqrt
>>> s = square_in_unit_circle = RegularPolygon((0, 0), 1, 4)
>>> s.length
sqrt(2)
>>> sqrt((_/2)**2 + s.apothem**2) == s.radius
True
"""
return self.radius*2*sin(pi/self._n)
@property
def center(self):
"""The center of the RegularPolygon
This is also the center of the circumscribing circle.
Returns
=======
center : Point
See Also
========
sympy.geometry.point.Point, sympy.geometry.ellipse.Ellipse.center
Examples
========
>>> from sympy import RegularPolygon, Point
>>> rp = RegularPolygon(Point(0, 0), 5, 4)
>>> rp.center
Point2D(0, 0)
"""
return self._center
centroid = center
@property
def circumcenter(self):
"""
Alias for center.
Examples
========
>>> from sympy import RegularPolygon, Point
>>> rp = RegularPolygon(Point(0, 0), 5, 4)
>>> rp.circumcenter
Point2D(0, 0)
"""
return self.center
@property
def radius(self):
"""Radius of the RegularPolygon
This is also the radius of the circumscribing circle.
Returns
=======
radius : number or instance of Basic
See Also
========
sympy.geometry.line.Segment.length, sympy.geometry.ellipse.Circle.radius
Examples
========
>>> from sympy import Symbol
>>> from sympy import RegularPolygon, Point
>>> radius = Symbol('r')
>>> rp = RegularPolygon(Point(0, 0), radius, 4)
>>> rp.radius
r
"""
return self._radius
@property
def circumradius(self):
"""
Alias for radius.
Examples
========
>>> from sympy import Symbol
>>> from sympy import RegularPolygon, Point
>>> radius = Symbol('r')
>>> rp = RegularPolygon(Point(0, 0), radius, 4)
>>> rp.circumradius
r
"""
return self.radius
@property
def rotation(self):
"""CCW angle by which the RegularPolygon is rotated
Returns
=======
rotation : number or instance of Basic
Examples
========
>>> from sympy import pi
>>> from sympy.abc import a
>>> from sympy import RegularPolygon, Point
>>> RegularPolygon(Point(0, 0), 3, 4, pi/4).rotation
pi/4
Numerical rotation angles are made canonical:
>>> RegularPolygon(Point(0, 0), 3, 4, a).rotation
a
>>> RegularPolygon(Point(0, 0), 3, 4, pi).rotation
0
"""
return self._rot
@property
def apothem(self):
"""The inradius of the RegularPolygon.
The apothem/inradius is the radius of the inscribed circle.
Returns
=======
apothem : number or instance of Basic
See Also
========
sympy.geometry.line.Segment.length, sympy.geometry.ellipse.Circle.radius
Examples
========
>>> from sympy import Symbol
>>> from sympy import RegularPolygon, Point
>>> radius = Symbol('r')
>>> rp = RegularPolygon(Point(0, 0), radius, 4)
>>> rp.apothem
sqrt(2)*r/2
"""
return self.radius * cos(S.Pi/self._n)
@property
def inradius(self):
"""
Alias for apothem.
Examples
========
>>> from sympy import Symbol
>>> from sympy import RegularPolygon, Point
>>> radius = Symbol('r')
>>> rp = RegularPolygon(Point(0, 0), radius, 4)
>>> rp.inradius
sqrt(2)*r/2
"""
return self.apothem
@property
def interior_angle(self):
"""Measure of the interior angles.
Returns
=======
interior_angle : number
See Also
========
sympy.geometry.line.LinearEntity.angle_between
Examples
========
>>> from sympy import RegularPolygon, Point
>>> rp = RegularPolygon(Point(0, 0), 4, 8)
>>> rp.interior_angle
3*pi/4
"""
return (self._n - 2)*S.Pi/self._n
@property
def exterior_angle(self):
"""Measure of the exterior angles.
Returns
=======
exterior_angle : number
See Also
========
sympy.geometry.line.LinearEntity.angle_between
Examples
========
>>> from sympy import RegularPolygon, Point
>>> rp = RegularPolygon(Point(0, 0), 4, 8)
>>> rp.exterior_angle
pi/4
"""
return 2*S.Pi/self._n
@property
def circumcircle(self):
"""The circumcircle of the RegularPolygon.
Returns
=======
circumcircle : Circle
See Also
========
circumcenter, sympy.geometry.ellipse.Circle
Examples
========
>>> from sympy import RegularPolygon, Point
>>> rp = RegularPolygon(Point(0, 0), 4, 8)
>>> rp.circumcircle
Circle(Point2D(0, 0), 4)
"""
return Circle(self.center, self.radius)
@property
def incircle(self):
"""The incircle of the RegularPolygon.
Returns
=======
incircle : Circle
See Also
========
inradius, sympy.geometry.ellipse.Circle
Examples
========
>>> from sympy import RegularPolygon, Point
>>> rp = RegularPolygon(Point(0, 0), 4, 7)
>>> rp.incircle
Circle(Point2D(0, 0), 4*cos(pi/7))
"""
return Circle(self.center, self.apothem)
@property
def angles(self):
"""
Returns a dictionary with keys, the vertices of the Polygon,
and values, the interior angle at each vertex.
Examples
========
>>> from sympy import RegularPolygon, Point
>>> r = RegularPolygon(Point(0, 0), 5, 3)
>>> r.angles
{Point2D(-5/2, -5*sqrt(3)/2): pi/3,
Point2D(-5/2, 5*sqrt(3)/2): pi/3,
Point2D(5, 0): pi/3}
"""
ret = {}
ang = self.interior_angle
for v in self.vertices:
ret[v] = ang
return ret
def encloses_point(self, p):
"""
Return True if p is enclosed by (is inside of) self.
Notes
=====
Being on the border of self is considered False.
The general Polygon.encloses_point method is called only if
a point is not within or beyond the incircle or circumcircle,
respectively.
Parameters
==========
p : Point
Returns
=======
encloses_point : True, False or None
See Also
========
sympy.geometry.ellipse.Ellipse.encloses_point
Examples
========
>>> from sympy import RegularPolygon, S, Point, Symbol
>>> p = RegularPolygon((0, 0), 3, 4)
>>> p.encloses_point(Point(0, 0))
True
>>> r, R = p.inradius, p.circumradius
>>> p.encloses_point(Point((r + R)/2, 0))
True
>>> p.encloses_point(Point(R/2, R/2 + (R - r)/10))
False
>>> t = Symbol('t', real=True)
>>> p.encloses_point(p.arbitrary_point().subs(t, S.Half))
False
>>> p.encloses_point(Point(5, 5))
False
"""
c = self.center
d = Segment(c, p).length
if d >= self.radius:
return False
elif d < self.inradius:
return True
else:
# now enumerate the RegularPolygon like a general polygon.
return Polygon.encloses_point(self, p)
def spin(self, angle):
"""Increment *in place* the virtual Polygon's rotation by ccw angle.
See also: rotate method which moves the center.
>>> from sympy import Polygon, Point, pi
>>> r = Polygon(Point(0,0), 1, n=3)
>>> r.vertices[0]
Point2D(1, 0)
>>> r.spin(pi/6)
>>> r.vertices[0]
Point2D(sqrt(3)/2, 1/2)
See Also
========
rotation
rotate : Creates a copy of the RegularPolygon rotated about a Point
"""
self._rot += angle
def rotate(self, angle, pt=None):
"""Override GeometryEntity.rotate to first rotate the RegularPolygon
about its center.
>>> from sympy import Point, RegularPolygon, pi
>>> t = RegularPolygon(Point(1, 0), 1, 3)
>>> t.vertices[0] # vertex on x-axis
Point2D(2, 0)
>>> t.rotate(pi/2).vertices[0] # vertex on y axis now
Point2D(0, 2)
See Also
========
rotation
spin : Rotates a RegularPolygon in place
"""
r = type(self)(*self.args) # need a copy or else changes are in-place
r._rot += angle
return GeometryEntity.rotate(r, angle, pt)
def scale(self, x=1, y=1, pt=None):
"""Override GeometryEntity.scale since it is the radius that must be
scaled (if x == y) or else a new Polygon must be returned.
>>> from sympy import RegularPolygon
Symmetric scaling returns a RegularPolygon:
>>> RegularPolygon((0, 0), 1, 4).scale(2, 2)
RegularPolygon(Point2D(0, 0), 2, 4, 0)
Asymmetric scaling returns a kite as a Polygon:
>>> RegularPolygon((0, 0), 1, 4).scale(2, 1)
Polygon(Point2D(2, 0), Point2D(0, 1), Point2D(-2, 0), Point2D(0, -1))
"""
if pt:
pt = Point(pt, dim=2)
return self.translate(*(-pt).args).scale(x, y).translate(*pt.args)
if x != y:
return Polygon(*self.vertices).scale(x, y)
c, r, n, rot = self.args
r *= x
return self.func(c, r, n, rot)
def reflect(self, line):
"""Override GeometryEntity.reflect since this is not made of only
points.
Examples
========
>>> from sympy import RegularPolygon, Line
>>> RegularPolygon((0, 0), 1, 4).reflect(Line((0, 1), slope=-2))
RegularPolygon(Point2D(4/5, 2/5), -1, 4, atan(4/3))
"""
c, r, n, rot = self.args
v = self.vertices[0]
d = v - c
cc = c.reflect(line)
vv = v.reflect(line)
dd = vv - cc
# calculate rotation about the new center
# which will align the vertices
l1 = Ray((0, 0), dd)
l2 = Ray((0, 0), d)
ang = l1.closing_angle(l2)
rot += ang
# change sign of radius as point traversal is reversed
return self.func(cc, -r, n, rot)
@property
def vertices(self):
"""The vertices of the RegularPolygon.
Returns
=======
vertices : list
Each vertex is a Point.
See Also
========
sympy.geometry.point.Point
Examples
========
>>> from sympy import RegularPolygon, Point
>>> rp = RegularPolygon(Point(0, 0), 5, 4)
>>> rp.vertices
[Point2D(5, 0), Point2D(0, 5), Point2D(-5, 0), Point2D(0, -5)]
"""
c = self._center
r = abs(self._radius)
rot = self._rot
v = 2*S.Pi/self._n
return [Point(c.x + r*cos(k*v + rot), c.y + r*sin(k*v + rot))
for k in range(self._n)]
def __eq__(self, o):
if not isinstance(o, Polygon):
return False
elif not isinstance(o, RegularPolygon):
return Polygon.__eq__(o, self)
return self.args == o.args
def __hash__(self):
return super().__hash__()
| RegularPolygon |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-marketo/source_marketo/source.py | {
"start": 14535,
"end": 17074
} | class ____(MarketoExportBase):
"""
Base class for all the activities streams,
provides functionality for dynamically created classes as streams of data.
API Docs: https://developers.marketo.com/rest-api/bulk-extract/bulk-activity-extract/
"""
primary_key = "marketoGUID"
cursor_field = "activityDate"
def __init__(self, config: Mapping[str, Any]):
super().__init__(config, "activities")
@property
def stream_filter(self):
return {"activityTypeIds": [self.activity["id"]]}
def get_json_schema(self) -> Mapping[str, Any]:
properties = {
"marketoGUID": {"type": ["null", "string"]},
"leadId": {"type": ["null", "integer"]},
"activityDate": {"type": ["null", "string"], "format": "date-time"},
"activityTypeId": {"type": ["null", "integer"]},
"campaignId": {"type": ["null", "integer"]},
"primaryAttributeValueId": {"type": ["null", "string"]},
"primaryAttributeValue": {"type": ["null", "string"]},
}
if "attributes" in self.activity:
for attr in self.activity["attributes"]:
attr_name = clean_string(attr["name"])
if attr["dataType"] == "date":
field_schema = {"type": "string", "format": "date"}
elif attr["dataType"] == "datetime":
field_schema = {"type": "string", "format": "date-time"}
elif attr["dataType"] in ["integer", "percent", "score"]:
field_schema = {"type": "integer"}
elif attr["dataType"] in ["float", "currency"]:
field_schema = {"type": "number"}
elif attr["dataType"] == "boolean":
field_schema = {"type": "boolean"}
elif attr["dataType"] in STRING_TYPES:
field_schema = {"type": "string"}
elif attr["dataType"] in ["array"]:
field_schema = {"type": "array", "items": {"type": ["integer", "number", "string", "null"]}}
else:
field_schema = {"type": "string"}
field_schema["type"] = [field_schema["type"], "null"]
properties[attr_name] = field_schema
schema = {
"$schema": "http://json-schema.org/draft-07/schema#",
"type": ["null", "object"],
"additionalProperties": True,
"properties": properties,
}
return schema
| Activities |
python | etianen__django-reversion | tests/test_app/tests/test_views.py | {
"start": 777,
"end": 1311
} | class ____(TestModelMixin, TestBase):
def testRevisionMixin(self):
response = self.client.post("/test-app/revision-mixin/")
obj = TestModel.objects.get(pk=response.content)
self.assertSingleRevision((obj,))
def testRevisionMixinGet(self):
self.client.get("/test-app/revision-mixin/")
self.assertNoRevision()
def testRevisionMixinCustomPredicate(self):
self.client.post("/test-app/revision-mixin/", HTTP_X_NOREVISION="true")
self.assertNoRevision()
| RevisionMixinTest |
python | django__django | django/contrib/admin/views/main.py | {
"start": 1335,
"end": 1638
} | class ____(forms.Form):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# Populate "fields" dynamically because SEARCH_VAR is a variable:
self.fields = {
SEARCH_VAR: forms.CharField(required=False, strip=False),
}
| ChangeListSearchForm |
python | apache__avro | lang/py/avro/test/test_schema.py | {
"start": 34233,
"end": 36111
} | class ____(unittest.TestCase):
"""Enable generating parse test cases over all the valid and invalid example schema."""
def __init__(self, test_schema):
"""Ignore the normal signature for unittest.TestCase because we are generating
many test cases from this one class. This is safe as long as the autoloader
ignores this class. The autoloader will ignore this class as long as it has
no methods starting with `test_`.
"""
super().__init__("parse_valid" if test_schema.valid else "parse_invalid")
self.test_schema = test_schema
# Never hide repeated warnings when running this test case.
warnings.simplefilter("always")
def parse_valid(self) -> None:
"""Parsing a valid schema should not error, but may contain warnings."""
test_warnings = self.test_schema.warnings or []
try:
warnings.filterwarnings(action="error", category=avro.errors.IgnoredLogicalType)
self.test_schema.parse()
except avro.errors.IgnoredLogicalType as e:
self.assertIn(type(e), (type(w) for w in test_warnings))
self.assertIn(str(e), (str(w) for w in test_warnings))
except (avro.errors.AvroException, avro.errors.SchemaParseException): # pragma: no coverage
self.fail(f"Valid schema failed to parse: {self.test_schema!s}")
else:
self.assertEqual([], test_warnings)
finally:
warnings.filterwarnings(action="default", category=avro.errors.IgnoredLogicalType)
def parse_invalid(self):
"""Parsing an invalid schema should error."""
with self.assertRaises(
(avro.errors.AvroException, avro.errors.SchemaParseException), msg=f"Invalid schema should not have parsed: {self.test_schema!s}"
):
self.test_schema.parse()
| SchemaParseTestCase |
python | donnemartin__interactive-coding-challenges | recursion_dynamic/coin_change_min/test_coin_change_min.py | {
"start": 18,
"end": 622
} | class ____(unittest.TestCase):
def test_coin_change(self):
coin_changer = CoinChanger()
self.assertRaises(TypeError, coin_changer.make_change, None, None)
self.assertEqual(coin_changer.make_change([], 0), 0)
self.assertEqual(coin_changer.make_change([1, 2, 3], 5), 2)
self.assertEqual(coin_changer.make_change([3, 2, 1], 5), 2)
self.assertEqual(coin_changer.make_change([3, 2, 1], 8), 3)
print('Success: test_coin_change')
def main():
test = TestCoinChange()
test.test_coin_change()
if __name__ == '__main__':
main()
| TestCoinChange |
python | scipy__scipy | benchmarks/benchmarks/sparse.py | {
"start": 7239,
"end": 7925
} | class ____(Benchmark):
param_names = ['sparse_type', 'num_matrices']
params = [
['spmatrix', 'sparray'],
[1000, 5000, 10000, 15000, 20000],
]
def setup(self, sparse_type, num_matrices):
self.matrices = []
for i in range(num_matrices):
rows = np.random.randint(1, 20)
columns = np.random.randint(1, 20)
density = 2e-3
nnz_per_row = int(density*columns)
mat = random_sparse(rows, columns, nnz_per_row, sparse_type)
self.matrices.append(mat)
def time_block_diag(self, sparse_type, num_matrices):
sparse.block_diag(self.matrices)
| BlockDiagSparseConstruction |
python | encode__django-rest-framework | tests/test_fields.py | {
"start": 70744,
"end": 71275
} | class ____(FieldValues):
"""
Invalid values for `MultipleChoiceField(allow_empty=False)`.
"""
valid_inputs = {
}
invalid_inputs = (
([], ['This selection may not be empty.']),
)
outputs = [
]
field = serializers.MultipleChoiceField(
choices=[
('consistency', 'Consistency'),
('availability', 'Availability'),
('partition', 'Partition tolerance'),
],
allow_empty=False
)
# File serializers...
| TestEmptyMultipleChoiceField |
python | ray-project__ray | python/ray/tune/examples/pbt_tune_cifar10_with_keras.py | {
"start": 902,
"end": 7575
} | class ____(Trainable):
def _read_data(self):
# The data, split between train and test sets:
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
# Convert class vectors to binary class matrices.
y_train = tf.keras.utils.to_categorical(y_train, num_classes)
y_test = tf.keras.utils.to_categorical(y_test, num_classes)
x_train = x_train.astype("float32")
x_train /= 255
x_test = x_test.astype("float32")
x_test /= 255
return (x_train, y_train), (x_test, y_test)
def _build_model(self, input_shape):
x = Input(shape=(32, 32, 3))
y = x
y = Convolution2D(
filters=64,
kernel_size=3,
strides=1,
padding="same",
activation="relu",
kernel_initializer="he_normal",
)(y)
y = Convolution2D(
filters=64,
kernel_size=3,
strides=1,
padding="same",
activation="relu",
kernel_initializer="he_normal",
)(y)
y = MaxPooling2D(pool_size=2, strides=2, padding="same")(y)
y = Convolution2D(
filters=128,
kernel_size=3,
strides=1,
padding="same",
activation="relu",
kernel_initializer="he_normal",
)(y)
y = Convolution2D(
filters=128,
kernel_size=3,
strides=1,
padding="same",
activation="relu",
kernel_initializer="he_normal",
)(y)
y = MaxPooling2D(pool_size=2, strides=2, padding="same")(y)
y = Convolution2D(
filters=256,
kernel_size=3,
strides=1,
padding="same",
activation="relu",
kernel_initializer="he_normal",
)(y)
y = Convolution2D(
filters=256,
kernel_size=3,
strides=1,
padding="same",
activation="relu",
kernel_initializer="he_normal",
)(y)
y = MaxPooling2D(pool_size=2, strides=2, padding="same")(y)
y = Flatten()(y)
y = Dropout(self.config.get("dropout", 0.5))(y)
y = Dense(units=10, activation="softmax", kernel_initializer="he_normal")(y)
model = Model(inputs=x, outputs=y, name="model1")
return model
def setup(self, config):
self.train_data, self.test_data = self._read_data()
x_train = self.train_data[0]
model = self._build_model(x_train.shape[1:])
opt = tf.keras.optimizers.Adadelta(
lr=self.config.get("lr", 1e-4), weight_decay=self.config.get("decay", 1e-4)
)
model.compile(
loss="categorical_crossentropy", optimizer=opt, metrics=["accuracy"]
)
self.model = model
def step(self):
x_train, y_train = self.train_data
x_train, y_train = x_train[:NUM_SAMPLES], y_train[:NUM_SAMPLES]
x_test, y_test = self.test_data
x_test, y_test = x_test[:NUM_SAMPLES], y_test[:NUM_SAMPLES]
aug_gen = ImageDataGenerator(
# set input mean to 0 over the dataset
featurewise_center=False,
# set each sample mean to 0
samplewise_center=False,
# divide inputs by dataset std
featurewise_std_normalization=False,
# divide each input by its std
samplewise_std_normalization=False,
# apply ZCA whitening
zca_whitening=False,
# randomly rotate images in the range (degrees, 0 to 180)
rotation_range=0,
# randomly shift images horizontally (fraction of total width)
width_shift_range=0.1,
# randomly shift images vertically (fraction of total height)
height_shift_range=0.1,
# randomly flip images
horizontal_flip=True,
# randomly flip images
vertical_flip=False,
)
aug_gen.fit(x_train)
batch_size = self.config.get("batch_size", 64)
gen = aug_gen.flow(x_train, y_train, batch_size=batch_size)
self.model.fit_generator(
generator=gen, epochs=self.config.get("epochs", 1), validation_data=None
)
# loss, accuracy
_, accuracy = self.model.evaluate(x_test, y_test, verbose=0)
return {"mean_accuracy": accuracy}
def save_checkpoint(self, checkpoint_dir):
file_path = checkpoint_dir + "/model"
self.model.save(file_path)
def load_checkpoint(self, checkpoint_dir):
# See https://stackoverflow.com/a/42763323
del self.model
file_path = checkpoint_dir + "/model"
self.model = load_model(file_path)
def cleanup(self):
# If need, save your model when exit.
# saved_path = self.model.save(self.logdir)
# print("save model at: ", saved_path)
pass
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--smoke-test", action="store_true", help="Finish quickly for testing"
)
args, _ = parser.parse_known_args()
space = {
"epochs": 1,
"batch_size": 64,
"lr": tune.grid_search([10**-4, 10**-5]),
"decay": tune.sample_from(lambda spec: spec.config.lr / 100.0),
"dropout": tune.grid_search([0.25, 0.5]),
}
if args.smoke_test:
space["lr"] = 10**-4
space["dropout"] = 0.5
perturbation_interval = 10
pbt = PopulationBasedTraining(
time_attr="training_iteration",
perturbation_interval=perturbation_interval,
hyperparam_mutations={
"dropout": lambda _: np.random.uniform(0, 1),
},
)
tuner = tune.Tuner(
tune.with_resources(
Cifar10Model,
resources={"cpu": 1, "gpu": 1},
),
run_config=tune.RunConfig(
name="pbt_cifar10",
stop={
"mean_accuracy": 0.80,
"training_iteration": 30,
},
checkpoint_config=tune.CheckpointConfig(
checkpoint_frequency=perturbation_interval,
checkpoint_score_attribute="mean_accuracy",
num_to_keep=2,
),
),
tune_config=tune.TuneConfig(
scheduler=pbt,
num_samples=4,
metric="mean_accuracy",
mode="max",
reuse_actors=True,
),
param_space=space,
)
results = tuner.fit()
print("Best hyperparameters found were: ", results.get_best_result().config)
| Cifar10Model |
python | tornadoweb__tornado | tornado/test/websocket_test.py | {
"start": 1938,
"end": 2232
} | class ____(TestWebSocketHandler):
@gen.coroutine
def on_message(self, message):
try:
yield self.write_message(message, isinstance(message, bytes))
except asyncio.CancelledError:
pass
except WebSocketClosedError:
pass
| EchoHandler |
python | bokeh__bokeh | tests/unit/bokeh/server/test_auth_provider.py | {
"start": 7600,
"end": 7673
} | class ____(RequestHandler): pass
""", func, suffix='.py')
| LogoutHandler |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 691989,
"end": 692503
} | class ____(sgqlc.types.Type):
"""Autogenerated return type of MarkDiscussionCommentAsAnswer"""
__schema__ = github_schema
__field_names__ = ("client_mutation_id", "discussion")
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
"""A unique identifier for the client performing the mutation."""
discussion = sgqlc.types.Field("Discussion", graphql_name="discussion")
"""The discussion that includes the chosen comment."""
| MarkDiscussionCommentAsAnswerPayload |
python | django__django | tests/schema/models.py | {
"start": 3843,
"end": 3988
} | class ____(models.Model):
detail_info = models.TextField()
class Meta:
apps = new_apps
db_table = "schema_note"
| NoteRename |
python | anthropics__anthropic-sdk-python | src/anthropic/types/beta/beta_code_execution_tool_20250522_param.py | {
"start": 348,
"end": 1017
} | class ____(TypedDict, total=False):
name: Required[Literal["code_execution"]]
"""Name of the tool.
This is how the tool will be called by the model and in `tool_use` blocks.
"""
type: Required[Literal["code_execution_20250522"]]
allowed_callers: List[Literal["direct", "code_execution_20250825"]]
cache_control: Optional[BetaCacheControlEphemeralParam]
"""Create a cache control breakpoint at this content block."""
defer_loading: bool
"""If true, tool will not be included in initial system prompt.
Only loaded when returned via tool_reference from tool search.
"""
strict: bool
| BetaCodeExecutionTool20250522Param |
python | huggingface__transformers | tests/models/fuyu/test_modeling_fuyu.py | {
"start": 1451,
"end": 5616
} | class ____:
def __init__(
self,
parent,
batch_size=13,
seq_length=7,
num_image_tokens=2,
image_size=30,
patch_size=15,
num_channels=3,
is_training=True,
use_input_mask=True,
use_labels=True,
vocab_size=99,
hidden_size=32,
num_hidden_layers=2,
num_attention_heads=4,
intermediate_size=37,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=512,
type_vocab_size=16,
type_sequence_label_size=2,
initializer_range=0.02,
num_labels=3,
num_choices=4,
pad_token_id=10,
image_token_id=1,
scope=None,
):
self.parent = parent
self.batch_size = batch_size
self.num_image_tokens = num_image_tokens
self.seq_length = seq_length + num_image_tokens
self.image_size = image_size
self.patch_size = patch_size
self.num_channels = num_channels
self.is_training = is_training
self.use_input_mask = use_input_mask
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.type_sequence_label_size = type_sequence_label_size
self.initializer_range = initializer_range
self.num_labels = num_labels
self.num_choices = num_choices
self.pad_token_id = pad_token_id
self.image_token_id = image_token_id
self.scope = scope
def prepare_config_and_inputs(self):
config = self.get_config()
input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
input_ids[input_ids == config.image_token_id] = self.pad_token_id
input_ids[:, : self.num_image_tokens] = config.image_token_id
input_mask = None
if self.use_input_mask:
input_mask = random_attention_mask([self.batch_size, self.seq_length])
sequence_labels = None
token_labels = None
if self.use_labels:
sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
return config, input_ids, input_mask, sequence_labels, token_labels
def get_config(self):
return FuyuConfig(
vocab_size=self.vocab_size,
hidden_size=self.hidden_size,
num_hidden_layers=self.num_hidden_layers,
num_attention_heads=self.num_attention_heads,
intermediate_size=self.intermediate_size,
hidden_act=self.hidden_act,
hidden_dropout_prob=self.hidden_dropout_prob,
attention_probs_dropout_prob=self.attention_probs_dropout_prob,
max_position_embeddings=self.max_position_embeddings,
type_vocab_size=self.type_vocab_size,
is_decoder=False,
initializer_range=self.initializer_range,
pad_token_id=self.pad_token_id,
image_token_id=self.image_token_id,
)
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
input_mask,
sequence_labels,
token_labels,
) = config_and_inputs
image_patches = floats_tensor(
[self.batch_size, self.num_image_tokens, config.num_channels * config.patch_size**2]
)
inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask, "image_patches": image_patches}
return config, inputs_dict
@require_torch
| FuyuModelTester |
python | django-compressor__django-compressor | compressor/parser/lxml.py | {
"start": 242,
"end": 1656
} | class ____(ParserBase):
"""
LxmlParser will use `lxml.html` parser to parse rendered contents of
{% compress %} tag.
"""
def __init__(self, content):
try:
from lxml.html import fromstring
from lxml.etree import tostring
except ImportError as err:
raise ImproperlyConfigured("Error while importing lxml: %s" % err)
except Exception as err:
raise ParserError("Error while initializing parser: %s" % err)
self.fromstring = fromstring
self.tostring = tostring
super().__init__(content)
@cached_property
def tree(self):
"""
Document tree.
"""
content = "<root>%s</root>" % self.content
tree = self.fromstring(content)
self.tostring(tree, encoding=str)
return tree
def css_elems(self):
return self.tree.xpath(
'//link[re:test(@rel, "^stylesheet$", "i")]|style',
namespaces={"re": "http://exslt.org/regular-expressions"},
)
def js_elems(self):
return self.tree.findall("script")
def elem_attribs(self, elem):
return elem.attrib
def elem_content(self, elem):
return smart_str(elem.text)
def elem_name(self, elem):
return elem.tag
def elem_str(self, elem):
return smart_str(self.tostring(elem, method="html", encoding=str))
| LxmlParser |
python | getsentry__sentry | tests/sentry/api/serializers/test_recent_searches.py | {
"start": 194,
"end": 837
} | class ____(TestCase):
def test_simple(self) -> None:
search = RecentSearch.objects.create(
organization=self.organization,
user_id=self.user.id,
type=SearchType.ISSUE.value,
query="some query",
)
result = serialize(search)
assert result["id"] == str(search.id)
assert result["organizationId"] == str(search.organization_id)
assert result["type"] == search.type
assert result["query"] == search.query
assert result["lastSeen"] == search.last_seen
assert result["dateCreated"] == search.date_added
| RecentSearchSerializerTest |
python | google__jax | jax/_src/pallas/mosaic/lowering.py | {
"start": 8256,
"end": 8467
} | class ____(Protocol):
shape: tuple[jax_core.DimSize, ...]
dtype: jnp.dtype
weak_type: bool
def update(self, **kwargs: Any) -> Self:
raise NotImplementedError
@dataclasses.dataclass
| ShapedAbstractValue |
python | automl__auto-sklearn | autosklearn/pipeline/components/classification/decision_tree.py | {
"start": 678,
"end": 5380
} | class ____(AutoSklearnClassificationAlgorithm):
def __init__(
self,
criterion,
max_features,
max_depth_factor,
min_samples_split,
min_samples_leaf,
min_weight_fraction_leaf,
max_leaf_nodes,
min_impurity_decrease,
class_weight=None,
random_state=None,
):
self.criterion = criterion
self.max_features = max_features
self.max_depth_factor = max_depth_factor
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.max_leaf_nodes = max_leaf_nodes
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.min_impurity_decrease = min_impurity_decrease
self.random_state = random_state
self.class_weight = class_weight
self.estimator = None
def fit(self, X, y, sample_weight=None):
from sklearn.tree import DecisionTreeClassifier
self.max_features = float(self.max_features)
# Heuristic to set the tree depth
if check_none(self.max_depth_factor):
max_depth_factor = self.max_depth_factor = None
else:
num_features = X.shape[1]
self.max_depth_factor = int(self.max_depth_factor)
max_depth_factor = max(
1, int(np.round(self.max_depth_factor * num_features, 0))
)
self.min_samples_split = int(self.min_samples_split)
self.min_samples_leaf = int(self.min_samples_leaf)
if check_none(self.max_leaf_nodes):
self.max_leaf_nodes = None
else:
self.max_leaf_nodes = int(self.max_leaf_nodes)
self.min_weight_fraction_leaf = float(self.min_weight_fraction_leaf)
self.min_impurity_decrease = float(self.min_impurity_decrease)
self.estimator = DecisionTreeClassifier(
criterion=self.criterion,
max_depth=max_depth_factor,
min_samples_split=self.min_samples_split,
min_samples_leaf=self.min_samples_leaf,
max_leaf_nodes=self.max_leaf_nodes,
min_weight_fraction_leaf=self.min_weight_fraction_leaf,
min_impurity_decrease=self.min_impurity_decrease,
class_weight=self.class_weight,
random_state=self.random_state,
)
self.estimator.fit(X, y, sample_weight=sample_weight)
return self
def predict(self, X):
if self.estimator is None:
raise NotImplementedError
return self.estimator.predict(X)
def predict_proba(self, X):
if self.estimator is None:
raise NotImplementedError()
probas = self.estimator.predict_proba(X)
probas = convert_multioutput_multiclass_to_multilabel(probas)
return probas
@staticmethod
def get_properties(dataset_properties=None):
return {
"shortname": "DT",
"name": "Decision Tree Classifier",
"handles_regression": False,
"handles_classification": True,
"handles_multiclass": True,
"handles_multilabel": True,
"handles_multioutput": False,
"is_deterministic": True,
"input": (DENSE, SPARSE, UNSIGNED_DATA),
"output": (PREDICTIONS,),
}
@staticmethod
def get_hyperparameter_search_space(
feat_type: Optional[FEAT_TYPE_TYPE] = None, dataset_properties=None
):
cs = ConfigurationSpace()
criterion = CategoricalHyperparameter(
"criterion", ["gini", "entropy"], default_value="gini"
)
max_depth_factor = UniformFloatHyperparameter(
"max_depth_factor", 0.0, 2.0, default_value=0.5
)
min_samples_split = UniformIntegerHyperparameter(
"min_samples_split", 2, 20, default_value=2
)
min_samples_leaf = UniformIntegerHyperparameter(
"min_samples_leaf", 1, 20, default_value=1
)
min_weight_fraction_leaf = Constant("min_weight_fraction_leaf", 0.0)
max_features = UnParametrizedHyperparameter("max_features", 1.0)
max_leaf_nodes = UnParametrizedHyperparameter("max_leaf_nodes", "None")
min_impurity_decrease = UnParametrizedHyperparameter(
"min_impurity_decrease", 0.0
)
cs.add_hyperparameters(
[
criterion,
max_features,
max_depth_factor,
min_samples_split,
min_samples_leaf,
min_weight_fraction_leaf,
max_leaf_nodes,
min_impurity_decrease,
]
)
return cs
| DecisionTree |
python | mahmoud__glom | glom/grouping.py | {
"start": 6789,
"end": 7948
} | class ____:
"""takes a random sample of the values
>>> glom([1, 2, 3], Group(Sample(2))) # doctest: +SKIP
[1, 3]
>>> glom(range(5000), Group(Sample(2))) # doctest: +SKIP
[272, 2901]
The advantage of this over :func:`random.sample` is that this can
take an arbitrarily-sized, potentially-very-long streaming input
and returns a fixed-size output. Note that this does not stream
results out, so your streaming input must have finite length.
"""
__slots__ = ('size',)
def __init__(self, size):
self.size = size
def agg(self, target, tree):
# simple reservoir sampling scheme
# https://en.wikipedia.org/wiki/Reservoir_sampling#Simple_algorithm
if self not in tree:
tree[self] = [0, []]
num_seen, sample = tree[self]
if len(sample) < self.size:
sample.append(target)
else:
pos = random.randint(0, num_seen)
if pos < self.size:
sample[pos] = target
tree[self][0] += 1
return sample
def __repr__(self):
return f'{self.__class__.__name__}({self.size!r})'
| Sample |
python | ray-project__ray | python/ray/data/_internal/datasource/image_datasource.py | {
"start": 838,
"end": 5883
} | class ____(FileBasedDatasource):
"""A datasource that lets you read images."""
_WRITE_FILE_PER_ROW = True
_FILE_EXTENSIONS = ["png", "jpg", "jpeg", "tif", "tiff", "bmp", "gif"]
# Use 8 threads per task to read image files.
_NUM_THREADS_PER_TASK = 8
def __init__(
self,
paths: Union[str, List[str]],
size: Optional[Tuple[int, int]] = None,
mode: Optional[str] = None,
**file_based_datasource_kwargs,
):
super().__init__(paths, **file_based_datasource_kwargs)
_check_import(self, module="PIL", package="Pillow")
if size is not None and len(size) != 2:
raise ValueError(
"Expected `size` to contain two integers for height and width, "
f"but got {len(size)} integers instead."
)
if size is not None and (size[0] < 0 or size[1] < 0):
raise ValueError(
f"Expected `size` to contain positive integers, but got {size} instead."
)
self.size = size
self.mode = mode
meta_provider = file_based_datasource_kwargs.get("meta_provider", None)
if isinstance(meta_provider, ImageFileMetadataProvider):
self._encoding_ratio = self._estimate_files_encoding_ratio()
meta_provider._set_encoding_ratio(self._encoding_ratio)
else:
self._encoding_ratio = IMAGE_ENCODING_RATIO_ESTIMATE_DEFAULT
def _read_stream(
self,
f: "pyarrow.NativeFile",
path: str,
) -> Iterator[Block]:
from PIL import Image, UnidentifiedImageError
data = f.readall()
try:
image = Image.open(io.BytesIO(data))
except UnidentifiedImageError as e:
raise ValueError(f"PIL couldn't load image file at path '{path}'.") from e
if self.size is not None and image.size != tuple(reversed(self.size)):
height, width = self.size
image = image.resize((width, height), resample=Image.BILINEAR)
if self.mode is not None and image.mode != self.mode:
image = image.convert(self.mode)
builder = DelegatingBlockBuilder()
array = np.asarray(image)
item = {"image": array}
builder.add(item)
block = builder.build()
yield block
def _rows_per_file(self):
return 1
def estimate_inmemory_data_size(self) -> Optional[int]:
total_size = 0
for file_size in self._file_sizes():
# NOTE: check if file size is not None, because some metadata provider
# such as FastFileMetadataProvider does not provide file size information.
if file_size is not None:
total_size += file_size
return total_size * self._encoding_ratio
def _estimate_files_encoding_ratio(self) -> float:
"""Return an estimate of the image files encoding ratio."""
start_time = time.perf_counter()
# Filter out empty file to avoid noise.
non_empty_path_and_size = list(
filter(lambda p: p[1] > 0, zip(self._paths(), self._file_sizes()))
)
num_files = len(non_empty_path_and_size)
if num_files == 0:
logger.warning(
"All input image files are empty. "
"Use on-disk file size to estimate images in-memory size."
)
return IMAGE_ENCODING_RATIO_ESTIMATE_DEFAULT
if self.size is not None and self.mode is not None:
# Use image size and mode to calculate data size for all images,
# because all images are homogeneous with same size after resizing.
# Resizing is enforced when reading every image in `ImageDatasource`
# when `size` argument is provided.
if self.mode in ["1", "L", "P"]:
dimension = 1
elif self.mode in ["RGB", "YCbCr", "LAB", "HSV"]:
dimension = 3
elif self.mode in ["RGBA", "CMYK", "I", "F"]:
dimension = 4
else:
logger.warning(f"Found unknown image mode: {self.mode}.")
return IMAGE_ENCODING_RATIO_ESTIMATE_DEFAULT
height, width = self.size
single_image_size = height * width * dimension
total_estimated_size = single_image_size * num_files
total_file_size = sum(p[1] for p in non_empty_path_and_size)
ratio = total_estimated_size / total_file_size
else:
# TODO(chengsu): sample images to estimate data size
ratio = IMAGE_ENCODING_RATIO_ESTIMATE_DEFAULT
sampling_duration = time.perf_counter() - start_time
if sampling_duration > 5:
logger.warning(
"Image input size estimation took "
f"{round(sampling_duration, 2)} seconds."
)
logger.debug(f"Estimated image encoding ratio from sampling is {ratio}.")
return max(ratio, IMAGE_ENCODING_RATIO_ESTIMATE_LOWER_BOUND)
| ImageDatasource |
python | openai__openai-python | src/openai/types/fine_tuning/alpha/grader_run_response.py | {
"start": 262,
"end": 1094
} | class ____(BaseModel):
formula_parse_error: bool
invalid_variable_error: bool
api_model_grader_parse_error: bool = FieldInfo(alias="model_grader_parse_error")
api_model_grader_refusal_error: bool = FieldInfo(alias="model_grader_refusal_error")
api_model_grader_server_error: bool = FieldInfo(alias="model_grader_server_error")
api_model_grader_server_error_details: Optional[str] = FieldInfo(
alias="model_grader_server_error_details", default=None
)
other_error: bool
python_grader_runtime_error: bool
python_grader_runtime_error_details: Optional[str] = None
python_grader_server_error: bool
python_grader_server_error_type: Optional[str] = None
sample_parse_error: bool
truncated_observation_error: bool
unresponsive_reward_error: bool
| MetadataErrors |
python | jazzband__django-polymorphic | example/pexp/models.py | {
"start": 1100,
"end": 1233
} | class ____(ProxyBase):
class Meta:
proxy = True
def __unicode__(self):
return f"<ProxyA: {self.title}>"
| ProxyA |
python | wandb__wandb | wandb/sdk/internal/job_builder.py | {
"start": 2398,
"end": 2449
} | class ____(TypedDict):
image: str
| ImageSourceDict |
python | spyder-ide__spyder | spyder/plugins/variableexplorer/widgets/dataframeeditor.py | {
"start": 25189,
"end": 57542
} | class ____(QTableView, SpyderWidgetMixin):
"""
View displaying a dataframe in the dataframe editor
This is a view (in the sense of the Qt model/view architecture) that is
used in the dataframe editor to display a dataframe. It only shows the
data but not the index (row headings) or header (column names).
Parameters
----------
parent : Optional[QWidget]
The parent widget.
model : DataFrameModel
Model encapsulating the displayed dataframe.
header : QHeaderView
The header (column names) of the view.
hscroll : QScrollBar
The horizontal scroll bar.
vscroll : QScrollBar
The vertical scroll bar.
namespacebrowser : Optional[NamespaceBrowser], optional
The namespace browser that opened the editor containing this view.
The default is None.
data_function : Optional[Callable[[], Any]], optional
A function which returns the new data frame when the user clicks on
the Refresh button. The default is None.
readonly : bool, optional
If True, then the user can not edit the dataframe. The default is
False.
Signals
-------
sig_sort_by_column(): Raised after more columns are fetched.
sig_fetch_more_rows(): Raised after more rows are fetched.
"""
sig_sort_by_column = Signal()
sig_fetch_more_columns = Signal()
sig_fetch_more_rows = Signal()
CONF_SECTION = 'variable_explorer'
def __init__(
self,
parent: Optional[QWidget],
model: DataFrameModel,
header: QHeaderView,
hscroll: QScrollBar,
vscroll: QScrollBar,
namespacebrowser: Optional[NamespaceBrowser] = None,
data_function: Optional[Callable[[], Any]] = None,
readonly: bool = False
):
QTableView.__init__(self, parent)
self.namespacebrowser = namespacebrowser
self.data_function = data_function
self.readonly = readonly
self.menu = None
self.menu_header_h = None
self.empty_ws_menu = None
self.copy_action = None
self.edit_action = None
self.edit_header_action = None
self.insert_action_above = None
self.insert_action_below = None
self.insert_action_after = None
self.insert_action_before = None
self.remove_row_action = None
self.remove_col_action = None
self.duplicate_row_action = None
self.duplicate_col_action = None
self.convert_to_menu = None
self.resize_action = None
self.resize_columns_action = None
self.histogram_action = None
self.menu = self.setup_menu()
self.menu_header_h = self.setup_menu_header()
self.register_shortcut_for_widget(name='copy', triggered=self.copy)
self.setModel(model)
self.setHorizontalScrollBar(hscroll)
self.setVerticalScrollBar(vscroll)
self.setHorizontalScrollMode(QTableView.ScrollPerPixel)
self.setVerticalScrollMode(QTableView.ScrollPerPixel)
self.sort_old = [None]
self.header_class = header
self.header_class.setContextMenuPolicy(Qt.CustomContextMenu)
self.header_class.customContextMenuRequested.connect(
self.show_header_menu)
self.header_class.sectionClicked.connect(self.sortByColumn)
self.horizontalScrollBar().valueChanged.connect(
self._load_more_columns)
self.verticalScrollBar().valueChanged.connect(self._load_more_rows)
    def _load_more_columns(self, value):
        """Load more columns to display when scrolling horizontally."""
        # Needed to avoid a NameError while fetching data when closing
        # See spyder-ide/spyder#12034.
        try:
            self.load_more_data(value, columns=True)
        except NameError:
            pass
    def _load_more_rows(self, value):
        """Load more rows to display when scrolling vertically."""
        # Needed to avoid a NameError while fetching data when closing
        # See spyder-ide/spyder#12034.
        try:
            self.load_more_data(value, rows=True)
        except NameError:
            pass
def load_more_data(self, value, rows=False, columns=False):
"""Load more rows and columns to display."""
try:
if rows and value == self.verticalScrollBar().maximum():
self.model().fetch_more(rows=rows)
self.sig_fetch_more_rows.emit()
if columns and value == self.horizontalScrollBar().maximum():
self.model().fetch_more(columns=columns)
self.sig_fetch_more_columns.emit()
except NameError:
# Needed to handle a NameError while fetching data when closing
# See spyder-ide/spyder#7880.
pass
    def setModel(self, model: DataFrameModel) -> None:
        """
        Set the model for the view to present.

        This overrides the function in QTableView so that we can enable or
        disable actions when appropriate if the selection changes.
        """
        super().setModel(model)
        self.selectionModel().selectionChanged.connect(self.refresh_menu)
        # Refresh once so actions match the (empty) initial selection.
        self.refresh_menu()
    def sortByColumn(self, index):
        """Sort by the clicked column, rolling back the indicator on failure."""
        if self.sort_old == [None]:
            # First sort ever: start showing the sort indicator.
            self.header_class.setSortIndicatorShown(True)
        sort_order = self.header_class.sortIndicatorOrder()
        if not self.model().sort(index, sort_order):
            # Sorting failed: restore the previous indicator state, or hide
            # the indicator if there was no previous successful sort.
            if len(self.sort_old) != 2:
                self.header_class.setSortIndicatorShown(False)
            else:
                self.header_class.setSortIndicator(self.sort_old[0],
                                                   self.sort_old[1])
            return
        self.sort_old = [index, self.header_class.sortIndicatorOrder()]
        self.sig_sort_by_column.emit()
    def show_header_menu(self, pos):
        """Show the edition menu for the horizontal header at *pos*."""
        global_pos = self.mapToGlobal(pos)
        index = self.indexAt(pos)
        # Remember which header section was clicked for edit_header_item.
        self.header_class.setCurrentIndex(index)
        self.menu_header_h.popup(global_pos)
    def contextMenuEvent(self, event):
        """Reimplement Qt method to show the cell context menu."""
        self.menu.popup(event.globalPos())
        event.accept()
def setup_menu_header(self):
"""Setup context header menu."""
edit_header_action = self.create_action(
name=DataframeEditorActions.EditHeader,
text=_("Edit"),
icon=ima.icon('edit'),
triggered=self.edit_header_item,
register_action=False
)
edit_header_action.setEnabled(not self.readonly)
menu = self.create_menu(DataframeEditorMenus.Header, register=False)
self.add_item_to_menu(edit_header_action, menu)
return menu
def refresh_menu(self):
"""Refresh context menu"""
index = self.currentIndex()
# Enable/disable edit actions
condition_edit = (
index.isValid()
and len(self.selectedIndexes()) == 1
and not self.readonly
)
for action in [self.edit_action, self.insert_action_above,
self.insert_action_below, self.insert_action_after,
self.insert_action_before, self.duplicate_row_action,
self.duplicate_col_action]:
action.setEnabled(condition_edit)
# Enable/disable actions for remove col/row, copy and plot
condition_copy_remove = (
index.isValid()
and len(self.selectedIndexes()) > 0
and not self.readonly
)
for action in [self.copy_action, self.remove_row_action,
self.remove_col_action, self.histogram_action]:
action.setEnabled(condition_copy_remove)
# Enable/disable action for plot
condition_plot = (index.isValid() and len(self.selectedIndexes()) > 0)
self.histogram_action.setEnabled(condition_plot)
def setup_menu(self):
"""Setup context menu."""
# ---- Create actions
self.resize_action = self.create_action(
name=DataframeEditorActions.ResizeRows,
text=_("Resize rows to contents"),
icon=ima.icon('collapse_row'),
triggered=lambda: self.resize_to_contents(rows=True),
register_action=False
)
self.resize_columns_action = self.create_action(
name=DataframeEditorActions.ResizeColumns,
text=_("Resize columns to contents"),
icon=ima.icon('collapse_column'),
triggered=self.resize_to_contents,
register_action=False
)
self.edit_action = self.create_action(
name=DataframeEditorActions.Edit,
text=_("Edit"),
icon=ima.icon('edit'),
triggered=self.edit_item,
register_action=False
)
self.insert_action_above = self.create_action(
name=DataframeEditorActions.InsertAbove,
text=_("Insert above"),
icon=ima.icon('insert_above'),
triggered=lambda: self.insert_item(axis=1, before_above=True),
register_action=False
)
self.insert_action_below = self.create_action(
name=DataframeEditorActions.InsertBelow,
text=_("Insert below"),
icon=ima.icon('insert_below'),
triggered=lambda: self.insert_item(axis=1, before_above=False),
register_action=False
)
self.insert_action_before = self.create_action(
name=DataframeEditorActions.InsertBefore,
text=_("Insert before"),
icon=ima.icon('insert_before'),
triggered=lambda: self.insert_item(axis=0, before_above=True),
register_action=False
)
self.insert_action_after = self.create_action(
name=DataframeEditorActions.InsertAfter,
text=_("Insert after"),
icon=ima.icon('insert_after'),
triggered=lambda: self.insert_item(axis=0, before_above=False),
register_action=False
)
self.remove_row_action = self.create_action(
name=DataframeEditorActions.RemoveRow,
text=_("Remove row"),
icon=ima.icon('delete_row'),
triggered=self.remove_item,
register_action=False
)
self.remove_col_action = self.create_action(
name=DataframeEditorActions.RemoveColumn,
text=_("Remove column"),
icon=ima.icon('delete_column'),
triggered=lambda: self.remove_item(axis=1),
register_action=False
)
self.duplicate_row_action = self.create_action(
name=DataframeEditorActions.DuplicateRow,
text=_("Duplicate row"),
icon=ima.icon('duplicate_row'),
triggered=lambda: self.duplicate_row_col(dup_row=True),
register_action=False
)
self.duplicate_col_action = self.create_action(
name=DataframeEditorActions.DuplicateColumn,
text=_("Duplicate column"),
icon=ima.icon('duplicate_column'),
triggered=lambda: self.duplicate_row_col(dup_row=False),
register_action=False
)
self.copy_action = self.create_action(
name=DataframeEditorActions.Copy,
text=_('Copy'),
icon=ima.icon('editcopy'),
triggered=self.copy,
register_action=False
)
self.copy_action.setShortcut(keybinding('Copy'))
self.copy_action.setShortcutContext(Qt.WidgetShortcut)
self.histogram_action = self.create_action(
name=DataframeEditorActions.Histogram,
text=_("Histogram"),
tip=_("Plot a histogram of the selected columns"),
icon=ima.icon('hist'),
triggered=self.plot_hist,
register_action=False
)
# ---- Create "Convert to" submenu and actions
self.convert_to_menu = self.create_menu(
menu_id=DataframeEditorMenus.ConvertTo,
title=_('Convert to'),
register=False
)
functions = (
(_("Bool"), bool, DataframeEditorActions.ConvertToBool),
(_("Complex"), complex, DataframeEditorActions.ConvertToComplex),
(_("Int"), int, DataframeEditorActions.ConvertToInt),
(_("Float"), float, DataframeEditorActions.ConvertToFloat),
(_("Str"), str, DataframeEditorActions.ConvertToStr)
)
for text, func, name in functions:
def slot():
self.change_type(func)
action = self.create_action(
name=name,
text=text,
triggered=slot,
context=Qt.WidgetShortcut,
register_action=False
)
self.add_item_to_menu(action, self.convert_to_menu)
# ---- Create context menu and fill it
menu = self.create_menu(DataframeEditorMenus.Context, register=False)
for action in [self.copy_action, self.edit_action]:
self.add_item_to_menu(
action,
menu,
section=DataframeEditorContextMenuSections.Edit
)
for action in [self.insert_action_above, self.insert_action_below,
self.duplicate_row_action, self.remove_row_action]:
self.add_item_to_menu(
action,
menu,
section=DataframeEditorContextMenuSections.Row
)
for action in [self.insert_action_before, self.insert_action_after,
self.duplicate_col_action, self.remove_col_action]:
self.add_item_to_menu(
action,
menu,
section=DataframeEditorContextMenuSections.Column
)
self.add_item_to_menu(
self.convert_to_menu,
menu,
section=DataframeEditorContextMenuSections.Convert
)
return menu
def change_type(self, func):
"""A function that changes types of cells."""
model = self.model()
index_list = self.selectedIndexes()
[model.setData(i, '', change_type=func) for i in index_list]
    @Slot()
    def copy(self):
        """Copy the selected cells to the clipboard as tab-separated text."""
        if not self.selectedIndexes():
            return
        (row_min, row_max,
         col_min, col_max) = get_idx_rect(self.selectedIndexes())
        # Copy index and header too (equal True).
        # See spyder-ide/spyder#11096
        index = header = True
        df = self.model().df
        obj = df.iloc[slice(row_min, row_max + 1),
                      slice(col_min, col_max + 1)]
        output = io.StringIO()
        try:
            obj.to_csv(output, sep='\t', index=index, header=header)
        except UnicodeEncodeError:
            # Needed to handle encoding errors in Python 2
            # See spyder-ide/spyder#4833
            # NOTE(review): after this error the clipboard is still set
            # below with whatever partial output was written — confirm this
            # is intended.
            QMessageBox.critical(
                self,
                _("Error"),
                _("Text can't be copied."))
        contents = output.getvalue()
        output.close()
        clipboard = QApplication.clipboard()
        clipboard.setText(contents)
    def resize_to_contents(self, rows=False):
        """
        Resize rows (if *rows* is True) or columns to fit their contents,
        keeping the separate index table of the parent editor in sync.
        """
        if isinstance(self.parent(), DataFrameEditor):
            if rows:
                self.resizeRowsToContents()
                self.parent().table_index.resizeRowsToContents()
            else:
                # Columns: the parent editor drives the resize of this view.
                self.parent().table_index.resizeColumnsToContents()
                self.parent().resize_to_contents()
    def edit_header_item(self):
        """
        Edit the label of the header section selected via the header menu.

        For a MultiIndex header, a dialog with one line edit per index level
        is shown; otherwise a simple text input dialog is used.  Duplicate
        column names are rejected with a warning.
        """
        pos = self.header_class.currentIndex()
        index = self.header_class.logicalIndex(pos.column())
        if index >= 0:
            model_index = self.header_class.model().index(0, index)
            index_number_rows = 1
            # Tuple labels mean the columns form a MultiIndex.
            if type(self.model().df.columns[0]) is tuple:
                index_number_rows = len(self.model().df.columns[0])
            if index_number_rows > 1:
                # Build a multi-line input dialog by hiding the default
                # QInputDialog widgets and inserting one QLineEdit per level.
                dialog = QInputDialog()
                dialog.setWindowTitle("Enter the values")
                label = QLabel("Enter the values:")
                dialog.show()
                dialog.findChild(QLineEdit).hide()
                dialog.findChild(QLabel).hide()
                lines = []
                for row in range(index_number_rows):
                    line = QLineEdit(text=self.model().df.columns[index][row])
                    dialog.layout().insertWidget(row, line)
                    lines.append(line)
                dialog.layout().insertWidget(0, label)
                dialog.hide()
                confirmation = dialog.exec_() == QDialog.Accepted
                if confirmation:
                    value = tuple(line.text() for line in lines)
            else:
                value, confirmation = QInputDialog.getText(
                    self,
                    _("Enter a value"),
                    _("Enter a value"),
                    QLineEdit.Normal,
                    ""
                )
            if confirmation:
                if value not in self.model().df.columns.tolist():
                    if type(value) is tuple:
                        # MultiIndex: rebuild the whole columns index with
                        # the edited tuple spliced in.
                        n_cols = len(self.model().df.columns)
                        cols = self.model().df.columns
                        names = cols.names
                        cols = (
                            self.model().df.columns.tolist()[0:index]
                            + [value]
                            + self.model().df.columns.tolist()[index+1:n_cols]
                        )
                        self.model().df.columns = (
                            pd.MultiIndex.from_tuples(cols, names=names)
                        )
                    else:
                        self.header_class.model().setData(
                            model_index,
                            value,
                            Qt.EditRole
                        )
                    self.parent()._reload()
                    self.model().dataChanged.emit(pos, pos)
                else:
                    QMessageBox.warning(
                        self.model().dialog,
                        _("Warning: Duplicate column"),
                        _('Column with name "{}" already exists!').format(
                            value)
                    )
def edit_item(self):
"""Edit item"""
index = self.currentIndex()
if not index.isValid():
return
self.edit(index)
    def insert_item(self, axis=0, before_above=False):
        """
        Insert a new row (axis=1) or column (axis=0) next to the current
        cell, initialized with a "neutral" value of the same type as the
        current cell (e.g. 0 for int, '' for str).

        NOTE(review): axis=0 inserts a *column* and axis=1 a *row* here,
        which is the opposite of the pandas convention — confirm callers.
        """
        current_index = self.currentIndex()
        if not current_index.isValid():
            return False
        column = current_index.column()
        row = current_index.row()
        step = 0
        df = self.model().df
        if not before_above:
            step = 1
        if axis == 0:
            # insert column
            module = df.iat[row, column].__class__.__module__
            if module == 'builtins':
                # Evaluate character '' (empty) to initialize the column as a
                # neutral data type
                eval_type = df.iat[row, column].__class__.__name__ + '('')'
            else:
                # Necessary because of import numpy as np
                if module == 'numpy':
                    module = 'np'
                # NOTE(review): the neutral value is produced by eval() of a
                # constructed "Type()" expression; inputs come from the
                # dataframe's own types, not from the user.
                eval_type = (
                    module
                    + '.'
                    + df.iat[row, column].__class__.__name__
                    + '('')'
                )
            indexes = df.axes[1].tolist()
            new_name = 'new_col'
            if type(indexes[column]) is not str:
                new_name = indexes[column]
            if new_name in indexes:
                # Avoid duplicate labels; tuples mean a MultiIndex, where
                # each level gets its own "_copy(N)" suffix.
                if type(new_name) is tuple:
                    tuple_idx = []
                    new_tuple = []
                    for idx in indexes:
                        tuple_idx = tuple_idx + list(idx)
                    for idx in range(len(new_name)):
                        new_tuple.append(
                            self.next_index_name(tuple_idx, new_name[idx])
                        )
                    new_name = tuple(new_tuple)
                else:
                    new_name = self.next_index_name(indexes, new_name)
            item_value = eval(eval_type)
            if isinstance(item_value, tuple) and item_value == ():
                item_value = ('')
            df.insert(
                loc=column + step,
                column=new_name,
                value=item_value,
                allow_duplicates=True
            )
            self.model().max_min_col_update()
            if before_above:
                column = column + 1
        if axis == 1:
            # insert row
            indexes = df.axes[0].tolist()
            new_name = 'new_row'
            if type(indexes[row]) is not str:
                new_name = indexes[row]
            if new_name in indexes:
                new_name = self.next_index_name(indexes, new_name)
            # Slice the upper half of the dataframe
            df1 = df[0:row + step]
            # Store the result of lower half of the dataframe
            df2 = df[row + step:]
            # Insert the row in the upper half dataframe
            new_row = df.iloc[[row]]
            new_row.axes[0].values[0] = new_name
            for col in range(len(new_row.columns)):
                module = new_row.iat[0, col].__class__.__module__
                if module == 'builtins':
                    # Evaluate character '' (empty) to initialyze the column as
                    # a neutral data type
                    eval_type = new_row.iat[0, col].__class__.__name__ + '('')'
                else:
                    # Necessary because of import numpy as np
                    if module == 'numpy':
                        module = 'np'
                    eval_type = (
                        module
                        + '.'
                        + new_row.iat[0, col].__class__.__name__
                        + '('')'
                    )
                new_row.iat[0, col] = eval(eval_type)
            self.model().df = pd.concat([df1, new_row, df2])
            if before_above:
                row = row + 1
        self.parent()._reload()
        self.model().dataChanged.emit(current_index, current_index)
        # Keep the selection on the originally-selected cell.
        self.setCurrentIndex(self.model().index(row, column))
    def duplicate_row_col(self, dup_row=False):
        """
        Duplicate the current row (dup_row=True) or column, giving the copy
        a unique "label_copy(N)" name via next_index_name.
        """
        current_index = self.currentIndex()
        if not current_index.isValid():
            return False
        column = current_index.column()
        row = current_index.row()
        df = self.model().df
        if dup_row:
            # Slice the upper half of the dataframe
            df1 = self.model().df[0:row]
            # Store the result of lower half of the dataframe
            df2 = self.model().df[row:]
            # Insert the row in the upper half dataframe
            new_row = self.model().df.iloc[[row]]
            label = new_row.axes[0].values[0]
            indexes = self.model().df.axes[0].tolist()
            indexes.remove(label)
            new_name = self.next_index_name(indexes, label)
            new_row.axes[0].values[0] = new_name
            self.model().df = pd.concat([df1, new_row, df2])
            # Keep the selection on the duplicated (lower) row.
            row = row + 1
        else:
            indexes = df.axes[1].tolist()
            label = indexes[column]
            indexes.remove(label)
            # Tuple labels mean a MultiIndex: name each level separately.
            if type(label) is tuple:
                tuple_idx = []
                new_tuple = []
                for idx in indexes:
                    tuple_idx = tuple_idx + list(idx)
                for idx in range(len(label)):
                    new_tuple.append(
                        self.next_index_name(tuple_idx, label[idx])
                    )
                new_name = tuple(new_tuple)
            else:
                new_name = self.next_index_name(indexes, label)
            df.insert(loc=column+1, column=new_name, value='',
                      allow_duplicates=True)
            df[new_name] = df.iloc[:, column]
            self.model().max_min_col_update()
        self.parent()._reload()
        self.model().dataChanged.emit(current_index, current_index)
        self.setCurrentIndex(self.model().index(row, column))
def next_index_name(self, indexes, label):
"""
Calculate and generate next index_name for a duplicate column/row
rol/col_copy(ind).
"""
ind = -1
name = ''
acceptable_types = (
[str, float, int, complex, bool]
+ list(REAL_NUMBER_TYPES)
+ list(COMPLEX_NUMBER_TYPES)
)
if type(label) not in acceptable_types:
# Case receiving a different type of acceptable_type,
# treat as string
label = str(label)
if type(label) is str:
# Make all indexes strings to compare
for i in range(len(indexes)):
if type(indexes[i]) is not str:
indexes[i] = str(indexes[i])
# Verify if find '_copy(' in the label
if label.rfind('_copy(') == -1:
# If not found, verify in other indexes
name = label + '_copy('
for n in indexes:
if n.rfind(name) == 0:
# label_copy( starts in first position
init_pos = len(name)
final_pos = len(n) - 1
curr_ind = n[init_pos:final_pos]
if (
curr_ind.isnumeric()
and n[final_pos:final_pos+1] == ')'
):
if ind < int(curr_ind):
ind = int(curr_ind)
else:
# If 'copy_(' string is in label, verify if valid and check
# next.
init_pos = label.rfind('_copy(') + 6
final_pos = len(label) - 1
curr_ind = label[init_pos:final_pos]
if curr_ind.isnumeric():
if label[final_pos:final_pos+1] == ')':
ind = int(curr_ind)
name = label[0:init_pos]
for n in indexes:
if n.rfind(name) == 0:
init_pos = len(name)
final_pos = len(n) - 1
curr_ind = n[init_pos:final_pos]
if (
curr_ind.isnumeric()
and n[final_pos:final_pos+1] == ')'
):
if ind < int(curr_ind):
ind = int(curr_ind)
else:
# If not closed parenthesis, treat entire string as
# valid
name = label + '_copy('
for n in indexes:
if n.rfind(name) == 0:
init_pos = len(name)
final_pos = len(n) - 1
curr_ind = n[init_pos:final_pos]
if (
curr_ind.isnumeric()
and n[final_pos:final_pos+1] == ')'
):
if ind < int(curr_ind):
ind = int(curr_ind)
else:
# Found '_copy(not a number)', treat entire string as valid
# and check if exist other '_copy(Not number)*_copy(number)
name = label + '_copy('
for n in indexes:
if n.rfind(name) == 0:
init_pos = len(name)
final_pos = len(n) - 1
curr_ind = n[init_pos:final_pos]
if (
curr_ind.isnumeric()
and n[final_pos:final_pos+1] == ')'
):
if ind < int(curr_ind):
ind = int(curr_ind)
ind = ind+1
return name + str(ind) + ')'
else:
# Type is numeric: increment 1 and check if it is in list.
label = label + 1
while label in indexes:
label = label + 1
return label
    @Slot()
    def remove_item(self, force=False, axis=0):
        """
        Remove the selected rows (axis=0) or columns (axis=1), asking for
        confirmation first unless *force* is True or the user disabled the
        confirmation dialog.
        """
        indexes = self.selectedIndexes()
        index_label = []
        df = self.model().df
        if not indexes:
            return
        # Keep focus on the item before the deleted one
        focus_row = indexes[0].row()
        focus_col = indexes[0].column()
        if axis == 0 and focus_row > 0:
            focus_row = focus_row - 1
        if axis == 1 and focus_col > 0:
            focus_col = focus_col - 1
        # Collect the distinct row/column labels covered by the selection.
        for index in indexes:
            if not index.isValid():
                return
            else:
                if axis == 0:
                    row_label = df.axes[axis][index.row()]
                    if row_label not in index_label:
                        index_label.append(row_label)
                else:
                    column_label = df.axes[axis][index.column()]
                    if column_label not in index_label:
                        index_label.append(column_label)
        result = None
        if not force:
            if (
                not self.get_conf('show_remove_message_dataframe')
                or running_under_pytest()
            ):
                result = QMessageBox.Yes
            else:
                one = _("Do you want to remove the selected item?")
                more = _("Do you want to remove all selected items?")
                answer = MessageCheckBox(
                    icon=QMessageBox.Question, parent=self
                )
                answer.set_checkbox_text(_("Don't ask again."))
                answer.set_checked(False)
                answer.set_check_visible(True)
                answer.setText(one if len(indexes) == 1 else more)
                answer.setStandardButtons(QMessageBox.Yes | QMessageBox.No)
                result = answer.exec_()
                check = answer.is_checked()
                if check:
                    self.set_conf('show_remove_message_dataframe', False)
        if force or result == QMessageBox.Yes:
            for label in index_label:
                try:
                    df.drop(label, inplace=True, axis=axis)
                except TypeError as e:
                    QMessageBox.warning(
                        self.model().dialog,
                        _("Warning: It was not possible to remove this item!"),
                        _("ValueError: {} must be removed from index.").format(
                            str(e))
                    )
                    return False
            self.model().max_min_col_update()
            self.parent()._reload()
            index = QModelIndex()
            self.model().dataChanged.emit(index, index)
            self.setCurrentIndex(self.model().index(focus_row, focus_col))
def plot_hist(self) -> None:
"""
Plot histogram of selected columns
"""
def plot_function(figure: Figure) -> None:
ax = figure.subplots()
model.df.hist(column=col_labels, ax=ax)
cols = list(index.column() for index in self.selectedIndexes())
cols = list(set(cols)) # Remove duplicates
model = self.model()
col_labels = [model.header(0, col) for col in cols]
self.namespacebrowser.plot(plot_function)
| DataFrameView |
python | google__jax | jax/_src/interpreters/pxla.py | {
"start": 40850,
"end": 48838
} | class ____:
compiled: Any
backend: xb.XlaBackend
local_input_avals: Sequence[core.AbstractValue]
input_shardings: Sequence[JSharding]
local_output_avals: Sequence[ShapedArray]
output_shardings: Sequence[JSharding]
unordered_effects: list[core.Effect]
ordered_effects: list[core.Effect]
keepalive: Sequence[Any]
host_callbacks: Sequence[Any]
jaxpr_debug_info: core.DebugInfo
  def build_execute_fun(self):
    """Build the ExecuteReplicated callable that runs the compiled pmap."""
    # Per-input device indices derived from each PmapSharding; None when the
    # sharding spec is absent.
    input_indices = []
    for aval, spec in safe_zip(self.local_input_avals, self.input_shardings):
      assert isinstance(spec, sharding_impls.PmapSharding), spec
      assert isinstance(aval, core.ShapedArray), aval
      input_indices.append(
          sharding_specs.spec_to_indices(aval.shape, spec.sharding_spec)
          if spec.sharding_spec is not None else None)
    handle_outs = local_avals_to_results_handler(self.local_output_avals,
                                                 self.output_shardings)
    handle_args = InputsHandler(self.input_shardings,
                                [None] * len(self.input_shardings),
                                self.compiled.local_devices(), input_indices)
    execute_fun = ExecuteReplicated(self.compiled, "parallel computation",
                                    self.backend, handle_args, handle_outs,
                                    self.unordered_effects,
                                    self.ordered_effects, self.keepalive,
                                    bool(self.host_callbacks),
                                    set(range(len(input_indices))), None)
    return execute_fun
  def load(self) -> PmapExecutable:
    """Wrap this unloaded executable into a user-facing PmapExecutable."""
    # The compile fingerprint may be absent on some backends.
    fingerprint = getattr(self.compiled, "fingerprint", None)
    return PmapExecutable(
        self.compiled, self.build_execute_fun, fingerprint,
        self.local_input_avals, self)
  @staticmethod
  def from_hlo(hlo: ir.Module,
               pci: ParallelCallableInfo,
               replicas: ReplicaInfo,
               shards: ShardInfo,
               tuple_args: bool,
               unordered_effects: list[core.Effect],
               ordered_effects: list[core.Effect],
               host_callbacks: list[Any],
               keepalive: Any,
               jaxpr_debug_info: core.DebugInfo,
               platforms: Sequence[str],
               shape_poly_state: mlir.ShapePolyLoweringState | None = None,
               compiler_options=None):
    """Compile *hlo* for pmap execution and return a loaded PmapExecutable.

    Validates the device assignment against the requested number of shards,
    builds input/output PmapShardings, compiles the module and wraps the
    result via UnloadedPmapExecutable(...).load().
    """
    del platforms
    if shape_poly_state is not None and shape_poly_state.uses_dim_vars:
      hlo = mlir.refine_polymorphic_shapes(hlo)
    devices = pci.devices
    if devices is None:
      if shards.num_global_shards > xb.device_count(pci.backend):
        msg = ("compiling computation that requires {} logical devices, but only {} XLA "
               "devices are available (num_replicas={})")
        raise ValueError(msg.format(shards.num_global_shards,
                                    xb.device_count(pci.backend),
                                    replicas.num_global_replicas))
      # On a single host, we simply grab the first N devices from jax.devices().
      # In the single host case, we want the default device order of pmap to
      # match jax.devices().
      # On multiple hosts, we create a default device assignment that ensures
      # each host is responsible for a contiguous set of replicas.
      if shards.num_global_shards > shards.num_local_shards:
        # TODO(skye): use a locality-aware assignment that satisfies the above
        # constraint.
        devices = [d for process_index in range(xb.process_count(pci.backend))
                   for d in xb.local_devices(process_index, pci.backend)]
      else:
        devices = xb.local_devices(backend=pci.backend)[:shards.num_local_shards]
    else:
      if shards.num_local_shards != len(pci.local_devices):
        local_devices_str = ", ".join(map(str, pci.local_devices))
        if shards.num_local_shards == pci.axis_size:
          raise ValueError(
              f"Leading axis size of input to pmapped function must equal the "
              f"number of local devices passed to pmap. Got axis_size="
              f"{pci.axis_size}, num_local_devices={len(pci.local_devices)}.\n"
              f"(Local devices available to pmap: {local_devices_str})")
        else:
          raise ValueError(
              f"pmapped function requires {shards.num_local_shards} local "
              f"devices to run due to nested pmapped or other parallel "
              f"functions, but only {len(pci.local_devices)} are available.\n"
              f"(outer axis size: {pci.axis_size}, local devices available to "
              f"pmap: {local_devices_str})")
      if shards.num_global_shards != len(devices):
        raise ValueError("compiling computation that creates %s shards, "
                         "but %s devices were specified" %
                         (shards.num_global_shards, len(devices)))
    # 'devices' may be 1D or 2D at this point (e.g.
    # get_default_device_assignment() returns 2D assignment, caller may have
    # provided 1D list of devices).
    # Convert to 2D in case it's 1D and we have > 1 partitions.
    num_partitions = 1
    device_assignment: np.ndarray = np.array(devices).reshape(
        (replicas.num_global_replicas, num_partitions))
    compile_options = compiler.get_compile_options(
        num_replicas=replicas.num_global_replicas,
        num_partitions=num_partitions,
        device_assignment=device_assignment,
        use_spmd_partitioning=False,
        env_options_overrides=compiler_options,
        detailed_logging=compiler.use_detailed_logging(hlo),
        backend=pci.backend,
    )
    compile_options.parameter_is_tupled_arguments = tuple_args
    # Only the devices owned by this process participate in local shardings.
    process_index = xb.process_index(pci.backend)
    local_device_assignment = np.array([
        d for d in device_assignment.flat if d.process_index == process_index
    ])
    input_sharding_specs = [
        sharding_specs.pmap_sharding_spec(
            replicas.num_local_replicas, pci.axis_size,
            cast(ShapedArray, aval).shape, in_axis)
        for aval, in_axis in safe_zip(shards.sharded_avals, pci.in_axes)]
    in_shardings = _get_pmap_sharding(local_device_assignment,
                                      input_sharding_specs)
    local_unmapped_avals = [
        _cast_to_shaped_array(
            _pmap_unmapped_aval(pci.axis_size, out_axis, aval))
        if out_axis is not None else aval
        for aval, out_axis in safe_zip(shards.out_sharded_avals, pci.out_axes)]
    out_specs = [
        sharding_specs.pmap_sharding_spec(
            replicas.num_local_replicas, pci.axis_size, aval.shape, out_axis)
        for aval, out_axis in safe_zip(
            shards.out_sharded_avals, pci.out_axes)]
    out_shardings = _get_pmap_sharding(local_device_assignment, out_specs)
    with dispatch.log_elapsed_time(
        "Finished XLA compilation of {fun_name} in {elapsed_time:.9f} sec",
        fun_name=pci.name, event=dispatch.BACKEND_COMPILE_EVENT):
      # `executable_devices` contains devices for output shardings of a pmapped
      # function. It contains only local devices for correspondence with
      # `PmapSharding`s, which also contain only local devices.
      executable_devices = _create_device_list(
          tuple(local_device_assignment.flat))
      assert executable_devices is not None
      compiled = compiler.compile_or_get_cached(
          pci.backend, hlo, device_assignment, compile_options,
          host_callbacks, executable_devices)
    return UnloadedPmapExecutable(
        compiled=compiled,
        backend=pci.backend,
        local_input_avals=pci.avals,
        input_shardings=in_shardings,
        local_output_avals=local_unmapped_avals,
        output_shardings=out_shardings,
        unordered_effects=unordered_effects,
        ordered_effects=ordered_effects,
        keepalive=keepalive,
        host_callbacks=host_callbacks,
        jaxpr_debug_info=jaxpr_debug_info).load()
| UnloadedPmapExecutable |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.