language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | dagster-io__dagster | python_modules/dagster/dagster/_core/launcher/base.py | {
"start": 1691,
"end": 3975
} | class ____(ABC, MayHaveInstanceWeakref[T_DagsterInstance]):
@abstractmethod
def launch_run(self, context: LaunchRunContext) -> None:
"""Launch a run.
This method should begin the execution of the specified run, and may emit engine events.
Runs should be created in the instance (e.g., by calling
``DagsterInstance.create_run()``) *before* this method is called, and
should be in the ``PipelineRunStatus.STARTING`` state. Typically, this method will
not be invoked directly, but should be invoked through ``DagsterInstance.launch_run()``.
Args:
context (LaunchRunContext): information about the launch - every run launcher
will need the PipelineRun, and some run launchers may need information from the
BaseWorkspaceRequestContext from which the run was launched.
"""
@abstractmethod
def terminate(self, run_id: str) -> bool:
"""Terminates a process.
Returns False is the process was already terminated. Returns true if
the process was alive and was successfully terminated
"""
def dispose(self) -> None:
"""Do any resource cleanup that should happen when the DagsterInstance is
cleaning itself up.
"""
def join(self, timeout: int = 30) -> None:
pass
@property
def supports_check_run_worker_health(self) -> bool:
"""Whether the run launcher supports check_run_worker_health."""
return False
def check_run_worker_health(self, run: DagsterRun) -> CheckRunHealthResult:
raise NotImplementedError(
"This run launcher does not support run monitoring. Please disable it on your instance."
)
def get_run_worker_debug_info(
self, run: DagsterRun, include_container_logs: Optional[bool] = True
) -> Optional[str]:
return None
@property
def supports_resume_run(self) -> bool:
"""Whether the run launcher supports resume_run."""
return False
def resume_run(self, context: ResumeRunContext) -> None:
raise NotImplementedError(
"This run launcher does not support resuming runs. If using "
"run monitoring, set max_resume_run_attempts to 0."
)
| RunLauncher |
python | pytorch__pytorch | test/test_testing.py | {
"start": 55695,
"end": 64296
} | class ____(TestCase):
supported_dtypes = dtypes(
torch.bool,
torch.uint8, torch.int8, torch.int16, torch.int32, torch.int64,
torch.float16, torch.bfloat16, torch.float32, torch.float64,
torch.complex32, torch.complex64, torch.complex128,
)
@supported_dtypes
@parametrize("shape", [(), (0,), (1,), (1, 1), (2,), (2, 3), (8, 16, 32)])
@parametrize("splat_shape", [False, True])
def test_smoke(self, dtype, device, shape, splat_shape):
t = torch.testing.make_tensor(*shape if splat_shape else shape, dtype=dtype, device=device)
self.assertIsInstance(t, torch.Tensor)
self.assertEqual(t.shape, shape)
self.assertEqual(t.dtype, dtype)
self.assertEqual(t.device, torch.device(device))
@supported_dtypes
@parametrize("requires_grad", [False, True])
def test_requires_grad(self, dtype, device, requires_grad):
make_tensor = functools.partial(
torch.testing.make_tensor,
dtype=dtype,
device=device,
requires_grad=requires_grad,
)
if not requires_grad or dtype.is_floating_point or dtype.is_complex:
t = make_tensor()
self.assertEqual(t.requires_grad, requires_grad)
else:
with self.assertRaisesRegex(
ValueError, "`requires_grad=True` is not supported for boolean and integral dtypes"
):
make_tensor()
@supported_dtypes
@parametrize("noncontiguous", [False, True])
@parametrize("shape", [(), (0,), (1,), (1, 1), (2,), (2, 3), (8, 16, 32)])
def test_noncontiguous(self, dtype, device, noncontiguous, shape):
numel = functools.reduce(operator.mul, shape, 1)
t = torch.testing.make_tensor(shape, dtype=dtype, device=device, noncontiguous=noncontiguous)
self.assertEqual(t.is_contiguous(), not noncontiguous or numel < 2)
@supported_dtypes
@parametrize(
"memory_format_and_shape",
[
(None, (2, 3, 4)),
(torch.contiguous_format, (2, 3, 4)),
(torch.channels_last, (2, 3, 4, 5)),
(torch.channels_last_3d, (2, 3, 4, 5, 6)),
(torch.preserve_format, (2, 3, 4)),
],
)
def test_memory_format(self, dtype, device, memory_format_and_shape):
memory_format, shape = memory_format_and_shape
t = torch.testing.make_tensor(shape, dtype=dtype, device=device, memory_format=memory_format)
self.assertTrue(
t.is_contiguous(memory_format=torch.contiguous_format if memory_format is None else memory_format)
)
@supported_dtypes
def test_noncontiguous_memory_format(self, dtype, device):
with self.assertRaisesRegex(ValueError, "`noncontiguous` and `memory_format` are mutually exclusive"):
torch.testing.make_tensor(
(2, 3, 4, 5),
dtype=dtype,
device=device,
noncontiguous=True,
memory_format=torch.channels_last,
)
@supported_dtypes
def test_exclude_zero(self, dtype, device):
t = torch.testing.make_tensor(10_000, dtype=dtype, device=device, exclude_zero=True, low=-1, high=2)
self.assertTrue((t != 0).all())
@supported_dtypes
def test_low_high_smoke(self, dtype, device):
low_inclusive, high_exclusive = 0, 2
t = torch.testing.make_tensor(10_000, dtype=dtype, device=device, low=low_inclusive, high=high_exclusive)
if dtype.is_complex:
t = torch.view_as_real(t)
self.assertTrue(((t >= low_inclusive) & (t < high_exclusive)).all())
@supported_dtypes
def test_low_high_default_smoke(self, dtype, device):
low_inclusive, high_exclusive = {
torch.bool: (0, 2),
torch.uint8: (0, 10),
**dict.fromkeys([torch.int8, torch.int16, torch.int32, torch.int64], (-9, 10)),
}.get(dtype, (-9, 9))
t = torch.testing.make_tensor(10_000, dtype=dtype, device=device, low=low_inclusive, high=high_exclusive)
if dtype.is_complex:
t = torch.view_as_real(t)
self.assertTrue(((t >= low_inclusive) & (t < high_exclusive)).all())
@parametrize("low_high", [(0, 0), (1, 0), (0, -1)])
@parametrize("value_types", list(itertools.product([int, float], repeat=2)))
@supported_dtypes
def test_low_ge_high(self, dtype, device, low_high, value_types):
low, high = (value_type(value) for value, value_type in zip(low_high, value_types))
if low == high and (dtype.is_floating_point or dtype.is_complex):
with self.assertWarnsRegex(
FutureWarning,
"Passing `low==high` to `torch.testing.make_tensor` for floating or complex types is deprecated",
):
t = torch.testing.make_tensor(10_000, dtype=dtype, device=device, low=low, high=high)
self.assertEqual(t, torch.full_like(t, complex(low, low) if dtype.is_complex else low))
else:
with self.assertRaisesRegex(ValueError, "`low` must be less than `high`"):
torch.testing.make_tensor(dtype=dtype, device=device, low=low, high=high)
@supported_dtypes
@parametrize("low_high", [(None, torch.nan), (torch.nan, None), (torch.nan, torch.nan)])
def test_low_high_nan(self, dtype, device, low_high):
low, high = low_high
with self.assertRaisesRegex(ValueError, "`low` and `high` cannot be NaN"):
torch.testing.make_tensor(dtype=dtype, device=device, low=low, high=high)
@supported_dtypes
def test_low_high_outside_valid_range(self, dtype, device):
make_tensor = functools.partial(torch.testing.make_tensor, dtype=dtype, device=device)
def get_dtype_limits(dtype):
if dtype is torch.bool:
return 0, 1
info = (torch.finfo if dtype.is_floating_point or dtype.is_complex else torch.iinfo)(dtype)
# We are using integer bounds here, because otherwise it would be impossible to pass `low` and `high`
# outside their valid range. Python uses 64bit floating point numbers and thus trying to do something like
# `torch.ffinfo(torch.float64)max * 2` will always result in `inf`. On the flipside, Pythons `int` is
# unbounded.
return int(info.min), int(info.max)
lowest_inclusive, highest_inclusive = get_dtype_limits(dtype)
with self.assertRaisesRegex(ValueError, ""):
low, high = (-2, -1) if lowest_inclusive == 0 else (lowest_inclusive * 4, lowest_inclusive * 2)
make_tensor(low=low, high=high)
with self.assertRaisesRegex(ValueError, ""):
make_tensor(low=highest_inclusive * 2, high=highest_inclusive * 4)
@dtypes(torch.bool, torch.uint8, torch.int8, torch.int16, torch.int32, torch.int64)
def test_low_high_boolean_integral1(self, dtype, device):
shape = (10_000,)
eps = 1e-4
actual = torch.testing.make_tensor(shape, dtype=dtype, device=device, low=-(1 - eps), high=1 - eps)
expected = torch.zeros(shape, dtype=dtype, device=device)
torch.testing.assert_close(actual, expected)
@dtypes(torch.bool, torch.uint8, torch.int8, torch.int16, torch.int32, torch.int64)
def test_low_high_boolean_integral2(self, dtype, device):
shape = (10_000,)
if dtype is torch.bool:
low = 1
elif dtype is torch.int64:
# Due to its internals, `make_tensor` is not able to sample `torch.iinfo(torch.int64).max`
low = torch.iinfo(dtype).max - 1
else:
low = torch.iinfo(dtype).max
high = low + 1
actual = torch.testing.make_tensor(shape, dtype=dtype, device=device, low=low, high=high)
expected = torch.full(shape, low, dtype=dtype, device=device)
torch.testing.assert_close(actual, expected)
instantiate_device_type_tests(TestMakeTensor, globals())
def _get_test_names_for_test_class(test_cls):
""" Convenience function to get all test names for a given test class. """
test_names = [f'{test_cls.__name__}.{key}' for key in test_cls.__dict__
if key.startswith('test_')]
return sorted(test_names)
def _get_test_funcs_for_test_class(test_cls):
""" Convenience function to get all (test function, parametrized_name) pairs for a given test class. """
test_funcs = [(getattr(test_cls, key), key) for key in test_cls.__dict__ if key.startswith('test_')]
return test_funcs
| TestMakeTensor |
python | ray-project__ray | rllib/core/models/base.py | {
"start": 653,
"end": 6446
} | class ____(abc.ABC):
"""Framework-agnostic base class for RLlib models.
Models are low-level neural network components that offer input- and
output-specification, a forward method, and a get_initial_state method. Models
are composed in RLModules.
Usage Example together with ModelConfig:
.. testcode::
from ray.rllib.core.models.base import Model
from ray.rllib.core.models.configs import ModelConfig
from ray.rllib.core.models.configs import ModelConfig
from dataclasses import dataclass
class MyModel(Model):
def __init__(self, config):
super().__init__(config)
self.my_param = config.my_param * 2
def _forward(self, input_dict):
return input_dict["obs"] * self.my_param
@dataclass
class MyModelConfig(ModelConfig):
my_param: int = 42
def build(self, framework: str):
if framework == "bork":
return MyModel(self)
config = MyModelConfig(my_param=3)
model = config.build(framework="bork")
print(model._forward({"obs": 1}))
.. testoutput::
6
"""
def __init__(self, config: ModelConfig):
self.config = config
def __init_subclass__(cls, **kwargs):
# Automatically add a __post_init__ method to all subclasses of Model.
# This method is called after the __init__ method of the subclass.
def init_decorator(previous_init):
def new_init(self, *args, **kwargs):
previous_init(self, *args, **kwargs)
if type(self) is cls:
self.__post_init__()
return new_init
cls.__init__ = init_decorator(cls.__init__)
def __post_init__(self):
"""Called automatically after the __init__ method of the subclasses.
The module first calls the __init__ method of the subclass, With in the
__init__ you should call the super().__init__ method. Then after the __init__
method of the subclass is called, the __post_init__ method is called.
This is a good place to do any initialization that requires access to the
subclass's attributes.
"""
self._input_specs = self.get_input_specs()
self._output_specs = self.get_output_specs()
def get_input_specs(self) -> Optional[Spec]:
"""Returns the input specs of this model.
Override `get_input_specs` to define your own input specs.
This method should not be called often, e.g. every forward pass.
Instead, it should be called once at instantiation to define Model.input_specs.
Returns:
Spec: The input specs.
"""
return None
def get_output_specs(self) -> Optional[Spec]:
"""Returns the output specs of this model.
Override `get_output_specs` to define your own output specs.
This method should not be called often, e.g. every forward pass.
Instead, it should be called once at instantiation to define Model.output_specs.
Returns:
Spec: The output specs.
"""
return None
@property
def input_specs(self) -> Spec:
"""Returns the input spec of this model."""
return self._input_specs
@input_specs.setter
def input_specs(self, spec: Spec) -> None:
raise ValueError(
"`input_specs` cannot be set directly. Override "
"Model.get_input_specs() instead. Set Model._input_specs if "
"you want to override this behavior."
)
@property
def output_specs(self) -> Spec:
"""Returns the output specs of this model."""
return self._output_specs
@output_specs.setter
def output_specs(self, spec: Spec) -> None:
raise ValueError(
"`output_specs` cannot be set directly. Override "
"Model.get_output_specs() instead. Set Model._output_specs if "
"you want to override this behavior."
)
def get_initial_state(self) -> Union[dict, List[TensorType]]:
"""Returns the initial state of the Model.
It can be left empty if this Model is not stateful.
"""
return dict()
@abc.abstractmethod
def _forward(self, input_dict: dict, **kwargs) -> dict:
"""Returns the output of this model for the given input.
This method is called by the forwarding method of the respective framework
that is itself wrapped by RLlib in order to check model inputs and outputs.
Args:
input_dict: The input tensors.
**kwargs: Forward compatibility kwargs.
Returns:
dict: The output tensors.
"""
@abc.abstractmethod
def get_num_parameters(self) -> Tuple[int, int]:
"""Returns a tuple of (num trainable params, num non-trainable params)."""
@abc.abstractmethod
def _set_to_dummy_weights(self, value_sequence=(-0.02, -0.01, 0.01, 0.02)) -> None:
"""Helper method to set all weights to deterministic dummy values.
Calling this method on two `Models` that have the same architecture using
the exact same `value_sequence` arg should make both models output the exact
same values on arbitrary inputs. This will work, even if the two `Models`
are of different DL frameworks.
Args:
value_sequence: Looping through the list of all parameters (weight matrices,
bias tensors, etc..) of this model, in each iteration i, we set all
values in this parameter to `value_sequence[i % len(value_sequence)]`
(round robin).
Example:
TODO:
"""
@ExperimentalAPI
| Model |
python | pallets__werkzeug | src/werkzeug/exceptions.py | {
"start": 7170,
"end": 7774
} | class ____(BadRequest):
"""Internal exception that is raised if Werkzeug detects a disconnected
client. Since the client is already gone at that point attempting to
send the error message to the client might not work and might ultimately
result in another exception in the server. Mainly this is here so that
it is silenced by default as far as Werkzeug is concerned.
Since disconnections cannot be reliably detected and are unspecified
by WSGI to a large extent this might or might not be raised if a client
is gone.
.. versionadded:: 0.8
"""
| ClientDisconnected |
python | django-import-export__django-import-export | tests/core/tests/test_tmp_storages.py | {
"start": 810,
"end": 913
} | class ____(TempFolderStorage):
def get_full_path(self):
return "/tmp/f"
| TestTempFolderStorage |
python | PrefectHQ__prefect | tests/server/models/test_workers.py | {
"start": 15570,
"end": 19994
} | class ____:
@pytest.fixture(autouse=True)
async def queues(self, session, work_pool):
queues = {}
# rename the default queue "A"
queues["A"] = await models.workers.read_work_queue(
session=session, work_queue_id=work_pool.default_queue_id
)
queues["A"].name = "A"
# create B-E
for name in "BCDE":
queues[name] = await models.workers.create_work_queue(
session=session,
work_pool_id=work_pool.id,
work_queue=schemas.actions.WorkQueueCreate(name=name),
)
await session.commit()
return queues
@pytest.mark.parametrize(
"new_priorities",
[
{"A": 2},
{"A": 2, "B": 4},
{"A": 2, "B": 4, "C": 1},
{"A": 2, "B": 1},
{"B": 2, "C": 3, "D": 4, "E": 5},
{"A": 1, "B": 2, "C": 3, "D": 4, "E": 5},
],
)
async def test_bulk_update_priorities(
self, session, work_pool, queues, new_priorities
):
all_queues = await models.workers.read_work_queues(
session=session, work_pool_id=work_pool.id
)
assert len(all_queues) == 5
await models.workers.bulk_update_work_queue_priorities(
session=session,
work_pool_id=work_pool.id,
new_priorities={queues[k].id: v for k, v in new_priorities.items()},
)
await session.commit()
all_queues = await models.workers.read_work_queues(
session=session, work_pool_id=work_pool.id
)
assert len(all_queues) == 5
all_queues = {q.name: q for q in all_queues}
for k, v in new_priorities.items():
assert all_queues[k].priority == v
async def test_update_priorities_with_invalid_target_id(
self, session, work_pool, queues
):
await models.workers.bulk_update_work_queue_priorities(
session=session,
work_pool_id=work_pool.id,
new_priorities={uuid4(): 3, queues["A"].id: 4},
)
all_queues = await models.workers.read_work_queues(
session=session, work_pool_id=work_pool.id
)
assert next(q.priority for q in all_queues if q.name == "A") == 4
async def test_update_priorities_with_duplicate_priorities(
self, session, work_pool, queues
):
with pytest.raises(ValueError, match="(Duplicate target priorities provided)"):
await models.workers.bulk_update_work_queue_priorities(
session=session,
work_pool_id=work_pool.id,
new_priorities={queues["A"]: 3, queues["B"].id: 3},
)
async def test_update_priorities_with_empty_new_priority(
self, session, work_pool, queues
):
await models.workers.bulk_update_work_queue_priorities(
session=session,
work_pool_id=work_pool.id,
new_priorities={},
)
all_queues = await models.workers.read_work_queues(
session=session, work_pool_id=work_pool.id
)
assert {q.name: q.priority for q in all_queues} == {
"A": 1,
"B": 2,
"C": 3,
"D": 4,
"E": 5,
}
async def test_update_priorities_with_empty_new_priority_to_recompute(
self, session, db, work_pool, queues
):
# manually delete a queue (this won't trigger the automatic priority update)
await session.execute(
sa.delete(db.WorkQueue).where(db.WorkQueue.id == queues["C"].id)
)
await session.commit()
all_queues = await models.workers.read_work_queues(
session=session, work_pool_id=work_pool.id
)
assert {q.name: q.priority for q in all_queues} == {
"A": 1,
"B": 2,
"D": 4,
"E": 5,
}
await models.workers.bulk_update_work_queue_priorities(
session=session,
work_pool_id=work_pool.id,
new_priorities={},
)
all_queues = await models.workers.read_work_queues(
session=session, work_pool_id=work_pool.id
)
assert {q.name: q.priority for q in all_queues} == {
"A": 1,
"B": 2,
"D": 4,
"E": 5,
}
| TestUpdateWorkQueuePriorities |
python | huggingface__transformers | tests/models/glpn/test_modeling_glpn.py | {
"start": 1677,
"end": 5406
} | class ____:
def __init__(
self,
parent,
batch_size=13,
image_size=64,
num_channels=3,
num_encoder_blocks=4,
depths=[2, 2, 2, 2],
sr_ratios=[8, 4, 2, 1],
hidden_sizes=[16, 32, 64, 128],
downsampling_rates=[1, 4, 8, 16],
num_attention_heads=[1, 2, 4, 8],
is_training=True,
use_labels=True,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
initializer_range=0.02,
decoder_hidden_size=16,
num_labels=3,
scope=None,
):
self.parent = parent
self.batch_size = batch_size
self.image_size = image_size
self.num_channels = num_channels
self.num_encoder_blocks = num_encoder_blocks
self.sr_ratios = sr_ratios
self.depths = depths
self.hidden_sizes = hidden_sizes
self.downsampling_rates = downsampling_rates
self.num_attention_heads = num_attention_heads
self.is_training = is_training
self.use_labels = use_labels
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.initializer_range = initializer_range
self.decoder_hidden_size = decoder_hidden_size
self.num_labels = num_labels
self.scope = scope
def prepare_config_and_inputs(self):
pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
labels = None
if self.use_labels:
labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)
config = self.get_config()
return config, pixel_values, labels
def get_config(self):
return GLPNConfig(
image_size=self.image_size,
num_channels=self.num_channels,
num_encoder_blocks=self.num_encoder_blocks,
depths=self.depths,
hidden_sizes=self.hidden_sizes,
num_attention_heads=self.num_attention_heads,
hidden_act=self.hidden_act,
hidden_dropout_prob=self.hidden_dropout_prob,
attention_probs_dropout_prob=self.attention_probs_dropout_prob,
initializer_range=self.initializer_range,
decoder_hidden_size=self.decoder_hidden_size,
)
def create_and_check_model(self, config, pixel_values, labels):
model = GLPNModel(config=config)
model.to(torch_device)
model.eval()
result = model(pixel_values)
expected_height = expected_width = self.image_size // (self.downsampling_rates[-1] * 2)
self.parent.assertEqual(
result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], expected_height, expected_width)
)
def create_and_check_for_depth_estimation(self, config, pixel_values, labels):
config.num_labels = self.num_labels
model = GLPNForDepthEstimation(config)
model.to(torch_device)
model.eval()
result = model(pixel_values)
self.parent.assertEqual(result.predicted_depth.shape, (self.batch_size, self.image_size, self.image_size))
result = model(pixel_values, labels=labels)
self.parent.assertEqual(result.predicted_depth.shape, (self.batch_size, self.image_size, self.image_size))
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
config, pixel_values, labels = config_and_inputs
inputs_dict = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
| GLPNModelTester |
python | huggingface__transformers | src/transformers/models/longcat_flash/modular_longcat_flash.py | {
"start": 1744,
"end": 1878
} | class ____(DeepseekV3RotaryEmbedding):
pass
# TODO remap config key ffn_hidden_size -> intermediate_size
| LongcatFlashRotaryEmbedding |
python | pypa__pip | src/pip/_vendor/rich/_win32_console.py | {
"start": 1544,
"end": 1801
} | class ____(Structure):
_fields_ = [
("dwSize", COORD),
("dwCursorPosition", COORD),
("wAttributes", wintypes.WORD),
("srWindow", wintypes.SMALL_RECT),
("dwMaximumWindowSize", COORD),
]
| CONSOLE_SCREEN_BUFFER_INFO |
python | getsentry__sentry | src/sentry/similarity/backends/abstract.py | {
"start": 42,
"end": 937
} | class ____(metaclass=ABCMeta):
@abstractmethod
def classify(self, scope, items, limit=None, timestamp=None):
pass
@abstractmethod
def compare(self, scope, key, items, limit=None, timestamp=None):
pass
@abstractmethod
def record(self, scope, key, items, timestamp=None):
pass
@abstractmethod
def merge(self, scope, destination, items, timestamp=None):
pass
@abstractmethod
def delete(self, scope, items, timestamp=None):
pass
@abstractmethod
def scan(self, scope, indices, batch=1000, timestamp=None):
pass
@abstractmethod
def flush(self, scope, indices, batch=1000, timestamp=None):
pass
@abstractmethod
def export(self, scope, items, timestamp=None):
pass
@abstractmethod
def import_(self, scope, items, timestamp=None):
pass
| AbstractIndexBackend |
python | gevent__gevent | src/greentest/3.14/test_urllib.py | {
"start": 59133,
"end": 71931
} | class ____(unittest.TestCase):
"""Test pathname2url() and url2pathname()"""
def test_basic(self):
# Make sure simple tests pass
expected_path = os.path.join("parts", "of", "a", "path")
expected_url = "parts/of/a/path"
result = urllib.request.pathname2url(expected_path)
self.assertEqual(expected_url, result,
"pathname2url() failed; %s != %s" %
(result, expected_url))
result = urllib.request.url2pathname(expected_url)
self.assertEqual(expected_path, result,
"url2pathame() failed; %s != %s" %
(result, expected_path))
def test_quoting(self):
# Test automatic quoting and unquoting works for pathnam2url() and
# url2pathname() respectively
given = os.path.join("needs", "quot=ing", "here")
expect = "needs/%s/here" % urllib.parse.quote("quot=ing")
result = urllib.request.pathname2url(given)
self.assertEqual(expect, result,
"pathname2url() failed; %s != %s" %
(expect, result))
expect = given
result = urllib.request.url2pathname(result)
self.assertEqual(expect, result,
"url2pathname() failed; %s != %s" %
(expect, result))
given = os.path.join("make sure", "using_quote")
expect = "%s/using_quote" % urllib.parse.quote("make sure")
result = urllib.request.pathname2url(given)
self.assertEqual(expect, result,
"pathname2url() failed; %s != %s" %
(expect, result))
given = "make+sure/using_unquote"
expect = os.path.join("make+sure", "using_unquote")
result = urllib.request.url2pathname(given)
self.assertEqual(expect, result,
"url2pathname() failed; %s != %s" %
(expect, result))
def test_pathname2url(self):
# Test cases common to Windows and POSIX.
fn = urllib.request.pathname2url
sep = os.path.sep
self.assertEqual(fn(''), '')
self.assertEqual(fn(sep), '///')
self.assertEqual(fn('a'), 'a')
self.assertEqual(fn(f'a{sep}b.c'), 'a/b.c')
self.assertEqual(fn(f'{sep}a{sep}b.c'), '///a/b.c')
self.assertEqual(fn(f'{sep}a{sep}b%#c'), '///a/b%25%23c')
def test_pathname2url_add_scheme(self):
sep = os.path.sep
subtests = [
('', 'file:'),
(sep, 'file:///'),
('a', 'file:a'),
(f'a{sep}b.c', 'file:a/b.c'),
(f'{sep}a{sep}b.c', 'file:///a/b.c'),
(f'{sep}a{sep}b%#c', 'file:///a/b%25%23c'),
]
for path, expected_url in subtests:
with self.subTest(path=path):
self.assertEqual(
urllib.request.pathname2url(path, add_scheme=True), expected_url)
@unittest.skipUnless(sys.platform == 'win32',
'test specific to Windows pathnames.')
def test_pathname2url_win(self):
# Test special prefixes are correctly handled in pathname2url()
fn = urllib.request.pathname2url
self.assertEqual(fn('\\\\?\\C:\\dir'), '///C:/dir')
self.assertEqual(fn('\\\\?\\unc\\server\\share\\dir'), '//server/share/dir')
self.assertEqual(fn("C:"), '///C:')
self.assertEqual(fn("C:\\"), '///C:/')
self.assertEqual(fn('c:\\a\\b.c'), '///c:/a/b.c')
self.assertEqual(fn('C:\\a\\b.c'), '///C:/a/b.c')
self.assertEqual(fn('C:\\a\\b.c\\'), '///C:/a/b.c/')
self.assertEqual(fn('C:\\a\\\\b.c'), '///C:/a//b.c')
self.assertEqual(fn('C:\\a\\b%#c'), '///C:/a/b%25%23c')
self.assertEqual(fn('C:\\a\\b\xe9'), '///C:/a/b%C3%A9')
self.assertEqual(fn('C:\\foo\\bar\\spam.foo'), "///C:/foo/bar/spam.foo")
# NTFS alternate data streams
self.assertEqual(fn('C:\\foo:bar'), '///C:/foo%3Abar')
self.assertEqual(fn('foo:bar'), 'foo%3Abar')
# No drive letter
self.assertEqual(fn("\\folder\\test\\"), '///folder/test/')
self.assertEqual(fn("\\\\folder\\test\\"), '//folder/test/')
self.assertEqual(fn("\\\\\\folder\\test\\"), '///folder/test/')
self.assertEqual(fn('\\\\some\\share\\'), '//some/share/')
self.assertEqual(fn('\\\\some\\share\\a\\b.c'), '//some/share/a/b.c')
self.assertEqual(fn('\\\\some\\share\\a\\b%#c\xe9'), '//some/share/a/b%25%23c%C3%A9')
# Alternate path separator
self.assertEqual(fn('C:/a/b.c'), '///C:/a/b.c')
self.assertEqual(fn('//some/share/a/b.c'), '//some/share/a/b.c')
self.assertEqual(fn('//?/C:/dir'), '///C:/dir')
self.assertEqual(fn('//?/unc/server/share/dir'), '//server/share/dir')
# Round-tripping
urls = ['///C:',
'///folder/test/',
'///C:/foo/bar/spam.foo']
for url in urls:
self.assertEqual(fn(urllib.request.url2pathname(url)), url)
@unittest.skipIf(sys.platform == 'win32',
'test specific to POSIX pathnames')
def test_pathname2url_posix(self):
fn = urllib.request.pathname2url
self.assertEqual(fn('//a/b.c'), '////a/b.c')
self.assertEqual(fn('///a/b.c'), '/////a/b.c')
self.assertEqual(fn('////a/b.c'), '//////a/b.c')
@unittest.skipUnless(os_helper.FS_NONASCII, 'need os_helper.FS_NONASCII')
def test_pathname2url_nonascii(self):
encoding = sys.getfilesystemencoding()
errors = sys.getfilesystemencodeerrors()
url = urllib.parse.quote(os_helper.FS_NONASCII, encoding=encoding, errors=errors)
self.assertEqual(urllib.request.pathname2url(os_helper.FS_NONASCII), url)
def test_url2pathname(self):
# Test cases common to Windows and POSIX.
fn = urllib.request.url2pathname
sep = os.path.sep
self.assertEqual(fn(''), '')
self.assertEqual(fn('/'), f'{sep}')
self.assertEqual(fn('///'), f'{sep}')
self.assertEqual(fn('////'), f'{sep}{sep}')
self.assertEqual(fn('foo'), 'foo')
self.assertEqual(fn('foo/bar'), f'foo{sep}bar')
self.assertEqual(fn('/foo/bar'), f'{sep}foo{sep}bar')
self.assertEqual(fn('//localhost/foo/bar'), f'{sep}foo{sep}bar')
self.assertEqual(fn('///foo/bar'), f'{sep}foo{sep}bar')
self.assertEqual(fn('////foo/bar'), f'{sep}{sep}foo{sep}bar')
self.assertEqual(fn('data:blah'), 'data:blah')
self.assertEqual(fn('data://blah'), f'data:{sep}{sep}blah')
self.assertEqual(fn('foo?bar'), 'foo')
self.assertEqual(fn('foo#bar'), 'foo')
self.assertEqual(fn('foo?bar=baz'), 'foo')
self.assertEqual(fn('foo?bar#baz'), 'foo')
self.assertEqual(fn('foo%3Fbar'), 'foo?bar')
self.assertEqual(fn('foo%23bar'), 'foo#bar')
self.assertEqual(fn('foo%3Fbar%3Dbaz'), 'foo?bar=baz')
self.assertEqual(fn('foo%3Fbar%23baz'), 'foo?bar#baz')
def test_url2pathname_require_scheme(self):
sep = os.path.sep
subtests = [
('file:', ''),
('FILE:', ''),
('FiLe:', ''),
('file:/', f'{sep}'),
('file:///', f'{sep}'),
('file:////', f'{sep}{sep}'),
('file:foo', 'foo'),
('file:foo/bar', f'foo{sep}bar'),
('file:/foo/bar', f'{sep}foo{sep}bar'),
('file://localhost/foo/bar', f'{sep}foo{sep}bar'),
('file:///foo/bar', f'{sep}foo{sep}bar'),
('file:////foo/bar', f'{sep}{sep}foo{sep}bar'),
('file:data:blah', 'data:blah'),
('file:data://blah', f'data:{sep}{sep}blah'),
]
for url, expected_path in subtests:
with self.subTest(url=url):
self.assertEqual(
urllib.request.url2pathname(url, require_scheme=True),
expected_path)
def test_url2pathname_require_scheme_errors(self):
subtests = [
'',
':',
'foo',
'http:foo',
'localfile:foo',
'data:foo',
'data:file:foo',
'data:file://foo',
]
for url in subtests:
with self.subTest(url=url):
self.assertRaises(
urllib.error.URLError,
urllib.request.url2pathname,
url, require_scheme=True)
@unittest.skipIf(support.is_emscripten, "Fixed by https://github.com/emscripten-core/emscripten/pull/24593")
def test_url2pathname_resolve_host(self):
fn = urllib.request.url2pathname
sep = os.path.sep
self.assertEqual(fn('//127.0.0.1/foo/bar', resolve_host=True), f'{sep}foo{sep}bar')
self.assertEqual(fn(f'//{socket.gethostname()}/foo/bar'), f'{sep}foo{sep}bar')
self.assertEqual(fn(f'//{socket.gethostname()}/foo/bar', resolve_host=True), f'{sep}foo{sep}bar')
@unittest.skipUnless(sys.platform == 'win32',
'test specific to Windows pathnames.')
def test_url2pathname_win(self):
fn = urllib.request.url2pathname
self.assertEqual(fn('/C:/'), 'C:\\')
self.assertEqual(fn("///C|"), 'C:')
self.assertEqual(fn("///C:"), 'C:')
self.assertEqual(fn('///C:/'), 'C:\\')
self.assertEqual(fn('/C|//'), 'C:\\\\')
self.assertEqual(fn('///C|/path'), 'C:\\path')
# No DOS drive
self.assertEqual(fn("///C/test/"), '\\C\\test\\')
self.assertEqual(fn("////C/test/"), '\\\\C\\test\\')
# DOS drive paths
self.assertEqual(fn('c:/path/to/file'), 'c:\\path\\to\\file')
self.assertEqual(fn('C:/path/to/file'), 'C:\\path\\to\\file')
self.assertEqual(fn('C:/path/to/file/'), 'C:\\path\\to\\file\\')
self.assertEqual(fn('C:/path/to//file'), 'C:\\path\\to\\\\file')
self.assertEqual(fn('C|/path/to/file'), 'C:\\path\\to\\file')
self.assertEqual(fn('/C|/path/to/file'), 'C:\\path\\to\\file')
self.assertEqual(fn('///C|/path/to/file'), 'C:\\path\\to\\file')
self.assertEqual(fn("///C|/foo/bar/spam.foo"), 'C:\\foo\\bar\\spam.foo')
# Colons in URI
self.assertEqual(fn('///\u00e8|/'), '\u00e8:\\')
self.assertEqual(fn('//host/share/spam.txt:eggs'), '\\\\host\\share\\spam.txt:eggs')
self.assertEqual(fn('///c:/spam.txt:eggs'), 'c:\\spam.txt:eggs')
# UNC paths
self.assertEqual(fn('//server/path/to/file'), '\\\\server\\path\\to\\file')
self.assertEqual(fn('////server/path/to/file'), '\\\\server\\path\\to\\file')
self.assertEqual(fn('/////server/path/to/file'), '\\\\server\\path\\to\\file')
self.assertEqual(fn('//127.0.0.1/path/to/file'), '\\\\127.0.0.1\\path\\to\\file')
# Localhost paths
self.assertEqual(fn('//localhost/C:/path/to/file'), 'C:\\path\\to\\file')
self.assertEqual(fn('//localhost/C|/path/to/file'), 'C:\\path\\to\\file')
self.assertEqual(fn('//localhost/path/to/file'), '\\path\\to\\file')
self.assertEqual(fn('//localhost//server/path/to/file'), '\\\\server\\path\\to\\file')
# Percent-encoded forward slashes are preserved for backwards compatibility
self.assertEqual(fn('C:/foo%2fbar'), 'C:\\foo/bar')
self.assertEqual(fn('//server/share/foo%2fbar'), '\\\\server\\share\\foo/bar')
# Round-tripping
paths = ['C:',
r'\C\test\\',
r'C:\foo\bar\spam.foo']
for path in paths:
self.assertEqual(fn(urllib.request.pathname2url(path)), path)
@unittest.skipIf(sys.platform == 'win32',
'test specific to POSIX pathnames')
def test_url2pathname_posix(self):
fn = urllib.request.url2pathname
self.assertRaises(urllib.error.URLError, fn, '//foo/bar')
self.assertRaises(urllib.error.URLError, fn, '//localhost:/foo/bar')
self.assertRaises(urllib.error.URLError, fn, '//:80/foo/bar')
self.assertRaises(urllib.error.URLError, fn, '//:/foo/bar')
self.assertRaises(urllib.error.URLError, fn, '//c:80/foo/bar')
self.assertRaises(urllib.error.URLError, fn, '//127.0.0.1/foo/bar')
@unittest.skipUnless(os_helper.FS_NONASCII, 'need os_helper.FS_NONASCII')
def test_url2pathname_nonascii(self):
encoding = sys.getfilesystemencoding()
errors = sys.getfilesystemencodeerrors()
url = os_helper.FS_NONASCII
self.assertEqual(urllib.request.url2pathname(url), os_helper.FS_NONASCII)
url = urllib.parse.quote(url, encoding=encoding, errors=errors)
self.assertEqual(urllib.request.url2pathname(url), os_helper.FS_NONASCII)
| Pathname_Tests |
python | milvus-io__pymilvus | pymilvus/client/interceptor.py | {
"start": 3099,
"end": 3215
} | class ____(NamedTuple):
method: Any
timeout: Any
metadata: Any
credentials: Any
| ClientCallDetailsTuple |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/definitions/run_request.py | {
"start": 12298,
"end": 16425
} | class ____(
NamedTuple(
"_SensorResult",
[
("run_requests", Optional[Sequence[RunRequest]]),
("skip_reason", Optional[SkipReason]),
("cursor", Optional[str]),
(
"dynamic_partitions_requests",
Optional[
Sequence[Union[DeleteDynamicPartitionsRequest, AddDynamicPartitionsRequest]]
],
),
(
"asset_events",
list[Union[AssetObservation, AssetMaterialization, AssetCheckEvaluation]],
),
(
"automation_condition_evaluations",
Optional[Sequence[AutomationConditionEvaluation[EntityKey]]],
),
],
)
):
"""The result of a sensor evaluation.
Args:
run_requests (Optional[Sequence[RunRequest]]): A list of run requests to be executed.
skip_reason (Optional[Union[str, SkipReason]]): A skip message indicating why sensor
evaluation was skipped.
cursor (Optional[str]): The cursor value for this sensor, which will be provided on the
context for the next sensor evaluation.
dynamic_partitions_requests (Optional[Sequence[Union[DeleteDynamicPartitionsRequest, AddDynamicPartitionsRequest]]]): A list of dynamic partition requests to request dynamic
partition addition and deletion. Run requests will be evaluated using the state of the
partitions with these changes applied. We recommend limiting partition additions
and deletions to a maximum of 25K partitions per sensor evaluation, as this is the maximum
recommended partition limit per asset.
asset_events (Optional[Sequence[Union[AssetObservation, AssetMaterialization, AssetCheckEvaluation]]]): A
list of materializations, observations, and asset check evaluations that the system
will persist on your behalf at the end of sensor evaluation. These events will be not
be associated with any particular run, but will be queryable and viewable in the asset catalog.
"""
def __new__(
cls,
run_requests: Optional[Sequence[RunRequest]] = None,
skip_reason: Optional[Union[str, SkipReason]] = None,
cursor: Optional[str] = None,
dynamic_partitions_requests: Optional[
Sequence[Union[DeleteDynamicPartitionsRequest, AddDynamicPartitionsRequest]]
] = None,
asset_events: Optional[
Sequence[Union[AssetObservation, AssetMaterialization, AssetCheckEvaluation]]
] = None,
**kwargs,
):
if skip_reason and len(run_requests if run_requests else []) > 0:
check.failed(
"Expected a single skip reason or one or more run requests: received values for "
"both run_requests and skip_reason"
)
skip_reason = check.opt_inst_param(skip_reason, "skip_reason", (SkipReason, str))
if isinstance(skip_reason, str):
skip_reason = SkipReason(skip_reason)
return super().__new__(
cls,
run_requests=check.opt_sequence_param(run_requests, "run_requests", RunRequest),
skip_reason=skip_reason,
cursor=check.opt_str_param(cursor, "cursor"),
dynamic_partitions_requests=check.opt_sequence_param(
dynamic_partitions_requests,
"dynamic_partitions_requests",
(AddDynamicPartitionsRequest, DeleteDynamicPartitionsRequest),
),
asset_events=list(
check.opt_sequence_param(
asset_events,
"asset_check_evaluations",
(AssetObservation, AssetMaterialization, AssetCheckEvaluation),
)
),
automation_condition_evaluations=check.opt_sequence_param(
kwargs.get("automation_condition_evaluations"),
"automation_condition_evaluations",
AutomationConditionEvaluation,
),
)
| SensorResult |
python | pypa__pipenv | pipenv/vendor/click/types.py | {
"start": 6623,
"end": 7427
} | class ____(ParamType):
name = "text"
def convert(
self, value: t.Any, param: t.Optional["Parameter"], ctx: t.Optional["Context"]
) -> t.Any:
if isinstance(value, bytes):
enc = _get_argv_encoding()
try:
value = value.decode(enc)
except UnicodeError:
fs_enc = sys.getfilesystemencoding()
if fs_enc != enc:
try:
value = value.decode(fs_enc)
except UnicodeError:
value = value.decode("utf-8", "replace")
else:
value = value.decode("utf-8", "replace")
return value
return str(value)
def __repr__(self) -> str:
return "STRING"
| StringParamType |
python | django__django | tests/urlpatterns_reverse/tests.py | {
"start": 63304,
"end": 63847
} | class ____(SimpleTestCase):
def test_noncallable_view(self):
# View is not a callable (explicit import; arbitrary Python object)
with self.assertRaisesMessage(TypeError, "view must be a callable"):
path("uncallable-object/", views.uncallable)
def test_invalid_regex(self):
# Regex contains an error (refs #6170)
msg = '(regex_error/$" is not a valid regular expression'
with self.assertRaisesMessage(ImproperlyConfigured, msg):
reverse(views.empty_view)
| ErroneousViewTests |
python | kubernetes-client__python | kubernetes/client/models/v1_affinity.py | {
"start": 383,
"end": 5091
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'node_affinity': 'V1NodeAffinity',
'pod_affinity': 'V1PodAffinity',
'pod_anti_affinity': 'V1PodAntiAffinity'
}
attribute_map = {
'node_affinity': 'nodeAffinity',
'pod_affinity': 'podAffinity',
'pod_anti_affinity': 'podAntiAffinity'
}
def __init__(self, node_affinity=None, pod_affinity=None, pod_anti_affinity=None, local_vars_configuration=None): # noqa: E501
"""V1Affinity - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._node_affinity = None
self._pod_affinity = None
self._pod_anti_affinity = None
self.discriminator = None
if node_affinity is not None:
self.node_affinity = node_affinity
if pod_affinity is not None:
self.pod_affinity = pod_affinity
if pod_anti_affinity is not None:
self.pod_anti_affinity = pod_anti_affinity
@property
def node_affinity(self):
"""Gets the node_affinity of this V1Affinity. # noqa: E501
:return: The node_affinity of this V1Affinity. # noqa: E501
:rtype: V1NodeAffinity
"""
return self._node_affinity
@node_affinity.setter
def node_affinity(self, node_affinity):
"""Sets the node_affinity of this V1Affinity.
:param node_affinity: The node_affinity of this V1Affinity. # noqa: E501
:type: V1NodeAffinity
"""
self._node_affinity = node_affinity
@property
def pod_affinity(self):
"""Gets the pod_affinity of this V1Affinity. # noqa: E501
:return: The pod_affinity of this V1Affinity. # noqa: E501
:rtype: V1PodAffinity
"""
return self._pod_affinity
@pod_affinity.setter
def pod_affinity(self, pod_affinity):
"""Sets the pod_affinity of this V1Affinity.
:param pod_affinity: The pod_affinity of this V1Affinity. # noqa: E501
:type: V1PodAffinity
"""
self._pod_affinity = pod_affinity
@property
def pod_anti_affinity(self):
"""Gets the pod_anti_affinity of this V1Affinity. # noqa: E501
:return: The pod_anti_affinity of this V1Affinity. # noqa: E501
:rtype: V1PodAntiAffinity
"""
return self._pod_anti_affinity
@pod_anti_affinity.setter
def pod_anti_affinity(self, pod_anti_affinity):
"""Sets the pod_anti_affinity of this V1Affinity.
:param pod_anti_affinity: The pod_anti_affinity of this V1Affinity. # noqa: E501
:type: V1PodAntiAffinity
"""
self._pod_anti_affinity = pod_anti_affinity
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1Affinity):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1Affinity):
return True
return self.to_dict() != other.to_dict()
| V1Affinity |
python | numba__numba | numba/core/interpreter.py | {
"start": 839,
"end": 1149
} | class ____(object):
"""Represents an unknown value, this is for ease of debugging purposes only.
"""
def __init__(self, varname):
self._varname = varname
def __repr__(self):
return "_UNKNOWN_VALUE({})".format(self._varname)
_logger = logging.getLogger(__name__)
| _UNKNOWN_VALUE |
python | huggingface__transformers | tests/utils/test_model_output.py | {
"start": 6221,
"end": 6424
} | class ____(ModelOutput):
"""Invalid test subclass of ModelOutput where @dataclass decorator is not used"""
a: float
b: float | None = None
c: float | None = None
| ModelOutputTestNoDataclass |
python | pytorch__pytorch | torch/_inductor/codecache.py | {
"start": 41956,
"end": 42203
} | class ____(CacheArtifact):
@override
def populate_cache(self) -> None:
FxGraphCache._write_to_local_cache(self.key, self.content)
@override
@staticmethod
def type() -> str:
return "inductor"
| InductorCacheArtifact |
python | h5py__h5py | h5py/tests/test_dataset.py | {
"start": 58355,
"end": 61995
} | class ____(BaseDataset):
"""
Feature: Compound types correctly round-trip
"""
def test_rt(self):
""" Compound types are read back in correct order (issue 236)"""
dt = np.dtype([ ('weight', np.float64),
('cputime', np.float64),
('walltime', np.float64),
('parents_offset', np.uint32),
('n_parents', np.uint32),
('status', np.uint8),
('endpoint_type', np.uint8), ])
testdata = np.ndarray((16,), dtype=dt)
for key in dt.fields:
testdata[key] = np.random.random((16,)) * 100
name = make_name()
self.f[name] = testdata
outdata = self.f[name][...]
self.assertTrue(np.all(outdata == testdata))
self.assertEqual(outdata.dtype, testdata.dtype)
def test_assign(self):
dt = np.dtype([ ('weight', (np.float64, 3)),
('endpoint_type', np.uint8), ])
testdata = np.ndarray((16,), dtype=dt)
for key in dt.fields:
testdata[key] = np.random.random(size=testdata[key].shape) * 100
name = make_name()
ds = self.f.create_dataset(name, (16,), dtype=dt)
for key in dt.fields:
ds[key] = testdata[key]
outdata = self.f[name][...]
self.assertTrue(np.all(outdata == testdata))
self.assertEqual(outdata.dtype, testdata.dtype)
def test_fields(self):
dt = np.dtype([
('x', np.float64),
('y', np.float64),
('z', np.float64),
])
testdata = np.ndarray((16,), dtype=dt)
for key in dt.fields:
testdata[key] = np.random.random((16,)) * 100
name = make_name()
self.f[name] = testdata
ds = self.f[name]
# Extract multiple fields
np.testing.assert_array_equal(
ds.fields(['x', 'y'])[:], testdata[['x', 'y']]
)
# Extract single field
np.testing.assert_array_equal(
ds.fields('x')[:], testdata['x']
)
# Check __array__() method of fields wrapper
np.testing.assert_array_equal(
np.asarray(ds.fields(['x', 'y'])), testdata[['x', 'y']]
)
# Check type conversion of __array__() method
dt_int = np.dtype([('x', np.int32)])
np.testing.assert_array_equal(
np.asarray(ds.fields(['x']), dtype=dt_int),
testdata[['x']].astype(dt_int)
)
# Check len() on fields wrapper
assert len(ds.fields('x')) == 16
def test_nested_compound_vlen(self):
dt_inner = np.dtype([('a', h5py.vlen_dtype(np.int32)),
('b', h5py.vlen_dtype(np.int32))])
dt = np.dtype([('f1', h5py.vlen_dtype(dt_inner)),
('f2', np.int64)])
inner1 = (np.array(range(1, 3), dtype=np.int32),
np.array(range(6, 9), dtype=np.int32))
inner2 = (np.array(range(10, 14), dtype=np.int32),
np.array(range(16, 21), dtype=np.int32))
data = np.array([(np.array([inner1, inner2], dtype=dt_inner), 2),
(np.array([inner1], dtype=dt_inner), 3)],
dtype=dt)
name = make_name()
self.f[name] = data
out = self.f[name]
# Specifying check_alignment=False because vlen fields have 8 bytes of padding
# because the vlen datatype in hdf5 occupies 16 bytes
self.assertArrayEqual(out, data, check_alignment=False)
| TestCompound |
python | pytest-dev__pytest | testing/example_scripts/fixtures/fill_fixtures/test_funcarg_lookup_modulelevel.py | {
"start": 158,
"end": 319
} | class ____:
def test_method(self, something):
assert something == "test_method"
def test_func(something):
assert something == "test_func"
| TestClass |
python | redis__redis-py | redis/asyncio/connection.py | {
"start": 29382,
"end": 31812
} | class ____(Connection):
"""Manages SSL connections to and from the Redis server(s).
This class extends the Connection class, adding SSL functionality, and making
use of ssl.SSLContext (https://docs.python.org/3/library/ssl.html#ssl.SSLContext)
"""
def __init__(
self,
ssl_keyfile: Optional[str] = None,
ssl_certfile: Optional[str] = None,
ssl_cert_reqs: Union[str, ssl.VerifyMode] = "required",
ssl_include_verify_flags: Optional[List["ssl.VerifyFlags"]] = None,
ssl_exclude_verify_flags: Optional[List["ssl.VerifyFlags"]] = None,
ssl_ca_certs: Optional[str] = None,
ssl_ca_data: Optional[str] = None,
ssl_check_hostname: bool = True,
ssl_min_version: Optional[TLSVersion] = None,
ssl_ciphers: Optional[str] = None,
**kwargs,
):
if not SSL_AVAILABLE:
raise RedisError("Python wasn't built with SSL support")
self.ssl_context: RedisSSLContext = RedisSSLContext(
keyfile=ssl_keyfile,
certfile=ssl_certfile,
cert_reqs=ssl_cert_reqs,
include_verify_flags=ssl_include_verify_flags,
exclude_verify_flags=ssl_exclude_verify_flags,
ca_certs=ssl_ca_certs,
ca_data=ssl_ca_data,
check_hostname=ssl_check_hostname,
min_version=ssl_min_version,
ciphers=ssl_ciphers,
)
super().__init__(**kwargs)
def _connection_arguments(self) -> Mapping:
kwargs = super()._connection_arguments()
kwargs["ssl"] = self.ssl_context.get()
return kwargs
@property
def keyfile(self):
return self.ssl_context.keyfile
@property
def certfile(self):
return self.ssl_context.certfile
@property
def cert_reqs(self):
return self.ssl_context.cert_reqs
@property
def include_verify_flags(self):
return self.ssl_context.include_verify_flags
@property
def exclude_verify_flags(self):
return self.ssl_context.exclude_verify_flags
@property
def ca_certs(self):
return self.ssl_context.ca_certs
@property
def ca_data(self):
return self.ssl_context.ca_data
@property
def check_hostname(self):
return self.ssl_context.check_hostname
@property
def min_version(self):
return self.ssl_context.min_version
| SSLConnection |
python | plotly__plotly.py | plotly/graph_objs/isosurface/_slices.py | {
"start": 233,
"end": 3891
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "isosurface"
_path_str = "isosurface.slices"
_valid_props = {"x", "y", "z"}
@property
def x(self):
"""
The 'x' property is an instance of X
that may be specified as:
- An instance of :class:`plotly.graph_objs.isosurface.slices.X`
- A dict of string/value properties that will be passed
to the X constructor
Returns
-------
plotly.graph_objs.isosurface.slices.X
"""
return self["x"]
@x.setter
def x(self, val):
self["x"] = val
@property
def y(self):
"""
The 'y' property is an instance of Y
that may be specified as:
- An instance of :class:`plotly.graph_objs.isosurface.slices.Y`
- A dict of string/value properties that will be passed
to the Y constructor
Returns
-------
plotly.graph_objs.isosurface.slices.Y
"""
return self["y"]
@y.setter
def y(self, val):
self["y"] = val
@property
def z(self):
"""
The 'z' property is an instance of Z
that may be specified as:
- An instance of :class:`plotly.graph_objs.isosurface.slices.Z`
- A dict of string/value properties that will be passed
to the Z constructor
Returns
-------
plotly.graph_objs.isosurface.slices.Z
"""
return self["z"]
@z.setter
def z(self, val):
self["z"] = val
@property
def _prop_descriptions(self):
return """\
x
:class:`plotly.graph_objects.isosurface.slices.X`
instance or dict with compatible properties
y
:class:`plotly.graph_objects.isosurface.slices.Y`
instance or dict with compatible properties
z
:class:`plotly.graph_objects.isosurface.slices.Z`
instance or dict with compatible properties
"""
def __init__(self, arg=None, x=None, y=None, z=None, **kwargs):
"""
Construct a new Slices object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.isosurface.Slices`
x
:class:`plotly.graph_objects.isosurface.slices.X`
instance or dict with compatible properties
y
:class:`plotly.graph_objects.isosurface.slices.Y`
instance or dict with compatible properties
z
:class:`plotly.graph_objects.isosurface.slices.Z`
instance or dict with compatible properties
Returns
-------
Slices
"""
super().__init__("slices")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.isosurface.Slices
constructor must be a dict or
an instance of :class:`plotly.graph_objs.isosurface.Slices`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("x", arg, x)
self._set_property("y", arg, y)
self._set_property("z", arg, z)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Slices |
python | ApeWorX__ape | src/ape_console/_cli.py | {
"start": 1910,
"end": 7987
} | class ____(dict):
def __init__(self, **kwargs):
# Initialize the dictionary with provided keyword arguments
project = kwargs.get("project", self._ape.project)
kwargs["project"] = self._ape.Project(project) if isinstance(project, Path) else project
super().__init__(**kwargs)
def __getitem__(self, key: str):
# First, attempt to retrieve the key from the dictionary itself
if super().__contains__(key):
return super().__getitem__(key)
# Custom behavior for "ape" key
if key == "ape":
res = self._ape
self[key] = res # Cache the result
return res
# Attempt to get the key from extras
try:
res = self._get_extra(key)
except KeyError:
pass
else:
self[key] = res # Cache the result
return res
# Attempt to retrieve the key from the Ape module.
try:
res = self._get_from_ape(key)
except AttributeError:
raise KeyError(key)
# Cache the result and return
self[key] = res
return res
def __setitem__(self, key, value):
# Override to set items directly into the dictionary
super().__setitem__(key, value)
def __contains__(self, item: str) -> bool: # type: ignore
return self.get(item) is not None
def update(self, mapping, **kwargs) -> None: # type: ignore
# Override to update the dictionary directly
super().update(mapping, **kwargs)
@property
def _ape(self) -> ModuleType:
return import_module("ape")
@cached_property
def _local_path(self) -> Path:
return self["project"].path.joinpath(CONSOLE_EXTRAS_FILENAME)
@cached_property
def _global_path(self) -> Path:
return self._ape.config.DATA_FOLDER.joinpath(CONSOLE_EXTRAS_FILENAME)
@cached_property
def _local_extras(self) -> dict:
return self._load_extras_file(self._local_path)
@cached_property
def _global_extras(self) -> dict:
return self._load_extras_file(self._global_path)
def get(self, key: str, default: Optional[Any] = None):
try:
return self.__getitem__(key)
except KeyError:
return default
def _get_extra(self, key: str):
try:
return self._local_extras[key]
except KeyError:
return self._global_extras[key]
def _get_from_ape(self, key: str):
return getattr(self._ape, key)
def _load_extras_file(self, extras_file: Path) -> dict:
if not extras_file.is_file():
return {}
module = import_extras_file(extras_file)
ape_init_extras = getattr(module, "ape_init_extras", None)
all_extras: dict = {}
if ape_init_extras is not None:
func_spec = inspect.getfullargspec(ape_init_extras)
init_kwargs: dict[str, Any] = {k: self._get_from_ape(k) for k in func_spec.args}
extras = ape_init_extras(**init_kwargs)
if isinstance(extras, dict):
all_extras.update(extras)
all_extras.update({k: getattr(module, k) for k in dir(module) if k not in all_extras})
return all_extras
def console(
project: Optional[Union["ProjectManager", Path]] = None,
verbose: bool = False,
extra_locals: Optional[dict] = None,
embed: bool = False,
code: Optional[list[str]] = None,
):
import IPython
from IPython.terminal.ipapp import Config as IPythonConfig
from ape.utils.misc import _python_version
from ape.version import version as ape_version
extra_locals = extra_locals or {}
if project is None:
from ape.utils.basemodel import ManagerAccessMixin
project = ManagerAccessMixin.local_project
else:
extra_locals["project"] = project
project_path: Path = project if isinstance(project, Path) else project.path
banner = ""
if verbose:
banner = f"""
Python: {_python_version}
IPython: {IPython.__version__}
Ape: {ape_version}
Project: {project_path}
Are you ready to Ape, anon?
"""
if not environ.get("APE_TESTING"):
faulthandler.enable() # NOTE: In case we segfault
# Allows modules relative to the project.
sys.path.insert(0, f"{project_path}")
ipy_config = IPythonConfig()
ape_testing = environ.get("APE_TESTING")
if ape_testing:
ipy_config.HistoryManager.enabled = False
# Required for click.testing.CliRunner support.
embed = True
namespace = _create_namespace(**extra_locals)
_launch_console(namespace, ipy_config, embed, banner, code=code)
def _create_namespace(**values) -> dict:
# Abstracted for testing purposes.
return ApeConsoleNamespace(**values)
def _launch_console(
namespace: dict,
ipy_config: "IPythonConfig",
embed: bool,
banner: str,
code: Optional[list[str]],
):
import IPython
from ape_console.config import ConsoleConfig
ipython_kwargs = {"user_ns": namespace, "config": ipy_config}
if code:
_execute_code(code, **ipython_kwargs)
elif embed:
IPython.embed(**ipython_kwargs, colors="Neutral", banner1=banner)
else:
ipy_config.TerminalInteractiveShell.colors = "Neutral"
ipy_config.TerminalInteractiveShell.banner1 = banner
console_config = cast(ConsoleConfig, namespace["ape"].config.get_config("console"))
ipy_config.InteractiveShellApp.extensions.append("ape_console.plugin")
if console_config.plugins:
ipy_config.InteractiveShellApp.extensions.extend(console_config.plugins)
IPython.start_ipython(**ipython_kwargs, argv=())
def _execute_code(code: list[str], **ipython_kwargs):
from IPython import InteractiveShell
shell = InteractiveShell.instance(**ipython_kwargs)
# NOTE: Using `store_history=True` just so the cell IDs are accurate.
for line in code:
shell.run_cell(line, store_history=True)
| ApeConsoleNamespace |
python | allegroai__clearml | clearml/backend_api/services/v2_9/queues.py | {
"start": 38154,
"end": 39581
} | class ____(Response):
"""
Response of queues.get_default endpoint.
:param id: Queue id
:type id: str
:param name: Queue name
:type name: str
"""
_service = "queues"
_action = "get_default"
_version = "2.9"
_schema = {
"definitions": {},
"properties": {
"id": {"description": "Queue id", "type": ["string", "null"]},
"name": {"description": "Queue name", "type": ["string", "null"]},
},
"type": "object",
}
def __init__(self, id: Optional[str] = None, name: Optional[str] = None, **kwargs: Any) -> None:
super(GetDefaultResponse, self).__init__(**kwargs)
self.id = id
self.name = name
@schema_property("id")
def id(self) -> Optional[str]:
return self._property_id
@id.setter
def id(self, value: Optional[str]) -> None:
if value is None:
self._property_id = None
return
self.assert_isinstance(value, "id", six.string_types)
self._property_id = value
@schema_property("name")
def name(self) -> Optional[str]:
return self._property_name
@name.setter
def name(self, value: Optional[str]) -> None:
if value is None:
self._property_name = None
return
self.assert_isinstance(value, "name", six.string_types)
self._property_name = value
| GetDefaultResponse |
python | scipy__scipy | scipy/optimize/_trustregion_exact.py | {
"start": 5830,
"end": 16672
} | class ____(BaseQuadraticSubproblem):
"""Quadratic subproblem solved by nearly exact iterative method.
Notes
-----
This subproblem solver was based on [1]_, [2]_ and [3]_,
which implement similar algorithms. The algorithm is basically
that of [1]_ but ideas from [2]_ and [3]_ were also used.
References
----------
.. [1] A.R. Conn, N.I. Gould, and P.L. Toint, "Trust region methods",
Siam, pp. 169-200, 2000.
.. [2] J. Nocedal and S. Wright, "Numerical optimization",
Springer Science & Business Media. pp. 83-91, 2006.
.. [3] J.J. More and D.C. Sorensen, "Computing a trust region step",
SIAM Journal on Scientific and Statistical Computing, vol. 4(3),
pp. 553-572, 1983.
"""
# UPDATE_COEFF appears in reference [1]_
# in formula 7.3.14 (p. 190) named as "theta".
# As recommended there it value is fixed in 0.01.
UPDATE_COEFF = 0.01
# The subproblem may iterate infinitely for problematic
# cases (see https://github.com/scipy/scipy/issues/12513).
# When the `maxiter` setting is None, we need to apply a
# default. An ad-hoc number (though tested quite extensively)
# is 25, which is set below. To restore the old behavior (which
# potentially hangs), this parameter may be changed to zero:
MAXITER_DEFAULT = 25 # use np.inf for infinite number of iterations
EPS = np.finfo(float).eps
def __init__(self, x, fun, jac, hess, hessp=None,
k_easy=0.1, k_hard=0.2, maxiter=None):
super().__init__(x, fun, jac, hess)
# When the trust-region shrinks in two consecutive
# calculations (``tr_radius < previous_tr_radius``)
# the lower bound ``lambda_lb`` may be reused,
# facilitating the convergence. To indicate no
# previous value is known at first ``previous_tr_radius``
# is set to -1 and ``lambda_lb`` to None.
self.previous_tr_radius = -1
self.lambda_lb = None
self.niter = 0
# ``k_easy`` and ``k_hard`` are parameters used
# to determine the stop criteria to the iterative
# subproblem solver. Take a look at pp. 194-197
# from reference _[1] for a more detailed description.
self.k_easy = k_easy
self.k_hard = k_hard
# ``maxiter`` optionally limits the number of iterations
# the solve method may perform. Useful for poorly conditioned
# problems which may otherwise hang.
self.maxiter = self.MAXITER_DEFAULT if maxiter is None else maxiter
if self.maxiter < 0:
raise ValueError("maxiter must not be set to a negative number"
", use np.inf to mean infinite.")
# Get Lapack function for cholesky decomposition.
# The implemented SciPy wrapper does not return
# the incomplete factorization needed by the method.
self.cholesky, = get_lapack_funcs(('potrf',), (self.hess,))
# Get info about Hessian
self.dimension = len(self.hess)
self.hess_gershgorin_lb,\
self.hess_gershgorin_ub = gershgorin_bounds(self.hess)
self.hess_inf = norm(self.hess, np.inf)
self.hess_fro = norm(self.hess, 'fro')
# A constant such that for vectors smaller than that
# backward substitution is not reliable. It was established
# based on Golub, G. H., Van Loan, C. F. (2013).
# "Matrix computations". Forth Edition. JHU press., p.165.
self.CLOSE_TO_ZERO = self.dimension * self.EPS * self.hess_inf
def _initial_values(self, tr_radius):
"""Given a trust radius, return a good initial guess for
the damping factor, the lower bound and the upper bound.
The values were chosen accordingly to the guidelines on
section 7.3.8 (p. 192) from [1]_.
"""
# Upper bound for the damping factor
lambda_ub = max(0, self.jac_mag/tr_radius + min(-self.hess_gershgorin_lb,
self.hess_fro,
self.hess_inf))
# Lower bound for the damping factor
lambda_lb = max(0, -min(self.hess.diagonal()),
self.jac_mag/tr_radius - min(self.hess_gershgorin_ub,
self.hess_fro,
self.hess_inf))
# Improve bounds with previous info
if tr_radius < self.previous_tr_radius:
lambda_lb = max(self.lambda_lb, lambda_lb)
# Initial guess for the damping factor
if lambda_lb == 0:
lambda_initial = 0
else:
lambda_initial = max(np.sqrt(lambda_lb * lambda_ub),
lambda_lb + self.UPDATE_COEFF*(lambda_ub-lambda_lb))
return lambda_initial, lambda_lb, lambda_ub
def solve(self, tr_radius):
"""Solve quadratic subproblem"""
lambda_current, lambda_lb, lambda_ub = self._initial_values(tr_radius)
n = self.dimension
hits_boundary = True
already_factorized = False
self.niter = 0
while self.niter < self.maxiter:
# Compute Cholesky factorization
if already_factorized:
already_factorized = False
else:
H = self.hess+lambda_current*np.eye(n)
U, info = self.cholesky(H, lower=False,
overwrite_a=False,
clean=True)
self.niter += 1
# Check if factorization succeeded
if info == 0 and self.jac_mag > self.CLOSE_TO_ZERO:
# Successful factorization
# Solve `U.T U p = s`
p = cho_solve((U, False), -self.jac)
p_norm = norm(p)
# Check for interior convergence
if p_norm <= tr_radius and lambda_current == 0:
hits_boundary = False
break
# Solve `U.T w = p`
w = solve_triangular(U, p, trans='T')
w_norm = norm(w)
# Compute Newton step accordingly to
# formula (4.44) p.87 from ref [2]_.
delta_lambda = (p_norm/w_norm)**2 * (p_norm-tr_radius)/tr_radius
lambda_new = lambda_current + delta_lambda
if p_norm < tr_radius: # Inside boundary
s_min, z_min = estimate_smallest_singular_value(U)
ta, tb = self.get_boundaries_intersections(p, z_min,
tr_radius)
# Choose `step_len` with the smallest magnitude.
# The reason for this choice is explained at
# ref [3]_, p. 6 (Immediately before the formula
# for `tau`).
step_len = min([ta, tb], key=abs)
# Compute the quadratic term (p.T*H*p)
quadratic_term = np.dot(p, np.dot(H, p))
# Check stop criteria
relative_error = ((step_len**2 * s_min**2)
/ (quadratic_term + lambda_current*tr_radius**2))
if relative_error <= self.k_hard:
p += step_len * z_min
break
# Update uncertainty bounds
lambda_ub = lambda_current
lambda_lb = max(lambda_lb, lambda_current - s_min**2)
# Compute Cholesky factorization
H = self.hess + lambda_new*np.eye(n)
c, info = self.cholesky(H, lower=False,
overwrite_a=False,
clean=True)
# Check if the factorization have succeeded
#
if info == 0: # Successful factorization
# Update damping factor
lambda_current = lambda_new
already_factorized = True
else: # Unsuccessful factorization
# Update uncertainty bounds
lambda_lb = max(lambda_lb, lambda_new)
# Update damping factor
lambda_current = max(
np.sqrt(np.abs(lambda_lb * lambda_ub)),
lambda_lb + self.UPDATE_COEFF*(lambda_ub-lambda_lb)
)
else: # Outside boundary
# Check stop criteria
relative_error = abs(p_norm - tr_radius) / tr_radius
if relative_error <= self.k_easy:
break
# Update uncertainty bounds
lambda_lb = lambda_current
# Update damping factor
lambda_current = lambda_new
elif info == 0 and self.jac_mag <= self.CLOSE_TO_ZERO:
# jac_mag very close to zero
# Check for interior convergence
if lambda_current == 0:
p = np.zeros(n)
hits_boundary = False
break
s_min, z_min = estimate_smallest_singular_value(U)
step_len = tr_radius
p = step_len * z_min
# Check stop criteria
if (step_len**2 * s_min**2
<= self.k_hard * lambda_current * tr_radius**2):
break
# Update uncertainty bounds
lambda_ub = lambda_current
lambda_lb = max(lambda_lb, lambda_current - s_min**2)
# Update damping factor
lambda_current = max(
np.sqrt(lambda_lb * lambda_ub),
lambda_lb + self.UPDATE_COEFF*(lambda_ub-lambda_lb)
)
else: # Unsuccessful factorization
# Compute auxiliary terms
delta, v = singular_leading_submatrix(H, U, info)
v_norm = norm(v)
# Update uncertainty interval
lambda_lb = max(lambda_lb, lambda_current + delta/v_norm**2)
# Update damping factor
lambda_current = max(
np.sqrt(np.abs(lambda_lb * lambda_ub)),
lambda_lb + self.UPDATE_COEFF*(lambda_ub-lambda_lb)
)
self.lambda_lb = lambda_lb
self.lambda_current = lambda_current
self.previous_tr_radius = tr_radius
return p, hits_boundary
| IterativeSubproblem |
python | PrefectHQ__prefect | tests/test_task_engine.py | {
"start": 59263,
"end": 61620
} | class ____:
async def test_timeout_async_task(self):
@task(timeout_seconds=0.1)
async def async_task():
await asyncio.sleep(2)
with pytest.raises(TimeoutError, match=".*timed out after 0.1 second(s)*"):
await run_task_async(async_task)
@pytest.mark.xfail(
reason="Synchronous sleep in an async task is not interruptible by async timeout"
)
async def test_timeout_async_task_with_sync_sleep(self):
@task(timeout_seconds=0.1)
async def async_task():
time.sleep(2)
with pytest.raises(TimeoutError, match=".*timed out after 0.1 second(s)*"):
await run_task_async(async_task)
async def test_timeout_sync_task(self):
@task(timeout_seconds=0.1)
def sync_task():
time.sleep(2)
with pytest.raises(TimeoutError, match=".*timed out after 0.1 second(s)*"):
run_task_sync(sync_task)
async def test_timeout_concurrency_slot_released_sync(
self, concurrency_limit_v2: ConcurrencyLimitV2, prefect_client: PrefectClient
):
@task(timeout_seconds=0.5)
def expensive_task():
with concurrency(concurrency_limit_v2.name):
time.sleep(1)
with pytest.raises(TimeoutError):
expensive_task()
response = await prefect_client.read_global_concurrency_limit_by_name(
concurrency_limit_v2.name
)
assert response.active_slots == 0
async def test_timeout_concurrency_slot_released_async(
self, concurrency_limit_v2: ConcurrencyLimitV2, prefect_client: PrefectClient
):
@task(timeout_seconds=0.5)
async def expensive_task():
async with aconcurrency(concurrency_limit_v2.name):
await asyncio.sleep(1)
with pytest.raises(TimeoutError):
await expensive_task()
response = await prefect_client.read_global_concurrency_limit_by_name(
concurrency_limit_v2.name
)
assert response.active_slots == 0
async def test_does_not_raise_timeout_error_when_async_task_is_cancelled(self):
@task(timeout_seconds=10)
async def async_task():
raise asyncio.CancelledError()
with pytest.raises(asyncio.CancelledError):
await async_task()
| TestTimeout |
python | huggingface__transformers | src/transformers/models/internvl/modeling_internvl.py | {
"start": 6974,
"end": 7386
} | class ____(BaseModelOutputWithPooling):
r"""
pooler_output (`torch.FloatTensor` of shape `(batch_size, hidden_size)`):
Average of the last layer hidden states of the patch tokens (excluding the *[CLS]* token) if
*config.use_mean_pooling* is set to True. If set to False, then the final hidden state of the *[CLS]* token
will be returned.
"""
| InternVLVisionModelOutputWithPooling |
python | numba__numba | numba/cuda/cudadecl.py | {
"start": 1961,
"end": 2041
} | class ____(Cuda_array_decl):
key = cuda.local.array
@register
| Cuda_local_array |
python | huggingface__transformers | src/transformers/models/bridgetower/configuration_bridgetower.py | {
"start": 857,
"end": 3809
} | class ____(PreTrainedConfig):
r"""
This is the configuration class to store the vision configuration of a [`BridgeTowerModel`]. Instantiating a
configuration with the defaults will yield a similar configuration to that of the bridgetower-base
[BridgeTower/bridgetower-base](https://huggingface.co/BridgeTower/bridgetower-base/) architecture.
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
hidden_size (`int`, *optional*, defaults to 768):
Dimensionality of the encoder layers and the pooler layer.
num_hidden_layers (`int`, *optional*, defaults to 12):
Number of hidden layers in visual encoder model.
patch_size (`int`, *optional*, defaults to 16):
The size (resolution) of each patch.
image_size (`int`, *optional*, defaults to 288):
The size (resolution) of each image.
initializer_factor (`float`, *optional*, defaults to 1):
A factor for initializing all weight matrices (should be kept to 1, used internally for initialization
testing).
layer_norm_eps (`float`, *optional*, defaults to 1e-05):
The epsilon used by the layer normalization layers.
stop_gradient (`bool`, *optional*, defaults to `False`):
Whether to stop gradient for training.
share_layernorm (`bool`, *optional*, defaults to `True`):
Whether LayerNorm layers are shared.
remove_last_layer (`bool`, *optional*, defaults to `False`):
Whether to remove the last layer from the vision encoder.
Example:
```python
>>> from transformers import BridgeTowerVisionConfig
>>> # Initializing a BridgeTower BridgeTower/bridgetower-base style configuration for the vision model
>>> configuration = BridgeTowerVisionConfig()
>>> # Accessing the configuration
>>> configuration
```"""
model_type = "bridgetower_vision_model"
base_config_key = "vision_config"
def __init__(
self,
hidden_size=768,
num_hidden_layers=12,
num_channels=3,
patch_size=16,
image_size=288,
initializer_factor=1,
layer_norm_eps=1e-05,
stop_gradient=False,
share_layernorm=True,
remove_last_layer=False,
**kwargs,
):
super().__init__(**kwargs)
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_channels = num_channels
self.patch_size = patch_size
self.image_size = image_size
self.initializer_factor = initializer_factor
self.layer_norm_eps = layer_norm_eps
self.stop_gradient = stop_gradient
self.share_layernorm = share_layernorm
self.remove_last_layer = remove_last_layer
| BridgeTowerVisionConfig |
python | xlwings__xlwings | xlwings/main.py | {
"start": 150109,
"end": 151076
} | class ____(Apps):
def __init__(self):
pass
_name = "Apps"
@property
def impl(self):
if engines.active is None:
if not (
sys.platform.startswith("darwin") or sys.platform.startswith("win")
):
raise XlwingsError(
"The interactive mode of xlwings is only supported on Windows and "
"macOS. On Linux, you can use xlwings Server or xlwings Reader."
)
elif sys.platform.startswith("darwin"):
raise XlwingsError(
'Make sure to have "appscript" and "psutil", '
"dependencies of xlwings, installed."
)
elif sys.platform.startswith("win"):
raise XlwingsError(
'Make sure to have "pywin32", a dependency of xlwings, installed.'
)
return engines.active.apps.impl
| ActiveEngineApps |
python | PrefectHQ__prefect | src/integrations/prefect-kubernetes/prefect_kubernetes/credentials.py | {
"start": 853,
"end": 4147
} | class ____(Block):
"""
Stores configuration for interaction with Kubernetes clusters.
See `from_file` for creation.
Attributes:
config: The entire loaded YAML contents of a kubectl config file
context_name: The name of the kubectl context to use
Example:
Load a saved Kubernetes cluster config:
```python
from prefect_kubernetes.credentials import import KubernetesClusterConfig
cluster_config_block = KubernetesClusterConfig.load("BLOCK_NAME")
```
"""
_block_type_name = "Kubernetes Cluster Config"
_logo_url = "https://cdn.sanity.io/images/3ugk85nk/production/2d0b896006ad463b49c28aaac14f31e00e32cfab-250x250.png"
_documentation_url = "https://docs.prefect.io/integrations/prefect-kubernetes" # noqa
config: Dict = Field(
default=..., description="The entire contents of a kubectl config file."
)
context_name: str = Field(
default=..., description="The name of the kubectl context to use."
)
@field_validator("config", mode="before")
@classmethod
def parse_yaml_config(cls, value):
if isinstance(value, str):
return yaml.safe_load(value)
return value
@classmethod
def from_file(
cls: Type[Self], path: Optional[Path] = None, context_name: Optional[str] = None
) -> Self:
"""
Create a cluster config from the a Kubernetes config file.
By default, the current context in the default Kubernetes config file will be
used.
An alternative file or context may be specified.
The entire config file will be loaded and stored.
"""
path = Path(path or config.kube_config.KUBE_CONFIG_DEFAULT_LOCATION)
path = path.expanduser().resolve()
# Determine the context
(
existing_contexts,
current_context,
) = config.kube_config.list_kube_config_contexts(config_file=str(path))
context_names = {ctx["name"] for ctx in existing_contexts}
if context_name:
if context_name not in context_names:
raise ValueError(
f"Context {context_name!r} not found. "
f"Specify one of: {listrepr(context_names, sep=', ')}."
)
else:
context_name = current_context["name"]
# Load the entire config file
config_file_contents = path.read_text()
config_dict = yaml.safe_load(config_file_contents)
return cls(config=config_dict, context_name=context_name)
async def get_api_client(self) -> "ApiClient":
"""
Returns a Kubernetes API client for this cluster config.
"""
return await config.kube_config.new_client_from_config_dict(
config_dict=self.config, context=self.context_name
)
async def configure_client(self) -> None:
"""
Activates this cluster configuration by loading the configuration into the
Kubernetes Python client. After calling this, Kubernetes API clients can use
this config's context.
"""
await config.kube_config.load_kube_config_from_dict(
config_dict=self.config, context=self.context_name
)
| KubernetesClusterConfig |
python | streamlit__streamlit | lib/tests/streamlit/watcher/event_based_path_watcher_test.py | {
"start": 799,
"end": 18708
} | class ____(unittest.TestCase):
"""Test EventBasedPathWatcher."""
def setUp(self):
# This test suite patches MultiPathWatcher. A MultiPathWatcher may
# already exist (another test may have directly or indirectly created
# one), so we first close any existing watcher instance here.
if event_based_path_watcher._MultiPathWatcher._singleton is not None:
event_based_path_watcher._MultiPathWatcher.get_singleton().close()
event_based_path_watcher._MultiPathWatcher._singleton = None
self.observer_class_patcher = mock.patch(
"streamlit.watcher.event_based_path_watcher.Observer"
)
self.util_patcher = mock.patch(
"streamlit.watcher.event_based_path_watcher.util"
)
self.MockObserverClass = self.observer_class_patcher.start()
self.mock_util = self.util_patcher.start()
def tearDown(self):
# The test suite patches MultiPathWatcher. We need to close
# any existing watcher instance here to not break other tests.
if event_based_path_watcher._MultiPathWatcher._singleton is not None:
fo = event_based_path_watcher._MultiPathWatcher.get_singleton()
fo.close()
fo._observer.start.reset_mock()
fo._observer.schedule.reset_mock()
event_based_path_watcher._MultiPathWatcher._singleton = None
self.observer_class_patcher.stop()
self.util_patcher.stop()
def test_file_watch_and_callback(self):
"""Test that when a file is modified, the callback is called."""
cb = mock.Mock()
self.mock_util.path_modification_time = lambda *args: 101.0
self.mock_util.calc_md5_with_blocking_retries = lambda _, **kwargs: "1"
ro = event_based_path_watcher.EventBasedPathWatcher("/this/is/my/file.py", cb)
fo = event_based_path_watcher._MultiPathWatcher.get_singleton()
fo._observer.schedule.assert_called_once()
folder_handler = fo._observer.schedule.call_args[0][0]
cb.assert_not_called()
self.mock_util.path_modification_time = lambda *args: 102.0
self.mock_util.calc_md5_with_blocking_retries = lambda _, **kwargs: "2"
ev = events.FileSystemEvent("/this/is/my/file.py")
ev.event_type = events.EVENT_TYPE_MODIFIED
folder_handler.on_modified(ev)
cb.assert_called_once()
ro.close()
def test_works_with_bytes_path(self):
"""Test that when a file path in bytes, the callback is called."""
cb = mock.Mock()
self.mock_util.path_modification_time = lambda *args: 101.0
self.mock_util.calc_md5_with_blocking_retries = lambda _, **kwargs: "1"
ro = event_based_path_watcher.EventBasedPathWatcher("/this/is/my/file.py", cb)
fo = event_based_path_watcher._MultiPathWatcher.get_singleton()
fo._observer.schedule.assert_called_once()
folder_handler = fo._observer.schedule.call_args[0][0]
cb.assert_not_called()
self.mock_util.path_modification_time = lambda *args: 102.0
self.mock_util.calc_md5_with_blocking_retries = lambda _, **kwargs: "2"
ev = events.FileSystemEvent(b"/this/is/my/file.py")
ev.event_type = events.EVENT_TYPE_MODIFIED
folder_handler.on_modified(ev)
cb.assert_called_once()
ro.close()
def test_works_with_directories(self):
"""Test that when a directory is modified, the callback is called."""
cb = mock.Mock()
self.mock_util.path_modification_time = lambda *args: 101.0
self.mock_util.calc_md5_with_blocking_retries = lambda _, **kwargs: "1"
ro = event_based_path_watcher.EventBasedPathWatcher("/this/is/my/dir", cb)
fo = event_based_path_watcher._MultiPathWatcher.get_singleton()
fo._observer.schedule.assert_called_once()
folder_handler = fo._observer.schedule.call_args[0][0]
cb.assert_not_called()
self.mock_util.path_modification_time = lambda *args: 102.0
self.mock_util.calc_md5_with_blocking_retries = lambda _, **kwargs: "2"
ev = events.FileSystemEvent("/this/is/my/dir")
ev.event_type = events.EVENT_TYPE_MODIFIED
ev.is_directory = True
folder_handler.on_modified(ev)
cb.assert_called_once()
ro.close()
@mock.patch("os.path.isdir")
def test_correctly_resolves_watched_folder_path(self, mock_is_dir):
mock_is_dir.return_value = True
cb = mock.Mock()
self.mock_util.path_modification_time = lambda *args: 101.0
self.mock_util.calc_md5_with_blocking_retries = lambda _, **kwargs: "1"
ro = event_based_path_watcher.EventBasedPathWatcher("/this/is/my/dir", cb)
fo = event_based_path_watcher._MultiPathWatcher.get_singleton()
fo._observer.schedule.assert_called_once()
folder_path = fo._observer.schedule.call_args[0][1]
assert folder_path == "/this/is/my/dir"
ro.close()
@mock.patch("os.path.isdir")
def test_correctly_resolves_watched_file_path(self, mock_is_dir):
mock_is_dir.return_value = False
cb = mock.Mock()
self.mock_util.path_modification_time = lambda *args: 101.0
self.mock_util.calc_md5_with_blocking_retries = lambda _, **kwargs: "1"
ro = event_based_path_watcher.EventBasedPathWatcher(
"/this/is/my/dir/file.txt", cb
)
fo = event_based_path_watcher._MultiPathWatcher.get_singleton()
fo._observer.schedule.assert_called_once()
folder_path = fo._observer.schedule.call_args[0][1]
assert folder_path == "/this/is/my/dir"
ro.close()
def test_changed_modification_time_0_0(self):
"""Test that when a directory is modified, but modification time is 0.0,
the callback is called anyway."""
cb = mock.Mock()
self.mock_util.path_modification_time = lambda *args: 0.0
self.mock_util.calc_md5_with_blocking_retries = lambda _, **kwargs: "42"
ro = event_based_path_watcher.EventBasedPathWatcher("/this/is/my/dir", cb)
fo = event_based_path_watcher._MultiPathWatcher.get_singleton()
fo._observer.schedule.assert_called_once()
folder_handler = fo._observer.schedule.call_args[0][0]
cb.assert_not_called()
self.mock_util.calc_md5_with_blocking_retries = lambda _, **kwargs: "64"
ev = events.FileSystemEvent("/this/is/my/dir")
ev.event_type = events.EVENT_TYPE_MODIFIED
ev.is_directory = True
folder_handler.on_modified(ev)
cb.assert_called_once()
ro.close()
def test_kwargs_plumbed_to_calc_md5(self):
"""Test that we pass the glob_pattern and allow_nonexistent kwargs to
calc_md5_with_blocking_retries.
`EventBasedPathWatcher`s can be created with optional kwargs allowing
the caller to specify what types of files to watch (when watching a
directory) and whether to allow watchers on paths with no files/dirs.
This test ensures that these optional parameters make it to our hash
calculation helpers across different on_changed events.
"""
cb = mock.Mock()
self.mock_util.path_modification_time = lambda *args: 101.0
self.mock_util.calc_md5_with_blocking_retries = mock.Mock(return_value="1")
ro = event_based_path_watcher.EventBasedPathWatcher(
"/this/is/my/dir",
cb,
glob_pattern="*.py",
allow_nonexistent=True,
)
fo = event_based_path_watcher._MultiPathWatcher.get_singleton()
fo._observer.schedule.assert_called_once()
folder_handler = fo._observer.schedule.call_args[0][0]
_, kwargs = self.mock_util.calc_md5_with_blocking_retries.call_args
assert kwargs == {"glob_pattern": "*.py", "allow_nonexistent": True}
cb.assert_not_called()
self.mock_util.path_modification_time = lambda *args: 102.0
self.mock_util.calc_md5_with_blocking_retries = mock.Mock(return_value="3")
ev = events.FileSystemEvent("/this/is/my/dir")
ev.event_type = events.EVENT_TYPE_MODIFIED
ev.is_directory = True
folder_handler.on_modified(ev)
_, kwargs = self.mock_util.calc_md5_with_blocking_retries.call_args
assert kwargs == {"glob_pattern": "*.py", "allow_nonexistent": True}
cb.assert_called_once()
ro.close()
def test_callback_not_called_if_same_mtime(self):
"""Test that we ignore files with same mtime."""
cb = mock.Mock()
self.mock_util.path_modification_time = lambda *args: 101.0
self.mock_util.calc_md5_with_blocking_retries = lambda _, **kwargs: "1"
ro = event_based_path_watcher.EventBasedPathWatcher("/this/is/my/file.py", cb)
fo = event_based_path_watcher._MultiPathWatcher.get_singleton()
fo._observer.schedule.assert_called_once()
folder_handler = fo._observer.schedule.call_args[0][0]
cb.assert_not_called()
# Same mtime!
self.mock_util.calc_md5_with_blocking_retries = lambda _, **kwargs: "2"
ev = events.FileSystemEvent("/this/is/my/file.py")
ev.event_type = events.EVENT_TYPE_MODIFIED
folder_handler.on_modified(ev)
# This is the test:
cb.assert_not_called()
ro.close()
def test_callback_not_called_if_same_md5(self):
"""Test that we ignore files with same md5."""
cb = mock.Mock()
self.mock_util.path_modification_time = lambda *args: 101.0
self.mock_util.calc_md5_with_blocking_retries = lambda _, **kwargs: "1"
ro = event_based_path_watcher.EventBasedPathWatcher("/this/is/my/file.py", cb)
fo = event_based_path_watcher._MultiPathWatcher.get_singleton()
fo._observer.schedule.assert_called_once()
folder_handler = fo._observer.schedule.call_args[0][0]
cb.assert_not_called()
self.mock_util.path_modification_time = lambda *args: 102.0
# Same MD5!
ev = events.FileSystemEvent("/this/is/my/file.py")
ev.event_type = events.EVENT_TYPE_MODIFIED
folder_handler.on_modified(ev)
# This is the test:
cb.assert_not_called()
ro.close()
def test_callback_not_called_if_wrong_event_type(self):
"""Test that we ignore created files."""
cb = mock.Mock()
self.mock_util.path_modification_time = lambda *args: 101.0
self.mock_util.calc_md5_with_blocking_retries = lambda _, **kwargs: "1"
ro = event_based_path_watcher.EventBasedPathWatcher("/this/is/my/file.py", cb)
fo = event_based_path_watcher._MultiPathWatcher.get_singleton()
fo._observer.schedule.assert_called_once()
folder_handler = fo._observer.schedule.call_args[0][0]
cb.assert_not_called()
self.mock_util.path_modification_time = lambda *args: 102.0
self.mock_util.calc_md5_with_blocking_retries = lambda _, **kwargs: "2"
ev = events.FileSystemEvent("/this/is/my/file.py")
ev.event_type = events.EVENT_TYPE_DELETED # Wrong type
folder_handler.on_modified(ev)
# This is the test:
cb.assert_not_called()
ro.close()
def test_multiple_watchers_same_file(self):
"""Test that we can have multiple watchers of the same file."""
filename = "/this/is/my/file.py"
mod_count = [0.0]
def modify_mock_file():
self.mock_util.path_modification_time = lambda *args: mod_count[0]
self.mock_util.calc_md5_with_blocking_retries = (
lambda _, **kwargs: f"{mod_count[0]}"
)
ev = events.FileSystemEvent(filename)
ev.event_type = events.EVENT_TYPE_MODIFIED
folder_handler.on_modified(ev)
mod_count[0] += 1.0
cb1 = mock.Mock()
cb2 = mock.Mock()
watcher1 = event_based_path_watcher.EventBasedPathWatcher(filename, cb1)
watcher2 = event_based_path_watcher.EventBasedPathWatcher(filename, cb2)
fo = event_based_path_watcher._MultiPathWatcher.get_singleton()
fo._observer.schedule.assert_called_once()
folder_handler = fo._observer.schedule.call_args[0][0]
cb1.assert_not_called()
cb2.assert_not_called()
# "Modify" our file
modify_mock_file()
assert cb1.call_count == 1
assert cb2.call_count == 1
# Close watcher1. Only watcher2's callback should be called after this.
watcher1.close()
# Modify our file again
modify_mock_file()
assert cb1.call_count == 1
assert cb2.call_count == 2
watcher2.close()
# Modify our file a final time
modify_mock_file()
# Both watchers are now closed, so their callback counts
# should not have increased.
assert cb1.call_count == 1
assert cb2.call_count == 2
@mock.patch("os.path.isdir")
def test_dir_watcher_file_event_precedence(self, mock_is_dir):
"""Test that file-specific watchers are prioritized for file events.
If we're watching both a directory and a file inside that directory,
an event on the file should be handled by the file's watcher, not the
directory's.
"""
dir_path = "/this/is/my/dir"
file_path = "/this/is/my/dir/file.py"
mock_is_dir.side_effect = lambda path: path == dir_path
dir_cb = mock.Mock()
event_based_path_watcher.EventBasedPathWatcher(dir_path, dir_cb)
file_cb = mock.Mock()
event_based_path_watcher.EventBasedPathWatcher(file_path, file_cb)
fo = event_based_path_watcher._MultiPathWatcher.get_singleton()
folder_handler = next(iter(fo._folder_handlers.values()))
self.mock_util.path_modification_time = lambda *args: 102.0
self.mock_util.calc_md5_with_blocking_retries = lambda _, **kwargs: "2"
ev = events.FileSystemEvent(file_path)
ev.event_type = events.EVENT_TYPE_MODIFIED
folder_handler.on_modified(ev)
dir_cb.assert_not_called()
file_cb.assert_called_once()
@mock.patch("os.path.isdir")
def test_no_race_condition_on_path_change(self, mock_is_dir):
"""Test for race condition when modifying watchers during event handling.
This test creates two threads:
1. Simulates file modification events, which reads from _watched_paths.
2. Adds and removes watchers, which writes to _watched_paths.
Without a lock, this would cause a "dictionary changed size during
iteration" RuntimeError.
"""
dir_path = "/this/is/my/dir"
mock_is_dir.side_effect = lambda path: path == dir_path
# Initial watcher for the directory
event_based_path_watcher.EventBasedPathWatcher(dir_path, mock.Mock())
fo = event_based_path_watcher._MultiPathWatcher.get_singleton()
folder_handler = next(iter(fo._folder_handlers.values()))
# Mock fs-related utils to avoid disk access and to ensure
# that we always proceed past the mtime/md5 checks.
self.mock_util.calc_md5_with_blocking_retries.return_value = "md5"
mod_time = [1.0]
def mock_mod_time(*args, **kwargs):
mod_time[0] += 1.0
return mod_time[0]
self.mock_util.path_modification_time.side_effect = mock_mod_time
def event_handler_thread():
ev = events.FileSystemEvent(f"{dir_path}/some_file.py")
ev.event_type = events.EVENT_TYPE_MODIFIED
for _ in range(50):
folder_handler.on_modified(ev)
def watcher_management_thread():
for i in range(50):
path = f"{dir_path}/file_{i}.py"
watcher = event_based_path_watcher.EventBasedPathWatcher(
path, mock.Mock()
)
watcher.close()
t1 = threading.Thread(target=event_handler_thread)
t2 = threading.Thread(target=watcher_management_thread)
t1.start()
t2.start()
t1.join(timeout=5)
t2.join(timeout=5)
# The test succeeds if no exceptions were thrown.
assert t1.is_alive() is False
assert t2.is_alive() is False
@mock.patch("os.path.isdir")
def test_handles_value_error_from_commonpath(self, mock_is_dir):
"""Ensure mixed-drive-like paths (commonpath ValueError) don't crash and are ignored.
We simulate Windows mixed-drive behavior by forcing os.path.commonpath to raise
ValueError. The event should be ignored and no callback invoked.
"""
watched_dir = "/watched"
mock_is_dir.side_effect = lambda p: p == watched_dir
cb = mock.Mock()
# Ensure initial md5/mtime allow watcher creation
self.mock_util.path_modification_time = lambda *args: 101.0
self.mock_util.calc_md5_with_blocking_retries = lambda _, **kwargs: "1"
ro = event_based_path_watcher.EventBasedPathWatcher(watched_dir, cb)
fo = event_based_path_watcher._MultiPathWatcher.get_singleton()
fo._observer.schedule.assert_called_once()
folder_handler = fo._observer.schedule.call_args[0][0]
# Simulate an event on a different "drive" by making commonpath raise
with mock.patch(
"streamlit.watcher.event_based_path_watcher.os.path.commonpath",
side_effect=ValueError,
):
ev = events.FileSystemEvent("/other_drive/some_file.py")
ev.event_type = events.EVENT_TYPE_MODIFIED
folder_handler.on_modified(ev)
# The event is ignored; callback not called and no exception raised
cb.assert_not_called()
ro.close()
| EventBasedPathWatcherTest |
python | mwaskom__seaborn | seaborn/_stats/counting.py | {
"start": 389,
"end": 1090
} | class ____(Stat):
"""
Count distinct observations within groups.
See Also
--------
Hist : A more fully-featured transform including binning and/or normalization.
Examples
--------
.. include:: ../docstrings/objects.Count.rst
"""
group_by_orient: ClassVar[bool] = True
def __call__(
self, data: DataFrame, groupby: GroupBy, orient: str, scales: dict[str, Scale],
) -> DataFrame:
var = {"x": "y", "y": "x"}[orient]
res = (
groupby
.agg(data.assign(**{var: data[orient]}), {var: len})
.dropna(subset=["x", "y"])
.reset_index(drop=True)
)
return res
@dataclass
| Count |
python | kamyu104__LeetCode-Solutions | Python/number-of-students-unable-to-eat-lunch.py | {
"start": 50,
"end": 484
} | class ____(object):
def countStudents(self, students, sandwiches):
"""
:type students: List[int]
:type sandwiches: List[int]
:rtype: int
"""
count = collections.Counter(students)
for i, s in enumerate(sandwiches):
if not count[s]:
break
count[s] -= 1
else:
i = len(sandwiches)
return len(sandwiches)-i
| Solution |
python | pytorch__pytorch | torch/testing/_internal/distributed/_shard/sharded_tensor/_test_st_common.py | {
"start": 686,
"end": 1120
} | class ____(torch.nn.Module):
def __init__(self, spec=None, group=None, init_rrefs=True) -> None:
super().__init__()
if spec is not None:
self.sharded_tensor2 = sharded_tensor.rand(
spec, 10, 20, process_group=group, init_rrefs=init_rrefs
)
else:
self.sharded_tensor2 = None
self.random_tensor2 = torch.nn.Parameter(torch.rand(2, 2))
| MyShardedModel2 |
python | run-llama__llama_index | llama-index-integrations/readers/llama-index-readers-web/llama_index/readers/web/readability_web/base.py | {
"start": 585,
"end": 5122
} | class ____(BaseReader):
"""
Readability Webpage Loader.
Extracting relevant information from a fully rendered web page.
During the processing, it is always assumed that web pages used as data sources contain textual content.
1. Load the page and wait for it rendered. (playwright)
2. Inject Readability.js to extract the main content.
Args:
proxy (Optional[str], optional): Proxy server. Defaults to None.
wait_until (Optional[Literal["commit", "domcontentloaded", "load", "networkidle"]], optional): Wait until the page is loaded. Defaults to "domcontentloaded".
text_splitter (TextSplitter, optional): Text splitter. Defaults to None.
normalizer (Optional[Callable[[str], str]], optional): Text normalizer. Defaults to nfkc_normalize.
"""
def __init__(
self,
proxy: Optional[str] = None,
wait_until: Optional[
Literal["commit", "domcontentloaded", "load", "networkidle"]
] = "domcontentloaded",
text_splitter: Optional[TextSplitter] = None,
normalize: Optional[Callable[[str], str]] = nfkc_normalize,
) -> None:
self._launch_options = {
"headless": True,
}
self._wait_until = wait_until
if proxy:
self._launch_options["proxy"] = {
"server": proxy,
}
self._text_splitter = text_splitter
self._normalize = normalize
self._readability_js = None
async def async_load_data(self, url: str) -> List[Document]:
"""
Render and load data content from url.
Args:
url (str): URL to scrape.
Returns:
List[Document]: List of documents.
"""
from playwright.async_api import async_playwright
async with async_playwright() as async_playwright:
browser = await async_playwright.chromium.launch(**self._launch_options)
article = await self.scrape_page(
browser,
url,
)
extra_info = {
key: article[key]
for key in [
"title",
"length",
"excerpt",
"byline",
"dir",
"lang",
"siteName",
]
}
if self._normalize is not None:
article["textContent"] = self._normalize(article["textContent"])
texts = []
if self._text_splitter is not None:
texts = self._text_splitter.split_text(article["textContent"])
else:
texts = [article["textContent"]]
await browser.close()
return [Document(text=x, extra_info=extra_info) for x in texts]
def load_data(self, url: str) -> List[Document]:
return async_to_sync(self.async_load_data(url))
async def scrape_page(
self,
browser: Browser,
url: str,
) -> Dict[str, str]:
"""
Scrape a single article url.
Args:
browser (Any): a Playwright Chromium browser.
url (str): URL of the article to scrape.
Returns:
Ref: https://github.com/mozilla/readability
title: article title;
content: HTML string of processed article content;
textContent: text content of the article, with all the HTML tags removed;
length: length of an article, in characters;
excerpt: article description, or short excerpt from the content;
byline: author metadata;
dir: content direction;
siteName: name of the site.
lang: content language
"""
if self._readability_js is None:
with open(path) as f:
self._readability_js = f.read()
inject_readability = f"""
(function(){{
{self._readability_js}
function executor() {{
return new Readability({{}}, document).parse();
}}
return executor();
}}())
"""
# browser = cast(Browser, browser)
page = await browser.new_page(ignore_https_errors=True)
page.set_default_timeout(60000)
await page.goto(url, wait_until=self._wait_until)
r = await page.evaluate(inject_readability)
await page.close()
print("scraped:", url)
return r
| ReadabilityWebPageReader |
python | mlflow__mlflow | mlflow/types/schema.py | {
"start": 11040,
"end": 17264
} | class ____(BaseType):
"""
Specification used to represent a json-convertible object.
"""
def __init__(self, properties: list[Property]) -> None:
self._check_properties(properties)
# Sort by name to make sure the order is stable
self._properties = sorted(properties)
def _check_properties(self, properties):
if not isinstance(properties, list):
raise MlflowException.invalid_parameter_value(
f"Expected properties to be a list, got type {type(properties).__name__}"
)
if len(properties) == 0:
raise MlflowException.invalid_parameter_value(
"Creating Object with empty properties is not allowed."
)
if any(not isinstance(v, Property) for v in properties):
raise MlflowException.invalid_parameter_value(
"Expected values to be instance of Property"
)
# check duplicated property names
names = set()
duplicates = set()
for prop in properties:
if prop.name in names:
duplicates.add(prop.name)
else:
names.add(prop.name)
if len(duplicates) > 0:
raise MlflowException.invalid_parameter_value(
f"Found duplicated property names: `{', '.join(duplicates)}`"
)
@property
def properties(self) -> list[Property]:
"""The list of object properties"""
return self._properties
@properties.setter
def properties(self, value: list[Property]) -> None:
self._check_properties(value)
self._properties = sorted(value)
def __eq__(self, other) -> bool:
if isinstance(other, Object):
return self.properties == other.properties
return False
def __repr__(self) -> str:
joined = ", ".join(map(repr, self.properties))
return "{" + joined + "}"
def to_dict(self):
properties = {
name: value for prop in self.properties for name, value in prop.to_dict().items()
}
return {
"type": OBJECT_TYPE,
"properties": properties,
}
@classmethod
def from_json_dict(cls, **kwargs):
"""
Deserialize from a json loaded dictionary.
The dictionary is expected to contain `type` and
`properties` keys.
Example: {"type": "object", "properties": {"property_name": {"type": "string"}}}
"""
if not {"properties", "type"} <= set(kwargs.keys()):
raise MlflowException(
"Missing keys in Object JSON. Expected to find keys `properties` and `type`"
)
if kwargs["type"] != OBJECT_TYPE:
raise MlflowException("Type mismatch, Object expects `object` as the type")
if not isinstance(kwargs["properties"], dict) or any(
not isinstance(prop, dict) for prop in kwargs["properties"].values()
):
raise MlflowException("Expected properties to be a dictionary of Property JSON")
return cls(
[Property.from_json_dict(**{name: prop}) for name, prop in kwargs["properties"].items()]
)
def _merge(self, other: BaseType) -> Object:
"""
Check if the current object is compatible with another object and return
the updated object.
When we infer the signature from a list of objects, it is possible
that one object has more properties than the other. In this case,
we should mark those optional properties as required=False.
For properties with the same name, we should check the compatibility
of two properties and update.
An example of two compatible objects:
.. code-block:: python
obj1 = Object(
properties=[
Property(name="a", dtype=DataType.string),
Property(name="b", dtype=DataType.double),
]
)
obj2 = Object(
properties=[
Property(name="a", dtype=DataType.string),
Property(name="c", dtype=DataType.boolean),
]
)
updated_obj = obj1._merge(obj2)
assert updated_obj == Object(
properties=[
Property(name="a", dtype=DataType.string),
Property(name="b", dtype=DataType.double, required=False),
Property(name="c", dtype=DataType.boolean, required=False),
]
)
"""
# Merging object type with AnyType makes all properties optional
if isinstance(other, AnyType):
return Object(
properties=[
Property(name=prop.name, dtype=prop.dtype, required=False)
for prop in self.properties
]
)
if not isinstance(other, Object):
raise MlflowException(
f"Can't merge object with non-object type: {type(other).__name__}"
)
if self == other:
return deepcopy(self)
prop_dict1 = {prop.name: prop for prop in self.properties}
prop_dict2 = {prop.name: prop for prop in other.properties}
# For each property in the first element, if it doesn't appear
# later, we update required=False
updated_properties = [
Property(name=k, dtype=prop_dict1[k].dtype, required=False)
for k in prop_dict1.keys() - prop_dict2.keys()
]
# For common keys, property type should be the same
updated_properties.extend(
prop_dict1[k]._merge(prop_dict2[k]) for k in prop_dict1.keys() & prop_dict2.keys()
)
# For each property appears in the second elements, if it doesn't
# exist, we update and set required=False
updated_properties.extend(
Property(name=k, dtype=prop_dict2[k].dtype, required=False)
for k in prop_dict2.keys() - prop_dict1.keys()
)
return Object(properties=updated_properties)
| Object |
python | mlflow__mlflow | tests/store/artifact/test_azure_data_lake_artifact_repo.py | {
"start": 1118,
"end": 15727
} | class ____:
def __init__(self, items, next_marker=None):
self.items = items
self.next_marker = next_marker
def __iter__(self):
return iter(self.items)
@pytest.fixture
def mock_data_lake_client():
mock_adls_client = mock.MagicMock(autospec=DataLakeServiceClient)
with mock.patch(
"mlflow.store.artifact.azure_data_lake_artifact_repo._get_data_lake_client",
return_value=mock_adls_client,
):
yield mock_adls_client
@pytest.fixture
def mock_filesystem_client(mock_data_lake_client):
mock_fs_client = mock.MagicMock(autospec=FileSystemClient)
mock_data_lake_client.get_file_system_client.return_value = mock_fs_client
return mock_fs_client
@pytest.fixture
def mock_directory_client(mock_filesystem_client):
mock_directory_client = mock.MagicMock(autospec=DataLakeDirectoryClient)
mock_filesystem_client.get_directory_client.return_value = mock_directory_client
return mock_directory_client
@pytest.fixture
def mock_file_client(mock_directory_client):
mock_file_client = mock.MagicMock(autospec=DataLakeFileClient)
mock_directory_client.get_file_client.return_value = mock_file_client
return mock_file_client
@pytest.mark.parametrize(
("uri", "filesystem", "account", "region_suffix", "path"),
[
(
"abfss://filesystem@acct.dfs.core.windows.net/path",
"filesystem",
"acct",
"dfs.core.windows.net",
"path",
),
(
"abfss://filesystem@acct.dfs.core.windows.net",
"filesystem",
"acct",
"dfs.core.windows.net",
"",
),
(
"abfss://filesystem@acct.dfs.core.windows.net/",
"filesystem",
"acct",
"dfs.core.windows.net",
"",
),
(
"abfss://filesystem@acct.dfs.core.windows.net/a/b",
"filesystem",
"acct",
"dfs.core.windows.net",
"a/b",
),
(
"abfss://filesystem@acct.dfs.core.chinacloudapi.cn/a/b",
"filesystem",
"acct",
"dfs.core.chinacloudapi.cn",
"a/b",
),
(
"abfss://filesystem@acct.privatelink.dfs.core.windows.net/a/b",
"filesystem",
"acct",
"privatelink.dfs.core.windows.net",
"a/b",
),
(
"abfss://filesystem@acct.dfs.core.usgovcloudapi.net/a/b",
"filesystem",
"acct",
"dfs.core.usgovcloudapi.net",
"a/b",
),
],
)
def test_parse_valid_abfss_uri(uri, filesystem, account, region_suffix, path):
assert _parse_abfss_uri(uri) == (filesystem, account, region_suffix, path)
@pytest.mark.parametrize(
"uri",
[
"abfss://filesystem@acct/path",
"abfss://acct.dfs.core.windows.net/path",
"abfss://@acct.dfs.core.windows.net/path",
],
)
def test_parse_invalid_abfss_uri(uri):
with pytest.raises(MlflowException, match="ABFSS URI must be of the form"):
_parse_abfss_uri(uri)
def test_parse_invalid_abfss_uri_bad_scheme():
with pytest.raises(MlflowException, match="Not an ABFSS URI"):
_parse_abfss_uri("abfs://cont@acct.dfs.core.windows.net/path")
def test_list_artifacts_empty(mock_data_lake_client):
repo = AzureDataLakeArtifactRepository(TEST_DATA_LAKE_URI, credential=TEST_CREDENTIAL)
mock_data_lake_client.get_file_system_client.get_paths.return_value = MockPathList([])
assert repo.list_artifacts() == []
def test_list_artifacts_single_file(mock_data_lake_client):
repo = AzureDataLakeArtifactRepository(TEST_DATA_LAKE_URI, credential=TEST_CREDENTIAL)
# Evaluate single file
path_props = PathProperties(name=posixpath.join(TEST_DATA_LAKE_URI, "file"), content_length=42)
mock_data_lake_client.get_file_system_client.get_paths.return_value = MockPathList([path_props])
assert repo.list_artifacts("file") == []
def test_list_artifacts(mock_filesystem_client):
repo = AzureDataLakeArtifactRepository(TEST_DATA_LAKE_URI, credential=TEST_CREDENTIAL)
# Create some files to return
dir_prefix = PathProperties(is_directory=True, name=posixpath.join(TEST_ROOT_PATH, "dir"))
path_props = PathProperties(content_length=42, name=posixpath.join(TEST_ROOT_PATH, "file"))
mock_filesystem_client.get_paths.return_value = MockPathList([dir_prefix, path_props])
artifacts = repo.list_artifacts()
mock_filesystem_client.get_paths.assert_called_once_with(path=TEST_ROOT_PATH, recursive=False)
assert artifacts[0].path == "dir"
assert artifacts[0].is_dir is True
assert artifacts[0].file_size is None
assert artifacts[1].path == "file"
assert artifacts[1].is_dir is False
assert artifacts[1].file_size == 42
mock_filesystem_client.reset_mock()
repo.list_artifacts(path="nonexistent-dir")
mock_filesystem_client.get_paths.assert_called_once_with(
path=posixpath.join(TEST_ROOT_PATH, "nonexistent-dir"), recursive=False
)
@pytest.mark.parametrize(
"contents",
["", "B"],
)
def test_log_artifact(mock_filesystem_client, mock_directory_client, tmp_path, contents):
file_name = "b.txt"
repo = AzureDataLakeArtifactRepository(TEST_DATA_LAKE_URI, credential=TEST_CREDENTIAL)
parentd = tmp_path.joinpath("data")
parentd.mkdir()
subd = parentd.joinpath("subdir")
subd.mkdir()
subd.joinpath("b.txt").write_text(contents)
repo.log_artifact(subd.joinpath("b.txt"))
mock_filesystem_client.get_directory_client.assert_called_once_with(TEST_ROOT_PATH)
mock_directory_client.get_file_client.assert_called_once_with(file_name)
if contents == "":
mock_directory_client.get_file_client(file_name).create_file.assert_called()
else:
mock_directory_client.get_file_client(file_name).upload_data.assert_called()
def test_log_artifacts(mock_filesystem_client, mock_directory_client, tmp_path):
fake_sas_token = "fake_session_token"
repo = AzureDataLakeArtifactRepository(
TEST_DATA_LAKE_URI, credential=AzureSasCredential(fake_sas_token)
)
parentd = tmp_path.joinpath("data")
parentd.mkdir()
subd = parentd.joinpath("subdir")
subd.mkdir()
parentd.joinpath("a.txt").write_text("A")
subd.joinpath("b.txt").write_text("B")
subd.joinpath("empty-file.txt").write_text("")
repo.log_artifacts(parentd)
called_directories = [
call[0][0] for call in mock_filesystem_client.get_directory_client.call_args_list
]
assert len(called_directories) == 3
assert sorted(called_directories) == [
posixpath.join(TEST_ROOT_PATH, "."),
posixpath.join(TEST_ROOT_PATH, "subdir"),
posixpath.join(TEST_ROOT_PATH, "subdir"),
]
uploaded_filenames = [
call[0][0] for call in mock_directory_client.get_file_client.call_args_list
]
assert len(uploaded_filenames) == 3
assert set(uploaded_filenames) == {"a.txt", "b.txt", "empty-file.txt"}
mock_directory_client.get_file_client("a.txt").upload_data.assert_called()
mock_directory_client.get_file_client("b.txt").upload_data.assert_called()
mock_directory_client.get_file_client("subdir/empty-file.txt").create_file.assert_called()
def test_log_artifacts_in_parallel_when_necessary(tmp_path, monkeypatch):
fake_sas_token = "fake_session_token"
repo = AzureDataLakeArtifactRepository(
TEST_DATA_LAKE_URI, credential=AzureSasCredential(fake_sas_token)
)
parentd = tmp_path.joinpath("data")
parentd.mkdir()
parentd.joinpath("a.txt").write_text("ABCDE")
monkeypatch.setenv("MLFLOW_MULTIPART_UPLOAD_CHUNK_SIZE", "0")
with (
mock.patch(
f"{ADLS_ARTIFACT_REPOSITORY}._multipart_upload", return_value=None
) as multipart_upload_mock,
mock.patch(f"{ADLS_ARTIFACT_REPOSITORY}.log_artifact", return_value=None),
):
repo.log_artifacts(parentd)
multipart_upload_mock.assert_called_with(
ArtifactCredentialInfo(
signed_uri="https://account.dfs.core.windows.net/filesystem/some/path/"
+ "./a.txt?fake_session_token"
),
ANY,
"./a.txt",
)
@pytest.mark.parametrize(
("file_size", "is_parallel_download"),
[(None, False), (100, False), (500 * 1024**2 - 1, False), (500 * 1024**2, True)],
)
def test_download_file_in_parallel_when_necessary(file_size, is_parallel_download):
repo = AzureDataLakeArtifactRepository(TEST_DATA_LAKE_URI, credential=TEST_CREDENTIAL)
remote_file_path = "file_1.txt"
list_artifacts_result = (
[FileInfo(path=remote_file_path, is_dir=False, file_size=file_size)] if file_size else []
)
with (
mock.patch(
f"{ADLS_ARTIFACT_REPOSITORY}.list_artifacts",
return_value=list_artifacts_result,
),
mock.patch(
f"{ADLS_ARTIFACT_REPOSITORY}._download_from_cloud", return_value=None
) as download_mock,
mock.patch(
f"{ADLS_ARTIFACT_REPOSITORY}._parallelized_download_from_cloud", return_value=None
) as parallel_download_mock,
):
repo.download_artifacts("")
if is_parallel_download:
parallel_download_mock.assert_called_with(file_size, remote_file_path, ANY)
else:
download_mock.assert_called()
def test_download_file_artifact(mock_directory_client, mock_file_client, tmp_path):
repo = AzureDataLakeArtifactRepository(TEST_DATA_LAKE_URI, credential=TEST_CREDENTIAL)
def create_file(file):
local_path = os.path.basename(file.name)
f = tmp_path.joinpath(local_path)
f.write_text("hello world!")
mock_file_client.download_file().readinto.side_effect = create_file
repo.download_artifacts("test.txt")
assert os.path.exists(os.path.join(tmp_path, "test.txt"))
mock_directory_client.get_file_client.assert_called_once_with("test.txt")
def test_download_directory_artifact(mock_filesystem_client, mock_file_client, tmp_path):
repo = AzureDataLakeArtifactRepository(TEST_DATA_LAKE_URI, credential=TEST_CREDENTIAL)
file_path_1 = "file_1"
file_path_2 = "file_2"
path_props_1 = PathProperties(
content_length=42, name=posixpath.join(TEST_ROOT_PATH, file_path_1)
)
path_props_2 = PathProperties(
content_length=42, name=posixpath.join(TEST_ROOT_PATH, file_path_2)
)
dir_name = "dir"
dir_path = posixpath.join(TEST_ROOT_PATH, dir_name)
dir_props = PathProperties(is_directory=True, name=dir_path)
dir_file_name = "subdir_file"
dir_file_props = PathProperties(content_length=42, name=posixpath.join(dir_path, dir_file_name))
def get_mock_listing(*args, **kwargs):
"""
Produces a mock listing that only contains content if the
specified prefix is the artifact root. This allows us to mock
`list_artifacts` during the `_download_artifacts_into` subroutine
without recursively listing the same artifacts at every level of the
directory traversal.
"""
path_arg = posixpath.abspath(kwargs["path"])
if path_arg == posixpath.abspath(TEST_ROOT_PATH):
return MockPathList([path_props_1, path_props_2, dir_props])
elif path_arg == posixpath.abspath(dir_path):
return MockPathList([dir_file_props])
else:
return MockPathList([])
def create_file(buffer):
buffer.write(b"hello world!")
mock_filesystem_client.get_paths.side_effect = get_mock_listing
mock_file_client.download_file().readinto.side_effect = create_file
# Ensure that the root directory can be downloaded successfully
dest_dir = tmp_path.joinpath("download_dir")
dest_dir.mkdir()
repo.download_artifacts(artifact_path="", dst_path=dest_dir)
# Ensure that the `mkfile` side effect copied all of the download artifacts into `tmp_path`
dir_contents = os.listdir(dest_dir)
assert file_path_1 in dir_contents
assert file_path_2 in dir_contents
assert dir_name in dir_contents
subdir_contents = os.listdir(dest_dir.joinpath(dir_name))
assert dir_file_name in subdir_contents
def test_refresh_credentials():
dl_client = mock.MagicMock()
with mock.patch(
f"{ADLS_REPOSITORY_PACKAGE}._get_data_lake_client", return_value=dl_client
) as get_data_lake_client_mock:
fs_client = mock.MagicMock()
dl_client.get_file_system_client.return_value = fs_client
resp = requests.Response()
resp.status_code = 401
err = requests.HTTPError(response=resp)
fs_client.get_directory_client.side_effect = err
second_credential = AzureSasCredential("new_fake_token")
def credential_refresh():
return {"credential": second_credential}
first_credential = AzureSasCredential("fake_token")
repo = AzureDataLakeArtifactRepository(
TEST_DATA_LAKE_URI,
credential=first_credential,
credential_refresh_def=credential_refresh,
)
get_data_lake_client_mock.assert_called_with(account_url=ANY, credential=first_credential)
try:
repo._download_from_cloud("test.txt", "local_path")
except requests.HTTPError as e:
assert e == err
get_data_lake_client_mock.assert_called_with(account_url=ANY, credential=second_credential)
def test_trace_data(mock_data_lake_client, tmp_path):
repo = AzureDataLakeArtifactRepository(TEST_DATA_LAKE_URI, credential=TEST_CREDENTIAL)
with pytest.raises(MlflowException, match=r"Trace data not found for path="):
repo.download_trace_data()
trace_data_path = tmp_path.joinpath("traces.json")
trace_data_path.write_text("invalid data")
with (
mock.patch(
"mlflow.store.artifact.artifact_repo.try_read_trace_data",
side_effect=lambda x: try_read_trace_data(trace_data_path),
),
pytest.raises(MlflowTraceDataCorrupted, match=r"Trace data is corrupted for path="),
):
repo.download_trace_data()
mock_trace_data = {"spans": [], "request": {"test": 1}, "response": {"test": 2}}
trace_data_path.write_text(json.dumps(mock_trace_data))
with mock.patch(
"mlflow.store.artifact.artifact_repo.try_read_trace_data",
side_effect=lambda x: try_read_trace_data(trace_data_path),
):
assert repo.download_trace_data() == mock_trace_data
| MockPathList |
python | PrefectHQ__prefect | tests/cli/test_profile.py | {
"start": 19109,
"end": 23227
} | class ____:
def test_populate_defaults(self, temporary_profiles_path: Path):
default_profiles = _read_profiles_from(DEFAULT_PROFILES_PATH)
assert not temporary_profiles_path.exists()
invoke_and_assert(
["profile", "populate-defaults"],
user_input="y",
expected_output_contains=[
"Proposed Changes:",
"Add 'ephemeral'",
"Add 'local'",
"Add 'cloud'",
"Add 'test'",
f"Profiles updated in {temporary_profiles_path}",
"Use with prefect profile use [PROFILE-NAME]",
],
)
assert temporary_profiles_path.exists()
populated_profiles = load_profiles()
assert populated_profiles.names == default_profiles.names
assert populated_profiles.active_name == default_profiles.active_name
assert {"local", "ephemeral", "cloud", "test"} == set(populated_profiles.names)
for name in default_profiles.names:
assert populated_profiles[name].settings == default_profiles[name].settings
def test_populate_defaults_with_existing_profiles(
self, temporary_profiles_path: Path
):
existing_profiles = ProfilesCollection(
profiles=[Profile(name="existing", settings={PREFECT_API_KEY: "test_key"})],
active="existing",
)
save_profiles(existing_profiles)
invoke_and_assert(
["profile", "populate-defaults"],
user_input="y\ny", # Confirm backup and update
expected_output_contains=[
"Proposed Changes:",
"Add 'ephemeral'",
"Add 'local'",
"Add 'cloud'",
f"Back up existing profiles to {temporary_profiles_path}.bak?",
f"Update profiles at {temporary_profiles_path}?",
f"Profiles updated in {temporary_profiles_path}",
],
)
new_profiles = load_profiles()
assert {"local", "ephemeral", "cloud", "test", "existing"} == set(
new_profiles.names
)
backup_profiles = _read_profiles_from(
temporary_profiles_path.with_suffix(".toml.bak")
)
assert "existing" in backup_profiles.names
assert backup_profiles["existing"].settings == {PREFECT_API_KEY: "test_key"}
def test_populate_defaults_no_changes_needed(self, temporary_profiles_path: Path):
shutil.copy(DEFAULT_PROFILES_PATH, temporary_profiles_path)
invoke_and_assert(
["profile", "populate-defaults"],
expected_output_contains=[
"No changes needed. All profiles are up to date.",
],
expected_code=0,
)
assert temporary_profiles_path.read_text() == DEFAULT_PROFILES_PATH.read_text()
def test_show_profile_changes(self, capsys: pytest.CaptureFixture[str]):
default_profiles = ProfilesCollection(
profiles=[
Profile(
name="ephemeral",
settings={PREFECT_API_URL: "https://api.prefect.io"},
),
Profile(
name="local", settings={PREFECT_API_URL: "http://localhost:4200"}
),
Profile(
name="cloud",
settings={PREFECT_API_URL: "https://api.prefect.cloud"},
),
]
)
user_profiles = ProfilesCollection(
profiles=[
Profile(name="default", settings={PREFECT_API_KEY: "test_key"}),
Profile(name="custom", settings={PREFECT_API_KEY: "custom_key"}),
]
)
changes = show_profile_changes(user_profiles, default_profiles)
assert changes is True
captured = capsys.readouterr()
output = captured.out
assert "Proposed Changes:" in output
assert "Add 'ephemeral'" in output
assert "Add 'local'" in output
assert "Add 'cloud'" in output
| TestProfilesPopulateDefaults |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/testing/suite/test_reflection.py | {
"start": 2081,
"end": 6939
} | class ____(OneConnectionTablesTest):
__sparse_driver_backend__ = True
run_deletes = None
@classmethod
def define_tables(cls, metadata):
Table(
"test_table",
metadata,
Column("id", Integer, primary_key=True),
Column("data", String(50)),
)
if testing.requires.schemas.enabled:
Table(
"test_table_s",
metadata,
Column("id", Integer, primary_key=True),
Column("data", String(50)),
schema=config.test_schema,
)
if testing.requires.view_reflection:
cls.define_views(metadata)
if testing.requires.has_temp_table.enabled:
cls.define_temp_tables(metadata)
@classmethod
def define_views(cls, metadata):
test_table = metadata.tables["test_table"]
sa.CreateView(
sa.select(test_table.c.id, test_table.c.data),
"vv",
metadata=metadata,
)
if testing.requires.schemas.enabled:
test_table_s = metadata.tables[
f"{config.test_schema}.test_table_s"
]
sa.CreateView(
sa.select(test_table_s.c.id, test_table_s.c.data),
"vv",
metadata=metadata,
schema=config.test_schema,
)
@classmethod
def temp_table_name(cls):
return get_temp_table_name(
config, config.db, f"user_tmp_{config.ident}"
)
@classmethod
def define_temp_tables(cls, metadata):
kw = temp_table_keyword_args(config, config.db)
table_name = cls.temp_table_name()
user_tmp = Table(
table_name,
metadata,
Column("id", sa.INT, primary_key=True),
Column("name", sa.VARCHAR(50)),
**kw,
)
if (
testing.requires.view_reflection.enabled
and testing.requires.temporary_views.enabled
):
event.listen(
user_tmp,
"after_create",
DDL(
"create temporary view user_tmp_v as "
"select * from user_tmp_%s" % config.ident
),
)
event.listen(user_tmp, "before_drop", DDL("drop view user_tmp_v"))
def test_has_table(self):
with config.db.begin() as conn:
is_true(config.db.dialect.has_table(conn, "test_table"))
is_false(config.db.dialect.has_table(conn, "test_table_s"))
is_false(config.db.dialect.has_table(conn, "nonexistent_table"))
def test_has_table_cache(self, metadata):
insp = inspect(config.db)
is_true(insp.has_table("test_table"))
nt = Table("new_table", metadata, Column("col", Integer))
is_false(insp.has_table("new_table"))
nt.create(config.db)
try:
is_false(insp.has_table("new_table"))
insp.clear_cache()
is_true(insp.has_table("new_table"))
finally:
nt.drop(config.db)
@testing.requires.schemas
def test_has_table_schema(self):
with config.db.begin() as conn:
is_false(
config.db.dialect.has_table(
conn, "test_table", schema=config.test_schema
)
)
is_true(
config.db.dialect.has_table(
conn, "test_table_s", schema=config.test_schema
)
)
is_false(
config.db.dialect.has_table(
conn, "nonexistent_table", schema=config.test_schema
)
)
@testing.requires.schemas
def test_has_table_nonexistent_schema(self):
with config.db.begin() as conn:
is_false(
config.db.dialect.has_table(
conn, "test_table", schema="nonexistent_schema"
)
)
@testing.requires.views
def test_has_table_view(self, connection):
insp = inspect(connection)
is_true(insp.has_table("vv"))
@testing.requires.has_temp_table
def test_has_table_temp_table(self, connection):
insp = inspect(connection)
temp_table_name = self.temp_table_name()
is_true(insp.has_table(temp_table_name))
@testing.requires.has_temp_table
@testing.requires.view_reflection
@testing.requires.temporary_views
def test_has_table_temp_view(self, connection):
insp = inspect(connection)
is_true(insp.has_table("user_tmp_v"))
@testing.requires.views
@testing.requires.schemas
def test_has_table_view_schema(self, connection):
insp = inspect(connection)
is_true(insp.has_table("vv", config.test_schema))
| HasTableTest |
python | django__django | tests/forms_tests/field_tests/test_timefield.py | {
"start": 184,
"end": 2035
} | class ____(FormFieldAssertionsMixin, SimpleTestCase):
def test_timefield_1(self):
f = TimeField()
self.assertEqual(datetime.time(14, 25), f.clean(datetime.time(14, 25)))
self.assertEqual(datetime.time(14, 25, 59), f.clean(datetime.time(14, 25, 59)))
self.assertEqual(datetime.time(14, 25), f.clean("14:25"))
self.assertEqual(datetime.time(14, 25, 59), f.clean("14:25:59"))
with self.assertRaisesMessage(ValidationError, "'Enter a valid time.'"):
f.clean("hello")
with self.assertRaisesMessage(ValidationError, "'Enter a valid time.'"):
f.clean("1:24 p.m.")
def test_timefield_2(self):
f = TimeField(input_formats=["%I:%M %p"])
self.assertEqual(datetime.time(14, 25), f.clean(datetime.time(14, 25)))
self.assertEqual(datetime.time(14, 25, 59), f.clean(datetime.time(14, 25, 59)))
self.assertEqual(datetime.time(4, 25), f.clean("4:25 AM"))
self.assertEqual(datetime.time(16, 25), f.clean("4:25 PM"))
with self.assertRaisesMessage(ValidationError, "'Enter a valid time.'"):
f.clean("14:30:45")
def test_timefield_3(self):
f = TimeField()
# Test whitespace stripping behavior (#5714)
self.assertEqual(datetime.time(14, 25), f.clean(" 14:25 "))
self.assertEqual(datetime.time(14, 25, 59), f.clean(" 14:25:59 "))
with self.assertRaisesMessage(ValidationError, "'Enter a valid time.'"):
f.clean(" ")
def test_timefield_changed(self):
t1 = datetime.time(12, 51, 34, 482548)
t2 = datetime.time(12, 51)
f = TimeField(input_formats=["%H:%M", "%H:%M %p"])
self.assertTrue(f.has_changed(t1, "12:51"))
self.assertFalse(f.has_changed(t2, "12:51"))
self.assertFalse(f.has_changed(t2, "12:51 PM"))
| TimeFieldTest |
python | google__jax | jax/experimental/mosaic/gpu/examples/matmul.py | {
"start": 1450,
"end": 1681
} | class ____:
m: int
n: int
k: int
# Allow access by .mk, .kn, .mn, etc.
def __getattr__(self, name):
if len(name) == 1:
return super().__getattribute__(name)
return tuple(getattr(self, d) for d in name)
| Tiling |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 44178,
"end": 44469
} | class ____(sgqlc.types.Scalar):
"""
See source code for more info.
"""
__schema__ = graphql_schema
########################################################################
# Input Objects
########################################################################
| X509Certificate |
python | kamyu104__LeetCode-Solutions | Python/count-the-number-of-powerful-integers.py | {
"start": 54,
"end": 1058
} | class ____(object):
def numberOfPowerfulInt(self, start, finish, limit, s):
"""
:type start: int
:type finish: int
:type limit: int
:type s: str
:rtype: int
"""
def count(x):
def length(x):
result = 0
while x:
x //= 10
result += 1
return result
result = 0
n = length(x)
base = 10**n
l = n-len(s)
cnt = (limit+1)**l
for i in xrange(l):
base //= 10
curr = x//base%10
cnt //= limit+1
result += (min(curr-1, limit)-0+1)*cnt
if curr > limit:
break
else:
if x%base >= int(s):
result += 1
return result
return count(finish)-count(start-1)
# Time: O(logf)
# Space: O(logf)
# math, combinatorics
| Solution |
python | huggingface__transformers | tests/models/bridgetower/test_modeling_bridgetower.py | {
"start": 5251,
"end": 10818
} | class ____:
def __init__(
self,
parent,
text_kwargs=None,
vision_kwargs=None,
share_cross_modal_transformer_layers=True,
share_link_tower_layers=False,
link_tower_type="add",
init_layernorm_from_vision_encoder=False,
contrastive_hidden_size=512,
logit_scale_init_value=2.6592,
hidden_size=64,
num_hidden_layers=2,
num_attention_heads=4,
intermediate_size=128,
):
if text_kwargs is None:
text_kwargs = {}
if vision_kwargs is None:
vision_kwargs = {}
self.parent = parent
self.text_model_tester = BridgeTowerTextModelTester(parent, **text_kwargs)
self.vision_model_tester = BridgeTowerImageModelTester(parent, **vision_kwargs)
self.share_cross_modal_transformer_layers = share_cross_modal_transformer_layers
self.share_link_tower_layers = share_link_tower_layers
self.link_tower_type = link_tower_type
self.init_layernorm_from_vision_encoder = init_layernorm_from_vision_encoder
self.contrastive_hidden_size = contrastive_hidden_size
self.logit_scale_init_value = logit_scale_init_value
self.batch_size = 1
self.expected_num_hidden_layers = 8
self.is_training = False
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
def prepare_config_and_inputs(self):
text_config, input_ids, attention_mask = self.text_model_tester.prepare_config_and_inputs()
vision_config, pixel_values, pixel_mask = self.vision_model_tester.prepare_config_and_inputs()
config = self.get_config()
return (config, input_ids, attention_mask, pixel_values, pixel_mask)
def get_config(self):
return BridgeTowerConfig(
text_config=self.text_model_tester.get_config().to_dict(),
vision_config=self.vision_model_tester.get_config().to_dict(),
share_cross_modal_transformer_layers=self.share_cross_modal_transformer_layers,
share_link_tower_layers=self.share_link_tower_layers,
link_tower_type=self.link_tower_type,
init_layernorm_from_vision_encoder=self.init_layernorm_from_vision_encoder,
contrastive_hidden_size=self.contrastive_hidden_size,
logit_scale_init_value=self.logit_scale_init_value,
hidden_size=self.hidden_size,
num_hidden_layers=self.num_hidden_layers,
num_attention_heads=self.num_attention_heads,
intermediate_size=self.intermediate_size,
)
def create_and_check_model(
self,
config,
input_ids,
attention_mask,
pixel_values,
pixel_mask,
):
model = BridgeTowerModel(config=config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=attention_mask, pixel_values=pixel_values, pixel_mask=pixel_mask)
result = model(input_ids, attention_mask=attention_mask, pixel_values=pixel_values)
self.parent.assertEqual(
result["text_features"].shape,
(self.batch_size, self.text_model_tester.seq_length, self.text_model_tester.hidden_size),
)
self.parent.assertEqual(
result["image_features"].shape,
(self.batch_size, self.vision_model_tester.num_image_features, self.vision_model_tester.hidden_size),
)
self.parent.assertEqual(
result["pooler_output"].shape,
(self.batch_size, self.text_model_tester.hidden_size + self.vision_model_tester.hidden_size),
)
def create_and_check_for_image_and_text_retrieval(
self,
config,
input_ids,
attention_mask,
pixel_values,
pixel_mask,
):
bridgetower_itm_output_last_dimension = 2
model = BridgeTowerForImageAndTextRetrieval(config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=attention_mask, pixel_values=pixel_values, pixel_mask=pixel_mask)
result = model(input_ids, attention_mask=attention_mask, pixel_values=pixel_values)
self.parent.assertEqual(result.logits.shape, (self.batch_size, bridgetower_itm_output_last_dimension))
def create_and_check_for_masked_language_modeling(
self,
config,
input_ids,
attention_mask,
pixel_values,
pixel_mask,
):
model = BridgeTowerForMaskedLM(config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=attention_mask, pixel_values=pixel_values, pixel_mask=pixel_mask)
result = model(input_ids, attention_mask=attention_mask, pixel_values=pixel_values)
self.parent.assertEqual(
result.logits.shape,
(self.batch_size, self.text_model_tester.seq_length, self.text_model_tester.vocab_size),
)
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
(config, input_ids, attention_mask, pixel_values, pixel_mask) = config_and_inputs
inputs_dict = {
"input_ids": input_ids,
"attention_mask": attention_mask,
"pixel_values": pixel_values,
"pixel_mask": pixel_mask,
}
return config, inputs_dict
@require_torch
| BridgeTowerModelTester |
python | apache__thrift | lib/py/src/Thrift.py | {
"start": 1447,
"end": 1984
} | class ____(object):
"""Base class for processor, which works on two streams."""
def process(self, iprot, oprot):
"""
Process a request. The normal behvaior is to have the
processor invoke the correct handler and then it is the
server's responsibility to write the response to oprot.
"""
pass
def on_message_begin(self, func):
"""
Install a callback that receives (name, type, seqid)
after the message header is read.
"""
pass
| TProcessor |
python | graphql-python__graphene | graphene/types/tests/test_datetime.py | {
"start": 189,
"end": 7482
} | class ____(ObjectType):
datetime = DateTime(_in=DateTime(name="in"))
date = Date(_in=Date(name="in"))
time = Time(_at=Time(name="at"))
def resolve_datetime(self, info, _in=None):
return _in
def resolve_date(self, info, _in=None):
return _in
def resolve_time(self, info, _at=None):
return _at
schema = Schema(query=Query)
@fixture
def sample_datetime():
utc_datetime = datetime.datetime(2019, 5, 25, 5, 30, 15, 10, datetime.timezone.utc)
return utc_datetime
@fixture
def sample_time(sample_datetime):
time = datetime.time(
sample_datetime.hour,
sample_datetime.minute,
sample_datetime.second,
sample_datetime.microsecond,
sample_datetime.tzinfo,
)
return time
@fixture
def sample_date(sample_datetime):
date = sample_datetime.date()
return date
def test_datetime_query(sample_datetime):
isoformat = sample_datetime.isoformat()
result = schema.execute("""{ datetime(in: "%s") }""" % isoformat)
assert not result.errors
assert result.data == {"datetime": isoformat}
def test_datetime_query_with_variables(sample_datetime):
isoformat = sample_datetime.isoformat()
result = schema.execute(
"""
query GetDate($datetime: DateTime) {
literal: datetime(in: "%s")
value: datetime(in: $datetime)
}
"""
% isoformat,
variable_values={"datetime": isoformat},
)
assert not result.errors
assert result.data == {"literal": isoformat, "value": isoformat}
def test_date_query(sample_date):
isoformat = sample_date.isoformat()
result = schema.execute("""{ date(in: "%s") }""" % isoformat)
assert not result.errors
assert result.data == {"date": isoformat}
def test_date_query_with_variables(sample_date):
isoformat = sample_date.isoformat()
result = schema.execute(
"""
query GetDate($date: Date) {
literal: date(in: "%s")
value: date(in: $date)
}
"""
% isoformat,
variable_values={"date": isoformat},
)
assert not result.errors
assert result.data == {"literal": isoformat, "value": isoformat}
def test_time_query(sample_time):
isoformat = sample_time.isoformat()
result = schema.execute("""{ time(at: "%s") }""" % isoformat)
assert not result.errors
assert result.data == {"time": isoformat}
def test_time_query_with_variables(sample_time):
isoformat = sample_time.isoformat()
result = schema.execute(
"""
query GetTime($time: Time) {
literal: time(at: "%s")
value: time(at: $time)
}
"""
% isoformat,
variable_values={"time": isoformat},
)
assert not result.errors
assert result.data == {"literal": isoformat, "value": isoformat}
def test_bad_datetime_query():
not_a_date = "Some string that's not a datetime"
result = schema.execute("""{ datetime(in: "%s") }""" % not_a_date)
assert result.errors and len(result.errors) == 1
error = result.errors[0]
assert isinstance(error, GraphQLError)
assert (
error.message == "DateTime cannot represent value:"
' "Some string that\'s not a datetime"'
)
assert result.data is None
def test_bad_date_query():
not_a_date = "Some string that's not a date"
result = schema.execute("""{ date(in: "%s") }""" % not_a_date)
error = result.errors[0]
assert isinstance(error, GraphQLError)
assert (
error.message == "Date cannot represent value:"
' "Some string that\'s not a date"'
)
assert result.data is None
def test_bad_time_query():
not_a_date = "Some string that's not a time"
result = schema.execute("""{ time(at: "%s") }""" % not_a_date)
error = result.errors[0]
assert isinstance(error, GraphQLError)
assert (
error.message == "Time cannot represent value:"
' "Some string that\'s not a time"'
)
assert result.data is None
def test_datetime_query_variable(sample_datetime):
isoformat = sample_datetime.isoformat()
# test datetime variable provided as Python datetime
result = schema.execute(
"""query Test($date: DateTime){ datetime(in: $date) }""",
variables={"date": sample_datetime},
)
assert not result.errors
assert result.data == {"datetime": isoformat}
# test datetime variable in string representation
result = schema.execute(
"""query Test($date: DateTime){ datetime(in: $date) }""",
variables={"date": isoformat},
)
assert not result.errors
assert result.data == {"datetime": isoformat}
def test_date_query_variable(sample_date):
isoformat = sample_date.isoformat()
# test date variable provided as Python date
result = schema.execute(
"""query Test($date: Date){ date(in: $date) }""",
variables={"date": sample_date},
)
assert not result.errors
assert result.data == {"date": isoformat}
# test date variable in string representation
result = schema.execute(
"""query Test($date: Date){ date(in: $date) }""", variables={"date": isoformat}
)
assert not result.errors
assert result.data == {"date": isoformat}
def test_time_query_variable(sample_time):
isoformat = sample_time.isoformat()
# test time variable provided as Python time
result = schema.execute(
"""query Test($time: Time){ time(at: $time) }""",
variables={"time": sample_time},
)
assert not result.errors
assert result.data == {"time": isoformat}
# test time variable in string representation
result = schema.execute(
"""query Test($time: Time){ time(at: $time) }""", variables={"time": isoformat}
)
assert not result.errors
assert result.data == {"time": isoformat}
def test_support_isoformat():
isoformat = "2011-11-04T00:05:23Z"
# test time variable provided as Python time
result = schema.execute(
"""query DateTime($time: DateTime){ datetime(in: $time) }""",
variables={"time": isoformat},
)
assert not result.errors
assert result.data == {"datetime": "2011-11-04T00:05:23+00:00"}
def test_bad_variables(sample_date, sample_datetime, sample_time):
def _test_bad_variables(type_, input_):
result = schema.execute(
f"""query Test($input: {type_}){{ {type_.lower()}(in: $input) }}""",
variables={"input": input_},
)
assert isinstance(result.errors, list)
assert len(result.errors) == 1
assert isinstance(result.errors[0], GraphQLError)
assert result.data is None
not_a_date = dict()
not_a_date_str = "Some string that's not a date"
today = sample_date
now = sample_datetime
time = sample_time
bad_pairs = [
("DateTime", not_a_date),
("DateTime", not_a_date_str),
("DateTime", today),
("DateTime", time),
("Date", not_a_date),
("Date", not_a_date_str),
("Date", time),
("Time", not_a_date),
("Time", not_a_date_str),
("Time", now),
("Time", today),
]
for type_, input_ in bad_pairs:
_test_bad_variables(type_, input_)
| Query |
python | walkccc__LeetCode | solutions/1862. Sum of Floored Pairs/1862.py | {
"start": 0,
"end": 601
} | class ____:
def sumOfFlooredPairs(self, nums: list[int]) -> int:
MOD = 1_000_000_007
MAX = max(nums)
ans = 0
count = [0] * (MAX + 1)
for num in nums:
count[num] += 1
for i in range(1, MAX + 1):
count[i] += count[i - 1]
for i in range(1, MAX + 1):
if count[i] > count[i - 1]:
summ = 0
j = 1
while i * j <= MAX:
lo = i * j - 1
hi = i * (j + 1) - 1
summ += (count[min(hi, MAX)] - count[lo]) * j
j += 1
ans += summ * (count[i] - count[i - 1])
ans %= MOD
return ans
| Solution |
python | spyder-ide__spyder | spyder/utils/syntaxhighlighters.py | {
"start": 73899,
"end": 77215
} | class ____(BaseSH):
"""Markdown Syntax Highlighter"""
# Syntax highlighting rules:
PROG = re.compile(make_md_patterns(), re.S)
NORMAL = 0
CODE = 1
def highlightBlock(self, text):
text = str(text)
previous_state = self.previousBlockState()
if previous_state == self.CODE:
self.setFormat(0, qstring_length(text), self.formats["code"])
else:
previous_state = self.NORMAL
self.setFormat(0, qstring_length(text), self.formats["normal"])
self.setCurrentBlockState(previous_state)
match_count = 0
n_characters = qstring_length(text)
for match in self.PROG.finditer(text):
for key, value in list(match.groupdict().items()):
start, end = get_span(match, key)
if value:
previous_state = self.previousBlockState()
if previous_state == self.CODE:
if key == "code":
# Change to normal
self.setFormat(0, qstring_length(text),
self.formats["normal"])
self.setCurrentBlockState(self.NORMAL)
else:
continue
else:
if key == "code":
# Change to code
self.setFormat(0, qstring_length(text),
self.formats["code"])
self.setCurrentBlockState(self.CODE)
continue
self.setFormat(start, end - start, self.formats[key])
match_count += 1
if match_count >= n_characters:
break
self.highlight_extras(text)
def setup_formats(self, font=None):
super().setup_formats(font)
font = QTextCharFormat(self.formats['normal'])
font.setFontItalic(True)
self.formats['italic'] = font
self.formats['strong'] = self.formats['definition']
font = QTextCharFormat(self.formats['normal'])
font.setFontStrikeOut(True)
self.formats['strikethrough'] = font
font = QTextCharFormat(self.formats['string'])
font.setUnderlineStyle(QTextCharFormat.SingleUnderline)
self.formats['link'] = font
self.formats['code'] = self.formats['string']
self.formats['inline_code'] = self.formats['string']
font = QTextCharFormat(self.formats['keyword'])
font.setFontWeight(QFont.Bold)
self.formats['title'] = font
#==============================================================================
# Pygments based omni-parser
#==============================================================================
# IMPORTANT NOTE:
# --------------
# Do not be tempted to generalize the use of PygmentsSH (that is tempting
# because it would lead to more generic and compact code, and not only in
# this very module) because this generic syntax highlighter is far slower
# than the native ones (all classes above). For example, a Python syntax
# highlighter based on PygmentsSH would be 2 to 3 times slower than the
# current native PythonSH syntax highlighter.
| MarkdownSH |
python | chroma-core__chroma | chromadb/auth/__init__.py | {
"start": 5316,
"end": 6298
} | class ____(str, Enum):
"""
The set of actions that can be authorized by the authorization provider.
"""
RESET = "system:reset"
CREATE_TENANT = "tenant:create_tenant"
GET_TENANT = "tenant:get_tenant"
CREATE_DATABASE = "db:create_database"
GET_DATABASE = "db:get_database"
DELETE_DATABASE = "db:delete_database"
LIST_DATABASES = "db:list_databases"
LIST_COLLECTIONS = "db:list_collections"
COUNT_COLLECTIONS = "db:count_collections"
CREATE_COLLECTION = "db:create_collection"
GET_OR_CREATE_COLLECTION = "db:get_or_create_collection"
GET_COLLECTION = "collection:get_collection"
DELETE_COLLECTION = "collection:delete_collection"
UPDATE_COLLECTION = "collection:update_collection"
ADD = "collection:add"
DELETE = "collection:delete"
GET = "collection:get"
QUERY = "collection:query"
COUNT = "collection:count"
UPDATE = "collection:update"
UPSERT = "collection:upsert"
@dataclass
| AuthzAction |
python | facebook__pyre-check | source/interprocedural_analyses/taint/test/integration/class_interval.py | {
"start": 9950,
"end": 10302
} | class ____(A22):
def m1(self, arg):
return arg
def no_issue_taint_transform_with_class_interval(c: C22):
# Should not see an issue, due to not going through the taint transform
sink_d(c.m0(_test_source()))
def add_feature_c(arg):
return arg
def add_feature_d(arg):
return arg
def add_feature_e(arg):
return arg
| C22 |
python | FactoryBoy__factory_boy | tests/test_using.py | {
"start": 80439,
"end": 86028
} | class ____(unittest.TestCase):
def test_related_factory_list_of_varying_size(self):
# Create our list of expected "related object counts"
related_list_sizes = [5, 5, 4, 4, 3, 3, 2, 2, 1, 1]
RELATED_LIST_SIZE = lambda: related_list_sizes.pop()
class TestRelatedObject:
def __init__(self, obj=None, one=None, two=None):
# Mock out the 'List of Related Objects' generated by RelatedFactoryList
if hasattr(obj, 'related_list'):
obj.related_list.append(self)
else:
obj.related_list = [self]
self.one = one
self.two = two
self.three = obj
class TestRelatedObjectFactoryList(factory.Factory):
class Meta:
model = TestRelatedObject
one = 1
two = factory.LazyAttribute(lambda o: o.one + 1)
class TestObjectFactory(factory.Factory):
class Meta:
model = TestObject
one = 3
two = 2
# RELATED_LIST_SIZE is a lambda, this allows flexibility, as opposed
# to creating "n" related objects for every parent object...
three = factory.RelatedFactoryList(TestRelatedObjectFactoryList,
'obj',
size=RELATED_LIST_SIZE)
# Create 5 TestObjectFactories: Each with 1, 2, ... 5 related objs
for related_list_size in reversed(related_list_sizes[1::2]):
obj = TestObjectFactory.build()
# Normal fields
self.assertEqual(3, obj.one)
self.assertEqual(2, obj.two)
# RelatedFactory was built
self.assertIsNone(obj.three)
self.assertIsNotNone(obj.related_list)
for related_obj in obj.related_list:
self.assertEqual(1, related_obj.one)
self.assertEqual(2, related_obj.two)
# Each RelatedFactory in the RelatedFactoryList was passed the "parent" object
self.assertEqual(related_list_size, len(obj.related_list))
# obj.related is the list of TestRelatedObject(s)
for related_obj in obj.related_list:
self.assertEqual(obj, related_obj.three)
obj = TestObjectFactory.build(three__one=3)
# Normal fields
self.assertEqual(3, obj.one)
self.assertEqual(2, obj.two)
# RelatedFactory was build
self.assertIsNone(obj.three)
self.assertIsNotNone(obj.related_list)
# three__one was correctly parse
for related_obj in obj.related_list:
self.assertEqual(3, related_obj.one)
self.assertEqual(4, related_obj.two)
# Each RelatedFactory in RelatedFactoryList received "parent" object
self.assertEqual(related_list_size, len(obj.related_list))
for related_obj in obj.related_list:
self.assertEqual(obj, related_obj.three)
def test_related_factory_list_of_static_size(self):
RELATED_LIST_SIZE = 4
class TestRelatedObject:
def __init__(self, obj=None, one=None, two=None):
# Mock out the 'List of Related Objects' generated by RelatedFactoryList
if hasattr(obj, 'related_list'):
obj.related_list.append(self)
else:
obj.related_list = [self]
self.one = one
self.two = two
self.three = obj
class TestRelatedObjectFactoryList(factory.Factory):
class Meta:
model = TestRelatedObject
one = 1
two = factory.LazyAttribute(lambda o: o.one + 1)
class TestObjectFactory(factory.Factory):
class Meta:
model = TestObject
one = 3
two = 2
three = factory.RelatedFactoryList(TestRelatedObjectFactoryList, 'obj',
size=RELATED_LIST_SIZE)
obj = TestObjectFactory.build()
# Normal fields
self.assertEqual(3, obj.one)
self.assertEqual(2, obj.two)
# RelatedFactory was built
self.assertIsNone(obj.three)
self.assertIsNotNone(obj.related_list)
for related_obj in obj.related_list:
self.assertEqual(1, related_obj.one)
self.assertEqual(2, related_obj.two)
# Each RelatedFactory in the RelatedFactoryList was passed the "parent" object
self.assertEqual(RELATED_LIST_SIZE, len(obj.related_list))
# obj.related is the list of TestRelatedObject(s)
for related_obj in obj.related_list:
self.assertEqual(obj, related_obj.three)
obj = TestObjectFactory.build(three__one=3)
# Normal fields
self.assertEqual(3, obj.one)
self.assertEqual(2, obj.two)
# RelatedFactory was build
self.assertIsNone(obj.three)
self.assertIsNotNone(obj.related_list)
# three__one was correctly parse
for related_obj in obj.related_list:
self.assertEqual(3, related_obj.one)
self.assertEqual(4, related_obj.two)
# Each RelatedFactory in RelatedFactoryList received "parent" object
self.assertEqual(RELATED_LIST_SIZE, len(obj.related_list))
for related_obj in obj.related_list:
self.assertEqual(obj, related_obj.three)
| RelatedListFactoryTestCase |
python | kamyu104__LeetCode-Solutions | Python/toss-strange-coins.py | {
"start": 31,
"end": 424
} | class ____(object):
def probabilityOfHeads(self, prob, target):
"""
:type prob: List[float]
:type target: int
:rtype: float
"""
dp = [0.0]*(target+1)
dp[0] = 1.0
for p in prob:
for i in reversed(xrange(target+1)):
dp[i] = (dp[i-1] if i >= 1 else 0.0)*p + dp[i]*(1-p)
return dp[target]
| Solution |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/util/typing.py | {
"start": 19447,
"end": 19986
} | class ____(Generic[_DESC_co]):
"""a descriptor that refers to a descriptor.
same as :class:`.DescriptorReference` but is read-only, so that subclasses
can define a subtype as the generically contained element
"""
if TYPE_CHECKING:
def __get__(self, instance: object, owner: Any) -> _DESC_co: ...
def __set__(self, instance: Any, value: Any) -> NoReturn: ...
def __delete__(self, instance: Any) -> NoReturn: ...
_FN = TypeVar("_FN", bound=Optional[Callable[..., Any]])
| RODescriptorReference |
python | Textualize__textual | src/textual/css/_style_properties.py | {
"start": 22165,
"end": 23788
} | class ____:
"""Descriptor for getting and setting layout."""
def __set_name__(self, owner: StylesBase, name: str) -> None:
self.name = name
def __get__(
self, obj: StylesBase, objtype: type[StylesBase] | None = None
) -> Layout | None:
"""
Args:
obj: The Styles object.
objtype: The Styles class.
Returns:
The `Layout` object.
"""
return obj.get_rule(self.name) # type: ignore[return-value]
def __set__(self, obj: StylesBase, layout: str | Layout | None):
"""
Args:
obj: The Styles object.
layout: The layout to use. You can supply the name of the layout
or a `Layout` object.
"""
from textual.layouts.factory import Layout # Prevents circular import
from textual.layouts.factory import MissingLayout, get_layout
_rich_traceback_omit = True
if layout is None:
if obj.clear_rule("layout"):
obj.refresh(layout=True, children=True)
return
if isinstance(layout, Layout):
layout = layout.name
if obj.layout is not None and obj.layout.name == layout:
return
try:
layout_object = get_layout(layout)
except MissingLayout as error:
raise StyleValueError(
str(error),
help_text=layout_property_help_text(self.name, context="inline"),
)
if obj.set_rule("layout", layout_object):
obj.refresh(layout=True, children=True)
| LayoutProperty |
python | sqlalchemy__sqlalchemy | test/orm/inheritance/test_basic.py | {
"start": 54665,
"end": 60152
} | class ____(fixtures.MappedTest):
"""test that the 'optimized get' path accommodates deferred columns.
Original issue tested is #3468, where loading of a deferred column
in an inherited subclass would fail.
At some point, the logic tested was no longer used and a less efficient
query was used to load these columns, but the test here did not inspect
the SQL such that this would be detected.
Test was then revised to more carefully test and now targets
#7463 as well.
"""
@classmethod
def define_tables(cls, metadata):
Table(
"a",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("type", String(10)),
)
Table(
"b",
metadata,
Column("id", Integer, ForeignKey("a.id"), primary_key=True),
Column("data", String(10)),
)
@classmethod
def setup_classes(cls):
class A(cls.Basic):
pass
class B(A):
pass
@classmethod
def setup_mappers(cls):
A, B = cls.classes("A", "B")
a, b = cls.tables("a", "b")
cls.mapper_registry.map_imperatively(A, a, polymorphic_on=a.c.type)
cls.mapper_registry.map_imperatively(
B,
b,
inherits=A,
polymorphic_identity="b",
properties={
"data": deferred(b.c.data),
"expr": column_property(b.c.data + "q", deferred=True),
},
)
def test_column_property(self):
A, B = self.classes("A", "B")
sess = fixture_session()
b1 = B(data="x")
sess.add(b1)
sess.flush()
b_id = b1.id
with self.sql_execution_asserter(testing.db) as asserter:
eq_(b1.expr, "xq")
asserter.assert_(
CompiledSQL(
"SELECT b.data || :data_1 AS anon_1 "
"FROM b WHERE :param_1 = b.id",
[{"param_1": b_id, "data_1": "q"}],
)
)
def test_expired_column(self):
A, B = self.classes("A", "B")
sess = fixture_session()
b1 = B(data="x")
sess.add(b1)
sess.flush()
b_id = b1.id
sess.expire(b1, ["data"])
with self.sql_execution_asserter(testing.db) as asserter:
eq_(b1.data, "x")
# uses efficient statement w/o JOIN to a
asserter.assert_(
CompiledSQL(
"SELECT b.data AS b_data FROM b WHERE :param_1 = b.id",
[{"param_1": b_id}],
)
)
def test_refresh_column(self):
"""refresh currently does not use the mapper "optimized get".
This could be added later by generalizing the code in
loading.py->load_scalar_attributes() to be used by session.refresh().
For #8703, where we are revisiting some of this logic for 2.0.0,
not doing this yet as enough is changing in 2.0 already.
"""
A, B = self.classes("A", "B")
sess = fixture_session()
b1 = B(data="x")
sess.add(b1)
sess.flush()
pk = b1.id
sess.expire(b1, ["data"])
with self.sql_execution_asserter(testing.db) as asserter:
sess.refresh(b1, ["data"])
asserter.assert_(
CompiledSQL(
# full statement that has a JOIN in it. Note that
# a.id is not included in the SELECT list
"SELECT b.data FROM a JOIN b ON a.id = b.id "
"WHERE a.id = :pk_1",
[{"pk_1": pk}],
# if we used load_scalar_attributes(), it would look like
# this
# "SELECT b.data AS b_data FROM b WHERE :param_1 = b.id",
# [{"param_1": b_id}],
)
)
def test_load_from_unloaded_subclass(self):
A, B = self.classes("A", "B")
sess = fixture_session()
b1 = B(data="x")
sess.add(b1)
sess.commit()
b_id = b1.id
sess.close()
# load polymorphically in terms of A, so that B needs another
# SELECT
b1 = sess.execute(select(A)).scalar()
# it's not loaded
assert "data" not in b1.__dict__
# but it loads successfully when requested
with self.sql_execution_asserter(testing.db) as asserter:
eq_(b1.data, "x")
# uses efficient statement w/o JOIN to a
asserter.assert_(
CompiledSQL(
"SELECT b.data AS b_data FROM b WHERE :param_1 = b.id",
[{"param_1": b_id}],
)
)
def test_load_from_expired_subclass(self):
A, B = self.classes("A", "B")
sess = fixture_session()
b1 = B(data="x")
sess.add(b1)
sess.commit()
b_id = b1.id
sess.close()
b1 = sess.execute(select(A)).scalar()
# it's not loaded
assert "data" not in b1.__dict__
eq_(b1.data, "x")
sess.expire(b1, ["data"])
with self.sql_execution_asserter(testing.db) as asserter:
eq_(b1.data, "x")
# uses efficient statement w/o JOIN to a
asserter.assert_(
CompiledSQL(
"SELECT b.data AS b_data FROM b WHERE :param_1 = b.id",
[{"param_1": b_id}],
)
)
| OptimizedGetOnDeferredTest |
python | getsentry__sentry | src/sentry/dynamic_sampling/rules/helpers/latest_releases.py | {
"start": 1866,
"end": 2264
} | class ____(BoostedRelease):
"""
Class the represents a boosted release with added information that are injected after the base release is
fetched from the cache.
"""
version: str
platform: Platform
def is_active(self, current_timestamp: float) -> bool:
return current_timestamp <= self.timestamp + self.platform.time_to_adoption
@dataclass
| ExtendedBoostedRelease |
python | run-llama__llama_index | llama-index-integrations/llms/llama-index-llms-bedrock/llama_index/llms/bedrock/utils.py | {
"start": 6387,
"end": 6883
} | class ____(Provider):
max_tokens_key = "maxTokenCount"
def get_text_from_response(self, response: dict) -> str:
return response["results"][0]["outputText"]
def get_text_from_stream_response(self, response: dict) -> str:
return response["outputText"]
def get_request_body(self, prompt: str, inference_parameters: dict) -> dict:
return {
"inputText": prompt,
"textGenerationConfig": {**inference_parameters},
}
| AmazonProvider |
python | google__jax | jax/_src/interpreters/batching.py | {
"start": 18737,
"end": 38346
} | class ____(Trace):
def __init__(self, parent_trace, tag, axis_data):
super().__init__()
self.parent_trace = parent_trace
assert isinstance(axis_data, AxisData)
self.axis_data = axis_data
self.tag = tag
self.requires_low = False
def to_batch_info(self, val):
if isinstance(val, BatchTracer) and val._trace.tag is self.tag:
return val.val, val.batch_dim
else:
return val, not_mapped
def process_primitive(self, p, tracers, params):
if config.dynamic_shapes.value:
p.abstract_eval(*(map(core.get_aval, tracers)), **params)
vals_in, dims_in = unzip2(map(self.to_batch_info, tracers))
args_not_mapped = all(bdim is not_mapped for bdim in dims_in)
if p in fancy_primitive_batchers:
if (args_not_mapped
and p in skippable_batchers
and not any(self.axis_data.name == axis_name
for axis_name in skippable_batchers[p](params))):
# no-op shortcut
return p.bind_with_trace(self.parent_trace, vals_in, params)
else:
with core.set_current_trace(self.parent_trace):
val_out, dim_out = fancy_primitive_batchers[p](
self.axis_data, vals_in, dims_in, **params)
elif args_not_mapped:
# no-op shortcut
return p.bind_with_trace(self.parent_trace, vals_in, params)
elif p in primitive_batchers:
with core.set_current_trace(self.parent_trace):
val_out, dim_out = primitive_batchers[p](vals_in, dims_in, **params)
else:
raise NotImplementedError(f"Batching rule for '{p}' not implemented")
src = source_info_util.current()
if p.multiple_results:
with core.set_current_trace(self.parent_trace): # val_out may be lazy map
return [BatchTracer(self, x, d, src) if d is not not_mapped else x
for x, d in zip(val_out, dim_out)]
else:
return (BatchTracer(self, val_out, dim_out, src)
if dim_out is not not_mapped else val_out)
def process_call(self, call_primitive, f, tracers, params):
assert call_primitive.multiple_results
params = dict(params, name=params.get('name', f.__name__))
vals, dims = unzip2(map(self.to_batch_info, tracers))
segment_lens, dims = indirectify_ragged_axes(dims)
f_, dims_out = batch_subtrace(f, self.tag, self.axis_data, tuple(dims))
f_ = _update_annotation(
f_, f.in_type, self.axis_data.size, self.axis_data.name, dims, segment_lens)
with core.set_current_trace(self.parent_trace):
vals_out = call_primitive.bind(f_, *segment_lens, *vals, **params)
vals_out, dims_out = resolve_ragged_axes(vals_out, dims_out())
src = source_info_util.current()
return [BatchTracer(self, v, d, src) for v, d in zip(vals_out, dims_out)]
def process_map(self, map_primitive, f: lu.WrappedFun, tracers, params):
vals, dims = unzip2(map(self.to_batch_info, tracers))
# The logic for the dimension math below is as follows:
# ╔═════════════╦════════════════════════════════════════╦═══════════╗
# ║ d / in_axis ║ None ║ int ║
# ╠═════════════╬════════════════════════════════════════╩═══════════╣
# ║ None ║ No extra axis, so in_axis unaffected ║
# ╠═════════════╬════════════════════════════════════════╦═══════════╣
# ║ int ║ Not mapped, so batching dim unaffected ║ See below ║
# ╚═════════════╩════════════════════════════════════════╩═══════════╝
# When both d and in_axis are defined then:
# - If `d <= in_axis`, we have to move the `in_axis` one dimension further;
# - If `d > in_axis`, we have to decrement `d` (as `in_axis` will get removed).
def both_mapped(in_out_axis, d):
return in_out_axis is not None and d is not not_mapped
new_in_axes = tuple(
in_axis + 1 if both_mapped(in_axis, d) and d <= in_axis else in_axis
for d, in_axis in zip(dims, params['in_axes']))
new_dims = tuple(
d - 1 if both_mapped(in_axis, d) and in_axis < d else d
for d, in_axis in zip(dims, params['in_axes']))
f, dims_out = batch_subtrace(f, self.tag, self.axis_data, new_dims)
out_axes_thunk = params['out_axes_thunk']
# NOTE: This assumes that the choice of the dimensions over which outputs
# are batched is entirely dependent on the function and not e.g. on the
# data or its shapes.
@as_hashable_function(closure=out_axes_thunk)
def new_out_axes_thunk():
return tuple(out_axis + 1 if both_mapped(out_axis, d) and d < out_axis else out_axis
for out_axis, d in zip(out_axes_thunk(), dims_out()))
new_params = dict(params, in_axes=new_in_axes, out_axes_thunk=new_out_axes_thunk)
with core.set_current_trace(self.parent_trace):
vals_out = map_primitive.bind(f, *vals, **new_params)
dims_out_ = [d + 1 if both_mapped(out_axis, d) and out_axis <= d else d
for d, out_axis in zip(dims_out(), out_axes_thunk())]
src = source_info_util.current()
return [BatchTracer(self, v, d, src) for v, d in zip(vals_out, dims_out_)]
def process_custom_jvp_call(self, prim, fun, jvp, tracers, *, symbolic_zeros):
in_vals, in_dims = unzip2(map(self.to_batch_info, tracers))
fun, out_dims1 = batch_subtrace(fun, self.tag, self.axis_data, in_dims)
jvp, out_dims2 = batch_custom_jvp_subtrace(jvp, self.tag, self.axis_data, in_dims)
out_vals = prim.bind_with_trace(self.parent_trace, (fun, jvp, *in_vals),
dict(symbolic_zeros=symbolic_zeros))
fst, out_dims = lu.merge_linear_aux(out_dims1, out_dims2)
src = source_info_util.current()
return [BatchTracer(self, v, d, src) for v, d in zip(out_vals, out_dims)]
def process_custom_vjp_call(self, prim, fun, fwd, bwd, tracers, *, out_trees,
symbolic_zeros): # pytype: disable=signature-mismatch
in_vals, in_dims = unzip2(map(self.to_batch_info, tracers))
fwd_in_dims = [d for in_dim in in_dims for d in [in_dim, not_mapped]]
fun, out_dims1 = batch_subtrace(fun, self.tag, self.axis_data, in_dims)
fwd, out_dims2 = batch_subtrace(fwd, self.tag, self.axis_data, fwd_in_dims)
def bwd_in_dims():
_, _, input_fwds = out_trees()
pruned_dims = iter(out_dims2())
full_dims = [next(pruned_dims) if f is None else in_dims[f] for f in input_fwds]
return [*full_dims, *pruned_dims]
bwd = batch_custom_vjp_bwd(bwd, self.tag, self.axis_data, bwd_in_dims, in_dims)
out_vals = prim.bind_with_trace(self.parent_trace,
(fun, fwd, bwd) + tuple(in_vals),
dict(out_trees=out_trees, symbolic_zeros=symbolic_zeros))
fst, out_dims = lu.merge_linear_aux(out_dims1, out_dims2)
if not fst:
_, res_tree, input_fwds = out_trees()
num_res = res_tree.num_leaves - sum(f is not None for f in input_fwds)
_, out_dims = split_list(out_dims, [num_res])
src = source_info_util.current()
return [BatchTracer(self, v, d, src) for v, d in zip(out_vals, out_dims)]
### API for batching callables with vmappable inputs and outputs
def batch(fun: lu.WrappedFun, axis_data,
in_dims, out_dim_dests) -> lu.WrappedFun:
# we split up _batch_inner and _batch_outer for the leak checker
f = _batch_inner(fun, axis_data, out_dim_dests)
return _batch_outer(f, axis_data, in_dims)
@lu.transformation2
def _batch_outer(f, axis_data, in_dims, *in_vals):
tag = TraceTag()
with source_info_util.transform_name_stack('vmap'):
outs, trace = f(tag, in_dims, *in_vals)
with core.ensure_no_leaks(trace): del trace
return outs
@lu.transformation2
def _batch_inner(f: Callable, axis_data, out_dim_dests, tag, in_dims, *in_vals):
in_dims = in_dims() if callable(in_dims) else in_dims
with core.take_current_trace() as parent_trace:
trace = BatchTrace(parent_trace, tag, axis_data)
idx = memoize(lambda: BatchTracer(trace, make_iota(axis_data.size), 0,
source_info_util.current()))
with core.set_current_trace(parent_trace):
in_tracers = map(partial(to_elt, trace, idx), in_vals, in_dims)
# TODO(yashkatariya): Instead of `add_explicit_mesh_axis_names`, we should
# create a new mesh by removing the axis_data.explicit_mesh_axis from it.
with (core.set_current_trace(trace),
core.extend_axis_env_nd([(axis_data.name, axis_data.size)]),
core.add_spmd_axis_names(axis_data.spmd_name),
core.add_explicit_mesh_axis_names(axis_data.explicit_mesh_axis)):
outs = f(*in_tracers)
out_dim_dests = out_dim_dests() if callable(out_dim_dests) else out_dim_dests
out_vals = map(partial(from_elt, trace, axis_data.size, axis_data.explicit_mesh_axis),
range(len(outs)), outs, out_dim_dests)
return out_vals, trace
# NOTE: This divides the in_axes by the tile_size and multiplies the out_axes by it.
def vtile(f_flat: lu.WrappedFun,
in_axes_flat: tuple[int | None, ...],
out_axes_flat: tuple[int | None, ...],
tile_size: int | None,
axis_name: AxisName):
@curry
def tile_axis(arg, axis: int | None, tile_size):
if axis is None:
return arg
shape = list(arg.shape)
shape[axis:axis+1] = [tile_size, shape[axis] // tile_size]
return arg.reshape(shape)
def untile_axis(out, axis: int | None):
if axis is None:
return out
shape = list(out.shape)
shape[axis:axis+2] = [shape[axis] * shape[axis+1]]
return out.reshape(shape)
@lu.transformation2
def _map_to_tile(f, *args_flat):
sizes = (x.shape[i] for x, i in safe_zip(args_flat, in_axes_flat) if i is not None)
tile_size_ = tile_size or next(sizes, None)
assert tile_size_ is not None, "No mapped arguments?"
outputs_flat = f(*map(tile_axis(tile_size=tile_size_), args_flat, in_axes_flat))
return map(untile_axis, outputs_flat, out_axes_flat)
axis_data = AxisData(axis_name, tile_size, None, None)
return _map_to_tile(batch(f_flat, axis_data, in_axes_flat, out_axes_flat))
### API for batching functions with jaxpr type inputs and outputs
@lu.transformation_with_aux2
def batch_subtrace(f, store, tag, axis_data, in_dims, *in_vals):
with core.take_current_trace() as parent_trace:
trace = BatchTrace(parent_trace, tag, axis_data)
with core.set_current_trace(trace):
in_dims = in_dims() if callable(in_dims) else in_dims
in_vals, in_dims = resolve_ragged_axes(in_vals, in_dims)
in_tracers = [BatchTracer(trace, x, dim, source_info_util.current())
if dim is not None else x for x, dim in zip(in_vals, in_dims)]
outs = f(*in_tracers)
out_vals, out_dims = unzip2(map(trace.to_batch_info, outs))
segment_lens, out_dims = indirectify_ragged_axes(out_dims)
store.store(out_dims)
return (*segment_lens, *out_vals)
def indirectify_ragged_axes(dims):
if not any(type(d) is RaggedAxis for d in dims):
return [], dims
axis_map : dict[int, tuple[Array, pe.DBIdx]] = collections.OrderedDict()
def canonicalize_segment_lengths(d: RaggedAxis) -> RaggedAxis:
new_ragged_axes = []
for ragged_axis, segment_lengths in d.ragged_axes:
_, dbidx = axis_map.setdefault(
id(core.get_referent(segment_lengths)),
(segment_lengths, pe.DBIdx(len(axis_map))))
new_ragged_axes.append((ragged_axis, dbidx))
return RaggedAxis(d.stacked_axis, tuple(new_ragged_axes))
new_dims = [canonicalize_segment_lengths(d)
if isinstance(d, RaggedAxis) else d for d in dims]
segment_lens = [s for s, _ in axis_map.values()]
return segment_lens, new_dims
def indirectify_ragged_axes_against_inputs_outputs(dims, in_vals, out_vals):
def canonicalize_segment_lengths(d: RaggedAxis) -> RaggedAxis:
new_ragged_axes = []
for ragged_axis, segment_lengths in d.ragged_axes:
key = id(core.get_referent(segment_lengths))
value = _locate_value(key, in_vals, out_vals)
new_ragged_axes.append((ragged_axis, value))
return RaggedAxis(d.stacked_axis, tuple(new_ragged_axes))
new_dims = [canonicalize_segment_lengths(d)
if isinstance(d, RaggedAxis) else d for d in dims]
return new_dims
def _locate_value(key, in_vals, out_vals):
for ix, candidate in enumerate(in_vals):
if key == id(candidate):
return pe.InDBIdx(ix)
for ix, candidate in enumerate(out_vals):
if key == id(candidate):
return pe.OutDBIdx(ix)
assert False, "Could not find segment lengths"
def resolve_ragged_axes(vals, dims):
idxs = {lengths_idx.val for d in dims if isinstance(d, RaggedAxis)
for (_, lengths_idx) in d.ragged_axes}
dims = [RaggedAxis(d.stacked_axis,
tuple((ragged_axis, vals[lengths_idx.val])
for ragged_axis, lengths_idx in d.ragged_axes))
if isinstance(d, RaggedAxis) else d for d in dims]
vals = [x for i, x in enumerate(vals) if i not in idxs]
return vals, dims
def resolve_ragged_axes_against_inputs_outputs(in_vals, out_vals, dims):
def fetch(idx):
if isinstance(idx, pe.InDBIdx):
return in_vals[idx.val]
else:
assert isinstance(idx, pe.OutDBIdx)
return out_vals[idx.val]
dims = [RaggedAxis(d.stacked_axis,
tuple((ragged_axis, fetch(lengths_idx))
for ragged_axis, lengths_idx in d.ragged_axes))
if isinstance(d, RaggedAxis) else d for d in dims]
return dims
### API for batching jaxprs
# TODO(axch): parameterize RaggedAxis annotations by a type parameter so as to
# indicate whether we're dealing with instances that contain Arrays or DBIdx.
# Can reuse same pattern for all dynamic shape stuff.
def batch_jaxpr2(
closed_jaxpr: core.ClosedJaxpr,
axis_data,
in_axes: tuple[int | NotMapped | RaggedAxis, ...],
) -> tuple[core.ClosedJaxpr, tuple[int | NotMapped | RaggedAxis, ...]]:
return _batch_jaxpr2(closed_jaxpr, axis_data, tuple(in_axes))
@weakref_lru_cache
def _batch_jaxpr2(
closed_jaxpr: core.ClosedJaxpr,
axis_data,
in_axes: tuple[int | NotMapped | RaggedAxis, ...],
) -> tuple[core.ClosedJaxpr, tuple[int | NotMapped, ...]]:
f = lu.wrap_init(core.jaxpr_as_fun(closed_jaxpr),
debug_info=closed_jaxpr.jaxpr.debug_info)
f, out_axes = _batch_jaxpr_inner(f, axis_data)
f = _batch_jaxpr_outer(f, axis_data, in_axes)
in_axes2, avals_in = unzip2([
handle_ragged(closed_jaxpr.in_avals, dim, aval)
if isinstance(dim, RaggedAxis) else (dim, aval)
for dim, aval in zip(in_axes, closed_jaxpr.in_avals)])
avals_in2 = []
for aval, b in unsafe_zip(avals_in, in_axes2):
if b is not_mapped:
avals_in2.append(aval)
else:
aval = core.unmapped_aval(
axis_data.size, b, aval, axis_data.explicit_mesh_axis)
if axis_data.spmd_name is not None:
if config._check_vma.value:
aval = aval.update(vma=aval.vma | frozenset(axis_data.spmd_name)) # type: ignore
avals_in2.append(aval)
jaxpr_out, _, consts = pe.trace_to_jaxpr_dynamic(f, avals_in2)
return core.ClosedJaxpr(jaxpr_out, consts), out_axes()
def handle_ragged(in_avals: list[core.AbstractValue], dim: RaggedAxis,
aval: core.ShapedArray) -> tuple[int, core.ShapedArray]:
new_shape = list(aval.shape)
for i, dbi in dim.ragged_axes:
new_shape[i - (dim.stacked_axis < i)] = in_avals[dbi.val].dtype.bound
new_aval = aval.update(shape=tuple(new_shape))
return dim.stacked_axis, new_aval
def batch_jaxpr(closed_jaxpr, axis_data, in_batched, instantiate):
inst = tuple(instantiate) if isinstance(instantiate, list) else instantiate
return _batch_jaxpr(closed_jaxpr, axis_data, tuple(in_batched), inst)
def _batch_jaxpr(closed_jaxpr, axis_data, in_batched, instantiate):
assert (isinstance(instantiate, bool) or
isinstance(instantiate, (list, tuple)) and
all(isinstance(b, bool) for b in instantiate))
if isinstance(instantiate, bool):
instantiate = [instantiate] * len(closed_jaxpr.out_avals)
in_axes = [0 if b else not_mapped for b in in_batched]
out_axes_dest = [0 if inst else zero_if_mapped for inst in instantiate]
return batch_jaxpr_axes(closed_jaxpr, axis_data, in_axes, out_axes_dest)
def batch_jaxpr_axes(closed_jaxpr, axis_data, in_axes, out_axes_dest):
return _batch_jaxpr_axes(closed_jaxpr, axis_data, tuple(in_axes), tuple(out_axes_dest))
@weakref_lru_cache
def _batch_jaxpr_axes(closed_jaxpr: core.ClosedJaxpr,
axis_data: AxisData,
in_axes: Sequence[int], out_axes_dest: Sequence[int]):
f = lu.wrap_init(core.jaxpr_as_fun(closed_jaxpr),
debug_info=closed_jaxpr.jaxpr.debug_info)
f, out_axes = _batch_jaxpr_inner(f, axis_data)
f, out_batched = _match_axes_jaxpr(f, axis_data, out_axes_dest, out_axes)
f = _batch_jaxpr_outer(f, axis_data, in_axes)
avals_in = [core.unmapped_aval(axis_data.size, b, aval,
axis_data.explicit_mesh_axis)
if b is not not_mapped
else aval for aval, b in unsafe_zip(closed_jaxpr.in_avals, in_axes)]
jaxpr_out, _, consts = pe.trace_to_jaxpr_dynamic(f, avals_in)
return core.ClosedJaxpr(jaxpr_out, consts), out_batched()
@lu.transformation_with_aux2
def _batch_jaxpr_inner(f, store, axis_data, tag, in_axes, *in_vals):
with core.take_current_trace() as parent_trace:
trace = BatchTrace(parent_trace, tag, axis_data)
_, in_axes = resolve_ragged_axes(in_vals, in_axes)
in_tracers = [BatchTracer(trace, val, dim) if dim is not None else val
for val, dim in zip(in_vals, in_axes)]
# TODO(yashkatariya): Instead of `add_explicit_mesh_axis_names`, we should
# create a new mesh by removing the axis_data.explicit_mesh_axis from it.
with (core.set_current_trace(trace),
core.extend_axis_env_nd([(axis_data.name, axis_data.size)]),
core.add_spmd_axis_names(axis_data.spmd_name),
core.add_explicit_mesh_axis_names(axis_data.explicit_mesh_axis)):
outs = f(*in_tracers)
out_vals, out_axes = unzip2(map(trace.to_batch_info, outs))
new_out_axes = indirectify_ragged_axes_against_inputs_outputs(
out_axes, in_vals, out_vals)
store.store(new_out_axes)
return out_vals
@lu.transformation_with_aux2
def _match_axes_jaxpr(f, store, axis_data, out_axes_dest, out_axes, trace, in_axes,
*in_vals):
out_vals = f(trace, in_axes, *in_vals)
out_axes = out_axes()
out_axes_dest = [(None if src is not_mapped else 0)
if dst is zero_if_mapped else dst
for src, dst in unsafe_zip(out_axes, out_axes_dest)]
if len(out_axes_dest) != len(out_axes):
out_axis_dest, = out_axes_dest
out_axes_dest = [out_axis_dest] * len(out_axes)
out_vals = map(partial(matchaxis, axis_data.name, axis_data.size,
axis_data.explicit_mesh_axis),
out_axes, out_axes_dest, out_vals)
out_batched = [dst is not None for dst in out_axes_dest]
store.store(out_batched)
return out_vals
@lu.transformation2
def _batch_jaxpr_outer(f, axis_data, in_dims, *in_vals):
in_dims = in_dims() if callable(in_dims) else in_dims
in_dims = [canonicalize_axis(ax, np.ndim(x)) if isinstance(ax, int)
else ax for x, ax in unsafe_zip(in_vals, in_dims)]
tag = TraceTag()
return f(tag, in_dims, *in_vals)
def _merge_bdims(x, y):
if x == y:
return x
elif x is not_mapped:
return y
elif y is not_mapped:
return x
else:
return x # arbitrary
| BatchTrace |
python | joke2k__faker | faker/providers/currency/pt_BR/__init__.py | {
"start": 46,
"end": 262
} | class ____(CurrencyProvider):
price_formats = ["#,##", "%#,##", "%##,##", "%.###,##", "%#.###,##"]
def pricetag(self) -> str:
return "R$" + self.numerify(self.random_element(self.price_formats))
| Provider |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/orm/decl_base.py | {
"start": 76833,
"end": 80123
} | class ____(_ClassScanAbstractConfig):
"""Configurator that will produce an unmapped dataclass."""
__slots__ = (
"clsdict_view",
"collected_attributes",
"collected_annotations",
"allow_dataclass_fields",
"dataclass_setup_arguments",
"is_dataclass_prior_to_mapping",
"allow_unmapped_annotations",
)
def __init__(
self,
cls_: Type[_O],
dict_: _ClassDict,
):
super().__init__(cls_)
self.clsdict_view = (
util.immutabledict(dict_) if dict_ else util.EMPTY_DICT
)
self.dataclass_setup_arguments = getattr(
self.cls, "_sa_apply_dc_transforms", None
)
self.is_dataclass_prior_to_mapping = dataclasses.is_dataclass(cls_)
self.allow_dataclass_fields = False
self.allow_unmapped_annotations = True
self.collected_attributes = {}
self.collected_annotations = {}
self._scan_attributes()
self._setup_dataclasses_transforms(
enable_descriptor_defaults=False, revert=True
)
def _scan_attributes(self) -> None:
cls = self.cls
clsdict_view = self.clsdict_view
collected_attributes = self.collected_attributes
_include_dunders = self._include_dunders
attribute_is_overridden = self._cls_attr_override_checker(self.cls)
local_attributes_for_class = self._cls_attr_resolver(cls)
for (
name,
obj,
annotation,
is_dataclass_field,
) in local_attributes_for_class():
if name in _include_dunders:
continue
elif is_dataclass_field and (
name not in clsdict_view or clsdict_view[name] is not obj
):
# here, we are definitely looking at the target class
# and not a superclass. this is currently a
# dataclass-only path. if the name is only
# a dataclass field and isn't in local cls.__dict__,
# put the object there.
# assert that the dataclass-enabled resolver agrees
# with what we are seeing
assert not attribute_is_overridden(name, obj)
if _is_declarative_props(obj):
obj = obj.fget()
collected_attributes[name] = obj
self._collect_annotation(name, annotation, cls, False, obj)
else:
self._collect_annotation(name, annotation, cls, None, obj)
if name in clsdict_view:
collected_attributes[name] = obj
@util.preload_module("sqlalchemy.orm.decl_api")
def _as_dc_declaredattr(
field_metadata: Mapping[str, Any], sa_dataclass_metadata_key: str
) -> Any:
# wrap lambdas inside dataclass fields inside an ad-hoc declared_attr.
# we can't write it because field.metadata is immutable :( so we have
# to go through extra trouble to compare these
decl_api = util.preloaded.orm_decl_api
obj = field_metadata[sa_dataclass_metadata_key]
if callable(obj) and not isinstance(obj, decl_api.declared_attr):
return decl_api.declared_attr(obj)
else:
return obj
| _UnmappedDataclassConfig |
python | encode__django-rest-framework | rest_framework/schemas/openapi.py | {
"start": 3954,
"end": 26873
} | class ____(ViewInspector):
def __init__(self, tags=None, operation_id_base=None, component_name=None):
"""
:param operation_id_base: user-defined name in operationId. If empty, it will be deducted from the Model/Serializer/View name.
:param component_name: user-defined component's name. If empty, it will be deducted from the Serializer's class name.
"""
if tags and not all(isinstance(tag, str) for tag in tags):
raise ValueError('tags must be a list or tuple of string.')
self._tags = tags
self.operation_id_base = operation_id_base
self.component_name = component_name
super().__init__()
request_media_types = []
response_media_types = []
method_mapping = {
'get': 'retrieve',
'post': 'create',
'put': 'update',
'patch': 'partialUpdate',
'delete': 'destroy',
}
def get_operation(self, path, method):
operation = {}
operation['operationId'] = self.get_operation_id(path, method)
operation['description'] = self.get_description(path, method)
parameters = []
parameters += self.get_path_parameters(path, method)
parameters += self.get_pagination_parameters(path, method)
parameters += self.get_filter_parameters(path, method)
operation['parameters'] = parameters
request_body = self.get_request_body(path, method)
if request_body:
operation['requestBody'] = request_body
operation['responses'] = self.get_responses(path, method)
operation['tags'] = self.get_tags(path, method)
return operation
def get_component_name(self, serializer):
"""
Compute the component's name from the serializer.
Raise an exception if the serializer's class name is "Serializer" (case-insensitive).
"""
if self.component_name is not None:
return self.component_name
# use the serializer's class name as the component name.
component_name = serializer.__class__.__name__
# We remove the "serializer" string from the class name.
pattern = re.compile("serializer", re.IGNORECASE)
component_name = pattern.sub("", component_name)
if component_name == "":
raise Exception(
'"{}" is an invalid class name for schema generation. '
'Serializer\'s class name should be unique and explicit. e.g. "ItemSerializer"'
.format(serializer.__class__.__name__)
)
return component_name
def get_components(self, path, method):
"""
Return components with their properties from the serializer.
"""
if method.lower() == 'delete':
return {}
request_serializer = self.get_request_serializer(path, method)
response_serializer = self.get_response_serializer(path, method)
components = {}
if isinstance(request_serializer, serializers.Serializer):
component_name = self.get_component_name(request_serializer)
content = self.map_serializer(request_serializer)
components.setdefault(component_name, content)
if isinstance(response_serializer, serializers.Serializer):
component_name = self.get_component_name(response_serializer)
content = self.map_serializer(response_serializer)
components.setdefault(component_name, content)
return components
def _to_camel_case(self, snake_str):
components = snake_str.split('_')
# We capitalize the first letter of each component except the first one
# with the 'title' method and join them together.
return components[0] + ''.join(x.title() for x in components[1:])
def get_operation_id_base(self, path, method, action):
"""
Compute the base part for operation ID from the model, serializer or view name.
"""
model = getattr(getattr(self.view, 'queryset', None), 'model', None)
if self.operation_id_base is not None:
name = self.operation_id_base
# Try to deduce the ID from the view's model
elif model is not None:
name = model.__name__
# Try with the serializer class name
elif self.get_serializer(path, method) is not None:
name = self.get_serializer(path, method).__class__.__name__
if name.endswith('Serializer'):
name = name[:-10]
# Fallback to the view name
else:
name = self.view.__class__.__name__
if name.endswith('APIView'):
name = name[:-7]
elif name.endswith('View'):
name = name[:-4]
# Due to camel-casing of classes and `action` being lowercase, apply title in order to find if action truly
# comes at the end of the name
if name.endswith(action.title()): # ListView, UpdateAPIView, ThingDelete ...
name = name[:-len(action)]
if action == 'list':
assert inflection, '`inflection` must be installed for OpenAPI schema support.'
name = inflection.pluralize(name)
return name
def get_operation_id(self, path, method):
"""
Compute an operation ID from the view type and get_operation_id_base method.
"""
method_name = getattr(self.view, 'action', method.lower())
if is_list_view(path, method, self.view):
action = 'list'
elif method_name not in self.method_mapping:
action = self._to_camel_case(method_name)
else:
action = self.method_mapping[method.lower()]
name = self.get_operation_id_base(path, method, action)
return action + name
def get_path_parameters(self, path, method):
"""
Return a list of parameters from templated path variables.
"""
assert uritemplate, '`uritemplate` must be installed for OpenAPI schema support.'
model = getattr(getattr(self.view, 'queryset', None), 'model', None)
parameters = []
for variable in uritemplate.variables(path):
description = ''
if model is not None: # TODO: test this.
# Attempt to infer a field description if possible.
try:
model_field = model._meta.get_field(variable)
except Exception:
model_field = None
if model_field is not None and model_field.help_text:
description = force_str(model_field.help_text)
elif model_field is not None and model_field.primary_key:
description = get_pk_description(model, model_field)
parameter = {
"name": variable,
"in": "path",
"required": True,
"description": description,
'schema': {
'type': 'string', # TODO: integer, pattern, ...
},
}
parameters.append(parameter)
return parameters
def get_filter_parameters(self, path, method):
if not self.allows_filters(path, method):
return []
parameters = []
for filter_backend in self.view.filter_backends:
parameters += filter_backend().get_schema_operation_parameters(self.view)
return parameters
def allows_filters(self, path, method):
"""
Determine whether to include filter Fields in schema.
Default implementation looks for ModelViewSet or GenericAPIView
actions/methods that cause filtering on the default implementation.
"""
if getattr(self.view, 'filter_backends', None) is None:
return False
if hasattr(self.view, 'action'):
return self.view.action in ["list", "retrieve", "update", "partial_update", "destroy"]
return method.lower() in ["get", "put", "patch", "delete"]
def get_pagination_parameters(self, path, method):
view = self.view
if not is_list_view(path, method, view):
return []
paginator = self.get_paginator()
if not paginator:
return []
return paginator.get_schema_operation_parameters(view)
def map_choicefield(self, field):
choices = list(dict.fromkeys(field.choices)) # preserve order and remove duplicates
if all(isinstance(choice, bool) for choice in choices):
type = 'boolean'
elif all(isinstance(choice, int) for choice in choices):
type = 'integer'
elif all(isinstance(choice, (int, float, Decimal)) for choice in choices): # `number` includes `integer`
# Ref: https://tools.ietf.org/html/draft-wright-json-schema-validation-00#section-5.21
type = 'number'
elif all(isinstance(choice, str) for choice in choices):
type = 'string'
else:
type = None
mapping = {
# The value of `enum` keyword MUST be an array and SHOULD be unique.
# Ref: https://tools.ietf.org/html/draft-wright-json-schema-validation-00#section-5.20
'enum': choices
}
# If We figured out `type` then and only then we should set it. It must be a string.
# Ref: https://swagger.io/docs/specification/data-models/data-types/#mixed-type
# It is optional but it can not be null.
# Ref: https://tools.ietf.org/html/draft-wright-json-schema-validation-00#section-5.21
if type:
mapping['type'] = type
return mapping
def map_field(self, field):
# Nested Serializers, `many` or not.
if isinstance(field, serializers.ListSerializer):
return {
'type': 'array',
'items': self.map_serializer(field.child)
}
if isinstance(field, serializers.Serializer):
data = self.map_serializer(field)
data['type'] = 'object'
return data
# Related fields.
if isinstance(field, serializers.ManyRelatedField):
return {
'type': 'array',
'items': self.map_field(field.child_relation)
}
if isinstance(field, serializers.PrimaryKeyRelatedField):
if getattr(field, "pk_field", False):
return self.map_field(field=field.pk_field)
model = getattr(field.queryset, 'model', None)
if model is not None:
model_field = model._meta.pk
if isinstance(model_field, models.AutoField):
return {'type': 'integer'}
# ChoiceFields (single and multiple).
# Q:
# - Is 'type' required?
# - can we determine the TYPE of a choicefield?
if isinstance(field, serializers.MultipleChoiceField):
return {
'type': 'array',
'items': self.map_choicefield(field)
}
if isinstance(field, serializers.ChoiceField):
return self.map_choicefield(field)
# ListField.
if isinstance(field, serializers.ListField):
mapping = {
'type': 'array',
'items': {},
}
if not isinstance(field.child, _UnvalidatedField):
mapping['items'] = self.map_field(field.child)
return mapping
# DateField and DateTimeField type is string
if isinstance(field, serializers.DateField):
return {
'type': 'string',
'format': 'date',
}
if isinstance(field, serializers.DateTimeField):
return {
'type': 'string',
'format': 'date-time',
}
# "Formats such as "email", "uuid", and so on, MAY be used even though undefined by this specification."
# see: https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#data-types
# see also: https://swagger.io/docs/specification/data-models/data-types/#string
if isinstance(field, serializers.EmailField):
return {
'type': 'string',
'format': 'email'
}
if isinstance(field, serializers.URLField):
return {
'type': 'string',
'format': 'uri'
}
if isinstance(field, serializers.UUIDField):
return {
'type': 'string',
'format': 'uuid'
}
if isinstance(field, serializers.IPAddressField):
content = {
'type': 'string',
}
if field.protocol != 'both':
content['format'] = field.protocol
return content
if isinstance(field, serializers.DecimalField):
if getattr(field, 'coerce_to_string', api_settings.COERCE_DECIMAL_TO_STRING):
content = {
'type': 'string',
'format': 'decimal',
}
else:
content = {
'type': 'number'
}
if field.decimal_places:
content['multipleOf'] = float('.' + (field.decimal_places - 1) * '0' + '1')
if field.max_whole_digits:
content['maximum'] = int(field.max_whole_digits * '9') + 1
content['minimum'] = -content['maximum']
self._map_min_max(field, content)
return content
if isinstance(field, serializers.FloatField):
content = {
'type': 'number',
}
self._map_min_max(field, content)
return content
if isinstance(field, serializers.IntegerField):
content = {
'type': 'integer'
}
self._map_min_max(field, content)
# 2147483647 is max for int32_size, so we use int64 for format
if int(content.get('maximum', 0)) > 2147483647 or int(content.get('minimum', 0)) > 2147483647:
content['format'] = 'int64'
return content
if isinstance(field, serializers.FileField):
return {
'type': 'string',
'format': 'binary'
}
# Simplest cases, default to 'string' type:
FIELD_CLASS_SCHEMA_TYPE = {
serializers.BooleanField: 'boolean',
serializers.JSONField: 'object',
serializers.DictField: 'object',
serializers.HStoreField: 'object',
}
return {'type': FIELD_CLASS_SCHEMA_TYPE.get(field.__class__, 'string')}
def _map_min_max(self, field, content):
if field.max_value:
content['maximum'] = field.max_value
if field.min_value:
content['minimum'] = field.min_value
def map_serializer(self, serializer):
# Assuming we have a valid serializer instance.
required = []
properties = {}
for field in serializer.fields.values():
if isinstance(field, serializers.HiddenField):
continue
if field.required and not serializer.partial:
required.append(self.get_field_name(field))
schema = self.map_field(field)
if field.read_only:
schema['readOnly'] = True
if field.write_only:
schema['writeOnly'] = True
if field.allow_null:
schema['nullable'] = True
if field.default is not None and field.default != empty and not callable(field.default):
schema['default'] = field.default
if field.help_text:
schema['description'] = str(field.help_text)
self.map_field_validators(field, schema)
properties[self.get_field_name(field)] = schema
result = {
'type': 'object',
'properties': properties
}
if required:
result['required'] = required
return result
def map_field_validators(self, field, schema):
"""
map field validators
"""
for v in field.validators:
# "Formats such as "email", "uuid", and so on, MAY be used even though undefined by this specification."
# https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#data-types
if isinstance(v, EmailValidator):
schema['format'] = 'email'
if isinstance(v, URLValidator):
schema['format'] = 'uri'
if isinstance(v, RegexValidator):
# In Python, the token \Z does what \z does in other engines.
# https://stackoverflow.com/questions/53283160
schema['pattern'] = v.regex.pattern.replace('\\Z', '\\z')
elif isinstance(v, MaxLengthValidator):
attr_name = 'maxLength'
if isinstance(field, serializers.ListField):
attr_name = 'maxItems'
schema[attr_name] = v.limit_value
elif isinstance(v, MinLengthValidator):
attr_name = 'minLength'
if isinstance(field, serializers.ListField):
attr_name = 'minItems'
schema[attr_name] = v.limit_value
elif isinstance(v, MaxValueValidator):
schema['maximum'] = v.limit_value
elif isinstance(v, MinValueValidator):
schema['minimum'] = v.limit_value
elif isinstance(v, DecimalValidator) and \
not getattr(field, 'coerce_to_string', api_settings.COERCE_DECIMAL_TO_STRING):
if v.decimal_places:
schema['multipleOf'] = float('.' + (v.decimal_places - 1) * '0' + '1')
if v.max_digits:
digits = v.max_digits
if v.decimal_places is not None and v.decimal_places > 0:
digits -= v.decimal_places
schema['maximum'] = int(digits * '9') + 1
schema['minimum'] = -schema['maximum']
def get_field_name(self, field):
"""
Override this method if you want to change schema field name.
For example, convert snake_case field name to camelCase.
"""
return field.field_name
def get_paginator(self):
pagination_class = getattr(self.view, 'pagination_class', None)
if pagination_class:
return pagination_class()
return None
def map_parsers(self, path, method):
return list(map(attrgetter('media_type'), self.view.parser_classes))
def map_renderers(self, path, method):
media_types = []
for renderer in self.view.renderer_classes:
# BrowsableAPIRenderer not relevant to OpenAPI spec
if issubclass(renderer, renderers.BrowsableAPIRenderer):
continue
media_types.append(renderer.media_type)
return media_types
def get_serializer(self, path, method):
view = self.view
if not hasattr(view, 'get_serializer'):
return None
try:
return view.get_serializer()
except exceptions.APIException:
warnings.warn('{}.get_serializer() raised an exception during '
'schema generation. Serializer fields will not be '
'generated for {} {}.'
.format(view.__class__.__name__, method, path))
return None
def get_request_serializer(self, path, method):
"""
Override this method if your view uses a different serializer for
handling request body.
"""
return self.get_serializer(path, method)
def get_response_serializer(self, path, method):
"""
Override this method if your view uses a different serializer for
populating response data.
"""
return self.get_serializer(path, method)
def get_reference(self, serializer):
return {'$ref': f'#/components/schemas/{self.get_component_name(serializer)}'}
def get_request_body(self, path, method):
if method not in ('PUT', 'PATCH', 'POST'):
return {}
self.request_media_types = self.map_parsers(path, method)
serializer = self.get_request_serializer(path, method)
if not isinstance(serializer, serializers.Serializer):
item_schema = {}
else:
item_schema = self.get_reference(serializer)
return {
'content': {
ct: {'schema': item_schema}
for ct in self.request_media_types
}
}
def get_responses(self, path, method):
if method == 'DELETE':
return {
'204': {
'description': ''
}
}
self.response_media_types = self.map_renderers(path, method)
serializer = self.get_response_serializer(path, method)
if not isinstance(serializer, serializers.Serializer):
item_schema = {}
else:
item_schema = self.get_reference(serializer)
if is_list_view(path, method, self.view):
response_schema = {
'type': 'array',
'items': item_schema,
}
paginator = self.get_paginator()
if paginator:
response_schema = paginator.get_paginated_response_schema(response_schema)
else:
response_schema = item_schema
status_code = '201' if method == 'POST' else '200'
return {
status_code: {
'content': {
ct: {'schema': response_schema}
for ct in self.response_media_types
},
# description is a mandatory property,
# https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.2.md#responseObject
# TODO: put something meaningful into it
'description': ""
}
}
def get_tags(self, path, method):
# If user have specified tags, use them.
if self._tags:
return self._tags
# First element of a specific path could be valid tag. This is a fallback solution.
# PUT, PATCH, GET(Retrieve), DELETE: /user_profile/{id}/ tags = [user-profile]
# POST, GET(List): /user_profile/ tags = [user-profile]
if path.startswith('/'):
path = path[1:]
return [path.split('/')[0].replace('_', '-')]
| AutoSchema |
python | getsentry__sentry | tests/sentry/uptime/endpoints/test_project_uptime_alert_details.py | {
"start": 9220,
"end": 9912
} | class ____(ProjectUptimeAlertDetailsBaseEndpointTest):
method = "delete"
def test_user(self) -> None:
detector = self.create_uptime_detector()
with self.tasks():
self.get_success_response(
self.organization.slug,
detector.project.slug,
detector.id,
status_code=202,
)
with pytest.raises(UptimeSubscription.DoesNotExist):
get_uptime_subscription(detector)
def test_not_found(self) -> None:
resp = self.get_error_response(self.organization.slug, self.project.slug, 3)
assert resp.status_code == 404
| ProjectUptimeAlertDetailsDeleteEndpointTest |
python | apache__airflow | providers/fab/tests/unit/fab/auth_manager/test_security.py | {
"start": 3871,
"end": 4194
} | class ____(ModelView):
datamodel = SQLAInterface(SomeModel)
base_permissions = [
"can_list",
"can_show",
"can_add",
permissions.ACTION_CAN_EDIT,
permissions.ACTION_CAN_DELETE,
]
list_columns = ["field_string", "field_integer", "field_float", "field_date"]
| SomeModelView |
python | numpy__numpy | numpy/distutils/fcompiler/pathf95.py | {
"start": 85,
"end": 1061
} | class ____(FCompiler):
compiler_type = 'pathf95'
description = 'PathScale Fortran Compiler'
version_pattern = r'PathScale\(TM\) Compiler Suite: Version (?P<version>[\d.]+)'
executables = {
'version_cmd' : ["pathf95", "-version"],
'compiler_f77' : ["pathf95", "-fixedform"],
'compiler_fix' : ["pathf95", "-fixedform"],
'compiler_f90' : ["pathf95"],
'linker_so' : ["pathf95", "-shared"],
'archiver' : ["ar", "-cr"],
'ranlib' : ["ranlib"]
}
pic_flags = ['-fPIC']
module_dir_switch = '-module ' # Don't remove ending space!
module_include_switch = '-I'
def get_flags_opt(self):
return ['-O3']
def get_flags_debug(self):
return ['-g']
if __name__ == '__main__':
from distutils import log
log.set_verbosity(2)
from numpy.distutils import customized_fcompiler
print(customized_fcompiler(compiler='pathf95').get_version())
| PathScaleFCompiler |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/methodOverride2.py | {
"start": 139,
"end": 788
} | class ____:
def f1(self, *, kwarg0: int) -> None: ...
def f2(self, *, kwarg0: int) -> None: ...
def f3(self, *, kwarg0: int) -> None: ...
def f4(self, *, kwarg0: int) -> None: ...
def g1(self, a: int, /, b: str, *, kwarg0: int) -> None: ...
def g2(self, a: int, /, b: str, *, kwarg0: int) -> None: ...
def g3(self, a: int, /, b: str, *, kwarg0: int) -> None: ...
def g4(self, a: int, /, b: str, *, kwarg0: int) -> None: ...
def g5(self, a: int, /, b: str, *, kwarg0: int) -> None: ...
def g6(self, a: int, /, b: str, *, kwarg0: int) -> None: ...
def h1(self, a: int, *args: int) -> None: ...
| Base1 |
python | pypa__pipenv | pipenv/vendor/pipdeptree/_models/dag.py | {
"start": 10056,
"end": 11302
} | class ____(PackageDAG):
"""
Representation of Package dependencies in the reverse order.
Similar to it's super class `PackageDAG`, the underlying datastructure is a dict, but here the keys are expected to
be of type `ReqPackage` and each item in the values of type `DistPackage`.
Typically, this object will be obtained by calling `PackageDAG.reverse`.
"""
def reverse(self) -> PackageDAG: # type: ignore[override]
"""
Reverse the already reversed DAG to get the PackageDAG again.
:returns: reverse of the reversed DAG
"""
m: defaultdict[DistPackage, list[ReqPackage]] = defaultdict(list)
child_keys = {r.key for r in chain.from_iterable(self._obj.values())}
for k, vs in self._obj.items():
for v in vs:
assert isinstance(v, DistPackage)
node = next((p for p in m if p.key == v.key), v.as_parent_of(None))
m[node].append(k)
if k.key not in child_keys:
assert isinstance(k, ReqPackage)
assert k.dist is not None
m[k.dist] = []
return PackageDAG(dict(m))
__all__ = [
"PackageDAG",
"ReversedPackageDAG",
]
| ReversedPackageDAG |
python | walkccc__LeetCode | solutions/70. Climbing Stairs/70.py | {
"start": 0,
"end": 234
} | class ____:
def climbStairs(self, n: int) -> int:
# dp[i] := the number of ways to climb to the i-th stair
dp = [1, 1] + [0] * (n - 1)
for i in range(2, n + 1):
dp[i] = dp[i - 1] + dp[i - 2]
return dp[n]
| Solution |
python | imageio__imageio | imageio/plugins/simpleitk.py | {
"start": 1782,
"end": 4106
} | class ____(Format):
"""See :mod:`imageio.plugins.simpleitk`"""
def _can_read(self, request):
# If the request is a format that only this plugin can handle,
# we report that we can do it; a useful error will be raised
# when simpleitk is not installed. For the more common formats
# we only report that we can read if the library is installed.
if request.extension in ITK_FORMATS:
return True
if has_module("itk.ImageIOBase") or has_module("SimpleITK"):
return request.extension in ALL_FORMATS
def _can_write(self, request):
if request.extension in ITK_FORMATS:
return True
if has_module("itk.ImageIOBase") or has_module("SimpleITK"):
return request.extension in ALL_FORMATS
# -- reader
class Reader(Format.Reader):
def _open(self, pixel_type=None, fallback_only=None, **kwargs):
if not _itk:
load_lib()
args = ()
if pixel_type is not None:
args += (pixel_type,)
if fallback_only is not None:
args += (fallback_only,)
self._img = _read_function(self.request.get_local_filename(), *args)
def _get_length(self):
return 1
def _close(self):
pass
def _get_data(self, index):
# Get data
if index != 0:
error_msg = "Index out of range while reading from itk file"
raise IndexError(error_msg)
# Return array and empty meta data
return _itk.GetArrayFromImage(self._img), {}
def _get_meta_data(self, index):
error_msg = "The itk plugin does not support meta data, currently."
raise RuntimeError(error_msg)
# -- writer
class Writer(Format.Writer):
def _open(self):
if not _itk:
load_lib()
def _close(self):
pass
def _append_data(self, im, meta):
_itk_img = _itk.GetImageFromArray(im)
_write_function(_itk_img, self.request.get_local_filename())
def set_meta_data(self, meta):
error_msg = "The itk plugin does not support meta data, currently."
raise RuntimeError(error_msg)
| ItkFormat |
python | huggingface__transformers | src/transformers/models/patchtst/modeling_patchtst.py | {
"start": 67770,
"end": 75719
} | class ____(PatchTSTPreTrainedModel):
def __init__(self, config: PatchTSTConfig):
super().__init__(config)
# Turn off masking
if config.do_mask_input:
logger.warning("Setting `do_mask_input` parameter to False.")
config.do_mask_input = False
self.model = PatchTSTModel(config)
if config.loss == "mse":
self.distribution_output = None
else:
if config.distribution_output == "student_t":
self.distribution_output = StudentTOutput(dim=config.prediction_length)
elif config.distribution_output == "normal":
self.distribution_output = NormalOutput(dim=config.prediction_length)
elif config.distribution_output == "negative_binomial":
self.distribution_output = NegativeBinomialOutput(dim=config.prediction_length)
else:
raise ValueError(f"Unknown distribution output {config.distribution_output}")
self.head = PatchTSTPredictionHead(
config, self.model.patchifier.num_patches, distribution_output=self.distribution_output
)
# Initialize weights and apply final processing
self.post_init()
def forward(
self,
past_values: torch.Tensor,
past_observed_mask: Optional[torch.Tensor] = None,
future_values: Optional[torch.Tensor] = None,
output_hidden_states: Optional[bool] = None,
output_attentions: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[tuple, PatchTSTForPredictionOutput]:
r"""
Parameters:
past_values (`torch.Tensor` of shape `(bs, sequence_length, num_input_channels)`, *required*):
Input sequence to the model
past_observed_mask (`torch.BoolTensor` of shape `(batch_size, sequence_length, num_input_channels)`, *optional*):
Boolean mask to indicate which `past_values` were observed and which were missing. Mask values selected
in `[0, 1]`:
- 1 for values that are **observed**,
- 0 for values that are **missing** (i.e. NaNs that were replaced by zeros).
future_values (`torch.Tensor` of shape `(bs, forecast_len, num_input_channels)`, *optional*):
Future target values associated with the `past_values`
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers
output_attentions (`bool`, *optional*):
Whether or not to return the output attention of all layers
return_dict (`bool`, *optional*):
Whether or not to return a `ModelOutput` instead of a plain tuple.
Returns:
`PatchTSTForPredictionOutput` or tuple of `torch.Tensor` (if `return_dict`=False or
`config.return_dict`=False)
Examples:
```python
>>> from huggingface_hub import hf_hub_download
>>> import torch
>>> from transformers import PatchTSTConfig, PatchTSTForPrediction
>>> file = hf_hub_download(
... repo_id="hf-internal-testing/etth1-hourly-batch", filename="train-batch.pt", repo_type="dataset"
... )
>>> batch = torch.load(file)
>>> # Prediction task with 7 input channels and prediction length is 96
>>> model = PatchTSTForPrediction.from_pretrained("namctin/patchtst_etth1_forecast")
>>> # during training, one provides both past and future values
>>> outputs = model(
... past_values=batch["past_values"],
... future_values=batch["future_values"],
... )
>>> loss = outputs.loss
>>> loss.backward()
>>> # during inference, one only provides past values, the model outputs future values
>>> outputs = model(past_values=batch["past_values"])
>>> prediction_outputs = outputs.prediction_outputs
```"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
# get model output
model_output = self.model(
past_values=past_values,
past_observed_mask=past_observed_mask,
output_hidden_states=output_hidden_states,
output_attentions=output_attentions,
return_dict=True,
)
# get output head
y_hat = self.head(model_output.last_hidden_state)
loss_val = None
if self.distribution_output:
y_hat_out = y_hat
else:
y_hat_out = y_hat * model_output.scale + model_output.loc
if future_values is not None:
if self.distribution_output:
distribution = self.distribution_output.distribution(
y_hat, loc=model_output.loc, scale=model_output.scale
)
loss_val = nll(distribution, future_values)
# take average of the loss
loss_val = weighted_average(loss_val)
else:
loss = nn.MSELoss(reduction="mean")
loss_val = loss(y_hat_out, future_values)
loc = model_output.loc
scale = model_output.scale
if not return_dict:
outputs = (y_hat_out,) + model_output[1:-1]
outputs = (loss_val,) + outputs if loss_val is not None else outputs
return outputs
return PatchTSTForPredictionOutput(
loss=loss_val,
prediction_outputs=y_hat_out,
hidden_states=model_output.hidden_states,
attentions=model_output.attentions,
loc=loc,
scale=scale,
)
@torch.no_grad()
def generate(
self,
past_values: torch.Tensor,
past_observed_mask: Optional[torch.Tensor] = None,
) -> SamplePatchTSTOutput:
"""
Generate sequences of sample predictions from a model with a probability distribution head.
Parameters:
past_values (`torch.FloatTensor` of shape `(batch_size, sequence_length, num_input_channels)`):
Past values of the time series that serves as context in order to predict the future.
past_observed_mask (`torch.BoolTensor` of shape `(batch_size, sequence_length, num_input_channels)`, *optional*):
Boolean mask to indicate which `past_values` were observed and which were missing. Mask values selected
in `[0, 1]`:
- 1 for values that are **observed**,
- 0 for values that are **missing** (i.e. NaNs that were replaced by zeros).
Return:
[`SamplePatchTSTOutput`] where the outputs `sequences` tensor will have shape `(batch_size, number of
samples, prediction_length, 1)` or `(batch_size, number of samples, prediction_length, num_input_channels)`
for multivariate predictions.
"""
# get number of samples
num_parallel_samples = self.config.num_parallel_samples
# get model output
outputs = self(
past_values=past_values,
future_values=None,
past_observed_mask=past_observed_mask,
output_hidden_states=False,
)
if self.distribution_output:
# get distribution
distribution = self.distribution_output.distribution(
outputs.prediction_outputs, loc=outputs.loc, scale=outputs.scale
)
# get samples: list of [bs x forecast_len x num_channels]
samples = [distribution.sample() for _ in range(num_parallel_samples)]
# samples: [bs x num_samples x forecast_len x num_channels]
samples = torch.stack(samples, dim=1)
else:
samples = outputs.prediction_outputs.unsqueeze(1)
return SamplePatchTSTOutput(sequences=samples)
| PatchTSTForPrediction |
python | huggingface__transformers | src/transformers/models/roc_bert/modeling_roc_bert.py | {
"start": 1857,
"end": 9063
} | class ____(nn.Module):
"""Construct the embeddings from word, position, shape, pronunciation and token_type embeddings."""
def __init__(self, config):
super().__init__()
self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
self.pronunciation_embed = nn.Embedding(
config.pronunciation_vocab_size, config.pronunciation_embed_dim, padding_idx=config.pad_token_id
)
self.shape_embed = nn.Embedding(
config.shape_vocab_size, config.shape_embed_dim, padding_idx=config.pad_token_id
)
self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
self.enable_pronunciation = config.enable_pronunciation
self.enable_shape = config.enable_shape
if config.concat_input:
input_dim = config.hidden_size
if self.enable_pronunciation:
pronunciation_dim = config.pronunciation_embed_dim
input_dim += pronunciation_dim
if self.enable_shape:
shape_dim = config.shape_embed_dim
input_dim += shape_dim
self.map_inputs_layer = torch.nn.Linear(input_dim, config.hidden_size)
else:
self.map_inputs_layer = None
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
# position_ids (1, len position emb) is contiguous in memory and exported when serialized
self.register_buffer(
"position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False
)
self.register_buffer(
"token_type_ids",
torch.zeros(self.position_ids.size(), dtype=torch.long, device=self.position_ids.device),
persistent=False,
)
def forward(
self,
input_ids=None,
input_shape_ids=None,
input_pronunciation_ids=None,
token_type_ids=None,
position_ids=None,
inputs_embeds=None,
past_key_values_length=0,
):
if input_ids is not None:
input_shape = input_ids.size()
else:
input_shape = inputs_embeds.size()[:-1]
batch_size, seq_length = input_shape
if position_ids is None:
position_ids = self.position_ids[:, past_key_values_length : seq_length + past_key_values_length]
# Setting the token_type_ids to the registered buffer in constructor where it is all zeros, which usually occurs
# when its auto-generated, registered buffer helps users when tracing the model without passing token_type_ids, solves
# issue #5664
if token_type_ids is None:
if hasattr(self, "token_type_ids"):
# NOTE: We assume either pos ids to have bsz == 1 (broadcastable) or bsz == effective bsz (input_shape[0])
buffered_token_type_ids = self.token_type_ids.expand(position_ids.shape[0], -1)
buffered_token_type_ids = torch.gather(buffered_token_type_ids, dim=1, index=position_ids)
token_type_ids = buffered_token_type_ids.expand(batch_size, seq_length)
else:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)
if self.map_inputs_layer is None:
if inputs_embeds is None:
inputs_embeds = self.word_embeddings(input_ids)
token_type_embeddings = self.token_type_embeddings(token_type_ids)
embeddings = inputs_embeds + token_type_embeddings
position_embeddings = self.position_embeddings(position_ids)
embeddings = embeddings + position_embeddings
embeddings = self.LayerNorm(embeddings)
embeddings = self.dropout(embeddings)
denominator = 1
embedding_in = torch.clone(embeddings)
if self.enable_shape and input_shape_ids is not None:
embedding_shape = self.shape_embed(input_shape_ids)
embedding_in += embedding_shape
denominator += 1
if self.enable_pronunciation and input_pronunciation_ids is not None:
embedding_pronunciation = self.pronunciation_embed(input_pronunciation_ids)
embedding_in += embedding_pronunciation
denominator += 1
embedding_in /= denominator
return embedding_in
else:
if inputs_embeds is None:
inputs_embeds = self.word_embeddings(input_ids) # embedding_word
device = inputs_embeds.device
embedding_in = torch.clone(inputs_embeds)
if self.enable_shape:
if input_shape_ids is None:
input_shape_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
embedding_shape = self.shape_embed(input_shape_ids)
embedding_in = torch.cat((embedding_in, embedding_shape), -1)
if self.enable_pronunciation:
if input_pronunciation_ids is None:
input_pronunciation_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
embedding_pronunciation = self.pronunciation_embed(input_pronunciation_ids)
embedding_in = torch.cat((embedding_in, embedding_pronunciation), -1)
embedding_in = self.map_inputs_layer(embedding_in) # batch_size * seq_len * hidden_dim
token_type_embeddings = self.token_type_embeddings(token_type_ids)
embedding_in += token_type_embeddings
position_embeddings = self.position_embeddings(position_ids)
embedding_in += position_embeddings
embedding_in = self.LayerNorm(embedding_in)
embedding_in = self.dropout(embedding_in)
return embedding_in
# Copied from transformers.models.bert.modeling_bert.eager_attention_forward
def eager_attention_forward(
module: nn.Module,
query: torch.Tensor,
key: torch.Tensor,
value: torch.Tensor,
attention_mask: Optional[torch.Tensor],
scaling: Optional[float] = None,
dropout: float = 0.0,
**kwargs: Unpack[TransformersKwargs],
):
if scaling is None:
scaling = query.size(-1) ** -0.5
# Take the dot product between "query" and "key" to get the raw attention scores.
attn_weights = torch.matmul(query, key.transpose(2, 3)) * scaling
if attention_mask is not None:
attention_mask = attention_mask[:, :, :, : key.shape[-2]]
attn_weights = attn_weights + attention_mask
attn_weights = nn.functional.softmax(attn_weights, dim=-1)
attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)
attn_output = torch.matmul(attn_weights, value)
attn_output = attn_output.transpose(1, 2).contiguous()
return attn_output, attn_weights
# Copied from transformers.models.bert.modeling_bert.BertSelfAttention with Bert->RoCBert
| RoCBertEmbeddings |
python | ray-project__ray | python/ray/tests/runtime_env_container/test_serve_basic.py | {
"start": 653,
"end": 1056
} | class ____:
def __call__(self):
with open("file.txt") as f:
return f.read().strip()
def check_application(app_handle: DeploymentHandle, expected: str):
ref = app_handle.remote()
assert ref.result() == expected
return True
h = serve.run(Model.bind())
wait_for_condition(
check_application,
app_handle=h,
expected="helloworldalice",
timeout=300,
)
| Model |
python | getsentry__sentry | tests/sentry/seer/autofix/test_autofix.py | {
"start": 970,
"end": 12343
} | class ____(TestCase):
def test_convert_profile_to_execution_tree(self) -> None:
profile_data = {
"profile": {
"frames": [
{
"function": "main",
"module": "app.main",
"filename": "main.py",
"lineno": 10,
"in_app": True,
},
{
"function": "helper",
"module": "app.utils",
"filename": "utils.py",
"lineno": 20,
"in_app": True,
},
{
"function": "external",
"module": "external.lib",
"filename": "lib.py",
"lineno": 30,
"in_app": False,
},
],
"stacks": [
[2, 1, 0]
], # One stack with three frames. In a call stack, the first function is the last frame
"samples": [{"stack_id": 0, "thread_id": "1", "elapsed_since_start_ns": 10000000}],
"thread_metadata": {"1": {"name": "MainThread"}},
}
}
execution_tree, selected_thread_id = _convert_profile_to_execution_tree(profile_data)
# Should only include in_app frames from the selected thread (MainThread in this case)
assert selected_thread_id == "1"
assert len(execution_tree) == 1 # One root node
root = execution_tree[0]
assert root["function"] == "main"
assert root["module"] == "app.main"
assert root["filename"] == "main.py"
assert root["lineno"] == 10
assert len(root["children"]) == 1
child = root["children"][0]
assert child["function"] == "helper"
assert child["module"] == "app.utils"
assert child["filename"] == "utils.py"
assert child["lineno"] == 20
assert len(child["children"]) == 0 # No children for the last in_app frame
def test_convert_profile_to_execution_tree_non_main_thread(self) -> None:
"""Test that the thread with in_app frames is selected (even if not MainThread)"""
profile_data = {
"profile": {
"frames": [
{
"function": "worker",
"module": "app.worker",
"filename": "worker.py",
"lineno": 10,
"in_app": True,
}
],
"stacks": [[0]],
"samples": [{"stack_id": 0, "thread_id": "2", "elapsed_since_start_ns": 10000000}],
"thread_metadata": {"2": {"name": "WorkerThread"}, "3": {"name": "WorkerThread2"}},
}
}
execution_tree, selected_thread_id = _convert_profile_to_execution_tree(profile_data)
# Should include the worker thread since it has in_app frames
assert selected_thread_id == "2"
assert len(execution_tree) == 1
assert execution_tree[0]["function"] == "worker"
assert execution_tree[0]["filename"] == "worker.py"
def test_convert_profile_to_execution_tree_merges_duplicate_frames(self) -> None:
"""Test that duplicate frames in different samples are merged correctly"""
profile_data = {
"profile": {
"frames": [
{
"function": "main",
"module": "app.main",
"filename": "main.py",
"lineno": 10,
"in_app": True,
}
],
"stacks": [[0], [0]], # Two stacks with the same frame
"samples": [
{"stack_id": 0, "thread_id": "1", "elapsed_since_start_ns": 10000000},
{"stack_id": 1, "thread_id": "1", "elapsed_since_start_ns": 20000000},
],
"thread_metadata": {"1": {"name": "MainThread"}},
}
}
execution_tree, selected_thread_id = _convert_profile_to_execution_tree(profile_data)
# Should only have one node even though frame appears in multiple samples
assert selected_thread_id == "1"
assert len(execution_tree) == 1
assert execution_tree[0]["function"] == "main"
def test_convert_profile_to_execution_tree_calculates_durations(self) -> None:
"""Test that durations are correctly calculated for nodes in the execution tree"""
profile_data = {
"profile": {
"frames": [
{
"function": "main",
"module": "app.main",
"filename": "main.py",
"lineno": 10,
"in_app": True,
},
{
"function": "process_data",
"module": "app.processing",
"filename": "processing.py",
"lineno": 25,
"in_app": True,
},
{
"function": "save_result",
"module": "app.storage",
"filename": "storage.py",
"lineno": 50,
"in_app": True,
},
],
# Three stacks representing a call sequence: main → process_data → save_result → process_data → main
"stacks": [
[0], # main only
[1, 0], # main → process_data
[2, 1, 0], # main → process_data → save_result
[1, 0], # main → process_data (returned from save_result)
[0], # main only (returned from process_data)
],
# 5 samples at 10ms intervals
"samples": [
{
"stack_id": 0,
"thread_id": "1",
"elapsed_since_start_ns": 10000000,
}, # 10ms: main
{
"stack_id": 1,
"thread_id": "1",
"elapsed_since_start_ns": 20000000,
}, # 20ms: main → process_data
{
"stack_id": 2,
"thread_id": "1",
"elapsed_since_start_ns": 30000000,
}, # 30ms: main → process_data → save_result
{
"stack_id": 1,
"thread_id": "1",
"elapsed_since_start_ns": 40000000,
}, # 40ms: main → process_data
{
"stack_id": 0,
"thread_id": "1",
"elapsed_since_start_ns": 50000000,
}, # 50ms: main
],
"thread_metadata": {"1": {"name": "MainThread"}},
}
}
execution_tree, selected_thread_id = _convert_profile_to_execution_tree(profile_data)
# Should have one root node (main)
assert selected_thread_id == "1"
assert len(execution_tree) == 1
root = execution_tree[0]
assert root["function"] == "main"
# Check root duration - should span the entire profile (50ms - 10ms + 10ms interval = 50ms)
assert root["duration_ns"] == 50000000
# Check process_data duration - should be active from 20ms to 40ms (20ms + 10ms interval = 30ms)
assert len(root["children"]) == 1
process_data = root["children"][0]
assert process_data["function"] == "process_data"
assert process_data["duration_ns"] == 30000000
# Check save_result duration - should be active only at 30ms (10ms interval = 10ms)
assert len(process_data["children"]) == 1
save_result = process_data["children"][0]
assert save_result["function"] == "save_result"
assert save_result["duration_ns"] == 10000000
def test_convert_profile_to_execution_tree_with_timestamp(self) -> None:
"""Test that _convert_profile_to_execution_tree works with continuous profiles using timestamp"""
profile_data = {
"profile": {
"frames": [
{
"function": "main",
"module": "app.main",
"filename": "main.py",
"lineno": 10,
"in_app": True,
},
{
"function": "helper",
"module": "app.utils",
"filename": "utils.py",
"lineno": 20,
"in_app": True,
},
],
"stacks": [
[0], # main only
[1, 0], # main → helper
],
# Samples using timestamp instead of elapsed_since_start_ns
"samples": [
{
"stack_id": 0,
"thread_id": "1",
"timestamp": 1672567200.0, # Base timestamp (Unix timestamp)
},
{
"stack_id": 1,
"thread_id": "1",
"timestamp": 1672567200.01, # 10ms later
},
{
"stack_id": 0,
"thread_id": "1",
"timestamp": 1672567200.02, # 20ms later
},
],
"thread_metadata": {"1": {"name": "MainThread"}},
}
}
execution_tree, selected_thread_id = _convert_profile_to_execution_tree(profile_data)
# Should have one root node (main)
assert selected_thread_id == "1"
assert len(execution_tree) == 1
root = execution_tree[0]
assert root["function"] == "main"
assert root["module"] == "app.main"
assert root["filename"] == "main.py"
assert root["lineno"] == 10
# Should have one child (helper)
assert len(root["children"]) == 1
child = root["children"][0]
assert child["function"] == "helper"
assert child["module"] == "app.utils"
assert child["filename"] == "utils.py"
assert child["lineno"] == 20
assert len(child["children"]) == 0
# Check durations are calculated correctly from timestamps
# Root should span from 0ns to 20ms (0.02s * 1e9 = 20000000ns) + interval
# Allow for small floating point precision differences
assert abs(root["duration_ns"] - 30000000) < 100 # 20ms + 10ms interval
# Helper should be active from 10ms to 10ms (10ms interval = 10000000ns)
assert abs(child["duration_ns"] - 10000000) < 100
@pytest.mark.django_db
| TestConvertProfileToExecutionTree |
python | getsentry__sentry | src/sentry/api/endpoints/organization_events_spans_histogram.py | {
"start": 553,
"end": 1467
} | class ____(serializers.Serializer):
span = serializers.CharField(required=True, allow_null=False)
query = serializers.CharField(required=False)
numBuckets = serializers.IntegerField(min_value=1, max_value=100)
precision = serializers.IntegerField(default=0, min_value=0, max_value=4)
min = serializers.FloatField(required=False)
max = serializers.FloatField(required=False)
dataFilter = serializers.ChoiceField(choices=DATA_FILTERS, required=False)
def validate(self, data):
if "min" in data and "max" in data and data["min"] > data["max"]:
raise serializers.ValidationError("min cannot be greater than max.")
return data
def validate_span(self, span: str) -> Span:
try:
return Span.from_str(span)
except ValueError as e:
raise serializers.ValidationError(str(e))
@region_silo_endpoint
| SpansHistogramSerializer |
python | pydantic__pydantic | tests/test_json_schema.py | {
"start": 94424,
"end": 94531
} | class ____(BaseModel):
class NestedModel(BaseModel):
a: Decimal
nested: NestedModel
| ModelOne |
python | getsentry__sentry | tests/snuba/api/endpoints/test_organization_issues_resolved_in_release.py | {
"start": 334,
"end": 5535
} | class ____(APITestCase, SnubaTestCase):
endpoint = "sentry-api-0-organization-release-resolved"
method = "get"
def setUp(self) -> None:
super().setUp()
self.user = self.create_user()
self.org = self.create_organization()
self.team = self.create_team(organization=self.org)
self.create_member(organization=self.org, user=self.user, teams=[self.team])
self.project = self.create_project(teams=[self.team])
self.project_2 = self.create_project(teams=[self.team])
self.release = self.create_release(project=self.project)
self.environment = self.create_environment(project=self.project)
self.environment.add_project(self.project_2)
self.environment_2 = self.create_environment(project=self.project)
self.group = self.create_group(project=self.project)
self.group_2 = self.create_group(project=self.project_2)
self.login_as(self.user)
def build_grouplink(self, group=None):
group = self.group if group is None else group
repo = Repository.objects.create(organization_id=self.org.id, name=group.project.name)
commit = Commit.objects.create(
organization_id=self.org.id, repository_id=repo.id, key=uuid1().hex
)
commit_2 = Commit.objects.create(
organization_id=self.org.id, repository_id=repo.id, key=uuid1().hex
)
ReleaseCommit.objects.create(
organization_id=self.org.id, release=self.release, commit=commit, order=commit.id
)
ReleaseCommit.objects.create(
organization_id=self.org.id, release=self.release, commit=commit_2, order=commit_2.id
)
GroupLink.objects.create(
group_id=group.id,
project_id=group.project_id,
linked_type=GroupLink.LinkedType.commit,
relationship=GroupLink.Relationship.resolves,
linked_id=commit.id,
)
def build_group_resolution(self, group=None):
return GroupResolution.objects.create(
group=self.group if group is None else group,
release=self.release,
type=GroupResolution.Type.in_release,
)
def run_test(self, expected_groups, project_ids=None, environment_names=None):
params = {}
if project_ids:
params["project"] = project_ids
if environment_names:
params["environment"] = environment_names
response = self.get_success_response(self.org.slug, self.release.version, **params)
assert len(response.data) == len(expected_groups)
expected = set(map(str, [g.id for g in expected_groups]))
assert {item["id"] for item in response.data} == expected
def test_shows_issues_from_groupresolution(self) -> None:
"""
tests that the endpoint will correctly retrieve issues resolved
in a release from the GroupResolution model
"""
self.build_group_resolution()
self.run_test([self.group])
def test_shows_issues_from_grouplink(self) -> None:
"""
tests that the endpoint will correctly retrieve issues resolved
in a release from the GroupLink model
"""
self.build_grouplink()
self.run_test([self.group])
def test_does_not_return_duplicate_groups(self) -> None:
"""
tests that the endpoint will correctly retrieve issues resolved
in a release from the GroupLink and GroupResolution model
but will not return the groups twice if they appear in both
"""
self.build_grouplink()
self.build_group_resolution()
self.run_test([self.group])
def test_return_groups_from_both_types(self) -> None:
"""
tests that the endpoint will correctly retrieve issues resolved
in a release from both the GroupLink and GroupResolution model
"""
self.build_grouplink()
new_group = self.create_group(project=self.project)
self.build_group_resolution(new_group)
self.run_test([self.group, new_group])
def test_multiple_projects(self) -> None:
"""
Test that the endpoint will return issues resolved in a release across
projects in the org, and that filtering by project works as expected
"""
self.build_grouplink()
self.build_grouplink(self.group_2)
self.run_test([self.group, self.group_2])
self.run_test([self.group], project_ids=[self.group.project_id])
self.run_test([self.group_2], project_ids=[self.group_2.project_id])
self.run_test(
[self.group, self.group_2], project_ids=[self.group.project_id, self.group_2.project_id]
)
def test_multiple_envs_projects(self) -> None:
"""
Test that the endpoint will work correctly if multiple envs are passed
"""
self.build_grouplink()
self.build_grouplink(self.group_2)
self.run_test(
[self.group],
project_ids=[self.group.project_id],
environment_names=[self.environment.name, self.environment_2.name],
)
| OrganizationIssuesResolvedInReleaseEndpointTest |
python | huggingface__transformers | src/transformers/models/bert/modeling_bert.py | {
"start": 22473,
"end": 22955
} | class ____(nn.Module):
def __init__(self, config):
super().__init__()
self.predictions = BertLMPredictionHead(config)
self.seq_relationship = nn.Linear(config.hidden_size, 2)
def forward(self, sequence_output, pooled_output):
prediction_scores = self.predictions(sequence_output)
seq_relationship_score = self.seq_relationship(pooled_output)
return prediction_scores, seq_relationship_score
@auto_docstring
| BertPreTrainingHeads |
python | pytorch__pytorch | benchmarks/gpt_fast/mixtral_moe_model.py | {
"start": 2426,
"end": 4531
} | class ____(nn.Module):
def __init__(self, config: ModelArgs) -> None:
super().__init__()
self.config = config
self.tok_embeddings = nn.Embedding(config.vocab_size, config.dim)
self.layers = nn.ModuleList(
TransformerBlock(config) for _ in range(config.n_layer)
)
self.norm = RMSNorm(config.dim, eps=config.norm_eps)
self.output = nn.Linear(config.dim, config.vocab_size, bias=False)
self.freqs_cis: Optional[Tensor] = None
self.mask_cache: Optional[Tensor] = None
self.max_batch_size = -1
self.max_seq_length = -1
def setup_caches(self, max_batch_size, max_seq_length):
if (
self.max_seq_length >= max_seq_length
and self.max_batch_size >= max_batch_size
):
return
head_dim = self.config.dim // self.config.n_head
max_seq_length = find_multiple(max_seq_length, 8)
self.max_seq_length = max_seq_length
self.max_batch_size = max_batch_size
for b in self.layers:
b.attention.kv_cache = KVCache(
max_batch_size, max_seq_length, self.config.n_local_heads, head_dim
)
self.freqs_cis = precompute_freqs_cis(
self.config.block_size,
self.config.dim // self.config.n_head,
self.config.rope_base,
)
self.causal_mask = torch.tril(
torch.ones(self.max_seq_length, self.max_seq_length, dtype=torch.bool)
)
def forward(self, idx: Tensor, input_pos: Optional[Tensor] = None) -> Tensor:
assert self.freqs_cis is not None, "Caches must be initialized first"
mask = self.causal_mask[None, None, input_pos]
freqs_cis = self.freqs_cis[input_pos]
x = self.tok_embeddings(idx)
for i, layer in enumerate(self.layers):
x = layer(x, input_pos, freqs_cis, mask)
x = self.norm(x)
logits = self.output(x)
return logits
@classmethod
def from_name(cls, name: str):
return cls(ModelArgs.from_name(name))
| Transformer |
python | plotly__plotly.py | plotly/graph_objs/layout/coloraxis/_colorbar.py | {
"start": 235,
"end": 61668
} | class ____(_BaseLayoutHierarchyType):
_parent_path_str = "layout.coloraxis"
_path_str = "layout.coloraxis.colorbar"
_valid_props = {
"bgcolor",
"bordercolor",
"borderwidth",
"dtick",
"exponentformat",
"labelalias",
"len",
"lenmode",
"minexponent",
"nticks",
"orientation",
"outlinecolor",
"outlinewidth",
"separatethousands",
"showexponent",
"showticklabels",
"showtickprefix",
"showticksuffix",
"thickness",
"thicknessmode",
"tick0",
"tickangle",
"tickcolor",
"tickfont",
"tickformat",
"tickformatstopdefaults",
"tickformatstops",
"ticklabeloverflow",
"ticklabelposition",
"ticklabelstep",
"ticklen",
"tickmode",
"tickprefix",
"ticks",
"ticksuffix",
"ticktext",
"ticktextsrc",
"tickvals",
"tickvalssrc",
"tickwidth",
"title",
"x",
"xanchor",
"xpad",
"xref",
"y",
"yanchor",
"ypad",
"yref",
}
@property
def bgcolor(self):
"""
Sets the color of padded area.
The 'bgcolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["bgcolor"]
@bgcolor.setter
def bgcolor(self, val):
self["bgcolor"] = val
@property
def bordercolor(self):
"""
Sets the axis line color.
The 'bordercolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["bordercolor"]
@bordercolor.setter
def bordercolor(self, val):
self["bordercolor"] = val
@property
def borderwidth(self):
"""
Sets the width (in px) or the border enclosing this color bar.
The 'borderwidth' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["borderwidth"]
@borderwidth.setter
def borderwidth(self, val):
self["borderwidth"] = val
@property
def dtick(self):
"""
Sets the step in-between ticks on this axis. Use with `tick0`.
Must be a positive number, or special strings available to
"log" and "date" axes. If the axis `type` is "log", then ticks
are set every 10^(n*dtick) where n is the tick number. For
example, to set a tick mark at 1, 10, 100, 1000, ... set dtick
to 1. To set tick marks at 1, 100, 10000, ... set dtick to 2.
To set tick marks at 1, 5, 25, 125, 625, 3125, ... set dtick to
log_10(5), or 0.69897000433. "log" has several special values;
"L<f>", where `f` is a positive number, gives ticks linearly
spaced in value (but not position). For example `tick0` = 0.1,
`dtick` = "L0.5" will put ticks at 0.1, 0.6, 1.1, 1.6 etc. To
show powers of 10 plus small digits between, use "D1" (all
digits) or "D2" (only 2 and 5). `tick0` is ignored for "D1" and
"D2". If the axis `type` is "date", then you must convert the
time to milliseconds. For example, to set the interval between
ticks to one day, set `dtick` to 86400000.0. "date" also has
special values "M<n>" gives ticks spaced by a number of months.
`n` must be a positive integer. To set ticks on the 15th of
every third month, set `tick0` to "2000-01-15" and `dtick` to
"M3". To set ticks every 4 years, set `dtick` to "M48"
The 'dtick' property accepts values of any type
Returns
-------
Any
"""
return self["dtick"]
@dtick.setter
def dtick(self, val):
self["dtick"] = val
@property
def exponentformat(self):
"""
Determines a formatting rule for the tick exponents. For
example, consider the number 1,000,000,000. If "none", it
appears as 1,000,000,000. If "e", 1e+9. If "E", 1E+9. If
"power", 1x10^9 (with 9 in a super script). If "SI", 1G. If
"B", 1B. "SI" uses prefixes from "femto" f (10^-15) to "tera" T
(10^12). *SI extended* covers instead the full SI range from
"quecto" q (10^-30) to "quetta" Q (10^30). If "SI" or *SI
extended* is used and the exponent is beyond the above ranges,
the formatting rule will automatically be switched to the power
notation.
The 'exponentformat' property is an enumeration that may be specified as:
- One of the following enumeration values:
['none', 'e', 'E', 'power', 'SI', 'B', 'SI extended']
Returns
-------
Any
"""
return self["exponentformat"]
@exponentformat.setter
def exponentformat(self, val):
self["exponentformat"] = val
@property
def labelalias(self):
"""
Replacement text for specific tick or hover labels. For example
using {US: 'USA', CA: 'Canada'} changes US to USA and CA to
Canada. The labels we would have shown must match the keys
exactly, after adding any tickprefix or ticksuffix. For
negative numbers the minus sign symbol used (U+2212) is wider
than the regular ascii dash. That means you need to use −1
instead of -1. labelalias can be used with any axis type, and
both keys (if needed) and values (if desired) can include html-
like tags or MathJax.
The 'labelalias' property accepts values of any type
Returns
-------
Any
"""
return self["labelalias"]
@labelalias.setter
def labelalias(self, val):
self["labelalias"] = val
@property
def len(self):
"""
Sets the length of the color bar This measure excludes the
padding of both ends. That is, the color bar length is this
length minus the padding on both ends.
The 'len' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["len"]
@len.setter
def len(self, val):
self["len"] = val
@property
def lenmode(self):
"""
Determines whether this color bar's length (i.e. the measure in
the color variation direction) is set in units of plot
"fraction" or in *pixels. Use `len` to set the value.
The 'lenmode' property is an enumeration that may be specified as:
- One of the following enumeration values:
['fraction', 'pixels']
Returns
-------
Any
"""
return self["lenmode"]
@lenmode.setter
def lenmode(self, val):
self["lenmode"] = val
@property
def minexponent(self):
"""
Hide SI prefix for 10^n if |n| is below this number. This only
has an effect when `tickformat` is "SI" or "B".
The 'minexponent' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["minexponent"]
@minexponent.setter
def minexponent(self, val):
self["minexponent"] = val
@property
def nticks(self):
"""
Specifies the maximum number of ticks for the particular axis.
The actual number of ticks will be chosen automatically to be
less than or equal to `nticks`. Has an effect only if
`tickmode` is set to "auto".
The 'nticks' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [0, 9223372036854775807]
Returns
-------
int
"""
return self["nticks"]
@nticks.setter
def nticks(self, val):
self["nticks"] = val
@property
def orientation(self):
"""
Sets the orientation of the colorbar.
The 'orientation' property is an enumeration that may be specified as:
- One of the following enumeration values:
['h', 'v']
Returns
-------
Any
"""
return self["orientation"]
@orientation.setter
def orientation(self, val):
self["orientation"] = val
@property
def outlinecolor(self):
"""
Sets the axis line color.
The 'outlinecolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["outlinecolor"]
@outlinecolor.setter
def outlinecolor(self, val):
self["outlinecolor"] = val
@property
def outlinewidth(self):
"""
Sets the width (in px) of the axis line.
The 'outlinewidth' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["outlinewidth"]
@outlinewidth.setter
def outlinewidth(self, val):
self["outlinewidth"] = val
@property
def separatethousands(self):
"""
If "true", even 4-digit integers are separated
The 'separatethousands' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["separatethousands"]
@separatethousands.setter
def separatethousands(self, val):
self["separatethousands"] = val
@property
def showexponent(self):
"""
If "all", all exponents are shown besides their significands.
If "first", only the exponent of the first tick is shown. If
"last", only the exponent of the last tick is shown. If "none",
no exponents appear.
The 'showexponent' property is an enumeration that may be specified as:
- One of the following enumeration values:
['all', 'first', 'last', 'none']
Returns
-------
Any
"""
return self["showexponent"]
@showexponent.setter
def showexponent(self, val):
self["showexponent"] = val
@property
def showticklabels(self):
"""
Determines whether or not the tick labels are drawn.
The 'showticklabels' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["showticklabels"]
@showticklabels.setter
def showticklabels(self, val):
self["showticklabels"] = val
@property
def showtickprefix(self):
"""
If "all", all tick labels are displayed with a prefix. If
"first", only the first tick is displayed with a prefix. If
"last", only the last tick is displayed with a suffix. If
"none", tick prefixes are hidden.
The 'showtickprefix' property is an enumeration that may be specified as:
- One of the following enumeration values:
['all', 'first', 'last', 'none']
Returns
-------
Any
"""
return self["showtickprefix"]
@showtickprefix.setter
def showtickprefix(self, val):
self["showtickprefix"] = val
@property
def showticksuffix(self):
"""
Same as `showtickprefix` but for tick suffixes.
The 'showticksuffix' property is an enumeration that may be specified as:
- One of the following enumeration values:
['all', 'first', 'last', 'none']
Returns
-------
Any
"""
return self["showticksuffix"]
@showticksuffix.setter
def showticksuffix(self, val):
self["showticksuffix"] = val
@property
def thickness(self):
"""
Sets the thickness of the color bar This measure excludes the
size of the padding, ticks and labels.
The 'thickness' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["thickness"]
@thickness.setter
def thickness(self, val):
self["thickness"] = val
@property
def thicknessmode(self):
"""
Determines whether this color bar's thickness (i.e. the measure
in the constant color direction) is set in units of plot
"fraction" or in "pixels". Use `thickness` to set the value.
The 'thicknessmode' property is an enumeration that may be specified as:
- One of the following enumeration values:
['fraction', 'pixels']
Returns
-------
Any
"""
return self["thicknessmode"]
@thicknessmode.setter
def thicknessmode(self, val):
self["thicknessmode"] = val
@property
def tick0(self):
"""
Sets the placement of the first tick on this axis. Use with
`dtick`. If the axis `type` is "log", then you must take the
log of your starting tick (e.g. to set the starting tick to
100, set the `tick0` to 2) except when `dtick`=*L<f>* (see
`dtick` for more info). If the axis `type` is "date", it should
be a date string, like date data. If the axis `type` is
"category", it should be a number, using the scale where each
category is assigned a serial number from zero in the order it
appears.
The 'tick0' property accepts values of any type
Returns
-------
Any
"""
return self["tick0"]
@tick0.setter
def tick0(self, val):
self["tick0"] = val
@property
def tickangle(self):
"""
Sets the angle of the tick labels with respect to the
horizontal. For example, a `tickangle` of -90 draws the tick
labels vertically.
The 'tickangle' property is a angle (in degrees) that may be
specified as a number between -180 and 180.
Numeric values outside this range are converted to the equivalent value
(e.g. 270 is converted to -90).
Returns
-------
int|float
"""
return self["tickangle"]
@tickangle.setter
def tickangle(self, val):
self["tickangle"] = val
@property
def tickcolor(self):
"""
Sets the tick color.
The 'tickcolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["tickcolor"]
@tickcolor.setter
def tickcolor(self, val):
self["tickcolor"] = val
@property
def tickfont(self):
"""
Sets the color bar's tick label font
The 'tickfont' property is an instance of Tickfont
that may be specified as:
- An instance of :class:`plotly.graph_objs.layout.coloraxis.colorbar.Tickfont`
- A dict of string/value properties that will be passed
to the Tickfont constructor
Returns
-------
plotly.graph_objs.layout.coloraxis.colorbar.Tickfont
"""
return self["tickfont"]
@tickfont.setter
def tickfont(self, val):
self["tickfont"] = val
@property
def tickformat(self):
"""
Sets the tick label formatting rule using d3 formatting mini-
languages which are very similar to those in Python. For
numbers, see:
https://github.com/d3/d3-format/tree/v1.4.5#d3-format. And for
dates see: https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format. We add two items to d3's date
formatter: "%h" for half of the year as a decimal number as
well as "%{n}f" for fractional seconds with n digits. For
example, *2016-10-13 09:15:23.456* with tickformat
"%H~%M~%S.%2f" would display "09~15~23.46"
The 'tickformat' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["tickformat"]
@tickformat.setter
def tickformat(self, val):
self["tickformat"] = val
@property
def tickformatstops(self):
"""
The 'tickformatstops' property is a tuple of instances of
Tickformatstop that may be specified as:
- A list or tuple of instances of plotly.graph_objs.layout.coloraxis.colorbar.Tickformatstop
- A list or tuple of dicts of string/value properties that
will be passed to the Tickformatstop constructor
Returns
-------
tuple[plotly.graph_objs.layout.coloraxis.colorbar.Tickformatstop]
"""
return self["tickformatstops"]
@tickformatstops.setter
def tickformatstops(self, val):
self["tickformatstops"] = val
@property
def tickformatstopdefaults(self):
"""
When used in a template (as layout.template.layout.coloraxis.co
lorbar.tickformatstopdefaults), sets the default property
values to use for elements of
layout.coloraxis.colorbar.tickformatstops
The 'tickformatstopdefaults' property is an instance of Tickformatstop
that may be specified as:
- An instance of :class:`plotly.graph_objs.layout.coloraxis.colorbar.Tickformatstop`
- A dict of string/value properties that will be passed
to the Tickformatstop constructor
Returns
-------
plotly.graph_objs.layout.coloraxis.colorbar.Tickformatstop
"""
return self["tickformatstopdefaults"]
@tickformatstopdefaults.setter
def tickformatstopdefaults(self, val):
self["tickformatstopdefaults"] = val
@property
def ticklabeloverflow(self):
"""
Determines how we handle tick labels that would overflow either
the graph div or the domain of the axis. The default value for
inside tick labels is *hide past domain*. In other cases the
default is *hide past div*.
The 'ticklabeloverflow' property is an enumeration that may be specified as:
- One of the following enumeration values:
['allow', 'hide past div', 'hide past domain']
Returns
-------
Any
"""
return self["ticklabeloverflow"]
@ticklabeloverflow.setter
def ticklabeloverflow(self, val):
self["ticklabeloverflow"] = val
@property
def ticklabelposition(self):
"""
Determines where tick labels are drawn relative to the ticks.
Left and right options are used when `orientation` is "h", top
and bottom when `orientation` is "v".
The 'ticklabelposition' property is an enumeration that may be specified as:
- One of the following enumeration values:
['outside', 'inside', 'outside top', 'inside top',
'outside left', 'inside left', 'outside right', 'inside
right', 'outside bottom', 'inside bottom']
Returns
-------
Any
"""
return self["ticklabelposition"]
@ticklabelposition.setter
def ticklabelposition(self, val):
self["ticklabelposition"] = val
@property
def ticklabelstep(self):
"""
Sets the spacing between tick labels as compared to the spacing
between ticks. A value of 1 (default) means each tick gets a
label. A value of 2 means shows every 2nd label. A larger value
n means only every nth tick is labeled. `tick0` determines
which labels are shown. Not implemented for axes with `type`
"log" or "multicategory", or when `tickmode` is "array".
The 'ticklabelstep' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [1, 9223372036854775807]
Returns
-------
int
"""
return self["ticklabelstep"]
@ticklabelstep.setter
def ticklabelstep(self, val):
self["ticklabelstep"] = val
@property
def ticklen(self):
"""
Sets the tick length (in px).
The 'ticklen' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["ticklen"]
@ticklen.setter
def ticklen(self, val):
self["ticklen"] = val
@property
def tickmode(self):
"""
Sets the tick mode for this axis. If "auto", the number of
ticks is set via `nticks`. If "linear", the placement of the
ticks is determined by a starting position `tick0` and a tick
step `dtick` ("linear" is the default value if `tick0` and
`dtick` are provided). If "array", the placement of the ticks
is set via `tickvals` and the tick text is `ticktext`. ("array"
is the default value if `tickvals` is provided).
The 'tickmode' property is an enumeration that may be specified as:
- One of the following enumeration values:
['auto', 'linear', 'array']
Returns
-------
Any
"""
return self["tickmode"]
@tickmode.setter
def tickmode(self, val):
self["tickmode"] = val
@property
def tickprefix(self):
"""
Sets a tick label prefix.
The 'tickprefix' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["tickprefix"]
@tickprefix.setter
def tickprefix(self, val):
self["tickprefix"] = val
@property
def ticks(self):
"""
Determines whether ticks are drawn or not. If "", this axis'
ticks are not drawn. If "outside" ("inside"), this axis' are
drawn outside (inside) the axis lines.
The 'ticks' property is an enumeration that may be specified as:
- One of the following enumeration values:
['outside', 'inside', '']
Returns
-------
Any
"""
return self["ticks"]
@ticks.setter
def ticks(self, val):
self["ticks"] = val
@property
def ticksuffix(self):
"""
Sets a tick label suffix.
The 'ticksuffix' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["ticksuffix"]
@ticksuffix.setter
def ticksuffix(self, val):
self["ticksuffix"] = val
@property
def ticktext(self):
"""
Sets the text displayed at the ticks position via `tickvals`.
Only has an effect if `tickmode` is set to "array". Used with
`tickvals`.
The 'ticktext' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["ticktext"]
@ticktext.setter
def ticktext(self, val):
self["ticktext"] = val
@property
def ticktextsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `ticktext`.
The 'ticktextsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["ticktextsrc"]
@ticktextsrc.setter
def ticktextsrc(self, val):
self["ticktextsrc"] = val
@property
def tickvals(self):
"""
Sets the values at which ticks on this axis appear. Only has an
effect if `tickmode` is set to "array". Used with `ticktext`.
The 'tickvals' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["tickvals"]
@tickvals.setter
def tickvals(self, val):
self["tickvals"] = val
@property
def tickvalssrc(self):
"""
Sets the source reference on Chart Studio Cloud for `tickvals`.
The 'tickvalssrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["tickvalssrc"]
@tickvalssrc.setter
def tickvalssrc(self, val):
self["tickvalssrc"] = val
@property
def tickwidth(self):
"""
Sets the tick width (in px).
The 'tickwidth' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["tickwidth"]
@tickwidth.setter
def tickwidth(self, val):
self["tickwidth"] = val
@property
def title(self):
"""
The 'title' property is an instance of Title
that may be specified as:
- An instance of :class:`plotly.graph_objs.layout.coloraxis.colorbar.Title`
- A dict of string/value properties that will be passed
to the Title constructor
Returns
-------
plotly.graph_objs.layout.coloraxis.colorbar.Title
"""
return self["title"]
@title.setter
def title(self, val):
self["title"] = val
@property
def x(self):
"""
Sets the x position with respect to `xref` of the color bar (in
plot fraction). When `xref` is "paper", defaults to 1.02 when
`orientation` is "v" and 0.5 when `orientation` is "h". When
`xref` is "container", defaults to 1 when `orientation` is "v"
and 0.5 when `orientation` is "h". Must be between 0 and 1 if
`xref` is "container" and between "-2" and 3 if `xref` is
"paper".
The 'x' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["x"]
@x.setter
def x(self, val):
self["x"] = val
@property
def xanchor(self):
"""
Sets this color bar's horizontal position anchor. This anchor
binds the `x` position to the "left", "center" or "right" of
the color bar. Defaults to "left" when `orientation` is "v" and
"center" when `orientation` is "h".
The 'xanchor' property is an enumeration that may be specified as:
- One of the following enumeration values:
['left', 'center', 'right']
Returns
-------
Any
"""
return self["xanchor"]
@xanchor.setter
def xanchor(self, val):
self["xanchor"] = val
@property
def xpad(self):
"""
Sets the amount of padding (in px) along the x direction.
The 'xpad' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["xpad"]
@xpad.setter
def xpad(self, val):
self["xpad"] = val
@property
def xref(self):
"""
Sets the container `x` refers to. "container" spans the entire
`width` of the plot. "paper" refers to the width of the
plotting area only.
The 'xref' property is an enumeration that may be specified as:
- One of the following enumeration values:
['container', 'paper']
Returns
-------
Any
"""
return self["xref"]
@xref.setter
def xref(self, val):
self["xref"] = val
@property
def y(self):
"""
Sets the y position with respect to `yref` of the color bar (in
plot fraction). When `yref` is "paper", defaults to 0.5 when
`orientation` is "v" and 1.02 when `orientation` is "h". When
`yref` is "container", defaults to 0.5 when `orientation` is
"v" and 1 when `orientation` is "h". Must be between 0 and 1 if
`yref` is "container" and between "-2" and 3 if `yref` is
"paper".
The 'y' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["y"]
@y.setter
def y(self, val):
self["y"] = val
@property
def yanchor(self):
"""
Sets this color bar's vertical position anchor This anchor
binds the `y` position to the "top", "middle" or "bottom" of
the color bar. Defaults to "middle" when `orientation` is "v"
and "bottom" when `orientation` is "h".
The 'yanchor' property is an enumeration that may be specified as:
- One of the following enumeration values:
['top', 'middle', 'bottom']
Returns
-------
Any
"""
return self["yanchor"]
@yanchor.setter
def yanchor(self, val):
self["yanchor"] = val
@property
def ypad(self):
"""
Sets the amount of padding (in px) along the y direction.
The 'ypad' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["ypad"]
@ypad.setter
def ypad(self, val):
self["ypad"] = val
@property
def yref(self):
"""
Sets the container `y` refers to. "container" spans the entire
`height` of the plot. "paper" refers to the height of the
plotting area only.
The 'yref' property is an enumeration that may be specified as:
- One of the following enumeration values:
['container', 'paper']
Returns
-------
Any
"""
return self["yref"]
@yref.setter
def yref(self, val):
self["yref"] = val
    @property
    def _prop_descriptions(self):
        """Plain-text description of every property.

        The same text appears as the parameter documentation in the
        constructor's docstring. The string literal below is runtime
        data and must not be edited independently of `__init__`.
        """
        return """\
        bgcolor
            Sets the color of padded area.
        bordercolor
            Sets the axis line color.
        borderwidth
            Sets the width (in px) or the border enclosing this
            color bar.
        dtick
            Sets the step in-between ticks on this axis. Use with
            `tick0`. Must be a positive number, or special strings
            available to "log" and "date" axes. If the axis `type`
            is "log", then ticks are set every 10^(n*dtick) where n
            is the tick number. For example, to set a tick mark at
            1, 10, 100, 1000, ... set dtick to 1. To set tick marks
            at 1, 100, 10000, ... set dtick to 2. To set tick marks
            at 1, 5, 25, 125, 625, 3125, ... set dtick to
            log_10(5), or 0.69897000433. "log" has several special
            values; "L<f>", where `f` is a positive number, gives
            ticks linearly spaced in value (but not position). For
            example `tick0` = 0.1, `dtick` = "L0.5" will put ticks
            at 0.1, 0.6, 1.1, 1.6 etc. To show powers of 10 plus
            small digits between, use "D1" (all digits) or "D2"
            (only 2 and 5). `tick0` is ignored for "D1" and "D2".
            If the axis `type` is "date", then you must convert the
            time to milliseconds. For example, to set the interval
            between ticks to one day, set `dtick` to 86400000.0.
            "date" also has special values "M<n>" gives ticks
            spaced by a number of months. `n` must be a positive
            integer. To set ticks on the 15th of every third month,
            set `tick0` to "2000-01-15" and `dtick` to "M3". To set
            ticks every 4 years, set `dtick` to "M48"
        exponentformat
            Determines a formatting rule for the tick exponents.
            For example, consider the number 1,000,000,000. If
            "none", it appears as 1,000,000,000. If "e", 1e+9. If
            "E", 1E+9. If "power", 1x10^9 (with 9 in a super
            script). If "SI", 1G. If "B", 1B. "SI" uses prefixes
            from "femto" f (10^-15) to "tera" T (10^12). *SI
            extended* covers instead the full SI range from
            "quecto" q (10^-30) to "quetta" Q (10^30). If "SI" or
            *SI extended* is used and the exponent is beyond the
            above ranges, the formatting rule will automatically be
            switched to the power notation.
        labelalias
            Replacement text for specific tick or hover labels. For
            example using {US: 'USA', CA: 'Canada'} changes US to
            USA and CA to Canada. The labels we would have shown
            must match the keys exactly, after adding any
            tickprefix or ticksuffix. For negative numbers the
            minus sign symbol used (U+2212) is wider than the
            regular ascii dash. That means you need to use −1
            instead of -1. labelalias can be used with any axis
            type, and both keys (if needed) and values (if desired)
            can include html-like tags or MathJax.
        len
            Sets the length of the color bar This measure excludes
            the padding of both ends. That is, the color bar length
            is this length minus the padding on both ends.
        lenmode
            Determines whether this color bar's length (i.e. the
            measure in the color variation direction) is set in
            units of plot "fraction" or in *pixels. Use `len` to
            set the value.
        minexponent
            Hide SI prefix for 10^n if |n| is below this number.
            This only has an effect when `tickformat` is "SI" or
            "B".
        nticks
            Specifies the maximum number of ticks for the
            particular axis. The actual number of ticks will be
            chosen automatically to be less than or equal to
            `nticks`. Has an effect only if `tickmode` is set to
            "auto".
        orientation
            Sets the orientation of the colorbar.
        outlinecolor
            Sets the axis line color.
        outlinewidth
            Sets the width (in px) of the axis line.
        separatethousands
            If "true", even 4-digit integers are separated
        showexponent
            If "all", all exponents are shown besides their
            significands. If "first", only the exponent of the
            first tick is shown. If "last", only the exponent of
            the last tick is shown. If "none", no exponents appear.
        showticklabels
            Determines whether or not the tick labels are drawn.
        showtickprefix
            If "all", all tick labels are displayed with a prefix.
            If "first", only the first tick is displayed with a
            prefix. If "last", only the last tick is displayed with
            a suffix. If "none", tick prefixes are hidden.
        showticksuffix
            Same as `showtickprefix` but for tick suffixes.
        thickness
            Sets the thickness of the color bar This measure
            excludes the size of the padding, ticks and labels.
        thicknessmode
            Determines whether this color bar's thickness (i.e. the
            measure in the constant color direction) is set in
            units of plot "fraction" or in "pixels". Use
            `thickness` to set the value.
        tick0
            Sets the placement of the first tick on this axis. Use
            with `dtick`. If the axis `type` is "log", then you
            must take the log of your starting tick (e.g. to set
            the starting tick to 100, set the `tick0` to 2) except
            when `dtick`=*L<f>* (see `dtick` for more info). If the
            axis `type` is "date", it should be a date string, like
            date data. If the axis `type` is "category", it should
            be a number, using the scale where each category is
            assigned a serial number from zero in the order it
            appears.
        tickangle
            Sets the angle of the tick labels with respect to the
            horizontal. For example, a `tickangle` of -90 draws the
            tick labels vertically.
        tickcolor
            Sets the tick color.
        tickfont
            Sets the color bar's tick label font
        tickformat
            Sets the tick label formatting rule using d3 formatting
            mini-languages which are very similar to those in
            Python. For numbers, see:
            https://github.com/d3/d3-format/tree/v1.4.5#d3-format.
            And for dates see: https://github.com/d3/d3-time-
            format/tree/v2.2.3#locale_format. We add two items to
            d3's date formatter: "%h" for half of the year as a
            decimal number as well as "%{n}f" for fractional
            seconds with n digits. For example, *2016-10-13
            09:15:23.456* with tickformat "%H~%M~%S.%2f" would
            display "09~15~23.46"
        tickformatstops
            A tuple of :class:`plotly.graph_objects.layout.coloraxi
            s.colorbar.Tickformatstop` instances or dicts with
            compatible properties
        tickformatstopdefaults
            When used in a template (as layout.template.layout.colo
            raxis.colorbar.tickformatstopdefaults), sets the
            default property values to use for elements of
            layout.coloraxis.colorbar.tickformatstops
        ticklabeloverflow
            Determines how we handle tick labels that would
            overflow either the graph div or the domain of the
            axis. The default value for inside tick labels is *hide
            past domain*. In other cases the default is *hide past
            div*.
        ticklabelposition
            Determines where tick labels are drawn relative to the
            ticks. Left and right options are used when
            `orientation` is "h", top and bottom when `orientation`
            is "v".
        ticklabelstep
            Sets the spacing between tick labels as compared to the
            spacing between ticks. A value of 1 (default) means
            each tick gets a label. A value of 2 means shows every
            2nd label. A larger value n means only every nth tick
            is labeled. `tick0` determines which labels are shown.
            Not implemented for axes with `type` "log" or
            "multicategory", or when `tickmode` is "array".
        ticklen
            Sets the tick length (in px).
        tickmode
            Sets the tick mode for this axis. If "auto", the number
            of ticks is set via `nticks`. If "linear", the
            placement of the ticks is determined by a starting
            position `tick0` and a tick step `dtick` ("linear" is
            the default value if `tick0` and `dtick` are provided).
            If "array", the placement of the ticks is set via
            `tickvals` and the tick text is `ticktext`. ("array" is
            the default value if `tickvals` is provided).
        tickprefix
            Sets a tick label prefix.
        ticks
            Determines whether ticks are drawn or not. If "", this
            axis' ticks are not drawn. If "outside" ("inside"),
            this axis' are drawn outside (inside) the axis lines.
        ticksuffix
            Sets a tick label suffix.
        ticktext
            Sets the text displayed at the ticks position via
            `tickvals`. Only has an effect if `tickmode` is set to
            "array". Used with `tickvals`.
        ticktextsrc
            Sets the source reference on Chart Studio Cloud for
            `ticktext`.
        tickvals
            Sets the values at which ticks on this axis appear.
            Only has an effect if `tickmode` is set to "array".
            Used with `ticktext`.
        tickvalssrc
            Sets the source reference on Chart Studio Cloud for
            `tickvals`.
        tickwidth
            Sets the tick width (in px).
        title
            :class:`plotly.graph_objects.layout.coloraxis.colorbar.
            Title` instance or dict with compatible properties
        x
            Sets the x position with respect to `xref` of the color
            bar (in plot fraction). When `xref` is "paper",
            defaults to 1.02 when `orientation` is "v" and 0.5 when
            `orientation` is "h". When `xref` is "container",
            defaults to 1 when `orientation` is "v" and 0.5 when
            `orientation` is "h". Must be between 0 and 1 if `xref`
            is "container" and between "-2" and 3 if `xref` is
            "paper".
        xanchor
            Sets this color bar's horizontal position anchor. This
            anchor binds the `x` position to the "left", "center"
            or "right" of the color bar. Defaults to "left" when
            `orientation` is "v" and "center" when `orientation` is
            "h".
        xpad
            Sets the amount of padding (in px) along the x
            direction.
        xref
            Sets the container `x` refers to. "container" spans the
            entire `width` of the plot. "paper" refers to the width
            of the plotting area only.
        y
            Sets the y position with respect to `yref` of the color
            bar (in plot fraction). When `yref` is "paper",
            defaults to 0.5 when `orientation` is "v" and 1.02 when
            `orientation` is "h". When `yref` is "container",
            defaults to 0.5 when `orientation` is "v" and 1 when
            `orientation` is "h". Must be between 0 and 1 if `yref`
            is "container" and between "-2" and 3 if `yref` is
            "paper".
        yanchor
            Sets this color bar's vertical position anchor This
            anchor binds the `y` position to the "top", "middle" or
            "bottom" of the color bar. Defaults to "middle" when
            `orientation` is "v" and "bottom" when `orientation` is
            "h".
        ypad
            Sets the amount of padding (in px) along the y
            direction.
        yref
            Sets the container `y` refers to. "container" spans the
            entire `height` of the plot. "paper" refers to the
            height of the plotting area only.
        """
def __init__(
self,
arg=None,
bgcolor=None,
bordercolor=None,
borderwidth=None,
dtick=None,
exponentformat=None,
labelalias=None,
len=None,
lenmode=None,
minexponent=None,
nticks=None,
orientation=None,
outlinecolor=None,
outlinewidth=None,
separatethousands=None,
showexponent=None,
showticklabels=None,
showtickprefix=None,
showticksuffix=None,
thickness=None,
thicknessmode=None,
tick0=None,
tickangle=None,
tickcolor=None,
tickfont=None,
tickformat=None,
tickformatstops=None,
tickformatstopdefaults=None,
ticklabeloverflow=None,
ticklabelposition=None,
ticklabelstep=None,
ticklen=None,
tickmode=None,
tickprefix=None,
ticks=None,
ticksuffix=None,
ticktext=None,
ticktextsrc=None,
tickvals=None,
tickvalssrc=None,
tickwidth=None,
title=None,
x=None,
xanchor=None,
xpad=None,
xref=None,
y=None,
yanchor=None,
ypad=None,
yref=None,
**kwargs,
):
"""
Construct a new ColorBar object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.layout.coloraxis.ColorBar`
bgcolor
Sets the color of padded area.
bordercolor
Sets the axis line color.
borderwidth
Sets the width (in px) or the border enclosing this
color bar.
dtick
Sets the step in-between ticks on this axis. Use with
`tick0`. Must be a positive number, or special strings
available to "log" and "date" axes. If the axis `type`
is "log", then ticks are set every 10^(n*dtick) where n
is the tick number. For example, to set a tick mark at
1, 10, 100, 1000, ... set dtick to 1. To set tick marks
at 1, 100, 10000, ... set dtick to 2. To set tick marks
at 1, 5, 25, 125, 625, 3125, ... set dtick to
log_10(5), or 0.69897000433. "log" has several special
values; "L<f>", where `f` is a positive number, gives
ticks linearly spaced in value (but not position). For
example `tick0` = 0.1, `dtick` = "L0.5" will put ticks
at 0.1, 0.6, 1.1, 1.6 etc. To show powers of 10 plus
small digits between, use "D1" (all digits) or "D2"
(only 2 and 5). `tick0` is ignored for "D1" and "D2".
If the axis `type` is "date", then you must convert the
time to milliseconds. For example, to set the interval
between ticks to one day, set `dtick` to 86400000.0.
"date" also has special values "M<n>" gives ticks
spaced by a number of months. `n` must be a positive
integer. To set ticks on the 15th of every third month,
set `tick0` to "2000-01-15" and `dtick` to "M3". To set
ticks every 4 years, set `dtick` to "M48"
exponentformat
Determines a formatting rule for the tick exponents.
For example, consider the number 1,000,000,000. If
"none", it appears as 1,000,000,000. If "e", 1e+9. If
"E", 1E+9. If "power", 1x10^9 (with 9 in a super
script). If "SI", 1G. If "B", 1B. "SI" uses prefixes
from "femto" f (10^-15) to "tera" T (10^12). *SI
extended* covers instead the full SI range from
"quecto" q (10^-30) to "quetta" Q (10^30). If "SI" or
*SI extended* is used and the exponent is beyond the
above ranges, the formatting rule will automatically be
switched to the power notation.
labelalias
Replacement text for specific tick or hover labels. For
example using {US: 'USA', CA: 'Canada'} changes US to
USA and CA to Canada. The labels we would have shown
must match the keys exactly, after adding any
tickprefix or ticksuffix. For negative numbers the
minus sign symbol used (U+2212) is wider than the
regular ascii dash. That means you need to use −1
instead of -1. labelalias can be used with any axis
type, and both keys (if needed) and values (if desired)
can include html-like tags or MathJax.
len
Sets the length of the color bar This measure excludes
the padding of both ends. That is, the color bar length
is this length minus the padding on both ends.
lenmode
Determines whether this color bar's length (i.e. the
measure in the color variation direction) is set in
units of plot "fraction" or in *pixels. Use `len` to
set the value.
minexponent
Hide SI prefix for 10^n if |n| is below this number.
This only has an effect when `tickformat` is "SI" or
"B".
nticks
Specifies the maximum number of ticks for the
particular axis. The actual number of ticks will be
chosen automatically to be less than or equal to
`nticks`. Has an effect only if `tickmode` is set to
"auto".
orientation
Sets the orientation of the colorbar.
outlinecolor
Sets the axis line color.
outlinewidth
Sets the width (in px) of the axis line.
separatethousands
If "true", even 4-digit integers are separated
showexponent
If "all", all exponents are shown besides their
significands. If "first", only the exponent of the
first tick is shown. If "last", only the exponent of
the last tick is shown. If "none", no exponents appear.
showticklabels
Determines whether or not the tick labels are drawn.
showtickprefix
If "all", all tick labels are displayed with a prefix.
If "first", only the first tick is displayed with a
prefix. If "last", only the last tick is displayed with
a suffix. If "none", tick prefixes are hidden.
showticksuffix
Same as `showtickprefix` but for tick suffixes.
thickness
Sets the thickness of the color bar This measure
excludes the size of the padding, ticks and labels.
thicknessmode
Determines whether this color bar's thickness (i.e. the
measure in the constant color direction) is set in
units of plot "fraction" or in "pixels". Use
`thickness` to set the value.
tick0
Sets the placement of the first tick on this axis. Use
with `dtick`. If the axis `type` is "log", then you
must take the log of your starting tick (e.g. to set
the starting tick to 100, set the `tick0` to 2) except
when `dtick`=*L<f>* (see `dtick` for more info). If the
axis `type` is "date", it should be a date string, like
date data. If the axis `type` is "category", it should
be a number, using the scale where each category is
assigned a serial number from zero in the order it
appears.
tickangle
Sets the angle of the tick labels with respect to the
horizontal. For example, a `tickangle` of -90 draws the
tick labels vertically.
tickcolor
Sets the tick color.
tickfont
Sets the color bar's tick label font
tickformat
Sets the tick label formatting rule using d3 formatting
mini-languages which are very similar to those in
Python. For numbers, see:
https://github.com/d3/d3-format/tree/v1.4.5#d3-format.
And for dates see: https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format. We add two items to
d3's date formatter: "%h" for half of the year as a
decimal number as well as "%{n}f" for fractional
seconds with n digits. For example, *2016-10-13
09:15:23.456* with tickformat "%H~%M~%S.%2f" would
display "09~15~23.46"
tickformatstops
A tuple of :class:`plotly.graph_objects.layout.coloraxi
s.colorbar.Tickformatstop` instances or dicts with
compatible properties
tickformatstopdefaults
When used in a template (as layout.template.layout.colo
raxis.colorbar.tickformatstopdefaults), sets the
default property values to use for elements of
layout.coloraxis.colorbar.tickformatstops
ticklabeloverflow
Determines how we handle tick labels that would
overflow either the graph div or the domain of the
axis. The default value for inside tick labels is *hide
past domain*. In other cases the default is *hide past
div*.
ticklabelposition
Determines where tick labels are drawn relative to the
ticks. Left and right options are used when
`orientation` is "h", top and bottom when `orientation`
is "v".
ticklabelstep
Sets the spacing between tick labels as compared to the
spacing between ticks. A value of 1 (default) means
each tick gets a label. A value of 2 means shows every
2nd label. A larger value n means only every nth tick
is labeled. `tick0` determines which labels are shown.
Not implemented for axes with `type` "log" or
"multicategory", or when `tickmode` is "array".
ticklen
Sets the tick length (in px).
tickmode
Sets the tick mode for this axis. If "auto", the number
of ticks is set via `nticks`. If "linear", the
placement of the ticks is determined by a starting
position `tick0` and a tick step `dtick` ("linear" is
the default value if `tick0` and `dtick` are provided).
If "array", the placement of the ticks is set via
`tickvals` and the tick text is `ticktext`. ("array" is
the default value if `tickvals` is provided).
tickprefix
Sets a tick label prefix.
ticks
Determines whether ticks are drawn or not. If "", this
axis' ticks are not drawn. If "outside" ("inside"),
this axis' are drawn outside (inside) the axis lines.
ticksuffix
Sets a tick label suffix.
ticktext
Sets the text displayed at the ticks position via
`tickvals`. Only has an effect if `tickmode` is set to
"array". Used with `tickvals`.
ticktextsrc
Sets the source reference on Chart Studio Cloud for
`ticktext`.
tickvals
Sets the values at which ticks on this axis appear.
Only has an effect if `tickmode` is set to "array".
Used with `ticktext`.
tickvalssrc
Sets the source reference on Chart Studio Cloud for
`tickvals`.
tickwidth
Sets the tick width (in px).
title
:class:`plotly.graph_objects.layout.coloraxis.colorbar.
Title` instance or dict with compatible properties
x
Sets the x position with respect to `xref` of the color
bar (in plot fraction). When `xref` is "paper",
defaults to 1.02 when `orientation` is "v" and 0.5 when
`orientation` is "h". When `xref` is "container",
defaults to 1 when `orientation` is "v" and 0.5 when
`orientation` is "h". Must be between 0 and 1 if `xref`
is "container" and between "-2" and 3 if `xref` is
"paper".
xanchor
Sets this color bar's horizontal position anchor. This
anchor binds the `x` position to the "left", "center"
or "right" of the color bar. Defaults to "left" when
`orientation` is "v" and "center" when `orientation` is
"h".
xpad
Sets the amount of padding (in px) along the x
direction.
xref
Sets the container `x` refers to. "container" spans the
entire `width` of the plot. "paper" refers to the width
of the plotting area only.
y
Sets the y position with respect to `yref` of the color
bar (in plot fraction). When `yref` is "paper",
defaults to 0.5 when `orientation` is "v" and 1.02 when
`orientation` is "h". When `yref` is "container",
defaults to 0.5 when `orientation` is "v" and 1 when
`orientation` is "h". Must be between 0 and 1 if `yref`
is "container" and between "-2" and 3 if `yref` is
"paper".
yanchor
Sets this color bar's vertical position anchor This
anchor binds the `y` position to the "top", "middle" or
"bottom" of the color bar. Defaults to "middle" when
`orientation` is "v" and "bottom" when `orientation` is
"h".
ypad
Sets the amount of padding (in px) along the y
direction.
yref
Sets the container `y` refers to. "container" spans the
entire `height` of the plot. "paper" refers to the
height of the plotting area only.
Returns
-------
ColorBar
"""
super().__init__("colorbar")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.layout.coloraxis.ColorBar
constructor must be a dict or
an instance of :class:`plotly.graph_objs.layout.coloraxis.ColorBar`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("bgcolor", arg, bgcolor)
self._set_property("bordercolor", arg, bordercolor)
self._set_property("borderwidth", arg, borderwidth)
self._set_property("dtick", arg, dtick)
self._set_property("exponentformat", arg, exponentformat)
self._set_property("labelalias", arg, labelalias)
self._set_property("len", arg, len)
self._set_property("lenmode", arg, lenmode)
self._set_property("minexponent", arg, minexponent)
self._set_property("nticks", arg, nticks)
self._set_property("orientation", arg, orientation)
self._set_property("outlinecolor", arg, outlinecolor)
self._set_property("outlinewidth", arg, outlinewidth)
self._set_property("separatethousands", arg, separatethousands)
self._set_property("showexponent", arg, showexponent)
self._set_property("showticklabels", arg, showticklabels)
self._set_property("showtickprefix", arg, showtickprefix)
self._set_property("showticksuffix", arg, showticksuffix)
self._set_property("thickness", arg, thickness)
self._set_property("thicknessmode", arg, thicknessmode)
self._set_property("tick0", arg, tick0)
self._set_property("tickangle", arg, tickangle)
self._set_property("tickcolor", arg, tickcolor)
self._set_property("tickfont", arg, tickfont)
self._set_property("tickformat", arg, tickformat)
self._set_property("tickformatstops", arg, tickformatstops)
self._set_property("tickformatstopdefaults", arg, tickformatstopdefaults)
self._set_property("ticklabeloverflow", arg, ticklabeloverflow)
self._set_property("ticklabelposition", arg, ticklabelposition)
self._set_property("ticklabelstep", arg, ticklabelstep)
self._set_property("ticklen", arg, ticklen)
self._set_property("tickmode", arg, tickmode)
self._set_property("tickprefix", arg, tickprefix)
self._set_property("ticks", arg, ticks)
self._set_property("ticksuffix", arg, ticksuffix)
self._set_property("ticktext", arg, ticktext)
self._set_property("ticktextsrc", arg, ticktextsrc)
self._set_property("tickvals", arg, tickvals)
self._set_property("tickvalssrc", arg, tickvalssrc)
self._set_property("tickwidth", arg, tickwidth)
self._set_property("title", arg, title)
self._set_property("x", arg, x)
self._set_property("xanchor", arg, xanchor)
self._set_property("xpad", arg, xpad)
self._set_property("xref", arg, xref)
self._set_property("y", arg, y)
self._set_property("yanchor", arg, yanchor)
self._set_property("ypad", arg, ypad)
self._set_property("yref", arg, yref)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| ColorBar |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-google-analytics-v4/source_google_analytics_v4/custom_reports_validator.py | {
"start": 1219,
"end": 3210
} | class ____:
"""
ERRORS_MAPPING holds an external `Pydantic.ValidationError` types and their placeholders.
{
key: str = <Pydantic.ValidationError Type>,
value: tuple(str, list) = (<explainable message>, <list as placeholder>
}
"""
errors_mapping = {
"value_error.missing": ("fields required", []),
"value_error.extra": ("fields not permitted", []),
"type_error": ("type errors", []),
"value_error": ("incorrect field reference, expected format `ga:MY_FIELD_NAME`, but got", []),
}
def parse(self, errors: List[Dict]) -> str:
for error in errors:
field_name, error_type, error_msg = error.get("loc")[0], error.get("type"), error.get("msg")
# general errors
if error_type in self.errors_mapping:
# value errors
if error_type == "value_error":
self.errors_mapping.get(error_type)[1].append({"field": field_name, "reference": error_msg})
# general errors
else:
self.errors_mapping.get(error_type)[1].append(field_name)
# type errors
if "type_error" in error_type:
error_type, _type = error_type.split(".")
self.errors_mapping.get(error_type)[1].append((field_name, f"{_type} is required"))
def explain(self, errors: List[Dict]):
"""
General Errors are explained first.
Such as:
- missing required field
- presence of non-permitted fields
Type Errors are explained last.
If model attribute has invalid type provided, like list, but str was required and etc:
- str is required,
- ...
"""
self.parse(errors)
for error_type in self.errors_mapping:
msg, errors = self.errors_mapping.get(error_type)
if errors:
return f"{msg} {errors}"
@dataclass
| Explainer |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/function7.py | {
"start": 279,
"end": 497
} | class ____:
def write(self, a: str, /, b: str):
pass
def make_writer1(w: _Writer1):
pass
# This should generate an error because the source function is positional-only.
make_writer1(Writer1())
| Writer1 |
python | kamyu104__LeetCode-Solutions | Python/average-value-of-even-numbers-that-are-divisible-by-three.py | {
"start": 36,
"end": 344
} | class ____(object):
def averageValue(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
total = cnt = 0
for x in nums:
if x%6:
continue
total += x
cnt += 1
return total//cnt if cnt else 0
| Solution |
python | getsentry__sentry | tests/sentry/api/endpoints/test_api_applications.py | {
"start": 875,
"end": 1227
} | class ____(APITestCase):
def test_simple(self) -> None:
self.login_as(self.user)
url = reverse("sentry-api-0-api-applications")
response = self.client.post(url, data={})
assert response.status_code == 201
assert ApiApplication.objects.get(client_id=response.data["id"], owner=self.user)
| ApiApplicationsCreateTest |
python | sphinx-doc__sphinx | sphinx/domains/index.py | {
"start": 3108,
"end": 4358
} | class ____(ReferenceRole):
def run(self) -> tuple[list[Node], list[system_message]]:
target_id = 'index-%s' % self.env.new_serialno('index')
if self.has_explicit_title:
# if an explicit target is given, process it as a full entry
title = self.title
entries = process_index_entry(self.target, target_id)
else:
# otherwise we just create a single entry
if self.target.startswith('!'):
title = self.title[1:]
entries = [('single', self.target[1:], target_id, 'main', None)]
else:
title = self.title
entries = [('single', self.target, target_id, '', None)]
index = addnodes.index(entries=entries)
target = nodes.target('', '', ids=[target_id])
text = nodes.Text(title)
self.set_source_info(index)
return [index, target, text], []
def setup(app: Sphinx) -> ExtensionMetadata:
app.add_domain(IndexDomain)
app.add_directive('index', IndexDirective)
app.add_role('index', IndexRole())
return {
'version': 'builtin',
'env_version': 1,
'parallel_read_safe': True,
'parallel_write_safe': True,
}
| IndexRole |
python | wandb__wandb | wandb/sdk/artifacts/_generated/fetch_linked_artifacts.py | {
"start": 378,
"end": 552
} | class ____(GQLResult):
artifact_memberships: FetchLinkedArtifactsArtifactArtifactMemberships = Field(
alias="artifactMemberships"
)
| FetchLinkedArtifactsArtifact |
python | scikit-learn__scikit-learn | sklearn/linear_model/_ridge.py | {
"start": 46378,
"end": 55519
} | class ____(_RidgeClassifierMixin, _BaseRidge):
"""Classifier using Ridge regression.
This classifier first converts the target values into ``{-1, 1}`` and
then treats the problem as a regression task (multi-output regression in
the multiclass case).
Read more in the :ref:`User Guide <ridge_regression>`.
Parameters
----------
alpha : float, default=1.0
Regularization strength; must be a positive float. Regularization
improves the conditioning of the problem and reduces the variance of
the estimates. Larger values specify stronger regularization.
Alpha corresponds to ``1 / (2C)`` in other linear models such as
:class:`~sklearn.linear_model.LogisticRegression` or
:class:`~sklearn.svm.LinearSVC`.
fit_intercept : bool, default=True
Whether to calculate the intercept for this model. If set to false, no
intercept will be used in calculations (e.g. data is expected to be
already centered).
copy_X : bool, default=True
If True, X will be copied; else, it may be overwritten.
max_iter : int, default=None
Maximum number of iterations for conjugate gradient solver.
The default value is determined by scipy.sparse.linalg.
tol : float, default=1e-4
The precision of the solution (`coef_`) is determined by `tol` which
specifies a different convergence criterion for each solver:
- 'svd': `tol` has no impact.
- 'cholesky': `tol` has no impact.
- 'sparse_cg': norm of residuals smaller than `tol`.
- 'lsqr': `tol` is set as atol and btol of scipy.sparse.linalg.lsqr,
which control the norm of the residual vector in terms of the norms of
matrix and coefficients.
- 'sag' and 'saga': relative change of coef smaller than `tol`.
- 'lbfgs': maximum of the absolute (projected) gradient=max|residuals|
smaller than `tol`.
.. versionchanged:: 1.2
Default value changed from 1e-3 to 1e-4 for consistency with other linear
models.
class_weight : dict or 'balanced', default=None
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``.
solver : {'auto', 'svd', 'cholesky', 'lsqr', 'sparse_cg', \
'sag', 'saga', 'lbfgs'}, default='auto'
Solver to use in the computational routines:
- 'auto' chooses the solver automatically based on the type of data.
- 'svd' uses a Singular Value Decomposition of X to compute the Ridge
coefficients. It is the most stable solver, in particular more stable
for singular matrices than 'cholesky' at the cost of being slower.
- 'cholesky' uses the standard scipy.linalg.solve function to
obtain a closed-form solution.
- 'sparse_cg' uses the conjugate gradient solver as found in
scipy.sparse.linalg.cg. As an iterative algorithm, this solver is
more appropriate than 'cholesky' for large-scale data
(possibility to set `tol` and `max_iter`).
- 'lsqr' uses the dedicated regularized least-squares routine
scipy.sparse.linalg.lsqr. It is the fastest and uses an iterative
procedure.
- 'sag' uses a Stochastic Average Gradient descent, and 'saga' uses
its unbiased and more flexible version named SAGA. Both methods
use an iterative procedure, and are often faster than other solvers
when both n_samples and n_features are large. Note that 'sag' and
'saga' fast convergence is only guaranteed on features with
approximately the same scale. You can preprocess the data with a
scaler from sklearn.preprocessing.
.. versionadded:: 0.17
Stochastic Average Gradient descent solver.
.. versionadded:: 0.19
SAGA solver.
- 'lbfgs' uses L-BFGS-B algorithm implemented in
`scipy.optimize.minimize`. It can be used only when `positive`
is True.
positive : bool, default=False
When set to ``True``, forces the coefficients to be positive.
Only 'lbfgs' solver is supported in this case.
random_state : int, RandomState instance, default=None
Used when ``solver`` == 'sag' or 'saga' to shuffle the data.
See :term:`Glossary <random_state>` for details.
Attributes
----------
coef_ : ndarray of shape (1, n_features) or (n_classes, n_features)
Coefficient of the features in the decision function.
``coef_`` is of shape (1, n_features) when the given problem is binary.
intercept_ : float or ndarray of shape (n_targets,)
Independent term in decision function. Set to 0.0 if
``fit_intercept = False``.
n_iter_ : None or ndarray of shape (n_targets,)
Actual number of iterations for each target. Available only for
sag and lsqr solvers. Other solvers will return None.
classes_ : ndarray of shape (n_classes,)
The classes labels.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
solver_ : str
The solver that was used at fit time by the computational
routines.
.. versionadded:: 1.5
See Also
--------
Ridge : Ridge regression.
RidgeClassifierCV : Ridge classifier with built-in cross validation.
Notes
-----
For multi-class classification, n_class classifiers are trained in
a one-versus-all approach. Concretely, this is implemented by taking
advantage of the multi-variate response support in Ridge.
Examples
--------
>>> from sklearn.datasets import load_breast_cancer
>>> from sklearn.linear_model import RidgeClassifier
>>> X, y = load_breast_cancer(return_X_y=True)
>>> clf = RidgeClassifier().fit(X, y)
>>> clf.score(X, y)
0.9595...
"""
_parameter_constraints: dict = {
**_BaseRidge._parameter_constraints,
"class_weight": [dict, StrOptions({"balanced"}), None],
}
def __init__(
self,
alpha=1.0,
*,
fit_intercept=True,
copy_X=True,
max_iter=None,
tol=1e-4,
class_weight=None,
solver="auto",
positive=False,
random_state=None,
):
super().__init__(
alpha=alpha,
fit_intercept=fit_intercept,
copy_X=copy_X,
max_iter=max_iter,
tol=tol,
solver=solver,
positive=positive,
random_state=random_state,
)
self.class_weight = class_weight
@_fit_context(prefer_skip_nested_validation=True)
def fit(self, X, y, sample_weight=None):
"""Fit Ridge classifier model.
Parameters
----------
X : {ndarray, sparse matrix} of shape (n_samples, n_features)
Training data.
y : ndarray of shape (n_samples,)
Target values.
sample_weight : float or ndarray of shape (n_samples,), default=None
Individual weights for each sample. If given a float, every sample
will have the same weight.
.. versionadded:: 0.17
*sample_weight* support to RidgeClassifier.
Returns
-------
self : object
Instance of the estimator.
"""
X, y, sample_weight, Y = self._prepare_data(X, y, sample_weight, self.solver)
super().fit(X, Y, sample_weight=sample_weight)
return self
def __sklearn_tags__(self):
tags = super().__sklearn_tags__()
tags.input_tags.sparse = (self.solver != "svd") and (
self.solver != "cholesky" or not self.fit_intercept
)
return tags
def _check_gcv_mode(X, gcv_mode):
if gcv_mode in ["eigen", "svd"]:
return gcv_mode
# if X has more rows than columns, use decomposition of X^T.X,
# otherwise X.X^T
if X.shape[0] > X.shape[1]:
return "svd"
return "eigen"
def _find_smallest_angle(query, vectors):
"""Find the column of vectors that is most aligned with the query.
Both query and the columns of vectors must have their l2 norm equal to 1.
Parameters
----------
query : ndarray of shape (n_samples,)
Normalized query vector.
vectors : ndarray of shape (n_samples, n_features)
Vectors to which we compare query, as columns. Must be normalized.
"""
xp, _ = get_namespace(query)
abs_cosine = xp.abs(query @ vectors)
index = xp.argmax(abs_cosine)
return index
| RidgeClassifier |
python | run-llama__llama_index | llama-index-core/llama_index/core/postprocessor/node.py | {
"start": 4681,
"end": 9102
} | class ____(BaseNodePostprocessor):
"""
Previous/Next Node post-processor.
Allows users to fetch additional nodes from the document store,
based on the relationships of the nodes.
NOTE: this is a beta feature.
Args:
docstore (BaseDocumentStore): The document store.
num_nodes (int): The number of nodes to return (default: 1)
mode (str): The mode of the post-processor.
Can be "previous", "next", or "both.
"""
docstore: BaseDocumentStore
num_nodes: int = Field(default=1)
mode: str = Field(default="next")
@field_validator("mode")
@classmethod
def _validate_mode(cls, v: str) -> str:
"""Validate mode."""
if v not in ["next", "previous", "both"]:
raise ValueError(f"Invalid mode: {v}")
return v
@classmethod
def class_name(cls) -> str:
return "PrevNextNodePostprocessor"
def _postprocess_nodes(
self,
nodes: List[NodeWithScore],
query_bundle: Optional[QueryBundle] = None,
) -> List[NodeWithScore]:
"""Postprocess nodes."""
all_nodes: Dict[str, NodeWithScore] = {}
for node in nodes:
all_nodes[node.node.node_id] = node
if self.mode == "next":
all_nodes.update(get_forward_nodes(node, self.num_nodes, self.docstore))
elif self.mode == "previous":
all_nodes.update(
get_backward_nodes(node, self.num_nodes, self.docstore)
)
elif self.mode == "both":
all_nodes.update(get_forward_nodes(node, self.num_nodes, self.docstore))
all_nodes.update(
get_backward_nodes(node, self.num_nodes, self.docstore)
)
else:
raise ValueError(f"Invalid mode: {self.mode}")
all_nodes_values: List[NodeWithScore] = list(all_nodes.values())
sorted_nodes: List[NodeWithScore] = []
for node in all_nodes_values:
# variable to check if cand node is inserted
node_inserted = False
for i, cand in enumerate(sorted_nodes):
node_id = node.node.node_id
# prepend to current candidate
prev_node_info = cand.node.prev_node
next_node_info = cand.node.next_node
if prev_node_info is not None and node_id == prev_node_info.node_id:
node_inserted = True
sorted_nodes.insert(i, node)
break
# append to current candidate
elif next_node_info is not None and node_id == next_node_info.node_id:
node_inserted = True
sorted_nodes.insert(i + 1, node)
break
if not node_inserted:
sorted_nodes.append(node)
return sorted_nodes
DEFAULT_INFER_PREV_NEXT_TMPL = (
"The current context information is provided. \n"
"A question is also provided. \n"
"You are a retrieval agent deciding whether to search the "
"document store for additional prior context or future context. \n"
"Given the context and question, return PREVIOUS or NEXT or NONE. \n"
"Examples: \n\n"
"Context: Describes the author's experience at Y Combinator."
"Question: What did the author do after his time at Y Combinator? \n"
"Answer: NEXT \n\n"
"Context: Describes the author's experience at Y Combinator."
"Question: What did the author do before his time at Y Combinator? \n"
"Answer: PREVIOUS \n\n"
"Context: Describe the author's experience at Y Combinator."
"Question: What did the author do at Y Combinator? \n"
"Answer: NONE \n\n"
"Context: {context_str}\n"
"Question: {query_str}\n"
"Answer: "
)
DEFAULT_REFINE_INFER_PREV_NEXT_TMPL = (
"The current context information is provided. \n"
"A question is also provided. \n"
"An existing answer is also provided.\n"
"You are a retrieval agent deciding whether to search the "
"document store for additional prior context or future context. \n"
"Given the context, question, and previous answer, "
"return PREVIOUS or NEXT or NONE.\n"
"Examples: \n\n"
"Context: {context_msg}\n"
"Question: {query_str}\n"
"Existing Answer: {existing_answer}\n"
"Answer: "
)
| PrevNextNodePostprocessor |
python | rapidsai__cudf | python/cudf_polars/cudf_polars/experimental/io.py | {
"start": 30544,
"end": 33219
} | class ____(DataSourceInfo):
"""
In-memory DataFrame source information.
Parameters
----------
df
In-memory DataFrame source.
stats_planning
Statistics planning options.
"""
def __init__(
self,
df: pl.DataFrame,
stats_planning: StatsPlanningOptions,
):
self._pdf = df
self._stats_planning = stats_planning
self._key_columns: set[str] = set()
self._unique_stats_columns = set()
self._unique_stats: dict[str, UniqueStats] = {}
@functools.cached_property
def row_count(self) -> ColumnStat[int]:
"""Data source row-count estimate."""
return ColumnStat[int](value=self._pdf.height, exact=True)
def _update_unique_stats(self, column: str) -> None:
if column not in self._unique_stats and self._stats_planning.use_sampling:
row_count = self.row_count.value
try:
unique_count = (
self._pdf._df.get_column(column).approx_n_unique()
if row_count
else 0
)
except pl.exceptions.InvalidOperationError: # pragma: no cover
unique_count = self._pdf._df.get_column(column).n_unique()
unique_fraction = min((unique_count / row_count), 1.0) if row_count else 1.0
self._unique_stats[column] = UniqueStats(
ColumnStat[int](value=unique_count),
ColumnStat[float](value=unique_fraction),
)
def unique_stats(self, column: str) -> UniqueStats:
"""Return unique-value statistics for a column."""
self._update_unique_stats(column)
return self._unique_stats.get(column, UniqueStats())
def _extract_dataframescan_stats(
ir: DataFrameScan, config_options: ConfigOptions
) -> dict[str, ColumnStats]:
"""Extract base ColumnStats for a DataFrameScan node."""
assert config_options.executor.name == "streaming", (
"Only streaming executor is supported in _extract_dataframescan_stats"
)
table_source_info = DataFrameSourceInfo(
pl.DataFrame._from_pydf(ir.df),
config_options.executor.stats_planning,
)
return {
name: ColumnStats(
name=name,
source_info=ColumnSourceInfo(DataSourcePair(table_source_info, name)),
)
for name in ir.schema
}
def _clear_source_info_cache() -> None:
"""Clear DataSourceInfo caches."""
# TODO: Avoid clearing the cache if we can
# check that the underlying data hasn't changed.
# Clear ParquetSourceInfo cache
_sample_pq_stats.cache_clear()
| DataFrameSourceInfo |
python | imageio__imageio | imageio/core/request.py | {
"start": 996,
"end": 1985
} | class ____(str, enum.Enum):
"""Available Image modes
This is a helper enum for ``Request.Mode`` which is a composite of a
``Request.ImageMode`` and ``Request.IOMode``. The image mode that tells the
plugin the desired (and expected) image shape. Available values are
- single_image ("i"): Return a single image extending in two spacial
dimensions
- multi_image ("I"): Return a list of images extending in two spacial
dimensions
- single_volume ("v"): Return an image extending into multiple dimensions.
E.g. three spacial dimensions for image stacks, or two spatial and one
time dimension for videos
- multi_volume ("V"): Return a list of images extending into multiple
dimensions.
- any_mode ("?"): Return an image in any format (the plugin decides the
appropriate action).
"""
single_image = "i"
multi_image = "I"
single_volume = "v"
multi_volume = "V"
any_mode = "?"
@enum.unique
| ImageMode |
python | encode__django-rest-framework | tests/test_fields.py | {
"start": 45044,
"end": 45422
} | class ____(FieldValues):
valid_inputs = {
'0.12345': Decimal('0.12345'),
}
invalid_inputs = {
'0.1234567': ['Ensure that there are no more than 6 digits in total.']
}
outputs = {
'1.2345': '1.2345',
'0': '0',
'1.1': '1.1',
}
field = serializers.DecimalField(max_digits=6, decimal_places=None)
| TestNoDecimalPlaces |
python | Lightning-AI__lightning | tests/tests_pytorch/models/test_restore.py | {
"start": 9584,
"end": 21845
} | class ____(Callback):
callbacks = []
def on_fit_start(self, trainer, pl_module):
self.callbacks = deepcopy(trainer.callbacks)
@RunIf(sklearn=True)
def test_callbacks_state_fit_ckpt_path(tmp_path):
"""Test that resuming from a checkpoint restores callbacks that persist state."""
dm = ClassifDataModule()
model = ClassificationModel()
callback_capture = CaptureCallbacksBeforeTraining()
def get_trainer_args():
checkpoint = ModelCheckpoint(dirpath=tmp_path, monitor="val_loss", save_last=True)
assert checkpoint.best_model_path == ""
assert checkpoint.best_model_score is None
return {
"default_root_dir": tmp_path,
"limit_train_batches": 1,
"limit_val_batches": 2,
"max_epochs": 1,
"logger": False,
"callbacks": [checkpoint, callback_capture],
}
# initial training
trainer = Trainer(**get_trainer_args())
trainer.fit(model, datamodule=dm)
callbacks_before_resume = deepcopy(trainer.callbacks)
# resumed training
trainer = Trainer(**get_trainer_args())
trainer.fit(model, datamodule=dm, ckpt_path=str(tmp_path / "last.ckpt"))
assert len(callbacks_before_resume) == len(callback_capture.callbacks)
for before, after in zip(callbacks_before_resume, callback_capture.callbacks):
if isinstance(before, ModelCheckpoint):
for attribute in (
"best_model_path",
"best_model_score",
"best_k_models",
"kth_best_model_path",
"kth_value",
"last_model_path",
):
assert getattr(before, attribute) == getattr(after, attribute), f"{attribute}"
@RunIf(sklearn=True)
def test_callbacks_references_fit_ckpt_path(tmp_path):
"""Test that resuming from a checkpoint sets references as expected."""
dm = ClassifDataModule()
model = ClassificationModel()
args = {
"default_root_dir": tmp_path,
"max_steps": 1,
"logger": False,
"limit_val_batches": 2,
"num_sanity_val_steps": 0,
}
# initial training
checkpoint = ModelCheckpoint(dirpath=tmp_path, monitor="val_loss", save_last=True)
trainer = Trainer(**args, callbacks=[checkpoint])
assert checkpoint is trainer.callbacks[-1] is trainer.checkpoint_callback
trainer.fit(model, datamodule=dm)
# resumed training
new_checkpoint = ModelCheckpoint(dirpath=tmp_path, monitor="val_loss", save_last=True)
# pass in a new checkpoint object, which should take
# precedence over the one in the last.ckpt file
trainer = Trainer(**args, callbacks=[new_checkpoint])
assert checkpoint is not new_checkpoint
assert new_checkpoint is trainer.callbacks[-1] is trainer.checkpoint_callback
trainer.fit(model, datamodule=dm, ckpt_path=str(tmp_path / "last.ckpt"))
@RunIf(min_cuda_gpus=2, sklearn=True)
def test_running_test_pretrained_model_distrib_ddp_spawn(tmp_path):
"""Verify `test()` on pretrained model."""
dm = ClassifDataModule()
model = ClassificationModel()
# exp file to get meta
logger = tutils.get_default_logger(tmp_path)
# exp file to get weights
checkpoint = tutils.init_checkpoint_callback(logger)
trainer_options = {
"enable_progress_bar": False,
"max_epochs": 2,
"limit_train_batches": 2,
"limit_val_batches": 2,
"callbacks": [checkpoint],
"logger": logger,
"accelerator": "gpu",
"devices": [0, 1],
"strategy": "ddp_spawn",
"default_root_dir": tmp_path,
}
# fit model
trainer = Trainer(**trainer_options)
trainer.fit(model, datamodule=dm)
log.info(os.listdir(tutils.get_data_path(logger, path_dir=tmp_path)))
# correct result and ok accuracy
assert trainer.state.finished, f"Training failed with {trainer.state}"
pretrained_model = ClassificationModel.load_from_checkpoint(trainer.checkpoint_callback.best_model_path)
# run test set
new_trainer = Trainer(**trainer_options)
new_trainer.test(pretrained_model, datamodule=dm)
pretrained_model.cpu()
dataloaders = dm.test_dataloader()
if not isinstance(dataloaders, list):
dataloaders = [dataloaders]
for dataloader in dataloaders:
tpipes.run_model_prediction(pretrained_model, dataloader, min_acc=0.1)
@RunIf(sklearn=True)
def test_running_test_pretrained_model_cpu(tmp_path):
"""Verify test() on pretrained model."""
seed_everything(1)
dm = ClassifDataModule()
model = ClassificationModel()
# logger file to get meta
logger = tutils.get_default_logger(tmp_path)
# logger file to get weights
checkpoint = tutils.init_checkpoint_callback(logger)
trainer_options = {
"enable_progress_bar": False,
"max_epochs": 2,
"limit_train_batches": 2,
"limit_val_batches": 2,
"limit_test_batches": 2,
"callbacks": [checkpoint],
"logger": logger,
"default_root_dir": tmp_path,
}
# fit model
trainer = Trainer(**trainer_options)
trainer.fit(model, datamodule=dm)
# correct result and ok accuracy
assert trainer.state.finished, f"Training failed with {trainer.state}"
pretrained_model = ClassificationModel.load_from_checkpoint(trainer.checkpoint_callback.best_model_path)
new_trainer = Trainer(**trainer_options)
new_trainer.test(pretrained_model, datamodule=dm)
# test we have good test accuracy
tutils.assert_ok_model_acc(new_trainer, key="test_acc", thr=0.45)
@pytest.mark.parametrize("model_template", [ValTestLossBoringModel, GenericValTestLossBoringModel])
def test_load_model_from_checkpoint(tmp_path, model_template):
"""Verify test() on pretrained model."""
model = model_template()
trainer_options = {
"enable_progress_bar": False,
"max_epochs": 2,
"limit_train_batches": 2,
"limit_val_batches": 2,
"limit_test_batches": 2,
"callbacks": [ModelCheckpoint(dirpath=tmp_path, monitor="val_loss", save_top_k=-1)],
"default_root_dir": tmp_path,
"accelerator": "cpu",
}
# fit model
trainer = Trainer(**trainer_options)
trainer.fit(model)
trainer.test(model)
# correct result and ok accuracy
assert trainer.state.finished, f"Training failed with {trainer.state}"
# load last checkpoint
last_checkpoint = sorted(glob.glob(os.path.join(trainer.checkpoint_callback.dirpath, "*.ckpt")))[-1]
# Since `BoringModel` has `_save_hparams = True` by default, check that ckpt has hparams
ckpt = torch.load(last_checkpoint, weights_only=True)
assert model_template.CHECKPOINT_HYPER_PARAMS_KEY in ckpt, "hyper_parameters missing from checkpoints"
# Ensure that model can be correctly restored from checkpoint
pretrained_model = model_template.load_from_checkpoint(last_checkpoint)
# test that hparams loaded correctly
for k, v in model.hparams.items():
assert getattr(pretrained_model.hparams, k) == v
# assert weights are the same
for (old_name, old_p), (new_name, new_p) in zip(model.named_parameters(), pretrained_model.named_parameters()):
assert torch.all(torch.eq(old_p, new_p)), "loaded weights are not the same as the saved weights"
# Check `test` on pretrained model:
new_trainer = Trainer(**trainer_options)
new_trainer.test(pretrained_model)
def test_model_saving_loading(tmp_path):
"""Tests use case where trainer saves the model, and user loads it from tags independently."""
model = BoringModel()
# logger file to get meta
logger = tutils.get_default_logger(tmp_path)
# fit model
trainer = Trainer(
max_epochs=1,
limit_train_batches=2,
limit_val_batches=2,
logger=logger,
callbacks=[ModelCheckpoint(dirpath=tmp_path)],
default_root_dir=tmp_path,
)
trainer.fit(model)
# make a prediction
dataloaders = model.test_dataloader()
batch = next(iter(dataloaders))
# generate preds before saving model
model.eval()
pred_before_saving = model(batch)
# save model
new_weights_path = os.path.join(tmp_path, "save_test.ckpt")
trainer.save_checkpoint(new_weights_path)
# load new model
hparams_path = tutils.get_data_path(logger, path_dir=tmp_path)
hparams_path = os.path.join(hparams_path, "hparams.yaml")
model_2 = BoringModel.load_from_checkpoint(checkpoint_path=new_weights_path, hparams_file=hparams_path)
model_2.eval()
# make prediction
# assert that both predictions are the same
new_pred = model_2(batch)
assert torch.all(torch.eq(pred_before_saving, new_pred)).item() == 1
@pytest.mark.parametrize("url_ckpt", [True, False])
def test_strict_model_load_more_params(monkeypatch, tmp_path, tmpdir_server, url_ckpt):
"""Tests use case where trainer saves the model, and user loads it from tags independently."""
# set $TORCH_HOME, which determines torch hub's cache path, to tmp_path
monkeypatch.setenv("TORCH_HOME", tmp_path)
model = BoringModel()
# Extra layer
model.c_d3 = torch.nn.Linear(32, 32)
# logger file to get meta
logger = tutils.get_default_logger(tmp_path)
# fit model
trainer = Trainer(
default_root_dir=tmp_path,
max_epochs=1,
limit_train_batches=2,
limit_val_batches=2,
logger=logger,
callbacks=[ModelCheckpoint(dirpath=tmp_path)],
)
trainer.fit(model)
# training complete
assert trainer.state.finished, f"Training failed with {trainer.state}"
# save model
new_weights_path = os.path.join(tmp_path, "save_test.ckpt")
trainer.save_checkpoint(new_weights_path)
# load new model
hparams_path = os.path.join(tutils.get_data_path(logger, path_dir=tmp_path), "hparams.yaml")
hparams_url = f"http://{tmpdir_server[0]}:{tmpdir_server[1]}/{os.path.basename(new_weights_path)}"
ckpt_path = hparams_url if url_ckpt else new_weights_path
BoringModel.load_from_checkpoint(checkpoint_path=ckpt_path, hparams_file=hparams_path, strict=False)
with pytest.raises(RuntimeError, match=r'Unexpected key\(s\) in state_dict: "c_d3.weight", "c_d3.bias"'):
BoringModel.load_from_checkpoint(checkpoint_path=ckpt_path, hparams_file=hparams_path, strict=True)
@pytest.mark.parametrize("url_ckpt", [True, False])
def test_strict_model_load_less_params(monkeypatch, tmp_path, tmpdir_server, url_ckpt):
"""Tests use case where trainer saves the model, and user loads it from tags independently."""
# set $TORCH_HOME, which determines torch hub's cache path, to tmp_path
monkeypatch.setenv("TORCH_HOME", tmp_path)
model = BoringModel()
# logger file to get meta
logger = tutils.get_default_logger(tmp_path)
# fit model
trainer = Trainer(
default_root_dir=tmp_path,
max_epochs=1,
limit_train_batches=2,
limit_val_batches=2,
logger=logger,
callbacks=[ModelCheckpoint(dirpath=tmp_path)],
)
trainer.fit(model)
# training complete
assert trainer.state.finished, f"Training failed with {trainer.state}"
# save model
new_weights_path = os.path.join(tmp_path, "save_test.ckpt")
trainer.save_checkpoint(new_weights_path)
# load new model
hparams_path = os.path.join(tutils.get_data_path(logger, path_dir=tmp_path), "hparams.yaml")
ckpt_url = f"http://{tmpdir_server[0]}:{tmpdir_server[1]}/{os.path.basename(new_weights_path)}"
ckpt_path = ckpt_url if url_ckpt else new_weights_path
class CurrentModel(BoringModel):
def __init__(self):
super().__init__()
self.c_d3 = torch.nn.Linear(7, 7)
CurrentModel.load_from_checkpoint(checkpoint_path=ckpt_path, hparams_file=hparams_path, strict=False)
with pytest.raises(RuntimeError, match=r'Missing key\(s\) in state_dict: "c_d3.weight", "c_d3.bias"'):
CurrentModel.load_from_checkpoint(checkpoint_path=ckpt_path, hparams_file=hparams_path, strict=True)
def test_model_pickle(tmp_path):
model = BoringModel()
pickle.dumps(model)
cloudpickle.dumps(model)
| CaptureCallbacksBeforeTraining |
python | apache__airflow | providers/google/tests/unit/google/marketing_platform/operators/test_display_video.py | {
"start": 1455,
"end": 4264
} | class ____:
@mock.patch("airflow.providers.google.marketing_platform.operators.display_video.zipfile")
@mock.patch("airflow.providers.google.marketing_platform.operators.display_video.os")
@mock.patch(
"airflow.providers.google.marketing_platform.operators.display_video.tempfile.TemporaryDirectory"
)
@mock.patch("airflow.providers.google.marketing_platform.operators.display_video.GCSHook")
@mock.patch(
"airflow.providers.google.marketing_platform.operators.display_video.GoogleDisplayVideo360Hook"
)
@mock.patch(
"airflow.providers.google.marketing_platform.operators.display_video.open",
new_callable=mock.mock_open,
)
def test_execute(self, mock_open, mock_hook, gcs_hook_mock, temp_dir_mock, os_mock, zipfile_mock):
operation = {"response": {"resourceName": RESOURCE_NAME}}
media = mock.Mock()
mock_hook.return_value.get_sdf_download_operation.return_value = operation
mock_hook.return_value.download_media.return_value = media
tmp_dir = "/tmp/mock_dir"
temp_dir_mock.return_value.__enter__.return_value = tmp_dir
# Mock os behavior
os_mock.path.join.side_effect = lambda *args: "/".join(args)
os_mock.listdir.return_value = [FILENAME]
# Mock zipfile behavior
zipfile_mock.ZipFile.return_value.__enter__.return_value.extractall.return_value = None
op = GoogleDisplayVideo360SDFtoGCSOperator(
operation_name=OPERATION_NAME,
bucket_name=BUCKET_NAME,
object_name=OBJECT_NAME,
gzip=False,
api_version=API_VERSION,
gcp_conn_id=GCP_CONN_ID,
task_id="test_task",
impersonation_chain=IMPERSONATION_CHAIN,
)
result = op.execute(context=None)
# Assertions
mock_hook.assert_called_once_with(
gcp_conn_id=GCP_CONN_ID,
api_version=API_VERSION,
impersonation_chain=IMPERSONATION_CHAIN,
)
mock_hook.return_value.get_sdf_download_operation.assert_called_once_with(
operation_name=OPERATION_NAME
)
mock_hook.return_value.download_media.assert_called_once_with(resource_name=RESOURCE_NAME)
mock_hook.return_value.download_content_from_request.assert_called_once()
gcs_hook_mock.assert_called_once_with(
gcp_conn_id=GCP_CONN_ID,
impersonation_chain=IMPERSONATION_CHAIN,
)
gcs_hook_mock.return_value.upload.assert_called_once_with(
bucket_name=BUCKET_NAME,
object_name=OBJECT_NAME,
filename=f"{tmp_dir}/{FILENAME}",
gzip=False,
)
assert result == f"{BUCKET_NAME}/{OBJECT_NAME}"
| TestGoogleDisplayVideo360SDFtoGCSOperator |
python | huggingface__transformers | src/transformers/models/qwen3_moe/modeling_qwen3_moe.py | {
"start": 17677,
"end": 20699
} | class ____(nn.Module):
inv_freq: torch.Tensor # fix linting for `register_buffer`
def __init__(self, config: Qwen3MoeConfig, device=None):
super().__init__()
self.max_seq_len_cached = config.max_position_embeddings
self.original_max_seq_len = config.max_position_embeddings
self.config = config
self.rope_type = self.config.rope_parameters["rope_type"]
rope_init_fn: Callable = self.compute_default_rope_parameters
if self.rope_type != "default":
rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]
inv_freq, self.attention_scaling = rope_init_fn(self.config, device)
self.register_buffer("inv_freq", inv_freq, persistent=False)
self.original_inv_freq = inv_freq
@staticmethod
def compute_default_rope_parameters(
config: Optional[Qwen3MoeConfig] = None,
device: Optional["torch.device"] = None,
seq_len: Optional[int] = None,
) -> tuple["torch.Tensor", float]:
"""
Computes the inverse frequencies according to the original RoPE implementation
Args:
config ([`~transformers.PreTrainedConfig`]):
The model configuration.
device (`torch.device`):
The device to use for initialization of the inverse frequencies.
seq_len (`int`, *optional*):
The current sequence length. Unused for this type of RoPE.
Returns:
Tuple of (`torch.Tensor`, `float`), containing the inverse frequencies for the RoPE embeddings and the
post-processing scaling factor applied to the computed cos/sin (unused in this type of RoPE).
"""
base = config.rope_parameters["rope_theta"]
dim = getattr(config, "head_dim", None) or config.hidden_size // config.num_attention_heads
attention_factor = 1.0 # Unused in this type of RoPE
# Compute the inverse frequencies
inv_freq = 1.0 / (
base ** (torch.arange(0, dim, 2, dtype=torch.int64).to(device=device, dtype=torch.float) / dim)
)
return inv_freq, attention_factor
@torch.no_grad()
@dynamic_rope_update # power user: used with advanced RoPE types (e.g. dynamic rope)
def forward(self, x, position_ids):
inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1).to(x.device)
position_ids_expanded = position_ids[:, None, :].float()
device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != "mps" else "cpu"
with torch.autocast(device_type=device_type, enabled=False): # Force float32
freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
emb = torch.cat((freqs, freqs), dim=-1)
cos = emb.cos() * self.attention_scaling
sin = emb.sin() * self.attention_scaling
return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)
@auto_docstring
| Qwen3MoeRotaryEmbedding |
python | scikit-learn__scikit-learn | sklearn/tests/metadata_routing_common.py | {
"start": 6743,
"end": 7799
} | class ____(ClassifierMixin, BaseEstimator):
"""A classifier which accepts no metadata on any method."""
def __init__(self, alpha=0.0):
self.alpha = alpha
def fit(self, X, y):
self.classes_ = np.unique(y)
self.coef_ = np.ones_like(X)
return self
def partial_fit(self, X, y, classes=None):
return self
def decision_function(self, X):
return self.predict(X)
def predict(self, X):
y_pred = np.empty(shape=(len(X),))
y_pred[: len(X) // 2] = 0
y_pred[len(X) // 2 :] = 1
return y_pred
def predict_proba(self, X):
# dummy probabilities to support predict_proba
y_proba = np.empty(shape=(len(X), len(self.classes_)), dtype=np.float32)
# each row sums up to 1.0:
y_proba[:] = np.random.dirichlet(alpha=np.ones(len(self.classes_)), size=len(X))
return y_proba
def predict_log_proba(self, X):
# dummy probabilities to support predict_log_proba
return self.predict_proba(X)
| NonConsumingClassifier |
python | walkccc__LeetCode | solutions/1973. Count Nodes Equal to Sum of Descendants/1973.py | {
"start": 60,
"end": 96
} | class ____:
summ: int
count: int
| T |
python | apache__airflow | providers/amazon/tests/unit/amazon/aws/hooks/test_step_function.py | {
"start": 1063,
"end": 3585
} | class ____:
def test_get_conn_returns_a_boto3_connection(self):
hook = StepFunctionHook(aws_conn_id="aws_default")
assert hook.get_conn().meta.service_model.service_name == "stepfunctions"
def test_start_execution(self):
hook = StepFunctionHook(aws_conn_id="aws_default", region_name="us-east-1")
state_machine = hook.get_conn().create_state_machine(
name="pseudo-state-machine", definition="{}", roleArn="arn:aws:iam::000000000000:role/Role"
)
state_machine_arn = state_machine.get("stateMachineArn")
execution_arn = hook.start_execution(
state_machine_arn=state_machine_arn, name=None, state_machine_input={}
)
assert execution_arn is not None
@mock.patch.object(StepFunctionHook, "conn")
def test_redrive_execution(self, mock_conn):
mock_conn.redrive_execution.return_value = {"redriveDate": datetime(2024, 1, 1)}
StepFunctionHook().start_execution(
state_machine_arn="arn:aws:states:us-east-1:123456789012:stateMachine:test-state-machine",
name="random-123",
is_redrive_execution=True,
)
mock_conn.redrive_execution.assert_called_once_with(
executionArn="arn:aws:states:us-east-1:123456789012:execution:test-state-machine:random-123"
)
@mock.patch.object(StepFunctionHook, "conn")
def test_redrive_execution_without_name_should_fail(self, mock_conn):
mock_conn.redrive_execution.return_value = {"redriveDate": datetime(2024, 1, 1)}
with pytest.raises(
AirflowFailException, match="Execution name is required to start RedriveExecution"
):
StepFunctionHook().start_execution(
state_machine_arn="arn:aws:states:us-east-1:123456789012:stateMachine:test-state-machine",
is_redrive_execution=True,
)
def test_describe_execution(self):
hook = StepFunctionHook(aws_conn_id="aws_default", region_name="us-east-1")
state_machine = hook.get_conn().create_state_machine(
name="pseudo-state-machine", definition="{}", roleArn="arn:aws:iam::000000000000:role/Role"
)
state_machine_arn = state_machine.get("stateMachineArn")
execution_arn = hook.start_execution(
state_machine_arn=state_machine_arn, name=None, state_machine_input={}
)
response = hook.describe_execution(execution_arn)
assert "input" in response
| TestStepFunctionHook |
python | huggingface__transformers | src/transformers/models/clvp/modeling_clvp.py | {
"start": 10388,
"end": 11759
} | class ____(nn.Module):
"""
Rotary Position Embedding Class for CLVP. It was proposed in the paper 'ROFORMER: ENHANCED TRANSFORMER WITH ROTARY
POSITION EMBEDDING', Please see https://huggingface.co/papers/2104.09864v1.pdf .
"""
def __init__(self, config):
super().__init__()
dim = max(config.projection_dim // (config.num_attention_heads * 2), 32)
inv_freq = 1.0 / (10000 ** (torch.arange(0, dim, 2, dtype=torch.int64).float() / dim))
self.register_buffer("inv_freq", inv_freq)
self.cached_sequence_length = None
self.cached_rotary_positional_embedding = None
def forward(self, hidden_states: torch.FloatTensor) -> torch.FloatTensor:
sequence_length = hidden_states.shape[1]
if sequence_length == self.cached_sequence_length and self.cached_rotary_positional_embedding is not None:
return self.cached_rotary_positional_embedding
self.cached_sequence_length = sequence_length
time_stamps = torch.arange(sequence_length, device=hidden_states.device).type_as(self.inv_freq)
freqs = torch.einsum("i,j->ij", time_stamps, self.inv_freq)
embeddings = torch.cat((freqs, freqs), dim=-1)
self.cached_rotary_positional_embedding = embeddings.unsqueeze(0)
return self.cached_rotary_positional_embedding
| ClvpRotaryPositionalEmbedding |
python | google__jax | tests/multiprocess/axis_index_test.py | {
"start": 692,
"end": 1023
} | class ____(jt_multiprocess.MultiProcessTest):
def test(self):
f = jax.pmap(lambda _: lax.axis_index("i"), axis_name="i")
n = jax.local_device_count()
xs = np.arange(n)
out = f(xs * 0)
np.testing.assert_equal(out, xs + (n * jax.process_index()))
if __name__ == "__main__":
jt_multiprocess.main()
| AxisIndexTest |
python | wandb__wandb | wandb/sdk/launch/registry/anon.py | {
"start": 168,
"end": 943
} | class ____(AbstractRegistry):
def __init__(self, uri: str) -> None:
"""Initialize the registry."""
self.uri = uri
async def get_username_password(self) -> Tuple[str, str]:
"""Get the username and password for the registry."""
raise NotImplementedError("Anonymous registry does not require authentication")
async def get_repo_uri(self) -> str:
return self.uri
async def check_image_exists(self, image_uri: str) -> bool:
"""Check if an image exists in the registry."""
if not is_docker_installed():
return False
return docker_image_exists(image_uri)
@classmethod
def from_config(cls, config: dict) -> "AbstractRegistry":
return cls(uri=config["uri"])
| AnonynmousRegistry |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.