language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | tensorflow__tensorflow | tensorflow/core/function/trace_type/trace_type_test.py | {
"start": 16784,
"end": 20598
} | class ____(test.Benchmark):
def benchmarkTensor(self):
shapes = [[1], [2, 19], [5, 11, 24], [4, 5, 9, 23]]
tensors = []
for s in shapes:
tensors.append(array_ops.zeros(s))
def encode_tensors(tensors):
trace_type.from_value(tensors)
iterations = 100000
t = timeit.timeit(lambda: encode_tensors(tensors), number=iterations)
self.report_benchmark(
name='tensor_cache_key_generation',
iters=iterations,
wall_time=t,
metrics=[{
'name': 'tensor_cache_key_generation_avg_ms',
'value': t / iterations * 1000
}])
def benchmarkTensorSpec(self):
shapes = [[1], [2, 19], [5, 11, 24], [4, 5, 9, 23]]
tensor_specs = []
for s in shapes:
tensor_specs.append(tensor_spec.TensorSpec(s, dtypes.int32))
def encode_tensor_specs(tensor_specs):
trace_type.from_value(tensor_specs)
iterations = 100000
t = timeit.timeit(
lambda: encode_tensor_specs(tensor_specs), number=iterations)
self.report_benchmark(
name='tensor_spec_cache_key_generation',
iters=iterations,
wall_time=t,
metrics=[{
'name': 'tensor_spec_cache_key_generation_avg_ms',
'value': t / iterations * 1000
}])
def benchmarkVariable(self):
var_list = [
variables.Variable(1.0),
variables.Variable(1),
variables.Variable([1])
]
def encode_variables(var_list):
trace_type.from_value(var_list)
iterations = 10000
t = timeit.timeit(lambda: encode_variables(var_list), number=iterations)
self.report_benchmark(
name='variable_cache_key_generation',
iters=iterations,
wall_time=t,
metrics=[{
'name': 'variable_cache_key_generation_avg_ms',
'value': t / iterations * 1000
}])
def benchmarkTraceTypeLookup(self):
@def_function.function
def defined(t):
return t
call_arg_list = [
1,
array_ops.zeros([5, 13]),
array_ops.zeros([9, 22, 24]),
array_ops.zeros([5, 13, 2])
]
for c in call_arg_list:
defined(c)
lookup_call_arg = array_ops.zeros([5, 13])
iterations = 10000
t = timeit.timeit(stmt=lambda: defined(lookup_call_arg), number=iterations)
self.report_benchmark(
name='cache_key_lookup',
iters=iterations,
wall_time=t,
metrics=[{
'name': 'cache_key_lookup_avg_ms',
'value': t / iterations * 1000
}])
def benchmarkNestedStruct(self):
struct = {(1, 2, 3): {(1, 2): {12: 2}}, (3, 2, 3): (2, {2: 3})}
def encode_struct(struct):
trace_type.from_value(struct)
iterations = 100000
t = timeit.timeit(lambda: encode_struct(struct), number=iterations)
self.report_benchmark(
name='nested_struct_cache_key_generation',
iters=iterations,
wall_time=t,
metrics=[{
'name': 'nested_struct_cache_key_generation_avg_ms',
'value': t / iterations * 1000
}])
def benchmarkFunctionInvocation(self):
struct = (variables.Variable(1.0), array_ops.zeros([5, 13]), {
'tensor': array_ops.zeros([5, 20]),
'variable': variables.Variable(1.0)
})
@def_function.function
def defined(t):
return t
defined(struct) # Get it traced and cached.
iterations = 10000
t = timeit.timeit(lambda: defined(struct), number=iterations)
self.report_benchmark(
name='function_invocation',
iters=iterations,
wall_time=t,
metrics=[{
'name': 'function_invocation_time_avg_ms',
'value': t / iterations * 1000
}])
if __name__ == '__main__':
v2_compat.enable_v2_behavior()
test.main()
| TraceTypeGenerationBenchmark |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/enumAuto1.py | {
"start": 82,
"end": 307
} | class ____(Enum):
ALWAYS = auto()
NEVER = auto()
AUTO = auto()
a: CacheBehavior = CacheBehavior.ALWAYS
b: CacheBehavior = CacheBehavior["ALWAYS"]
foo = "A" + "UTO"
c: CacheBehavior = CacheBehavior[foo]
| CacheBehavior |
python | pytorch__pytorch | test/quantization/pt2e/test_quantize_pt2e.py | {
"start": 105894,
"end": 116833
} | class ____(PT2EQuantizationTestCase):
def test_channel_group_quantization(self):
from torch.ao.quantization.observer import MappingType, PerGroup, PerToken
from torch.ao.quantization.pt2e._affine_quantization import (
AffineQuantizedMinMaxObserver,
)
class BackendAQuantizer(Quantizer):
def annotate(self, model: torch.fx.GraphModule) -> torch.fx.GraphModule:
for node in model.graph.nodes:
if (
node.op == "call_function"
and node.target == torch.ops.aten.linear.default
):
input_act = node.args[0]
assert isinstance(input_act, Node)
weight = node.args[1]
assert isinstance(weight, Node)
act_qspec = QuantizationSpec(
dtype=torch.uint8,
quant_min=0,
quant_max=255,
qscheme=None,
is_dynamic=False,
observer_or_fake_quant_ctr=AffineQuantizedMinMaxObserver.with_args(
# TODO: maybe align the arg name here
target_dtype=torch.uint8,
mapping_type=MappingType.SYMMETRIC,
granularity=PerToken(),
),
)
weight_qspec = QuantizationSpec(
dtype=torch.uint8,
quant_min=0,
quant_max=255,
qscheme=None,
is_dynamic=False,
observer_or_fake_quant_ctr=AffineQuantizedMinMaxObserver.with_args(
target_dtype=torch.uint8,
mapping_type=MappingType.SYMMETRIC,
granularity=PerGroup(group_size=128),
),
)
node.meta["quantization_annotation"] = QuantizationAnnotation(
input_qspec_map={
input_act: act_qspec,
weight: weight_qspec,
},
_annotated=True,
)
def validate(self, model: torch.fx.GraphModule) -> None:
pass
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.linear = torch.nn.Linear(128, 20)
def forward(self, x):
return self.linear(x)
node_occurrence = {
torch.ops.pt2e_quant.quantize_affine: 1,
torch.ops.pt2e_quant.dequantize_affine: 2,
}
node_list = [
torch.ops.pt2e_quant.quantize_affine,
torch.ops.pt2e_quant.dequantize_affine,
]
example_inputs = (torch.randn(5, 128),)
self._test_quantizer(
M().eval(),
example_inputs,
BackendAQuantizer(),
node_occurrence,
node_list,
is_debug_mode=True,
)
def test_dynamic_affine_act_per_channel_weights(self):
import operator
from torch.ao.quantization.observer import (
MappingType,
PerChannelMinMaxObserver,
PerToken,
)
from torch.ao.quantization.pt2e._affine_quantization import (
AffineQuantizedMovingAverageMinMaxObserver,
)
class BackendAQuantizer(Quantizer):
def annotate(self, model: torch.fx.GraphModule) -> torch.fx.GraphModule:
for node in model.graph.nodes:
if (
node.op == "call_function"
and node.target == torch.ops.aten.linear.default
):
input_act = node.args[0]
assert isinstance(input_act, Node)
weight = node.args[1]
assert isinstance(weight, Node)
activation_dtype = torch.int8
act_qspec = QuantizationSpec(
dtype=activation_dtype,
quant_min=-128,
quant_max=127,
qscheme=None,
is_dynamic=True,
observer_or_fake_quant_ctr=AffineQuantizedMovingAverageMinMaxObserver.with_args(
# TODO: maybe align the arg name here
target_dtype=activation_dtype,
mapping_type=MappingType.SYMMETRIC,
granularity=PerToken(),
averaging_constant=1,
),
)
weight_qspec = QuantizationSpec(
dtype=torch.int8,
quant_min=-127,
quant_max=127,
qscheme=torch.per_channel_symmetric,
ch_axis=0,
is_dynamic=False,
observer_or_fake_quant_ctr=PerChannelMinMaxObserver.with_args(),
)
node.meta["quantization_annotation"] = QuantizationAnnotation(
input_qspec_map={
input_act: act_qspec,
weight: weight_qspec,
},
_annotated=True,
)
def validate(self, model: torch.fx.GraphModule) -> None:
pass
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.linear = torch.nn.Linear(128, 20)
def forward(self, x):
return self.linear(x)
node_occurrence = {
torch.ops.pt2e_quant.choose_qparams_affine: 1,
operator.getitem: 2,
torch.ops.pt2e_quant.quantize_affine: 1,
torch.ops.pt2e_quant.dequantize_affine: 1,
torch.ops.quantized_decomposed.dequantize_per_channel.default: 1,
}
node_list = [
torch.ops.quantized_decomposed.dequantize_per_channel.default,
torch.ops.pt2e_quant.choose_qparams_affine,
operator.getitem,
torch.ops.pt2e_quant.quantize_affine,
torch.ops.pt2e_quant.dequantize_affine,
]
example_inputs = (torch.randn(5, 128),)
self._test_quantizer(
M().eval(),
example_inputs,
BackendAQuantizer(),
node_occurrence,
node_list,
is_debug_mode=True,
)
def test_dynamic_per_tok_act_per_group_weights(self):
import operator
from torch.ao.quantization.observer import MappingType, PerGroup, PerToken
from torch.ao.quantization.pt2e._affine_quantization import (
AffineQuantizedMinMaxObserver,
AffineQuantizedPlaceholderObserver,
)
class BackendAQuantizer(Quantizer):
def annotate(self, model: torch.fx.GraphModule) -> torch.fx.GraphModule:
for node in model.graph.nodes:
if (
node.op == "call_function"
and node.target == torch.ops.aten.linear.default
):
input_act = node.args[0]
assert isinstance(input_act, Node)
weight = node.args[1]
assert isinstance(weight, Node)
activation_dtype = torch.int8
act_qspec = QuantizationSpec(
dtype=activation_dtype,
quant_min=-128,
quant_max=127,
qscheme=None,
is_dynamic=True,
observer_or_fake_quant_ctr=AffineQuantizedPlaceholderObserver.with_args(
# TODO: maybe align the arg name here
target_dtype=activation_dtype,
mapping_type=MappingType.SYMMETRIC,
granularity=PerToken(),
),
)
weight_qspec = QuantizationSpec(
dtype=torch.int8,
quant_min=-127,
quant_max=127,
qscheme=torch.per_channel_symmetric,
ch_axis=0,
is_dynamic=False,
observer_or_fake_quant_ctr=AffineQuantizedMinMaxObserver.with_args(
target_dtype=torch.int8,
mapping_type=MappingType.SYMMETRIC,
granularity=PerGroup(group_size=128),
),
)
node.meta["quantization_annotation"] = QuantizationAnnotation(
input_qspec_map={
input_act: act_qspec,
weight: weight_qspec,
},
_annotated=True,
)
def validate(self, model: torch.fx.GraphModule) -> None:
pass
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.linear = torch.nn.Linear(128, 20)
def forward(self, x):
return self.linear(x)
node_occurrence = {
torch.ops.pt2e_quant.choose_qparams_affine: 1,
operator.getitem: 2,
torch.ops.pt2e_quant.quantize_affine: 1,
torch.ops.pt2e_quant.dequantize_affine: 2,
}
node_list = [
torch.ops.pt2e_quant.dequantize_affine,
torch.ops.pt2e_quant.choose_qparams_affine,
operator.getitem,
torch.ops.pt2e_quant.quantize_affine,
torch.ops.pt2e_quant.dequantize_affine,
]
example_inputs = (torch.randn(5, 128),)
self._test_quantizer(
M().eval(),
example_inputs,
BackendAQuantizer(),
node_occurrence,
node_list,
is_debug_mode=True,
)
instantiate_parametrized_tests(TestQuantizePT2E)
| TestQuantizePT2EAffineQuantization |
python | pytorch__pytorch | torch/_dynamo/_trace_wrapped_higher_order_op.py | {
"start": 6127,
"end": 9234
} | class ____(HigherOrderOperator):
def __init__(self) -> None:
super().__init__("trace_wrapped")
def __call__(self, *args: Any, **kwargs: Any) -> Any:
return super().__call__(*args, **kwargs)
# TODO(jansel): need to ensure this does not get DCEed
_trace_wrapped_op = TraceWrapped()
def _assert_meta(
grad: torch.Tensor,
size: tuple[int, ...],
stride: tuple[int, ...],
dtype: torch.dtype,
) -> torch.Tensor:
assert grad.size() == size, "size mismatch"
assert grad.stride() == stride, "stride mismatch"
assert grad.dtype == dtype, "dtype mismatch"
return grad
@_trace_wrapped_op.py_impl(ProxyTorchDispatchMode)
def inner_trace(
mode: ProxyTorchDispatchMode,
*args: Any,
bw_state: Optional[BackwardState] = None,
**kwargs: Any,
) -> Any:
def self_invoke(*args: Any, **dyn_kwargs: Any) -> Any:
with torch.no_grad():
return _trace_wrapped_op(*args, **dyn_kwargs, **kwargs)
def unwrap_proxies(x: Any) -> Any:
if isinstance(x, torch.Tensor):
return mode.tracer.unwrap_proxy(x) # type: ignore[union-attr]
if isinstance(x, (list, tuple)):
return type(x)(map(unwrap_proxies, x))
if x is None:
return None
raise AssertionError(f"unhandled type: {type(x)}")
proxy_kwargs = {}
if bw_state is not None:
assert isinstance(bw_state, BackwardState) and bw_state.proxy is not None
proxy_kwargs["bw_state"] = bw_state.proxy
out_proxy = mode.tracer.create_proxy(
"call_function",
self_invoke,
unwrap_proxies(args),
proxy_kwargs,
name="trace_wrapped",
)
if args[0] is None:
grad = args[1] # module backward hooks
else:
grad = args[0] # other backward hooks
grad = tree_map_only(torch.Tensor, torch.empty_like, grad)
track_tensor_tree(grad, out_proxy, constant=None, tracer=mode.tracer)
return grad
@_trace_wrapped_op.py_impl(FakeTensorMode)
def inner_fake(*args: Any, **kwargs: Any) -> None:
raise RuntimeError("This op should never be invoked here")
@_trace_wrapped_op.py_impl(DispatchKey.CompositeExplicitAutograd)
def _trace_wrapped_op_dense(*args: Any, fn: Any, **kwargs: Any) -> Any:
mode = _get_current_dispatch_mode()
assert mode is None, "Mode should never be enabled for CPU/CUDA key"
return fn(*args, **kwargs)
_trace_wrapped_op.py_impl(DispatchKey.Autograd)(
autograd_not_implemented(_trace_wrapped_op, deferred_error=True)
)
@_trace_wrapped_op.py_functionalize_impl
def _trace_wrapped_functionalized(ctx: Any, *args: Any, **kwargs: Any) -> Any:
unwrapped_args = ctx.unwrap_tensors(args)
with ctx.redispatch_to_next():
return ctx.wrap_tensors(_trace_wrapped_op(*unwrapped_args, **kwargs))
def autograd_function_backward_rewritten(original_backward: Any) -> Any:
def new_backward(ctx: Any, *grads: Any) -> Any:
# pyrefly: ignore [bad-assignment]
grads = [g.contiguous() for g in grads]
return original_backward(ctx, *grads)
return new_backward
| TraceWrapped |
python | pytorch__pytorch | torch/testing/_internal/inductor_utils.py | {
"start": 11039,
"end": 13564
} | class ____(GraphLowering):
"""Minimal mock graph handler for testing virtualized context."""
def __init__(self, name_to_buffer=None):
import torch._inductor.sizevars
self.sizevars = torch._inductor.sizevars.SizeVarAllocator()
self.name_to_buffer = name_to_buffer or {}
self.graph_inputs = {}
self.mutated_buffers = OrderedSet()
self.removed_buffers = OrderedSet()
self.constants = {}
self.scheduler = None
def get_dtype(self, buffer_name: str) -> torch.dtype: # noqa: ARG002
"""Return default dtype for any buffer (for testing)."""
return torch.float32
@contextlib.contextmanager
def patch_inductor_backend(
device: str,
python_wrapper_codegen: PythonWrapperCodegen = None,
custom_pass: CustomGraphModulePass = None,
custom_backend_config: ConfigModule = None,
):
"""
Patch the inductor backend for a specific device.
"""
# Make sure the backend is already registered
init_backend_registration()
# Get the original registration parameters
original_scheduling = get_scheduling_for_device(device)
original_python_wrapper = get_wrapper_codegen_for_device(device, False)
original_cpp_wrapper = get_wrapper_codegen_for_device(device, True)
original_fx_wrapper = get_wrapper_codegen_for_device(device, fx_wrapper=True)
original_custom_pass = get_custom_backend_pass_for_device(device)
original_custom_backend_config = get_custom_backend_config_for_device(device)
try:
# Register modified backend for the device
register_backend_for_device(
device,
original_scheduling,
(
python_wrapper_codegen
if python_wrapper_codegen is not None
else original_python_wrapper
),
original_cpp_wrapper,
original_fx_wrapper,
custom_pass if custom_pass is not None else original_custom_pass,
(
custom_backend_config
if custom_backend_config is not None
else original_custom_backend_config
),
)
yield
finally:
# Restore the original backend
register_backend_for_device(
device,
original_scheduling,
original_python_wrapper,
original_cpp_wrapper,
original_fx_wrapper,
original_custom_pass,
original_custom_backend_config,
)
| MockGraphHandler |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/dataclass1.py | {
"start": 1120,
"end": 1195
} | class ____:
aaa: str
ddd: InitVar[int] = 3
@dataclass(init=False)
| DC3 |
python | keras-team__keras | keras/src/ops/image.py | {
"start": 5272,
"end": 7706
} | class ____(Operation):
def __init__(self, data_format=None, *, name=None):
super().__init__(name=name)
self.data_format = backend.standardize_data_format(data_format)
def call(self, images):
return backend.image.hsv_to_rgb(images, data_format=self.data_format)
def compute_output_spec(self, images):
images_shape = list(images.shape)
dtype = images.dtype
if len(images_shape) not in (3, 4):
raise ValueError(
"Invalid images rank: expected rank 3 (single image) "
"or rank 4 (batch of images). "
f"Received: images.shape={images_shape}"
)
if not backend.is_float_dtype(dtype):
raise ValueError(
"Invalid images dtype: expected float dtype. "
f"Received: images.dtype={dtype}"
)
return KerasTensor(shape=images_shape, dtype=images.dtype)
@keras_export("keras.ops.image.hsv_to_rgb")
def hsv_to_rgb(images, data_format=None):
"""Convert HSV images to RGB.
`images` must be of float dtype, and the output is only well defined if the
values in `images` are in `[0, 1]`.
Args:
images: Input image or batch of images. Must be 3D or 4D.
data_format: A string specifying the data format of the input tensor.
It can be either `"channels_last"` or `"channels_first"`.
`"channels_last"` corresponds to inputs with shape
`(batch, height, width, channels)`, while `"channels_first"`
corresponds to inputs with shape `(batch, channels, height, width)`.
If not specified, the value will default to
`keras.config.image_data_format`.
Returns:
RGB image or batch of RGB images.
Examples:
>>> import numpy as np
>>> from keras import ops
>>> x = np.random.random((2, 4, 4, 3))
>>> y = ops.image.hsv_to_rgb(x)
>>> y.shape
(2, 4, 4, 3)
>>> x = np.random.random((4, 4, 3)) # Single HSV image
>>> y = ops.image.hsv_to_rgb(x)
>>> y.shape
(4, 4, 3)
>>> x = np.random.random((2, 3, 4, 4))
>>> y = ops.image.hsv_to_rgb(x, data_format="channels_first")
>>> y.shape
(2, 3, 4, 4)
"""
if any_symbolic_tensors((images,)):
return HSVToRGB(data_format=data_format).symbolic_call(images)
return backend.image.hsv_to_rgb(images, data_format=data_format)
| HSVToRGB |
python | pytorch__pytorch | test/dynamo/cpython/3_13/test_tuple.py | {
"start": 667,
"end": 2203
} | class ____(importlib.abc.MetaPathFinder):
def find_spec(self, fullname, path, target=None):
# Check if the import is the problematic one
if fullname in redirect_imports:
try:
# Attempt to import the standalone module
name = fullname.removeprefix("test.")
r = importlib.import_module(name)
# Redirect the module in sys.modules
sys.modules[fullname] = r
# Return a module spec from the found module
return importlib.util.find_spec(name)
except ImportError:
return None
return None
# Add the custom finder to sys.meta_path
sys.meta_path.insert(0, RedirectImportFinder())
# ======= END DYNAMO PATCH =======
from test import support
import seq_tests
import unittest
import gc
import pickle
# For tuple hashes, we normally only run a test to ensure that we get
# the same results across platforms in a handful of cases. If that's
# so, there's no real point to running more. Set RUN_ALL_HASH_TESTS to
# run more anyway. That's usually of real interest only when analyzing,
# or changing, the hash algorithm. In which case it's usually also
# most useful to set JUST_SHOW_HASH_RESULTS, to see all the results
# instead of wrestling with test "failures". See the bottom of the
# file for extensive notes on what we're testing here and why.
RUN_ALL_HASH_TESTS = False
JUST_SHOW_HASH_RESULTS = False # if RUN_ALL_HASH_TESTS, just display
| RedirectImportFinder |
python | django__django | tests/model_forms/models.py | {
"start": 3982,
"end": 4237
} | class ____(models.FileField):
def save_form_data(self, instance, data):
been_here = getattr(self, "been_saved", False)
assert not been_here, "save_form_data called more than once"
setattr(self, "been_saved", True)
| CustomFileField |
python | jazzband__django-oauth-toolkit | oauth2_provider/views/generic.py | {
"start": 1132,
"end": 1317
} | class ____(ScopedResourceMixin, ClientProtectedResourceView):
"""Impose scope restrictions if client protection fallsback to access token."""
pass
| ClientProtectedScopedResourceView |
python | ray-project__ray | python/ray/llm/tests/batch/cpu/stages/test_stage_base.py | {
"start": 1087,
"end": 3659
} | class ____:
class SimpleUDF(StatefulStageUDF):
def __init__(
self,
data_column: str,
expected_input_keys: Optional[List[str]] = None,
udf_output_missing_idx_in_batch_column: bool = False,
):
super().__init__(data_column, expected_input_keys)
self.udf_output_missing_idx_in_batch_column = (
udf_output_missing_idx_in_batch_column
)
async def udf(
self, rows: list[Dict[str, Any]]
) -> AsyncIterator[Dict[str, Any]]:
# Intentionally output in a reversed order to test OOO.
for row in rows[::-1]:
ret = {"processed": row["value"] * 2}
if not self.udf_output_missing_idx_in_batch_column:
ret[self.IDX_IN_BATCH_COLUMN] = row[self.IDX_IN_BATCH_COLUMN]
yield ret
@pytest.mark.asyncio
async def test_basic_processing(self):
udf = self.SimpleUDF(data_column="__data", expected_input_keys=["value"])
batch = {
"__data": [{"value": 1, "extra": 10}, {"value": 2, "extra": 20}],
}
results = []
async for result in udf(batch):
results.extend(result["__data"])
assert len(results) == 2
for data in results:
val = data["value"]
assert data["processed"] == val * 2
assert data["extra"] == 10 * val
assert data["value"] == val
@pytest.mark.asyncio
async def test_missing_data_column(self):
udf = self.SimpleUDF(data_column="__data", expected_input_keys=["value"])
batch = {"extra": ["a"]}
with pytest.raises(ValueError):
async for _ in udf(batch):
pass
@pytest.mark.asyncio
async def test_missing_required_key(self):
udf = self.SimpleUDF(data_column="__data", expected_input_keys=["value"])
batch = {"__data": [{"wrong_key": 1}]}
with pytest.raises(ValueError):
async for _ in udf(batch):
pass
@pytest.mark.asyncio
async def test_missing_idx_in_batch_column(self):
udf = self.SimpleUDF(
data_column="__data",
expected_input_keys=["value"],
udf_output_missing_idx_in_batch_column=True,
)
batch = {"__data": [{"value": 1}]}
with pytest.raises(ValueError):
async for _ in udf(batch):
pass
if __name__ == "__main__":
sys.exit(pytest.main(["-v", __file__]))
| TestStatefulStageUDF |
python | keras-team__keras | keras/src/layers/layer_test.py | {
"start": 538,
"end": 949
} | class ____:
"""Mock remat by returning a wrapper Mock calling the original function"""
def __init__(self):
self.rematted_functions = {}
def __call__(self, func):
if func in self.rematted_functions:
return self.rematted_functions[func]
wrapped_func = mock.Mock(wraps=func)
self.rematted_functions[func] = wrapped_func
return wrapped_func
| MockRemat |
python | allegroai__clearml | clearml/backend_api/services/v2_13/events.py | {
"start": 14489,
"end": 20337
} | class ____(NonStrictDataModel):
"""
An entire plot (not single datapoint) and it's layout.
Used for plotting ROC curves, confidence matrices, etc. when evaluating the net.
:param timestamp: Epoch milliseconds UTC, will be set by the server if not set.
:type timestamp: float
:param task: Task ID (required)
:type task: str
:param iter: Iteration
:type iter: int
:param metric: Metric name, e.g. 'count', 'loss', 'accuracy'
:type metric: str
:param variant: E.g. 'class_1', 'total', 'average
:type variant: str
:param plot_str: An entire plot (not single datapoint) and it's layout. Used
for plotting ROC curves, confidence matrices, etc. when evaluating the net.
:type plot_str: str
:param skip_validation: If set then plot_str is not checked for a valid json.
The default is False
:type skip_validation: bool
"""
_schema = {
"description": " An entire plot (not single datapoint) and it's layout.\n Used for plotting ROC curves, confidence matrices, etc. when evaluating the net.",
"properties": {
"iter": {"description": "Iteration", "type": "integer"},
"metric": {
"description": "Metric name, e.g. 'count', 'loss', 'accuracy'",
"type": "string",
},
"plot_str": {
"description": "An entire plot (not single datapoint) and it's layout.\n Used for plotting ROC curves, confidence matrices, etc. when evaluating the net.\n ",
"type": "string",
},
"skip_validation": {
"description": "If set then plot_str is not checked for a valid json. The default is False",
"type": "boolean",
},
"task": {"description": "Task ID (required)", "type": "string"},
"timestamp": {
"description": "Epoch milliseconds UTC, will be set by the server if not set.",
"type": ["number", "null"],
},
"type": {"const": "plot", "description": "'plot'"},
"variant": {
"description": "E.g. 'class_1', 'total', 'average",
"type": "string",
},
},
"required": ["task", "type"],
"type": "object",
}
def __init__(
self,
task: str,
timestamp: Optional[float] = None,
iter: Optional[int] = None,
metric: Optional[str] = None,
variant: Optional[str] = None,
plot_str: Optional[str] = None,
skip_validation: Optional[bool] = None,
**kwargs: Any
) -> None:
super(MetricsPlotEvent, self).__init__(**kwargs)
self.timestamp = timestamp
self.task = task
self.iter = iter
self.metric = metric
self.variant = variant
self.plot_str = plot_str
self.skip_validation = skip_validation
@schema_property("timestamp")
def timestamp(self) -> Optional[float]:
return self._property_timestamp
@timestamp.setter
def timestamp(self, value: Optional[float]) -> None:
if value is None:
self._property_timestamp = None
return
self.assert_isinstance(value, "timestamp", six.integer_types + (float,))
self._property_timestamp = value
@schema_property("type")
def type(self) -> Any:
return "plot"
@schema_property("task")
def task(self) -> str:
return self._property_task
@task.setter
def task(self, value: str) -> None:
if value is None:
self._property_task = None
return
self.assert_isinstance(value, "task", six.string_types)
self._property_task = value
@schema_property("iter")
def iter(self) -> Optional[int]:
return self._property_iter
@iter.setter
def iter(self, value: Optional[int]) -> None:
if value is None:
self._property_iter = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "iter", six.integer_types)
self._property_iter = value
@schema_property("metric")
def metric(self) -> Optional[str]:
return self._property_metric
@metric.setter
def metric(self, value: Optional[str]) -> None:
if value is None:
self._property_metric = None
return
self.assert_isinstance(value, "metric", six.string_types)
self._property_metric = value
@schema_property("variant")
def variant(self) -> Optional[str]:
return self._property_variant
@variant.setter
def variant(self, value: Optional[str]) -> None:
if value is None:
self._property_variant = None
return
self.assert_isinstance(value, "variant", six.string_types)
self._property_variant = value
@schema_property("plot_str")
def plot_str(self) -> Optional[str]:
return self._property_plot_str
@plot_str.setter
def plot_str(self, value: Optional[str]) -> None:
if value is None:
self._property_plot_str = None
return
self.assert_isinstance(value, "plot_str", six.string_types)
self._property_plot_str = value
@schema_property("skip_validation")
def skip_validation(self) -> Optional[bool]:
return self._property_skip_validation
@skip_validation.setter
def skip_validation(self, value: Optional[bool]) -> None:
if value is None:
self._property_skip_validation = None
return
self.assert_isinstance(value, "skip_validation", (bool,))
self._property_skip_validation = value
| MetricsPlotEvent |
python | encode__django-rest-framework | rest_framework/fields.py | {
"start": 48649,
"end": 51578
} | class ____(Field):
default_error_messages = {
'invalid': _('Duration has wrong format. Use one of these formats instead: {format}.'),
'max_value': _('Ensure this value is less than or equal to {max_value}.'),
'min_value': _('Ensure this value is greater than or equal to {min_value}.'),
'overflow': _('The number of days must be between {min_days} and {max_days}.'),
}
def __init__(self, *, format=empty, **kwargs):
self.max_value = kwargs.pop('max_value', None)
self.min_value = kwargs.pop('min_value', None)
if format is not empty:
if format is None or (isinstance(format, str) and format.lower() in (ISO_8601, DJANGO_DURATION_FORMAT)):
self.format = format
elif isinstance(format, str):
raise ValueError(
f"Unknown duration format provided, got '{format}'"
" while expecting 'django', 'iso-8601' or `None`."
)
else:
raise TypeError(
"duration format must be either str or `None`,"
f" not {type(format).__name__}"
)
super().__init__(**kwargs)
if self.max_value is not None:
message = lazy_format(self.error_messages['max_value'], max_value=self.max_value)
self.validators.append(
MaxValueValidator(self.max_value, message=message))
if self.min_value is not None:
message = lazy_format(self.error_messages['min_value'], min_value=self.min_value)
self.validators.append(
MinValueValidator(self.min_value, message=message))
def to_internal_value(self, value):
if isinstance(value, datetime.timedelta):
return value
try:
parsed = parse_duration(str(value))
except OverflowError:
self.fail('overflow', min_days=datetime.timedelta.min.days, max_days=datetime.timedelta.max.days)
if parsed is not None:
return parsed
self.fail('invalid', format='[DD] [HH:[MM:]]ss[.uuuuuu]')
def to_representation(self, value):
output_format = getattr(self, 'format', api_settings.DURATION_FORMAT)
if output_format is None:
return value
if isinstance(output_format, str):
if output_format.lower() == ISO_8601:
return duration_iso_string(value)
if output_format.lower() == DJANGO_DURATION_FORMAT:
return duration_string(value)
raise ValueError(
f"Unknown duration format provided, got '{output_format}'"
" while expecting 'django', 'iso-8601' or `None`."
)
raise TypeError(
"duration format must be either str or `None`,"
f" not {type(output_format).__name__}"
)
# Choice types...
| DurationField |
python | pandas-dev__pandas | asv_bench/benchmarks/timeseries.py | {
"start": 3490,
"end": 4095
} | class ____:
params = [date_range, period_range, timedelta_range]
param_names = ["time_index"]
def setup(self, time_index):
N = 10**6
if time_index is timedelta_range:
self.idx = time_index(start=0, freq="min", periods=N)
else:
self.idx = time_index(start="20140101", freq="min", periods=N)
self.exit = 10000
def time_iter(self, time_index):
for _ in self.idx:
pass
def time_iter_preexit(self, time_index):
for i, _ in enumerate(self.idx):
if i > self.exit:
break
| Iteration |
python | dateutil__dateutil | src/dateutil/tz/__init__.py | {
"start": 325,
"end": 444
} | class ____(Warning):
"""Warning raised when time zones are parsed from deprecated formats."""
| DeprecatedTzFormatWarning |
python | chroma-core__chroma | chromadb/errors.py | {
"start": 1448,
"end": 1574
} | class ____(ChromaError):
@classmethod
@overrides
def name(cls) -> str:
return "InvalidUUID"
| InvalidUUIDError |
python | getsentry__sentry | tests/sentry/workflow_engine/endpoints/validators/test_base_workflow.py | {
"start": 13755,
"end": 28078
class TestWorkflowValidatorUpdate(TestCase):
    """Tests for ``WorkflowValidator.update`` on a persisted Workflow.

    Covers updating scalar properties, trigger condition groups, action
    filters, individual data conditions, and actions — including adds,
    modifications, removals, and an ownership check on condition-group ids.

    NOTE(review): every test method accepts ``mock_action_validator`` — a
    class- or module-level ``mock.patch`` decorator presumably injects it;
    the decorator is outside this excerpt, so confirm against the full file.
    """

    def setUp(self) -> None:
        # Build one valid workflow through the validator itself so the update
        # tests start from a real persisted Workflow plus its serialized form.
        self.context = {
            "organization": self.organization,
            "request": self.make_request(),
        }
        self.integration, self.org_integration = self.create_provider_integration_for(
            provider="slack", organization=self.organization, user=self.user
        )
        self.action_filters = [
            {
                "actions": [
                    {
                        "type": Action.Type.SLACK,
                        "config": {
                            "target_identifier": "foo",
                            "target_display": "bar",
                            "target_type": "specific",
                        },
                        "data": {},
                        "integrationId": self.integration.id,
                    }
                ],
                "logicType": "any",
                "conditions": [],
                "organizationId": self.organization.id,
            }
        ]
        self.valid_data = {
            "name": "test",
            "enabled": True,
            "actionFilters": self.action_filters,
            "config": {
                "frequency": 30,
            },
            "triggers": {
                "logicType": "any",
                "conditions": [
                    {
                        "type": Condition.EQUAL,
                        "comparison": 1,
                        "condition_result": True,
                    },
                ],
            },
        }
        validator = WorkflowValidator(
            data=self.valid_data,
            context=self.context,
        )
        # Action validation goes through the registry; patch it so creation
        # does not depend on the real per-provider validator translators.
        with mock.patch(
            "sentry.notifications.notification_action.registry.action_validator_registry.get",
            return_value=MockActionValidatorTranslator,
        ):
            validator.is_valid(raise_exception=True)
            self.workflow = validator.create(validator.validated_data)
        self.context["workflow"] = self.workflow
        # Round-trip through the serializer so tests can mutate a payload
        # shaped exactly like what the API would receive on update.
        serializer = WorkflowSerializer()
        attrs = serializer.get_attrs([self.workflow], self.user)
        self.valid_saved_data = serializer.serialize(self.workflow, attrs[self.workflow], self.user)

    def test_update_property(self, mock_action_validator: mock.MagicMock) -> None:
        # Simple scalar field update: name changes, row identity is preserved.
        self.valid_data["name"] = "Update Test"
        validator = WorkflowValidator(data=self.valid_data, context=self.context)
        assert validator.is_valid() is True
        workflow = validator.update(self.workflow, validator.validated_data)
        assert workflow.id == self.workflow.id
        assert workflow.name == "Update Test"

    def test_update__remove_trigger_conditions(self, mock_action_validator: mock.MagicMock) -> None:
        # Emptying the trigger's condition list keeps the condition group but
        # deletes all of its conditions.
        assert self.workflow.when_condition_group
        self.valid_saved_data["triggers"] = {
            "id": self.workflow.when_condition_group.id,
            "logicType": "any",
            "conditions": [],
        }
        validator = WorkflowValidator(data=self.valid_saved_data, context=self.context)
        assert validator.is_valid() is True
        validator.update(self.workflow, validator.validated_data)
        self.workflow.refresh_from_db()
        assert self.workflow.when_condition_group is not None
        assert self.workflow.when_condition_group.conditions.count() == 0

    def test_update__hack_attempt_to_override_different_trigger_condition(
        self, mock_action_validator: mock.MagicMock
    ) -> None:
        # Supplying a condition-group id that does not belong to this workflow
        # must be rejected rather than silently re-parented.
        fake_dcg = DataConditionGroup.objects.create(
            organization=self.organization,
            logic_type="any",
        )
        self.valid_saved_data["triggers"] = {
            "id": fake_dcg.id,
            "logicType": "any",
            "conditions": [],
        }
        validator = WorkflowValidator(data=self.valid_saved_data, context=self.context)
        assert validator.is_valid() is True
        with pytest.raises(ValidationError):
            validator.update(self.workflow, validator.validated_data)

    def test_update__remove_action_filter(self, mock_action_validator: mock.MagicMock) -> None:
        # An empty actionFilters list removes every workflow<->group link.
        self.valid_saved_data["actionFilters"] = []
        validator = WorkflowValidator(data=self.valid_saved_data, context=self.context)
        assert validator.is_valid() is True
        validator.update(self.workflow, validator.validated_data)
        self.workflow.refresh_from_db()
        assert self.workflow.workflowdataconditiongroup_set.count() == 0

    def test_update__add_new_filter(self, mock_action_validator: mock.MagicMock) -> None:
        # Appending a filter without an id creates a new condition group and
        # its actions; camelCase config keys are normalized to snake_case.
        self.valid_saved_data["actionFilters"].append(
            {
                "actions": [
                    {
                        "type": Action.Type.SLACK,
                        "config": {
                            "targetIdentifier": "bar",
                            "targetDisplay": "baz",
                            "targetType": "specific",
                        },
                        "data": {},
                        "integrationId": self.integration.id,
                    }
                ],
                "logicType": "all",
                "conditions": [],
                "organizationId": self.organization.id,
            }
        )
        validator = WorkflowValidator(data=self.valid_saved_data, context=self.context)
        assert validator.is_valid() is True
        validator.update(self.workflow, validator.validated_data)
        self.workflow.refresh_from_db()
        assert self.workflow.workflowdataconditiongroup_set.count() == 2
        new_action_filter = (
            WorkflowDataConditionGroup.objects.filter(workflow=self.workflow)
            .order_by("-date_added")
            .first()
        )
        assert new_action_filter is not None
        assert new_action_filter.condition_group is not None
        new_actions = Action.objects.filter(
            dataconditiongroupaction__condition_group__in=[new_action_filter.condition_group.id]
        )
        assert new_actions.count() == 1
        assert new_actions[0].type == Action.Type.SLACK
        # "specific" is stored as its enum integer value (0).
        assert new_actions[0].config == {
            "target_identifier": "bar",
            "target_display": "baz",
            "target_type": 0,
        }

    def test_update__remove_one_filter(self, mock_action_validator: mock.MagicMock) -> None:
        # Configuration for the test: attach a second (empty) filter group,
        # then re-serialize so the payload reflects both.
        self.workflow.workflowdataconditiongroup_set.create(
            condition_group=DataConditionGroup.objects.create(
                organization=self.organization,
                logic_type="any",
            )
        )
        assert self.workflow.workflowdataconditiongroup_set.count() == 2
        serializer = WorkflowSerializer()
        attrs = serializer.get_attrs([self.workflow], self.user)
        self.valid_saved_data = serializer.serialize(self.workflow, attrs[self.workflow], self.user)
        self.valid_saved_data["actionFilters"].pop(0)
        validator = WorkflowValidator(data=self.valid_saved_data, context=self.context)
        assert validator.is_valid() is True
        # The evaluation: omitting a filter from the payload deletes it.
        validator.update(self.workflow, validator.validated_data)
        self.workflow.refresh_from_db()
        assert self.workflow.workflowdataconditiongroup_set.count() == 1

    def _get_first_trigger_condition(self, workflow: Workflow) -> DataCondition:
        # Helper: fetch the first trigger condition, failing loudly if the
        # workflow has no trigger group or no conditions.
        if workflow.when_condition_group is None:
            raise AssertionError("Cannot find initial condition")
        first_condition = workflow.when_condition_group.conditions.first()
        if first_condition is None:
            raise AssertionError("Cannot find initial condition")
        return first_condition

    def test_update__data_condition(self, mock_action_validator: mock.MagicMock) -> None:
        # Updating an existing condition (by id, carried in the serialized
        # payload) mutates it in place rather than recreating it.
        first_condition = self._get_first_trigger_condition(self.workflow)
        assert first_condition.comparison == 1
        updated_condition = self.valid_saved_data["triggers"]["conditions"][0]
        updated_condition["comparison"] = 2
        self.valid_saved_data["triggers"]["conditions"][0] = updated_condition
        validator = WorkflowValidator(data=self.valid_saved_data, context=self.context)
        assert validator.is_valid() is True
        validator.update(self.workflow, validator.validated_data)
        self.workflow.refresh_from_db()
        first_condition = self._get_first_trigger_condition(self.workflow)
        assert first_condition.comparison == updated_condition["comparison"]

    def test_update__remove_one_data_condition(self, mock_action_validator: mock.MagicMock) -> None:
        # Setup the test: add a second trigger condition directly in the DB.
        assert self.workflow.when_condition_group
        assert self.workflow.when_condition_group.conditions.count() == 1
        dc = self.workflow.when_condition_group.conditions.create(
            type=Condition.EQUAL,
            comparison=2,
            condition_result=False,
        )
        assert self.workflow.when_condition_group.conditions.count() == 2
        serializer = WorkflowSerializer()
        attrs = serializer.get_attrs([self.workflow], self.user)
        self.valid_saved_data = serializer.serialize(self.workflow, attrs[self.workflow], self.user)
        # Make the update: drop the original condition from the payload.
        self.valid_saved_data["triggers"]["conditions"].pop(0)
        validator = WorkflowValidator(data=self.valid_saved_data, context=self.context)
        assert validator.is_valid() is True
        validator.update(self.workflow, validator.validated_data)
        self.workflow.refresh_from_db()
        # Check the results: only the condition kept in the payload survives.
        assert self.workflow.when_condition_group
        assert self.workflow.when_condition_group.conditions.count() == 1
        assert self.workflow.when_condition_group.conditions.first() == dc

    def test_update__add_new_action(self, mock_action_validator: mock.MagicMock) -> None:
        # An action without an id in the payload is created alongside the
        # existing one.
        self.valid_saved_data["actionFilters"][0]["actions"].append(
            {
                "type": Action.Type.SLACK,
                "config": {
                    "targetIdentifier": "foo",
                    "targetDisplay": "bar",
                    "targetType": "specific",
                },
                "data": {},
                "integrationId": self.integration.id,
            }
        )
        validator = WorkflowValidator(data=self.valid_saved_data, context=self.context)
        assert validator.is_valid() is True
        validator.update(self.workflow, validator.validated_data)

    def test_update__modify_action(self, mock_action_validator: mock.MagicMock) -> None:
        # An action referenced by id is updated in place — here its type is
        # changed from SLACK to EMAIL while keeping the same row.
        workflow_condition_group = self.workflow.workflowdataconditiongroup_set.first()
        assert workflow_condition_group is not None
        action_condition_group = (
            workflow_condition_group.condition_group.dataconditiongroupaction_set.first()
        )
        assert action_condition_group is not None
        action = action_condition_group.action
        assert action.type == Action.Type.SLACK
        # Update the data for the action
        self.valid_saved_data["actionFilters"][0]["actions"] = [
            {
                "id": action.id,
                "type": Action.Type.EMAIL,
                "config": {
                    "targetIdentifier": str(self.user.id),
                    "targetType": "user",
                },
                "data": {},
            }
        ]
        validator = WorkflowValidator(data=self.valid_saved_data, context=self.context)
        assert validator.is_valid() is True
        validator.update(self.workflow, validator.validated_data)
        self.workflow.refresh_from_db()
        assert self.workflow.workflowdataconditiongroup_set.count() == 1
        workflow_condition_group = self.workflow.workflowdataconditiongroup_set.first()
        assert workflow_condition_group is not None
        action_condition_group = (
            workflow_condition_group.condition_group.dataconditiongroupaction_set.first()
        )
        assert action_condition_group is not None
        updated_action = action_condition_group.action
        assert updated_action.id == action.id
        assert updated_action.type == Action.Type.EMAIL

    def test_update__remove_one_action(self, mock_action_validator: mock.MagicMock) -> None:
        # Attach a second action directly in the DB, then update with the
        # original (one-action) payload — the extra action must be removed.
        workflow_condition_group = self.workflow.workflowdataconditiongroup_set.first()
        assert workflow_condition_group is not None
        new_action = Action.objects.create(
            type=Action.Type.EMAIL,
            config={
                "target_identifier": str(self.user.id),
                "target_type": ActionTarget.USER,
            },
            data={},
            integration_id=1,
        )
        workflow_condition_group.condition_group.dataconditiongroupaction_set.create(
            action=new_action,
        )
        # confirm there are two actions for this condition group
        assert workflow_condition_group.condition_group.dataconditiongroupaction_set.count() == 2
        # remove new_action from the groups actions
        validator = WorkflowValidator(data=self.valid_saved_data, context=self.context)
        assert validator.is_valid() is True
        validator.update(self.workflow, validator.validated_data)
        assert workflow_condition_group.condition_group.dataconditiongroupaction_set.count() == 1
        action_condition_group = (
            workflow_condition_group.condition_group.dataconditiongroupaction_set.first()
        )
        assert action_condition_group is not None
        assert action_condition_group.action.id != new_action.id
        assert action_condition_group.action.type == Action.Type.SLACK

    def test_update__remove_all_actions(self, mock_action_validator: mock.MagicMock) -> None:
        # An empty actions list removes every action link from the group
        # while keeping the group itself.
        self.valid_saved_data["actionFilters"][0]["actions"] = []
        validator = WorkflowValidator(data=self.valid_saved_data, context=self.context)
        assert validator.is_valid() is True
        validator.update(self.workflow, validator.validated_data)
        workflow_condition_group = self.workflow.workflowdataconditiongroup_set.first()
        assert workflow_condition_group is not None
        assert workflow_condition_group.condition_group.dataconditiongroupaction_set.count() == 0
| TestWorkflowValidatorUpdate |
python | tensorflow__tensorflow | tensorflow/python/saved_model/nested_structure_coder_test.py | {
"start": 21542,
"end": 21915
class UnregisteredTypeSpec(type_spec.TypeSpec):
    # Minimal TypeSpec stub for tests: deliberately NOT registered with the
    # type_spec registry, so encoding it should fail. Each abstract member is
    # satisfied with a trivial lambda — the spec has no value type, no
    # components, and serializes to an empty tuple.
    value_type = property(lambda self: None)
    _component_specs = property(lambda self: ())
    _to_components = lambda self, v: ()
    _from_components = classmethod(lambda cls, c: cls())
    _serialize = lambda self: ()
# Trivial TypeSpec class for testing.
@type_spec_registry.register("NestedStructureTest.RegisteredTypeSpec")
| UnregisteredTypeSpec |
python | python-pillow__Pillow | src/PIL/Image.py | {
"start": 107584,
"end": 114029
class ImageTransformHandler(abc.ABC):
    """
    Used as a mixin by geometry transforms
    (for use with :py:meth:`~PIL.Image.Image.transform`)
    """

    @abc.abstractmethod
    def transform(
        self,
        size: tuple[int, int],
        image: Image,
        **options: Any,
    ) -> Image:
        """Return a new image of ``size`` produced by transforming ``image``.

        :param size: Output (width, height) in pixels.
        :param image: Source image to transform.
        :param options: Transform-specific keyword options, forwarded by
            :py:meth:`~PIL.Image.Image.transform`.
        """
        pass
# --------------------------------------------------------------------
# Factories
def _check_size(size: Any) -> None:
    """Validate that ``size`` is a well-formed (width, height) pair.

    :param size: Expected to be a list or tuple of two non-negative numbers.
    :returns: None on success.
    :raises ValueError: if ``size`` is not a 2-sequence of values >= 0.
    """
    if not isinstance(size, (list, tuple)):
        raise ValueError("Size must be a list or tuple")
    if len(size) != 2:
        raise ValueError("Size must be a sequence of length 2")
    width, height = size
    if width < 0 or height < 0:
        raise ValueError("Width and height must be >= 0")
def new(
    mode: str,
    size: tuple[int, int] | list[int],
    color: float | tuple[float, ...] | str | None = 0,
) -> Image:
    """
    Creates a new image with the given mode and size.

    :param mode: The mode to use for the new image. See:
       :ref:`concept-modes`.
    :param size: A 2-tuple, containing (width, height) in pixels.
    :param color: What color to use for the image. Default is black. If given,
       this should be a single integer or floating point value for single-band
       modes, and a tuple for multi-band modes (one value per band). When
       creating RGB or HSV images, you can also use color strings as supported
       by the ImageColor module. See :ref:`colors` for more information. If the
       color is None, the image is not initialised.
    :returns: An :py:class:`~PIL.Image.Image` object.
    """

    _check_size(size)

    if color is None:
        # don't initialize — caller gets uninitialised pixel memory
        return Image()._new(core.new(mode, size))

    if isinstance(color, str):
        # css3-style specifier; resolve to a numeric value for this mode
        from . import ImageColor

        color = ImageColor.getcolor(color, mode)

    im = Image()

    if (
        mode == "P"
        and isinstance(color, (list, tuple))
        and all(isinstance(i, int) for i in color)
    ):
        color_ints: tuple[int, ...] = cast(tuple[int, ...], tuple(color))
        if len(color_ints) == 3 or len(color_ints) == 4:
            # RGB or RGBA value for a P image: allocate a palette and map the
            # color to a palette index so core.fill receives a single index.
            from . import ImagePalette

            im.palette = ImagePalette.ImagePalette()
            color = im.palette.getcolor(color_ints)

    return im._new(core.fill(mode, size, color))
def frombytes(
    mode: str,
    size: tuple[int, int],
    data: bytes | bytearray | SupportsArrayInterface,
    decoder_name: str = "raw",
    *args: Any,
) -> Image:
    """
    Create an image memory from raw pixel data held in a buffer.

    The simplest call passes just mode, size and unpacked pixel data, but any
    pixel decoder supported by PIL may be named (see
    :ref:`Writing Your Own File Codec <file-codecs>` for available decoders).

    This decodes pixel data only, not whole image files — wrap a complete
    image in :py:class:`~io.BytesIO` and use :py:func:`~PIL.Image.open`
    for that instead.

    :param mode: The image mode. See: :ref:`concept-modes`.
    :param size: The image size.
    :param data: A byte buffer containing raw data for the given mode.
    :param decoder_name: What decoder to use.
    :param args: Additional parameters for the given decoder.
    :returns: An :py:class:`~PIL.Image.Image` object.
    """

    _check_size(size)

    im = new(mode, size)
    if im.width == 0 or im.height == 0:
        # Zero-area image: there is nothing to decode into.
        return im

    # A single tuple may be passed in place of a flat argument list.
    params: Any = args[0] if len(args) == 1 and isinstance(args[0], tuple) else args
    if decoder_name == "raw" and params == ():
        params = mode
    im.frombytes(data, decoder_name, params)
    return im
def frombuffer(
    mode: str,
    size: tuple[int, int],
    data: bytes | SupportsArrayInterface,
    decoder_name: str = "raw",
    *args: Any,
) -> Image:
    """
    Creates an image memory referencing pixel data in a byte buffer.

    This function is similar to :py:func:`~PIL.Image.frombytes`, but uses data
    in the byte buffer, where possible.  This means that changes to the
    original buffer object are reflected in this image).  Not all modes can
    share memory; supported modes include "L", "RGBX", "RGBA", and "CMYK".

    Note that this function decodes pixel data only, not entire images.
    If you have an entire image file in a string, wrap it in a
    :py:class:`~io.BytesIO` object, and use :py:func:`~PIL.Image.open` to load it.

    The default parameters used for the "raw" decoder differs from that used for
    :py:func:`~PIL.Image.frombytes`. This is a bug, and will probably be fixed in a
    future release. The current release issues a warning if you do this; to disable
    the warning, you should provide the full set of parameters. See below for details.

    :param mode: The image mode. See: :ref:`concept-modes`.
    :param size: The image size.
    :param data: A bytes or other buffer object containing raw
        data for the given mode.
    :param decoder_name: What decoder to use.
    :param args: Additional parameters for the given decoder.  For the
        default encoder ("raw"), it's recommended that you provide the
        full set of parameters::

            frombuffer(mode, size, data, "raw", mode, 0, 1)

    :returns: An :py:class:`~PIL.Image.Image` object.

    .. versionadded:: 1.1.4
    """

    _check_size(size)

    # may pass tuple instead of argument list
    if len(args) == 1 and isinstance(args[0], tuple):
        args = args[0]

    if decoder_name == "raw":
        if args == ():
            # NOTE: default raw parameters differ from frombytes (see docstring).
            args = mode, 0, 1
        if args[0] in _MAPMODES:
            # Zero-copy path: map the caller's buffer directly into the image,
            # so mutations to the buffer show through the image.
            im = new(mode, (0, 0))
            im = im._new(core.map_buffer(data, size, decoder_name, 0, args))
            if mode == "P":
                from . import ImagePalette

                im.palette = ImagePalette.ImagePalette("RGB", im.im.getpalette("RGB"))
            # Shared memory: mark read-only so the image cannot write through
            # to the caller's buffer unexpectedly.
            im.readonly = 1
            return im

    # Fallback: copying path via frombytes (non-raw decoder or unmappable mode).
    return frombytes(mode, size, data, decoder_name, args)
| ImageTransformHandler |
python | google__jax | tests/array_interoperability_test.py | {
"start": 15487,
"end": 15887
class Bfloat16Test(jtu.JaxTestCase):
  """Checks JAX/TensorFlow interoperability of the bfloat16 dtype."""

  @unittest.skipIf((not tf or tf_version < (2, 5, 0)),
                   "Test requires TensorFlow 2.5.0 or newer")
  def testJaxAndTfHaveTheSameBfloat16Type(self):
    # Equal NumPy type numbers imply both libraries expose the same
    # registered bfloat16 NumPy dtype, so arrays can be exchanged directly.
    self.assertEqual(np.dtype(jnp.bfloat16).num,
                     np.dtype(tf.dtypes.bfloat16.as_numpy_dtype).num)


if __name__ == "__main__":
  absltest.main(testLoader=jtu.JaxTestLoader())
| Bfloat16Test |
python | kamyu104__LeetCode-Solutions | Python/string-compression.py | {
"start": 29,
"end": 880
class Solution(object):
    def compress(self, chars):
        """Run-length-compress ``chars`` in place and return the new length.

        Each run of a repeated character is rewritten as the character
        followed by its count's decimal digits (counts of 1 are omitted),
        e.g. ["a","a","b"] -> ["a","2","b"].

        :type chars: List[str]
        :rtype: int  (length of the compressed prefix of ``chars``)
        """
        anchor, write = 0, 0
        for read, c in enumerate(chars):
            # End of a run: either last element or the next char differs.
            if read + 1 == len(chars) or chars[read + 1] != c:
                chars[write] = chars[anchor]
                write += 1
                if read > anchor:
                    # Emit the run length's digits (least-significant first),
                    # then reverse them into place.
                    n, left = read - anchor + 1, write
                    while n > 0:
                        chars[write] = chr(n % 10 + ord('0'))
                        write += 1
                        # FIX: use floor division; the original `n /= 10`
                        # produces a float on Python 3 and never terminates
                        # cleanly at 0 for the digit loop.
                        n //= 10
                    right = write - 1
                    while left < right:
                        chars[left], chars[right] = chars[right], chars[left]
                        left += 1
                        right -= 1
                anchor = read + 1
        return write
| Solution |
python | django-haystack__django-haystack | test_haystack/test_fields.py | {
"start": 21337,
"end": 21980
class FacetDateFieldTestCase(TestCase):
    """Tests construction and value preparation of ``FacetDateField``."""

    def test_init(self):
        # Construction with either model_attr or facet_for must not raise.
        try:
            foo = FacetDateField(model_attr="foo")
            foo_exact = FacetDateField(facet_for="bar")
        # FIX: the original bare `except:` also swallowed SystemExit and
        # KeyboardInterrupt; narrow it and give fail() a useful message.
        except Exception as e:
            self.fail(f"FacetDateField construction raised: {e}")

        self.assertEqual(foo.facet_for, None)
        self.assertEqual(foo_exact.null, True)
        self.assertEqual(foo_exact.facet_for, "bar")

    def test_prepare(self):
        # prepare() should pass a date attribute through unchanged.
        mock = MockModel()
        mock.user = "daniel"
        mock.created = datetime.date(2010, 10, 30)

        created = FacetDateField(model_attr="created")
        self.assertEqual(created.prepare(mock), datetime.date(2010, 10, 30))
| FacetDateFieldTestCase |
python | kamyu104__LeetCode-Solutions | Python/largest-plus-sign.py | {
"start": 33,
"end": 1029
class Solution(object):
    def orderOfLargestPlusSign(self, N, mines):
        """Return the order of the largest axis-aligned plus sign of 1s in an
        N x N grid where ``mines`` lists the cells that are 0.

        For each cell the four directional arm lengths are folded into
        dp[i][j] = min(left, right, up, down); the answer is the grid maximum.
        O(N^2) time and space.

        :type N: int
        :type mines: List[List[int]]
        :rtype: int
        """
        # FIX: replaced Python-2-only `xrange` with `range` (identical
        # iteration semantics, also valid on Python 3) and renamed the
        # ambiguous local `l` to `arm`.
        lookup = {tuple(mine) for mine in mines}
        dp = [[0] * N for _ in range(N)]
        result = 0
        # Horizontal passes: left-to-right, then right-to-left.
        for i in range(N):
            arm = 0
            for j in range(N):
                arm = 0 if (i, j) in lookup else arm + 1
                dp[i][j] = arm
            arm = 0
            for j in reversed(range(N)):
                arm = 0 if (i, j) in lookup else arm + 1
                dp[i][j] = min(dp[i][j], arm)
        # Vertical passes: top-to-bottom, then bottom-to-top (tracking max).
        for j in range(N):
            arm = 0
            for i in range(N):
                arm = 0 if (i, j) in lookup else arm + 1
                dp[i][j] = min(dp[i][j], arm)
            arm = 0
            for i in reversed(range(N)):
                arm = 0 if (i, j) in lookup else arm + 1
                dp[i][j] = min(dp[i][j], arm)
                result = max(result, dp[i][j])
        return result
| Solution |
python | huggingface__transformers | src/transformers/models/depth_anything/modeling_depth_anything.py | {
"start": 7708,
"end": 7950
class DepthAnythingPreTrainedModel(PreTrainedModel):
    """Base class wiring Depth Anything into the PreTrainedModel machinery
    (weight init, checkpoint loading, gradient checkpointing)."""

    # Configuration class resolved for from_pretrained / save_pretrained.
    config: DepthAnythingConfig
    # Attribute name under which the base model lives in derived heads.
    base_model_prefix = "depth_anything"
    # Canonical name of the forward input (image tensor).
    main_input_name = "pixel_values"
    input_modalities = ("image",)
    supports_gradient_checkpointing = True
| DepthAnythingPreTrainedModel |
python | great-expectations__great_expectations | great_expectations/render/renderer_configuration.py | {
"start": 4499,
"end": 4652
class MetaNotes(TypedDict):
    """Notes that can be added to the meta field of an Expectation."""

    # How `content` should be rendered; presumably one of the
    # MetaNotesFormat enum values — confirm against its definition.
    format: MetaNotesFormat
    # The note texts themselves, one entry per note.
    content: List[str]
| MetaNotes |
python | kamyu104__LeetCode-Solutions | Python/set-matrix-zeroes.py | {
"start": 62,
"end": 977
class Solution(object):
    # @param matrix, a list of lists of integers
    # RETURN NOTHING, MODIFY matrix IN PLACE.
    def setZeroes(self, matrix):
        """Zero out every row and column containing a 0, in place, using the
        first row/column as marker storage (O(1) extra space)."""
        # FIX: guard empty input — the original raised IndexError on [] or
        # [[]] while scanning matrix[0].
        if not matrix or not matrix[0]:
            return
        rows, cols = len(matrix), len(matrix[0])
        # FIX: replaced Python-2-only reduce/xrange idioms with any()/range
        # (identical results, valid on Python 3, and clearer).
        first_col = any(row[0] == 0 for row in matrix)
        first_row = any(value == 0 for value in matrix[0])
        # Record zeros by marking the head of their row and column.
        for i in range(1, rows):
            for j in range(1, cols):
                if matrix[i][j] == 0:
                    matrix[i][0], matrix[0][j] = 0, 0
        # Apply the markers to the interior of the matrix.
        for i in range(1, rows):
            for j in range(1, cols):
                if matrix[i][0] == 0 or matrix[0][j] == 0:
                    matrix[i][j] = 0
        # Finally resolve the first column and row themselves.
        if first_col:
            for i in range(rows):
                matrix[i][0] = 0
        if first_row:
            for j in range(cols):
                matrix[0][j] = 0
| Solution |
python | getsentry__sentry | tests/snuba/tagstore/test_tagstore_backend.py | {
"start": 1511,
"end": 42383
} | class ____(TestCase, SnubaTestCase, SearchIssueTestMixin, PerformanceIssueTestCase):
def setUp(self) -> None:
super().setUp()
self.ts = SnubaTagStorage()
self.proj1 = self.create_project()
env1 = "test"
env2 = "test2"
self.env3 = Environment.objects.create(
organization_id=self.proj1.organization_id, name="test3"
)
self.now = timezone.now().replace(microsecond=0)
self.store_event(
data={
"event_id": "1" * 32,
"message": "message 1",
"platform": "python",
"environment": env1,
"fingerprint": ["group-1"],
"timestamp": (self.now - timedelta(seconds=1)).isoformat(),
"tags": {
"foo": "bar",
"baz": "quux",
"sentry:release": 100,
"sentry:user": "id:user1",
},
"user": {"id": "user1"},
"exception": exception,
},
project_id=self.proj1.id,
)
self.proj1group1 = self.store_event(
data={
"event_id": "2" * 32,
"message": "message 1",
"platform": "python",
"environment": env1,
"fingerprint": ["group-1"],
"timestamp": (self.now - timedelta(seconds=2)).isoformat(),
"tags": {
"foo": "bar",
"baz": "quux",
"sentry:release": 200,
"sentry:user": "id:user2",
},
"user": {"id": "user2"},
"exception": exception,
},
project_id=self.proj1.id,
).group
self.proj1group2 = self.store_event(
data={
"event_id": "3" * 32,
"message": "message 2",
"platform": "python",
"environment": env1,
"fingerprint": ["group-2"],
"timestamp": (self.now - timedelta(seconds=2)).isoformat(),
"tags": {"browser": "chrome", "sentry:user": "id:user1"},
"user": {"id": "user1"},
},
project_id=self.proj1.id,
).group
self.store_event(
data={
"event_id": "4" * 32,
"message": "message2",
"platform": "python",
"environment": env2,
"fingerprint": ["group-1"],
"timestamp": (self.now - timedelta(seconds=2)).isoformat(),
"tags": {"foo": "bar"},
},
project_id=self.proj1.id,
)
self.proj1env1 = Environment.objects.get(name=env1)
self.proj1env2 = Environment.objects.get(name=env2)
@cached_property
def perf_group_and_env(self):
env_name = "test"
env = Environment.objects.get(name=env_name)
event_data = load_data("transaction-n-plus-one", timestamp=before_now(minutes=10))
event_data["environment"] = env_name
event = self.create_performance_issue(
event_data={
**event_data,
"event_id": "a" * 32,
"timestamp": (self.now - timedelta(seconds=1)).isoformat(),
"start_timestamp": (self.now - timedelta(seconds=1)).isoformat(),
"tags": {"foo": "bar", "biz": "baz"},
"release": "releaseme",
}
)
self.create_performance_issue(
event_data={
**event_data,
"event_id": "b" * 32,
"timestamp": (self.now - timedelta(seconds=2)).isoformat(),
"start_timestamp": (self.now - timedelta(seconds=2)).isoformat(),
"tags": {"foo": "quux"},
"release": "releaseme",
}
)
perf_group = event.group
return perf_group, env
@cached_property
def generic_group_and_env(self):
env = Environment.objects.get(name="test")
_, _, group_info = self.store_search_issue(
self.project.id,
self.user.id,
[f"{ProfileFileIOGroupType.type_id}-group1"],
env.name,
timezone.now().replace(hour=0, minute=0, second=0) + timedelta(minutes=1),
[("foo", "bar"), ("biz", "baz")],
"releaseme",
)
assert group_info is not None
return group_info.group, env
def test_get_group_tag_keys_and_top_values(self) -> None:
result = list(
self.ts.get_group_tag_keys_and_top_values(
self.proj1group1,
[self.proj1env1.id],
tenant_ids={"referrer": "r", "organization_id": 1234},
)
)
tags = [r.key for r in result]
assert set(tags) == {"foo", "baz", "environment", "sentry:release", "sentry:user", "level"}
result.sort(key=lambda r: r.key)
assert result[0].key == "baz"
assert result[0].top_values[0].value == "quux"
assert result[0].count == 2
assert result[4].key == "sentry:release"
assert result[4].count == 2
top_release_values = result[4].top_values
assert len(top_release_values) == 2
assert {v.value for v in top_release_values} == {"100", "200"}
assert all(v.times_seen == 1 for v in top_release_values)
# Now with only a specific set of keys,
result = list(
self.ts.get_group_tag_keys_and_top_values(
self.proj1group1,
[self.proj1env1.id],
keys=["environment", "sentry:release"],
tenant_ids={"referrer": "r", "organization_id": 1234},
)
)
tags = [r.key for r in result]
assert set(tags) == {"environment", "sentry:release"}
result.sort(key=lambda r: r.key)
assert result[0].key == "environment"
assert result[0].top_values[0].value == "test"
assert result[1].key == "sentry:release"
top_release_values = result[1].top_values
assert len(top_release_values) == 2
assert {v.value for v in top_release_values} == {"100", "200"}
assert all(v.times_seen == 1 for v in top_release_values)
def test_get_group_tag_keys_and_top_values_perf_issue(self) -> None:
perf_group, env = self.perf_group_and_env
result = list(
self.ts.get_group_tag_keys_and_top_values(
perf_group,
[env.id],
tenant_ids={"referrer": "r", "organization_id": 1234},
)
)
tags = [r.key for r in result]
assert set(tags) == {
"biz",
"browser",
"browser.name",
"client_os",
"client_os.name",
"device",
"device.family",
"environment",
"foo",
"level",
"runtime",
"runtime.name",
"sentry:release",
"sentry:user",
"transaction",
"url",
}
result.sort(key=lambda r: r.key)
assert result[0].key == "biz"
# Include-empty-values may surface "" alongside "baz"; don't rely on order
biz_values = {tv.value: tv.times_seen for tv in result[0].top_values}
assert biz_values.get("baz") == 1
assert result[0].count == sum(biz_values.values())
assert result[12].key == "sentry:release"
assert result[12].count == 2
top_release_values = result[12].top_values
assert len(top_release_values) == 1
assert {v.value for v in top_release_values} == {"releaseme"}
assert all(v.times_seen == 2 for v in top_release_values)
# Now with only a specific set of keys,
result = list(
self.ts.get_group_tag_keys_and_top_values(
perf_group,
[env.id],
keys=["environment", "sentry:release"],
tenant_ids={"referrer": "r", "organization_id": 1234},
)
)
tags = [r.key for r in result]
assert set(tags) == {"environment", "sentry:release"}
result.sort(key=lambda r: r.key)
assert result[0].key == "environment"
assert result[0].top_values[0].value == "test"
assert result[1].key == "sentry:release"
top_release_values = result[1].top_values
assert len(top_release_values) == 1
assert {v.value for v in top_release_values} == {"releaseme"}
assert all(v.times_seen == 2 for v in top_release_values)
def test_get_group_tag_keys_and_top_values_generic_issue(self) -> None:
group, env = self.generic_group_and_env
result = list(
self.ts.get_group_tag_keys_and_top_values(
group, [env.id], tenant_ids={"referrer": "r", "organization_id": 1234}
)
)
tags = [r.key for r in result]
assert set(tags) == {"foo", "biz", "environment", "sentry:user", "level", "sentry:release"}
result.sort(key=lambda r: r.key)
assert result[0].key == "biz"
assert result[0].top_values[0].value == "baz"
assert result[0].count == 1
assert result[4].key == "sentry:release"
assert result[4].count == 1
top_release_values = result[4].top_values
assert len(top_release_values) == 1
assert {v.value for v in top_release_values} == {"releaseme"}
assert all(v.times_seen == 1 for v in top_release_values)
# Now with only a specific set of keys,
result = list(
self.ts.get_group_tag_keys_and_top_values(
group,
[env.id],
keys=["environment", "sentry:release"],
tenant_ids={"referrer": "r", "organization_id": 1234},
)
)
tags = [r.key for r in result]
assert set(tags) == {"environment", "sentry:release"}
result.sort(key=lambda r: r.key)
assert result[0].key == "environment"
assert result[0].top_values[0].value == "test"
assert result[1].key == "sentry:release"
top_release_values = result[1].top_values
assert len(top_release_values) == 1
assert {v.value for v in top_release_values} == {"releaseme"}
assert all(v.times_seen == 1 for v in top_release_values)
# assert False
def test_get_top_group_tag_values(self) -> None:
resp = self.ts.get_top_group_tag_values(
self.proj1group1,
self.proj1env1.id,
"foo",
1,
tenant_ids={"referrer": "r", "organization_id": 1234},
)
assert len(resp) == 1
assert resp[0].times_seen == 2
assert resp[0].key == "foo"
assert resp[0].value == "bar"
assert resp[0].group_id == self.proj1group1.id
def test_get_top_group_tag_values_perf(self) -> None:
perf_group, env = self.perf_group_and_env
resp = self.ts.get_top_group_tag_values(
perf_group,
env.id,
"foo",
2,
tenant_ids={"referrer": "r", "organization_id": 1234},
)
assert len(resp) == 2
assert resp[0].times_seen == 1
assert resp[0].key == "foo"
assert resp[0].value == "bar"
assert resp[0].group_id == perf_group.id
assert resp[1].times_seen == 1
assert resp[1].key == "foo"
assert resp[1].value == "quux"
assert resp[1].group_id == perf_group.id
def test_get_top_group_tag_values_generic(self) -> None:
group, env = self.generic_group_and_env
resp = self.ts.get_top_group_tag_values(
group, env.id, "foo", 1, tenant_ids={"referrer": "r", "organization_id": 1234}
)
assert len(resp) == 1
assert resp[0].times_seen == 1
assert resp[0].key == "foo"
assert resp[0].value == "bar"
assert resp[0].group_id == group.id
def test_get_group_tag_value_count(self) -> None:
assert (
self.ts.get_group_tag_value_count(
self.proj1group1,
self.proj1env1.id,
"foo",
tenant_ids={"referrer": "r", "organization_id": 1234},
)
== 2
)
def test_get_group_tag_value_count_perf(self) -> None:
perf_group, env = self.perf_group_and_env
assert (
self.ts.get_group_tag_value_count(
perf_group, env.id, "foo", {"referrer": "r", "organization_id": 1234}
)
== 2
)
def test_get_group_tag_value_count_generic(self) -> None:
group, env = self.generic_group_and_env
assert (
self.ts.get_group_tag_value_count(
group, env.id, "foo", {"referrer": "r", "organization_id": 1234}
)
== 1
)
def test_get_tag_keys(self) -> None:
expected_keys = {
"baz",
"browser",
"environment",
"foo",
"sentry:release",
"sentry:user",
"level",
}
keys = {
k.key: k
for k in self.ts.get_tag_keys(
project_id=self.proj1.id,
environment_id=self.proj1env1.id,
tenant_ids={"referrer": "r", "organization_id": 1234},
)
}
assert set(keys) == expected_keys
keys = {
k.key: k
for k in self.ts.get_tag_keys(
project_id=self.proj1.id,
environment_id=self.proj1env1.id,
include_values_seen=True,
tenant_ids={"referrer": "r", "organization_id": 1234},
)
}
assert set(keys) == expected_keys
def test_get_tag_keys_removed_from_denylist(self) -> None:
denylist_keys = frozenset(["browser", "sentry:release"])
expected_keys = {
"baz",
"environment",
"foo",
"sentry:user",
"level",
}
keys = {
k.key: k
for k in self.ts.get_tag_keys(
project_id=self.proj1.id,
environment_id=self.proj1env1.id,
denylist=denylist_keys,
tenant_ids={"referrer": "r", "organization_id": 1234},
)
}
assert set(keys) == expected_keys
keys = {
k.key: k
for k in self.ts.get_tag_keys(
project_id=self.proj1.id,
environment_id=self.proj1env1.id,
tenant_ids={"referrer": "r", "organization_id": 1234},
)
}
expected_keys |= {"browser", "sentry:release"}
assert set(keys) == expected_keys
def test_get_group_tag_key(self) -> None:
with pytest.raises(GroupTagKeyNotFound):
self.ts.get_group_tag_key(
group=self.proj1group1,
environment_id=self.proj1env1.id,
key="notreal",
tenant_ids={"referrer": "r", "organization_id": 1234},
)
assert (
self.ts.get_group_tag_key(
group=self.proj1group1,
environment_id=self.proj1env1.id,
key="foo",
tenant_ids={"referrer": "r", "organization_id": 1234},
).key
== "foo"
)
keys = {
k.key: k
for k in self.ts.get_group_tag_keys(
self.proj1group1,
[self.proj1env1.id],
tenant_ids={"referrer": "r", "organization_id": 1234},
)
}
assert set(keys) == {"baz", "environment", "foo", "sentry:release", "sentry:user", "level"}
def test_get_group_tag_key_perf(self) -> None:
perf_group, env = self.perf_group_and_env
with pytest.raises(GroupTagKeyNotFound):
self.ts.get_group_tag_key(
group=perf_group,
environment_id=env.id,
key="notreal",
tenant_ids={"referrer": "r", "organization_id": 1234},
)
assert (
self.ts.get_group_tag_key(
group=perf_group,
environment_id=self.proj1env1.id,
key="foo",
tenant_ids={"referrer": "r", "organization_id": 1234},
).key
== "foo"
)
keys = {
k.key: k
for k in self.ts.get_group_tag_keys(
perf_group,
[env.id],
tenant_ids={"referrer": "r", "organization_id": 1234},
)
}
assert set(keys) == {
"biz",
"browser",
"browser.name",
"client_os",
"client_os.name",
"device",
"device.family",
"environment",
"foo",
"level",
"runtime",
"runtime.name",
"sentry:release",
"sentry:user",
"transaction",
"url",
}
def test_get_group_tag_key_generic(self) -> None:
group, env = self.generic_group_and_env
with pytest.raises(GroupTagKeyNotFound):
self.ts.get_group_tag_key(
group=group,
environment_id=env.id,
key="notreal",
tenant_ids={"referrer": "r", "organization_id": 1234},
)
assert (
self.ts.get_group_tag_key(
group=group,
environment_id=self.proj1env1.id,
key="foo",
tenant_ids={"referrer": "r", "organization_id": 1234},
).key
== "foo"
)
keys = {
k.key: k
for k in self.ts.get_group_tag_keys(
group, [env.id], tenant_ids={"referrer": "r", "organization_id": 1234}
)
}
assert set(keys) == {"biz", "environment", "foo", "sentry:user", "level", "sentry:release"}
def test_get_tag_key(self) -> None:
with pytest.raises(TagKeyNotFound):
self.ts.get_tag_key(
project_id=self.proj1.id,
environment_id=self.proj1env1.id,
key="notreal",
tenant_ids={"referrer": "r", "organization_id": 1234},
)
def test_get_tag_value_label(self) -> None:
assert self.ts.get_tag_value_label("foo", "notreal") == "notreal"
assert self.ts.get_tag_value_label("sentry:user", None) is None
assert self.ts.get_tag_value_label("sentry:user", "id:stuff") == "stuff"
assert self.ts.get_tag_value_label("sentry:user", "email:stuff") == "stuff"
assert self.ts.get_tag_value_label("sentry:user", "username:stuff") == "stuff"
assert self.ts.get_tag_value_label("sentry:user", "ip:stuff") == "stuff"
def test_get_groups_user_counts(self) -> None:
assert self.ts.get_groups_user_counts(
project_ids=[self.proj1.id],
group_ids=[self.proj1group1.id, self.proj1group2.id],
environment_ids=[self.proj1env1.id],
tenant_ids={"referrer": "r", "organization_id": 1234},
) == {self.proj1group1.id: 2, self.proj1group2.id: 1}
# test filtering by date range where there shouldn't be results
assert (
self.ts.get_groups_user_counts(
project_ids=[self.proj1.id],
group_ids=[self.proj1group1.id, self.proj1group2.id],
environment_ids=[self.proj1env1.id],
start=self.now - timedelta(days=5),
end=self.now - timedelta(days=4),
tenant_ids={"referrer": "r", "organization_id": 1234},
)
== {}
)
def test_get_groups_user_counts_no_environments(self) -> None:
self.store_event(
data={
"event_id": "3" * 32,
"message": "message 1",
"platform": "python",
"fingerprint": ["group-1"],
"timestamp": (self.now - timedelta(seconds=1)).isoformat(),
"tags": {
"foo": "bar",
"baz": "quux",
"sentry:release": 100,
"sentry:user": "id:user3",
},
"user": {"id": "user3"},
"exception": exception,
},
project_id=self.proj1.id,
)
assert self.ts.get_groups_user_counts(
project_ids=[self.proj1.id],
group_ids=[self.proj1group1.id, self.proj1group2.id],
environment_ids=None,
tenant_ids={"referrer": "r", "organization_id": 1234},
) == {self.proj1group1.id: 3, self.proj1group2.id: 1}
def test_get_release_tags(self) -> None:
tags = list(
self.ts.get_release_tags(self.proj1.organization_id, [self.proj1.id], None, ["100"])
)
assert len(tags) == 1
one_second_ago = self.now - timedelta(seconds=1)
assert tags[0].last_seen == one_second_ago
assert tags[0].first_seen == one_second_ago
assert tags[0].times_seen == 1
assert tags[0].key == "sentry:release"
def test_get_release_tags_uses_release_project_environment(self) -> None:
tags = list(
self.ts.get_release_tags(self.proj1.organization_id, [self.proj1.id], None, ["100"])
)
assert len(tags) == 1
one_second_ago = self.now - timedelta(seconds=1)
assert tags[0].last_seen == one_second_ago
assert tags[0].first_seen == one_second_ago
assert tags[0].times_seen == 1
one_day_ago = self.now - timedelta(days=1)
two_days_ago = self.now - timedelta(days=2)
self.store_event(
data={
"event_id": "5" * 32,
"message": "message3",
"platform": "python",
"environment": None,
"fingerprint": ["group-1"],
"timestamp": one_day_ago.isoformat(),
"tags": {
"sentry:release": 100,
},
},
project_id=self.proj1.id,
)
release = Release.objects.create(version="100", organization=self.organization)
ReleaseProjectEnvironment.objects.create(
release_id=release.id,
project_id=self.proj1.id,
environment_id=self.env3.id,
first_seen=one_day_ago,
)
self.store_event(
data={
"event_id": "6" * 32,
"message": "message3",
"platform": "python",
"environment": None,
"fingerprint": ["group-1"],
"timestamp": two_days_ago.isoformat(),
"tags": {
"sentry:release": 100,
},
},
project_id=self.proj1.id,
)
tags = list(
self.ts.get_release_tags(self.proj1.organization_id, [self.proj1.id], None, ["100"])
)
assert tags[0].last_seen == one_second_ago
assert tags[0].first_seen == one_day_ago
assert (
tags[0].times_seen == 2
) # Isn't 3 because start was limited by the ReleaseProjectEnvironment entry
def test_get_tag_value_paginator(self) -> None:
from sentry.tagstore.types import TagValue
assert list(
self.ts.get_tag_value_paginator(
self.proj1.id,
self.proj1env1.id,
"sentry:user",
tenant_ids={"referrer": "r", "organization_id": 1234},
).get_result(10)
) == [
TagValue(
key="sentry:user",
value="id:user1",
times_seen=2,
first_seen=self.now - timedelta(seconds=2),
last_seen=self.now - timedelta(seconds=1),
),
TagValue(
key="sentry:user",
value="id:user2",
times_seen=1,
first_seen=self.now - timedelta(seconds=2),
last_seen=self.now - timedelta(seconds=2),
),
]
assert list(
self.ts.get_tag_value_paginator(
self.proj1.id,
self.proj1env1.id,
"sentry:user",
query="user1",
tenant_ids={"referrer": "r", "organization_id": 1234},
).get_result(10)
) == [
TagValue(
key="sentry:user",
value="id:user1",
times_seen=2,
first_seen=self.now - timedelta(seconds=2),
last_seen=self.now - timedelta(seconds=1),
)
]
def test_get_tag_value_paginator_with_dates(self) -> None:
from sentry.tagstore.types import TagValue
day_ago = self.now - timedelta(days=1)
two_days_ago = self.now - timedelta(days=2)
assert list(
self.ts.get_tag_value_paginator(
self.proj1.id,
self.proj1env1.id,
"sentry:user",
start=day_ago,
end=self.now,
tenant_ids={"referrer": "r", "organization_id": 1234},
).get_result(10)
) == [
TagValue(
key="sentry:user",
value="id:user1",
times_seen=2,
first_seen=self.now - timedelta(seconds=2),
last_seen=self.now - timedelta(seconds=1),
),
TagValue(
key="sentry:user",
value="id:user2",
times_seen=1,
first_seen=self.now - timedelta(seconds=2),
last_seen=self.now - timedelta(seconds=2),
),
]
day_ago = self.now - timedelta(days=1)
assert (
list(
self.ts.get_tag_value_paginator(
self.proj1.id,
self.proj1env1.id,
"sentry:user",
start=two_days_ago,
end=day_ago,
tenant_ids={"referrer": "r", "organization_id": 1234},
).get_result(10)
)
== []
)
def test_numeric_tag_value_paginator(self) -> None:
from sentry.tagstore.types import TagValue
assert list(
self.ts.get_tag_value_paginator(
self.proj1.id,
self.proj1env1.id,
"stack.lineno",
tenant_ids={"referrer": "r", "organization_id": 1234},
).get_result(10)
) == [
TagValue(
key="stack.lineno",
value="29",
times_seen=2,
first_seen=self.now - timedelta(seconds=2),
last_seen=self.now - timedelta(seconds=1),
)
]
assert list(
self.ts.get_tag_value_paginator(
self.proj1.id,
self.proj1env1.id,
"stack.lineno",
query="30",
tenant_ids={"referrer": "r", "organization_id": 1234},
).get_result(10)
) == [
TagValue(
key="stack.lineno",
value="29",
times_seen=2,
first_seen=self.now - timedelta(seconds=2),
last_seen=self.now - timedelta(seconds=1),
)
]
def test_get_group_tag_value_iter(self) -> None:
from sentry.tagstore.types import GroupTagValue
assert list(
self.ts.get_group_tag_value_iter(
self.proj1group1,
[self.proj1env1.id],
"sentry:user",
tenant_ids={"referrer": "r", "organization_id": 1234},
)
) == [
GroupTagValue(
group_id=self.proj1group1.id,
key="sentry:user",
value="id:user1",
times_seen=1,
first_seen=self.now - timedelta(seconds=1),
last_seen=self.now - timedelta(seconds=1),
),
GroupTagValue(
group_id=self.proj1group1.id,
key="sentry:user",
value="id:user2",
times_seen=1,
first_seen=self.now - timedelta(seconds=2),
last_seen=self.now - timedelta(seconds=2),
),
]
def test_get_group_tag_value_iter_perf(self) -> None:
from sentry.tagstore.types import GroupTagValue
group, env = self.perf_group_and_env
assert list(
self.ts.get_group_tag_value_iter(
group,
[env.id],
"foo",
tenant_ids={"referrer": "r", "organization_id": 1234},
)
) == [
GroupTagValue(
group_id=group.id,
key="foo",
value="bar",
times_seen=1,
first_seen=self.now - timedelta(seconds=1),
last_seen=self.now - timedelta(seconds=1),
),
GroupTagValue(
group_id=group.id,
key="foo",
value="quux",
times_seen=1,
first_seen=self.now - timedelta(seconds=2),
last_seen=self.now - timedelta(seconds=2),
),
]
def test_get_group_tag_value_paginator(self) -> None:
from sentry.tagstore.types import GroupTagValue
assert list(
self.ts.get_group_tag_value_paginator(
self.proj1group1,
[self.proj1env1.id],
"sentry:user",
tenant_ids={"referrer": "r", "organization_id": 1234},
).get_result(10)
) == [
GroupTagValue(
group_id=self.proj1group1.id,
key="sentry:user",
value="id:user1",
times_seen=1,
first_seen=self.now - timedelta(seconds=1),
last_seen=self.now - timedelta(seconds=1),
),
GroupTagValue(
group_id=self.proj1group1.id,
key="sentry:user",
value="id:user2",
times_seen=1,
first_seen=self.now - timedelta(seconds=2),
last_seen=self.now - timedelta(seconds=2),
),
]
def test_get_group_tag_value_paginator_perf(self) -> None:
from sentry.tagstore.types import GroupTagValue
group, env = self.perf_group_and_env
assert list(
self.ts.get_group_tag_value_paginator(
group,
[env.id],
"foo",
tenant_ids={"referrer": "r", "organization_id": 1234},
).get_result(10)
) == [
GroupTagValue(
group_id=group.id,
key="foo",
value="bar",
times_seen=1,
first_seen=self.now - timedelta(seconds=1),
last_seen=self.now - timedelta(seconds=1),
),
GroupTagValue(
group_id=group.id,
key="foo",
value="quux",
times_seen=1,
first_seen=self.now - timedelta(seconds=2),
last_seen=self.now - timedelta(seconds=2),
),
]
def test_get_group_tag_value_paginator_times_seen(self) -> None:
from sentry.tagstore.types import GroupTagValue
self.store_event(
data={
"event_id": "5" * 32,
"message": "message 1",
"platform": "python",
"environment": self.proj1env1.name,
"fingerprint": ["group-1"],
"timestamp": (self.now - timedelta(seconds=2)).isoformat(),
"tags": {
"foo": "bar",
"baz": "quux",
"sentry:release": 100,
"sentry:user": "id:user2",
},
"user": {"id": "user2"},
"exception": exception,
},
project_id=self.proj1.id,
)
assert list(
self.ts.get_group_tag_value_paginator(
self.proj1group1,
[self.proj1env1.id],
"sentry:user",
order_by="-times_seen",
tenant_ids={"referrer": "r", "organization_id": 1234},
).get_result(10)
) == [
GroupTagValue(
group_id=self.proj1group1.id,
key="sentry:user",
value="id:user2",
times_seen=2,
first_seen=self.now - timedelta(seconds=2),
last_seen=self.now - timedelta(seconds=2),
),
GroupTagValue(
group_id=self.proj1group1.id,
key="sentry:user",
value="id:user1",
times_seen=1,
first_seen=self.now - timedelta(seconds=1),
last_seen=self.now - timedelta(seconds=1),
),
]
def test_get_group_tag_value_paginator_times_seen_perf(self) -> None:
from sentry.tagstore.types import GroupTagValue
group, env = self.perf_group_and_env
event_data = load_data("transaction-n-plus-one", timestamp=before_now(minutes=10))
self.create_performance_issue(
event_data={
**event_data,
"event_id": "a" * 32,
"timestamp": (self.now - timedelta(seconds=1)).isoformat(),
"start_timestamp": (self.now - timedelta(seconds=1)).isoformat(),
"tags": {"foo": "bar", "biz": "baz"},
"release": "releaseme",
"environment": env.name,
}
)
assert list(
self.ts.get_group_tag_value_paginator(
group,
[env.id],
"foo",
order_by="-times_seen",
tenant_ids={"referrer": "r", "organization_id": 1234},
).get_result(10)
) == [
GroupTagValue(
group_id=group.id,
key="foo",
value="bar",
times_seen=2,
first_seen=self.now - timedelta(seconds=1),
last_seen=self.now - timedelta(seconds=1),
),
GroupTagValue(
group_id=group.id,
key="foo",
value="quux",
times_seen=1,
first_seen=self.now - timedelta(seconds=2),
last_seen=self.now - timedelta(seconds=2),
),
]
# mock default value only for "limit" argument of get_group_tag_value_iter()
# it is set to 1 to avoid creating 1000+ tags for the test
@mock.patch.object(
SnubaTagStorage.get_group_tag_value_iter,
"__defaults__",
(
"-first_seen", # orderby default (unchanged)
1, # limit default (set to 1 for test)
0, # offset default (unchanged)
None, # tenant_ids default (unchanged)
),
)
def test_get_group_tag_value_paginator_sort_by_last_seen(self) -> None:
# the tag with "quux" value has the lowest "first_seen"
self.store_event(
data={
"event_id": "5" * 32,
"message": "message 1",
"platform": "python",
"environment": "test",
"fingerprint": ["group-1"],
"timestamp": (self.now - timedelta(seconds=5)).isoformat(),
"tags": {
"foo": "quux",
},
"user": {"id": "user1"},
"exception": exception,
},
project_id=self.proj1.id,
)
# the tag with "quux" value has the highest "last_seen"
self.store_event(
data={
"event_id": "6" * 32,
"message": "message 1",
"platform": "python",
"environment": "test",
"fingerprint": ["group-1"],
"timestamp": self.now.isoformat(),
"tags": {
"foo": "quux",
},
"user": {"id": "user1"},
"exception": exception,
},
project_id=self.proj1.id,
)
top_key = self.ts.get_group_tag_value_paginator(
self.proj1group1,
[],
"foo",
tenant_ids={"referrer": "r", "organization_id": 1234},
).get_result(1)[0]
# top key should be "quux" as it's the most recent than "bar"
assert top_key.value == "quux"
def test_error_upsampling_tag_value_counts(self) -> None:
"""Test that tag value counts are properly weighted when projects use error upsampling."""
# Set up allowlisted project for error upsampling
with self.options({"issues.client_error_sampling.project_allowlist": [self.proj1.id]}):
# Create first event with sample_weight=10 and tag value "alpha"
event1 = self.store_event(
data={
"event_id": "a1" * 16,
"message": "Error event with high sample weight",
"type": "error",
"exception": exception,
"timestamp": (self.now - timedelta(seconds=1)).isoformat(),
"fingerprint": ["error-upsampling-group"],
"tags": {
"custom_tag": "alpha",
"environment": "test",
},
# This creates a sample_weight of 10 (1/0.1)
"contexts": {"error_sampling": {"client_sample_rate": 0.1}},
},
project_id=self.proj1.id,
)
# Create second event with sample_weight=5 and tag value "beta"
self.store_event(
data={
"event_id": "b2" * 16,
"message": "Error event with medium sample weight",
"type": "error",
"exception": exception,
"timestamp": (self.now - timedelta(seconds=2)).isoformat(),
"fingerprint": ["error-upsampling-group"],
"tags": {
"custom_tag": "beta",
"environment": "test",
},
# This creates a sample_weight of 5 (1/0.2)
"contexts": {"error_sampling": {"client_sample_rate": 0.2}},
},
project_id=self.proj1.id,
)
# Get the group from one of the events
error_upsampling_group = event1.group
# Test get_top_group_tag_values with error upsampling
resp = self.ts.get_top_group_tag_values(
error_upsampling_group,
self.proj1env1.id,
"custom_tag",
10, # limit
tenant_ids={"referrer": "r", "organization_id": 1234},
)
# Verify we get both tag values
assert len(resp) == 2
# Sort by times_seen descending to get consistent order
resp = sorted(resp, key=lambda x: x.times_seen, reverse=True)
# First tag value should be "alpha" with times_seen=10 (sample_weight)
assert resp[0].key == "custom_tag"
assert resp[0].value == "alpha"
assert resp[0].times_seen == 10
assert resp[0].group_id == error_upsampling_group.id
# Second tag value should be "beta" with times_seen=5 (sample_weight)
assert resp[1].key == "custom_tag"
assert resp[1].value == "beta"
assert resp[1].times_seen == 5
assert resp[1].group_id == error_upsampling_group.id
# Also test get_group_tag_value_count for total count
total_count = self.ts.get_group_tag_value_count(
error_upsampling_group,
self.proj1env1.id,
"custom_tag",
tenant_ids={"referrer": "r", "organization_id": 1234},
)
# Total should be 10 + 5 = 15 (weighted sum, not 2 raw events)
assert total_count == 15
| TagStorageTest |
python | pytorch__pytorch | torch/_dynamo/test_minifier_common.py | {
"start": 1096,
"end": 2208
} | class ____:
minifier_code: str
repro_code: str
def _get_module(self, t: str) -> str:
match = re.search(r"class Repro\(torch\.nn\.Module\):\s+([ ].*\n| *\n)+", t)
assert match is not None, "failed to find module"
r = match.group(0)
r = re.sub(r"\s+$", "\n", r, flags=re.MULTILINE)
r = re.sub(r"\n{3,}", "\n\n", r)
return r.strip()
def get_exported_program_path(self) -> Optional[str]:
# Extract the exported program file path from AOTI minifier's repro.py
# Regular expression pattern to match the file path
pattern = r'torch\.export\.load\(\s*["\'](.*?)["\']\s*\)'
# Search for the pattern in the text
match = re.search(pattern, self.repro_code)
# Extract and print the file path if a match is found
if match:
file_path = match.group(1)
return file_path
return None
def minifier_module(self) -> str:
return self._get_module(self.minifier_code)
def repro_module(self) -> str:
return self._get_module(self.repro_code)
| MinifierTestResult |
python | getsentry__sentry | src/sentry/api/endpoints/auth_config.py | {
"start": 941,
"end": 3609
} | class ____(Endpoint, OrganizationMixin):
publish_status = {
"GET": ApiPublishStatus.PRIVATE,
}
owner = ApiOwner.ENTERPRISE
# Disable authentication and permission requirements.
permission_classes = ()
def dispatch(self, request: HttpRequest, *args, **kwargs) -> HttpResponseBase:
self.active_organization = determine_active_organization(request)
return super().dispatch(request, *args, **kwargs)
def get(self, request: Request, *args, **kwargs) -> Response:
"""
Get context required to show a login page. Registration is handled elsewhere.
"""
if request.user.is_authenticated:
return self.respond_authenticated(request)
next_uri = self.get_next_uri(request)
# we always reset the state on GET so you don't end up at an odd location
initiate_login(request, next_uri)
# Auth login verifies the test cookie is set
request.session.set_test_cookie()
# Single org mode -- send them to the org-specific handler
if settings.SENTRY_SINGLE_ORGANIZATION:
org = Organization.get_default()
return Response({"nextUri": reverse("sentry-auth-organization", args=[org.slug])})
session_expired = "session_expired" in request.COOKIES
payload = self.prepare_login_context(request, *args, **kwargs)
response = Response(payload)
if session_expired:
response.delete_cookie("session_expired")
return response
def respond_authenticated(self, request: Request):
next_uri = self.get_next_uri(request)
if not is_valid_redirect(next_uri, allowed_hosts=(request.get_host(),)):
next_uri = get_org_redirect_url(
request, self.active_organization.organization if self.active_organization else None
)
return Response({"nextUri": next_uri})
def get_next_uri(self, request: HttpRequest) -> str:
next_uri_fallback = request.session.pop("_next", None)
return request.GET.get(REDIRECT_FIELD_NAME, next_uri_fallback)
def prepare_login_context(self, request: Request, *args, **kwargs):
can_register = bool(has_user_registration() or request.session.get("can_register"))
context = {
"serverHostname": get_server_hostname(),
"canRegister": can_register,
"hasNewsletter": newsletter.backend.is_enabled(),
}
if "session_expired" in request.COOKIES:
context["warning"] = WARN_SESSION_EXPIRED
context.update(additional_context.run_callbacks(request))
return context
| AuthConfigEndpoint |
python | pytorch__pytorch | benchmarks/operator_benchmark/pt/binary_test.py | {
"start": 2203,
"end": 3299
} | class ____(op_bench.TorchBenchmarkBase):
def init(self, M, N, K, device, dtype_one, dtype_two, op_func):
self.inputs = {
"input_one": torch.randn(M, N, K, device=device).to(dtype=dtype_one),
"input_two": torch.randn(M, N, K, device=device).to(dtype=dtype_two),
}
self.op_func = op_func
def forward(self, input_one, input_two):
return self.op_func(input_one, input_two)
op_bench.generate_pt_tests_from_op_list(
binary_ops_list, binary_short_configs + binary_long_configs, BinaryOpBenchmark
)
######
# Benchmark ops performance for boolean dtype
######
# Benchmark ops performance with broadcast
binary_ops_bcast_list = op_bench.op_list(
attr_names=["op_name", "op_func"],
attrs=[["logical_and", torch.logical_and]],
)
# Configs with broadcast
binary_configs_broadcast = op_bench.config_list(
attr_names=["in_one", "in_two"],
attrs=[
[[64, 1, 64], [1, 64, 1]],
],
cross_product_configs={
"device": ["cpu"],
"dtype": [torch.bool],
},
tags=["short"],
)
| BinaryOpBenchmark |
python | mozilla__bleach | bleach/_vendor/html5lib/serializer.py | {
"start": 3635,
"end": 15682
} | class ____(object):
# attribute quoting options
quote_attr_values = "legacy" # be secure by default
quote_char = '"'
use_best_quote_char = True
# tag syntax options
omit_optional_tags = True
minimize_boolean_attributes = True
use_trailing_solidus = False
space_before_trailing_solidus = True
# escaping options
escape_lt_in_attrs = False
escape_rcdata = False
resolve_entities = True
# miscellaneous options
alphabetical_attributes = False
inject_meta_charset = True
strip_whitespace = False
sanitize = False
options = ("quote_attr_values", "quote_char", "use_best_quote_char",
"omit_optional_tags", "minimize_boolean_attributes",
"use_trailing_solidus", "space_before_trailing_solidus",
"escape_lt_in_attrs", "escape_rcdata", "resolve_entities",
"alphabetical_attributes", "inject_meta_charset",
"strip_whitespace", "sanitize")
def __init__(self, **kwargs):
"""Initialize HTMLSerializer
:arg inject_meta_charset: Whether or not to inject the meta charset.
Defaults to ``True``.
:arg quote_attr_values: Whether to quote attribute values that don't
require quoting per legacy browser behavior (``"legacy"``), when
required by the standard (``"spec"``), or always (``"always"``).
Defaults to ``"legacy"``.
:arg quote_char: Use given quote character for attribute quoting.
Defaults to ``"`` which will use double quotes unless attribute
value contains a double quote, in which case single quotes are
used.
:arg escape_lt_in_attrs: Whether or not to escape ``<`` in attribute
values.
Defaults to ``False``.
:arg escape_rcdata: Whether to escape characters that need to be
escaped within normal elements within rcdata elements such as
style.
Defaults to ``False``.
:arg resolve_entities: Whether to resolve named character entities that
appear in the source tree. The XML predefined entities < >
& " ' are unaffected by this setting.
Defaults to ``True``.
:arg strip_whitespace: Whether to remove semantically meaningless
whitespace. (This compresses all whitespace to a single space
except within ``pre``.)
Defaults to ``False``.
:arg minimize_boolean_attributes: Shortens boolean attributes to give
just the attribute value, for example::
<input disabled="disabled">
becomes::
<input disabled>
Defaults to ``True``.
:arg use_trailing_solidus: Includes a close-tag slash at the end of the
start tag of void elements (empty elements whose end tag is
forbidden). E.g. ``<hr/>``.
Defaults to ``False``.
:arg space_before_trailing_solidus: Places a space immediately before
the closing slash in a tag using a trailing solidus. E.g.
``<hr />``. Requires ``use_trailing_solidus=True``.
Defaults to ``True``.
:arg sanitize: Strip all unsafe or unknown constructs from output.
See :py:class:`html5lib.filters.sanitizer.Filter`.
Defaults to ``False``.
:arg omit_optional_tags: Omit start/end tags that are optional.
Defaults to ``True``.
:arg alphabetical_attributes: Reorder attributes to be in alphabetical order.
Defaults to ``False``.
"""
unexpected_args = frozenset(kwargs) - frozenset(self.options)
if len(unexpected_args) > 0:
raise TypeError("__init__() got an unexpected keyword argument '%s'" % next(iter(unexpected_args)))
if 'quote_char' in kwargs:
self.use_best_quote_char = False
for attr in self.options:
setattr(self, attr, kwargs.get(attr, getattr(self, attr)))
self.errors = []
self.strict = False
def encode(self, string):
assert(isinstance(string, text_type))
if self.encoding:
return string.encode(self.encoding, "htmlentityreplace")
else:
return string
def encodeStrict(self, string):
assert(isinstance(string, text_type))
if self.encoding:
return string.encode(self.encoding, "strict")
else:
return string
def serialize(self, treewalker, encoding=None):
# pylint:disable=too-many-nested-blocks
self.encoding = encoding
in_cdata = False
self.errors = []
if encoding and self.inject_meta_charset:
from .filters.inject_meta_charset import Filter
treewalker = Filter(treewalker, encoding)
# Alphabetical attributes is here under the assumption that none of
# the later filters add or change order of attributes; it needs to be
# before the sanitizer so escaped elements come out correctly
if self.alphabetical_attributes:
from .filters.alphabeticalattributes import Filter
treewalker = Filter(treewalker)
# WhitespaceFilter should be used before OptionalTagFilter
# for maximum efficiently of this latter filter
if self.strip_whitespace:
from .filters.whitespace import Filter
treewalker = Filter(treewalker)
if self.sanitize:
from .filters.sanitizer import Filter
treewalker = Filter(treewalker)
if self.omit_optional_tags:
from .filters.optionaltags import Filter
treewalker = Filter(treewalker)
for token in treewalker:
type = token["type"]
if type == "Doctype":
doctype = "<!DOCTYPE %s" % token["name"]
if token["publicId"]:
doctype += ' PUBLIC "%s"' % token["publicId"]
elif token["systemId"]:
doctype += " SYSTEM"
if token["systemId"]:
if token["systemId"].find('"') >= 0:
if token["systemId"].find("'") >= 0:
self.serializeError("System identifier contains both single and double quote characters")
quote_char = "'"
else:
quote_char = '"'
doctype += " %s%s%s" % (quote_char, token["systemId"], quote_char)
doctype += ">"
yield self.encodeStrict(doctype)
elif type in ("Characters", "SpaceCharacters"):
if type == "SpaceCharacters" or in_cdata:
if in_cdata and token["data"].find("</") >= 0:
self.serializeError("Unexpected </ in CDATA")
yield self.encode(token["data"])
else:
yield self.encode(escape(token["data"]))
elif type in ("StartTag", "EmptyTag"):
name = token["name"]
yield self.encodeStrict("<%s" % name)
if name in rcdataElements and not self.escape_rcdata:
in_cdata = True
elif in_cdata:
self.serializeError("Unexpected child element of a CDATA element")
for (_, attr_name), attr_value in token["data"].items():
# TODO: Add namespace support here
k = attr_name
v = attr_value
yield self.encodeStrict(' ')
yield self.encodeStrict(k)
if not self.minimize_boolean_attributes or \
(k not in booleanAttributes.get(name, tuple()) and
k not in booleanAttributes.get("", tuple())):
yield self.encodeStrict("=")
if self.quote_attr_values == "always" or len(v) == 0:
quote_attr = True
elif self.quote_attr_values == "spec":
quote_attr = _quoteAttributeSpec.search(v) is not None
elif self.quote_attr_values == "legacy":
quote_attr = _quoteAttributeLegacy.search(v) is not None
else:
raise ValueError("quote_attr_values must be one of: "
"'always', 'spec', or 'legacy'")
v = v.replace("&", "&")
if self.escape_lt_in_attrs:
v = v.replace("<", "<")
if quote_attr:
quote_char = self.quote_char
if self.use_best_quote_char:
if "'" in v and '"' not in v:
quote_char = '"'
elif '"' in v and "'" not in v:
quote_char = "'"
if quote_char == "'":
v = v.replace("'", "'")
else:
v = v.replace('"', """)
yield self.encodeStrict(quote_char)
yield self.encode(v)
yield self.encodeStrict(quote_char)
else:
yield self.encode(v)
if name in voidElements and self.use_trailing_solidus:
if self.space_before_trailing_solidus:
yield self.encodeStrict(" /")
else:
yield self.encodeStrict("/")
yield self.encode(">")
elif type == "EndTag":
name = token["name"]
if name in rcdataElements:
in_cdata = False
elif in_cdata:
self.serializeError("Unexpected child element of a CDATA element")
yield self.encodeStrict("</%s>" % name)
elif type == "Comment":
data = token["data"]
if data.find("--") >= 0:
self.serializeError("Comment contains --")
yield self.encodeStrict("<!--%s-->" % token["data"])
elif type == "Entity":
name = token["name"]
key = name + ";"
if key not in entities:
self.serializeError("Entity %s not recognized" % name)
if self.resolve_entities and key not in xmlEntities:
data = entities[key]
else:
data = "&%s;" % name
yield self.encodeStrict(data)
else:
self.serializeError(token["data"])
def render(self, treewalker, encoding=None):
"""Serializes the stream from the treewalker into a string
:arg treewalker: the treewalker to serialize
:arg encoding: the string encoding to use
:returns: the serialized tree
Example:
>>> from html5lib import parse, getTreeWalker
>>> from html5lib.serializer import HTMLSerializer
>>> token_stream = parse('<html><body>Hi!</body></html>')
>>> walker = getTreeWalker('etree')
>>> serializer = HTMLSerializer(omit_optional_tags=False)
>>> serializer.render(walker(token_stream))
'<html><head></head><body>Hi!</body></html>'
"""
if encoding:
return b"".join(list(self.serialize(treewalker, encoding)))
else:
return "".join(list(self.serialize(treewalker)))
def serializeError(self, data="XXX ERROR MESSAGE NEEDED"):
# XXX The idea is to make data mandatory.
self.errors.append(data)
if self.strict:
raise SerializeError
| HTMLSerializer |
python | python-markdown__markdown | markdown/extensions/legacy_em.py | {
"start": 667,
"end": 1294
} | class ____(UnderscoreProcessor):
"""Emphasis processor for handling strong and em matches inside underscores."""
PATTERNS = [
EmStrongItem(re.compile(EM_STRONG2_RE, re.DOTALL | re.UNICODE), 'double', 'strong,em'),
EmStrongItem(re.compile(STRONG_EM2_RE, re.DOTALL | re.UNICODE), 'double', 'em,strong'),
EmStrongItem(re.compile(STRONG_EM_RE, re.DOTALL | re.UNICODE), 'double2', 'strong,em'),
EmStrongItem(re.compile(STRONG_RE, re.DOTALL | re.UNICODE), 'single', 'strong'),
EmStrongItem(re.compile(EMPHASIS_RE, re.DOTALL | re.UNICODE), 'single', 'em')
]
| LegacyUnderscoreProcessor |
python | python__mypy | mypy/test/data.py | {
"start": 17260,
"end": 26402
} | class ____:
"""Parsed test caseitem.
An item is of the form
[id arg]
.. data ..
"""
id: str
arg: str | None
# Processed, collapsed text data
data: list[str]
# Start line: 1-based, inclusive, relative to testcase
line: int
# End line: 1-based, exclusive, relative to testcase; not same as `line + len(test_item.data)` due to collapsing
end_line: int
@property
def trimmed_newlines(self) -> int: # compensates for strip_list
return self.end_line - self.line - len(self.data)
def parse_test_data(raw_data: str, name: str) -> list[TestItem]:
"""Parse a list of lines that represent a sequence of test items."""
lines = ["", "[case " + name + "]"] + raw_data.split("\n")
ret: list[TestItem] = []
data: list[str] = []
id: str | None = None
arg: str | None = None
i = 0
i0 = 0
while i < len(lines):
s = lines[i].strip()
if lines[i].startswith("[") and s.endswith("]"):
if id:
data = collapse_line_continuation(data)
data = strip_list(data)
ret.append(TestItem(id, arg, data, i0 + 1, i))
i0 = i
id = s[1:-1]
arg = None
if " " in id:
arg = id[id.index(" ") + 1 :]
id = id[: id.index(" ")]
data = []
elif lines[i].startswith("\\["):
data.append(lines[i][1:])
elif not lines[i].startswith("--"):
data.append(lines[i])
elif lines[i].startswith("----"):
data.append(lines[i][2:])
i += 1
# Process the last item.
if id:
data = collapse_line_continuation(data)
data = strip_list(data)
ret.append(TestItem(id, arg, data, i0 + 1, i - 1))
return ret
def strip_list(l: list[str]) -> list[str]:
"""Return a stripped copy of l.
Strip whitespace at the end of all lines, and strip all empty
lines from the end of the array.
"""
r: list[str] = []
for s in l:
# Strip spaces at end of line
r.append(re.sub(r"\s+$", "", s))
while r and r[-1] == "":
r.pop()
return r
def collapse_line_continuation(l: list[str]) -> list[str]:
r: list[str] = []
cont = False
for s in l:
ss = re.sub(r"\\$", "", s)
if cont:
r[-1] += re.sub("^ +", "", ss)
else:
r.append(ss)
cont = s.endswith("\\")
return r
def expand_variables(s: str) -> str:
return s.replace("<ROOT>", root_dir)
def expand_errors(input: list[str], output: list[str], fnam: str) -> None:
"""Transform comments such as '# E: message' or
'# E:3: message' in input.
The result is lines like 'fnam:line: error: message'.
"""
for i in range(len(input)):
# The first in the split things isn't a comment
for possible_err_comment in input[i].split(" # ")[1:]:
m = re.search(
r"^([ENW]):((?P<col>\d+):)? (?P<message>.*)$", possible_err_comment.strip()
)
if m:
if m.group(1) == "E":
severity = "error"
elif m.group(1) == "N":
severity = "note"
elif m.group(1) == "W":
severity = "warning"
col = m.group("col")
message = m.group("message")
message = message.replace("\\#", "#") # adds back escaped # character
if col is None:
output.append(f"{fnam}:{i + 1}: {severity}: {message}")
else:
output.append(f"{fnam}:{i + 1}:{col}: {severity}: {message}")
def fix_win_path(line: str) -> str:
r"""Changes Windows paths to Linux paths in error messages.
E.g. foo\bar.py -> foo/bar.py.
"""
line = line.replace(root_dir, root_dir.replace("\\", "/"))
m = re.match(r"^([\S/]+):(\d+:)?(\s+.*)", line)
if not m:
return line
else:
filename, lineno, message = m.groups()
return "{}:{}{}".format(filename.replace("\\", "/"), lineno or "", message)
def fix_cobertura_filename(line: str) -> str:
r"""Changes filename paths to Linux paths in Cobertura output files.
E.g. filename="pkg\subpkg\a.py" -> filename="pkg/subpkg/a.py".
"""
m = re.search(r'<class .* filename="(?P<filename>.*?)"', line)
if not m:
return line
return "{}{}{}".format(
line[: m.start(1)], m.group("filename").replace("\\", "/"), line[m.end(1) :]
)
##
#
# pytest setup
#
##
def pytest_sessionstart(session: Any) -> None:
# Clean up directory where mypyc tests write intermediate files on failure
# to avoid any confusion between test runs
if os.path.isdir(mypyc_output_dir):
shutil.rmtree(mypyc_output_dir)
# This function name is special to pytest. See
# https://docs.pytest.org/en/latest/reference.html#initialization-hooks
def pytest_addoption(parser: Any) -> None:
group = parser.getgroup("mypy")
group.addoption(
"--update-data",
action="store_true",
default=False,
help="Update test data to reflect actual output (supported only for certain tests)",
)
group.addoption(
"--save-failures-to",
default=None,
help="Copy the temp directories from failing tests to a target directory",
)
group.addoption(
"--mypy-verbose", action="count", help="Set the verbose flag when creating mypy Options"
)
group.addoption(
"--mypyc-showc",
action="store_true",
default=False,
help="Display C code on mypyc test failures",
)
group.addoption(
"--mypyc-debug",
default=None,
dest="debugger",
choices=SUPPORTED_DEBUGGERS,
help="Run the first mypyc run test with the specified debugger",
)
@pytest.hookimpl(tryfirst=True)
def pytest_cmdline_main(config: pytest.Config) -> None:
if config.getoption("--collectonly"):
return
# --update-data is not compatible with parallelized tests, disable parallelization
if config.getoption("--update-data"):
config.option.numprocesses = 0
# This function name is special to pytest. See
# https://doc.pytest.org/en/latest/how-to/writing_plugins.html#collection-hooks
def pytest_pycollect_makeitem(collector: Any, name: str, obj: object) -> Any | None:
"""Called by pytest on each object in modules configured in conftest.py files.
collector is pytest.Collector, returns Optional[pytest.Class]
"""
if isinstance(obj, type):
# Only classes derived from DataSuite contain test cases, not the DataSuite class itself
if issubclass(obj, DataSuite) and obj is not DataSuite:
# Non-None result means this obj is a test case.
# The collect method of the returned DataSuiteCollector instance will be called later,
# with self.obj being obj.
return DataSuiteCollector.from_parent(parent=collector, name=name)
return None
_case_name_pattern = re.compile(
r"(?P<name>[a-zA-Z_0-9]+)"
r"(?P<writescache>-writescache)?"
r"(?P<only_when>-only_when_cache|-only_when_nocache)?"
r"(?P<skip_path_normalization>-skip_path_normalization)?"
r"(-(?P<platform>posix|windows))?"
r"(?P<skip>-skip)?"
r"(?P<xfail>-xfail)?"
)
def split_test_cases(
parent: DataFileCollector, suite: DataSuite, file: str
) -> Iterator[DataDrivenTestCase]:
"""Iterate over raw test cases in file, at collection time, ignoring sub items.
The collection phase is slow, so any heavy processing should be deferred to after
uninteresting tests are filtered (when using -k PATTERN switch).
"""
with open(file, encoding="utf-8") as f:
data = f.read()
cases = re.split(r"^\[case ([^]+)]+)\][ \t]*$\n", data, flags=re.DOTALL | re.MULTILINE)
cases_iter = iter(cases)
line_no = next(cases_iter).count("\n") + 1
test_names = set()
for case_id in cases_iter:
data = next(cases_iter)
m = _case_name_pattern.fullmatch(case_id)
if not m:
raise RuntimeError(f"Invalid testcase id {case_id!r}")
name = m.group("name")
if name in test_names:
raise RuntimeError(
'Found a duplicate test name "{}" in {} on line {}'.format(
name, parent.name, line_no
)
)
yield DataDrivenTestCase.from_parent(
parent=parent,
suite=suite,
file=file,
name=add_test_name_suffix(name, suite.test_name_suffix),
writescache=bool(m.group("writescache")),
only_when=m.group("only_when"),
platform=m.group("platform"),
skip=bool(m.group("skip")),
xfail=bool(m.group("xfail")),
normalize_output=not m.group("skip_path_normalization"),
data=data,
line=line_no,
)
line_no += data.count("\n") + 1
# Record existing tests to prevent duplicates:
test_names.update({name})
| TestItem |
python | realpython__materials | game-of-life-python/source_code_final/rplife/grid.py | {
"start": 45,
"end": 1538
} | class ____:
def __init__(self, pattern):
self.pattern = pattern
def evolve(self):
neighbors = (
(-1, -1), # Above left
(-1, 0), # Above
(-1, 1), # Above right
(0, -1), # Left
(0, 1), # Right
(1, -1), # Below left
(1, 0), # Below
(1, 1), # Below right
)
num_neighbors = collections.defaultdict(int)
for row, col in self.pattern.alive_cells:
for drow, dcol in neighbors:
num_neighbors[(row + drow, col + dcol)] += 1
stay_alive = {
cell for cell, num in num_neighbors.items() if num in {2, 3}
} & self.pattern.alive_cells
come_alive = {
cell for cell, num in num_neighbors.items() if num == 3
} - self.pattern.alive_cells
self.pattern.alive_cells = stay_alive | come_alive
def as_string(self, bbox):
start_col, start_row, end_col, end_row = bbox
display = [self.pattern.name.center(2 * (end_col - start_col))]
for row in range(start_row, end_row):
display_row = [
ALIVE if (row, col) in self.pattern.alive_cells else DEAD
for col in range(start_col, end_col)
]
display.append(" ".join(display_row))
return "\n ".join(display)
def __str__(self):
return f"{self.pattern.name}:\nAlive cells -> {sorted(self.pattern.alive_cells)}"
| LifeGrid |
python | rapidsai__cudf | python/cudf_polars/cudf_polars/experimental/rapidsmpf/collectives/common.py | {
"start": 1321,
"end": 2913
} | class ____:
"""
Context manager to reserve collective IDs for pipeline execution.
Parameters
----------
ir : IR
The root IR node of the pipeline.
Notes
-----
This context manager:
1. Identifies all Shuffle nodes in the IR
2. Reserves collective IDs from the vacancy pool
3. Creates a mapping from IR nodes to their reserved IDs
4. Releases all IDs back to the pool on __exit__
"""
def __init__(self, ir: IR):
# Find all collective IR nodes.
self.collective_nodes: list[IR] = [
node
for node in traversal([ir])
if isinstance(node, (Shuffle, Join, Repartition))
]
self.collective_id_map: dict[IR, int] = {}
def __enter__(self) -> dict[IR, int]:
"""
Reserve collective IDs and return the mapping.
Returns
-------
collective_id_map : dict[IR, int]
Mapping from IR nodes to their reserved collective IDs.
"""
# Reserve IDs and map nodes directly to their IDs
for node in self.collective_nodes:
self.collective_id_map[node] = _get_new_collective_id()
return self.collective_id_map
def __exit__(
self,
exc_type: type | None,
exc_val: Exception | None,
exc_tb: TracebackType | None,
) -> Literal[False]:
"""Release all reserved collective IDs back to the vacancy pool."""
for collective_id in self.collective_id_map.values():
_release_collective_id(collective_id)
return False
| ReserveOpIDs |
python | doocs__leetcode | solution/0800-0899/0829.Consecutive Numbers Sum/Solution.py | {
"start": 0,
"end": 258
} | class ____:
def consecutiveNumbersSum(self, n: int) -> int:
n <<= 1
ans, k = 0, 1
while k * (k + 1) <= n:
if n % k == 0 and (n // k - k + 1) % 2 == 0:
ans += 1
k += 1
return ans
| Solution |
python | nedbat__coveragepy | tests/test_coverage.py | {
"start": 41025,
"end": 44724
} | class ____(CoverageTest):
"""Tests of new syntax in Python 2.5."""
def test_with_statement(self) -> None:
self.check_coverage(
"""\
class Managed:
def __enter__(self):
desc = "enter"
def __exit__(self, type, value, tb):
desc = "exit"
m = Managed()
with m:
desc = "block1a"
desc = "block1b"
try:
with m:
desc = "block2"
raise Exception("Boo!")
except:
desc = "caught"
""",
lines=[1, 2, 3, 5, 6, 8, 9, 10, 11, 13, 14, 15, 16, 17, 18],
missing="",
)
def test_try_except_finally(self) -> None:
self.check_coverage(
"""\
a = 0; b = 0
try:
a = 1
except:
a = 99
finally:
b = 2
assert a == 1 and b == 2
""",
lines=[1, 2, 3, 4, 5, 7, 8],
missing="4-5",
branchz="",
branchz_missing="",
)
self.check_coverage(
"""\
a = 0; b = 0
try:
a = 1
raise Exception("foo")
except:
a = 99
finally:
b = 2
assert a == 99 and b == 2
""",
lines=[1, 2, 3, 4, 5, 6, 8, 9],
missing="",
branchz="",
branchz_missing="",
)
self.check_coverage(
"""\
a = 0; b = 0
try:
a = 1
raise Exception("foo")
except ImportError:
a = 99
except:
a = 123
finally:
b = 2
assert a == 123 and b == 2
""",
lines=[1, 2, 3, 4, 5, 6, 7, 8, 10, 11],
missing="6",
branchz="",
branchz_missing="",
)
self.check_coverage(
"""\
a = 0; b = 0
try:
a = 1
raise IOError("foo")
except ImportError:
a = 99
except IOError:
a = 17
except:
a = 123
finally:
b = 2
assert a == 17 and b == 2
""",
lines=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 12, 13],
missing="6, 9-10",
branchz="",
branchz_missing="",
)
self.check_coverage(
"""\
a = 0; b = 0
try:
a = 1
except:
a = 99
else:
a = 123
finally:
b = 2
assert a == 123 and b == 2
""",
lines=[1, 2, 3, 4, 5, 7, 9, 10],
missing="4-5",
branchz="",
branchz_missing="",
)
def test_try_except_finally_stranded_else(self) -> None:
self.check_coverage(
"""\
a = 0; b = 0
try:
a = 1
raise Exception("foo")
except:
a = 99
else:
a = 123
finally:
b = 2
assert a == 99 and b == 2
""",
# The else can't be reached because the try ends with a raise.
lines=[1, 2, 3, 4, 5, 6, 10, 11],
missing="",
branchz="",
branchz_missing="",
)
| Py25Test |
python | numpy__numpy | numpy/lib/tests/test_function_base.py | {
"start": 81855,
"end": 83724
} | class ____:
def test_simple(self):
x = np.arange(-10, 10, .1)
r = trapezoid(np.exp(-.5 * x ** 2) / np.sqrt(2 * np.pi), dx=0.1)
# check integral of normal equals 1
assert_almost_equal(r, 1, 7)
def test_ndim(self):
x = np.linspace(0, 1, 3)
y = np.linspace(0, 2, 8)
z = np.linspace(0, 3, 13)
wx = np.ones_like(x) * (x[1] - x[0])
wx[0] /= 2
wx[-1] /= 2
wy = np.ones_like(y) * (y[1] - y[0])
wy[0] /= 2
wy[-1] /= 2
wz = np.ones_like(z) * (z[1] - z[0])
wz[0] /= 2
wz[-1] /= 2
q = x[:, None, None] + y[None, :, None] + z[None, None, :]
qx = (q * wx[:, None, None]).sum(axis=0)
qy = (q * wy[None, :, None]).sum(axis=1)
qz = (q * wz[None, None, :]).sum(axis=2)
# n-d `x`
r = trapezoid(q, x=x[:, None, None], axis=0)
assert_almost_equal(r, qx)
r = trapezoid(q, x=y[None, :, None], axis=1)
assert_almost_equal(r, qy)
r = trapezoid(q, x=z[None, None, :], axis=2)
assert_almost_equal(r, qz)
# 1-d `x`
r = trapezoid(q, x=x, axis=0)
assert_almost_equal(r, qx)
r = trapezoid(q, x=y, axis=1)
assert_almost_equal(r, qy)
r = trapezoid(q, x=z, axis=2)
assert_almost_equal(r, qz)
def test_masked(self):
# Testing that masked arrays behave as if the function is 0 where
# masked
x = np.arange(5)
y = x * x
mask = x == 2
ym = np.ma.array(y, mask=mask)
r = 13.0 # sum(0.5 * (0 + 1) * 1.0 + 0.5 * (9 + 16))
assert_almost_equal(trapezoid(ym, x), r)
xm = np.ma.array(x, mask=mask)
assert_almost_equal(trapezoid(ym, xm), r)
xm = np.ma.array(x, mask=mask)
assert_almost_equal(trapezoid(y, xm), r)
| TestTrapezoid |
python | pytorch__pytorch | torch/ao/nn/quantized/modules/functional_modules.py | {
"start": 163,
"end": 2819
} | class ____(torch.nn.Module):
r"""State collector class for float operations.
The instance of this class can be used instead of the ``torch.`` prefix for
some operations. See example usage below.
.. note::
This class does not provide a ``forward`` hook. Instead, you must use
one of the underlying functions (e.g. ``add``).
Examples::
>>> f_add = FloatFunctional()
>>> a = torch.tensor(3.0)
>>> b = torch.tensor(4.0)
>>> f_add.add(a, b) # Equivalent to ``torch.add(a, b)``
Valid operation names:
- add
- cat
- mul
- add_relu
- add_scalar
- mul_scalar
"""
def __init__(self) -> None:
super().__init__()
self.activation_post_process = torch.nn.Identity()
def forward(self, x):
raise RuntimeError(
"FloatFunctional is not intended to use the "
+ "'forward'. Please use the underlying operation"
)
r"""Operation equivalent to ``torch.add(Tensor, Tensor)``"""
def add(self, x: Tensor, y: Tensor) -> Tensor:
r = torch.add(x, y)
r = self.activation_post_process(r)
return r
r"""Operation equivalent to ``torch.add(Tensor, float)``"""
def add_scalar(self, x: Tensor, y: float) -> Tensor:
r = torch.add(x, y)
# Note: this operation is not observed because the observation is not
# needed for the quantized op.
return r
r"""Operation equivalent to ``torch.mul(Tensor, Tensor)``"""
def mul(self, x: Tensor, y: Tensor) -> Tensor:
r = torch.mul(x, y)
r = self.activation_post_process(r)
return r
r"""Operation equivalent to ``torch.mul(Tensor, float)``"""
def mul_scalar(self, x: Tensor, y: float) -> Tensor:
r = torch.mul(x, y)
# Note: this operation is not observed because the observation is not
# needed for the quantized op.
return r
r"""Operation equivalent to ``torch.cat``"""
def cat(self, x: list[Tensor], dim: int = 0) -> Tensor:
r = torch.cat(x, dim=dim)
r = self.activation_post_process(r)
return r
r"""Operation equivalent to ``relu(torch.add(x,y))``"""
def add_relu(self, x: Tensor, y: Tensor) -> Tensor:
r = torch.add(x, y)
r = torch.nn.functional.relu(r)
r = self.activation_post_process(r)
return r
r"""Operation equivalent to ``torch.matmul(Tensor, Tensor)``"""
def matmul(self, x: Tensor, y: Tensor) -> Tensor:
r = torch.matmul(x, y)
r = self.activation_post_process(r)
return r
| FloatFunctional |
python | huggingface__transformers | src/transformers/models/speecht5/modeling_speecht5.py | {
"start": 9538,
"end": 10649
} | class ____(GradientCheckpointingLayer):
def __init__(self, config, layer_id=0):
super().__init__()
self.in_conv_dim = config.conv_dim[layer_id - 1] if layer_id > 0 else 1
self.out_conv_dim = config.conv_dim[layer_id]
self.conv = nn.Conv1d(
self.in_conv_dim,
self.out_conv_dim,
kernel_size=config.conv_kernel[layer_id],
stride=config.conv_stride[layer_id],
bias=config.conv_bias,
)
self.layer_norm = nn.LayerNorm(self.out_conv_dim, elementwise_affine=True)
self.activation = ACT2FN[config.feat_extract_activation]
def forward(self, hidden_states):
hidden_states = self.conv(hidden_states)
hidden_states = hidden_states.transpose(-2, -1)
hidden_states = self.layer_norm(hidden_states)
hidden_states = hidden_states.transpose(-2, -1)
hidden_states = self.activation(hidden_states)
return hidden_states
# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2GroupNormConvLayer with Wav2Vec2->SpeechT5
| SpeechT5LayerNormConvLayer |
python | great-expectations__great_expectations | great_expectations/expectations/expectation.py | {
"start": 13715,
"end": 14578
} | class ____(ModelMetaclass):
"""MetaExpectation registers Expectations as they are defined, adding them to the Expectation registry.
Any class inheriting from Expectation will be registered based on the value of the "expectation_type" class
attribute, or, if that is not set, by snake-casing the name of the class.
""" # noqa: E501 # FIXME CoP
def __new__(cls, clsname, bases, attrs):
newclass = super().__new__(cls, clsname, bases, attrs)
# noinspection PyUnresolvedReferences
if not newclass.is_abstract():
newclass.expectation_type = camel_to_snake(clsname)
register_expectation(newclass)
else:
newclass.expectation_type = ""
# noinspection PyUnresolvedReferences
newclass._register_renderer_functions()
return newclass
@public_api
| MetaExpectation |
python | django-guardian__django-guardian | guardian/models/models.py | {
"start": 7210,
"end": 8244
} | class ____(GroupObjectPermissionAbstract):
"""The default implementation of the GroupObjectPermissionAbstract model.
If `GUARDIAN_GROUP_OBJ_PERMS_MODEL` is not set at the beginning of the project, this model will be used.
Uses Django's contenttypes framework to store generic relations.
See Also:
- [Django's Documentation on Abstract Base Models](https://docs.djangoproject.com/en/stable/topics/db/models/#abstract-base-classes)
- [Django-Guardian Performance Tuning](https://django-guardian.readthedocs.io/en/stable/userguide/performance.html)
- [How to override the default GroupObjectPermission](https://django-guardian.readthedocs.io/en/stable/configuration.html#guardian-user-obj-perms-model)
"""
class Meta(GroupObjectPermissionAbstract.Meta):
abstract = False
indexes = [
models.Index(fields=["permission", "group", "content_type", "object_pk"]),
models.Index(fields=["group", "content_type", "object_pk"]),
]
| GroupObjectPermission |
python | coleifer__peewee | tests/model_sql.py | {
"start": 36237,
"end": 38832
} | class ____(ModelDatabaseTestCase):
requires = [Emp, OCTest, UKVP]
def test_atomic_update(self):
query = OCTest.insert(a='foo', b=1).on_conflict(
conflict_target=(OCTest.a,),
update={OCTest.b: OCTest.b + 2})
self.assertSQL(query, (
'INSERT INTO "oc_test" ("a", "b", "c") VALUES (?, ?, ?) '
'ON CONFLICT ("a") '
'DO UPDATE SET "b" = ("oc_test"."b" + ?) '
'RETURNING "oc_test"."id"'), ['foo', 1, 0, 2])
def test_on_conflict_do_nothing(self):
query = OCTest.insert(a='foo', b=1).on_conflict(action='IGNORE')
self.assertSQL(query, (
'INSERT INTO "oc_test" ("a", "b", "c") VALUES (?, ?, ?) '
'ON CONFLICT DO NOTHING '
'RETURNING "oc_test"."id"'), ['foo', 1, 0])
query = OCTest.insert(a='foo', b=1).on_conflict(
conflict_target=(OCTest.a,),
action='IGNORE')
self.assertSQL(query, (
'INSERT INTO "oc_test" ("a", "b", "c") VALUES (?, ?, ?) '
'ON CONFLICT ("a") DO NOTHING '
'RETURNING "oc_test"."id"'), ['foo', 1, 0])
def test_update_where_clause(self):
# Add a new row with the given "a" value. If a conflict occurs,
# re-insert with b=b+2 so long as the original b < 3.
query = OCTest.insert(a='foo', b=1).on_conflict(
conflict_target=(OCTest.a,),
update={OCTest.b: OCTest.b + 2},
where=(OCTest.b < 3))
self.assertSQL(query, (
'INSERT INTO "oc_test" ("a", "b", "c") VALUES (?, ?, ?) '
'ON CONFLICT ("a") DO UPDATE SET "b" = ("oc_test"."b" + ?) '
'WHERE ("oc_test"."b" < ?) '
'RETURNING "oc_test"."id"'), ['foo', 1, 0, 2, 3])
def test_conflict_target_constraint_where(self):
fields = [UKVP.key, UKVP.value, UKVP.extra]
data = [('k1', 1, 2), ('k2', 2, 3)]
query = (UKVP.insert_many(data, fields)
.on_conflict(conflict_target=(UKVP.key, UKVP.value),
conflict_where=(UKVP.extra > 1),
preserve=(UKVP.extra,),
where=(UKVP.key != 'kx')))
self.assertSQL(query, (
'INSERT INTO "ukvp" ("key", "value", "extra") '
'VALUES (?, ?, ?), (?, ?, ?) '
'ON CONFLICT ("key", "value") WHERE ("extra" > ?) '
'DO UPDATE SET "extra" = EXCLUDED."extra" '
'WHERE ("ukvp"."key" != ?) RETURNING "ukvp"."id"'),
['k1', 1, 2, 'k2', 2, 3, 1, 'kx'])
| TestOnConflictSQL |
python | sqlalchemy__sqlalchemy | test/sql/test_types.py | {
"start": 14946,
"end": 17226
} | class ____(fixtures.TestBase):
@testing.combinations(
(String(), String()),
(VARBINARY(), LargeBinary()),
(mysql.BINARY(), LargeBinary()),
(mysql.MEDIUMBLOB(), LargeBinary()),
(oracle.RAW(), LargeBinary()),
(pg.BYTEA(), LargeBinary()),
(VARCHAR(length=100), String(length=100)),
(NVARCHAR(length=100), Unicode(length=100)),
(DATE(), Date()),
(pg.JSON(), sa.JSON()),
(pg.ARRAY(sa.String), sa.ARRAY(sa.String)),
(Enum("a", "b", "c"), Enum("a", "b", "c")),
(pg.ENUM("a", "b", "c", name="pgenum"), Enum("a", "b", "c")),
(mysql.ENUM("a", "b", "c"), Enum("a", "b", "c")),
(pg.INTERVAL(precision=5), Interval(native=True, second_precision=5)),
(
oracle.INTERVAL(second_precision=5, day_precision=5),
Interval(native=True, day_precision=5, second_precision=5),
),
)
def test_as_generic(self, t1, t2):
assert repr(t1.as_generic(allow_nulltype=False)) == repr(t2)
@testing.combinations(
*[
(t,)
for t in _all_types(omit_special_types=True)
if not util.method_is_overridden(t, TypeEngine.as_generic)
and not util.method_is_overridden(
t, TypeEngine._generic_type_affinity
)
]
)
def test_as_generic_all_types_heuristic(self, type_):
t1 = _get_instance(type_)
try:
gentype = t1.as_generic()
except NotImplementedError:
pass
else:
assert isinstance(t1, gentype.__class__)
assert isinstance(gentype, TypeEngine)
gentype = t1.as_generic(allow_nulltype=True)
if not isinstance(gentype, types.NULLTYPE.__class__):
assert isinstance(t1, gentype.__class__)
assert isinstance(gentype, TypeEngine)
@testing.combinations(
*[
(t,)
for t in _all_types(omit_special_types=True)
if util.method_is_overridden(t, TypeEngine.as_generic)
]
)
def test_as_generic_all_types_custom(self, type_):
t1 = _get_instance(type_)
gentype = t1.as_generic(allow_nulltype=False)
assert isinstance(gentype, TypeEngine)
| AsGenericTest |
python | getsentry__sentry | tests/acceptance/test_project_servicehooks.py | {
"start": 179,
"end": 2409
} | class ____(AcceptanceTestCase):
def setUp(self) -> None:
super().setUp()
self.user = self.create_user("foo@example.com")
self.org = self.create_organization(name="Rowdy Tiger", owner=None)
self.team = self.create_team(organization=self.org, name="Mariachi Band")
self.project = self.create_project(organization=self.org, teams=[self.team], name="Bengal")
self.create_member(user=self.user, organization=self.org, role="owner", teams=[self.team])
self.login_as(self.user)
self.list_hooks_path = f"/settings/{self.org.slug}/projects/{self.project.slug}/hooks/"
self.new_hook_path = f"/settings/{self.org.slug}/projects/{self.project.slug}/hooks/new/"
def test_simple(self) -> None:
with self.feature("projects:servicehooks"):
self.browser.get(self.list_hooks_path)
self.browser.wait_until_not('[data-test-id="loading-indicator"]')
# click "New"
self.browser.click('[data-test-id="new-service-hook"]')
self.browser.wait_until_not('[data-test-id="loading-indicator"]')
assert self.browser.current_url == f"{self.browser.live_server_url}{self.new_hook_path}"
self.browser.element('input[name="url"]').send_keys("https://example.com/hook")
# click "Save Changes"
self.browser.click('form [data-test-id="form-submit"]')
self.browser.wait_until_not('[data-test-id="loading-indicator"]')
assert (
self.browser.current_url == f"{self.browser.live_server_url}{self.list_hooks_path}"
)
hook = ServiceHook.objects.get(project_id=self.project.id)
assert hook.url == "https://example.com/hook"
assert not hook.events
# hopefully click the first service hook
self.browser.click('[data-test-id="project-service-hook"]')
self.browser.wait_until_not('[data-test-id="loading-indicator"]')
assert self.browser.current_url == "{}{}".format(
self.browser.live_server_url,
f"/settings/{self.org.slug}/projects/{self.project.slug}/hooks/{hook.guid}/",
)
| ProjectServiceHooksTest |
python | Textualize__textual | tests/selection_list/test_selection_click_checkbox.py | {
"start": 241,
"end": 1520
} | class ____(App[None]):
"""Test selection list application."""
def __init__(self) -> None:
super().__init__()
self.clicks: list[int] = []
def compose(self) -> ComposeResult:
yield SelectionList[int](*[(str(n), n) for n in range(10)])
@on(SelectionList.SelectionToggled)
def _record(self, event: SelectionList.SelectionToggled) -> None:
assert event.control == self.query_one(SelectionList)
self.clicks.append(event.selection_index)
async def test_click_on_prompt() -> None:
"""It should be possible to toggle a selection by clicking on the prompt."""
async with SelectionListApp().run_test() as pilot:
assert isinstance(pilot.app, SelectionListApp)
await pilot.click(SelectionList, Offset(5,1))
await pilot.pause()
assert pilot.app.clicks == [0]
async def test_click_on_checkbox() -> None:
"""It should be possible to toggle a selection by clicking on the checkbox."""
async with SelectionListApp().run_test() as pilot:
assert isinstance(pilot.app, SelectionListApp)
await pilot.click(SelectionList, Offset(3,1))
await pilot.pause()
assert pilot.app.clicks == [0]
if __name__ == "__main__":
SelectionListApp().run()
| SelectionListApp |
python | pypa__pipenv | pipenv/vendor/packaging/specifiers.py | {
"start": 26456,
"end": 39742
} | class ____(BaseSpecifier):
"""This class abstracts handling of a set of version specifiers.
It can be passed a single specifier (``>=3.0``), a comma-separated list of
specifiers (``>=3.0,!=3.1``), or no specifier at all.
"""
def __init__(self, specifiers: str = "", prereleases: bool | None = None) -> None:
"""Initialize a SpecifierSet instance.
:param specifiers:
The string representation of a specifier or a comma-separated list of
specifiers which will be parsed and normalized before use.
:param prereleases:
This tells the SpecifierSet if it should accept prerelease versions if
applicable or not. The default of ``None`` will autodetect it from the
given specifiers.
:raises InvalidSpecifier:
If the given ``specifiers`` are not parseable than this exception will be
raised.
"""
# Split on `,` to break each individual specifier into it's own item, and
# strip each item to remove leading/trailing whitespace.
split_specifiers = [s.strip() for s in specifiers.split(",") if s.strip()]
# Make each individual specifier a Specifier and save in a frozen set for later.
self._specs = frozenset(map(Specifier, split_specifiers))
# Store our prereleases value so we can use it later to determine if
# we accept prereleases or not.
self._prereleases = prereleases
@property
def prereleases(self) -> bool | None:
# If we have been given an explicit prerelease modifier, then we'll
# pass that through here.
if self._prereleases is not None:
return self._prereleases
# If we don't have any specifiers, and we don't have a forced value,
# then we'll just return None since we don't know if this should have
# pre-releases or not.
if not self._specs:
return None
# Otherwise we'll see if any of the given specifiers accept
# prereleases, if any of them do we'll return True, otherwise False.
return any(s.prereleases for s in self._specs)
@prereleases.setter
def prereleases(self, value: bool) -> None:
self._prereleases = value
def __repr__(self) -> str:
"""A representation of the specifier set that shows all internal state.
Note that the ordering of the individual specifiers within the set may not
match the input string.
>>> SpecifierSet('>=1.0.0,!=2.0.0')
<SpecifierSet('!=2.0.0,>=1.0.0')>
>>> SpecifierSet('>=1.0.0,!=2.0.0', prereleases=False)
<SpecifierSet('!=2.0.0,>=1.0.0', prereleases=False)>
>>> SpecifierSet('>=1.0.0,!=2.0.0', prereleases=True)
<SpecifierSet('!=2.0.0,>=1.0.0', prereleases=True)>
"""
pre = (
f", prereleases={self.prereleases!r}"
if self._prereleases is not None
else ""
)
return f"<SpecifierSet({str(self)!r}{pre})>"
def __str__(self) -> str:
"""A string representation of the specifier set that can be round-tripped.
Note that the ordering of the individual specifiers within the set may not
match the input string.
>>> str(SpecifierSet(">=1.0.0,!=1.0.1"))
'!=1.0.1,>=1.0.0'
>>> str(SpecifierSet(">=1.0.0,!=1.0.1", prereleases=False))
'!=1.0.1,>=1.0.0'
"""
return ",".join(sorted(str(s) for s in self._specs))
def __hash__(self) -> int:
return hash(self._specs)
def __and__(self, other: SpecifierSet | str) -> SpecifierSet:
"""Return a SpecifierSet which is a combination of the two sets.
:param other: The other object to combine with.
>>> SpecifierSet(">=1.0.0,!=1.0.1") & '<=2.0.0,!=2.0.1'
<SpecifierSet('!=1.0.1,!=2.0.1,<=2.0.0,>=1.0.0')>
>>> SpecifierSet(">=1.0.0,!=1.0.1") & SpecifierSet('<=2.0.0,!=2.0.1')
<SpecifierSet('!=1.0.1,!=2.0.1,<=2.0.0,>=1.0.0')>
"""
if isinstance(other, str):
other = SpecifierSet(other)
elif not isinstance(other, SpecifierSet):
return NotImplemented
specifier = SpecifierSet()
specifier._specs = frozenset(self._specs | other._specs)
if self._prereleases is None and other._prereleases is not None:
specifier._prereleases = other._prereleases
elif self._prereleases is not None and other._prereleases is None:
specifier._prereleases = self._prereleases
elif self._prereleases == other._prereleases:
specifier._prereleases = self._prereleases
else:
raise ValueError(
"Cannot combine SpecifierSets with True and False prerelease "
"overrides."
)
return specifier
def __eq__(self, other: object) -> bool:
"""Whether or not the two SpecifierSet-like objects are equal.
:param other: The other object to check against.
The value of :attr:`prereleases` is ignored.
>>> SpecifierSet(">=1.0.0,!=1.0.1") == SpecifierSet(">=1.0.0,!=1.0.1")
True
>>> (SpecifierSet(">=1.0.0,!=1.0.1", prereleases=False) ==
... SpecifierSet(">=1.0.0,!=1.0.1", prereleases=True))
True
>>> SpecifierSet(">=1.0.0,!=1.0.1") == ">=1.0.0,!=1.0.1"
True
>>> SpecifierSet(">=1.0.0,!=1.0.1") == SpecifierSet(">=1.0.0")
False
>>> SpecifierSet(">=1.0.0,!=1.0.1") == SpecifierSet(">=1.0.0,!=1.0.2")
False
"""
if isinstance(other, (str, Specifier)):
other = SpecifierSet(str(other))
elif not isinstance(other, SpecifierSet):
return NotImplemented
return self._specs == other._specs
def __len__(self) -> int:
"""Returns the number of specifiers in this specifier set."""
return len(self._specs)
def __iter__(self) -> Iterator[Specifier]:
"""
Returns an iterator over all the underlying :class:`Specifier` instances
in this specifier set.
>>> sorted(SpecifierSet(">=1.0.0,!=1.0.1"), key=str)
[<Specifier('!=1.0.1')>, <Specifier('>=1.0.0')>]
"""
return iter(self._specs)
def __contains__(self, item: UnparsedVersion) -> bool:
"""Return whether or not the item is contained in this specifier.
:param item: The item to check for.
This is used for the ``in`` operator and behaves the same as
:meth:`contains` with no ``prereleases`` argument passed.
>>> "1.2.3" in SpecifierSet(">=1.0.0,!=1.0.1")
True
>>> Version("1.2.3") in SpecifierSet(">=1.0.0,!=1.0.1")
True
>>> "1.0.1" in SpecifierSet(">=1.0.0,!=1.0.1")
False
>>> "1.3.0a1" in SpecifierSet(">=1.0.0,!=1.0.1")
False
>>> "1.3.0a1" in SpecifierSet(">=1.0.0,!=1.0.1", prereleases=True)
True
"""
return self.contains(item)
def contains(
self,
item: UnparsedVersion,
prereleases: bool | None = None,
installed: bool | None = None,
) -> bool:
"""Return whether or not the item is contained in this SpecifierSet.
:param item:
The item to check for, which can be a version string or a
:class:`Version` instance.
:param prereleases:
Whether or not to match prereleases with this SpecifierSet. If set to
``None`` (the default), it uses :attr:`prereleases` to determine
whether or not prereleases are allowed.
>>> SpecifierSet(">=1.0.0,!=1.0.1").contains("1.2.3")
True
>>> SpecifierSet(">=1.0.0,!=1.0.1").contains(Version("1.2.3"))
True
>>> SpecifierSet(">=1.0.0,!=1.0.1").contains("1.0.1")
False
>>> SpecifierSet(">=1.0.0,!=1.0.1").contains("1.3.0a1")
False
>>> SpecifierSet(">=1.0.0,!=1.0.1", prereleases=True).contains("1.3.0a1")
True
>>> SpecifierSet(">=1.0.0,!=1.0.1").contains("1.3.0a1", prereleases=True)
True
"""
# Ensure that our item is a Version instance.
if not isinstance(item, Version):
item = Version(item)
# Determine if we're forcing a prerelease or not, if we're not forcing
# one for this particular filter call, then we'll use whatever the
# SpecifierSet thinks for whether or not we should support prereleases.
if prereleases is None:
prereleases = self.prereleases
# We can determine if we're going to allow pre-releases by looking to
# see if any of the underlying items supports them. If none of them do
# and this item is a pre-release then we do not allow it and we can
# short circuit that here.
# Note: This means that 1.0.dev1 would not be contained in something
# like >=1.0.devabc however it would be in >=1.0.debabc,>0.0.dev0
if not prereleases and item.is_prerelease:
return False
if installed and item.is_prerelease:
item = Version(item.base_version)
# We simply dispatch to the underlying specs here to make sure that the
# given version is contained within all of them.
# Note: This use of all() here means that an empty set of specifiers
# will always return True, this is an explicit design decision.
return all(s.contains(item, prereleases=prereleases) for s in self._specs)
def filter(
self, iterable: Iterable[UnparsedVersionVar], prereleases: bool | None = None
) -> Iterator[UnparsedVersionVar]:
"""Filter items in the given iterable, that match the specifiers in this set.
:param iterable:
An iterable that can contain version strings and :class:`Version` instances.
The items in the iterable will be filtered according to the specifier.
:param prereleases:
Whether or not to allow prereleases in the returned iterator. If set to
``None`` (the default), it will be intelligently decide whether to allow
prereleases or not (based on the :attr:`prereleases` attribute, and
whether the only versions matching are prereleases).
This method is smarter than just ``filter(SpecifierSet(...).contains, [...])``
because it implements the rule from :pep:`440` that a prerelease item
SHOULD be accepted if no other versions match the given specifier.
>>> list(SpecifierSet(">=1.2.3").filter(["1.2", "1.3", "1.5a1"]))
['1.3']
>>> list(SpecifierSet(">=1.2.3").filter(["1.2", "1.3", Version("1.4")]))
['1.3', <Version('1.4')>]
>>> list(SpecifierSet(">=1.2.3").filter(["1.2", "1.5a1"]))
[]
>>> list(SpecifierSet(">=1.2.3").filter(["1.3", "1.5a1"], prereleases=True))
['1.3', '1.5a1']
>>> list(SpecifierSet(">=1.2.3", prereleases=True).filter(["1.3", "1.5a1"]))
['1.3', '1.5a1']
An "empty" SpecifierSet will filter items based on the presence of prerelease
versions in the set.
>>> list(SpecifierSet("").filter(["1.3", "1.5a1"]))
['1.3']
>>> list(SpecifierSet("").filter(["1.5a1"]))
['1.5a1']
>>> list(SpecifierSet("", prereleases=True).filter(["1.3", "1.5a1"]))
['1.3', '1.5a1']
>>> list(SpecifierSet("").filter(["1.3", "1.5a1"], prereleases=True))
['1.3', '1.5a1']
"""
# Determine if we're forcing a prerelease or not, if we're not forcing
# one for this particular filter call, then we'll use whatever the
# SpecifierSet thinks for whether or not we should support prereleases.
if prereleases is None:
prereleases = self.prereleases
# If we have any specifiers, then we want to wrap our iterable in the
# filter method for each one, this will act as a logical AND amongst
# each specifier.
if self._specs:
for spec in self._specs:
iterable = spec.filter(iterable, prereleases=bool(prereleases))
return iter(iterable)
# If we do not have any specifiers, then we need to have a rough filter
# which will filter out any pre-releases, unless there are no final
# releases.
else:
filtered: list[UnparsedVersionVar] = []
found_prereleases: list[UnparsedVersionVar] = []
for item in iterable:
parsed_version = _coerce_version(item)
# Store any item which is a pre-release for later unless we've
# already found a final version or we are accepting prereleases
if parsed_version.is_prerelease and not prereleases:
if not filtered:
found_prereleases.append(item)
else:
filtered.append(item)
# If we've found no items except for pre-releases, then we'll go
# ahead and use the pre-releases
if not filtered and found_prereleases and prereleases is None:
return iter(found_prereleases)
return iter(filtered)
| SpecifierSet |
python | sqlalchemy__sqlalchemy | test/orm/test_relationships.py | {
"start": 106554,
"end": 107668
} | class ____(fixtures.TestBase):
"""test that local-remote is correctly determined for m2m"""
def test_local_remote(self, registry):
meta = MetaData()
t1 = Table("t1", meta, Column("id", Integer, primary_key=True))
t2 = Table("t2", meta, Column("id", Integer, primary_key=True))
t12 = Table(
"tab",
meta,
Column("t1_id", Integer, ForeignKey("t1.id")),
Column("t2_id", Integer, ForeignKey("t2.id")),
)
class A:
pass
class B:
pass
registry.map_imperatively(B, t2)
m = registry.map_imperatively(
A,
t1,
properties=dict(
b_view=relationship(B, secondary=t12, viewonly=True),
b_plain=relationship(B, secondary=t12),
),
)
configure_mappers()
assert (
m.get_property("b_view").local_remote_pairs
== m.get_property("b_plain").local_remote_pairs
== [(t1.c.id, t12.c.t1_id), (t2.c.id, t12.c.t2_id)]
)
| ViewOnlyLocalRemoteM2M |
python | sphinx-doc__sphinx | sphinx/builders/linkcheck.py | {
"start": 7557,
"end": 9884
} | class ____(SphinxPostTransform):
builders = ('linkcheck',)
default_priority = 800
def run(self, **kwargs: Any) -> None:
for node in self.document.findall():
if uri := self.find_uri(node):
self._add_uri(uri, node)
def find_uri(self, node: nodes.Element) -> str | None:
"""Find a URI for a given node.
This call can be used to retrieve a URI from a provided node. If no
URI exists for a provided node, this call will return ``None``.
This method can be useful for extension developers who wish to
easily inject hyperlinks into a builder by only needing to override
this method.
:param node: A node class
:returns: URI of the node
"""
# reference nodes
if isinstance(node, nodes.reference):
if 'refuri' in node:
return node['refuri']
# image nodes
if isinstance(node, nodes.image):
uri = node['candidates'].get('?')
if uri and '://' in uri:
return uri
# raw nodes
if isinstance(node, nodes.raw):
uri = node.get('source')
if uri and '://' in uri:
return uri
return None
def _add_uri(self, uri: str, node: nodes.Element) -> None:
"""Registers a node's URI into a builder's collection of hyperlinks.
Provides the ability to register a URI value determined from a node
into the linkcheck's builder. URI's processed through this call can
be manipulated through a ``linkcheck-process-uri`` event before the
builder attempts to validate.
:param uri: URI to add
:param node: A node class where the URI was found
"""
builder = cast('CheckExternalLinksBuilder', self.env._app.builder)
hyperlinks = builder.hyperlinks
docname = self.env.current_document.docname
if newuri := self.env.events.emit_firstresult('linkcheck-process-uri', uri):
uri = newuri
try:
lineno = get_node_line(node)
except ValueError:
lineno = -1
if uri not in hyperlinks:
hyperlinks[uri] = Hyperlink(
uri, docname, self.env.doc2path(docname), lineno
)
| HyperlinkCollector |
python | apache__airflow | providers/google/tests/unit/google/common/hooks/test_base_google.py | {
"start": 5391,
"end": 6385
} | class ____:
def test_no_arguments(self):
gcp_hook = FallbackToDefaultProjectIdFixtureClass(321)
gcp_hook.method()
gcp_hook.mock.assert_called_once_with(project_id=321)
def test_default_project_id(self):
gcp_hook = FallbackToDefaultProjectIdFixtureClass(321)
gcp_hook.method(project_id=None)
gcp_hook.mock.assert_called_once_with(project_id=321)
def test_provided_project_id(self):
gcp_hook = FallbackToDefaultProjectIdFixtureClass(321)
gcp_hook.method(project_id=123)
gcp_hook.mock.assert_called_once_with(project_id=123)
def test_restrict_positional_arguments(self):
gcp_hook = FallbackToDefaultProjectIdFixtureClass(321)
with pytest.raises(AirflowException) as ctx:
gcp_hook.method(123)
assert str(ctx.value) == "You must use keyword arguments in this methods rather than positional"
assert gcp_hook.mock.call_count == 0
| TestFallbackToDefaultProjectId |
python | boto__boto3 | boto3/resources/model.py | {
"start": 7030,
"end": 20336
} | class ____:
"""
A model representing a resource, defined via a JSON description
format. A resource has identifiers, attributes, actions,
sub-resources, references and collections. For more information
on resources, see :ref:`guide_resources`.
:type name: string
:param name: The name of this resource, e.g. ``sqs`` or ``Queue``
:type definition: dict
:param definition: The JSON definition
:type resource_defs: dict
:param resource_defs: All resources defined in the service
"""
def __init__(self, name, definition, resource_defs):
self._definition = definition
self._resource_defs = resource_defs
self._renamed = {}
#: (``string``) The name of this resource
self.name = name
#: (``string``) The service shape name for this resource or ``None``
self.shape = definition.get('shape')
def load_rename_map(self, shape=None):
"""
Load a name translation map given a shape. This will set
up renamed values for any collisions, e.g. if the shape,
an action, and a subresource all are all named ``foo``
then the resource will have an action ``foo``, a subresource
named ``Foo`` and a property named ``foo_attribute``.
This is the order of precedence, from most important to
least important:
* Load action (resource.load)
* Identifiers
* Actions
* Subresources
* References
* Collections
* Waiters
* Attributes (shape members)
Batch actions are only exposed on collections, so do not
get modified here. Subresources use upper camel casing, so
are unlikely to collide with anything but other subresources.
Creates a structure like this::
renames = {
('action', 'id'): 'id_action',
('collection', 'id'): 'id_collection',
('attribute', 'id'): 'id_attribute'
}
# Get the final name for an action named 'id'
name = renames.get(('action', 'id'), 'id')
:type shape: botocore.model.Shape
:param shape: The underlying shape for this resource.
"""
# Meta is a reserved name for resources
names = {'meta'}
self._renamed = {}
if self._definition.get('load'):
names.add('load')
for item in self._definition.get('identifiers', []):
self._load_name_with_category(names, item['name'], 'identifier')
for name in self._definition.get('actions', {}):
self._load_name_with_category(names, name, 'action')
for name, ref in self._get_has_definition().items():
# Subresources require no data members, just typically
# identifiers and user input.
data_required = False
for identifier in ref['resource']['identifiers']:
if identifier['source'] == 'data':
data_required = True
break
if not data_required:
self._load_name_with_category(
names, name, 'subresource', snake_case=False
)
else:
self._load_name_with_category(names, name, 'reference')
for name in self._definition.get('hasMany', {}):
self._load_name_with_category(names, name, 'collection')
for name in self._definition.get('waiters', {}):
self._load_name_with_category(
names, Waiter.PREFIX + name, 'waiter'
)
if shape is not None:
for name in shape.members.keys():
self._load_name_with_category(names, name, 'attribute')
def _load_name_with_category(self, names, name, category, snake_case=True):
"""
Load a name with a given category, possibly renaming it
if that name is already in use. The name will be stored
in ``names`` and possibly be set up in ``self._renamed``.
:type names: set
:param names: Existing names (Python attributes, properties, or
methods) on the resource.
:type name: string
:param name: The original name of the value.
:type category: string
:param category: The value type, such as 'identifier' or 'action'
:type snake_case: bool
:param snake_case: True (default) if the name should be snake cased.
"""
if snake_case:
name = xform_name(name)
if name in names:
logger.debug(f'Renaming {self.name} {category} {name}')
self._renamed[(category, name)] = name + '_' + category
name += '_' + category
if name in names:
# This isn't good, let's raise instead of trying to keep
# renaming this value.
raise ValueError(
f'Problem renaming {self.name} {category} to {name}!'
)
names.add(name)
def _get_name(self, category, name, snake_case=True):
"""
Get a possibly renamed value given a category and name. This
uses the rename map set up in ``load_rename_map``, so that
method must be called once first.
:type category: string
:param category: The value type, such as 'identifier' or 'action'
:type name: string
:param name: The original name of the value
:type snake_case: bool
:param snake_case: True (default) if the name should be snake cased.
:rtype: string
:return: Either the renamed value if it is set, otherwise the
original name.
"""
if snake_case:
name = xform_name(name)
return self._renamed.get((category, name), name)
def get_attributes(self, shape):
"""
Get a dictionary of attribute names to original name and shape
models that represent the attributes of this resource. Looks
like the following:
{
'some_name': ('SomeName', <Shape...>)
}
:type shape: botocore.model.Shape
:param shape: The underlying shape for this resource.
:rtype: dict
:return: Mapping of resource attributes.
"""
attributes = {}
identifier_names = [i.name for i in self.identifiers]
for name, member in shape.members.items():
snake_cased = xform_name(name)
if snake_cased in identifier_names:
# Skip identifiers, these are set through other means
continue
snake_cased = self._get_name(
'attribute', snake_cased, snake_case=False
)
attributes[snake_cased] = (name, member)
return attributes
@property
def identifiers(self):
"""
Get a list of resource identifiers.
:type: list(:py:class:`Identifier`)
"""
identifiers = []
for item in self._definition.get('identifiers', []):
name = self._get_name('identifier', item['name'])
member_name = item.get('memberName', None)
if member_name:
member_name = self._get_name('attribute', member_name)
identifiers.append(Identifier(name, member_name))
return identifiers
@property
def load(self):
"""
Get the load action for this resource, if it is defined.
:type: :py:class:`Action` or ``None``
"""
action = self._definition.get('load')
if action is not None:
action = Action('load', action, self._resource_defs)
return action
@property
def actions(self):
"""
Get a list of actions for this resource.
:type: list(:py:class:`Action`)
"""
actions = []
for name, item in self._definition.get('actions', {}).items():
name = self._get_name('action', name)
actions.append(Action(name, item, self._resource_defs))
return actions
@property
def batch_actions(self):
"""
Get a list of batch actions for this resource.
:type: list(:py:class:`Action`)
"""
actions = []
for name, item in self._definition.get('batchActions', {}).items():
name = self._get_name('batch_action', name)
actions.append(Action(name, item, self._resource_defs))
return actions
def _get_has_definition(self):
"""
Get a ``has`` relationship definition from a model, where the
service resource model is treated special in that it contains
a relationship to every resource defined for the service. This
allows things like ``s3.Object('bucket-name', 'key')`` to
work even though the JSON doesn't define it explicitly.
:rtype: dict
:return: Mapping of names to subresource and reference
definitions.
"""
if self.name not in self._resource_defs:
# This is the service resource, so let us expose all of
# the defined resources as subresources.
definition = {}
for name, resource_def in self._resource_defs.items():
# It's possible for the service to have renamed a
# resource or to have defined multiple names that
# point to the same resource type, so we need to
# take that into account.
found = False
has_items = self._definition.get('has', {}).items()
for has_name, has_def in has_items:
if has_def.get('resource', {}).get('type') == name:
definition[has_name] = has_def
found = True
if not found:
# Create a relationship definition and attach it
# to the model, such that all identifiers must be
# supplied by the user. It will look something like:
#
# {
# 'resource': {
# 'type': 'ResourceName',
# 'identifiers': [
# {'target': 'Name1', 'source': 'input'},
# {'target': 'Name2', 'source': 'input'},
# ...
# ]
# }
# }
#
fake_has = {'resource': {'type': name, 'identifiers': []}}
for identifier in resource_def.get('identifiers', []):
fake_has['resource']['identifiers'].append(
{'target': identifier['name'], 'source': 'input'}
)
definition[name] = fake_has
else:
definition = self._definition.get('has', {})
return definition
def _get_related_resources(self, subresources):
"""
Get a list of sub-resources or references.
:type subresources: bool
:param subresources: ``True`` to get sub-resources, ``False`` to
get references.
:rtype: list(:py:class:`Action`)
"""
resources = []
for name, definition in self._get_has_definition().items():
if subresources:
name = self._get_name('subresource', name, snake_case=False)
else:
name = self._get_name('reference', name)
action = Action(name, definition, self._resource_defs)
data_required = False
for identifier in action.resource.identifiers:
if identifier.source == 'data':
data_required = True
break
if subresources and not data_required:
resources.append(action)
elif not subresources and data_required:
resources.append(action)
return resources
@property
def subresources(self):
"""
Get a list of sub-resources.
:type: list(:py:class:`Action`)
"""
return self._get_related_resources(True)
@property
def references(self):
"""
Get a list of reference resources.
:type: list(:py:class:`Action`)
"""
return self._get_related_resources(False)
@property
def collections(self):
"""
Get a list of collections for this resource.
:type: list(:py:class:`Collection`)
"""
collections = []
for name, item in self._definition.get('hasMany', {}).items():
name = self._get_name('collection', name)
collections.append(Collection(name, item, self._resource_defs))
return collections
@property
def waiters(self):
"""
Get a list of waiters for this resource.
:type: list(:py:class:`Waiter`)
"""
waiters = []
for name, item in self._definition.get('waiters', {}).items():
name = self._get_name('waiter', Waiter.PREFIX + name)
waiters.append(Waiter(name, item))
return waiters
| ResourceModel |
python | redis__redis-py | redis/commands/core.py | {
"start": 218306,
"end": 220861
} | class ____(CommandsProtocol):
"""
Redis PubSub commands.
see https://redis.io/topics/pubsub
"""
def publish(self, channel: ChannelT, message: EncodableT, **kwargs) -> ResponseT:
"""
Publish ``message`` on ``channel``.
Returns the number of subscribers the message was delivered to.
For more information, see https://redis.io/commands/publish
"""
return self.execute_command("PUBLISH", channel, message, **kwargs)
def spublish(self, shard_channel: ChannelT, message: EncodableT) -> ResponseT:
"""
Posts a message to the given shard channel.
Returns the number of clients that received the message
For more information, see https://redis.io/commands/spublish
"""
return self.execute_command("SPUBLISH", shard_channel, message)
def pubsub_channels(self, pattern: PatternT = "*", **kwargs) -> ResponseT:
"""
Return a list of channels that have at least one subscriber
For more information, see https://redis.io/commands/pubsub-channels
"""
return self.execute_command("PUBSUB CHANNELS", pattern, **kwargs)
def pubsub_shardchannels(self, pattern: PatternT = "*", **kwargs) -> ResponseT:
"""
Return a list of shard_channels that have at least one subscriber
For more information, see https://redis.io/commands/pubsub-shardchannels
"""
return self.execute_command("PUBSUB SHARDCHANNELS", pattern, **kwargs)
def pubsub_numpat(self, **kwargs) -> ResponseT:
"""
Returns the number of subscriptions to patterns
For more information, see https://redis.io/commands/pubsub-numpat
"""
return self.execute_command("PUBSUB NUMPAT", **kwargs)
def pubsub_numsub(self, *args: ChannelT, **kwargs) -> ResponseT:
"""
Return a list of (channel, number of subscribers) tuples
for each channel given in ``*args``
For more information, see https://redis.io/commands/pubsub-numsub
"""
return self.execute_command("PUBSUB NUMSUB", *args, **kwargs)
def pubsub_shardnumsub(self, *args: ChannelT, **kwargs) -> ResponseT:
"""
Return a list of (shard_channel, number of subscribers) tuples
for each channel given in ``*args``
For more information, see https://redis.io/commands/pubsub-shardnumsub
"""
return self.execute_command("PUBSUB SHARDNUMSUB", *args, **kwargs)
AsyncPubSubCommands = PubSubCommands
| PubSubCommands |
python | getsentry__sentry | tests/sentry/core/endpoints/test_team_projects.py | {
"start": 613,
"end": 1585
} | class ____(APITestCase):
endpoint = "sentry-api-0-team-project-index"
method = "get"
def setUp(self) -> None:
super().setUp()
self.team = self.create_team(members=[self.user])
self.proj1 = self.create_project(teams=[self.team])
self.proj2 = self.create_project(teams=[self.team])
self.login_as(user=self.user)
def test_simple(self) -> None:
response = self.get_success_response(
self.organization.slug, self.team.slug, status_code=200
)
project_ids = {item["id"] for item in response.data}
assert len(response.data) == 2
assert project_ids == {str(self.proj1.id), str(self.proj2.id)}
def test_excludes_project(self) -> None:
proj3 = self.create_project()
response = self.get_success_response(
self.organization.slug, self.team.slug, status_code=200
)
assert str(proj3.id) not in response.data
| TeamProjectsListTest |
python | pytorch__pytorch | test/torch_np/numpy_tests/core/test_multiarray.py | {
"start": 130347,
"end": 133182
} | class ____(TestCase):
def tst_basic(self, x, T, mask, val):
np.putmask(x, mask, val)
assert_equal(x[mask], np.array(val, T))
def test_ip_types(self):
unchecked_types = [bytes, str, np.void]
x = np.random.random(1000) * 100
mask = x < 40
for val in [-100, 0, 15]:
for types in "efdFDBbhil?":
for T in types:
if T not in unchecked_types:
if val < 0 and np.dtype(T).kind == "u":
val = np.iinfo(T).max - 99
self.tst_basic(x.copy().astype(T), T, mask, val)
# Also test string of a length which uses an untypical length
dt = np.dtype("S3")
self.tst_basic(x.astype(dt), dt.type, mask, dt.type(val)[:3])
def test_mask_size(self):
assert_raises(ValueError, np.putmask, np.array([1, 2, 3]), [True], 5)
@parametrize("greater", (True, False))
def test_byteorder(self, greater):
dtype = ">i4" if greater else "<i4"
x = np.array([1, 2, 3], dtype)
np.putmask(x, [True, False, True], -1)
assert_array_equal(x, [-1, 2, -1])
def test_record_array(self):
# Note mixed byteorder.
rec = np.array(
[(-5, 2.0, 3.0), (5.0, 4.0, 3.0)],
dtype=[("x", "<f8"), ("y", ">f8"), ("z", "<f8")],
)
np.putmask(rec["x"], [True, False], 10)
assert_array_equal(rec["x"], [10, 5])
assert_array_equal(rec["y"], [2, 4])
assert_array_equal(rec["z"], [3, 3])
np.putmask(rec["y"], [True, False], 11)
assert_array_equal(rec["x"], [10, 5])
assert_array_equal(rec["y"], [11, 4])
assert_array_equal(rec["z"], [3, 3])
def test_overlaps(self):
# gh-6272 check overlap
x = np.array([True, False, True, False])
np.putmask(x[1:4], [True, True, True], x[:3])
assert_equal(x, np.array([True, True, False, True]))
x = np.array([True, False, True, False])
np.putmask(x[1:4], x[:3], [True, False, True])
assert_equal(x, np.array([True, True, True, True]))
def test_writeable(self):
a = np.arange(5)
a.flags.writeable = False
with pytest.raises(ValueError):
np.putmask(a, a >= 2, 3)
def test_kwargs(self):
x = np.array([0, 0])
np.putmask(x, [0, 1], [-1, -2])
assert_array_equal(x, [0, -2])
x = np.array([0, 0])
np.putmask(x, mask=[0, 1], values=[-1, -2])
assert_array_equal(x, [0, -2])
x = np.array([0, 0])
np.putmask(x, values=[-1, -2], mask=[0, 1])
assert_array_equal(x, [0, -2])
with pytest.raises(TypeError):
np.putmask(a=x, values=[-1, -2], mask=[0, 1])
@instantiate_parametrized_tests
| TestPutmask |
python | huggingface__transformers | src/transformers/models/deformable_detr/modeling_deformable_detr.py | {
"start": 12692,
"end": 15129
} | class ____(nn.Module):
"""
BatchNorm2d where the batch statistics and the affine parameters are fixed.
Copy-paste from torchvision.misc.ops with added eps before rqsrt, without which any other models than
torchvision.models.resnet[18,34,50,101] produce nans.
"""
def __init__(self, n):
super().__init__()
self.register_buffer("weight", torch.ones(n))
self.register_buffer("bias", torch.zeros(n))
self.register_buffer("running_mean", torch.zeros(n))
self.register_buffer("running_var", torch.ones(n))
def _load_from_state_dict(
self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs
):
num_batches_tracked_key = prefix + "num_batches_tracked"
if num_batches_tracked_key in state_dict:
del state_dict[num_batches_tracked_key]
super()._load_from_state_dict(
state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs
)
def forward(self, x):
# move reshapes to the beginning
# to make it user-friendly
weight = self.weight.reshape(1, -1, 1, 1)
bias = self.bias.reshape(1, -1, 1, 1)
running_var = self.running_var.reshape(1, -1, 1, 1)
running_mean = self.running_mean.reshape(1, -1, 1, 1)
epsilon = 1e-5
scale = weight * (running_var + epsilon).rsqrt()
bias = bias - running_mean * scale
return x * scale + bias
# Copied from transformers.models.detr.modeling_detr.replace_batch_norm with Detr->DeformableDetr
def replace_batch_norm(model):
r"""
Recursively replace all `torch.nn.BatchNorm2d` with `DeformableDetrFrozenBatchNorm2d`.
Args:
model (torch.nn.Module):
input model
"""
for name, module in model.named_children():
if isinstance(module, nn.BatchNorm2d):
new_module = DeformableDetrFrozenBatchNorm2d(module.num_features)
if module.weight.device != torch.device("meta"):
new_module.weight.copy_(module.weight)
new_module.bias.copy_(module.bias)
new_module.running_mean.copy_(module.running_mean)
new_module.running_var.copy_(module.running_var)
model._modules[name] = new_module
if len(list(module.children())) > 0:
replace_batch_norm(module)
| DeformableDetrFrozenBatchNorm2d |
python | Pylons__pyramid | src/pyramid/predicates.py | {
"start": 8395,
"end": 9110
} | class ____:
def __init__(self, predicate):
self.predicate = predicate
def _notted_text(self, val):
# if the underlying predicate doesnt return a value, it's not really
# a predicate, it's just something pretending to be a predicate,
# so dont update the hash
if val:
val = '!' + val
return val
def text(self):
return self._notted_text(self.predicate.text())
def phash(self):
return self._notted_text(self.predicate.phash())
def __call__(self, context, request):
result = self.predicate(context, request)
phash = self.phash()
if phash:
result = not result
return result
| Notted |
python | getsentry__sentry | src/sentry/interfaces/contexts.py | {
"start": 6318,
"end": 6473
} | class ____(ContextType):
type = "browser"
context_to_tag_mapping = {"": "{browser}", "name": "{name}"}
# viewport
@contexttype
| BrowserContextType |
python | getsentry__sentry | tests/sentry/relocation/api/endpoints/artifacts/test_index.py | {
"start": 4624,
"end": 7691
} | class ____(GetRelocationArtifactsTest):
@override_options({"staff.ga-rollout": True})
def test_bad_unprivileged_user(self) -> None:
self.login_as(user=self.owner, superuser=False, staff=False)
# Ensures we don't reveal existence info to improperly authenticated users.
does_not_exist_uuid = uuid4().hex
self.get_error_response(str(does_not_exist_uuid), status_code=403)
self.get_error_response(str(self.relocation.uuid), status_code=403)
def test_bad_superuser_disabled(self) -> None:
self.add_user_permission(self.superuser, RELOCATION_ADMIN_PERMISSION)
self.login_as(user=self.superuser, superuser=False)
# Ensures we don't reveal existence info to improperly authenticated users.
does_not_exist_uuid = uuid4().hex
self.get_error_response(str(does_not_exist_uuid), status_code=403)
self.get_error_response(str(self.relocation.uuid), status_code=403)
@override_options({"staff.ga-rollout": True})
def test_bad_staff_disabled(self) -> None:
self.add_user_permission(self.staff_user, RELOCATION_ADMIN_PERMISSION)
self.login_as(user=self.staff_user, staff=False)
# Ensures we don't reveal existence info to improperly authenticated users.
does_not_exist_uuid = uuid4().hex
self.get_error_response(str(does_not_exist_uuid), status_code=403)
self.get_error_response(str(self.relocation.uuid), status_code=403)
def test_bad_has_superuser_but_no_relocation_admin_permission(self) -> None:
self.login_as(user=self.superuser, superuser=True)
# Ensures we don't reveal existence info to improperly authenticated users.
does_not_exist_uuid = uuid4().hex
response = self.get_error_response(str(does_not_exist_uuid), status_code=403)
assert response.data.get("detail") == ERR_NEED_RELOCATION_ADMIN
response = self.get_error_response(str(self.relocation.uuid), status_code=403)
assert response.data.get("detail") == ERR_NEED_RELOCATION_ADMIN
@override_options({"staff.ga-rollout": True})
def test_bad_has_staff_but_no_relocation_admin_permission(self) -> None:
self.login_as(user=self.staff_user, staff=True)
# Ensures we don't reveal existence info to improperly authenticated users.
does_not_exist_uuid = uuid4().hex
response = self.get_error_response(str(does_not_exist_uuid), status_code=403)
assert response.data.get("detail") == ERR_NEED_RELOCATION_ADMIN
response = self.get_error_response(str(self.relocation.uuid), status_code=403)
assert response.data.get("detail") == ERR_NEED_RELOCATION_ADMIN
@override_options({"staff.ga-rollout": True})
def test_bad_relocation_not_found(self) -> None:
self.add_user_permission(self.staff_user, RELOCATION_ADMIN_PERMISSION)
self.login_as(user=self.staff_user, staff=True)
does_not_exist_uuid = uuid4().hex
self.get_error_response(str(does_not_exist_uuid), status_code=404)
| GetRelocationArtifactsBadTest |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/nn_ops/rnn_cell_test.py | {
"start": 96037,
"end": 100002
} | class ____(test.TestCase):
def _execute_rnn_on(self,
rnn_device=None,
cell_device=None,
input_device=None):
batch_size = 3
time_steps = 7
input_size = 5
num_units = 10
cell = rnn_cell.LSTMCell(num_units, use_peepholes=True)
gpu_cell = DeviceWrapperCell(cell, cell_device)
inputs = np.random.randn(batch_size, time_steps, input_size).astype(
np.float32)
sequence_length = np.random.randint(0, time_steps, size=batch_size)
if input_device is not None:
with ops.device(input_device):
inputs = constant_op.constant(inputs)
if rnn_device is not None:
with ops.device(rnn_device):
outputs, _ = rnn.dynamic_rnn(
gpu_cell,
inputs,
sequence_length=sequence_length,
dtype=dtypes.float32)
else:
outputs, _ = rnn.dynamic_rnn(
gpu_cell,
inputs,
sequence_length=sequence_length,
dtype=dtypes.float32)
with self.session() as sess:
opts = config_pb2.RunOptions(trace_level=config_pb2.RunOptions.FULL_TRACE)
run_metadata = config_pb2.RunMetadata()
variables_lib.global_variables_initializer().run()
sess.run(outputs, options=opts, run_metadata=run_metadata)
return run_metadata
def _retrieve_cpu_gpu_stats(self, run_metadata):
cpu_stats = None
gpu_stats = None
step_stats = run_metadata.step_stats
for ds in step_stats.dev_stats:
if "cpu:0" in ds.device[-5:].lower():
cpu_stats = ds.node_stats
if "gpu:0" == ds.device[-5:].lower():
gpu_stats = ds.node_stats
return cpu_stats, gpu_stats
@test_util.run_v1_only("b/124229375")
def testRNNOnCPUCellOnGPU(self):
if not test.is_gpu_available():
return # Test requires access to a GPU
gpu_dev = test.gpu_device_name()
run_metadata = self._execute_rnn_on(
rnn_device="/cpu:0", cell_device=gpu_dev)
cpu_stats, gpu_stats = self._retrieve_cpu_gpu_stats(run_metadata)
def _assert_in(op_str, in_stats, out_stats):
self.assertTrue(any(op_str in s.node_name for s in in_stats))
self.assertFalse(any(op_str in s.node_name for s in out_stats))
# Writes happen at output of RNN cell
_assert_in("TensorArrayWrite", gpu_stats, cpu_stats)
# Gather happens on final TensorArray
_assert_in("TensorArrayGather", gpu_stats, cpu_stats)
# Reads happen at input to RNN cell
_assert_in("TensorArrayRead", cpu_stats, gpu_stats)
# Scatters happen to get initial input into TensorArray
_assert_in("TensorArrayScatter", cpu_stats, gpu_stats)
@test_util.run_v1_only("b/124229375")
def testRNNOnCPUCellOnCPU(self):
if not test.is_gpu_available():
return # Test requires access to a GPU
gpu_dev = test.gpu_device_name()
run_metadata = self._execute_rnn_on(
rnn_device="/cpu:0", cell_device="/cpu:0", input_device=gpu_dev)
cpu_stats, gpu_stats = self._retrieve_cpu_gpu_stats(run_metadata)
def _assert_in(op_str, in_stats, out_stats):
self.assertTrue(any(op_str in s.node_name for s in in_stats))
self.assertFalse(any(op_str in s.node_name for s in out_stats))
# All TensorArray operations happen on CPU
_assert_in("TensorArray", cpu_stats, gpu_stats)
@test_util.run_v1_only("b/124229375")
def testInputOnGPUCellNotDeclared(self):
if not test.is_gpu_available():
return # Test requires access to a GPU
gpu_dev = test.gpu_device_name()
run_metadata = self._execute_rnn_on(input_device=gpu_dev)
cpu_stats, gpu_stats = self._retrieve_cpu_gpu_stats(run_metadata)
def _assert_in(op_str, in_stats, out_stats):
self.assertTrue(any(op_str in s.node_name for s in in_stats))
self.assertFalse(any(op_str in s.node_name for s in out_stats))
# Everything happens on GPU
_assert_in("TensorArray", gpu_stats, cpu_stats)
| TensorArrayOnCorrectDeviceTest |
python | celery__celery | t/unit/worker/test_request.py | {
"start": 2154,
"end": 2977
} | class ____:
def test_order(self):
class A:
pass
class B(A):
pass
class C(B):
pass
class D(C):
@classmethod
def mro(cls):
return ()
A.x = 10
assert mro_lookup(C, 'x') == A
assert mro_lookup(C, 'x', stop={A}) is None
B.x = 10
assert mro_lookup(C, 'x') == B
C.x = 10
assert mro_lookup(C, 'x') == C
assert mro_lookup(D, 'x') is None
def jail(app, task_id, name, request_opts, args, kwargs):
request = {'id': task_id}
request.update(request_opts)
task = app.tasks[name]
task.__trace__ = None # rebuild
return trace_task(
task, task_id, args, kwargs, request=request, eager=False, app=app,
).retval
| test_mro_lookup |
python | facelessuser__soupsieve | soupsieve/css_types.py | {
"start": 8818,
"end": 10192
} | class ____(Immutable):
"""Selector list."""
__slots__ = ("selectors", "is_not", "is_html", "_hash")
selectors: tuple[Selector | SelectorNull, ...]
is_not: bool
is_html: bool
def __init__(
self,
selectors: Iterable[Selector | SelectorNull] | None = None,
is_not: bool = False,
is_html: bool = False
) -> None:
"""Initialize."""
super().__init__(
selectors=tuple(selectors) if selectors is not None else (),
is_not=is_not,
is_html=is_html
)
def __iter__(self) -> Iterator[Selector | SelectorNull]:
"""Iterator."""
return iter(self.selectors)
def __len__(self) -> int:
"""Length."""
return len(self.selectors)
def __getitem__(self, index: int) -> Selector | SelectorNull:
"""Get item."""
return self.selectors[index]
def _pickle(p: Any) -> Any:
return p.__base__(), tuple([getattr(p, s) for s in p.__slots__[:-1]])
def pickle_register(obj: Any) -> None:
"""Allow object to be pickled."""
copyreg.pickle(obj, _pickle)
pickle_register(Selector)
pickle_register(SelectorNull)
pickle_register(SelectorTag)
pickle_register(SelectorAttribute)
pickle_register(SelectorContains)
pickle_register(SelectorNth)
pickle_register(SelectorLang)
pickle_register(SelectorList)
| SelectorList |
python | django__django | tests/admin_views/admin.py | {
"start": 15205,
"end": 16026
} | class ____(admin.ModelAdmin):
list_display = ["title", "public"]
readonly_fields = (
"posted",
"awesomeness_level",
"coolness",
"value",
"multiline",
"multiline_html",
lambda obj: "foo",
"readonly_content",
)
inlines = [LinkInline]
@admin.display
def coolness(self, instance):
if instance.pk:
return "%d amount of cool." % instance.pk
else:
return "Unknown coolness."
@admin.display(description="Value in $US")
def value(self, instance):
return 1000
@admin.display
def multiline(self, instance):
return "Multiline\ntest\nstring"
@admin.display
def multiline_html(self, instance):
return mark_safe("Multiline<br>\nhtml<br>\ncontent")
| PostAdmin |
python | pytorch__pytorch | torch/_functorch/_aot_autograd/runtime_wrappers.py | {
"start": 4215,
"end": 4927
} | class ____:
def __init__(self, info, runtime_metadata, trace_joint):
self.base_idx = info.base_idx
self.unwrap_out = _unwrap_tensoralias if trace_joint else _identity
self.requires_grad = info.requires_grad
self.view_meta_sequence = info.view_meta_sequence
self.replay_views = config.view_replay_for_aliased_outputs
def __call__(self, orig_inputs, fw_outs, out):
aliased_base_tensor = orig_inputs[self.base_idx]
return gen_alias_from_base(
aliased_base_tensor,
self.unwrap_out(out),
self.requires_grad,
self.view_meta_sequence,
replay_views=self.replay_views,
)
| AliasOfInputHandler |
python | pdm-project__pdm | src/pdm/pytest.py | {
"start": 6904,
"end": 7170
} | class ____(dict):
def get_all(self, name: str, fallback: list[str] | None = None) -> list[str] | None:
return [self[name]] if name in self else fallback
def __getitem__(self, __key: str) -> str:
return cast(str, dict.get(self, __key))
| Metadata |
python | altair-viz__altair | altair/vegalite/v6/schema/core.py | {
"start": 1008073,
"end": 1045000
} | class ____(AnyMarkConfig):
"""
RectConfig schema wrapper.
Parameters
----------
align : dict, :class:`Align`, :class:`ExprRef`, Literal['left', 'center', 'right']
The horizontal alignment of the text or ranged marks (area, bar, image, rect, rule).
One of ``"left"``, ``"right"``, ``"center"``.
**Note:** Expression reference is *not* supported for range marks.
angle : dict, float, :class:`ExprRef`
The rotation angle of the text, in degrees.
aria : bool, dict, :class:`ExprRef`
A boolean flag indicating if `ARIA attributes
<https://developer.mozilla.org/en-US/docs/Web/Accessibility/ARIA>`__ should be
included (SVG output only). If ``false``, the "aria-hidden" attribute will be set on
the output SVG element, removing the mark item from the ARIA accessibility tree.
ariaRole : str, dict, :class:`ExprRef`
Sets the type of user interface element of the mark item for `ARIA accessibility
<https://developer.mozilla.org/en-US/docs/Web/Accessibility/ARIA>`__ (SVG output
only). If specified, this property determines the "role" attribute. Warning: this
property is experimental and may be changed in the future.
ariaRoleDescription : str, dict, :class:`ExprRef`
A human-readable, author-localized description for the role of the mark item for
`ARIA accessibility
<https://developer.mozilla.org/en-US/docs/Web/Accessibility/ARIA>`__ (SVG output
only). If specified, this property determines the "aria-roledescription" attribute.
Warning: this property is experimental and may be changed in the future.
aspect : bool, dict, :class:`ExprRef`
Whether to keep aspect ratio of image marks.
baseline : dict, :class:`ExprRef`, :class:`Baseline`, :class:`TextBaseline`, Literal['alphabetic', 'line-bottom', 'line-top', 'top', 'middle', 'bottom']
For text marks, the vertical text baseline. One of ``"alphabetic"`` (default),
``"top"``, ``"middle"``, ``"bottom"``, ``"line-top"``, ``"line-bottom"``, or an
expression reference that provides one of the valid values. The ``"line-top"`` and
``"line-bottom"`` values operate similarly to ``"top"`` and ``"bottom"``, but are
calculated relative to the ``lineHeight`` rather than ``fontSize`` alone.
For range marks, the vertical alignment of the marks. One of ``"top"``,
``"middle"``, ``"bottom"``.
**Note:** Expression reference is *not* supported for range marks.
binSpacing : float
Offset between bars for binned field. The ideal value for this is either 0
(preferred by statisticians) or 1 (Vega-Lite default, D3 example style).
**Default value:** ``1``
blend : dict, :class:`Blend`, :class:`ExprRef`, Literal[None, 'multiply', 'screen', 'overlay', 'darken', 'lighten', 'color-dodge', 'color-burn', 'hard-light', 'soft-light', 'difference', 'exclusion', 'hue', 'saturation', 'color', 'luminosity']
The color blend mode for drawing an item on its current background. Any valid `CSS
mix-blend-mode <https://developer.mozilla.org/en-US/docs/Web/CSS/mix-blend-mode>`__
value can be used.
**Default value:** ``"source-over"``
color : str, dict, :class:`Color`, :class:`ExprRef`, :class:`Gradient`, :class:`HexColor`, :class:`ColorName`, :class:`LinearGradient`, :class:`RadialGradient`, Literal['black', 'silver', 'gray', 'white', 'maroon', 'red', 'purple', 'fuchsia', 'green', 'lime', 'olive', 'yellow', 'navy', 'blue', 'teal', 'aqua', 'orange', 'aliceblue', 'antiquewhite', 'aquamarine', 'azure', 'beige', 'bisque', 'blanchedalmond', 'blueviolet', 'brown', 'burlywood', 'cadetblue', 'chartreuse', 'chocolate', 'coral', 'cornflowerblue', 'cornsilk', 'crimson', 'cyan', 'darkblue', 'darkcyan', 'darkgoldenrod', 'darkgray', 'darkgreen', 'darkgrey', 'darkkhaki', 'darkmagenta', 'darkolivegreen', 'darkorange', 'darkorchid', 'darkred', 'darksalmon', 'darkseagreen', 'darkslateblue', 'darkslategray', 'darkslategrey', 'darkturquoise', 'darkviolet', 'deeppink', 'deepskyblue', 'dimgray', 'dimgrey', 'dodgerblue', 'firebrick', 'floralwhite', 'forestgreen', 'gainsboro', 'ghostwhite', 'gold', 'goldenrod', 'greenyellow', 'grey', 'honeydew', 'hotpink', 'indianred', 'indigo', 'ivory', 'khaki', 'lavender', 'lavenderblush', 'lawngreen', 'lemonchiffon', 'lightblue', 'lightcoral', 'lightcyan', 'lightgoldenrodyellow', 'lightgray', 'lightgreen', 'lightgrey', 'lightpink', 'lightsalmon', 'lightseagreen', 'lightskyblue', 'lightslategray', 'lightslategrey', 'lightsteelblue', 'lightyellow', 'limegreen', 'linen', 'magenta', 'mediumaquamarine', 'mediumblue', 'mediumorchid', 'mediumpurple', 'mediumseagreen', 'mediumslateblue', 'mediumspringgreen', 'mediumturquoise', 'mediumvioletred', 'midnightblue', 'mintcream', 'mistyrose', 'moccasin', 'navajowhite', 'oldlace', 'olivedrab', 'orangered', 'orchid', 'palegoldenrod', 'palegreen', 'paleturquoise', 'palevioletred', 'papayawhip', 'peachpuff', 'peru', 'pink', 'plum', 'powderblue', 'rosybrown', 'royalblue', 'saddlebrown', 'salmon', 'sandybrown', 'seagreen', 'seashell', 'sienna', 'skyblue', 'slateblue', 'slategray', 'slategrey', 'snow', 'springgreen', 'steelblue', 'tan', 'thistle', 
'tomato', 'turquoise', 'violet', 'wheat', 'whitesmoke', 'yellowgreen', 'rebeccapurple']
Default color.
**Default value:** ``"#4682b4"``
**Note:**
* This property cannot be used in a `style config
<https://vega.github.io/vega-lite/docs/mark.html#style-config>`__.
* The ``fill`` and ``stroke`` properties have higher precedence than ``color`` and
will override ``color``.
continuousBandSize : float
The default size of the bars on continuous scales.
**Default value:** ``5``
cornerRadius : dict, float, :class:`ExprRef`
The radius in pixels of rounded rectangles or arcs' corners.
**Default value:** ``0``
cornerRadiusBottomLeft : dict, float, :class:`ExprRef`
The radius in pixels of rounded rectangles' bottom left corner.
**Default value:** ``0``
cornerRadiusBottomRight : dict, float, :class:`ExprRef`
The radius in pixels of rounded rectangles' bottom right corner.
**Default value:** ``0``
cornerRadiusTopLeft : dict, float, :class:`ExprRef`
The radius in pixels of rounded rectangles' top right corner.
**Default value:** ``0``
cornerRadiusTopRight : dict, float, :class:`ExprRef`
The radius in pixels of rounded rectangles' top left corner.
**Default value:** ``0``
cursor : dict, :class:`Cursor`, :class:`ExprRef`, Literal['auto', 'default', 'none', 'context-menu', 'help', 'pointer', 'progress', 'wait', 'cell', 'crosshair', 'text', 'vertical-text', 'alias', 'copy', 'move', 'no-drop', 'not-allowed', 'e-resize', 'n-resize', 'ne-resize', 'nw-resize', 's-resize', 'se-resize', 'sw-resize', 'w-resize', 'ew-resize', 'ns-resize', 'nesw-resize', 'nwse-resize', 'col-resize', 'row-resize', 'all-scroll', 'zoom-in', 'zoom-out', 'grab', 'grabbing']
The mouse cursor used over the mark. Any valid `CSS cursor type
<https://developer.mozilla.org/en-US/docs/Web/CSS/cursor#Values>`__ can be used.
description : str, dict, :class:`ExprRef`
A text description of the mark item for `ARIA accessibility
<https://developer.mozilla.org/en-US/docs/Web/Accessibility/ARIA>`__ (SVG output
only). If specified, this property determines the `"aria-label" attribute
<https://developer.mozilla.org/en-US/docs/Web/Accessibility/ARIA/ARIA_Techniques/Using_the_aria-label_attribute>`__.
dir : dict, :class:`ExprRef`, :class:`TextDirection`, Literal['ltr', 'rtl']
The direction of the text. One of ``"ltr"`` (left-to-right) or ``"rtl"``
(right-to-left). This property determines on which side is truncated in response to
the limit parameter.
**Default value:** ``"ltr"``
discreteBandSize : dict, float, :class:`RelativeBandSize`
The default size of the bars with discrete dimensions. If unspecified, the default
size is ``step-2``, which provides 2 pixel offset between bars.
dx : dict, float, :class:`ExprRef`
The horizontal offset, in pixels, between the text label and its anchor point. The
offset is applied after rotation by the *angle* property.
dy : dict, float, :class:`ExprRef`
The vertical offset, in pixels, between the text label and its anchor point. The
offset is applied after rotation by the *angle* property.
ellipsis : str, dict, :class:`ExprRef`
The ellipsis string for text truncated in response to the limit parameter.
**Default value:** ``"…"``
endAngle : dict, float, :class:`ExprRef`
The end angle in radians for arc marks. A value of ``0`` indicates up (north),
increasing values proceed clockwise.
fill : str, dict, :class:`Color`, :class:`ExprRef`, :class:`Gradient`, :class:`HexColor`, :class:`ColorName`, :class:`LinearGradient`, :class:`RadialGradient`, Literal['black', 'silver', 'gray', 'white', 'maroon', 'red', 'purple', 'fuchsia', 'green', 'lime', 'olive', 'yellow', 'navy', 'blue', 'teal', 'aqua', 'orange', 'aliceblue', 'antiquewhite', 'aquamarine', 'azure', 'beige', 'bisque', 'blanchedalmond', 'blueviolet', 'brown', 'burlywood', 'cadetblue', 'chartreuse', 'chocolate', 'coral', 'cornflowerblue', 'cornsilk', 'crimson', 'cyan', 'darkblue', 'darkcyan', 'darkgoldenrod', 'darkgray', 'darkgreen', 'darkgrey', 'darkkhaki', 'darkmagenta', 'darkolivegreen', 'darkorange', 'darkorchid', 'darkred', 'darksalmon', 'darkseagreen', 'darkslateblue', 'darkslategray', 'darkslategrey', 'darkturquoise', 'darkviolet', 'deeppink', 'deepskyblue', 'dimgray', 'dimgrey', 'dodgerblue', 'firebrick', 'floralwhite', 'forestgreen', 'gainsboro', 'ghostwhite', 'gold', 'goldenrod', 'greenyellow', 'grey', 'honeydew', 'hotpink', 'indianred', 'indigo', 'ivory', 'khaki', 'lavender', 'lavenderblush', 'lawngreen', 'lemonchiffon', 'lightblue', 'lightcoral', 'lightcyan', 'lightgoldenrodyellow', 'lightgray', 'lightgreen', 'lightgrey', 'lightpink', 'lightsalmon', 'lightseagreen', 'lightskyblue', 'lightslategray', 'lightslategrey', 'lightsteelblue', 'lightyellow', 'limegreen', 'linen', 'magenta', 'mediumaquamarine', 'mediumblue', 'mediumorchid', 'mediumpurple', 'mediumseagreen', 'mediumslateblue', 'mediumspringgreen', 'mediumturquoise', 'mediumvioletred', 'midnightblue', 'mintcream', 'mistyrose', 'moccasin', 'navajowhite', 'oldlace', 'olivedrab', 'orangered', 'orchid', 'palegoldenrod', 'palegreen', 'paleturquoise', 'palevioletred', 'papayawhip', 'peachpuff', 'peru', 'pink', 'plum', 'powderblue', 'rosybrown', 'royalblue', 'saddlebrown', 'salmon', 'sandybrown', 'seagreen', 'seashell', 'sienna', 'skyblue', 'slateblue', 'slategray', 'slategrey', 'snow', 'springgreen', 'steelblue', 'tan', 'thistle', 
'tomato', 'turquoise', 'violet', 'wheat', 'whitesmoke', 'yellowgreen', 'rebeccapurple'], None
Default fill color. This property has higher precedence than ``config.color``. Set
to ``null`` to remove fill.
**Default value:** (None)
fillOpacity : dict, float, :class:`ExprRef`
The fill opacity (value between [0,1]).
**Default value:** ``1``
filled : bool
Whether the mark's color should be used as fill color instead of stroke color.
**Default value:** ``false`` for all ``point``, ``line``, and ``rule`` marks as well
as ``geoshape`` marks for `graticule
<https://vega.github.io/vega-lite/docs/data.html#graticule>`__ data sources;
otherwise, ``true``.
**Note:** This property cannot be used in a `style config
<https://vega.github.io/vega-lite/docs/mark.html#style-config>`__.
font : str, dict, :class:`ExprRef`
The typeface to set the text in (e.g., ``"Helvetica Neue"``).
fontSize : dict, float, :class:`ExprRef`
The font size, in pixels.
**Default value:** ``11``
fontStyle : str, dict, :class:`ExprRef`, :class:`FontStyle`
The font style (e.g., ``"italic"``).
fontWeight : dict, :class:`ExprRef`, :class:`FontWeight`, Literal['normal', 'bold', 'lighter', 'bolder', 100, 200, 300, 400, 500, 600, 700, 800, 900]
The font weight. This can be either a string (e.g ``"bold"``, ``"normal"``) or a
number (``100``, ``200``, ``300``, ..., ``900`` where ``"normal"`` = ``400`` and
``"bold"`` = ``700``).
height : dict, float, :class:`ExprRef`
Height of the marks.
href : str, dict, :class:`URI`, :class:`ExprRef`
A URL to load upon mouse click. If defined, the mark acts as a hyperlink.
innerRadius : dict, float, :class:`ExprRef`
The inner radius in pixels of arc marks. ``innerRadius`` is an alias for
``radius2``.
**Default value:** ``0``
interpolate : dict, :class:`ExprRef`, :class:`Interpolate`, Literal['basis', 'basis-open', 'basis-closed', 'bundle', 'cardinal', 'cardinal-open', 'cardinal-closed', 'catmull-rom', 'linear', 'linear-closed', 'monotone', 'natural', 'step', 'step-before', 'step-after']
The line interpolation method to use for line and area marks. One of the following:
* ``"linear"``: piecewise linear segments, as in a polyline.
* ``"linear-closed"``: close the linear segments to form a polygon.
* ``"step"``: alternate between horizontal and vertical segments, as in a step
function.
* ``"step-before"``: alternate between vertical and horizontal segments, as in a
step function.
* ``"step-after"``: alternate between horizontal and vertical segments, as in a step
function.
* ``"basis"``: a B-spline, with control point duplication on the ends.
* ``"basis-open"``: an open B-spline; may not intersect the start or end.
* ``"basis-closed"``: a closed B-spline, as in a loop.
* ``"cardinal"``: a Cardinal spline, with control point duplication on the ends.
* ``"cardinal-open"``: an open Cardinal spline; may not intersect the start or end,
but will intersect other control points.
* ``"cardinal-closed"``: a closed Cardinal spline, as in a loop.
* ``"bundle"``: equivalent to basis, except the tension parameter is used to
straighten the spline.
* ``"monotone"``: cubic interpolation that preserves monotonicity in y.
invalid : :class:`MarkInvalidDataMode`, Literal['filter', 'break-paths-filter-domains', 'break-paths-show-domains', 'break-paths-show-path-domains', 'show'], None
Invalid data mode, which defines how the marks and corresponding scales should
represent invalid values (``null`` and ``NaN`` in continuous scales *without*
defined output for invalid values).
* ``"filter"`` — *Exclude* all invalid values from the visualization's *marks* and
*scales*. For path marks (for line, area, trail), this option will create paths
that connect valid points, as if the data rows with invalid values do not exist.
* ``"break-paths-filter-domains"`` — Break path marks (for line, area, trail) at
invalid values. For non-path marks, this is equivalent to ``"filter"``. All
*scale* domains will *exclude* these filtered data points.
* ``"break-paths-show-domains"`` — Break paths (for line, area, trail) at invalid
values. Hide invalid values for non-path marks. All *scale* domains will
*include* these filtered data points (for both path and non-path marks).
* ``"show"`` or ``null`` — Show all data points in the marks and scale domains. Each
scale will use the output for invalid values defined in ``config.scale.invalid``
or, if unspecified, by default invalid values will produce the same visual values
as zero (if the scale includes zero) or the minimum value (if the scale does not
include zero).
* ``"break-paths-show-path-domains"`` (default) — This is equivalent to
``"break-paths-show-domains"`` for path-based marks (line/area/trail) and
``"filter"`` for non-path marks.
**Note**: If any channel's scale has an output for invalid values defined in
``config.scale.invalid``, all values for the scales will be considered "valid" since
they can produce a reasonable output for the scales. Thus, fields for such channels
will not be filtered and will not cause path breaks.
limit : dict, float, :class:`ExprRef`
The maximum length of the text mark in pixels. The text value will be automatically
truncated if the rendered size exceeds the limit.
**Default value:** ``0`` -- indicating no limit
lineBreak : str, dict, :class:`ExprRef`
A delimiter, such as a newline character, upon which to break text strings into
multiple lines. This property is ignored if the text is array-valued.
lineHeight : dict, float, :class:`ExprRef`
The line height in pixels (the spacing between subsequent lines of text) for
multi-line text marks.
minBandSize : dict, float, :class:`ExprRef`
The minimum band size for bar and rectangle marks. **Default value:** ``0.25``
opacity : dict, float, :class:`ExprRef`
The overall opacity (value between [0,1]).
**Default value:** ``0.7`` for non-aggregate plots with ``point``, ``tick``,
``circle``, or ``square`` marks or layered ``bar`` charts and ``1`` otherwise.
order : bool, None
For line and trail marks, this ``order`` property can be set to ``null`` or
``false`` to make the lines use the original order in the data sources.
orient : :class:`Orientation`, Literal['horizontal', 'vertical']
The orientation of a non-stacked bar, tick, area, and line charts. The value is
either horizontal (default) or vertical.
* For bar, rule and tick, this determines whether the size of the bar and tick
should be applied to x or y dimension.
* For area, this property determines the orient property of the Vega output.
* For line and trail marks, this property determines the sort order of the points in
the line if ``config.sortLineBy`` is not specified. For stacked charts, this is
always determined by the orientation of the stack; therefore explicitly specified
value will be ignored.
outerRadius : dict, float, :class:`ExprRef`
The outer radius in pixels of arc marks. ``outerRadius`` is an alias for ``radius``.
**Default value:** ``0``
padAngle : dict, float, :class:`ExprRef`
The angular padding applied to sides of the arc, in radians.
radius : dict, float, :class:`ExprRef`
For arc mark, the primary (outer) radius in pixels.
For text marks, polar coordinate radial offset, in pixels, of the text from the
origin determined by the ``x`` and ``y`` properties.
**Default value:** ``min(plot_width, plot_height)/2``
radius2 : dict, float, :class:`ExprRef`
The secondary (inner) radius in pixels of arc marks.
**Default value:** ``0``
shape : str, dict, :class:`ExprRef`, :class:`SymbolShape`
Shape of the point marks. Supported values include:
* plotting shapes: ``"circle"``, ``"square"``, ``"cross"``, ``"diamond"``,
``"triangle-up"``, ``"triangle-down"``, ``"triangle-right"``, or
``"triangle-left"``.
* the line symbol ``"stroke"``
* centered directional shapes ``"arrow"``, ``"wedge"``, or ``"triangle"``
* a custom `SVG path string
<https://developer.mozilla.org/en-US/docs/Web/SVG/Tutorial/Paths>`__ (For correct
sizing, custom shape paths should be defined within a square bounding box with
coordinates ranging from -1 to 1 along both the x and y dimensions.)
**Default value:** ``"circle"``
size : dict, float, :class:`ExprRef`
Default size for marks.
* For ``point``/``circle``/``square``, this represents the pixel area of the marks.
Note that this value sets the area of the symbol; the side lengths will increase
with the square root of this value.
* For ``bar``, this represents the band size of the bar, in pixels.
* For ``text``, this represents the font size, in pixels.
**Default value:**
* ``30`` for point, circle, square marks; width/height's ``step``
* ``2`` for bar marks with discrete dimensions;
* ``5`` for bar marks with continuous dimensions;
* ``11`` for text marks.
smooth : bool, dict, :class:`ExprRef`
A boolean flag (default true) indicating if the image should be smoothed when
resized. If false, individual pixels should be scaled directly rather than
interpolated with smoothing. For SVG rendering, this option may not work in some
browsers due to lack of standardization.
startAngle : dict, float, :class:`ExprRef`
The start angle in radians for arc marks. A value of ``0`` indicates up (north),
increasing values proceed clockwise.
stroke : str, dict, :class:`Color`, :class:`ExprRef`, :class:`Gradient`, :class:`HexColor`, :class:`ColorName`, :class:`LinearGradient`, :class:`RadialGradient`, Literal['black', 'silver', 'gray', 'white', 'maroon', 'red', 'purple', 'fuchsia', 'green', 'lime', 'olive', 'yellow', 'navy', 'blue', 'teal', 'aqua', 'orange', 'aliceblue', 'antiquewhite', 'aquamarine', 'azure', 'beige', 'bisque', 'blanchedalmond', 'blueviolet', 'brown', 'burlywood', 'cadetblue', 'chartreuse', 'chocolate', 'coral', 'cornflowerblue', 'cornsilk', 'crimson', 'cyan', 'darkblue', 'darkcyan', 'darkgoldenrod', 'darkgray', 'darkgreen', 'darkgrey', 'darkkhaki', 'darkmagenta', 'darkolivegreen', 'darkorange', 'darkorchid', 'darkred', 'darksalmon', 'darkseagreen', 'darkslateblue', 'darkslategray', 'darkslategrey', 'darkturquoise', 'darkviolet', 'deeppink', 'deepskyblue', 'dimgray', 'dimgrey', 'dodgerblue', 'firebrick', 'floralwhite', 'forestgreen', 'gainsboro', 'ghostwhite', 'gold', 'goldenrod', 'greenyellow', 'grey', 'honeydew', 'hotpink', 'indianred', 'indigo', 'ivory', 'khaki', 'lavender', 'lavenderblush', 'lawngreen', 'lemonchiffon', 'lightblue', 'lightcoral', 'lightcyan', 'lightgoldenrodyellow', 'lightgray', 'lightgreen', 'lightgrey', 'lightpink', 'lightsalmon', 'lightseagreen', 'lightskyblue', 'lightslategray', 'lightslategrey', 'lightsteelblue', 'lightyellow', 'limegreen', 'linen', 'magenta', 'mediumaquamarine', 'mediumblue', 'mediumorchid', 'mediumpurple', 'mediumseagreen', 'mediumslateblue', 'mediumspringgreen', 'mediumturquoise', 'mediumvioletred', 'midnightblue', 'mintcream', 'mistyrose', 'moccasin', 'navajowhite', 'oldlace', 'olivedrab', 'orangered', 'orchid', 'palegoldenrod', 'palegreen', 'paleturquoise', 'palevioletred', 'papayawhip', 'peachpuff', 'peru', 'pink', 'plum', 'powderblue', 'rosybrown', 'royalblue', 'saddlebrown', 'salmon', 'sandybrown', 'seagreen', 'seashell', 'sienna', 'skyblue', 'slateblue', 'slategray', 'slategrey', 'snow', 'springgreen', 'steelblue', 'tan', 'thistle', 
'tomato', 'turquoise', 'violet', 'wheat', 'whitesmoke', 'yellowgreen', 'rebeccapurple'], None
Default stroke color. This property has higher precedence than ``config.color``. Set
to ``null`` to remove stroke.
**Default value:** (None)
strokeCap : dict, :class:`ExprRef`, :class:`StrokeCap`, Literal['butt', 'round', 'square']
The stroke cap for line ending style. One of ``"butt"``, ``"round"``, or
``"square"``.
**Default value:** ``"butt"``
strokeDash : dict, Sequence[float], :class:`ExprRef`
An array of alternating stroke, space lengths for creating dashed or dotted lines.
strokeDashOffset : dict, float, :class:`ExprRef`
The offset (in pixels) into which to begin drawing with the stroke dash array.
strokeJoin : dict, :class:`ExprRef`, :class:`StrokeJoin`, Literal['miter', 'round', 'bevel']
The stroke line join method. One of ``"miter"``, ``"round"`` or ``"bevel"``.
**Default value:** ``"miter"``
strokeMiterLimit : dict, float, :class:`ExprRef`
The miter limit at which to bevel a line join.
strokeOffset : dict, float, :class:`ExprRef`
The offset in pixels at which to draw the group stroke and fill. If unspecified, the
default behavior is to dynamically offset stroked groups such that 1 pixel stroke
widths align with the pixel grid.
strokeOpacity : dict, float, :class:`ExprRef`
The stroke opacity (value between [0,1]).
**Default value:** ``1``
strokeWidth : dict, float, :class:`ExprRef`
The stroke width, in pixels.
tension : dict, float, :class:`ExprRef`
Depending on the interpolation type, sets the tension parameter (for line and area
marks).
text : str, dict, :class:`Text`, Sequence[str], :class:`ExprRef`
Placeholder text if the ``text`` channel is not specified
theta : dict, float, :class:`ExprRef`
* For arc marks, the arc length in radians if theta2 is not specified, otherwise the
start arc angle. (A value of 0 indicates up or “north”, increasing values proceed
clockwise.)
* For text marks, polar coordinate angle in radians.
theta2 : dict, float, :class:`ExprRef`
The end angle of arc marks in radians. A value of 0 indicates up or “north”,
increasing values proceed clockwise.
time : dict, float, :class:`ExprRef`
timeUnitBandPosition : float
Default relative band position for a time unit. If set to ``0``, the marks will be
positioned at the beginning of the time unit band step. If set to ``0.5``, the marks
will be positioned in the middle of the time unit band step.
timeUnitBandSize : float
Default relative band size for a time unit. If set to ``1``, the bandwidth of the
marks will be equal to the time unit band step. If set to ``0.5``, bandwidth of the
marks will be half of the time unit band step.
tooltip : str, bool, dict, float, :class:`ExprRef`, :class:`TooltipContent`, None
The tooltip text string to show upon mouse hover or an object defining which fields
should the tooltip be derived from.
* If ``tooltip`` is ``true`` or ``{"content": "encoding"}``, then all fields from
``encoding`` will be used.
* If ``tooltip`` is ``{"content": "data"}``, then all fields that appear in the
highlighted data point will be used.
* If set to ``null`` or ``false``, then no tooltip will be used.
See the `tooltip <https://vega.github.io/vega-lite/docs/tooltip.html>`__
documentation for a detailed discussion about tooltip in Vega-Lite.
**Default value:** ``null``
url : str, dict, :class:`URI`, :class:`ExprRef`
The URL of the image file for image marks.
width : dict, float, :class:`ExprRef`
Width of the marks.
x : dict, float, :class:`ExprRef`, Literal['width']
X coordinates of the marks, or width of horizontal ``"bar"`` and ``"area"`` without
specified ``x2`` or ``width``.
The ``value`` of this channel can be a number or a string ``"width"`` for the width
of the plot.
x2 : dict, float, :class:`ExprRef`, Literal['width']
X2 coordinates for ranged ``"area"``, ``"bar"``, ``"rect"``, and ``"rule"``.
The ``value`` of this channel can be a number or a string ``"width"`` for the width
of the plot.
y : dict, float, :class:`ExprRef`, Literal['height']
Y coordinates of the marks, or height of vertical ``"bar"`` and ``"area"`` without
specified ``y2`` or ``height``.
The ``value`` of this channel can be a number or a string ``"height"`` for the
height of the plot.
y2 : dict, float, :class:`ExprRef`, Literal['height']
Y2 coordinates for ranged ``"area"``, ``"bar"``, ``"rect"``, and ``"rule"``.
The ``value`` of this channel can be a number or a string ``"height"`` for the
height of the plot.
"""
_schema = {"$ref": "#/definitions/RectConfig"}
def __init__(
self,
align: Optional[Parameter | SchemaBase | Map | Align_T] = Undefined,
angle: Optional[float | Parameter | SchemaBase | Map] = Undefined,
aria: Optional[bool | Parameter | SchemaBase | Map] = Undefined,
ariaRole: Optional[str | Parameter | SchemaBase | Map] = Undefined,
ariaRoleDescription: Optional[str | Parameter | SchemaBase | Map] = Undefined,
aspect: Optional[bool | Parameter | SchemaBase | Map] = Undefined,
baseline: Optional[Parameter | SchemaBase | Map | TextBaseline_T] = Undefined,
binSpacing: Optional[float] = Undefined,
blend: Optional[Parameter | SchemaBase | Map | Blend_T] = Undefined,
color: Optional[str | Parameter | SchemaBase | Map | ColorName_T] = Undefined,
continuousBandSize: Optional[float] = Undefined,
cornerRadius: Optional[float | Parameter | SchemaBase | Map] = Undefined,
cornerRadiusBottomLeft: Optional[
float | Parameter | SchemaBase | Map
] = Undefined,
cornerRadiusBottomRight: Optional[
float | Parameter | SchemaBase | Map
] = Undefined,
cornerRadiusTopLeft: Optional[float | Parameter | SchemaBase | Map] = Undefined,
cornerRadiusTopRight: Optional[
float | Parameter | SchemaBase | Map
] = Undefined,
cursor: Optional[Parameter | SchemaBase | Map | Cursor_T] = Undefined,
description: Optional[str | Parameter | SchemaBase | Map] = Undefined,
dir: Optional[Parameter | SchemaBase | Map | TextDirection_T] = Undefined,
discreteBandSize: Optional[float | SchemaBase | Map] = Undefined,
dx: Optional[float | Parameter | SchemaBase | Map] = Undefined,
dy: Optional[float | Parameter | SchemaBase | Map] = Undefined,
ellipsis: Optional[str | Parameter | SchemaBase | Map] = Undefined,
endAngle: Optional[float | Parameter | SchemaBase | Map] = Undefined,
fill: Optional[
str | Parameter | SchemaBase | Map | ColorName_T | None
] = Undefined,
fillOpacity: Optional[float | Parameter | SchemaBase | Map] = Undefined,
filled: Optional[bool] = Undefined,
font: Optional[str | Parameter | SchemaBase | Map] = Undefined,
fontSize: Optional[float | Parameter | SchemaBase | Map] = Undefined,
fontStyle: Optional[str | Parameter | SchemaBase | Map] = Undefined,
fontWeight: Optional[Parameter | SchemaBase | Map | FontWeight_T] = Undefined,
height: Optional[float | Parameter | SchemaBase | Map] = Undefined,
href: Optional[str | Parameter | SchemaBase | Map] = Undefined,
innerRadius: Optional[float | Parameter | SchemaBase | Map] = Undefined,
interpolate: Optional[Parameter | SchemaBase | Map | Interpolate_T] = Undefined,
invalid: Optional[SchemaBase | MarkInvalidDataMode_T | None] = Undefined,
limit: Optional[float | Parameter | SchemaBase | Map] = Undefined,
lineBreak: Optional[str | Parameter | SchemaBase | Map] = Undefined,
lineHeight: Optional[float | Parameter | SchemaBase | Map] = Undefined,
minBandSize: Optional[float | Parameter | SchemaBase | Map] = Undefined,
opacity: Optional[float | Parameter | SchemaBase | Map] = Undefined,
order: Optional[bool | None] = Undefined,
orient: Optional[SchemaBase | Orientation_T] = Undefined,
outerRadius: Optional[float | Parameter | SchemaBase | Map] = Undefined,
padAngle: Optional[float | Parameter | SchemaBase | Map] = Undefined,
radius: Optional[float | Parameter | SchemaBase | Map] = Undefined,
radius2: Optional[float | Parameter | SchemaBase | Map] = Undefined,
shape: Optional[str | Parameter | SchemaBase | Map] = Undefined,
size: Optional[float | Parameter | SchemaBase | Map] = Undefined,
smooth: Optional[bool | Parameter | SchemaBase | Map] = Undefined,
startAngle: Optional[float | Parameter | SchemaBase | Map] = Undefined,
stroke: Optional[
str | Parameter | SchemaBase | Map | ColorName_T | None
] = Undefined,
strokeCap: Optional[Parameter | SchemaBase | Map | StrokeCap_T] = Undefined,
strokeDash: Optional[
Parameter | SchemaBase | Sequence[float] | Map
] = Undefined,
strokeDashOffset: Optional[float | Parameter | SchemaBase | Map] = Undefined,
strokeJoin: Optional[Parameter | SchemaBase | Map | StrokeJoin_T] = Undefined,
strokeMiterLimit: Optional[float | Parameter | SchemaBase | Map] = Undefined,
strokeOffset: Optional[float | Parameter | SchemaBase | Map] = Undefined,
strokeOpacity: Optional[float | Parameter | SchemaBase | Map] = Undefined,
strokeWidth: Optional[float | Parameter | SchemaBase | Map] = Undefined,
tension: Optional[float | Parameter | SchemaBase | Map] = Undefined,
text: Optional[str | Parameter | SchemaBase | Sequence[str] | Map] = Undefined,
theta: Optional[float | Parameter | SchemaBase | Map] = Undefined,
theta2: Optional[float | Parameter | SchemaBase | Map] = Undefined,
time: Optional[float | Parameter | SchemaBase | Map] = Undefined,
timeUnitBandPosition: Optional[float] = Undefined,
timeUnitBandSize: Optional[float] = Undefined,
tooltip: Optional[
str | bool | float | Parameter | SchemaBase | Map | None
] = Undefined,
url: Optional[str | Parameter | SchemaBase | Map] = Undefined,
width: Optional[float | Parameter | SchemaBase | Map] = Undefined,
x: Optional[
float | Parameter | SchemaBase | Literal["width"] | Map
] = Undefined,
x2: Optional[
float | Parameter | SchemaBase | Literal["width"] | Map
] = Undefined,
y: Optional[
float | Parameter | SchemaBase | Literal["height"] | Map
] = Undefined,
y2: Optional[
float | Parameter | SchemaBase | Literal["height"] | Map
] = Undefined,
**kwds,
):
super().__init__(
align=align,
angle=angle,
aria=aria,
ariaRole=ariaRole,
ariaRoleDescription=ariaRoleDescription,
aspect=aspect,
baseline=baseline,
binSpacing=binSpacing,
blend=blend,
color=color,
continuousBandSize=continuousBandSize,
cornerRadius=cornerRadius,
cornerRadiusBottomLeft=cornerRadiusBottomLeft,
cornerRadiusBottomRight=cornerRadiusBottomRight,
cornerRadiusTopLeft=cornerRadiusTopLeft,
cornerRadiusTopRight=cornerRadiusTopRight,
cursor=cursor,
description=description,
dir=dir,
discreteBandSize=discreteBandSize,
dx=dx,
dy=dy,
ellipsis=ellipsis,
endAngle=endAngle,
fill=fill,
fillOpacity=fillOpacity,
filled=filled,
font=font,
fontSize=fontSize,
fontStyle=fontStyle,
fontWeight=fontWeight,
height=height,
href=href,
innerRadius=innerRadius,
interpolate=interpolate,
invalid=invalid,
limit=limit,
lineBreak=lineBreak,
lineHeight=lineHeight,
minBandSize=minBandSize,
opacity=opacity,
order=order,
orient=orient,
outerRadius=outerRadius,
padAngle=padAngle,
radius=radius,
radius2=radius2,
shape=shape,
size=size,
smooth=smooth,
startAngle=startAngle,
stroke=stroke,
strokeCap=strokeCap,
strokeDash=strokeDash,
strokeDashOffset=strokeDashOffset,
strokeJoin=strokeJoin,
strokeMiterLimit=strokeMiterLimit,
strokeOffset=strokeOffset,
strokeOpacity=strokeOpacity,
strokeWidth=strokeWidth,
tension=tension,
text=text,
theta=theta,
theta2=theta2,
time=time,
timeUnitBandPosition=timeUnitBandPosition,
timeUnitBandSize=timeUnitBandSize,
tooltip=tooltip,
url=url,
width=width,
x=x,
x2=x2,
y=y,
y2=y2,
**kwds,
)
| RectConfig |
python | TheAlgorithms__Python | data_structures/stacks/stack_with_doubly_linked_list.py | {
"start": 185,
"end": 411
} | class ____[T]:
def __init__(self, data: T):
self.data = data # Assign data
self.next: Node[T] | None = None # Initialize next as null
self.prev: Node[T] | None = None # Initialize prev as null
| Node |
python | rapidsai__cudf | python/cudf/cudf/core/indexing_utils.py | {
"start": 1046,
"end": 1141
} | class ____:
"""An indexer for a boolean mask."""
key: BooleanMask
@dataclass
| MaskIndexer |
python | bokeh__bokeh | src/bokeh/models/widgets/markups.py | {
"start": 3177,
"end": 3527
} | class ____(Markup):
''' A block (paragraph) of text.
This Bokeh model corresponds to an HTML ``<p>`` element.
'''
# explicit __init__ to support Init signatures
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
__example__ = "examples/interaction/widgets/paragraph.py"
| Paragraph |
python | dagster-io__dagster | python_modules/dagster-graphql/dagster_graphql/schema/solids.py | {
"start": 14588,
"end": 18208
} | class ____:
def __init__(self, represented_pipeline: RepresentedJob, solid_def_name: str):
self._represented_pipeline = check.inst_param(
represented_pipeline, "represented_pipeline", RepresentedJob
)
check.str_param(solid_def_name, "solid_def_name")
self._solid_def_snap = represented_pipeline.get_node_def_snap(solid_def_name)
def resolve_metadata(self, _graphene_info):
metadata_items = []
for key, val in self._solid_def_snap.tags.items():
metadata_items.append(GrapheneMetadataItemDefinition(key=key, value=val))
# Backcompat for legacy system tags. Older code servers may report the compute kind under
# the legacy tag. Code servers running versions after deprecation of the legacy tag may
# have both the legacy and current tag set. We can't patch this at the Snap level
# because the host process will complain about mismatched snap IDs
if key == LEGACY_COMPUTE_KIND_TAG and COMPUTE_KIND_TAG not in self._solid_def_snap.tags:
metadata_items.append(
GrapheneMetadataItemDefinition(key=COMPUTE_KIND_TAG, value=val)
)
return metadata_items
@property
def solid_def_name(self) -> str:
return self._solid_def_snap.name
def resolve_input_definitions(self, _graphene_info) -> Sequence[GrapheneInputDefinition]:
return [
GrapheneInputDefinition(
self._represented_pipeline, self.solid_def_name, input_def_snap.name
)
for input_def_snap in self._solid_def_snap.input_def_snaps
]
def resolve_output_definitions(self, _graphene_info):
return [
GrapheneOutputDefinition(
self._represented_pipeline,
self.solid_def_name,
output_def_snap.name,
output_def_snap.is_dynamic,
)
for output_def_snap in self._solid_def_snap.output_def_snaps
]
def resolve_assetNodes(self, graphene_info: ResolveInfo) -> Sequence["GrapheneAssetNode"]:
from dagster_graphql.schema.asset_graph import GrapheneAssetNode
# This is a workaround for the fact that asset info is not persisted in pipeline snapshots.
if isinstance(self._represented_pipeline, HistoricalJob):
return []
else:
assert isinstance(self._represented_pipeline, RemoteJob)
job_asset_nodes = graphene_info.context.get_assets_in_job(
self._represented_pipeline.handle.to_selector()
)
remote_nodes = [
remote_node
for remote_node in job_asset_nodes
if (
(remote_node.asset_node_snap.node_definition_name == self.solid_def_name)
or (
remote_node.asset_node_snap.graph_name
and remote_node.asset_node_snap.graph_name == self.solid_def_name
)
)
]
return [
GrapheneAssetNode(
remote_node=remote_node,
)
for remote_node in remote_nodes
]
def resolve_pools(self, _graphene_info) -> Sequence[str]:
if isinstance(self._solid_def_snap, OpDefSnap):
return [self._solid_def_snap.pool] if self._solid_def_snap.pool else []
if isinstance(self._solid_def_snap, GraphDefSnap):
return list(self._solid_def_snap.pools)
return []
| ISolidDefinitionMixin |
python | tiangolo__fastapi | tests/test_dependency_class.py | {
"start": 597,
"end": 3379
} | class ____:
def synchronous(self, value: str) -> str:
return value
async def asynchronous(self, value: str) -> str:
return value
def synchronous_gen(self, value: str) -> Generator[str, None, None]:
yield value
async def asynchronous_gen(self, value: str) -> AsyncGenerator[str, None]:
yield value
callable_dependency = CallableDependency()
callable_gen_dependency = CallableGenDependency()
async_callable_dependency = AsyncCallableDependency()
async_callable_gen_dependency = AsyncCallableGenDependency()
methods_dependency = MethodsDependency()
@app.get("/callable-dependency")
async def get_callable_dependency(value: str = Depends(callable_dependency)):
return value
@app.get("/callable-gen-dependency")
async def get_callable_gen_dependency(value: str = Depends(callable_gen_dependency)):
return value
@app.get("/async-callable-dependency")
async def get_async_callable_dependency(
value: str = Depends(async_callable_dependency),
):
return value
@app.get("/async-callable-gen-dependency")
async def get_async_callable_gen_dependency(
value: str = Depends(async_callable_gen_dependency),
):
return value
@app.get("/synchronous-method-dependency")
async def get_synchronous_method_dependency(
value: str = Depends(methods_dependency.synchronous),
):
return value
@app.get("/synchronous-method-gen-dependency")
async def get_synchronous_method_gen_dependency(
value: str = Depends(methods_dependency.synchronous_gen),
):
return value
@app.get("/asynchronous-method-dependency")
async def get_asynchronous_method_dependency(
value: str = Depends(methods_dependency.asynchronous),
):
return value
@app.get("/asynchronous-method-gen-dependency")
async def get_asynchronous_method_gen_dependency(
value: str = Depends(methods_dependency.asynchronous_gen),
):
return value
client = TestClient(app)
@pytest.mark.parametrize(
"route,value",
[
("/callable-dependency", "callable-dependency"),
("/callable-gen-dependency", "callable-gen-dependency"),
("/async-callable-dependency", "async-callable-dependency"),
("/async-callable-gen-dependency", "async-callable-gen-dependency"),
("/synchronous-method-dependency", "synchronous-method-dependency"),
("/synchronous-method-gen-dependency", "synchronous-method-gen-dependency"),
("/asynchronous-method-dependency", "asynchronous-method-dependency"),
("/asynchronous-method-gen-dependency", "asynchronous-method-gen-dependency"),
],
)
def test_class_dependency(route, value):
response = client.get(route, params={"value": value})
assert response.status_code == 200, response.text
assert response.json() == value
| MethodsDependency |
python | mlflow__mlflow | mlflow/pyfunc/__init__.py | {
"start": 22303,
"end": 26848
} | class ____:
CONDA = "conda"
VIRTUALENV = "virtualenv"
def __init__(self):
raise NotImplementedError("This class is not meant to be instantiated.")
PY_VERSION = "python_version"
_logger = logging.getLogger(__name__)
def add_to_model(
model,
loader_module,
data=None,
code=None,
conda_env=None,
python_env=None,
model_config=None,
model_code_path=None,
**kwargs,
):
"""
Add a ``pyfunc`` spec to the model configuration.
Defines ``pyfunc`` configuration schema. Caller can use this to create a valid ``pyfunc`` model
flavor out of an existing directory structure. For example, other model flavors can use this to
specify how to use their output as a ``pyfunc``.
NOTE:
All paths are relative to the exported model root directory.
Args:
model: Existing model.
loader_module: The module to be used to load the model.
data: Path to the model data.
code: Path to the code dependencies.
conda_env: Conda environment.
python_env: Python environment.
model_config: The model configuration to apply to the model. This configuration
is available during model loading.
.. Note:: Experimental: This parameter may change or be removed in a future
release without warning.
model_code_path: Path to the model code.
kwargs: Additional key-value pairs to include in the ``pyfunc`` flavor specification.
Values must be YAML-serializable.
Returns:
Updated model configuration.
"""
params = deepcopy(kwargs)
params[MAIN] = loader_module
params[PY_VERSION] = PYTHON_VERSION
if code:
params[CODE] = code
if data:
params[DATA] = data
if conda_env or python_env:
params[ENV] = {}
if conda_env:
params[ENV][EnvType.CONDA] = conda_env
if python_env:
params[ENV][EnvType.VIRTUALENV] = python_env
if model_config:
params[MODEL_CONFIG] = model_config
if model_code_path:
params[MODEL_CODE_PATH] = model_code_path
return model.add_flavor(FLAVOR_NAME, **params)
def _extract_conda_env(env):
# In MLflow < 2.0.0, the 'env' field in a pyfunc configuration is a string containing the path
# to a conda.yaml file.
return env if isinstance(env, str) else env[EnvType.CONDA]
def _load_model_env(path):
"""
Get ENV file string from a model configuration stored in Python Function format.
Returned value is a model-relative path to a Conda Environment file,
or None if none was specified at model save time
"""
return _get_flavor_configuration(model_path=path, flavor_name=FLAVOR_NAME).get(ENV, None)
def _validate_params(params, model_metadata):
if hasattr(model_metadata, "get_params_schema"):
params_schema = model_metadata.get_params_schema()
return _enforce_params_schema(params, params_schema)
if params:
raise MlflowException.invalid_parameter_value(
"This model was not logged with a params schema and does not support "
"providing the params argument."
"Please log the model with mlflow >= 2.6.0 and specify a params schema.",
)
return
def _validate_prediction_input(data: PyFuncInput, params, input_schema, params_schema, flavor=None):
"""
Internal helper function to transform and validate input data and params for prediction.
Any additional transformation logics related to input data and params should be added here.
"""
if input_schema is not None:
try:
data = _enforce_schema(data, input_schema, flavor)
except Exception as e:
if MLFLOW_DISABLE_SCHEMA_DETAILS.get():
message = "Failed to enforce model input schema. Please check your input data."
else:
# Include error in message for backwards compatibility
message = (
f"Failed to enforce schema of data '{data}' "
f"with schema '{input_schema}'. "
f"Error: {e}"
)
raise MlflowException.invalid_parameter_value(message)
params = _enforce_params_schema(params, params_schema)
if HAS_PYSPARK and isinstance(data, SparkDataFrame):
_logger.warning(
"Input data is a Spark DataFrame. Note that behaviour for "
"Spark DataFrames is model dependent."
)
return data, params
| EnvType |
python | spyder-ide__spyder | spyder/api/config/mixins.py | {
"start": 601,
"end": 7026
} | class ____:
"""
Mixin used to access options stored in the Spyder configuration system.
"""
# Name of the configuration section that's going to be
# used to record the object's permanent data in Spyder
# config system.
CONF_SECTION = None
def get_conf(
self,
option: ConfigurationKey,
default: Union[NoDefault, BasicTypes] = NoDefault,
section: Optional[str] = None,
secure: Optional[bool] = False,
):
"""
Get an option from the Spyder configuration system.
Parameters
----------
option: ConfigurationKey
Name/Tuple path of the option to get its value from.
default: Union[NoDefault, BasicTypes]
Fallback value to return if the option is not found on the
configuration system.
section: str
Section in the configuration system, e.g. `shortcuts`. If None,
then the value of `CONF_SECTION` is used.
secure: bool
If True, the option will be retrieved securely using the `keyring`
Python package.
Returns
-------
value: BasicTypes
Value of the option in the configuration section.
Raises
------
configparser.NoOptionError
If the section does not exist in the configuration.
"""
section = self.CONF_SECTION if section is None else section
if section is None:
raise AttributeError(
'A SpyderConfigurationAccessor must define a `CONF_SECTION` '
'class attribute!'
)
return CONF.get(section, option, default, secure)
def get_conf_options(self, section: Optional[str] = None):
"""
Get all options from the given section.
Parameters
----------
section: Optional[str]
Section in the configuration system, e.g. `shortcuts`. If None,
then the value of `CONF_SECTION` is used.
Returns
-------
values: BasicTypes
Values of the option in the configuration section.
Raises
------
configparser.NoOptionError
If the section does not exist in the configuration.
"""
section = self.CONF_SECTION if section is None else section
if section is None:
raise AttributeError(
'A SpyderConfigurationAccessor must define a `CONF_SECTION` '
'class attribute!'
)
return CONF.options(section)
def set_conf(
self,
option: ConfigurationKey,
value: BasicTypes,
section: Optional[str] = None,
recursive_notification: bool = True,
secure: Optional[bool] = False,
):
"""
Set an option in the Spyder configuration system.
Parameters
----------
option: ConfigurationKey
Name/Tuple path of the option to set its value.
value: BasicTypes
Value to set on the configuration system.
section: Optional[str]
Section in the configuration system, e.g. `shortcuts`. If None,
then the value of `CONF_SECTION` is used.
recursive_notification: bool
If True, all objects that observe all changes on the
configuration section and objects that observe partial tuple paths
are notified. For example if the option `opt` of section `sec`
changes, then the observers for section `sec` are notified.
Likewise, if the option `(a, b, c)` changes, then observers for
`(a, b, c)`, `(a, b)` and a are notified as well.
secure: bool
If True, the option will be saved securely using the `keyring`
Python package.
"""
section = self.CONF_SECTION if section is None else section
if section is None:
raise AttributeError(
'A SpyderConfigurationAccessor must define a `CONF_SECTION` '
'class attribute!'
)
CONF.set(
section,
option,
value,
recursive_notification=recursive_notification,
secure=secure,
)
def remove_conf(
self,
option: ConfigurationKey,
section: Optional[str] = None,
secure: Optional[str] = False,
):
"""
Remove an option in the Spyder configuration system.
Parameters
----------
option: ConfigurationKey
Name/Tuple path of the option to remove its value.
section: Optional[str]
Section in the configuration system, e.g. `shortcuts`. If None,
then the value of `CONF_SECTION` is used.
secure: bool
If True, the option will be removed securely using the `keyring`
Python package.
"""
section = self.CONF_SECTION if section is None else section
if section is None:
raise AttributeError(
'A SpyderConfigurationAccessor must define a `CONF_SECTION` '
'class attribute!'
)
CONF.remove_option(section, option, secure)
def get_conf_default(self,
option: ConfigurationKey,
section: Optional[str] = None):
"""
Get an option default value in the Spyder configuration system.
Parameters
----------
option: ConfigurationKey
Name/Tuple path of the option to remove its value.
section: Optional[str]
Section in the configuration system, e.g. `shortcuts`. If None,
then the value of `CONF_SECTION` is used.
"""
section = self.CONF_SECTION if section is None else section
if section is None:
raise AttributeError(
'A SpyderConfigurationAccessor must define a `CONF_SECTION` '
'class attribute!'
)
return CONF.get_default(section, option)
@property
def spyder_conf_version(self):
"""Get current version for the Spyder configuration system."""
return CONF_VERSION
@property
def old_spyder_conf_version(self):
"""Get old version for the Spyder configuration system."""
return CONF.old_spyder_version
| SpyderConfigurationAccessor |
python | tensorflow__tensorflow | tensorflow/python/ops/lookup_ops.py | {
"start": 35625,
"end": 44032
} | class ____(LookupInterface):
r"""String to Id table wrapper that assigns out-of-vocabulary keys to buckets.
For example, if an instance of `IdTableWithHashBuckets` is initialized with a
string-to-id table that maps:
* `emerson -> 0`
* `lake -> 1`
* `palmer -> 2`
The `IdTableWithHashBuckets` object will performs the following mapping:
* `emerson -> 0`
* `lake -> 1`
* `palmer -> 2`
* `<other term> -> bucket_id`, where bucket_id will be between `3` and
`3 + num_oov_buckets - 1`, calculated by:
`hash(<term>) % num_oov_buckets + vocab_size`
If input_tensor is `["emerson", "lake", "palmer", "king", "crimson"]`,
the lookup result is `[0, 1, 2, 4, 7]`.
If `table` is None, only out-of-vocabulary buckets are used.
Example usage:
```python
num_oov_buckets = 3
input_tensor = tf.constant(["emerson", "lake", "palmer", "king", "crimnson"])
table = tf.IdTableWithHashBuckets(
tf.StaticHashTable(
tf.lookup.TextFileInitializer(
filename,
key_dtype=tf.string,
key_index=tf.lookup.TextFileIndex.WHOLE_LINE,
value_dtype=tf.int64,
value_index=tf.lookup.TextFileIndex.LINE_NUMBER,
delimiter="\t"),
default_value),
num_oov_buckets)
out = table.lookup(input_tensor).
table.init.run()
print(out.eval())
```
The hash function used for generating out-of-vocabulary buckets ID is handled
by `hasher_spec`.
"""
def __init__(self,
table,
num_oov_buckets,
hasher_spec=FastHashSpec,
name=None,
key_dtype=None):
"""Construct a `IdTableWithHashBuckets` object.
Args:
table: Table that maps `tf.string` or `tf.int64` keys to `tf.int64` ids.
num_oov_buckets: Number of buckets to use for out-of-vocabulary keys.
hasher_spec: A `HasherSpec` to specify the hash function to use for
assignation of out-of-vocabulary buckets (optional).
name: A name for the operation (optional).
key_dtype: Data type of keys passed to `lookup`. Defaults to
`table.key_dtype` if `table` is specified, otherwise `tf.string`. Must
be string or integer, and must be castable to `table.key_dtype`.
Raises:
ValueError: when `table` in None and `num_oov_buckets` is not positive.
TypeError: when `hasher_spec` is invalid.
"""
# If a name ends with a '/' it is a "name scope", remove all trailing '/'
# characters to use as table name.
if name:
name = name.rstrip("/")
if table:
if key_dtype is None:
key_dtype = table.key_dtype
supported_table_key_dtypes = (dtypes.int64, dtypes.string)
if table.key_dtype not in supported_table_key_dtypes:
raise TypeError("Invalid `key_dtype`, expected one of "
f"{supported_table_key_dtypes}, received {key_dtype}.")
if table.key_dtype.is_integer != key_dtype.is_integer:
raise TypeError("Invalid `key dtype`, expected %s but got %s." %
("integer" if key_dtype.is_integer else "non-integer",
table.key_dtype))
if table.value_dtype != dtypes.int64:
raise TypeError("Invalid `value_dtype`: expected int64 but got %s." %
(table.value_dtype))
self._table = table
name = name or self._table.name
else:
if num_oov_buckets <= 0:
raise ValueError("`oov_buckets` must be > 0 if no `table` is supplied.")
key_dtype = dtypes.string if key_dtype is None else key_dtype
self._table = None
name = name or "hash_bucket"
if (not key_dtype.is_integer) and (dtypes.string != key_dtype):
raise TypeError("Invalid `key_dtype`, expected integer or string, got "
f"{key_dtype}.")
self._num_oov_buckets = num_oov_buckets
if not isinstance(hasher_spec, HasherSpec):
raise TypeError("`hasher_spec` must be of type HasherSpec, got "
f"{type(hasher_spec)}.")
self._hasher_spec = hasher_spec
if name:
self._table_name = name.split("/")[-1]
else:
self._table_name = None
super(IdTableWithHashBuckets, self).__init__(key_dtype, dtypes.int64)
def _create_resource(self):
if self._table is not None:
return self._table._create_resource() # pylint: disable=protected-access
return None
def _initialize(self):
if self._table is not None:
return self._table._initialize() # pylint: disable=protected-access
with ops.name_scope(None, "init"):
return control_flow_ops.no_op()
@property
def initializer(self):
if self._table is not None:
return self._table._init_op # pylint: disable=protected-access
with ops.name_scope(None, "init"):
return control_flow_ops.no_op()
@property
@deprecated("2018-12-15", "Use `initializer` instead.")
def init(self):
return self.initializer
@property
def resource_handle(self):
if self._table is not None:
return self._table.resource_handle
return None
@property
def name(self):
return self._table_name
def size(self, name=None):
"""Compute the number of elements in this table."""
with ops.name_scope(name, "%s_Size" % self.name):
if self._table:
tsize = self._table.size()
else:
tsize = ops.convert_to_tensor(0, dtype=dtypes.int64)
return tsize + self._num_oov_buckets
def _get_string_to_hash_bucket_fn(self, hasher_spec):
"""Returns the string_to_hash_bucket op to use based on `hasher_spec`."""
if not isinstance(hasher_spec, HasherSpec):
raise TypeError("`hasher_spec` must be of type HasherSpec, got "
f"{type(hasher_spec)}.")
if hasher_spec.hasher == "fasthash":
return string_ops.string_to_hash_bucket_fast
if hasher_spec.hasher == "legacy":
return string_ops.string_to_hash_bucket
if hasher_spec.hasher == "stronghash":
return functools.partial(
string_ops.string_to_hash_bucket_strong, key=hasher_spec.key)
raise ValueError(
f"Found unknown hasher {hasher_spec.hasher} in `hasher_spec`")
def lookup(self, keys, name=None):
"""Looks up `keys` in the table, outputs the corresponding values.
It assigns out-of-vocabulary keys to buckets based in their hashes.
Args:
keys: Keys to look up. May be either a `SparseTensor` or dense `Tensor`.
name: Optional name for the op.
Returns:
A `SparseTensor` if keys are sparse, a `RaggedTensor` if keys are ragged,
otherwise a dense `Tensor`.
Raises:
TypeError: when `keys` doesn't match the table key data type.
"""
if keys.dtype.base_dtype != self._key_dtype:
raise TypeError(f"Dtype of argument `keys` must be {self._key_dtype}, "
f"received: {keys.dtype}")
values = keys
# TODO(b/296302236): Remove RaggedTensor check by adding ragged
# dispatching.
if isinstance(keys, (sparse_tensor.SparseTensor, internal.RaggedTensor)):
values = keys.values
if self._table and (self._table.key_dtype.base_dtype == dtypes.int64):
values = math_ops.cast(values, dtypes.int64)
if self._num_oov_buckets == 0:
ids = self._table.lookup(values, name=name)
else:
# TODO(yleon): Consider moving this functionality to its own kernel.
with ops.name_scope(name, "%s_Lookup" % self.name):
str_to_hash_bucket = self._get_string_to_hash_bucket_fn(
self._hasher_spec)
buckets = str_to_hash_bucket(
_as_string(values),
num_buckets=self._num_oov_buckets,
name="hash_bucket")
if self._table:
ids = self._table.lookup(values)
buckets = math_ops.add(buckets, self._table.size())
is_id_non_default = math_ops.not_equal(ids, self._table.default_value)
ids = array_ops.where_v2(is_id_non_default, ids, buckets)
else:
ids = buckets
if isinstance(keys, sparse_tensor.SparseTensor):
return sparse_tensor.SparseTensor(keys.indices, ids, keys.dense_shape)
# TODO(b/296302236): Remove RaggedTensor check by adding ragged
# dispatching.
elif isinstance(keys, internal.RaggedTensor):
return keys.with_values(ids)
return ids
@tf_export("lookup.StaticVocabularyTable", v1=[])
| IdTableWithHashBuckets |
python | dask__distributed | distributed/worker_state_machine.py | {
"start": 27793,
"end": 28094
} | class ____(StateMachineEvent):
"""Scheduler -> Worker message containing updated who_has information.
See also
--------
RequestRefreshWhoHasMsg
"""
__slots__ = ("who_has",)
# {key: [worker address, ...]}
who_has: dict[Key, Collection[str]]
@dataclass
| RefreshWhoHasEvent |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/dialects/mysql/base.py | {
"start": 44772,
"end": 46480
} | class ____(default.DefaultExecutionContext):
def post_exec(self) -> None:
if (
self.isdelete
and cast(SQLCompiler, self.compiled).effective_returning
and not self.cursor.description
):
# All MySQL/mariadb drivers appear to not include
# cursor.description for DELETE..RETURNING with no rows if the
# WHERE criteria is a straight "false" condition such as our EMPTY
# IN condition. manufacture an empty result in this case (issue
# #10505)
#
# taken from cx_Oracle implementation
self.cursor_fetch_strategy = (
_cursor.FullyBufferedCursorFetchStrategy(
self.cursor,
[
(entry.keyname, None) # type: ignore[misc]
for entry in cast(
SQLCompiler, self.compiled
)._result_columns
],
[],
)
)
def create_server_side_cursor(self) -> DBAPICursor:
if self.dialect.supports_server_side_cursors:
return self._dbapi_connection.cursor(
self.dialect._sscursor # type: ignore[attr-defined]
)
else:
raise NotImplementedError()
def fire_sequence(
self, seq: Sequence_SchemaItem, type_: sqltypes.Integer
) -> int:
return self._execute_scalar( # type: ignore[no-any-return]
(
"select nextval(%s)"
% self.identifier_preparer.format_sequence(seq)
),
type_,
)
| MySQLExecutionContext |
python | django__django | tests/gis_tests/test_measure.py | {
"start": 4949,
"end": 8529
} | class ____(unittest.TestCase):
"Testing the Area object"
def test_init(self):
"Testing initialization from valid units"
a = Area(sq_m=100)
self.assertEqual(a.sq_m, 100)
a = A(sq_m=100)
self.assertEqual(a.sq_m, 100)
a = A(sq_mi=100)
self.assertEqual(a.sq_m, 258998811.0336)
def test_init_invalid_a(self):
"Testing initialization from invalid units"
with self.assertRaises(AttributeError):
A(banana=100)
def test_access(self):
"Testing access in different units"
a = A(sq_m=100)
self.assertEqual(a.sq_km, 0.0001)
self.assertAlmostEqual(a.sq_ft, 1076.391, 3)
def test_access_invalid_a(self):
"Testing access in invalid units"
a = A(sq_m=100)
self.assertFalse(hasattr(a, "banana"))
def test_addition(self):
"Test addition & subtraction"
a1 = A(sq_m=100)
a2 = A(sq_m=200)
a3 = a1 + a2
self.assertEqual(a3.sq_m, 300)
a3 += a1
self.assertEqual(a3.sq_m, 400)
a4 = a1 - a2
self.assertEqual(a4.sq_m, -100)
a4 -= a1
self.assertEqual(a4.sq_m, -200)
with self.assertRaises(TypeError):
a1 + 1
with self.assertRaises(TypeError):
a1 - 1
with self.assertRaises(TypeError):
a1 += 1
with self.assertRaises(TypeError):
a1 -= 1
def test_multiplication(self):
"Test multiplication & division"
a1 = A(sq_m=100)
a3 = a1 * 2
self.assertEqual(a3.sq_m, 200)
a3 = 2 * a1
self.assertEqual(a3.sq_m, 200)
a3 *= 5
self.assertEqual(a3.sq_m, 1000)
a4 = a1 / 2
self.assertEqual(a4.sq_m, 50)
a4 /= 5
self.assertEqual(a4.sq_m, 10)
with self.assertRaises(TypeError):
a1 * A(sq_m=1)
with self.assertRaises(TypeError):
a1 *= A(sq_m=1)
with self.assertRaises(TypeError):
a1 / A(sq_m=1)
with self.assertRaises(TypeError):
a1 /= A(sq_m=1)
def test_unit_conversions(self):
"Testing default units during maths"
a1 = A(sq_m=100)
a2 = A(sq_km=1)
a3 = a1 + a2
self.assertEqual(a3._default_unit, "sq_m")
a4 = a2 + a1
self.assertEqual(a4._default_unit, "sq_km")
a5 = a1 * 2
self.assertEqual(a5._default_unit, "sq_m")
a6 = a1 / 2
self.assertEqual(a6._default_unit, "sq_m")
def test_comparisons(self):
"Testing comparisons"
a1 = A(sq_m=100)
a2 = A(sq_km=1)
a3 = A(sq_km=0)
self.assertGreater(a2, a1)
self.assertEqual(a1, a1)
self.assertLess(a1, a2)
self.assertFalse(a3)
def test_units_str(self):
"Testing conversion to strings"
a1 = A(sq_m=100)
a2 = A(sq_km=3.5)
self.assertEqual(str(a1), "100.0 sq_m")
self.assertEqual(str(a2), "3.5 sq_km")
self.assertEqual(repr(a1), "Area(sq_m=100.0)")
self.assertEqual(repr(a2), "Area(sq_km=3.5)")
def test_hectare(self):
a = A(sq_m=10000)
self.assertEqual(a.ha, 1)
def test_hectare_unit_att_name(self):
self.assertEqual(A.unit_attname("Hectare"), "ha")
def test_hash(self):
a1 = A(sq_m=100)
a2 = A(sq_m=1000000)
a3 = A(sq_km=1)
self.assertEqual(hash(a2), hash(a3))
self.assertNotEqual(hash(a1), hash(a2))
self.assertNotEqual(hash(a1), hash(a3))
| AreaTest |
python | pytorch__pytorch | test/distributed/_shard/test_sharder.py | {
"start": 921,
"end": 1552
} | class ____(nn.Module):
def __init__(self, num_bags, num_embeddings_per_bag, num_dims):
super().__init__()
self.num_bags = num_bags
self.embedding_bags: nn.ModuleDict = nn.ModuleDict()
for i in range(num_bags):
self.embedding_bags[f"embedding_bag_{i}"] = nn.EmbeddingBag(
num_embeddings_per_bag, num_dims, mode="sum"
)
def forward(self, inputs):
outputs = []
for bag in self.embedding_bags.values():
outputs.append(bag(inputs))
return torch.cat(outputs)
# a simple sharded version of EBC
| CustomEmbeddingBagCollection |
python | google__jax | jax/experimental/sparse/_base.py | {
"start": 772,
"end": 3239
} | class ____(util.StrictABC):
"""Base class for high-level JAX sparse objects."""
data: jax.Array
shape: tuple[int, ...]
nse: property
dtype: property
# Ignore type because of https://github.com/python/mypy/issues/4266.
__hash__ = None # type: ignore
def __len__(self):
return self.shape[0]
@property
def size(self) -> int:
return math.prod(self.shape)
@property
def ndim(self) -> int:
return len(self.shape)
def __init__(self, args: tuple[Array, ...], *, shape: Sequence[int]):
self.shape = core.canonicalize_shape(shape)
def __repr__(self):
name = self.__class__.__name__
try:
nse = self.nse
dtype = self.dtype
shape = list(self.shape)
except:
repr_ = f"{name}(<invalid>)"
else:
repr_ = f"{name}({dtype}{shape}, {nse=})"
if isinstance(self.data, core.Tracer):
repr_ = f"{type(self.data).__name__}[{repr_}]"
return repr_
@abc.abstractmethod
def tree_flatten(self):
...
@classmethod
@abc.abstractmethod
def tree_unflatten(cls, aux_data, children):
...
@abc.abstractmethod
def transpose(self, axes=None):
...
@property
def T(self):
return self.transpose()
def block_until_ready(self):
for arg in self.tree_flatten()[0]:
arg.block_until_ready()
return self
# Not abstract methods because not all sparse classes implement them
def sum(self, *args, **kwargs):
raise NotImplementedError(f"{self.__class__}.sum")
def __neg__(self):
raise NotImplementedError(f"{self.__class__}.__neg__")
def __pos__(self):
raise NotImplementedError(f"{self.__class__}.__pos__")
def __matmul__(self, other):
raise NotImplementedError(f"{self.__class__}.__matmul__")
def __rmatmul__(self, other):
raise NotImplementedError(f"{self.__class__}.__rmatmul__")
def __mul__(self, other):
raise NotImplementedError(f"{self.__class__}.__mul__")
def __rmul__(self, other):
raise NotImplementedError(f"{self.__class__}.__rmul__")
def __add__(self, other):
raise NotImplementedError(f"{self.__class__}.__add__")
def __radd__(self, other):
raise NotImplementedError(f"{self.__class__}.__radd__")
def __sub__(self, other):
raise NotImplementedError(f"{self.__class__}.__sub__")
def __rsub__(self, other):
raise NotImplementedError(f"{self.__class__}.__rsub__")
def __getitem__(self, item):
raise NotImplementedError(f"{self.__class__}.__getitem__")
| JAXSparse |
python | numpy__numpy | numpy/_core/tests/test_umath.py | {
"start": 93865,
"end": 95135
} | class ____:
def test_nan_outputs(self):
assert_hypot_isnan(np.nan, np.nan)
assert_hypot_isnan(np.nan, 1)
def test_nan_outputs2(self):
assert_hypot_isinf(np.nan, np.inf)
assert_hypot_isinf(np.inf, np.nan)
assert_hypot_isinf(np.inf, 0)
assert_hypot_isinf(0, np.inf)
assert_hypot_isinf(np.inf, np.inf)
assert_hypot_isinf(np.inf, 23.0)
def test_no_fpe(self):
assert_no_warnings(ncu.hypot, np.inf, 0)
def assert_arctan2_isnan(x, y):
assert_(np.isnan(ncu.arctan2(x, y)), f"arctan({x}, {y}) is {ncu.arctan2(x, y)}, not nan")
def assert_arctan2_ispinf(x, y):
assert_((np.isinf(ncu.arctan2(x, y)) and ncu.arctan2(x, y) > 0), f"arctan({x}, {y}) is {ncu.arctan2(x, y)}, not +inf")
def assert_arctan2_isninf(x, y):
assert_((np.isinf(ncu.arctan2(x, y)) and ncu.arctan2(x, y) < 0), f"arctan({x}, {y}) is {ncu.arctan2(x, y)}, not -inf")
def assert_arctan2_ispzero(x, y):
assert_((ncu.arctan2(x, y) == 0 and not np.signbit(ncu.arctan2(x, y))), f"arctan({x}, {y}) is {ncu.arctan2(x, y)}, not +0")
def assert_arctan2_isnzero(x, y):
assert_((ncu.arctan2(x, y) == 0 and np.signbit(ncu.arctan2(x, y))), f"arctan({x}, {y}) is {ncu.arctan2(x, y)}, not -0")
| TestHypotSpecialValues |
python | ray-project__ray | python/ray/tests/gpu_objects/test_gpu_objects_gloo.py | {
"start": 767,
"end": 1825
} | class ____:
@ray.method(tensor_transport="gloo")
def echo(self, data):
return data
def add(self, a, b):
return a + b
def double(self, data):
if isinstance(data, list):
return [self.double(d) for d in data]
if support_tensordict and isinstance(data, TensorDict):
return data.apply(lambda x: x * 2)
return data * 2
def increment(self, data):
data += 1
return data
def get_out_of_band_tensors(self, obj_id: str, timeout=None):
gpu_object_store = (
ray._private.worker.global_worker.gpu_object_manager.gpu_object_store
)
if timeout is None:
timeout = 0
return gpu_object_store.wait_and_get_object(obj_id, timeout)
def get_num_gpu_objects(self):
gpu_object_manager = ray._private.worker.global_worker.gpu_object_manager
return gpu_object_manager.gpu_object_store.get_num_objects()
def fail(self, error_message):
raise Exception(error_message)
@ray.remote
| GPUTestActor |
python | numba__numba | numba/core/typed_passes.py | {
"start": 7343,
"end": 8402
} | class ____(AnalysisPass):
_name = "annotate_types"
def __init__(self):
AnalysisPass.__init__(self)
def get_analysis_usage(self, AU):
AU.add_required(IRLegalization)
def run_pass(self, state):
"""
Create type annotation after type inference
"""
func_ir = state.func_ir.copy()
state.type_annotation = type_annotations.TypeAnnotation(
func_ir=func_ir,
typemap=state.typemap,
calltypes=state.calltypes,
lifted=state.lifted,
lifted_from=state.lifted_from,
args=state.args,
return_type=state.return_type,
html_output=config.HTML)
if config.ANNOTATE:
print("ANNOTATION".center(80, '-'))
print(state.type_annotation)
print('=' * 80)
if config.HTML:
with open(config.HTML, 'w') as fout:
state.type_annotation.html_annotate(fout)
return False
@register_pass(mutates_CFG=True, analysis_only=False)
| AnnotateTypes |
python | sympy__sympy | sympy/geometry/exceptions.py | {
"start": 24,
"end": 131
} | class ____(ValueError):
"""An exception raised by classes in the geometry module."""
pass
| GeometryError |
python | numba__numba | numba/tests/test_analysis.py | {
"start": 30814,
"end": 33327
} | class ____(MemoryLeakMixin, TestCase):
# Tests SSA rewiring of phi nodes after branch pruning.
class SSAPrunerCompiler(CompilerBase):
def define_pipelines(self):
# This is a simple pipeline that does branch pruning on IR in SSA
# form, then types and lowers as per the standard nopython pipeline.
pm = PassManager("testing pm")
pm.add_pass(TranslateByteCode, "analyzing bytecode")
pm.add_pass(IRProcessing, "processing IR")
# SSA early
pm.add_pass(ReconstructSSA, "ssa")
pm.add_pass(DeadBranchPrune, "dead branch pruning")
# type and then lower as usual
pm.add_pass(PreserveIR, "preserves the IR as metadata")
dpb = DefaultPassBuilder
typed_passes = dpb.define_typed_pipeline(self.state)
pm.passes.extend(typed_passes.passes)
lowering_passes = dpb.define_nopython_lowering_pipeline(self.state)
pm.passes.extend(lowering_passes.passes)
pm.finalize()
return [pm]
def test_ssa_update_phi(self):
# This checks that dead branch pruning is rewiring phi nodes correctly
# after a block containing an incoming for a phi is removed.
@njit(pipeline_class=self.SSAPrunerCompiler)
def impl(p=None, q=None):
z = 1
r = False
if p is None:
r = True # live
if r and q is not None:
z = 20 # dead
# one of the incoming blocks for z is dead, the phi needs an update
# were this not done, it would refer to variables that do not exist
# and result in a lowering error.
return z, r
self.assertPreciseEqual(impl(), impl.py_func())
def test_ssa_replace_phi(self):
# This checks that when a phi only has one incoming, because the other
# has been pruned, that a direct assignment is used instead.
@njit(pipeline_class=self.SSAPrunerCompiler)
def impl(p=None):
z = 0
if p is None:
z = 10
else:
z = 20
return z
self.assertPreciseEqual(impl(), impl.py_func())
func_ir = impl.overloads[impl.signatures[0]].metadata['preserved_ir']
# check the func_ir, make sure there's no phi nodes
for blk in func_ir.blocks.values():
self.assertFalse([*blk.find_exprs('phi')])
| TestBranchPruneSSA |
python | weaviate__weaviate-python-client | weaviate/collections/classes/aggregate.py | {
"start": 4881,
"end": 5422
} | class ____(_MetricsNum):
def to_grpc(self) -> aggregate_pb2.AggregateRequest.Aggregation:
return aggregate_pb2.AggregateRequest.Aggregation(
property=self.property_name,
int=aggregate_pb2.AggregateRequest.Aggregation.Integer(
count=self.count,
maximum=self.maximum,
mean=self.mean,
median=self.median,
minimum=self.minimum,
mode=self.mode,
sum=self.sum_,
),
)
| _MetricsInteger |
python | ethereum__web3.py | tests/core/middleware/test_transaction_signing.py | {
"start": 1874,
"end": 12590
} | class ____(BaseProvider):
def make_request(self, method, params):
raise NotImplementedError(f"Cannot make request for {method}: {params}")
@pytest.fixture
def w3_dummy(request_mocker):
w3_base = Web3(provider=DummyProvider(), middleware=[])
with request_mocker(
w3_base,
mock_results={
"eth_sendRawTransaction": lambda *args: args,
"net_version": lambda *_: 1,
"eth_chainId": lambda *_: "0x02",
},
):
yield w3_base
def hex_to_bytes(s):
return to_bytes(hexstr=s)
TEST_SIGN_AND_SEND_RAW_MIDDLEWARE_PARAMS = (
("eth_sendTransaction", SAME_KEY_MIXED_TYPE, ADDRESS_2, NotImplementedError),
(
"eth_sendTransaction",
SAME_KEY_MIXED_TYPE,
ADDRESS_1,
"eth_sendRawTransaction",
),
(
"eth_sendTransaction",
MIXED_KEY_MIXED_TYPE,
ADDRESS_2,
"eth_sendRawTransaction",
),
(
"eth_sendTransaction",
MIXED_KEY_MIXED_TYPE,
ADDRESS_1,
"eth_sendRawTransaction",
),
("eth_sendTransaction", SAME_KEY_SAME_TYPE, ADDRESS_2, NotImplementedError),
(
"eth_sendTransaction",
SAME_KEY_SAME_TYPE,
ADDRESS_1,
"eth_sendRawTransaction",
),
(
"eth_sendTransaction",
MIXED_KEY_SAME_TYPE,
ADDRESS_2,
"eth_sendRawTransaction",
),
(
"eth_sendTransaction",
MIXED_KEY_SAME_TYPE,
ADDRESS_1,
"eth_sendRawTransaction",
),
(
"eth_sendTransaction",
SAME_KEY_MIXED_TYPE[0],
ADDRESS_1,
"eth_sendRawTransaction",
),
(
"eth_sendTransaction",
SAME_KEY_MIXED_TYPE[1],
ADDRESS_1,
"eth_sendRawTransaction",
),
(
"eth_sendTransaction",
SAME_KEY_MIXED_TYPE[2],
ADDRESS_1,
"eth_sendRawTransaction",
),
(
"eth_sendTransaction",
SAME_KEY_MIXED_TYPE[3],
ADDRESS_1,
"eth_sendRawTransaction",
),
(
"eth_sendTransaction",
SAME_KEY_MIXED_TYPE[4],
ADDRESS_1,
"eth_sendRawTransaction",
),
("eth_sendTransaction", SAME_KEY_MIXED_TYPE[0], ADDRESS_2, NotImplementedError),
("eth_sendTransaction", SAME_KEY_MIXED_TYPE[1], ADDRESS_2, NotImplementedError),
("eth_sendTransaction", SAME_KEY_MIXED_TYPE[2], ADDRESS_2, NotImplementedError),
("eth_sendTransaction", SAME_KEY_MIXED_TYPE[3], ADDRESS_2, NotImplementedError),
("eth_sendTransaction", SAME_KEY_MIXED_TYPE[4], ADDRESS_2, NotImplementedError),
("eth_call", MIXED_KEY_MIXED_TYPE, ADDRESS_1, NotImplementedError),
(
"eth_sendTransaction",
SAME_KEY_SAME_TYPE,
hex_to_bytes(ADDRESS_1),
"eth_sendRawTransaction",
),
)
TEST_SIGNED_TRANSACTION_PARAMS = (
(
{"gas": 21000, "gasPrice": 10**9, "value": 1},
-1,
MIXED_KEY_MIXED_TYPE,
ADDRESS_1,
),
(
{"value": 1},
-1,
MIXED_KEY_MIXED_TYPE,
ADDRESS_1,
),
# expect validation error + unmanaged account
(
{"gas": 21000, "value": 10},
ValidationError,
SAME_KEY_MIXED_TYPE,
ADDRESS_2,
),
(
{"gas": 21000, "value": 10},
InvalidAddress,
SAME_KEY_MIXED_TYPE,
"0x0000",
),
(
{"gas": 21000, "gasPrice": 0, "value": 1},
EthUtilsValidationError,
MIXED_KEY_MIXED_TYPE,
ADDRESS_1,
),
(
{
"type": "0x2",
"value": 22,
"maxFeePerGas": 2000000000,
"maxPriorityFeePerGas": 10**9,
},
-1,
SAME_KEY_MIXED_TYPE,
ADDRESS_1,
),
(
{
"value": 22,
"maxFeePerGas": 20**9,
"maxPriorityFeePerGas": 10**9,
},
-1,
SAME_KEY_MIXED_TYPE,
ADDRESS_1,
),
)
@pytest.mark.parametrize(
"method,key_object,from_,expected",
TEST_SIGN_AND_SEND_RAW_MIDDLEWARE_PARAMS,
)
def test_sign_and_send_raw_middleware(w3_dummy, method, from_, expected, key_object):
w3_dummy.middleware_onion.inject(
SignAndSendRawMiddlewareBuilder.build(key_object), layer=0
)
legacy_transaction = {
"to": "0x7E5F4552091A69125d5DfCb7b8C2659029395Bdf",
"from": from_,
"gas": 21000,
"gasPrice": 10**9,
"value": 1,
"nonce": 0,
}
if isinstance(expected, type) and issubclass(expected, Exception):
with pytest.raises(expected):
w3_dummy.manager.request_blocking(method, [legacy_transaction])
else:
# assert with legacy txn params
actual = w3_dummy.manager.request_blocking(method, [legacy_transaction])
assert_method_and_txn_signed(actual, expected)
# assert with dynamic fee transaction params and explicit type
dynamic_fee_transaction = dissoc(legacy_transaction, "gasPrice")
dynamic_fee_transaction = assoc(
dynamic_fee_transaction, "maxFeePerGas", 2000000000
)
dynamic_fee_transaction = assoc(
dynamic_fee_transaction, "maxPriorityFeePerGas", 1000000000
)
dynamic_fee_transaction = assoc(dynamic_fee_transaction, "type", "0x2")
actual_dynamic_fee_call = w3_dummy.manager.request_blocking(
method, [dynamic_fee_transaction]
)
assert_method_and_txn_signed(actual_dynamic_fee_call, expected)
# assert with dynamic fee transaction params and no explicit type
dynamic_fee_transaction_no_type = dissoc(dynamic_fee_transaction, "type")
actual_dynamic_fee_call_no_type = w3_dummy.manager.request_blocking(
method, [dynamic_fee_transaction_no_type]
)
assert_method_and_txn_signed(actual_dynamic_fee_call_no_type, expected)
def assert_method_and_txn_signed(actual, expected):
raw_txn = actual[1][0]
actual_method = actual[0]
assert actual_method == expected
assert is_hexstr(raw_txn)
@pytest.fixture()
def w3():
_w3 = Web3(EthereumTesterProvider())
_w3.eth.default_account = _w3.eth.accounts[0]
return _w3
@pytest.mark.parametrize(
"key_object",
(
(SAME_KEY_MIXED_TYPE),
(MIXED_KEY_MIXED_TYPE),
(SAME_KEY_SAME_TYPE),
(MIXED_KEY_SAME_TYPE),
(SAME_KEY_MIXED_TYPE[0]),
(SAME_KEY_MIXED_TYPE[1]),
(SAME_KEY_MIXED_TYPE[2]),
(SAME_KEY_MIXED_TYPE[3]),
(SAME_KEY_MIXED_TYPE[4]),
),
)
def test_gen_normalized_accounts(key_object):
accounts = gen_normalized_accounts(key_object)
assert all(isinstance(account, LocalAccount) for account in accounts.values())
def test_gen_normalized_accounts_type_error(w3):
with pytest.raises(TypeError):
gen_normalized_accounts(1234567890)
@pytest.fixture
def fund_account(w3):
# fund local account
tx_value = w3.to_wei(10, "ether")
for address in (ADDRESS_1, ADDRESS_2):
w3.eth.send_transaction(
{
"to": address,
"from": w3.eth.default_account,
"gas": 21000,
"value": tx_value,
}
)
assert w3.eth.get_balance(address) == tx_value
@pytest.mark.parametrize(
"transaction,expected,key_object,from_",
TEST_SIGNED_TRANSACTION_PARAMS,
ids=[
"with set gas",
"with no set gas",
"with mismatched sender",
"with invalid sender",
"with gasPrice lower than base fee",
"with txn type and dynamic fee txn params",
"with dynamic fee txn params and no type",
],
)
def test_signed_transaction(w3, fund_account, transaction, expected, key_object, from_):
w3.middleware_onion.inject(
SignAndSendRawMiddlewareBuilder.build(key_object), layer=0
)
# Drop any falsy addresses
to_from = valfilter(bool, {"to": w3.eth.default_account, "from": from_})
_transaction = merge(transaction, to_from)
if isinstance(expected, type) and issubclass(expected, Exception):
with pytest.raises(expected):
w3.eth.send_transaction(_transaction)
else:
start_balance = w3.eth.get_balance(
_transaction.get("from", w3.eth.default_account)
)
w3.eth.send_transaction(_transaction)
assert w3.eth.get_balance(_transaction.get("from")) <= start_balance + expected
@pytest.mark.parametrize(
"from_converter,to_converter",
(
(identity, identity),
(hex_to_bytes, identity),
(identity, hex_to_bytes),
(hex_to_bytes, hex_to_bytes),
),
)
def test_sign_and_send_raw_middleware_with_byte_addresses(
w3_dummy, from_converter, to_converter
):
private_key = PRIVATE_KEY_1
from_ = from_converter(ADDRESS_1)
to_ = to_converter(ADDRESS_2)
w3_dummy.middleware_onion.inject(
SignAndSendRawMiddlewareBuilder.build(private_key), layer=0
)
actual = w3_dummy.manager.request_blocking(
"eth_sendTransaction",
[
{
"to": to_,
"from": from_,
"gas": 21000,
"gasPrice": 0,
"value": 1,
"nonce": 0,
}
],
)
raw_txn = actual[1][0]
actual_method = actual[0]
assert actual_method == "eth_sendRawTransaction"
assert is_hexstr(raw_txn)
def test_sign_and_send_raw_middleware_with_buffered_gas_estimate_middleware(
w3_dummy, request_mocker
):
gas_buffer = 100000 # the default internal value
gas_estimate = 12345 - gas_buffer
w3_dummy.middleware_onion.add(BufferedGasEstimateMiddleware)
w3_dummy.middleware_onion.inject(
SignAndSendRawMiddlewareBuilder.build(PRIVATE_KEY_1), layer=0
)
with request_mocker(
w3_dummy,
mock_results={
"eth_getBlockByNumber": {"gasLimit": 200000}, # arbitrary high number
"eth_estimateGas": gas_estimate,
},
):
actual = w3_dummy.manager.request_blocking(
"eth_sendTransaction",
[
{
"to": ADDRESS_2,
"from": ADDRESS_1,
"value": 1,
"nonce": 0,
"maxFeePerGas": 10**9,
"maxPriorityFeePerGas": 10**9,
}
],
)
raw_txn = actual[1][0]
actual_method = actual[0]
assert actual_method == "eth_sendRawTransaction"
assert is_hexstr(raw_txn)
decoded_txn = rlp.decode(HexBytes(raw_txn[4:]), sedes=DynamicFeeTransaction)
assert decoded_txn["gas"] == gas_estimate + gas_buffer
# -- async -- #
| DummyProvider |
python | matplotlib__matplotlib | tools/triage_tests.py | {
"start": 7752,
"end": 12655
} | class ____:
"""
A model for a single image comparison test.
"""
def __init__(self, path, root, source):
self.source = source
self.root = root
self.dir = path.parent
self.diff = path.name
self.reldir = self.dir.relative_to(self.root)
basename = self.diff[:-len('-failed-diff.png')]
for ext in exts:
if basename.endswith(f'_{ext}'):
display_extension = f'_{ext}'
extension = ext
basename = basename[:-len(display_extension)]
break
else:
display_extension = ''
extension = 'png'
self.basename = basename
self.extension = extension
self.generated = f'{basename}.{extension}'
self.expected = f'{basename}-expected.{extension}'
self.expected_display = f'{basename}-expected{display_extension}.png'
self.generated_display = f'{basename}{display_extension}.png'
self.name = self.reldir / self.basename
self.destdir = self.get_dest_dir(self.reldir)
self.thumbnails = [
self.generated_display,
self.expected_display,
self.diff
]
self.thumbnails = [self.dir / x for x in self.thumbnails]
if self.destdir is None or not Path(self.destdir, self.generated).exists():
# This case arises from a check_figures_equal test.
self.status = 'autogen'
elif ((self.dir / self.generated).read_bytes()
== (self.destdir / self.generated).read_bytes()):
self.status = 'accept'
else:
self.status = 'unknown'
def get_dest_dir(self, reldir):
"""
Find the source tree directory corresponding to the given
result_images subdirectory.
"""
for baseline_dir in BASELINE_IMAGES:
path = self.source / baseline_dir / reldir
if path.is_dir():
return path
@property
def display(self):
"""
Get the display string for this entry. This is the text that
appears in the list widget.
"""
status_map = {
'unknown': '\N{BALLOT BOX}',
'accept': '\N{BALLOT BOX WITH CHECK}',
'reject': '\N{BALLOT BOX WITH X}',
'autogen': '\N{WHITE SQUARE CONTAINING BLACK SMALL SQUARE}',
}
box = status_map[self.status]
return f'{box} {self.name} [{self.extension}]'
def accept(self):
"""
Accept this test by copying the generated result to the source tree.
"""
copy_file(self.dir / self.generated, self.destdir / self.generated)
self.status = 'accept'
def reject(self):
"""
Reject this test by copying the expected result to the source tree.
"""
expected = self.dir / self.expected
if not expected.is_symlink():
copy_file(expected, self.destdir / self.generated)
self.status = 'reject'
def copy_file(a, b):
"""Copy file from *a* to *b*."""
print(f'copying: {a} to {b}')
shutil.copyfile(a, b)
def find_failing_tests(result_images, source):
"""
Find all of the failing tests by looking for files with
`-failed-diff` at the end of the basename.
"""
return [Entry(path, result_images, source)
for path in sorted(Path(result_images).glob("**/*-failed-diff.*"))]
def launch(result_images, source):
"""
Launch the GUI.
"""
entries = find_failing_tests(result_images, source)
if len(entries) == 0:
print("No failed tests")
sys.exit(0)
app = QtWidgets.QApplication(sys.argv)
dialog = Dialog(entries)
dialog.show()
filter = EventFilter(dialog)
app.installEventFilter(filter)
sys.exit(_exec(app))
if __name__ == '__main__':
import argparse
source_dir = Path(__file__).parent.parent
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description="""
Triage image comparison test failures.
If no arguments are provided, it assumes you ran the tests at the
top-level of a source checkout as `pytest .`.
Keys:
left/right: Move between test, expected and diff images
up/down: Move between tests
A: Accept test. Copy the test result to the source tree.
R: Reject test. Copy the expected result to the source tree.
""")
parser.add_argument("result_images", type=Path, nargs='?',
default=source_dir / 'result_images',
help="The location of the result_images directory")
parser.add_argument("source", type=Path, nargs='?', default=source_dir,
help="The location of the matplotlib source tree")
args = parser.parse_args()
launch(args.result_images, args.source)
| Entry |
python | pypa__setuptools | setuptools/tests/test_install_scripts.py | {
"start": 186,
"end": 3433
} | class ____:
settings = dict(
name='foo',
entry_points={'console_scripts': ['foo=foo:foo']},
version='0.0',
)
unix_exe = '/usr/dummy-test-path/local/bin/python'
unix_spaces_exe = '/usr/bin/env dummy-test-python'
win32_exe = 'C:\\Dummy Test Path\\Program Files\\Python 3.6\\python.exe'
def _run_install_scripts(self, install_dir, executable=None):
dist = Distribution(self.settings)
dist.script_name = 'setup.py'
cmd = install_scripts(dist)
cmd.install_dir = install_dir
if executable is not None:
bs = cmd.get_finalized_command('build_scripts')
bs.executable = executable
cmd.ensure_finalized()
with contexts.quiet():
cmd.run()
@pytest.mark.skipif(sys.platform == 'win32', reason='non-Windows only')
def test_sys_executable_escaping_unix(self, tmpdir, monkeypatch):
"""
Ensure that shebang is not quoted on Unix when getting the Python exe
from sys.executable.
"""
expected = f'#!{self.unix_exe}\n'
monkeypatch.setattr('sys.executable', self.unix_exe)
with tmpdir.as_cwd():
self._run_install_scripts(str(tmpdir))
with open(str(tmpdir.join('foo')), 'r', encoding="utf-8") as f:
actual = f.readline()
assert actual == expected
@pytest.mark.skipif(sys.platform != 'win32', reason='Windows only')
def test_sys_executable_escaping_win32(self, tmpdir, monkeypatch):
"""
Ensure that shebang is quoted on Windows when getting the Python exe
from sys.executable and it contains a space.
"""
expected = f'#!"{self.win32_exe}"\n'
monkeypatch.setattr('sys.executable', self.win32_exe)
with tmpdir.as_cwd():
self._run_install_scripts(str(tmpdir))
with open(str(tmpdir.join('foo-script.py')), 'r', encoding="utf-8") as f:
actual = f.readline()
assert actual == expected
@pytest.mark.skipif(sys.platform == 'win32', reason='non-Windows only')
def test_executable_with_spaces_escaping_unix(self, tmpdir):
"""
Ensure that shebang on Unix is not quoted, even when
a value with spaces
is specified using --executable.
"""
expected = f'#!{self.unix_spaces_exe}\n'
with tmpdir.as_cwd():
self._run_install_scripts(str(tmpdir), self.unix_spaces_exe)
with open(str(tmpdir.join('foo')), 'r', encoding="utf-8") as f:
actual = f.readline()
assert actual == expected
@pytest.mark.skipif(sys.platform != 'win32', reason='Windows only')
def test_executable_arg_escaping_win32(self, tmpdir):
"""
Ensure that shebang on Windows is quoted when
getting a path with spaces
from --executable, that is itself properly quoted.
"""
expected = f'#!"{self.win32_exe}"\n'
with tmpdir.as_cwd():
self._run_install_scripts(str(tmpdir), '"' + self.win32_exe + '"')
with open(str(tmpdir.join('foo-script.py')), 'r', encoding="utf-8") as f:
actual = f.readline()
assert actual == expected
| TestInstallScripts |
python | lazyprogrammer__machine_learning_examples | rl2/mountaincar/pg_tf.py | {
"start": 3395,
"end": 6669
} | class ____:
def __init__(self, D, ft, hidden_layer_sizes=[]):
self.ft = ft
self.costs = []
# create the graph
self.layers = []
M1 = D
for M2 in hidden_layer_sizes:
layer = HiddenLayer(M1, M2)
self.layers.append(layer)
M1 = M2
# final layer
layer = HiddenLayer(M1, 1, lambda x: x)
self.layers.append(layer)
# inputs and targets
self.X = tf.placeholder(tf.float32, shape=(None, D), name='X')
self.Y = tf.placeholder(tf.float32, shape=(None,), name='Y')
# calculate output and cost
Z = self.X
for layer in self.layers:
Z = layer.forward(Z)
Y_hat = tf.reshape(Z, [-1]) # the output
self.predict_op = Y_hat
cost = tf.reduce_sum(tf.square(self.Y - Y_hat))
self.cost = cost
self.train_op = tf.train.AdamOptimizer(1e-1).minimize(cost)
def set_session(self, session):
self.session = session
def partial_fit(self, X, Y):
X = np.atleast_2d(X)
X = self.ft.transform(X)
Y = np.atleast_1d(Y)
self.session.run(self.train_op, feed_dict={self.X: X, self.Y: Y})
cost = self.session.run(self.cost, feed_dict={self.X: X, self.Y: Y})
self.costs.append(cost)
def predict(self, X):
X = np.atleast_2d(X)
X = self.ft.transform(X)
return self.session.run(self.predict_op, feed_dict={self.X: X})
def play_one_td(env, pmodel, vmodel, gamma):
observation = env.reset()
done = False
totalreward = 0
iters = 0
while not done and iters < 2000:
# if we reach 2000, just quit, don't want this going forever
# the 200 limit seems a bit early
action = pmodel.sample_action(observation)
prev_observation = observation
observation, reward, done, info = env.step([action])
totalreward += reward
# update the models
if done:
G = reward
else:
V_next = vmodel.predict(observation)
G = reward + gamma*V_next
advantage = G - vmodel.predict(prev_observation)
pmodel.partial_fit(prev_observation, action, advantage)
vmodel.partial_fit(prev_observation, G)
iters += 1
return totalreward, iters
def main():
env = gym.make('MountainCarContinuous-v0')
ft = FeatureTransformer(env, n_components=100)
D = ft.dimensions
pmodel = PolicyModel(D, ft, [])
vmodel = ValueModel(D, ft, [])
init = tf.global_variables_initializer()
session = tf.InteractiveSession()
session.run(init)
pmodel.set_session(session)
vmodel.set_session(session)
gamma = 0.95
if 'monitor' in sys.argv:
filename = os.path.basename(__file__).split('.')[0]
monitor_dir = './' + filename + '_' + str(datetime.now())
env = wrappers.Monitor(env, monitor_dir)
N = 50
totalrewards = np.empty(N)
costs = np.empty(N)
for n in range(N):
totalreward, num_steps = play_one_td(env, pmodel, vmodel, gamma)
totalrewards[n] = totalreward
if n % 1 == 0:
print("episode:", n, "total reward: %.1f" % totalreward, "num steps: %d" % num_steps, "avg reward (last 100): %.1f" % totalrewards[max(0, n-100):(n+1)].mean())
print("avg reward for last 100 episodes:", totalrewards[-100:].mean())
plt.plot(totalrewards)
plt.title("Rewards")
plt.show()
plot_running_avg(totalrewards)
plot_cost_to_go(env, vmodel)
if __name__ == '__main__':
main()
| ValueModel |
python | mahmoud__boltons | boltons/timeutils.py | {
"start": 18183,
"end": 20306
} | class ____(tzinfo):
"""Copied directly from the Python docs, the ``USTimeZone`` is a
:class:`datetime.tzinfo` subtype used to create the
:data:`Eastern`, :data:`Central`, :data:`Mountain`, and
:data:`Pacific` tzinfo types.
"""
def __init__(self, hours, reprname, stdname, dstname):
self.stdoffset = timedelta(hours=hours)
self.reprname = reprname
self.stdname = stdname
self.dstname = dstname
def __repr__(self):
return self.reprname
def tzname(self, dt):
if self.dst(dt):
return self.dstname
else:
return self.stdname
def utcoffset(self, dt):
return self.stdoffset + self.dst(dt)
def dst(self, dt):
if dt is None or dt.tzinfo is None:
# An exception may be sensible here, in one or both cases.
# It depends on how you want to treat them. The default
# fromutc() implementation (called by the default astimezone()
# implementation) passes a datetime with dt.tzinfo is self.
return ZERO
assert dt.tzinfo is self
# Find start and end times for US DST. For years before 1967, return
# ZERO for no DST.
if 2006 < dt.year:
dststart, dstend = DSTSTART_2007, DSTEND_2007
elif 1986 < dt.year < 2007:
dststart, dstend = DSTSTART_1987_2006, DSTEND_1987_2006
elif 1966 < dt.year < 1987:
dststart, dstend = DSTSTART_1967_1986, DSTEND_1967_1986
else:
return ZERO
start = _first_sunday_on_or_after(dststart.replace(year=dt.year))
end = _first_sunday_on_or_after(dstend.replace(year=dt.year))
# Can't compare naive to aware objects, so strip the timezone
# from dt first.
if start <= dt.replace(tzinfo=None) < end:
return HOUR
else:
return ZERO
Eastern = USTimeZone(-5, "Eastern", "EST", "EDT")
Central = USTimeZone(-6, "Central", "CST", "CDT")
Mountain = USTimeZone(-7, "Mountain", "MST", "MDT")
Pacific = USTimeZone(-8, "Pacific", "PST", "PDT")
| USTimeZone |
python | huggingface__transformers | src/transformers/time_series_utils.py | {
"start": 2384,
"end": 2583
} | class ____(nn.Module):
def __init__(self, function):
super().__init__()
self.function = function
def forward(self, x, *args):
return self.function(x, *args)
| LambdaLayer |
python | django-import-export__django-import-export | tests/core/tests/test_tmp_storages.py | {
"start": 1001,
"end": 3635
} | class ____(TestCase):
def setUp(self):
self.test_string = b"""
id,name,author,author_email,imported,published,price,categories
2,Bar,1,,0,,,
1,Foo,,,0,,,
"""
def test_temp_folder_storage(self):
tmp_storage = TempFolderStorage()
tmp_storage.save(self.test_string)
name = tmp_storage.name
tmp_storage = TempFolderStorage(name=name)
self.assertEqual(self.test_string.decode(), tmp_storage.read())
self.assertTrue(os.path.isfile(tmp_storage.get_full_path()))
tmp_storage.remove()
self.assertFalse(os.path.isfile(tmp_storage.get_full_path()))
def test_temp_folder_storage_read_with_encoding(self):
tmp_storage = TestTempFolderStorage(encoding="utf-8")
tmp_storage.name = "f"
with patch("builtins.open", mock_open(read_data="data")) as mock_file:
tmp_storage.read()
mock_file.assert_called_with("/tmp/f", "r", encoding="utf-8")
def test_cache_storage(self):
tmp_storage = CacheStorage()
tmp_storage.save(self.test_string)
name = tmp_storage.name
tmp_storage = CacheStorage(name=name)
self.assertEqual(self.test_string, tmp_storage.read())
self.assertIsNotNone(cache.get(tmp_storage.CACHE_PREFIX + tmp_storage.name))
tmp_storage.remove()
self.assertIsNone(cache.get(tmp_storage.CACHE_PREFIX + tmp_storage.name))
def test_cache_storage_read_with_encoding(self):
tmp_storage = CacheStorage()
tmp_storage.name = "f"
cache.set("django-import-export-f", 101)
res = tmp_storage.read()
self.assertEqual(101, res)
def test_cache_storage_read_with_encoding_unicode_chars(self):
tmp_storage = CacheStorage()
tmp_storage.name = "f"
tmp_storage.save("àèìòùçñ")
res = tmp_storage.read()
self.assertEqual("àèìòùçñ", res)
def test_media_storage(self):
tmp_storage = MediaStorage()
tmp_storage.save(self.test_string)
name = tmp_storage.name
tmp_storage = MediaStorage(name=name)
self.assertEqual(self.test_string, tmp_storage.read())
self.assertTrue(default_storage.exists(tmp_storage.get_full_path()))
tmp_storage.remove()
self.assertFalse(default_storage.exists(tmp_storage.get_full_path()))
def test_media_storage_read_with_encoding(self):
tmp_storage = TestMediaStorage()
tmp_storage.name = "f"
with patch.object(FileSystemStorage, "open") as mock_open:
tmp_storage.read()
mock_open.assert_called_with("f", mode="rb")
| TempStoragesTest |
python | pytorch__pytorch | test/dynamo/cpython/3_13/typinganndata/ann_module2.py | {
"start": 402,
"end": 470
} | class ____:
def meth(self, param: complex) -> None:
...
| NTC |
python | facebook__pyre-check | client/commands/daemon_querier.py | {
"start": 2850,
"end": 4259
} | class ____(abc.ABC):
@abc.abstractmethod
async def get_type_errors(
self,
paths: Iterable[Path],
) -> Union[DaemonQueryFailure, Dict[Path, List[error.Error]]]:
raise NotImplementedError()
@abc.abstractmethod
async def get_type_coverage(
self,
path: Path,
) -> Union[DaemonQueryFailure, Optional[lsp.TypeCoverageResponse]]:
raise NotImplementedError()
@abc.abstractmethod
async def handle_file_opened(
self,
path: Path,
code: str,
) -> Union[daemon_connection.DaemonConnectionFailure, str]:
raise NotImplementedError()
@abc.abstractmethod
async def handle_file_closed(
self,
path: Path,
) -> Union[daemon_connection.DaemonConnectionFailure, str]:
raise NotImplementedError()
@abc.abstractmethod
async def update_overlay(
self,
path: Path,
code: str,
) -> Union[daemon_connection.DaemonConnectionFailure, str]:
raise NotImplementedError()
@abc.abstractmethod
async def handle_register_client(
self,
) -> Union[daemon_connection.DaemonConnectionFailure, str]:
raise NotImplementedError()
@abc.abstractmethod
async def handle_dispose_client(
self,
) -> Union[daemon_connection.DaemonConnectionFailure, str]:
raise NotImplementedError()
| AbstractDaemonQuerier |
python | spyder-ide__spyder | spyder/plugins/tours/widgets.py | {
"start": 37645,
"end": 43469
} | class ____(QDialog, SvgToScaledPixmap):
"""Initial widget with tour."""
def __init__(self, parent, tour_function):
super().__init__(parent)
if MAC:
flags = (self.windowFlags() | Qt.WindowStaysOnTopHint
& ~Qt.WindowContextHelpButtonHint)
else:
flags = self.windowFlags() & ~Qt.WindowContextHelpButtonHint
self.setWindowFlags(flags)
self.setWindowTitle(_("Spyder tour"))
self.tour_function = tour_function
# Image
image_label = QLabel(self)
image_label.setPixmap(
self.svg_to_scaled_pixmap(
"tour-spyder-logo", rescale=DialogStyle.IconScaleFactor
)
)
images_layout = QHBoxLayout()
images_layout.addStretch()
images_layout.addWidget(image_label)
images_layout.addStretch()
if MAC:
images_layout.setContentsMargins(0, -5, 20, 0)
else:
images_layout.setContentsMargins(0, -8, 35, 0)
# Label
tour_label_title = QLabel(_("Welcome to Spyder!"))
tour_label_title.setObjectName("title")
tour_label_title.setWordWrap(True)
tour_label = QLabel(
_("Check out our interactive tour to "
"explore some of Spyder's panes and features."))
tour_label.setObjectName("content")
tour_label.setWordWrap(True)
tour_label.setFixedWidth(340)
# Buttons
self.launch_tour_button = QPushButton(_('Start tour'))
self.launch_tour_button.setObjectName("launch-button")
self.launch_tour_button.setAutoDefault(False)
self.dismiss_button = QPushButton(_('Dismiss'))
self.dismiss_button.setObjectName("dismiss-button")
self.dismiss_button.setAutoDefault(False)
bbox = SpyderDialogButtonBox()
bbox.addButton(self.launch_tour_button, QDialogButtonBox.ActionRole)
bbox.addButton(self.dismiss_button, QDialogButtonBox.RejectRole)
bbox.layout().setSpacing(3 * AppStyle.MarginSize)
bbox.setStyleSheet(self._buttons_stylesheet)
layout = QHBoxLayout()
layout.addLayout(images_layout)
label_layout = QVBoxLayout()
label_layout.addWidget(tour_label_title)
if not MAC:
label_layout.addSpacing(3)
label_layout.addWidget(tour_label)
else:
label_layout.addWidget(tour_label)
label_layout.addSpacing(10)
vertical_layout = QVBoxLayout()
if not MAC:
vertical_layout.addStretch()
vertical_layout.addLayout(label_layout)
vertical_layout.addSpacing(20)
vertical_layout.addWidget(bbox)
vertical_layout.addStretch()
else:
vertical_layout.addLayout(label_layout)
vertical_layout.addWidget(bbox)
general_layout = QHBoxLayout()
if not MAC:
general_layout.addStretch()
general_layout.addLayout(layout)
general_layout.addSpacing(1)
general_layout.addLayout(vertical_layout)
general_layout.addStretch()
else:
general_layout.addLayout(layout)
general_layout.addLayout(vertical_layout)
self.setLayout(general_layout)
self.setStyleSheet(self._main_stylesheet)
self.launch_tour_button.clicked.connect(self._start_tour)
self.dismiss_button.clicked.connect(self.close)
self.setContentsMargins(18, 40, 18, 40)
if MAC:
general_layout.setSizeConstraint(QLayout.SetFixedSize)
else:
self.setFixedSize(640, 280)
def _start_tour(self):
self.close()
self.tour_function()
@property
def _main_stylesheet(self):
css = qstylizer.style.StyleSheet()
# Set background color
for widget in ["QDialog", "QLabel"]:
css[widget].setValues(
backgroundColor=DialogStyle.BackgroundColor
)
# Set font size for QLabels
css["QLabel#title"].setValues(
fontSize=DialogStyle.TitleFontSize
)
css["QLabel#content"].setValues(
fontSize=DialogStyle.ContentFontSize
)
return css.toString()
@property
def _buttons_stylesheet(self):
css = qstylizer.style.StyleSheet()
# Set base style for buttons
css.QPushButton.setValues(
color=SpyderPalette.COLOR_TEXT_1,
fontSize=DialogStyle.ButtonsFontSize,
padding=DialogStyle.ButtonsPadding,
)
# Set style for launch button
css["QPushButton#launch-button"].setValues(
backgroundColor=SpyderPalette.COLOR_ACCENT_2,
borderColor=SpyderPalette.COLOR_ACCENT_2,
)
css["QPushButton#launch-button:hover:!pressed"].setValues(
backgroundColor=SpyderPalette.COLOR_ACCENT_3,
)
css["QPushButton#launch-button:pressed"].setValues(
backgroundColor=SpyderPalette.COLOR_ACCENT_4,
)
# Set style for dismiss button
css["QPushButton#dismiss-button"].setValues(
backgroundColor=SpyderPalette.COLOR_BACKGROUND_4,
borderColor=SpyderPalette.COLOR_BACKGROUND_4,
)
css["QPushButton#dismiss-button:hover:!pressed"].setValues(
backgroundColor=SpyderPalette.COLOR_BACKGROUND_5,
)
css["QPushButton#dismiss-button:pressed"].setValues(
backgroundColor=SpyderPalette.COLOR_BACKGROUND_6,
)
return css.toString()
# ----------------------------------------------------------------------------
# Used for testing the functionality
# ----------------------------------------------------------------------------
| OpenTourDialog |
python | pytorch__pytorch | torchgen/_autoheuristic/ah_tree.py | {
"start": 768,
"end": 9899
} | class ____:
"""
Custom decision tree implementation that mimics some of the sklearn API.
The purpose of this class it to be able to perform transformations, such as custom pruning, which
does not seem to be easy with sklearn.
"""
def __init__(self, sklearn_tree: Any, feature_names: list[str]) -> None:
self.feature_names = feature_names
self.root = self._convert_sklearn_tree(sklearn_tree.tree_)
self.classes_: list[str] = sklearn_tree.classes_
def _convert_sklearn_tree(
self, sklearn_tree: Any, node_id: int = 0
) -> DecisionTreeNode:
class_probs = sklearn_tree.value[node_id][0]
num_samples = sklearn_tree.n_node_samples[node_id]
if sklearn_tree.feature[node_id] != _tree.TREE_UNDEFINED:
feature_index = sklearn_tree.feature[node_id]
feature = self.feature_names[feature_index]
left = self._convert_sklearn_tree(
sklearn_tree, sklearn_tree.children_left[node_id]
)
right = self._convert_sklearn_tree(
sklearn_tree, sklearn_tree.children_right[node_id]
)
return DecisionTreeNode(
feature=feature,
threshold=sklearn_tree.threshold[node_id],
left=left,
right=right,
class_probs=class_probs,
num_samples=num_samples,
node_id=node_id,
)
else:
return DecisionTreeNode(
class_probs=class_probs, num_samples=num_samples, node_id=node_id
)
def prune(self, df: Any, target_col: str, k: int) -> None:
self.root = self._prune_tree(self.root, df, target_col, k)
def _prune_tree(
self, node: DecisionTreeNode, df: Any, target_col: str, k: int
) -> DecisionTreeNode:
if node.is_leaf():
return node
left_df = df[df[node.feature] <= node.threshold]
right_df = df[df[node.feature] > node.threshold]
# number of unique classes in the left and right subtrees
left_counts = left_df[target_col].nunique()
right_counts = right_df[target_col].nunique()
# for ranking, we want to ensure that we return at least k classes, so if we have less than k classes in the
# left or right subtree, we remove the split and make this node a leaf node
if left_counts < k or right_counts < k:
return DecisionTreeNode(class_probs=node.class_probs)
assert node.left is not None, "expected left child to exist"
node.left = self._prune_tree(node.left, left_df, target_col, k)
assert node.right is not None, "expected right child to exist"
node.right = self._prune_tree(node.right, right_df, target_col, k)
return node
def to_dot(self) -> str:
dot = "digraph DecisionTree {\n"
dot += ' node [fontname="helvetica"];\n'
dot += ' edge [fontname="helvetica"];\n'
dot += self._node_to_dot(self.root)
dot += "}"
return dot
def _node_to_dot(
self, node: DecisionTreeNode, parent_id: int = 0, edge_label: str = ""
) -> str:
if node is None:
return ""
node_id = id(node)
# Format class_probs array with line breaks
class_probs_str = self._format_class_probs_array(
node.class_probs, node.num_samples
)
if node.is_leaf():
label = class_probs_str
shape = "box"
else:
feature_name = f"{node.feature}"
label = f"{feature_name} <= {node.threshold:.2f}\\n{class_probs_str}"
shape = "oval"
dot = f' {node_id} [label="{label}", shape={shape}];\n'
if parent_id != 0:
dot += f' {parent_id} -> {node_id} [label="{edge_label}"];\n'
if not node.is_leaf():
assert node.left is not None, "expected left child to exist"
dot += self._node_to_dot(node.left, node_id, "<=")
assert node.right is not None, "expected right child to exist"
dot += self._node_to_dot(node.right, node_id, ">")
return dot
def _format_class_prob(self, num: float) -> str:
if num == 0:
return "0"
return f"{num:.2f}"
def _format_class_probs_array(
self, class_probs: Any, num_samples: int, max_per_line: int = 5
) -> str:
# add line breaks to avoid very long lines
flat_class_probs = class_probs.flatten()
formatted = [self._format_class_prob(v) for v in flat_class_probs]
lines = [
formatted[i : i + max_per_line]
for i in range(0, len(formatted), max_per_line)
]
return f"num_samples={num_samples}\\n" + "\\n".join(
[", ".join(line) for line in lines]
)
def predict(self, X: Any) -> Any:
predictions = [self._predict_single(x) for _, x in X.iterrows()]
return np.array(predictions)
def predict_proba(self, X: Any) -> Any:
return np.array([self._predict_proba_single(x) for _, x in X.iterrows()])
def _get_leaf(self, X: Any) -> DecisionTreeNode:
node = self.root
while not node.is_leaf():
if X[node.feature] <= node.threshold:
assert node.left is not None, "expected left child to exist"
node = node.left
else:
assert node.right is not None, "expected right child to exist"
node = node.right
return node
def _predict_single(self, x: Any) -> str:
node = self._get_leaf(x)
# map index to class name
return self.classes_[np.argmax(node.class_probs)]
def _predict_proba_single(self, x: Any) -> Any:
node = self._get_leaf(x)
return node.class_probs
def apply(self, X: Any) -> Any:
ids = [self._apply_single(x) for _, x in X.iterrows()]
return np.array(ids)
def _apply_single(self, x: Any) -> int:
node = self._get_leaf(x)
return node.id
def codegen(
self,
dummy_col_2_col_val: dict[str, tuple[str, Any]],
lines: list[str],
unsafe_leaves: list[int],
) -> None:
# generates python code for the decision tree
def codegen_node(node: DecisionTreeNode, depth: int) -> None:
indent = " " * (depth + 1)
if node.is_leaf():
lines.append(handle_leaf(node, indent, unsafe_leaves))
else:
name = node.feature
threshold = node.threshold
if name in dummy_col_2_col_val:
(orig_name, value) = dummy_col_2_col_val[name]
predicate = f"{indent}if str(context.get_value('{orig_name}')) != '{value}':"
assert threshold == 0.5, (
f"expected threshold to be 0.5 but is {threshold}"
)
else:
predicate = (
f"{indent}if context.get_value('{name}') <= {threshold}:"
)
lines.append(predicate)
assert node.left is not None, "expected left child to exist"
codegen_node(node.left, depth + 1)
lines.append(f"{indent}else:")
assert node.right is not None, "expected right child to exist"
codegen_node(node.right, depth + 1)
def handle_leaf(
node: DecisionTreeNode, indent: str, unsafe_leaves: list[int]
) -> str:
"""
This generates the code for a leaf node in the decision tree. If the leaf is unsafe, the learned heuristic
will return "unsure" (i.e. None).
"""
if node.id in unsafe_leaves:
return f"{indent}return None"
class_probas = node.class_probs
return f"{indent}return {best_probas_and_indices(class_probas)}"
def best_probas_and_indices(class_probas: Any) -> str:
"""
Given a list of tuples (proba, idx), this function returns a string in which the tuples are
sorted by proba in descending order. E.g.:
Given class_probas=[(0.3, 0), (0.5, 1), (0.2, 2)]
this function returns
"[(0.5, 1), (0.3, 0), (0.2, 2)]"
"""
# we generate a list of tuples (proba, idx) sorted by proba in descending order
# idx is the index of a choice
# we only generate a tuple if proba > 0
probas_indices_sorted = sorted(
[
(proba, index)
for index, proba in enumerate(class_probas)
if proba > 0
],
key=lambda x: x[0],
reverse=True,
)
probas_indices_sorted_str = ", ".join(
f"({value:.3f}, {index})" for value, index in probas_indices_sorted
)
return f"[{probas_indices_sorted_str}]"
codegen_node(self.root, 1)
| DecisionTree |
python | run-llama__llama_index | llama-index-core/llama_index/core/postprocessor/node_recency.py | {
"start": 5597,
"end": 7591
} | class ____(BaseNodePostprocessor):
"""
Time-weighted post-processor.
Reranks a set of nodes based on their recency.
"""
time_decay: float = Field(default=0.99)
last_accessed_key: str = "__last_accessed__"
time_access_refresh: bool = True
# optionally set now (makes it easier to test)
now: Optional[float] = None
top_k: int = 1
@classmethod
def class_name(cls) -> str:
return "TimeWeightedPostprocessor"
def _postprocess_nodes(
self,
nodes: List[NodeWithScore],
query_bundle: Optional[QueryBundle] = None,
) -> List[NodeWithScore]:
"""Postprocess nodes."""
now = self.now or datetime.now().timestamp()
# TODO: refactor with get_top_k_embeddings
similarities = []
for node_with_score in nodes:
# embedding similarity score
score = node_with_score.score or 1.0
node = node_with_score.node
# time score
if node.metadata is None:
raise ValueError("metadata is None")
last_accessed = node.metadata.get(self.last_accessed_key, None)
if last_accessed is None:
last_accessed = now
hours_passed = (now - last_accessed) / 3600
time_similarity = (1 - self.time_decay) ** hours_passed
similarity = score + time_similarity
similarities.append(similarity)
sorted_tups = sorted(zip(similarities, nodes), key=lambda x: x[0], reverse=True)
top_k = min(self.top_k, len(sorted_tups))
result_tups = sorted_tups[:top_k]
result_nodes = [
NodeWithScore(node=n.node, score=score) for score, n in result_tups
]
# set __last_accessed__ to now
if self.time_access_refresh:
for node_with_score in result_nodes:
node_with_score.node.metadata[self.last_accessed_key] = now
return result_nodes
| TimeWeightedPostprocessor |
python | doocs__leetcode | solution/0100-0199/0119.Pascal's Triangle II/Solution.py | {
"start": 0,
"end": 232
} | class ____:
def getRow(self, rowIndex: int) -> List[int]:
f = [1] * (rowIndex + 1)
for i in range(2, rowIndex + 1):
for j in range(i - 1, 0, -1):
f[j] += f[j - 1]
return f
| Solution |
python | marshmallow-code__marshmallow | examples/flask_example.py | {
"start": 671,
"end": 826
} | class ____(db.Model): # type: ignore[name-defined]
id: Mapped[int] = mapped_column(primary_key=True)
first: Mapped[str]
last: Mapped[str]
| Author |
python | TheAlgorithms__Python | data_structures/heap/randomized_heap.py | {
"start": 1812,
"end": 5297
} | class ____[T: bool]:
"""
A data structure that allows inserting a new value and to pop the smallest
values. Both operations take O(logN) time where N is the size of the
structure.
Wiki: https://en.wikipedia.org/wiki/Randomized_meldable_heap
>>> RandomizedHeap([2, 3, 1, 5, 1, 7]).to_sorted_list()
[1, 1, 2, 3, 5, 7]
>>> rh = RandomizedHeap()
>>> rh.pop()
Traceback (most recent call last):
...
IndexError: Can't get top element for the empty heap.
>>> rh.insert(1)
>>> rh.insert(-1)
>>> rh.insert(0)
>>> rh.to_sorted_list()
[-1, 0, 1]
"""
def __init__(self, data: Iterable[T] | None = ()) -> None:
"""
>>> rh = RandomizedHeap([3, 1, 3, 7])
>>> rh.to_sorted_list()
[1, 3, 3, 7]
"""
self._root: RandomizedHeapNode[T] | None = None
if data:
for item in data:
self.insert(item)
def insert(self, value: T) -> None:
"""
Insert the value into the heap.
>>> rh = RandomizedHeap()
>>> rh.insert(3)
>>> rh.insert(1)
>>> rh.insert(3)
>>> rh.insert(7)
>>> rh.to_sorted_list()
[1, 3, 3, 7]
"""
self._root = RandomizedHeapNode.merge(self._root, RandomizedHeapNode(value))
def pop(self) -> T | None:
"""
Pop the smallest value from the heap and return it.
>>> rh = RandomizedHeap([3, 1, 3, 7])
>>> rh.pop()
1
>>> rh.pop()
3
>>> rh.pop()
3
>>> rh.pop()
7
>>> rh.pop()
Traceback (most recent call last):
...
IndexError: Can't get top element for the empty heap.
"""
result = self.top()
if self._root is None:
return None
self._root = RandomizedHeapNode.merge(self._root.left, self._root.right)
return result
def top(self) -> T:
"""
Return the smallest value from the heap.
>>> rh = RandomizedHeap()
>>> rh.insert(3)
>>> rh.top()
3
>>> rh.insert(1)
>>> rh.top()
1
>>> rh.insert(3)
>>> rh.top()
1
>>> rh.insert(7)
>>> rh.top()
1
"""
if not self._root:
raise IndexError("Can't get top element for the empty heap.")
return self._root.value
def clear(self) -> None:
"""
Clear the heap.
>>> rh = RandomizedHeap([3, 1, 3, 7])
>>> rh.clear()
>>> rh.pop()
Traceback (most recent call last):
...
IndexError: Can't get top element for the empty heap.
"""
self._root = None
def to_sorted_list(self) -> list[Any]:
"""
Returns sorted list containing all the values in the heap.
>>> rh = RandomizedHeap([3, 1, 3, 7])
>>> rh.to_sorted_list()
[1, 3, 3, 7]
"""
result = []
while self:
result.append(self.pop())
return result
def __bool__(self) -> bool:
"""
Check if the heap is not empty.
>>> rh = RandomizedHeap()
>>> bool(rh)
False
>>> rh.insert(1)
>>> bool(rh)
True
>>> rh.clear()
>>> bool(rh)
False
"""
return self._root is not None
if __name__ == "__main__":
import doctest
doctest.testmod()
| RandomizedHeap |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.