language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | crytic__slither | slither/core/variables/local_variable.py | {
"start": 454,
"end": 2298
} | class ____(Variable):
def __init__(self) -> None:
super().__init__()
self._location: Optional[str] = None
self._function: Optional["Function"] = None
def set_function(self, function: "Function") -> None:
self._function = function
@property
def function(self) -> "Function":
assert self._function
return self._function
def set_location(self, loc: str) -> None:
self._location = loc
@property
def location(self) -> Optional[str]:
"""
Variable Location
Can be storage/memory or default
Returns:
(str)
"""
return self._location
@property
def is_scalar(self) -> bool:
return isinstance(self.type, ElementaryType) and not self.is_storage
@property
def is_storage(self) -> bool:
"""
Return true if the variable is located in storage
See https://solidity.readthedocs.io/en/v0.4.24/types.html?highlight=storage%20location#data-location
Returns:
(bool)
"""
# pylint: disable=import-outside-toplevel
from slither.core.solidity_types.array_type import ArrayType
if self.location == "memory":
return False
if self.location == "calldata":
return False
# Use by slithIR SSA
if self.location == "reference_to_storage":
return False
if self.location == "storage":
return True
if isinstance(self.type, (ArrayType, MappingType)):
return True
if isinstance(self.type, UserDefinedType):
return isinstance(self.type.type, Structure)
return False
@property
def canonical_name(self) -> str:
return f"{self.function.canonical_name}.{self.name}"
| LocalVariable |
python | pytorch__pytorch | test/inductor/test_pallas.py | {
"start": 1679,
"end": 22244
} | class ____:
"""Basic tests for Pallas backend functionality (parameterized by DEVICE). Mixin only, not collected."""
def _compile(self, fn):
key = "cuda_backend" if self.DEVICE == "cuda" else "cpu_backend"
return torch.compile(fn, backend="inductor", options={key: "pallas"})
def test_simple_add(self):
"""Test basic element-wise addition."""
def fn(a, b):
return a + b
compiled = self._compile(fn)
a = torch.randn(1024, device=self.DEVICE)
b = torch.randn(1024, device=self.DEVICE)
result = compiled(a, b)
expected = fn(a, b)
self.assertEqual(result, expected)
def test_simple_mul(self):
"""Test basic element-wise multiplication."""
def fn(a, b):
return a * b
compiled = self._compile(fn)
a = torch.randn(1024, device=self.DEVICE)
b = torch.randn(1024, device=self.DEVICE)
result = compiled(a, b)
expected = fn(a, b)
self.assertEqual(result, expected)
def test_sin(self):
"""Test sin operation."""
def fn(x):
return torch.sin(x)
compiled = self._compile(fn)
x = torch.randn(1024, device=self.DEVICE)
result = compiled(x)
expected = fn(x)
self.assertEqual(result, expected)
def test_fused_ops(self):
"""Test fused operations (sin + add)."""
def fn(x, y):
return x.sin() + y
compiled = self._compile(fn)
x = torch.randn(1024, device=self.DEVICE)
y = torch.randn(1024, device=self.DEVICE)
result = compiled(x, y)
expected = fn(x, y)
self.assertEqual(result, expected)
def test_exp_log(self):
"""Test exp and log operations."""
def fn(x):
return torch.log(torch.exp(x))
compiled = self._compile(fn)
x = torch.randn(1024, device=self.DEVICE)
result = compiled(x)
expected = fn(x)
self.assertEqual(result, expected)
def test_sqrt(self):
"""Test sqrt operation."""
def fn(x):
return torch.sqrt(x)
compiled = self._compile(fn)
x = torch.randn(1024, device=self.DEVICE).abs() # Ensure positive for sqrt
result = compiled(x)
expected = fn(x)
self.assertEqual(result, expected)
def test_tanh(self):
"""Test tanh operation."""
def fn(x):
return torch.tanh(x)
compiled = self._compile(fn)
x = torch.randn(1024, device=self.DEVICE)
result = compiled(x)
expected = fn(x)
self.assertEqual(result, expected)
def test_abs_neg(self):
"""Test abs and neg operations."""
def fn(x):
return torch.abs(-x)
compiled = self._compile(fn)
x = torch.randn(1024, device=self.DEVICE)
result = compiled(x)
expected = fn(x)
self.assertEqual(result, expected)
def test_maximum_minimum(self):
"""Test maximum and minimum operations."""
def fn(a, b):
return torch.maximum(a, b) + torch.minimum(a, b)
compiled = self._compile(fn)
a = torch.randn(1024, device=self.DEVICE)
b = torch.randn(1024, device=self.DEVICE)
result = compiled(a, b)
expected = fn(a, b)
self.assertEqual(result, expected)
@unittest.skipUnless(has_triton(), "requires triton")
@unittest.skip("Random ops not yet implemented in Pallas backend")
def test_random_consistency(self):
"""Test that random number generation is consistent across backends."""
seed = 1234
shape = (3, 3)
dtype = torch.float32
for rand_fn in [
functools.partial(torch.rand, shape, dtype=dtype, device="cuda"),
functools.partial(torch.randn, shape, dtype=dtype, device="cuda"),
]:
@torch.compile(backend="inductor", options={"cuda_backend": "pallas"})
def get_rand_pallas():
return rand_fn()
@torch.compile(backend="inductor", options={"cuda_backend": "triton"})
def get_rand_triton():
return rand_fn()
torch.manual_seed(seed)
pallas_output = get_rand_pallas()
torch.manual_seed(seed)
triton_output = get_rand_triton()
self.assertEqual(pallas_output, triton_output)
def test_compile_options(self):
"""Test that Pallas backend is properly configured."""
@torch.compile(
backend="inductor",
options={
("cuda_backend" if self.DEVICE == "cuda" else "cpu_backend"): "pallas"
},
)
def pallas_fn(a, b):
return a.sin() + b.cos()
_, (code,) = run_and_get_code(
pallas_fn,
torch.randn(64, device=self.DEVICE),
torch.randn(64, device=self.DEVICE),
)
# Verify Pallas-specific code generation
self.assertIn("import jax", code)
self.assertIn("import jax.numpy as jnp", code)
self.assertIn("from jax.experimental import pallas as pl", code)
def test_jax_jit_wrapper_is_emitted(self):
"""Ensure generated Pallas code wraps pl.pallas_call in jax.jit."""
key = "cuda_backend" if self.DEVICE == "cuda" else "cpu_backend"
@torch.compile(backend="inductor", options={key: "pallas"})
def pallas_fn(a, b):
return a + b
_, (code,) = run_and_get_code(
pallas_fn,
torch.randn(32, device=self.DEVICE),
torch.randn(32, device=self.DEVICE),
)
kernel_match = re.search(r"def (pallas_[A-Za-z0-9_]+)_kernel", code)
self.assertIsNotNone(kernel_match)
kernel_name = kernel_match.group(1)
wrapper_name = f"{kernel_name}_jit_wrapper"
self.assertIn(wrapper_name, code)
start = code.index(f"def {wrapper_name}")
end = code.index(f"def {kernel_name}_main", start)
wrapper_block = code[start:end]
self.assertIn("jax.jit", code)
self.assertIn("donate_argnums", code)
self.assertIn("input_output_aliases", wrapper_block)
if self.DEVICE == "cuda":
self.assertNotIn(".copy_(", code)
self.assertNotIn("torch.", wrapper_block)
def test_2d_tensor(self):
"""Test with 2D tensors (though current implementation flattens)."""
def fn(x, y):
return x + y
compiled = self._compile(fn)
x = torch.randn(32, 32, device=self.DEVICE)
y = torch.randn(32, 32, device=self.DEVICE)
result = compiled(x, y)
expected = fn(x, y)
self.assertEqual(result, expected)
def test_different_shapes(self):
"""Test with different tensor shapes."""
def fn(x):
return x * 2.0
compiled = self._compile(fn)
for shape in [(64,), (128,), (256,), (1024,)]:
x = torch.randn(shape, device=self.DEVICE)
result = compiled(x)
expected = fn(x)
self.assertEqual(result, expected)
def test_contiguous_index_validation(self):
"""Test that contiguous index validation works correctly end-to-end."""
# Test 1: Contiguous operations should work
def contiguous_add(a, b):
return a + b
compiled = self._compile(contiguous_add)
a = torch.randn(1024, device=self.DEVICE)
b = torch.randn(1024, device=self.DEVICE)
result = compiled(a, b)
expected = contiguous_add(a, b)
self.assertEqual(result, expected)
# Test 2: Operations on contiguous tensors should work
def contiguous_mul(x):
return x * 2.0
compiled = self._compile(contiguous_mul)
x = torch.randn(128, 8, device=self.DEVICE)
result = compiled(x)
expected = contiguous_mul(x)
self.assertEqual(result, expected)
# Test 3: Non-contiguous views should work with the simplified dlpack approach
# The direct dlpack conversion handles non-contiguous tensors correctly
def operate_on_tensor(x):
return x.sin()
compiled = self._compile(operate_on_tensor)
# Create a transposed (non-contiguous) view
x = torch.randn(64, 32, device=self.DEVICE)
x_t = x.t() # Non-contiguous view
self.assertFalse(x_t.is_contiguous())
# With the simplified dlpack approach, non-contiguous tensors now work
result = compiled(x_t)
expected = operate_on_tensor(x_t)
self.assertEqual(result, expected)
# Contiguous tensors should also continue to work
x_t_contiguous = x_t.contiguous()
self.assertTrue(x_t_contiguous.is_contiguous())
result = compiled(x_t_contiguous)
expected = operate_on_tensor(x_t_contiguous)
self.assertEqual(result, expected)
def test_strided_int_pallas(self):
"""Test strided access patterns with the Pallas backend."""
def fn(x):
# Access every other element (strided access)
return x[::2] * 2.0
compiled = self._compile(fn)
x = torch.arange(16, dtype=torch.float32, device=self.DEVICE)
result = compiled(x)
expected = fn(x)
self.assertEqual(result, expected)
def test_strided_offset_pallas(self):
"""Test strided access with offset."""
def fn(x):
# Access every other element starting from index 1
return x[1::2] + 1.0
compiled = self._compile(fn)
x = torch.arange(16, dtype=torch.float32, device=self.DEVICE)
result = compiled(x)
expected = fn(x)
self.assertEqual(result, expected)
def test_strided_2d_pallas(self):
"""Test strided access on 2D tensors."""
def fn(x):
# Simple operation on 2D tensor
return x * 3.0
compiled = self._compile(fn)
x = torch.randn(8, 16, device=self.DEVICE)
result = compiled(x)
expected = fn(x)
self.assertEqual(result, expected)
def test_non_power_of_2_sizes(self):
"""Test that non-power-of-2 tensor sizes work with masked ops on GPU.
On GPU (Triton backend), Pallas requires power-of-2 sizes. We use masked
loads/stores to handle non-power-of-2 tensors by allocating power-of-2
blocks and masking out invalid elements.
"""
def fn(a, b):
return a + b
compiled = self._compile(fn)
# Test a specific non-power-of-2 size (10)
a = torch.randn(10, device=self.DEVICE)
b = torch.randn(10, device=self.DEVICE)
result = compiled(a, b)
expected = fn(a, b)
self.assertEqual(result, expected)
def test_non_power_of_2_multiple_ops(self):
"""Test non-power-of-2 sizes with multiple operations."""
def fn(x, y):
return x.sin() + y.cos() - (x * y)
compiled = self._compile(fn)
# Non-power-of-2 size: 17
x = torch.randn(17, device=self.DEVICE)
y = torch.randn(17, device=self.DEVICE)
result = compiled(x, y)
expected = fn(x, y)
self.assertEqual(result, expected)
def test_complex_indexing_gather(self):
"""Test complex indexing with gather-like operations."""
def fn(x, indices):
# Use indices to gather elements from x
return x[indices]
compiled = self._compile(fn)
x = torch.arange(16, dtype=torch.float32, device=self.DEVICE)
# Use power-of-2 size for indices (Pallas Triton requirement)
indices = torch.tensor(
[0, 2, 5, 7, 11, 13, 14, 15], dtype=torch.int64, device=self.DEVICE
)
result = compiled(x, indices)
expected = fn(x, indices)
self.assertEqual(result, expected)
def test_complex_indexing_2d(self):
"""Test complex indexing on 2D tensors with integer array indexing."""
if self.DEVICE == "cuda":
# Pallas Triton backend doesn't support gather operations with array indices
# This limitation is in the Pallas/Triton lowering, not our implementation
self.skipTest(
"Multi-dimensional gather not supported on Pallas Triton (CUDA) backend"
)
def fn(x, row_indices):
# Select specific rows using integer array indexing
return x[row_indices, :]
compiled = self._compile(fn)
x = torch.randn(16, 8, device=self.DEVICE)
# Use power-of-2 sizes (Pallas Triton requirement)
row_indices = torch.tensor([0, 2, 5, 7], dtype=torch.int64, device=self.DEVICE)
result = compiled(x, row_indices)
expected = fn(x, row_indices)
self.assertEqual(result, expected)
def test_complex64_mul(self):
"""Test complex64 multiplication."""
def fn(a, b):
return a * b
compiled = self._compile(fn)
a = torch.randn(16, dtype=torch.complex64, device=self.DEVICE)
b = torch.randn(16, dtype=torch.complex64, device=self.DEVICE)
result = compiled(a, b)
expected = fn(a, b)
self.assertEqual(result, expected)
def test_complex_conj(self):
"""Test complex conjugate."""
def fn(x):
return torch.conj(x)
compiled = self._compile(fn)
x = torch.randn(16, dtype=torch.complex64, device=self.DEVICE)
result = compiled(x)
expected = fn(x)
self.assertEqual(result, expected)
def test_complex_real(self):
"""Test extracting real part of complex tensor."""
def fn(x):
return torch.real(x)
compiled = self._compile(fn)
x = torch.randn(16, dtype=torch.complex64, device=self.DEVICE)
result = compiled(x)
expected = fn(x)
self.assertEqual(result, expected)
def test_complex_imag(self):
"""Test extracting imaginary part of complex tensor."""
def fn(x):
return torch.imag(x)
compiled = self._compile(fn)
x = torch.randn(16, dtype=torch.complex64, device=self.DEVICE)
result = compiled(x)
expected = fn(x)
self.assertEqual(result, expected)
def test_complex_abs(self):
"""Test complex absolute value (magnitude)."""
def fn(x):
return torch.abs(x)
compiled = self._compile(fn)
x = torch.randn(16, dtype=torch.complex64, device=self.DEVICE)
result = compiled(x)
expected = fn(x)
self.assertEqual(result, expected)
def test_complex128_conj(self):
"""Test complex128 conjugate operation."""
def fn(x):
return torch.conj(x)
compiled = self._compile(fn)
x = torch.randn(16, dtype=torch.complex128, device=self.DEVICE)
result = compiled(x)
expected = fn(x)
self.assertEqual(result, expected)
def test_complex_mul_scalar(self):
"""Test complex multiplication with scalar."""
def fn(x):
return x * 2.5
compiled = self._compile(fn)
x = torch.randn(16, dtype=torch.complex64, device=self.DEVICE)
result = compiled(x)
expected = fn(x)
self.assertEqual(result, expected)
def test_complex_conj_mul(self):
"""Test conjugate followed by multiplication."""
def fn(x, y):
return torch.conj(x) * y
compiled = self._compile(fn)
x = torch.randn(16, dtype=torch.complex64, device=self.DEVICE)
y = torch.randn(16, dtype=torch.complex64, device=self.DEVICE)
result = compiled(x, y)
expected = fn(x, y)
self.assertEqual(result, expected)
def test_where(self):
"""Test torch.where operation."""
def fn(x, y):
return torch.where(x > 0, x, y)
compiled = self._compile(fn)
x = torch.randn(16, device=self.DEVICE)
y = torch.randn(16, device=self.DEVICE)
result = compiled(x, y)
expected = fn(x, y)
self.assertEqual(result, expected)
def test_clamp(self):
"""Test torch.clamp operation."""
def fn(x):
return torch.clamp(x, -1.0, 1.0)
compiled = self._compile(fn)
x = torch.randn(16, device=self.DEVICE) * 2
result = compiled(x)
expected = fn(x)
self.assertEqual(result, expected)
def test_comparison_ops(self):
"""Test comparison operations."""
def fn(a, b):
gt = a > b
lt = a < b
eq = a == b
return gt.float() + lt.float() + eq.float()
compiled = self._compile(fn)
a = torch.randn(16, device=self.DEVICE)
b = torch.randn(16, device=self.DEVICE)
result = compiled(a, b)
expected = fn(a, b)
self.assertEqual(result, expected)
def test_logical_ops(self):
"""Test logical operations."""
def fn(a, b):
return torch.logical_and(a > 0, b > 0).float()
compiled = self._compile(fn)
a = torch.randn(16, device=self.DEVICE)
b = torch.randn(16, device=self.DEVICE)
result = compiled(a, b)
expected = fn(a, b)
self.assertEqual(result, expected)
def test_sign(self):
"""Test sign operation."""
def fn(x):
return torch.sign(x)
compiled = self._compile(fn)
x = torch.randn(16, device=self.DEVICE)
result = compiled(x)
expected = fn(x)
self.assertEqual(result, expected)
def test_reciprocal(self):
"""Test reciprocal operation."""
def fn(x):
return torch.reciprocal(x)
compiled = self._compile(fn)
x = torch.randn(16, device=self.DEVICE) + 1.0 # Avoid zeros
result = compiled(x)
expected = fn(x)
self.assertEqual(result, expected)
def test_square(self):
"""Test square operation."""
def fn(x):
return torch.square(x)
compiled = self._compile(fn)
x = torch.randn(16, device=self.DEVICE)
result = compiled(x)
expected = fn(x)
self.assertEqual(result, expected)
def test_erf(self):
"""Test erf operation."""
if self.DEVICE == "cuda":
self.skipTest("erf not supported in Pallas GPU (Triton) backend")
def fn(x):
return torch.erf(x)
compiled = self._compile(fn)
x = torch.randn(16, device=self.DEVICE)
result = compiled(x)
expected = fn(x)
self.assertEqual(result, expected)
def test_atan2(self):
"""Test atan2 operation."""
def fn(a, b):
return torch.atan2(a, b)
compiled = self._compile(fn)
a = torch.randn(16, device=self.DEVICE)
b = torch.randn(16, device=self.DEVICE)
result = compiled(a, b)
expected = fn(a, b)
self.assertEqual(result, expected)
def test_sum_reduction(self):
"""Test sum reduction."""
def fn(x):
return x.sum()
compiled = self._compile(fn)
x = torch.randn(16, device=self.DEVICE)
result = compiled(x)
expected = fn(x)
self.assertEqual(result, expected)
def test_max_reduction(self):
"""Test max reduction."""
def fn(x):
return x.max()
compiled = self._compile(fn)
x = torch.randn(16, device=self.DEVICE)
result = compiled(x)
expected = fn(x)
self.assertEqual(result, expected)
def test_min_reduction(self):
"""Test min reduction."""
def fn(x):
return x.min()
compiled = self._compile(fn)
x = torch.randn(16, device=self.DEVICE)
result = compiled(x)
expected = fn(x)
self.assertEqual(result, expected)
def test_prod_reduction(self):
"""Test prod reduction."""
if self.DEVICE == "cuda":
self.skipTest("prod reduction not supported in Pallas GPU (Triton) backend")
def fn(x):
# Use smaller values to avoid overflow
return (x * 0.1).prod()
compiled = self._compile(fn)
x = torch.randn(16, device=self.DEVICE)
result = compiled(x)
expected = fn(x)
self.assertEqual(result, expected)
@unittest.skipUnless(has_cuda_pallas(), "requires jax and pallas")
| PallasTestsMixin |
python | coleifer__peewee | peewee.py | {
"start": 164870,
"end": 166490
} | class ____(BitwiseMixin, BigIntegerField):
def __init__(self, *args, **kwargs):
kwargs.setdefault('default', 0)
super(BitField, self).__init__(*args, **kwargs)
self.__current_flag = 1
def flag(self, value=None):
if value is None:
value = self.__current_flag
self.__current_flag <<= 1
else:
self.__current_flag = value << 1
class FlagDescriptor(ColumnBase):
def __init__(self, field, value):
self._field = field
self._value = value
super(FlagDescriptor, self).__init__()
def clear(self):
return self._field.bin_and(~self._value)
def set(self):
return self._field.bin_or(self._value)
def __get__(self, instance, instance_type=None):
if instance is None:
return self
value = getattr(instance, self._field.name) or 0
return (value & self._value) != 0
def __set__(self, instance, is_set):
if is_set not in (True, False):
raise ValueError('Value must be either True or False')
value = getattr(instance, self._field.name) or 0
if is_set:
value |= self._value
else:
value &= ~self._value
setattr(instance, self._field.name, value)
def __sql__(self, ctx):
return ctx.sql(self._field.bin_and(self._value) != 0)
return FlagDescriptor(self, value)
| BitField |
python | jamielennox__requests-mock | requests_mock/response.py | {
"start": 7868,
"end": 8157
} | class ____(object):
"""Stores the data being used to process a current URL match."""
def __init__(self, headers, status_code, reason, cookies):
self.headers = headers
self.status_code = status_code
self.reason = reason
self.cookies = cookies
| _Context |
python | huggingface__transformers | src/transformers/models/kosmos2_5/modeling_kosmos2_5.py | {
"start": 61531,
"end": 63747
} | class ____(Kosmos2_5PreTrainedModel):
config_class = Kosmos2_5TextConfig
input_modalities = ("text",)
def __init__(self, config: Kosmos2_5TextConfig):
super().__init__(config)
self.model = Kosmos2_5TextTransformer(config)
# Initialize weights and apply final processing
self.post_init()
def get_input_embeddings(self) -> nn.Module:
return self.model.embed_tokens
def set_input_embeddings(self, value):
self.model.embed_tokens = value
@add_start_docstrings_to_model_forward(KOSMOS2_5_TEXT_INPUTS_DOCSTRING)
@replace_return_docstrings(output_type=BaseModelOutputWithPastAndCrossAttentions, config_class=Kosmos2_5TextConfig)
def forward(
self,
input_ids: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
image_embeds: Optional[torch.Tensor] = None,
image_embeds_position_mask: Optional[torch.Tensor] = None,
past_key_values: Optional[Cache] = None,
inputs_embeds: Optional[torch.Tensor] = None,
position_ids: Optional[torch.Tensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
cache_position: Optional[torch.LongTensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> BaseModelOutputWithPastAndCrossAttentions:
r"""
Returns:
"""
return self.model(
input_ids=input_ids,
attention_mask=attention_mask,
image_embeds=image_embeds,
image_embeds_position_mask=image_embeds_position_mask,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
position_ids=position_ids,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
cache_position=cache_position,
**kwargs,
)
@add_start_docstrings(
"""
KOSMOS-2.5 Model for generating text and image features. The model consists of a vision encoder and a language model.
""",
KOSMOS2_5_START_DOCSTRING,
)
| Kosmos2_5TextModel |
python | getsentry__sentry | src/sentry/incidents/logic.py | {
"start": 58996,
"end": 79063
} | class ____:
identifier: str | int | None
display: str | None
def get_target_identifier_display_for_integration(
action_type: ActionService,
target_value: str | None,
organization: Organization,
integration_id: int | None,
use_async_lookup: bool = True,
input_channel_id: str | None = None,
integrations: Collection[RpcIntegration] | None = None,
) -> AlertTarget:
if action_type == AlertRuleTriggerAction.Type.SLACK.value:
return _get_target_identifier_display_for_slack(
target_value,
integration_id,
use_async_lookup,
input_channel_id,
integrations,
)
if target_value is None:
raise InvalidTriggerActionError(f"{action_type.name} requires non-null target_value")
return _get_target_identifier_display_from_target_value(
action_type, target_value, organization, integration_id
)
def _get_target_identifier_display_for_slack(
target_value: str | None,
integration_id: int | None,
use_async_lookup: bool = True,
input_channel_id: str | None = None,
integrations: Iterable[RpcIntegration] | None = None,
) -> AlertTarget:
# target_value is the Slack username or channel name
if input_channel_id is not None:
# if we have a value for input_channel_id, just set target identifier to that
return AlertTarget(input_channel_id, target_value)
if target_value is None:
raise InvalidTriggerActionError(
"Slack requires target_value if input_channel_id is not present"
)
if integration_id is None:
raise InvalidTriggerActionError(
"Slack requires integration_id if input_channel_id is not present"
)
target_identifier = _get_alert_rule_trigger_action_slack_channel_id(
target_value, integration_id, use_async_lookup, integrations
)
return AlertTarget(target_identifier, target_value)
def _get_target_identifier_display_from_target_value(
action_type: ActionService,
target_value: str,
organization: Organization,
integration_id: int | None,
) -> AlertTarget:
if action_type == AlertRuleTriggerAction.Type.SLACK.value:
raise ValueError("Call _get_target_identifier_display_for_slack")
elif action_type == AlertRuleTriggerAction.Type.MSTEAMS.value:
# target_value is the MSTeams username or channel name
if integration_id is None:
raise InvalidTriggerActionError("MSTEAMS requires non-null integration_id")
return AlertTarget(
_get_alert_rule_trigger_action_msteams_channel_id(
target_value, organization, integration_id
),
target_value,
)
elif action_type == AlertRuleTriggerAction.Type.DISCORD.value:
if integration_id is None:
raise InvalidTriggerActionError("DISCORD requires non-null integration_id")
return AlertTarget(
_get_alert_rule_trigger_action_discord_channel_id(target_value, integration_id),
target_value,
)
elif action_type == AlertRuleTriggerAction.Type.PAGERDUTY.value:
# target_value is the ID of the PagerDuty service
return _get_alert_rule_trigger_action_pagerduty_service(
target_value, organization, integration_id
)
elif action_type == AlertRuleTriggerAction.Type.OPSGENIE.value:
return get_alert_rule_trigger_action_opsgenie_team(
target_value, organization, integration_id
)
else:
raise Exception("Not implemented")
def _get_alert_rule_trigger_action_slack_channel_id(
name: str,
integration_id: int,
use_async_lookup: bool = True,
integrations: Iterable[RpcIntegration] | None = None,
) -> str:
from sentry.integrations.slack.utils.channel import get_channel_id
if integrations is not None:
try:
integration = next(i for i in integrations if i.id == integration_id)
except StopIteration:
integration = None
else:
integration = integration_service.get_integration(
integration_id=integration_id, status=ObjectStatus.ACTIVE
)
if integration is None:
raise InvalidTriggerActionError("Slack workspace is a required field.")
try:
channel_data = get_channel_id(integration, name, use_async_lookup)
except DuplicateDisplayNameError as e:
domain = integration.metadata["domain_name"]
raise InvalidTriggerActionError(
'Multiple users were found with display name "%s". Please use your username, found at %s/account/settings.'
% (e, domain)
)
if channel_data.timed_out:
raise ChannelLookupTimeoutError(
"Could not find channel %s. We have timed out trying to look for it." % name
)
if channel_data.channel_id is None:
raise InvalidTriggerActionError(
"Could not find channel %s. Channel may not exist, or Sentry may not "
"have been granted permission to access it" % name
)
return channel_data.channel_id
def _get_alert_rule_trigger_action_discord_channel_id(name: str, integration_id: int) -> str | None:
from sentry.integrations.discord.utils.channel import validate_channel_id
integration = integration_service.get_integration(
integration_id=integration_id, status=ObjectStatus.ACTIVE
)
if integration is None:
raise InvalidTriggerActionError("Discord integration not found.")
try:
validate_channel_id(
channel_id=name,
guild_id=integration.external_id,
guild_name=integration.name,
)
except ValidationError as e:
raise InvalidTriggerActionError(e.message)
except IntegrationError:
raise InvalidTriggerActionError("Bad response from Discord channel lookup")
except ApiTimeoutError:
raise ChannelLookupTimeoutError(
"Could not find channel %s. We have timed out trying to look for it." % name
)
return name
def _get_alert_rule_trigger_action_msteams_channel_id(
name: str, organization: Organization, integration_id: int
) -> str:
from sentry.integrations.msteams.utils import get_channel_id
channel_id = get_channel_id(organization, integration_id, name)
if channel_id is None:
# no granting access for msteams channels unlike slack
raise InvalidTriggerActionError("Could not find channel %s." % name)
return channel_id
def _get_alert_rule_trigger_action_pagerduty_service(
target_value: str, organization: Organization, integration_id: int | None
) -> AlertTarget:
from sentry.integrations.pagerduty.utils import get_service
org_integration = integration_service.get_organization_integration(
integration_id=integration_id, organization_id=organization.id
)
service = get_service(org_integration, target_value)
if not service:
raise InvalidTriggerActionError("No PagerDuty service found.")
return AlertTarget(service["id"], service["service_name"])
def get_alert_rule_trigger_action_opsgenie_team(
target_value: str | None, organization: Organization, integration_id: int | None
) -> AlertTarget:
from sentry.integrations.opsgenie.utils import get_team
result = integration_service.organization_context(
organization_id=organization.id, integration_id=integration_id
)
integration = result.integration
oi = result.organization_integration
if integration is None or oi is None:
raise InvalidTriggerActionError("Opsgenie integration not found.")
team = get_team(target_value, oi)
if not team:
raise InvalidTriggerActionError("No Opsgenie team found.")
return AlertTarget(team["id"], team["team"])
def _get_alert_rule_trigger_action_sentry_app(
organization: Organization,
sentry_app_id: int | None,
installations: Collection[RpcSentryAppInstallation] | None,
) -> AlertTarget:
from sentry.sentry_apps.services.app import app_service
if installations is None:
installations = app_service.installations_for_organization(organization_id=organization.id)
for installation in installations:
if installation.sentry_app.id == sentry_app_id:
return AlertTarget(sentry_app_id, installation.sentry_app.name)
raise InvalidTriggerActionError("No SentryApp found.")
def delete_alert_rule_trigger_action(trigger_action: AlertRuleTriggerAction) -> None:
"""
Schedules a deletion for a AlertRuleTriggerAction, and marks it as pending deletion.
Marking it as pending deletion should filter out the object through the manager when querying.
"""
RegionScheduledDeletion.schedule(instance=trigger_action, days=0)
trigger_action.update(status=ObjectStatus.PENDING_DELETION)
def get_actions_for_trigger(
trigger: AlertRuleTrigger,
) -> QuerySet[AlertRuleTriggerAction]:
return AlertRuleTriggerAction.objects.filter(alert_rule_trigger=trigger)
def get_available_action_integrations_for_org(
organization: Organization,
) -> list[RpcIntegration]:
"""
Returns a list of integrations that the organization has installed. Integrations are
filtered by the list of registered providers.
:param organization:
"""
providers = [
registration.integration_provider
for registration in AlertRuleTriggerAction.get_registered_factories()
if registration.integration_provider is not None
]
return integration_service.get_integrations(
status=ObjectStatus.ACTIVE,
org_integration_status=ObjectStatus.ACTIVE,
organization_id=organization.id,
providers=providers,
)
def get_pagerduty_services(organization_id: int, integration_id: int) -> list[tuple[int, str]]:
from sentry.integrations.pagerduty.utils import get_services
org_int = integration_service.get_organization_integration(
organization_id=organization_id, integration_id=integration_id
)
services = get_services(org_int)
return [(s["id"], s["service_name"]) for s in services]
def get_opsgenie_teams(organization_id: int, integration_id: int) -> list[tuple[str, str]]:
org_int = integration_service.get_organization_integration(
organization_id=organization_id, integration_id=integration_id
)
if org_int is None:
return []
teams = []
team_table = org_int.config.get("team_table")
if team_table:
teams = [(team["id"], team["team"]) for team in team_table]
return teams
# TODO: This is temporarily needed to support back and forth translations for snuba / frontend.
# Uses a function from discover to break the aggregate down into parts, and then compare the "field"
# to a list of accepted fields, or a list of fields we need to translate.
# This can be dropped once snuba can handle this aliasing.
SUPPORTED_COLUMNS = [
"tags[sentry:user]",
"tags[sentry:dist]",
"tags[sentry:release]",
"transaction.duration",
]
TRANSLATABLE_COLUMNS = {
"user": "tags[sentry:user]",
"dist": "tags[sentry:dist]",
"release": "tags[sentry:release]",
}
INSIGHTS_FUNCTION_VALID_ARGS_MAP = {
"http_response_rate": ["3", "4", "5"],
"performance_score": [
"measurements.score.lcp",
"measurements.score.fcp",
"measurements.score.inp",
"measurements.score.cls",
"measurements.score.ttfb",
"measurements.score.total",
],
}
EAP_COLUMNS = [
"span.duration",
"span.self_time",
"ai.total_tokens.used",
"ai.total_cost",
"cache.item_size",
"http.decoded_response_content_length",
"http.response_content_length",
"http.response_transfer_size",
]
EAP_FUNCTIONS = [
"count",
"count_unique",
"avg",
"p50",
"p75",
"p90",
"p95",
"p99",
"p100",
"max",
"min",
"sum",
"epm",
"failure_rate",
"eps",
"apdex",
]
def get_column_from_aggregate(
    aggregate: str, allow_mri: bool, allow_eap: bool = False
) -> str | None:
    """Extract the column argument from an aggregate expression string.

    Tries, in order: span-metrics / metrics-layer-unsupported functions,
    EAP functions (when ``allow_eap``), MRI-based columns (when ``allow_mri``),
    and finally the legacy ``resolve_field`` path. Returns ``None`` when the
    aggregate takes no column (e.g. a bare ``count()``).
    """
    # These functions exist as SnQLFunction definitions and are not supported in the older
    # logic for resolving functions. We parse these using `fields.is_function`, otherwise
    # they will fail using the old resolve_field logic.
    match = is_function(aggregate)
    if match and (
        match.group("function") in SPANS_METRICS_FUNCTIONS
        or match.group("function") in METRICS_LAYER_UNSUPPORTED_TRANSACTION_METRICS_FUNCTIONS
    ):
        # An empty argument list means the function takes no column.
        return None if match.group("columns") == "" else match.group("columns")

    # Skip additional validation for EAP queries. They don't exist in the old logic.
    if match and match.group("function") in EAP_FUNCTIONS and allow_eap:
        return match.group("columns")

    if allow_mri:
        mri_column = _get_column_from_aggregate_with_mri(aggregate)
        # Only if the column was allowed, we return it, otherwise we fallback to the old logic.
        if mri_column:
            return mri_column

    # Legacy path: resolve_field validates the aggregate and exposes the
    # column as the second element of its `aggregate` tuple.
    function = resolve_field(aggregate)
    if function.aggregate is not None:
        return function.aggregate[1]

    return None
def _get_column_from_aggregate_with_mri(aggregate: str) -> str | None:
    """Return the MRI column of ``aggregate``, or ``None``.

    ``None`` is returned when the string is not a function call, when its
    column argument does not parse as an MRI, or when the function is not an
    available operation for that MRI.
    """
    parsed = is_function(aggregate)
    if parsed is None:
        return None

    func_name = parsed.group("function")
    column = parsed.group("columns")

    mri = parse_mri(column)
    if mri is None:
        return None

    if func_name not in set(get_available_operations(mri)):
        return None
    return column
def check_aggregate_column_support(
    aggregate: str, allow_mri: bool = False, allow_eap: bool = False
) -> bool:
    """Return ``True`` when the aggregate's column is supported by snuba.

    Supported means: no column at all, a measurement, a known supported or
    translatable column, an MRI (when ``allow_mri``), a whitelisted insights
    function argument, or any column at all when ``allow_eap`` is set.
    """
    # TODO(ddm): remove `allow_mri` once the experimental feature flag is removed.
    column = get_column_from_aggregate(aggregate, allow_mri, allow_eap)
    match = is_function(aggregate)
    function = match.group("function") if match else None

    if column is None or allow_eap:
        return True
    if is_measurement(column) or column in SUPPORTED_COLUMNS or column in TRANSLATABLE_COLUMNS:
        return True
    if is_mri(column) and allow_mri:
        return True
    return isinstance(function, str) and column in INSIGHTS_FUNCTION_VALID_ARGS_MAP.get(
        function, []
    )
def translate_aggregate_field(
    aggregate: str,
    reverse: bool = False,
    allow_mri: bool = False,
    allow_eap: bool = False,
) -> str:
    """Translate the column inside ``aggregate`` between its frontend alias
    (e.g. ``user``) and its snuba tag form (e.g. ``tags[sentry:user]``).

    With ``reverse=False`` the alias is rewritten to the tag form; with
    ``reverse=True`` the tag form is rewritten back to the alias. Aggregates
    whose column needs no translation are returned unchanged.
    """
    column = get_column_from_aggregate(aggregate, allow_mri, allow_eap)
    if column is None:
        return aggregate

    if reverse:
        # Invert alias -> tag into tag -> alias (values are unique).
        replacement = {tag: alias for alias, tag in TRANSLATABLE_COLUMNS.items()}.get(column)
    else:
        replacement = TRANSLATABLE_COLUMNS.get(column)

    if replacement is None:
        return aggregate
    return aggregate.replace(column, replacement)
# TODO(Ecosystem): Convert to using get_filtered_actions
def get_slack_actions_with_async_lookups(
    organization: Organization,
    data: Mapping[str, Any],
) -> list[Mapping[str, Any]]:
    """Return Slack trigger actions that require async lookup.

    Walks every action of every trigger in ``data``, validates it with the
    trigger-action serializer, and collects the validated data of Slack
    actions that were given a channel name but no channel id — those need an
    asynchronous channel-id lookup scheduled by the caller.
    """
    try:
        # Imported locally — presumably to avoid an import cycle with the
        # incidents serializers module; confirm before hoisting.
        from sentry.incidents.serializers import AlertRuleTriggerActionSerializer

        slack_actions = []
        for trigger in data["triggers"]:
            for action in trigger["actions"]:
                # Normalize camelCase / *_id field names to the canonical
                # names the serializer expects.
                action = rewrite_trigger_action_fields(action)
                a_s = AlertRuleTriggerActionSerializer(
                    context={
                        "organization": organization,
                        "access": SystemAccess(),
                        "input_channel_id": action.get("inputChannelId"),
                        "installations": app_service.installations_for_organization(
                            organization_id=organization.id
                        ),
                    },
                    data=action,
                )
                # If a channel does not have a channel ID we should use an async look up to find it
                # The calling function will receive a list of channels in need of this look up and schedule it
                if a_s.is_valid():
                    if (
                        a_s.validated_data["type"].value == AlertRuleTriggerAction.Type.SLACK.value
                        and not a_s.validated_data["input_channel_id"]
                    ):
                        slack_actions.append(a_s.validated_data)
        return slack_actions
    except KeyError:
        # If we have any KeyErrors reading the data, we can just return nothing
        # This will cause the endpoint to try creating the rule synchronously
        # which will capture the error properly.
        return []
def get_slack_channel_ids(
    organization: Organization,
    user: User | RpcUser | None,
    data: Mapping[str, Any],
) -> Mapping[str, Any]:
    """Resolve Slack channel names to channel ids for actions needing async lookup.

    Returns a mapping of ``target_identifier`` (channel name) to the resolved
    channel id. ``user`` is accepted for interface compatibility but unused
    here.
    """
    slack_actions = get_slack_actions_with_async_lookups(organization, data)
    mapped_slack_channels: dict[str, Any] = {}
    for action in slack_actions:
        # Resolve each distinct channel name only once.
        if action["target_identifier"] not in mapped_slack_channels:
            target = get_target_identifier_display_for_integration(
                action["type"].value,
                action["target_identifier"],
                organization,
                action["integration_id"],
            )
            mapped_slack_channels[action["target_identifier"]] = target.identifier
    return mapped_slack_channels
def rewrite_trigger_action_fields(action_data: dict[str, Any]) -> dict[str, Any]:
    """Normalize trigger-action field names in place and return the dict.

    Rewrites ``integration_id``/``integrationId`` to ``integration``,
    ``sentry_app_id``/``sentryAppId`` to ``sentry_app``, and ``settings`` to
    ``sentry_app_config``. For each canonical name only the first matching
    alias is popped; later aliases are left untouched.
    """
    key_aliases = (
        ("integration", ("integration_id", "integrationId")),
        ("sentry_app", ("sentry_app_id", "sentryAppId")),
        ("sentry_app_config", ("settings",)),
    )
    for canonical, aliases in key_aliases:
        for alias in aliases:
            if alias in action_data:
                action_data[canonical] = action_data.pop(alias)
                break
    return action_data
def get_filtered_actions(
    alert_rule_data: Mapping[str, Any],
    action_type: ActionService,
) -> list[dict[str, Any]]:
    """Collect trigger actions whose registered factory matches ``action_type``.

    Every matching action is returned with its fields rewritten to the
    canonical names via ``rewrite_trigger_action_fields``.
    """

    def _matches_service(action: Mapping[str, Any]) -> bool:
        slug = action.get("type")
        if not isinstance(slug, str):
            return False
        factory = AlertRuleTriggerAction.look_up_factory_by_slug(slug)
        return factory is not None and factory.service_type == action_type

    filtered: list[dict[str, Any]] = []
    for trigger in alert_rule_data.get("triggers", []):
        for action in trigger.get("actions", []):
            if _matches_service(action):
                filtered.append(rewrite_trigger_action_fields(action))
    return filtered
def schedule_update_project_config(
    alert_rule: AlertRule, projects: Iterable[Project] | None
) -> None:
    """
    Invalidate the project configs of ``projects`` when ``alert_rule``'s
    snuba query should be served by on-demand extracted metrics.

    No-op when no projects are given, or when neither the extraction feature
    nor any of the prefill features is enabled for the organization.
    """
    enabled_features = on_demand_metrics_feature_flags(_unpack_organization(alert_rule))
    prefilling = "organizations:on-demand-metrics-prefill" in enabled_features
    prefilling_for_deprecation = (
        "organizations:on-demand-gen-metrics-deprecation-prefill" in enabled_features
    )
    # Parenthesized explicitly: `and` binds tighter than `or`, so the
    # original unparenthesized form evaluated exactly like this but read
    # ambiguously. Behavior is unchanged.
    if not projects or (
        "organizations:on-demand-metrics-extraction" not in enabled_features
        and not prefilling
        and not prefilling_for_deprecation
    ):
        return

    alert_snuba_query = _unpack_snuba_query(alert_rule)
    should_use_on_demand = should_use_on_demand_metrics(
        alert_snuba_query.dataset,
        alert_snuba_query.aggregate,
        alert_snuba_query.query,
        None,
        prefilling,
        prefilling_for_deprecation=prefilling_for_deprecation,
    )
    if should_use_on_demand:
        for project in projects:
            schedule_invalidate_project_config(
                trigger="alerts:create-on-demand-metric", project_id=project.id
            )
| AlertTarget |
python | huggingface__transformers | tests/models/janus/test_modeling_janus.py | {
"start": 15462,
"end": 23582
} | class ____(unittest.TestCase):
def setUp(self):
self.model_id = "deepseek-community/Janus-Pro-1B"
@slow
def test_model_text_generation(self):
model = JanusForConditionalGeneration.from_pretrained(self.model_id, device_map="auto")
model.eval()
processor = AutoProcessor.from_pretrained(self.model_id)
image = Image.open(
requests.get("https://nineplanets.org/wp-content/uploads/2020/12/the-big-dipper-1.jpg", stream=True).raw
)
prompt = "<image_placeholder>\nDescribe what do you see here and tell me about the history behind it?"
inputs = processor(images=image, text=prompt, generation_mode="text", return_tensors="pt").to(model.device)
output = model.generate(**inputs, max_new_tokens=20, generation_mode="text", do_sample=False)
EXPECTED_DECODED_TEXT = 'You are a helpful language and vision assistant. You are able to understand the visual content that the user provides, and assist the user with a variety of tasks using natural language.\n\n\nDescribe what do you see here and tell me about the history behind it?\n\nThe image depicts the constellation of Leo, which is often referred to as the "Lion"' # fmt: skip
text = processor.decode(output[0], skip_special_tokens=True)
self.assertEqual(
text,
EXPECTED_DECODED_TEXT,
)
@slow
def test_model_text_generation_batched(self):
model = JanusForConditionalGeneration.from_pretrained(self.model_id, device_map="auto")
processor = AutoProcessor.from_pretrained(self.model_id)
image_1 = Image.open(
requests.get("https://nineplanets.org/wp-content/uploads/2020/12/the-big-dipper-1.jpg", stream=True).raw
)
image_2 = Image.open(
requests.get("https://www.kxan.com/wp-content/uploads/sites/40/2020/10/ORION.jpg", stream=True).raw
)
prompts = [
"<image_placeholder>\nDescribe what do you see here and tell me about the history behind it?",
"What constellation is this image showing?<image_placeholder>\n",
]
inputs = processor(
images=[image_1, image_2], text=prompts, generation_mode="text", padding=True, return_tensors="pt"
).to(model.device, torch.float16)
EXPECTED_TEXT_COMPLETION = [
'You are a helpful language and vision assistant. You are able to understand the visual content that the user provides, and assist the user with a variety of tasks using natural language.\n\n\nDescribe what do you see here and tell me about the history behind it?\n\nThe image depicts the constellation of Leo, which is often referred to as the "Lion"',
"You are a helpful language and vision assistant. You are able to understand the visual content that the user provides, and assist the user with a variety of tasks using natural language.\n\nWhat constellation is this image showing?\n\nThe image shows a constellation that is shaped like a stylized figure with a long tail. This",
]
generated_ids = model.generate(**inputs, max_new_tokens=20, generation_mode="text", do_sample=False)
text = processor.batch_decode(generated_ids, skip_special_tokens=True)
self.assertEqual(EXPECTED_TEXT_COMPLETION, text)
@slow
def test_model_text_generation_with_multi_image(self):
model = JanusForConditionalGeneration.from_pretrained(self.model_id, device_map="auto")
processor = AutoProcessor.from_pretrained(self.model_id)
image_1 = Image.open(
requests.get("https://nineplanets.org/wp-content/uploads/2020/12/the-big-dipper-1.jpg", stream=True).raw
)
image_2 = Image.open(
requests.get("https://www.kxan.com/wp-content/uploads/sites/40/2020/10/ORION.jpg", stream=True).raw
)
prompt = "What do these two images <image_placeholder> and <image_placeholder> have in common?"
inputs = processor(images=[image_1, image_2], text=prompt, generation_mode="text", return_tensors="pt").to(
model.device, torch.float16
)
EXPECTED_TEXT_COMPLETION = ['You are a helpful language and vision assistant. You are able to understand the visual content that the user provides, and assist the user with a variety of tasks using natural language.\n\nWhat do these two images and have in common?\n\nThe two images you provided are of the same constellation. The first image shows the constellation of Leo, and the second image shows the constellation of Ursa Major. Both constellations are part of'] # fmt: skip
generated_ids = model.generate(**inputs, max_new_tokens=40, do_sample=False)
text = processor.batch_decode(generated_ids, skip_special_tokens=True)
self.assertEqual(EXPECTED_TEXT_COMPLETION, text)
@slow
def test_model_generate_images(self):
model = JanusForConditionalGeneration.from_pretrained(self.model_id, device_map="auto")
processor = AutoProcessor.from_pretrained(self.model_id)
inputs = processor(
text=["A portrait of young girl. masterpiece, film grained, best quality."],
padding=True,
generation_mode="image",
return_tensors="pt",
).to(model.device)
self.assertTrue(inputs.input_ids.shape[1] == 17)
out = model.generate(
**inputs,
generation_mode="image",
do_sample=False,
)
# It should run for num_image_tokens in this case 576.
self.assertTrue(out.shape[1] == 576)
# fmt: off
expected_tokens = Expectations(
{
("rocm", None): [
10367, 1380, 4841, 15155, 1224, 16361, 15834, 13722, 15258, 8321, 10496, 14532, 8770, 12353, 5481,
11484, 2585, 8587, 3201, 14292, 3356, 2037, 3077, 6107, 3758, 2572, 9376, 13219, 6007, 14292, 12696,
10666, 10046, 13483, 8282, 9101, 5208, 4260, 13886, 13335, 6135, 2316, 15423, 311, 5460, 12218,
14172, 8583, 14577, 3648
],
("rocm", (9, 5)): [
4484, 4015, 15750, 506, 3758, 11651, 8597, 5739, 4861, 971, 14985, 14834, 15438, 7548, 1820, 1465,
13529, 12761, 10503, 12761, 14303, 6155, 4015, 11766, 705, 15736, 14146, 10417, 1951, 7713, 14305,
15617, 6169, 2706, 8006, 14893, 3855, 10188, 15652, 6297, 1097, 12108, 15038, 311, 14998, 15165,
897, 4044, 1762, 4676
],
("cuda", None): [
4484, 4015, 15750, 506, 3758, 11651, 8597, 5739, 4861, 971, 14985, 14834, 15438, 7548, 1820, 1465,
13529, 12761, 10503, 12761, 14303, 6155, 4015, 11766, 705, 15736, 14146, 10417, 1951, 7713, 14305,
15617, 6169, 2706, 8006, 14893, 3855, 10188, 15652, 6297, 1097, 12108, 15038, 311, 14998, 15165,
897, 4044, 1762, 4676
],
("xpu", None): [
4484, 4015, 15750, 506, 3758, 11651, 8597, 5739, 4861, 971, 14985, 14834, 15438, 7548, 1820, 1465,
13529, 12761, 10503, 12761, 14303, 6155, 4015, 11766, 705, 15736, 14146, 10417, 1951, 7713, 14305,
15617, 6169, 2706, 8006, 14893, 3855, 10188, 15652, 6297, 1097, 12108, 15038, 311, 14998, 15165,
897, 4044, 1762, 4676
],
}
)
expected_tokens = torch.tensor(expected_tokens.get_expectation()).to(model.device)
# fmt: on
# Compare the first 50 generated tokens.
self.assertTrue(torch.allclose(expected_tokens, out[0][:50]))
# Decode generated tokens to pixel values and postprocess them.
decoded_pixel_values = model.decode_image_tokens(out)
images = processor.postprocess(list(decoded_pixel_values.float()), return_tensors="np")
self.assertTrue(images["pixel_values"].shape == (1, 384, 384, 3))
self.assertTrue(isinstance(images["pixel_values"], np.ndarray))
| JanusIntegrationTest |
python | run-llama__llama_index | llama-index-core/llama_index/core/indices/tree/select_leaf_embedding_retriever.py | {
"start": 705,
"end": 5988
} | class ____(TreeSelectLeafRetriever):
"""
Tree select leaf embedding retriever.
This class traverses the index graph using the embedding similarity between the
query and the node text.
Args:
query_template (Optional[BasePromptTemplate]): Tree Select Query Prompt
(see :ref:`Prompt-Templates`).
query_template_multiple (Optional[BasePromptTemplate]): Tree Select
Query Prompt (Multiple)
(see :ref:`Prompt-Templates`).
text_qa_template (Optional[BasePromptTemplate]): Question-Answer Prompt
(see :ref:`Prompt-Templates`).
refine_template (Optional[BasePromptTemplate]): Refinement Prompt
(see :ref:`Prompt-Templates`).
child_branch_factor (int): Number of child nodes to consider at each level.
If child_branch_factor is 1, then the query will only choose one child node
to traverse for any given parent node.
If child_branch_factor is 2, then the query will choose two child nodes.
embed_model (Optional[BaseEmbedding]): Embedding model to use for
embedding similarity.
"""
def __init__(
self,
index: TreeIndex,
embed_model: Optional[BaseEmbedding] = None,
query_template: Optional[BasePromptTemplate] = None,
text_qa_template: Optional[BasePromptTemplate] = None,
refine_template: Optional[BasePromptTemplate] = None,
query_template_multiple: Optional[BasePromptTemplate] = None,
child_branch_factor: int = 1,
verbose: bool = False,
callback_manager: Optional[CallbackManager] = None,
object_map: Optional[dict] = None,
**kwargs: Any,
):
super().__init__(
index,
query_template=query_template,
text_qa_template=text_qa_template,
refine_template=refine_template,
query_template_multiple=query_template_multiple,
child_branch_factor=child_branch_factor,
verbose=verbose,
callback_manager=callback_manager,
object_map=object_map,
**kwargs,
)
self._embed_model = embed_model or Settings.embed_model
def _query_level(
self,
cur_node_ids: Dict[int, str],
query_bundle: QueryBundle,
level: int = 0,
) -> str:
"""Answer a query recursively."""
cur_nodes = {
index: self._docstore.get_node(node_id)
for index, node_id in cur_node_ids.items()
}
cur_node_list = get_sorted_node_list(cur_nodes)
# Get the node with the highest similarity to the query
selected_nodes, selected_indices = self._get_most_similar_nodes(
cur_node_list, query_bundle
)
result_response = None
for node, index in zip(selected_nodes, selected_indices):
logger.debug(
f">[Level {level}] Node [{index + 1}] Summary text: "
f"{' '.join(node.get_content().splitlines())}"
)
# Get the response for the selected node
result_response = self._query_with_selected_node(
node, query_bundle, level=level, prev_response=result_response
)
return cast(str, result_response)
def _get_query_text_embedding_similarities(
self, query_bundle: QueryBundle, nodes: List[BaseNode]
) -> List[float]:
"""
Get query text embedding similarity.
Cache the query embedding and the node text embedding.
"""
if query_bundle.embedding is None:
query_bundle.embedding = self._embed_model.get_agg_embedding_from_queries(
query_bundle.embedding_strs
)
similarities = []
for node in nodes:
if node.embedding is None:
node.embedding = self._embed_model.get_text_embedding(
node.get_content(metadata_mode=MetadataMode.EMBED)
)
similarity = self._embed_model.similarity(
query_bundle.embedding, node.embedding
)
similarities.append(similarity)
return similarities
def _get_most_similar_nodes(
self, nodes: List[BaseNode], query_bundle: QueryBundle
) -> Tuple[List[BaseNode], List[int]]:
"""Get the node with the highest similarity to the query."""
similarities = self._get_query_text_embedding_similarities(query_bundle, nodes)
selected_nodes: List[BaseNode] = []
selected_indices: List[int] = []
for node, _ in sorted(
zip(nodes, similarities), key=lambda x: x[1], reverse=True
):
if len(selected_nodes) < self.child_branch_factor:
selected_nodes.append(node)
selected_indices.append(nodes.index(node))
else:
break
return selected_nodes, selected_indices
def _select_nodes(
self,
cur_node_list: List[BaseNode],
query_bundle: QueryBundle,
level: int = 0,
) -> List[BaseNode]:
selected_nodes, _ = self._get_most_similar_nodes(cur_node_list, query_bundle)
return selected_nodes
| TreeSelectLeafEmbeddingRetriever |
python | bokeh__bokeh | src/bokeh/models/tools.py | {
"start": 41746,
"end": 42254
} | class ____(ZoomBaseTool):
''' *toolbar icon*: |zoom_in_icon|
The zoom-in tool allows users to click a button to zoom in
by a fixed amount.
.. |zoom_in_icon| image:: /_images/icons/zoom-in.svg
:height: 24px
:alt: Icon of a plus sign next to a magnifying glass representing the zoom-in tool in the toolbar.
'''
# explicit __init__ to support Init signatures
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
| ZoomInTool |
python | altair-viz__altair | altair/vegalite/v6/schema/channels.py | {
"start": 670305,
"end": 678512
} | class ____(
DatumChannelMixin, core.FieldOrDatumDefWithConditionDatumDefnumberArray
):
"""
StrokeDashDatum schema wrapper.
Parameters
----------
bandPosition : float
Relative position on a band of a stacked, binned, time unit, or band scale. For
example, the marks will be positioned at the beginning of the band if set to ``0``,
and at the middle of the band if set to ``0.5``.
condition : dict, :class:`ConditionalValueDefnumberArrayExprRef`, :class:`ConditionalParameterValueDefnumberArrayExprRef`, :class:`ConditionalPredicateValueDefnumberArrayExprRef`, Sequence[dict, :class:`ConditionalValueDefnumberArrayExprRef`, :class:`ConditionalParameterValueDefnumberArrayExprRef`, :class:`ConditionalPredicateValueDefnumberArrayExprRef`]
One or more value definition(s) with `a parameter or a test predicate
<https://vega.github.io/vega-lite/docs/condition.html>`__.
**Note:** A field definition's ``condition`` property can only contain `conditional
value definitions <https://vega.github.io/vega-lite/docs/condition.html#value>`__
since Vega-Lite only allows at most one encoded field per encoding channel.
datum : str, bool, dict, float, :class:`ExprRef`, :class:`DateTime`, :class:`RepeatRef`, :class:`PrimitiveValue`, None
A constant value in data domain.
title : str, :class:`Text`, Sequence[str], None
A title for the field. If ``null``, the title will be removed.
**Default value:** derived from the field's name and transformation function
(``aggregate``, ``bin`` and ``timeUnit``). If the field has an aggregate function,
the function is displayed as part of the title (e.g., ``"Sum of Profit"``). If the
field is binned or has a time unit applied, the applied function is shown in
parentheses (e.g., ``"Profit (binned)"``, ``"Transaction Date (year-month)"``).
Otherwise, the title is simply the field name.
**Notes**:
1) You can customize the default field title format by providing the `fieldTitle
<https://vega.github.io/vega-lite/docs/config.html#top-level-config>`__ property in
the `config <https://vega.github.io/vega-lite/docs/config.html>`__ or `fieldTitle
function via the compile function's options
<https://vega.github.io/vega-lite/usage/compile.html#field-title>`__.
2) If both field definition's ``title`` and axis, header, or legend ``title`` are
defined, axis/header/legend title will be used.
type : :class:`Type`, Literal['quantitative', 'ordinal', 'temporal', 'nominal', 'geojson']
The type of measurement (``"quantitative"``, ``"temporal"``, ``"ordinal"``, or
``"nominal"``) for the encoded field or constant value (``datum``). It can also be a
``"geojson"`` type for encoding `'geoshape'
<https://vega.github.io/vega-lite/docs/geoshape.html>`__.
Vega-Lite automatically infers data types in many cases as discussed below. However,
type is required for a field if: (1) the field is not nominal and the field encoding
has no specified ``aggregate`` (except ``argmin`` and ``argmax``), ``bin``, scale
type, custom ``sort`` order, nor ``timeUnit`` or (2) if you wish to use an ordinal
scale for a field with ``bin`` or ``timeUnit``.
**Default value:**
1) For a data ``field``, ``"nominal"`` is the default data type unless the field
encoding has ``aggregate``, ``channel``, ``bin``, scale type, ``sort``, or
``timeUnit`` that satisfies the following criteria:
* ``"quantitative"`` is the default type if (1) the encoded field contains ``bin``
or ``aggregate`` except ``"argmin"`` and ``"argmax"``, (2) the encoding channel is
``latitude`` or ``longitude`` channel or (3) if the specified scale type is `a
quantitative scale <https://vega.github.io/vega-lite/docs/scale.html#type>`__.
* ``"temporal"`` is the default type if (1) the encoded field contains ``timeUnit``
or (2) the specified scale type is a time or utc scale
* ``"ordinal"`` is the default type if (1) the encoded field contains a `custom sort
order
<https://vega.github.io/vega-lite/docs/sort.html#specifying-custom-sort-order>`__,
(2) the specified scale type is an ordinal/point/band scale, or (3) the encoding
channel is ``order``.
2) For a constant value in data domain (``datum``):
* ``"quantitative"`` if the datum is a number
* ``"nominal"`` if the datum is a string
* ``"temporal"`` if the datum is `a date time object
<https://vega.github.io/vega-lite/docs/datetime.html>`__
**Note:**
* Data ``type`` describes the semantics of the data rather than the primitive data
types (number, string, etc.). The same primitive data type can have different
types of measurement. For example, numeric data can represent quantitative,
ordinal, or nominal data.
* Data values for a temporal field can be either a date-time string (e.g.,
``"2015-03-07 12:32:17"``, ``"17:01"``, ``"2015-03-16"``. ``"2015"``) or a
timestamp number (e.g., ``1552199579097``).
* When using with `bin <https://vega.github.io/vega-lite/docs/bin.html>`__, the
``type`` property can be either ``"quantitative"`` (for using a linear bin scale)
or `"ordinal" (for using an ordinal bin scale)
<https://vega.github.io/vega-lite/docs/type.html#cast-bin>`__.
* When using with `timeUnit
<https://vega.github.io/vega-lite/docs/timeunit.html>`__, the ``type`` property
can be either ``"temporal"`` (default, for using a temporal scale) or `"ordinal"
(for using an ordinal scale)
<https://vega.github.io/vega-lite/docs/type.html#cast-bin>`__.
* When using with `aggregate
<https://vega.github.io/vega-lite/docs/aggregate.html>`__, the ``type`` property
refers to the post-aggregation data type. For example, we can calculate count
``distinct`` of a categorical field ``"cat"`` using ``{"aggregate": "distinct",
"field": "cat"}``. The ``"type"`` of the aggregate output is ``"quantitative"``.
* Secondary channels (e.g., ``x2``, ``y2``, ``xError``, ``yError``) do not have
``type`` as they must have exactly the same type as their primary channels (e.g.,
``x``, ``y``).
**See also:** `type <https://vega.github.io/vega-lite/docs/type.html>`__
documentation.
"""
_class_is_valid_at_instantiation = False
_encoding_name = "strokeDash"
@overload
def bandPosition(self, _: float, /) -> StrokeDashDatum: ...
@overload
def condition(
self,
*,
test: Optional[str | SchemaBase | Map] = Undefined,
value: Optional[Parameter | SchemaBase | Sequence[float] | Map] = Undefined,
) -> StrokeDashDatum: ...
@overload
def condition(
self,
*,
empty: Optional[bool] = Undefined,
param: Optional[str | SchemaBase] = Undefined,
value: Optional[Parameter | SchemaBase | Sequence[float] | Map] = Undefined,
) -> StrokeDashDatum: ...
@overload
def condition(
self, _: list[core.ConditionalValueDefnumberArrayExprRef], /
) -> StrokeDashDatum: ...
@overload
def title(self, _: str | Sequence[str] | None, /) -> StrokeDashDatum: ...
@overload
def type(self, _: Type_T, /) -> StrokeDashDatum: ...
def __init__(
self,
datum,
bandPosition: Optional[float] = Undefined,
condition: Optional[SchemaBase | Sequence[SchemaBase | Map] | Map] = Undefined,
title: Optional[str | SchemaBase | Sequence[str] | None] = Undefined,
type: Optional[SchemaBase | Type_T] = Undefined,
**kwds,
):
super().__init__(
datum=datum,
bandPosition=bandPosition,
condition=condition,
title=title,
type=type,
**kwds,
)
@with_property_setters
| StrokeDashDatum |
python | doocs__leetcode | solution/1300-1399/1325.Delete Leaves With a Given Value/Solution.py | {
"start": 192,
"end": 605
} | class ____:
def removeLeafNodes(
self, root: Optional[TreeNode], target: int
) -> Optional[TreeNode]:
if root is None:
return None
root.left = self.removeLeafNodes(root.left, target)
root.right = self.removeLeafNodes(root.right, target)
if root.left is None and root.right is None and root.val == target:
return None
return root
| Solution |
python | doocs__leetcode | solution/3200-3299/3228.Maximum Number of Operations to Move Ones to the End/Solution.py | {
"start": 0,
"end": 252
} | class ____:
def maxOperations(self, s: str) -> int:
ans = cnt = 0
for i, c in enumerate(s):
if c == "1":
cnt += 1
elif i and s[i - 1] == "1":
ans += cnt
return ans
| Solution |
python | pytest-dev__pytest-xdist | testing/test_workermanage.py | {
"start": 5221,
"end": 6430
} | class ____:
def test_hrsync_filter(self, source: Path, dest: Path) -> None:
source.joinpath("dir").mkdir()
source.joinpath("dir", "file.txt").touch()
source.joinpath(".svn").mkdir()
source.joinpath(".svn", "entries").touch()
source.joinpath(".somedotfile").mkdir()
source.joinpath(".somedotfile", "moreentries").touch()
source.joinpath("somedir").mkdir()
source.joinpath("somedir", "editfile~").touch()
syncer = HostRSync(source, ignores=NodeManager.DEFAULT_IGNORES)
files = list(visit_path(source, recurse=syncer.filter, filter=syncer.filter))
names = {x.name for x in files}
assert names == {"dir", "file.txt", "somedir"}
def test_hrsync_one_host(self, source: Path, dest: Path) -> None:
gw = execnet.makegateway("execmodel=main_thread_only//popen//chdir=%s" % dest)
finished = []
rsync = HostRSync(source)
rsync.add_target_host(gw, finished=lambda: finished.append(1))
source.joinpath("hello.py").write_text("world")
rsync.send()
gw.exit()
assert dest.joinpath(source.name, "hello.py").exists()
assert len(finished) == 1
| TestHRSync |
python | jazzband__django-model-utils | tests/test_fields/test_field_tracker.py | {
"start": 32968,
"end": 34087
} | class ____(FieldTrackedModelCustomTests):
tracked_class = ModelTrackedNotDefault
def test_first_save(self) -> None:
self.assertHasChanged(name=True, number=True)
self.assertPrevious(name=None, number=None)
self.assertCurrent(name='')
self.assertChanged()
self.instance.name = 'retro'
self.instance.number = 4
self.assertHasChanged(name=True, number=True)
self.assertPrevious(name=None, number=None)
self.assertCurrent(name='retro')
self.assertChanged()
def test_pre_save_has_changed(self) -> None:
self.assertHasChanged(name=True, number=True)
self.instance.name = 'new age'
self.assertHasChanged(name=True, number=True)
self.instance.number = 7
self.assertHasChanged(name=True, number=True)
def test_pre_save_changed(self) -> None:
self.assertChanged()
self.instance.name = 'new age'
self.assertChanged()
self.instance.number = 8
self.assertChanged()
self.instance.name = ''
self.assertChanged()
| ModelTrackedModelCustomTests |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_print_area04.py | {
"start": 315,
"end": 1188
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("print_area04.xlsx")
self.ignore_files = [
"xl/printerSettings/printerSettings1.bin",
"xl/worksheets/_rels/sheet1.xml.rels",
]
self.ignore_elements = {
"[Content_Types].xml": ['<Default Extension="bin"'],
"xl/worksheets/sheet1.xml": ["<pageMargins", "<pageSetup"],
}
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file with a print area."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
worksheet.print_area("A1:A8")
worksheet.write("A1", "Foo")
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | facebook__pyre-check | client/coverage_data.py | {
"start": 1782,
"end": 1928
} | class ____(json_mixins.SnakeCaseAndExcludeJsonMixin):
parent: Optional[str]
name: str
@dataclasses.dataclass(frozen=True)
| FunctionIdentifier |
python | django__django | django/db/models/lookups.py | {
"start": 13373,
"end": 15094
} | class ____(FieldGetDbPrepValueMixin, BuiltinLookup):
lookup_name = "exact"
def get_prep_lookup(self):
from django.db.models.sql.query import Query # avoid circular import
if isinstance(query := self.rhs, Query):
if not query.has_limit_one():
raise ValueError(
"The QuerySet value for an exact lookup must be limited to "
"one result using slicing."
)
lhs_len = len(self.lhs) if isinstance(self.lhs, (ColPairs, tuple)) else 1
if (rhs_len := query._subquery_fields_len) != lhs_len:
raise ValueError(
f"The QuerySet value for the exact lookup must have {lhs_len} "
f"selected fields (received {rhs_len})"
)
if not query.has_select_fields:
query.clear_select_clause()
query.add_fields(["pk"])
return super().get_prep_lookup()
def as_sql(self, compiler, connection):
# Avoid comparison against direct rhs if lhs is a boolean value. That
# turns "boolfield__exact=True" into "WHERE boolean_field" instead of
# "WHERE boolean_field = True" when allowed.
if (
isinstance(self.rhs, bool)
and getattr(self.lhs, "conditional", False)
and connection.ops.conditional_expression_supported_in_where_clause(
self.lhs
)
):
lhs_sql, params = self.process_lhs(compiler, connection)
template = "%s" if self.rhs else "NOT %s"
return template % lhs_sql, params
return super().as_sql(compiler, connection)
@Field.register_lookup
| Exact |
python | FactoryBoy__factory_boy | tests/test_declarations.py | {
"start": 174,
"end": 383
} | class ____(unittest.TestCase):
def test_errors(self):
with self.assertRaises(NotImplementedError):
utils.evaluate_declaration(declarations.OrderedDeclaration())
| OrderedDeclarationTestCase |
python | doocs__leetcode | solution/2900-2999/2940.Find Building Where Alice and Bob Can Meet/Solution.py | {
"start": 441,
"end": 1244
} | class ____:
def leftmostBuildingQueries(
self, heights: List[int], queries: List[List[int]]
) -> List[int]:
n, m = len(heights), len(queries)
for i in range(m):
queries[i] = [min(queries[i]), max(queries[i])]
j = n - 1
s = sorted(set(heights))
ans = [-1] * m
tree = BinaryIndexedTree(n)
for i in sorted(range(m), key=lambda i: -queries[i][1]):
l, r = queries[i]
while j > r:
k = n - bisect_left(s, heights[j]) + 1
tree.update(k, j)
j -= 1
if l == r or heights[l] < heights[r]:
ans[i] = r
else:
k = n - bisect_left(s, heights[l])
ans[i] = tree.query(k)
return ans
| Solution |
python | pytorch__pytorch | test/torch_np/numpy_tests/lib/test_shape_base_.py | {
"start": 4944,
"end": 9844
} | class ____(TestCase):
def test_simple(self):
a = np.ones((20, 10), "d")
assert_array_equal(apply_along_axis(len, 0, a), len(a) * np.ones(a.shape[1]))
def test_simple101(self):
a = np.ones((10, 101), "d")
assert_array_equal(apply_along_axis(len, 0, a), len(a) * np.ones(a.shape[1]))
def test_3d(self):
a = np.arange(27).reshape((3, 3, 3))
assert_array_equal(
apply_along_axis(np.sum, 0, a), [[27, 30, 33], [36, 39, 42], [45, 48, 51]]
)
def test_scalar_array(self, cls=np.ndarray):
a = np.ones((6, 3)).view(cls)
res = apply_along_axis(np.sum, 0, a)
assert_(isinstance(res, cls))
assert_array_equal(res, np.array([6, 6, 6]).view(cls))
def test_0d_array(self, cls=np.ndarray):
def sum_to_0d(x):
"""Sum x, returning a 0d array of the same class"""
assert_equal(x.ndim, 1)
return np.squeeze(np.sum(x, keepdims=True))
a = np.ones((6, 3)).view(cls)
res = apply_along_axis(sum_to_0d, 0, a)
assert_(isinstance(res, cls))
assert_array_equal(res, np.array([6, 6, 6]).view(cls))
res = apply_along_axis(sum_to_0d, 1, a)
assert_(isinstance(res, cls))
assert_array_equal(res, np.array([3, 3, 3, 3, 3, 3]).view(cls))
def test_axis_insertion(self, cls=np.ndarray):
def f1to2(x):
"""produces an asymmetric non-square matrix from x"""
assert_equal(x.ndim, 1)
return (x[::-1] * x[1:, None]).view(cls)
a2d = np.arange(6 * 3).reshape((6, 3))
# 2d insertion along first axis
actual = apply_along_axis(f1to2, 0, a2d)
expected = np.stack(
[f1to2(a2d[:, i]) for i in range(a2d.shape[1])], axis=-1
).view(cls)
assert_equal(type(actual), type(expected))
assert_equal(actual, expected)
# 2d insertion along last axis
actual = apply_along_axis(f1to2, 1, a2d)
expected = np.stack(
[f1to2(a2d[i, :]) for i in range(a2d.shape[0])], axis=0
).view(cls)
assert_equal(type(actual), type(expected))
assert_equal(actual, expected)
# 3d insertion along middle axis
a3d = np.arange(6 * 5 * 3).reshape((6, 5, 3))
actual = apply_along_axis(f1to2, 1, a3d)
expected = np.stack(
[
np.stack([f1to2(a3d[i, :, j]) for i in range(a3d.shape[0])], axis=0)
for j in range(a3d.shape[2])
],
axis=-1,
).view(cls)
assert_equal(type(actual), type(expected))
assert_equal(actual, expected)
def test_axis_insertion_ma(self):
def f1to2(x):
"""produces an asymmetric non-square matrix from x"""
assert_equal(x.ndim, 1)
res = x[::-1] * x[1:, None]
return np.ma.masked_where(res % 5 == 0, res)
a = np.arange(6 * 3).reshape((6, 3))
res = apply_along_axis(f1to2, 0, a)
assert_(isinstance(res, np.ma.masked_array))
assert_equal(res.ndim, 3)
assert_array_equal(res[:, :, 0].mask, f1to2(a[:, 0]).mask)
assert_array_equal(res[:, :, 1].mask, f1to2(a[:, 1]).mask)
assert_array_equal(res[:, :, 2].mask, f1to2(a[:, 2]).mask)
def test_tuple_func1d(self):
def sample_1d(x):
return x[1], x[0]
res = np.apply_along_axis(sample_1d, 1, np.array([[1, 2], [3, 4]]))
assert_array_equal(res, np.array([[2, 1], [4, 3]]))
def test_empty(self):
# can't apply_along_axis when there's no chance to call the function
def never_call(x):
assert_(False) # should never be reached
a = np.empty((0, 0))
assert_raises(ValueError, np.apply_along_axis, never_call, 0, a)
assert_raises(ValueError, np.apply_along_axis, never_call, 1, a)
# but it's sometimes ok with some non-zero dimensions
def empty_to_1(x):
assert_(len(x) == 0)
return 1
a = np.empty((10, 0))
actual = np.apply_along_axis(empty_to_1, 1, a)
assert_equal(actual, np.ones(10))
assert_raises(ValueError, np.apply_along_axis, empty_to_1, 0, a)
@skip # TypeError: descriptor 'union' for 'set' objects doesn't apply to a 'numpy.int64' object
def test_with_iterable_object(self):
# from issue 5248
d = np.array([[{1, 11}, {2, 22}, {3, 33}], [{4, 44}, {5, 55}, {6, 66}]])
actual = np.apply_along_axis(lambda a: set.union(*a), 0, d)
expected = np.array([{1, 11, 4, 44}, {2, 22, 5, 55}, {3, 33, 6, 66}])
assert_equal(actual, expected)
# issue 8642 - assert_equal doesn't detect this!
for i in np.ndindex(actual.shape):
assert_equal(type(actual[i]), type(expected[i]))
@xfail # (reason="apply_over_axes not implemented")
| TestApplyAlongAxis |
python | apache__airflow | providers/opsgenie/src/airflow/providers/opsgenie/operators/opsgenie.py | {
"start": 1113,
"end": 5108
} | class ____(BaseOperator):
"""
This operator allows you to post alerts to Opsgenie.
Accepts a connection that has an Opsgenie API key as the connection's password.
This operator sets the domain to conn_id.host, and if not set will default
to ``https://api.opsgenie.com``.
Each Opsgenie API key can be pre-configured to a team integration.
You can override these defaults in this operator.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:OpsgenieCreateAlertOperator`
:param opsgenie_conn_id: The name of the Opsgenie connection to use
:param message: The Message of the Opsgenie alert (templated)
:param alias: Client-defined identifier of the alert (templated)
:param description: Description field of the alert (templated)
:param responders: Teams, users, escalations and schedules that
the alert will be routed to send notifications.
:param visible_to: Teams and users that the alert will become visible
to without sending any notification.
:param actions: Custom actions that will be available for the alert.
:param tags: Tags of the alert.
:param details: Map of key-value pairs to use as custom properties of the alert.
:param entity: Entity field of the alert that is
generally used to specify which domain alert is related to. (templated)
:param source: Source field of the alert. Default value is
IP address of the incoming request.
:param priority: Priority level of the alert. Default value is P3. (templated)
:param user: Display name of the request owner.
:param note: Additional note that will be added while creating the alert. (templated)
"""
template_fields: Sequence[str] = ("message", "alias", "description", "entity", "priority", "note")
def __init__(
self,
*,
message: str,
opsgenie_conn_id: str = "opsgenie_default",
alias: str | None = None,
description: str | None = None,
responders: list[dict] | None = None,
visible_to: list[dict] | None = None,
actions: list[str] | None = None,
tags: list[str] | None = None,
details: dict | None = None,
entity: str | None = None,
source: str | None = None,
priority: str | None = None,
user: str | None = None,
note: str | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.message = message
self.opsgenie_conn_id = opsgenie_conn_id
self.alias = alias
self.description = description
self.responders = responders
self.visible_to = visible_to
self.actions = actions
self.tags = tags
self.details = details
self.entity = entity
self.source = source
self.priority = priority
self.user = user
self.note = note
self.hook: OpsgenieAlertHook | None = None
def _build_opsgenie_payload(self) -> dict[str, Any]:
"""
Construct the Opsgenie JSON payload.
All relevant parameters are combined here to a valid Opsgenie JSON payload.
:return: Opsgenie payload (dict) to send
"""
payload = {}
for key in [
"message",
"alias",
"description",
"responders",
"visible_to",
"actions",
"tags",
"details",
"entity",
"source",
"priority",
"user",
"note",
]:
val = getattr(self, key)
if val:
payload[key] = val
return payload
def execute(self, context: Context) -> None:
"""Call the OpsgenieAlertHook to post message."""
self.hook = OpsgenieAlertHook(self.opsgenie_conn_id)
self.hook.create_alert(self._build_opsgenie_payload())
| OpsgenieCreateAlertOperator |
python | Unity-Technologies__ml-agents | ml-agents/mlagents/trainers/torch_entities/action_flattener.py | {
"start": 244,
"end": 1713
} | class ____:
def __init__(self, action_spec: ActionSpec):
"""
A torch module that creates the flattened form of an AgentAction object.
The flattened form is the continuous action concatenated with the
concatenated one hot encodings of the discrete actions.
:param action_spec: An ActionSpec that describes the action space dimensions
"""
self._specs = action_spec
@property
def flattened_size(self) -> int:
"""
The flattened size is the continuous size plus the sum of the branch sizes
since discrete actions are encoded as one hots.
"""
return self._specs.continuous_size + sum(self._specs.discrete_branches)
def forward(self, action: AgentAction) -> torch.Tensor:
"""
Returns a tensor corresponding the flattened action
:param action: An AgentAction object
"""
action_list: List[torch.Tensor] = []
if self._specs.continuous_size > 0:
action_list.append(action.continuous_tensor)
if self._specs.discrete_size > 0:
flat_discrete = torch.cat(
ModelUtils.actions_to_onehot(
torch.as_tensor(action.discrete_tensor, dtype=torch.long),
self._specs.discrete_branches,
),
dim=1,
)
action_list.append(flat_discrete)
return torch.cat(action_list, dim=1)
| ActionFlattener |
python | getsentry__sentry | src/sentry/auth/providers/saml2/forms.py | {
"start": 3774,
"end": 4727
} | class ____(forms.Form):
# NOTE: These fields explicitly map to the sentry.auth.saml2.Attributes keys
identifier = forms.CharField(
label="IdP User ID",
widget=forms.TextInput(attrs={"placeholder": "eg. user.uniqueID"}),
help_text=_(
"The IdPs unique ID attribute key for the user. This is "
"what Sentry will used to identify the users identity from "
"the identity provider."
),
)
user_email = forms.CharField(
label="User Email",
widget=forms.TextInput(attrs={"placeholder": "eg. user.email"}),
help_text=_(
"The IdPs email address attribute key for the "
"user. Upon initial linking this will be used to identify "
"the user in Sentry."
),
)
first_name = forms.CharField(label="First Name", required=False)
last_name = forms.CharField(label="Last Name", required=False)
| AttributeMappingForm |
python | sqlalchemy__sqlalchemy | test/sql/test_operators.py | {
"start": 27010,
"end": 27528
} | class ____(_CustomComparatorTests, fixtures.TestBase):
def _add_override_factory(self):
class MyInteger(Integer):
class comparator_factory(TypeEngine.Comparator):
def __init__(self, expr):
super().__init__(expr)
def __add__(self, other):
return self.expr.op("goofy")(other)
def __and__(self, other):
return self.expr.op("goofy_and")(other)
return MyInteger
| CustomComparatorTest |
python | scipy__scipy | benchmarks/benchmarks/stats.py | {
"start": 15433,
"end": 15757
} | class ____(Benchmark):
param_names = ['n_levels']
params = [
[10, 1000]
]
def setup(self, n_levels):
rng = np.random.default_rng(12345678)
self.levels = rng.integers(n_levels, size=(1000, 10))
def time_mode(self, n_levels):
stats.mode(self.levels, axis=0)
| DescriptiveStats |
python | psf__black | tests/data/cases/class_methods_new_line.py | {
"start": 1160,
"end": 1257
} | class ____:
class Inner:
pass
def __init__(self):
pass
| ClassWithInitWithInner |
python | allegroai__clearml | clearml/backend_api/services/v2_23/tasks.py | {
"start": 358803,
"end": 398389
} | class ____(Response):
"""
Response of tasks.get_all endpoint.
:param tasks: List of tasks
:type tasks: Sequence[Task]
:param scroll_id: Scroll ID that can be used with the next calls to get_all to
retrieve more data
:type scroll_id: str
"""
_service = "tasks"
_action = "get_all"
_version = "2.23"
_schema = {
"definitions": {
"artifact": {
"properties": {
"content_size": {
"description": "Raw data length in bytes",
"type": "integer",
},
"display_data": {
"description": "User-defined list of key/value pairs, sorted",
"items": {"items": {"type": "string"}, "type": "array"},
"type": "array",
},
"hash": {
"description": "Hash of entire raw data",
"type": "string",
},
"key": {"description": "Entry key", "type": "string"},
"mode": {
"$ref": "#/definitions/artifact_mode_enum",
"description": "System defined input/output indication",
},
"timestamp": {
"description": "Epoch time when artifact was created",
"type": "integer",
},
"type": {
"description": "System defined type",
"type": "string",
},
"type_data": {
"$ref": "#/definitions/artifact_type_data",
"description": "Additional fields defined by the system",
},
"uri": {"description": "Raw data location", "type": "string"},
},
"required": ["key", "type"],
"type": "object",
},
"artifact_mode_enum": {
"default": "output",
"enum": ["input", "output"],
"type": "string",
},
"artifact_type_data": {
"properties": {
"content_type": {
"description": "System defined raw data content type",
"type": ["string", "null"],
},
"data_hash": {
"description": "Hash of raw data, without any headers or descriptive parts",
"type": ["string", "null"],
},
"preview": {
"description": "Description or textual data",
"type": ["string", "null"],
},
},
"type": "object",
},
"augmentation": {
"properties": {
"crop_around_rois": {
"description": "Crop image data around all frame ROIs",
"type": ["boolean", "null"],
},
"sets": {
"description": "List of augmentation sets",
"items": {"$ref": "#/definitions/augmentation_set"},
"type": ["array", "null"],
},
},
"type": "object",
},
"augmentation_set": {
"properties": {
"arguments": {
"additionalProperties": {
"additionalProperties": True,
"type": "object",
},
"description": "Arguments dictionary per custom augmentation type.",
"type": ["object", "null"],
},
"cls": {
"description": "Augmentation class",
"type": ["string", "null"],
},
"strength": {
"description": "Augmentation strength. Range [0,).",
"minimum": 0,
"type": ["number", "null"],
},
"types": {
"description": "Augmentation type",
"items": {"type": "string"},
"type": ["array", "null"],
},
},
"type": "object",
},
"configuration_item": {
"properties": {
"description": {
"description": "The parameter description. Optional",
"type": ["string", "null"],
},
"name": {
"description": "Name of the parameter. Should be unique",
"type": ["string", "null"],
},
"type": {
"description": "Type of the parameter. Optional",
"type": ["string", "null"],
},
"value": {
"description": "Value of the parameter",
"type": ["string", "null"],
},
},
"type": "object",
},
"execution": {
"properties": {
"artifacts": {
"description": "Task artifacts",
"items": {"$ref": "#/definitions/artifact"},
"type": ["array", "null"],
},
"dataviews": {
"description": "Additional dataviews for the task",
"items": {"additionalProperties": True, "type": "object"},
"type": ["array", "null"],
},
"docker_cmd": {
"description": "Command for running docker script for the execution of the task",
"type": ["string", "null"],
},
"framework": {
"description": (
"Framework related to the task. Case insensitive. Mandatory for Training tasks. "
),
"type": ["string", "null"],
},
"model": {
"description": "Execution input model ID Not applicable for Register (Import) tasks",
"type": ["string", "null"],
},
"model_desc": {
"additionalProperties": True,
"description": "Json object representing the Model descriptors",
"type": ["object", "null"],
},
"model_labels": {
"additionalProperties": {"type": "integer"},
"description": (
"Json object representing the ids of the labels in the model.\n The keys are the"
" layers' names and the values are the IDs.\n Not applicable for Register"
" (Import) tasks.\n Mandatory for Training tasks"
),
"type": ["object", "null"],
},
"parameters": {
"additionalProperties": True,
"description": "Json object containing the Task parameters",
"type": ["object", "null"],
},
"queue": {
"description": "Queue ID where task was queued.",
"type": ["string", "null"],
},
"test_split": {
"description": "Percentage of frames to use for testing only",
"type": ["integer", "null"],
},
},
"type": "object",
},
"filter_by_roi_enum": {
"default": "label_rules",
"enum": ["disabled", "no_rois", "label_rules"],
"type": "string",
},
"filter_label_rule": {
"properties": {
"conf_range": {
"description": (
"Range of ROI confidence level in the frame (min, max). -1 for not applicable\n "
" Both min and max can be either -1 or positive.\n 2nd number (max) must be"
" either -1 or larger than or equal to the 1st number (min)"
),
"items": {"type": "number"},
"maxItems": 2,
"minItems": 1,
"type": "array",
},
"count_range": {
"description": (
"Range of times ROI appears in the frame (min, max). -1 for not applicable.\n "
" Both integers must be larger than or equal to -1.\n 2nd integer (max) must be"
" either -1 or larger than or equal to the 1st integer (min)"
),
"items": {"type": "integer"},
"maxItems": 2,
"minItems": 1,
"type": "array",
},
"label": {
"description": (
"Lucene format query (see lucene query syntax).\nDefault search field is label.keyword and"
" default operator is AND, so searching for:\n\n'Bus Stop' Blue\n\nis equivalent"
" to:\n\nLabel.keyword:'Bus Stop' AND label.keyword:'Blue'"
),
"type": "string",
},
"must_not": {
"default": False,
"description": (
"If set then the label must not exist or lucene query must not be true.\n The"
" default value is false"
),
"type": "boolean",
},
},
"required": ["label"],
"type": "object",
},
"filter_rule": {
"properties": {
"dataset": {
"description": (
"Dataset ID. Must be a dataset which is in the task's view. If set to '*' all datasets in"
" View are used."
),
"type": "string",
},
"filter_by_roi": {
"description": "Type of filter. Optional, the default value is 'label_rules'",
"oneOf": [
{"$ref": "#/definitions/filter_by_roi_enum"},
{"type": "null"},
],
},
"frame_query": {
"description": "Frame filter, in Lucene query syntax",
"type": ["string", "null"],
},
"label_rules": {
"description": (
"List of FilterLabelRule ('AND' connection)\n\ndisabled - No filtering by ROIs. Select all"
" frames, even if they don't have ROIs (all frames)\n\nno_rois - Select only frames without"
" ROIs (empty frames)\n\nlabel_rules - Select frames according to label rules"
),
"items": {"$ref": "#/definitions/filter_label_rule"},
"type": ["array", "null"],
},
"sources_query": {
"description": "Sources filter, in Lucene query syntax. Filters sources in each frame.",
"type": ["string", "null"],
},
"version": {
"description": (
"Dataset version to apply rule to. Must belong to the dataset and be in the task's view. If"
" set to '*' all version of the datasets in View are used."
),
"type": "string",
},
"weight": {
"description": "Rule weight. Default is 1",
"type": "number",
},
},
"required": ["dataset"],
"type": "object",
},
"filtering": {
"properties": {
"filtering_rules": {
"description": "List of FilterRule ('OR' connection)",
"items": {"$ref": "#/definitions/filter_rule"},
"type": ["array", "null"],
},
"output_rois": {
"description": (
"'all_in_frame' - all rois for a frame are returned\n\n'only_filtered' - only rois which"
" led this frame to be selected\n\n'frame_per_roi' - single roi per frame. Frame can be"
" returned multiple times with a different roi each time.\n\nNote: this should be used for"
" Training tasks only\n\nNote: frame_per_roi implies that only filtered rois will be"
" returned\n "
),
"oneOf": [
{"$ref": "#/definitions/output_rois_enum"},
{"type": "null"},
],
},
},
"type": "object",
},
"input": {
"properties": {
"augmentation": {
"description": "Augmentation parameters. Only for training and testing tasks.",
"oneOf": [
{"$ref": "#/definitions/augmentation"},
{"type": "null"},
],
},
"dataviews": {
"additionalProperties": {"type": "string"},
"description": "Key to DataView ID Mapping",
"type": ["object", "null"],
},
"frames_filter": {
"description": "Filtering params",
"oneOf": [
{"$ref": "#/definitions/filtering"},
{"type": "null"},
],
},
"iteration": {
"description": "Iteration parameters. Not applicable for register (import) tasks.",
"oneOf": [
{"$ref": "#/definitions/iteration"},
{"type": "null"},
],
},
"mapping": {
"description": "Mapping params (see common definitions section)",
"oneOf": [{"$ref": "#/definitions/mapping"}, {"type": "null"}],
},
"view": {
"description": "View params",
"oneOf": [{"$ref": "#/definitions/view"}, {"type": "null"}],
},
},
"type": "object",
},
"iteration": {
"description": "Sequential Iteration API configuration",
"properties": {
"infinite": {
"description": "Infinite iteration",
"type": ["boolean", "null"],
},
"jump": {
"description": "Jump entry",
"oneOf": [{"$ref": "#/definitions/jump"}, {"type": "null"}],
},
"limit": {
"description": (
"Maximum frames per task. If not passed, frames will end when no more matching frames are"
" found, unless infinite is True."
),
"type": ["integer", "null"],
},
"min_sequence": {
"description": (
"Length (in ms) of video clips to return. This is used in random order, and in sequential"
" order only if jumping is provided and only for video frames"
),
"type": ["integer", "null"],
},
"order": {
"description": (
"\n Input frames order. Values: 'sequential', 'random'\n In"
" Sequential mode frames will be returned according to the order in which the frames were"
" added to the dataset."
),
"type": ["string", "null"],
},
"random_seed": {
"description": "Random seed used during iteration",
"type": "integer",
},
},
"required": ["random_seed"],
"type": "object",
},
"jump": {
"properties": {
"time": {
"description": "Max time in milliseconds between frames",
"type": ["integer", "null"],
}
},
"type": "object",
},
"label_source": {
"properties": {
"dataset": {
"description": "Source dataset id. '*' for all datasets in view",
"type": ["string", "null"],
},
"labels": {
"description": (
"List of source labels (AND connection). '*' indicates any label. Labels must exist in at"
" least one of the dataset versions in the task's view"
),
"items": {"type": "string"},
"type": ["array", "null"],
},
"version": {
"description": (
"Source dataset version id. Default is '*' (for all versions in dataset in the view)"
" Version must belong to the selected dataset, and must be in the task's view[i]"
),
"type": ["string", "null"],
},
},
"type": "object",
},
"last_metrics_event": {
"properties": {
"max_value": {
"description": "Maximum value reported",
"type": ["number", "null"],
},
"max_value_iteration": {
"description": "The iteration at which the maximum value was reported",
"type": ["integer", "null"],
},
"metric": {
"description": "Metric name",
"type": ["string", "null"],
},
"min_value": {
"description": "Minimum value reported",
"type": ["number", "null"],
},
"min_value_iteration": {
"description": "The iteration at which the minimum value was reported",
"type": ["integer", "null"],
},
"value": {
"description": "Last value reported",
"type": ["number", "null"],
},
"variant": {
"description": "Variant name",
"type": ["string", "null"],
},
},
"type": "object",
},
"last_metrics_variants": {
"additionalProperties": {"$ref": "#/definitions/last_metrics_event"},
"description": "Last metric events, one for each variant hash",
"type": "object",
},
"mapping": {
"properties": {
"rules": {
"description": "Rules list",
"items": {"$ref": "#/definitions/mapping_rule"},
"type": ["array", "null"],
}
},
"type": "object",
},
"mapping_rule": {
"properties": {
"source": {
"description": "Source label info",
"oneOf": [
{"$ref": "#/definitions/label_source"},
{"type": "null"},
],
},
"target": {
"description": "Target label name",
"type": ["string", "null"],
},
},
"type": "object",
},
"output": {
"properties": {
"destination": {
"description": "Storage id. This is where output files will be stored.",
"type": ["string", "null"],
},
"error": {
"description": "Last error text",
"type": ["string", "null"],
},
"model": {"description": "Model id.", "type": ["string", "null"]},
"result": {
"description": "Task result. Values: 'success', 'failure'",
"type": ["string", "null"],
},
"view": {
"description": "View params",
"oneOf": [{"$ref": "#/definitions/view"}, {"type": "null"}],
},
},
"type": "object",
},
"output_rois_enum": {
"enum": ["all_in_frame", "only_filtered", "frame_per_roi"],
"type": "string",
},
"params_item": {
"properties": {
"description": {
"description": "The parameter description. Optional",
"type": ["string", "null"],
},
"name": {
"description": "Name of the parameter. The combination of section and name should be unique",
"type": ["string", "null"],
},
"section": {
"description": "Section that the parameter belongs to",
"type": ["string", "null"],
},
"type": {
"description": "Type of the parameter. Optional",
"type": ["string", "null"],
},
"value": {
"description": "Value of the parameter",
"type": ["string", "null"],
},
},
"type": "object",
},
"script": {
"properties": {
"binary": {
"default": "python",
"description": "Binary to use when running the script",
"type": ["string", "null"],
},
"branch": {
"description": (
"Repository branch id If not provided and tag not provided, default repository branch "
"is used."
),
"type": ["string", "null"],
},
"diff": {
"description": "Uncommitted changes found in the repository when task was run",
"type": ["string", "null"],
},
"entry_point": {
"description": "Path to execute within the repository",
"type": ["string", "null"],
},
"repository": {
"description": "Name of the repository where the script is located",
"type": ["string", "null"],
},
"requirements": {
"description": "A JSON object containing requirements strings by key",
"type": ["object", "null"],
},
"tag": {
"description": "Repository tag",
"type": ["string", "null"],
},
"version_num": {
"description": (
"Version (changeset) number. Optional (default is head version) Unused if tag is provided."
),
"type": ["string", "null"],
},
"working_dir": {
"description": (
"Path to the folder from which to run the script Default - root folder of repository"
),
"type": ["string", "null"],
},
},
"type": "object",
},
"section_params": {
"additionalProperties": {"$ref": "#/definitions/params_item"},
"description": "Task section params",
"type": "object",
},
"task": {
"properties": {
"active_duration": {
"description": "Task duration time (seconds)",
"type": ["integer", "null"],
},
"comment": {
"description": "Free text comment",
"type": ["string", "null"],
},
"company": {
"description": "Company ID",
"type": ["string", "null"],
},
"completed": {
"description": "Task end time (UTC)",
"format": "date-time",
"type": ["string", "null"],
},
"configuration": {
"additionalProperties": {
"$ref": "#/definitions/configuration_item"
},
"description": "Task configuration params",
"type": ["object", "null"],
},
"container": {
"additionalProperties": {"type": ["string", "null"]},
"description": "Docker container parameters",
"type": ["object", "null"],
},
"created": {
"description": "Task creation time (UTC) ",
"format": "date-time",
"type": ["string", "null"],
},
"execution": {
"description": "Task execution params",
"oneOf": [
{"$ref": "#/definitions/execution"},
{"type": "null"},
],
},
"hyperparams": {
"additionalProperties": {
"$ref": "#/definitions/section_params"
},
"description": "Task hyper params per section",
"type": ["object", "null"],
},
"id": {"description": "Task id", "type": ["string", "null"]},
"input": {
"description": "Task input params",
"oneOf": [{"$ref": "#/definitions/input"}, {"type": "null"}],
},
"last_change": {
"description": "Last time any update was done to the task",
"format": "date-time",
"type": ["string", "null"],
},
"last_iteration": {
"description": "Last iteration reported for this task",
"type": ["integer", "null"],
},
"last_metrics": {
"additionalProperties": {
"$ref": "#/definitions/last_metrics_variants"
},
"description": "Last metric variants (hash to events), one for each metric hash",
"type": ["object", "null"],
},
"last_update": {
"description": (
"Last time this task was created, edited, changed or events for this task were reported"
),
"format": "date-time",
"type": ["string", "null"],
},
"last_worker": {
"description": "ID of last worker that handled the task",
"type": ["string", "null"],
},
"last_worker_report": {
"description": "Last time a worker reported while working on this task",
"format": "date-time",
"type": ["string", "null"],
},
"models": {
"description": "Task models",
"oneOf": [
{"$ref": "#/definitions/task_models"},
{"type": "null"},
],
},
"name": {"description": "Task Name", "type": ["string", "null"]},
"output": {
"description": "Task output params",
"oneOf": [{"$ref": "#/definitions/output"}, {"type": "null"}],
},
"parent": {
"description": "Parent task id",
"type": ["string", "null"],
},
"project": {
"description": "Project ID of the project to which this task is assigned",
"type": ["string", "null"],
},
"published": {
"description": "Task publish time",
"format": "date-time",
"type": ["string", "null"],
},
"runtime": {
"additionalProperties": True,
"description": "Task runtime mapping",
"type": ["object", "null"],
},
"script": {
"description": "Script info",
"oneOf": [{"$ref": "#/definitions/script"}, {"type": "null"}],
},
"started": {
"description": "Task start time (UTC)",
"format": "date-time",
"type": ["string", "null"],
},
"status": {
"description": "",
"oneOf": [
{"$ref": "#/definitions/task_status_enum"},
{"type": "null"},
],
},
"status_changed": {
"description": "Last status change time",
"format": "date-time",
"type": ["string", "null"],
},
"status_message": {
"description": "free text string representing info about the status",
"type": ["string", "null"],
},
"status_reason": {
"description": "Reason for last status change",
"type": ["string", "null"],
},
"system_tags": {
"description": "System tags list. This field is reserved for system use, please don't use it.",
"items": {"type": "string"},
"type": ["array", "null"],
},
"tags": {
"description": "User-defined tags list",
"items": {"type": "string"},
"type": ["array", "null"],
},
"type": {
"description": "Type of task. Values: 'dataset_import', 'annotation', 'training', 'testing'",
"oneOf": [
{"$ref": "#/definitions/task_type_enum"},
{"type": "null"},
],
},
"user": {
"description": "Associated user id",
"type": ["string", "null"],
},
},
"type": "object",
},
"task_model_item": {
"properties": {
"model": {"description": "The model ID", "type": "string"},
"name": {
"description": "The task model name",
"type": "string",
},
},
"required": ["name", "model"],
"type": "object",
},
"task_models": {
"properties": {
"input": {
"description": "The list of task input models",
"items": {"$ref": "#/definitions/task_model_item"},
"type": ["array", "null"],
},
"output": {
"description": "The list of task output models",
"items": {"$ref": "#/definitions/task_model_item"},
"type": ["array", "null"],
},
},
"type": "object",
},
"task_status_enum": {
"enum": [
"created",
"queued",
"in_progress",
"stopped",
"published",
"publishing",
"closed",
"failed",
"completed",
"unknown",
],
"type": "string",
},
"task_type_enum": {
"enum": [
"dataset_import",
"annotation",
"annotation_manual",
"training",
"testing",
"inference",
"data_processing",
"application",
"monitor",
"controller",
"optimizer",
"service",
"qc",
"custom",
],
"type": "string",
},
"view": {
"properties": {
"entries": {
"description": "List of view entries. All tasks must have at least one view.",
"items": {"$ref": "#/definitions/view_entry"},
"type": ["array", "null"],
}
},
"type": "object",
},
"view_entry": {
"properties": {
"dataset": {
"description": "Existing Dataset id",
"type": ["string", "null"],
},
"merge_with": {
"description": "Version ID to merge with",
"type": ["string", "null"],
},
"version": {
"description": "Version id of a version belonging to the dataset",
"type": ["string", "null"],
},
},
"type": "object",
},
},
"properties": {
"scroll_id": {
"description": "Scroll ID that can be used with the next calls to get_all to retrieve more data",
"type": ["string", "null"],
},
"tasks": {
"description": "List of tasks",
"items": {"$ref": "#/definitions/task"},
"type": ["array", "null"],
},
},
"type": "object",
}
def __init__(self, tasks=None, scroll_id=None, **kwargs):
super(GetAllResponse, self).__init__(**kwargs)
self.tasks = tasks
self.scroll_id = scroll_id
@schema_property("tasks")
def tasks(self):
return self._property_tasks
@tasks.setter
def tasks(self, value):
if value is None:
self._property_tasks = None
return
self.assert_isinstance(value, "tasks", (list, tuple))
if any(isinstance(v, dict) for v in value):
value = [Task.from_dict(v) if isinstance(v, dict) else v for v in value]
else:
self.assert_isinstance(value, "tasks", Task, is_array=True)
self._property_tasks = value
@schema_property("scroll_id")
def scroll_id(self):
return self._property_scroll_id
@scroll_id.setter
def scroll_id(self, value):
if value is None:
self._property_scroll_id = None
return
self.assert_isinstance(value, "scroll_id", six.string_types)
self._property_scroll_id = value
| GetAllResponse |
python | streamlit__streamlit | lib/streamlit/errors.py | {
"start": 2024,
"end": 2271
} | class ____(Error): # noqa: N818
"""Exceptions with Markdown in their description.
Instances of this class can use markdown in their messages, which will get
nicely formatted on the frontend.
"""
pass
| MarkdownFormattedException |
python | getsentry__sentry | src/sentry/organizations/services/organization/model.py | {
"start": 13376,
"end": 14225
} | class ____(IntEnum):
INTEGRATION_ADDED = 1
MEMBER_JOINED = 2
SSO_ENABLED = 3
@classmethod
def from_signal(cls, signal: Signal) -> "RpcOrganizationSignal":
for enum, s in cls.signal_map().items():
if s is signal:
return enum
raise ValueError(f"Signal {signal!r} is not a valid RpcOrganizationSignal")
@classmethod
def signal_map(cls) -> Mapping["RpcOrganizationSignal", Signal]:
from sentry.signals import integration_added, member_joined
return {
RpcOrganizationSignal.INTEGRATION_ADDED: integration_added,
RpcOrganizationSignal.MEMBER_JOINED: member_joined,
RpcOrganizationSignal.SSO_ENABLED: sso_enabled,
}
@property
def signal(self) -> Signal:
return self.signal_map()[self]
| RpcOrganizationSignal |
python | scipy__scipy | scipy/stats/_discrete_distns.py | {
"start": 5167,
"end": 7613
} | class ____(rv_discrete):
r"""A beta-binomial discrete random variable.
%(before_notes)s
Notes
-----
The beta-binomial distribution is a binomial distribution with a
probability of success `p` that follows a beta distribution.
The probability mass function for `betabinom` is:
.. math::
f(k) = \binom{n}{k} \frac{B(k + a, n - k + b)}{B(a, b)}
for :math:`k \in \{0, 1, \dots, n\}`, :math:`n \geq 0`, :math:`a > 0`,
:math:`b > 0`, where :math:`B(a, b)` is the beta function.
`betabinom` takes :math:`n`, :math:`a`, and :math:`b` as shape parameters.
%(after_notes)s
References
----------
.. [1] https://en.wikipedia.org/wiki/Beta-binomial_distribution
.. versionadded:: 1.4.0
See Also
--------
beta, binom
%(example)s
"""
def _shape_info(self):
return [_ShapeInfo("n", True, (0, np.inf), (True, False)),
_ShapeInfo("a", False, (0, np.inf), (False, False)),
_ShapeInfo("b", False, (0, np.inf), (False, False))]
def _rvs(self, n, a, b, size=None, random_state=None):
p = random_state.beta(a, b, size)
return random_state.binomial(n, p, size)
def _get_support(self, n, a, b):
return 0, n
def _argcheck(self, n, a, b):
return (n >= 0) & _isintegral(n) & (a > 0) & (b > 0)
def _logpmf(self, x, n, a, b):
k = floor(x)
combiln = -log(n + 1) - betaln(n - k + 1, k + 1)
return combiln + betaln(k + a, n - k + b) - betaln(a, b)
def _pmf(self, x, n, a, b):
return exp(self._logpmf(x, n, a, b))
def _stats(self, n, a, b, moments='mv'):
e_p = a / (a + b)
e_q = 1 - e_p
mu = n * e_p
var = n * (a + b + n) * e_p * e_q / (a + b + 1)
g1, g2 = None, None
if 's' in moments:
g1 = 1.0 / sqrt(var)
g1 *= (a + b + 2 * n) * (b - a)
g1 /= (a + b + 2) * (a + b)
if 'k' in moments:
g2 = (a + b).astype(e_p.dtype)
g2 *= (a + b - 1 + 6 * n)
g2 += 3 * a * b * (n - 2)
g2 += 6 * n ** 2
g2 -= 3 * e_p * b * n * (6 - n)
g2 -= 18 * e_p * e_q * n ** 2
g2 *= (a + b) ** 2 * (1 + a + b)
g2 /= (n * a * b * (a + b + 2) * (a + b + 3) * (a + b + n))
g2 -= 3
return mu, var, g1, g2
betabinom = betabinom_gen(name='betabinom')
| betabinom_gen |
python | google__pytype | pytype/tools/xref/testdata/attr.py | {
"start": 551,
"end": 1124
} | class ____:
def f(self, x):
#- @bar ref AttrBar
self.bar[x]
#- @init_bar defines/binding FnInitBar
#- @self defines/binding ArgBSelf
#- FnInitBar.node/kind function
#- FnInitBar param.0 ArgBSelf
def init_bar(self):
#- @self ref ArgBSelf
#- @bar defines/binding AttrBar
self.bar = []
return self
## Attribute accesses could span several lines
def baz(self):
(self.
#- @init_bar ref FnInitBar
init_bar()
#- @bar ref AttrBar
.bar)
#- @C defines/binding ClassC
#- ClassC.node/kind record
#- ClassC.subkind class
| B |
python | pydantic__pydantic | pydantic/types.py | {
"start": 100894,
"end": 103365
} | class ____:
@classmethod
def __get_pydantic_core_schema__(cls, source_type: Any, handler: GetCoreSchemaHandler) -> CoreSchema:
python_schema = handler(source_type)
return core_schema.json_or_python_schema(json_schema=core_schema.any_schema(), python_schema=python_schema)
if TYPE_CHECKING:
# This seems to only be necessary for mypy
JsonValue: TypeAlias = Union[
list['JsonValue'],
dict[str, 'JsonValue'],
str,
bool,
int,
float,
None,
]
"""A `JsonValue` is used to represent a value that can be serialized to JSON.
It may be one of:
* `list['JsonValue']`
* `dict[str, 'JsonValue']`
* `str`
* `bool`
* `int`
* `float`
* `None`
The following example demonstrates how to use `JsonValue` to validate JSON data,
and what kind of errors to expect when input data is not json serializable.
```python
import json
from pydantic import BaseModel, JsonValue, ValidationError
class Model(BaseModel):
j: JsonValue
valid_json_data = {'j': {'a': {'b': {'c': 1, 'd': [2, None]}}}}
invalid_json_data = {'j': {'a': {'b': ...}}}
print(repr(Model.model_validate(valid_json_data)))
#> Model(j={'a': {'b': {'c': 1, 'd': [2, None]}}})
print(repr(Model.model_validate_json(json.dumps(valid_json_data))))
#> Model(j={'a': {'b': {'c': 1, 'd': [2, None]}}})
try:
Model.model_validate(invalid_json_data)
except ValidationError as e:
print(e)
'''
1 validation error for Model
j.dict.a.dict.b
input was not a valid JSON value [type=invalid-json-value, input_value=Ellipsis, input_type=ellipsis]
'''
```
"""
else:
JsonValue = TypeAliasType(
'JsonValue',
Annotated[
Union[
Annotated[list['JsonValue'], Tag('list')],
Annotated[dict[str, 'JsonValue'], Tag('dict')],
Annotated[str, Tag('str')],
Annotated[bool, Tag('bool')],
Annotated[int, Tag('int')],
Annotated[float, Tag('float')],
Annotated[None, Tag('NoneType')],
],
Discriminator(
_get_type_name,
custom_error_type='invalid-json-value',
custom_error_message='input was not a valid JSON value',
),
_AllowAnyJson,
],
)
| _AllowAnyJson |
python | google__pytype | pytype/tests/test_cmp1.py | {
"start": 4798,
"end": 5957
} | class ____(test_base.BaseTest):
"""Test for comparisons. Also test overloading."""
OPS = ["<", "<=", ">", ">="]
def _test_concrete(self, op):
errors = self.CheckWithErrors(f"""
def f(x, y):
return x {op} y # unsupported-operands[e]
assert_type(f(1, 2), bool)
f(1, "a") # <- error raised from here but in line 2
assert_type(f(object(), "x"), bool)
""")
self.assertErrorRegexes(errors, {"e": "Types.*int.*str"})
self.assertErrorRegexes(errors, {"e": "Called from.*line 4"})
def test_concrete(self):
for op in self.OPS:
self._test_concrete(op)
def test_literal(self):
for op in self.OPS:
errors = self.CheckWithErrors(f"""
'1' {op} 2 # unsupported-operands[e]
""")
self.assertErrorRegexes(errors, {"e": "Types.*str.*int"})
def test_overloaded(self):
ty = self.Infer("""
class Foo:
def __lt__(self, x):
return 3j
def f():
return Foo() < 3
""")
self.assertTypesMatchPytd(
ty,
"""
class Foo:
def __lt__(self, x) -> complex: ...
def f() -> complex: ...
""",
)
| CmpTest |
python | ansible__ansible | lib/ansible/playbook/task.py | {
"start": 2355,
"end": 25203
} | class ____(Base, Conditional, Taggable, CollectionSearch, Notifiable, Delegatable):
"""
A task is a language feature that represents a call to a module, with given arguments and other parameters.
A handler is a subclass of a task.
Usage:
Task.load(datastructure) -> Task
Task.something(...)
"""
_post_validate_object = True
# =================================================================================
# ATTRIBUTES
# load_<attribute_name> and
# validate_<attribute_name>
# will be used if defined
# might be possible to define others
# NOTE: ONLY set defaults on task attributes that are not inheritable,
# inheritance is only triggered if the 'current value' is Sentinel,
# default can be set at play/top level object and inheritance will take it's course.
args = t.cast(dict, NonInheritableFieldAttribute(isa='dict', default=dict))
action = t.cast(str, NonInheritableFieldAttribute(isa='string'))
async_val = NonInheritableFieldAttribute(isa='int', default=0, alias='async')
changed_when = NonInheritableFieldAttribute(isa='list', default=list)
delay = NonInheritableFieldAttribute(isa='float', default=5)
failed_when = NonInheritableFieldAttribute(isa='list', default=list)
loop = NonInheritableFieldAttribute(isa='list')
loop_control = NonInheritableFieldAttribute(isa='class', class_type=LoopControl, default=LoopControl)
poll = NonInheritableFieldAttribute(isa='int', default=C.DEFAULT_POLL_INTERVAL)
register = NonInheritableFieldAttribute(isa='string', static=True)
retries = NonInheritableFieldAttribute(isa='int') # default is set in TaskExecutor
until = NonInheritableFieldAttribute(isa='list', default=list)
# deprecated, used to be loop and loop_args but loop has been repurposed
loop_with = NonInheritableFieldAttribute(isa='string', private=True)
def __init__(self, block=None, role=None, task_include=None) -> None:
""" constructors a task, without the Task.load classmethod, it will be pretty blank """
self._role = role
self._parent = None
self.implicit = False
self._resolved_action: str | None = None
if task_include:
self._parent = task_include
else:
self._parent = block
super(Task, self).__init__()
_resolved_action_warning = (
"A plugin is sampling the task's resolved_action when it is not resolved. "
"This can be caused by callback plugins using the resolved_action attribute too "
"early (such as in v2_playbook_on_task_start for a task using the action/local_action "
"keyword), or too late (such as in v2_runner_on_ok for a task with a loop). "
"To maximize compatibility with user features, callback plugins should "
"only use this attribute in v2_runner_on_ok/v2_runner_on_failed for tasks "
"without a loop, and v2_runner_item_on_ok/v2_runner_item_on_failed otherwise."
)
@property
def resolved_action(self) -> str | None:
"""The templated and resolved FQCN of the task action or None.
If the action is a template, callback plugins can only use this value in certain methods.
- v2_runner_on_ok and v2_runner_on_failed if there's no task loop
- v2_runner_item_on_ok and v2_runner_item_on_failed if there is a task loop
"""
# Consider deprecating this because it's difficult to use?
# Moving it to the task result would improve the no-loop limitation on v2_runner_on_ok
# but then wouldn't be accessible to v2_playbook_on_task_start, *_on_skipped, etc.
if self._resolved_action is not None:
return self._resolved_action
if not is_possibly_template(self.action):
try:
return self._resolve_action(self.action)
except AnsibleParserError:
display.warning(self._resolved_action_warning, obj=self.action)
else:
display.warning(self._resolved_action_warning, obj=self.action)
return None
def get_name(self, include_role_fqcn=True):
""" return the name of the task """
if self._role:
role_name = self._role.get_name(include_role_fqcn=include_role_fqcn)
if self._role and self.name:
return "%s : %s" % (role_name, self.name)
elif self.name:
return self.name
else:
if self._role:
return "%s : %s" % (role_name, self.action)
else:
return "%s" % (self.action,)
def _merge_kv(self, ds):
if ds is None:
return ""
elif isinstance(ds, str):
return ds
elif isinstance(ds, dict):
buf = ""
for (k, v) in ds.items():
if k.startswith('_'):
continue
buf = buf + "%s=%s " % (k, v)
buf = buf.strip()
return buf
@staticmethod
def load(data, block=None, role=None, task_include=None, variable_manager=None, loader=None):
task = Task(block=block, role=role, task_include=task_include)
return task.load_data(data, variable_manager=variable_manager, loader=loader)
def _post_validate_module_defaults(self, attr: str, value: t.Any, templar: TemplateEngine) -> t.Any:
"""Override module_defaults post validation to disable templating, which is handled by args post validation."""
return value
def _post_validate_args(self, attr: str, value: t.Any, templar: TemplateEngine) -> dict[str, t.Any]:
try:
self.action = templar.template(self.action)
except AnsibleValueOmittedError:
# some strategies may trigger this error when templating task.action, but backstop here if not
raise AnsibleParserError("Omit is not valid for the `action` keyword.", obj=self.action) from None
action_context = action_loader.get_with_context(self.action, collection_list=self.collections, class_only=True)
if not action_context.plugin_load_context.resolved:
module_or_action_context = module_loader.find_plugin_with_context(self.action, collection_list=self.collections)
if not module_or_action_context.resolved:
raise AnsibleError(f"Cannot resolve {self.action!r} to an action or module.", obj=self.action)
action_context = action_loader.get_with_context('ansible.legacy.normal', collection_list=self.collections, class_only=True)
else:
module_or_action_context = action_context.plugin_load_context
self._resolved_action = module_or_action_context.resolved_fqcn
action_type: type[ActionBase] = action_context.object
vp = value.pop('_variable_params', None)
supports_raw_params = action_type.supports_raw_params or module_or_action_context.resolved_fqcn in RAW_PARAM_MODULES
if supports_raw_params:
raw_params_to_finalize = None
else:
raw_params_to_finalize = value.pop('_raw_params', None) # always str or None
# TaskArgsFinalizer performs more thorough type checking, but this provides a friendlier error message for a subset of detected cases.
if raw_params_to_finalize and not is_possibly_all_template(raw_params_to_finalize):
raise AnsibleError(f'Action {module_or_action_context.resolved_fqcn!r} does not support raw params.', obj=self.action)
args_finalizer = _task.TaskArgsFinalizer(
_get_action_arg_defaults(module_or_action_context.resolved_fqcn, self, templar),
vp,
raw_params_to_finalize,
value,
templar=templar,
)
try:
with action_type.get_finalize_task_args_context() as finalize_context:
args = args_finalizer.finalize(action_type.finalize_task_arg, context=finalize_context)
except Exception as ex:
raise AnsibleError(f'Finalization of task args for {module_or_action_context.resolved_fqcn!r} failed.', obj=self.action) from ex
if self._origin:
args = self._origin.tag(args)
return args
def _get_meta(self) -> str | None:
# FUTURE: validate meta and return an enum instead of a str
# meta currently does not support being templated, so we can cheat
if self.action in C._ACTION_META:
return self.args.get('_raw_params')
return None
def __repr__(self):
""" returns a human-readable representation of the task """
if meta := self._get_meta():
return f"TASK: meta ({meta})"
else:
return "TASK: %s" % self.get_name()
def _preprocess_with_loop(self, ds, new_ds, k, v):
""" take a lookup plugin name and store it correctly """
loop_name = k.removeprefix("with_")
if new_ds.get('loop') is not None or new_ds.get('loop_with') is not None:
raise AnsibleError("duplicate loop in task: %s" % loop_name, obj=ds)
if v is None:
raise AnsibleError("you must specify a value when using %s" % k, obj=ds)
new_ds['loop_with'] = loop_name
new_ds['loop'] = v
def preprocess_data(self, ds):
"""
tasks are especially complex arguments so need pre-processing.
keep it short.
"""
if not isinstance(ds, dict):
raise AnsibleAssertionError('ds (%s) should be a dict but was a %s' % (ds, type(ds)))
# the new, cleaned datastructure, which will have legacy items reduced to a standard structure suitable for the
# attributes of the task class; copy any tagged data to preserve things like origin
new_ds = AnsibleTagHelper.tag_copy(ds, {})
# since this affects the task action parsing, we have to resolve in preprocess instead of in typical validator
default_collection = AnsibleCollectionConfig.default_collection
collections_list = ds.get('collections')
if collections_list is None:
# use the parent value if our ds doesn't define it
collections_list = self.collections
else:
# Validate this untemplated field early on to guarantee we are dealing with a list.
# This is also done in CollectionSearch._load_collections() but this runs before that call.
collections_list = self.get_validated_value('collections', self.fattributes.get('collections'), collections_list, None)
if default_collection and not self._role: # FIXME: and not a collections role
if collections_list:
if default_collection not in collections_list:
collections_list.insert(0, default_collection)
else:
collections_list = [default_collection]
if collections_list and 'ansible.builtin' not in collections_list and 'ansible.legacy' not in collections_list:
collections_list.append('ansible.legacy')
if collections_list:
ds['collections'] = collections_list
# use the args parsing class to determine the action, args,
# and the delegate_to value from the various possible forms
# supported as legacy
args_parser = ModuleArgsParser(task_ds=ds, collection_list=collections_list)
try:
(action, args, delegate_to) = args_parser.parse()
except AnsibleParserError as ex:
# if the raises exception was created with obj=ds args, then it includes the detail
# so we dont need to add it so we can just re raise.
if ex.obj:
raise
# But if it wasn't, we can add the yaml object now to get more detail
raise AnsibleParserError("Error parsing task arguments.", obj=ds) from ex
if args_parser._resolved_action is not None:
self._resolved_action = args_parser._resolved_action
new_ds['action'] = action
new_ds['args'] = args
new_ds['delegate_to'] = delegate_to
# we handle any 'vars' specified in the ds here, as we may
# be adding things to them below (special handling for includes).
# When that deprecated feature is removed, this can be too.
if 'vars' in ds:
# _load_vars is defined in Base, and is used to load a dictionary
# or list of dictionaries in a standard way
new_ds['vars'] = self._load_vars(None, ds.get('vars'))
else:
new_ds['vars'] = dict()
for (k, v) in ds.items():
if k in ('action', 'local_action', 'args', 'delegate_to') or k == action or k == 'shell':
# we don't want to re-assign these values, which were determined by the ModuleArgsParser() above
continue
elif k.startswith('with_') and k.removeprefix("with_") in lookup_loader:
# transform into loop property
self._preprocess_with_loop(ds, new_ds, k, v)
elif C.INVALID_TASK_ATTRIBUTE_FAILED or k in self.fattributes:
new_ds[k] = v
else:
display.warning("Ignoring invalid attribute: %s" % k)
return super(Task, self).preprocess_data(new_ds)
def _load_loop_control(self, attr, ds):
if not isinstance(ds, dict):
raise AnsibleParserError(
"the `loop_control` value must be specified as a dictionary and cannot "
"be a variable itself (though it can contain variables)",
obj=ds,
)
return LoopControl.load(data=ds, variable_manager=self._variable_manager, loader=self._loader)
def _validate_attributes(self, ds):
try:
super(Task, self)._validate_attributes(ds)
except AnsibleParserError as e:
e.message += '\nThis error can be suppressed as a warning using the "invalid_task_attribute_failed" configuration'
raise e
def _validate_changed_when(self, attr, name, value):
if not isinstance(value, list):
setattr(self, name, [value])
def _validate_failed_when(self, attr, name, value):
if not isinstance(value, list):
setattr(self, name, [value])
def _validate_register(self, attr, name, value):
if value is not None:
try:
validate_variable_name(value)
except Exception as ex:
raise AnsibleParserError("Invalid 'register' specified.", obj=value) from ex
def post_validate(self, templar):
"""
Override of base class post_validate, to also do final validation on
the block and task include (if any) to which this task belongs.
"""
if self._parent:
self._parent.post_validate(templar)
super(Task, self).post_validate(templar)
def _post_validate_loop(self, attr, value, templar):
"""
Override post validation for the loop field, which is templated
specially in the TaskExecutor class when evaluating loops.
"""
return value
def _post_validate_name(self, attr, value, templar):
"""
Override post-validation behavior for `name` to be best-effort for the vars available.
Direct access via `post_validate_attribute` writes the value back to provide a stable value.
This value is individually post-validated early by strategies for the benefit of callbacks.
"""
with _marker_behaviors.ReplacingMarkerBehavior.warning_context() as replacing_behavior:
self.name = templar.extend(marker_behavior=replacing_behavior).template(value, options=TemplateOptions(value_for_omit=None))
return self.name
def _post_validate_environment(self, attr, value, templar):
"""
Override post validation of vars on the play, as we don't want to
template these too early.
"""
env = {}
# FUTURE: kill this with fire
def _parse_env_kv(k, v):
try:
env[k] = templar.template(v)
except AnsibleValueOmittedError:
# skip this value
return
except AnsibleUndefinedVariable as e:
error = to_native(e)
if self.action in C._ACTION_FACT_GATHERING and 'ansible_facts.env' in error or 'ansible_env' in error:
# ignore as fact gathering is required for 'env' facts
return
raise
# NB: the environment FieldAttribute definition ensures that value is always a list
for env_item in value:
if isinstance(env_item, dict):
for k in env_item:
_parse_env_kv(k, env_item[k])
else:
try:
isdict = templar.template(env_item)
except AnsibleValueOmittedError:
continue
if isinstance(isdict, dict):
env |= isdict
else:
display.warning("could not parse environment value, skipping: %s" % value)
return env
def _post_validate_changed_when(self, attr, value, templar):
"""
changed_when is evaluated after the execution of the task is complete,
and should not be templated during the regular post_validate step.
"""
return value
def _post_validate_failed_when(self, attr, value, templar):
"""
failed_when is evaluated after the execution of the task is complete,
and should not be templated during the regular post_validate step.
"""
return value
def _post_validate_until(self, attr, value, templar):
"""
until is evaluated after the execution of the task is complete,
and should not be templated during the regular post_validate step.
"""
return value
def get_vars(self):
all_vars = dict()
if self._parent:
all_vars |= self._parent.get_vars()
all_vars |= self.vars
if 'tags' in all_vars:
del all_vars['tags']
if 'when' in all_vars:
del all_vars['when']
return all_vars
def get_include_params(self):
all_vars = dict()
if self._parent:
all_vars |= self._parent.get_include_params()
if self.action in C._ACTION_ALL_INCLUDES:
all_vars |= self.vars
return all_vars
def copy(self, exclude_parent: bool = False, exclude_tasks: bool = False) -> Task:
new_me = super(Task, self).copy()
new_me._parent = None
if self._parent and not exclude_parent:
new_me._parent = self._parent.copy(exclude_tasks=exclude_tasks)
new_me._role = None
if self._role:
new_me._role = self._role
new_me.implicit = self.implicit
new_me._resolved_action = self._resolved_action
new_me._uuid = self._uuid
return new_me
def set_loader(self, loader):
"""
Sets the loader on this object and recursively on parent, child objects.
This is used primarily after the Task has been serialized/deserialized, which
does not preserve the loader.
"""
self._loader = loader
if self._parent:
self._parent.set_loader(loader)
def _get_parent_attribute(self, attr, omit=False):
"""
Generic logic to get the attribute or parent attribute for a task value.
"""
fattr = self.fattributes[attr]
extend = fattr.extend
prepend = fattr.prepend
try:
# omit self, and only get parent values
if omit:
value = Sentinel
else:
value = getattr(self, f'_{attr}', Sentinel)
# If parent is static, we can grab attrs from the parent
# otherwise, defer to the grandparent
if getattr(self._parent, 'statically_loaded', True):
_parent = self._parent
else:
_parent = self._parent._parent
if _parent and (value is Sentinel or extend):
if getattr(_parent, 'statically_loaded', True):
# vars are always inheritable, other attributes might not be for the parent but still should be for other ancestors
if attr != 'vars' and hasattr(_parent, '_get_parent_attribute'):
parent_value = _parent._get_parent_attribute(attr)
else:
parent_value = getattr(_parent, f'_{attr}', Sentinel)
if extend:
value = self._extend_value(value, parent_value, prepend)
else:
value = parent_value
except KeyError:
pass
return value
def all_parents_static(self):
if self._parent:
return self._parent.all_parents_static()
return True
def get_first_parent_include(self):
from ansible.playbook.task_include import TaskInclude
if self._parent:
if isinstance(self._parent, TaskInclude):
return self._parent
return self._parent.get_first_parent_include()
return None
def get_play(self):
parent = self._parent
while not isinstance(parent, Block):
parent = parent._parent
return parent._play
def dump_attrs(self):
"""Override to smuggle important non-FieldAttribute values back to the controller."""
attrs = super().dump_attrs()
attrs.update(_resolved_action=self._resolved_action)
return attrs
def from_attrs(self, attrs):
super().from_attrs(attrs)
# from_attrs is only used to create a finalized task
# from attrs from the Worker/TaskExecutor
# Those attrs are finalized and squashed in the TE
# and controller side use needs to reflect that
self._finalized = True
self._squashed = True
def _resolve_conditional(
self,
conditional: list[str | bool],
variables: dict[str, t.Any],
*,
result_context: dict[str, t.Any] | None = None,
) -> bool:
"""Loops through the conditionals set on this object, returning False if any of them evaluate as such, as well as the condition that was False."""
engine = TemplateEngine(self._loader, variables=variables)
for item in conditional:
if not engine.evaluate_conditional(item):
if result_context is not None:
result_context.update(false_condition=item)
return False
return True
| Task |
python | dagster-io__dagster | python_modules/dagster/dagster_tests/daemon_tests/test_locations/run_config_assets_workspace.py | {
"start": 164,
"end": 203
} | class ____(dg.Config):
a: int
| ConfigA |
python | lxml__lxml | doc/s5/ep2008/atom.py | {
"start": 10883,
"end": 11140
} | class ____(AtomElement):
"""
For ``<feed>`` elements.
"""
@property
def feed(self):
return self
entries = _findall_property('entry')
title = _text_element_property('title')
author = _element_property('author')
| Feed |
python | huggingface__transformers | src/transformers/models/chameleon/modeling_chameleon.py | {
"start": 18769,
"end": 22125
} | class ____(GradientCheckpointingLayer):
def __init__(self, config: ChameleonConfig, layer_idx: int):
super().__init__()
self.hidden_size = config.hidden_size
self.self_attn = ChameleonAttention(config=config, layer_idx=layer_idx)
self.mlp = ChameleonMLP(config)
self.input_layernorm = ChameleonRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
self.post_attention_layernorm = ChameleonRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
output_attentions: Optional[bool] = False,
use_cache: Optional[bool] = False,
cache_position: Optional[torch.LongTensor] = None,
position_embeddings: Optional[torch.Tensor] = None,
**kwargs,
) -> tuple[torch.FloatTensor, Optional[tuple[torch.FloatTensor, torch.FloatTensor]]]:
"""
Args:
hidden_states (`torch.FloatTensor`):
input to the layer of shape `(batch, seq_len, embed_dim)`
attention_mask (`torch.FloatTensor`, *optional*):
attention mask of size `(batch_size, sequence_length)` if flash attention is used or `(batch_size, 1,
query_sequence_length, key_sequence_length)` if default attention is used.
position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Indices of positions of each input sequence tokens in the position embeddings
past_key_values (`Cache`, *optional*): cached past key and value projection states
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
use_cache (`bool`, *optional*):
If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
(see `past_key_values`).
cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
Indices depicting the position of the input sequence tokens in the sequence.
"""
residual = hidden_states
# Self Attention
hidden_states, self_attn_weights = self.self_attn(
hidden_states=hidden_states,
attention_mask=attention_mask,
position_ids=position_ids,
past_key_values=past_key_values,
output_attentions=output_attentions,
use_cache=use_cache,
cache_position=cache_position,
position_embeddings=position_embeddings,
**kwargs,
)
hidden_states = self.input_layernorm(hidden_states)
hidden_states = residual + hidden_states
# Fully Connected
residual = hidden_states
hidden_states = self.mlp(hidden_states)
hidden_states = self.post_attention_layernorm(hidden_states)
hidden_states = residual + hidden_states
outputs = (hidden_states,)
if output_attentions:
outputs += (self_attn_weights,)
return outputs
| ChameleonSwinDecoderLayer |
python | PrefectHQ__prefect | src/prefect/futures.py | {
"start": 4632,
"end": 5645
} | class ____(PrefectFuture[R]):
"""
A Prefect future that represents the eventual execution of a task run.
"""
def __init__(self, task_run_id: uuid.UUID):
self._task_run_id = task_run_id
self._final_state: State[R] | None = None
@property
def task_run_id(self) -> uuid.UUID:
"""The ID of the task run associated with this future"""
return self._task_run_id
@property
def state(self) -> State:
"""The current state of the task run associated with this future"""
if self._final_state:
return self._final_state
client = get_client(sync_client=True)
try:
task_run = client.read_task_run(task_run_id=self.task_run_id)
except ObjectNotFound:
# We'll be optimistic and assume this task will eventually start
# TODO: Consider using task run events to wait for the task to start
return Pending()
return task_run.state or Pending()
| PrefectTaskRunFuture |
python | kamyu104__LeetCode-Solutions | Python/smallest-string-with-a-given-numeric-value.py | {
"start": 29,
"end": 468
} | class ____(object):
def getSmallestString(self, n, k):
"""
:type n: int
:type k: int
:rtype: str
"""
MAX_DIFF = ord('z')-ord('a')
k -= n
result = ['a']*n
for i in reversed(xrange(n)):
tmp = min(k, MAX_DIFF)
result[i] = chr(ord('a')+tmp)
k -= tmp
if k == 0:
break
return "".join(result)
| Solution |
python | sympy__sympy | sympy/stats/rv.py | {
"start": 5698,
"end": 6285
} | class ____(PSpace):
"""
Represents the probabilities of a set of random events that can be
attributed to a single variable/symbol.
"""
def __new__(cls, s, distribution):
s = _symbol_converter(s)
return Basic.__new__(cls, s, distribution)
@property
def value(self):
return RandomSymbol(self.symbol, self)
@property
def symbol(self):
return self.args[0]
@property
def distribution(self):
return self.args[1]
@property
def pdf(self):
return self.distribution.pdf(self.symbol)
| SinglePSpace |
python | PrefectHQ__prefect | tests/server/orchestration/api/test_deployment_schedules.py | {
"start": 7733,
"end": 11815
} | class ____:
@pytest.fixture
async def schedule_to_update(
self,
get_server_session: AsyncSessionGetter,
deployment_with_schedules,
):
async with get_server_session() as session:
schedules = await models.deployments.read_deployment_schedules(
session=session,
deployment_id=deployment_with_schedules.id,
)
return schedules[0]
async def test_can_update_schedules_for_deployment(
self,
get_server_session: AsyncSessionGetter,
client: AsyncClient,
deployment_with_schedules,
schedules_url: Callable[..., str],
schedule_to_update: schemas.core.DeploymentSchedule,
):
assert schedule_to_update.active is True
url = schedules_url(
deployment_with_schedules.id, schedule_id=schedule_to_update.id
)
response = await client.patch(
url,
json=schemas.actions.DeploymentScheduleUpdate(
active=False, slug="new-slug"
).model_dump(exclude_unset=True),
)
assert response.status_code == status.HTTP_204_NO_CONTENT
async with get_server_session() as session:
schedules = await models.deployments.read_deployment_schedules(
session=session,
deployment_id=deployment_with_schedules.id,
)
the_schedule = next(
schedule for schedule in schedules if schedule.id == schedule_to_update.id
)
assert the_schedule.active is False
assert the_schedule.slug == "new-slug"
async def test_404_non_existent_deployment(
self,
client: AsyncClient,
schedules_url: Callable[..., str],
schedule_to_update: schemas.core.DeploymentSchedule,
):
assert schedule_to_update.active is True
url = schedules_url(uuid4(), schedule_id=schedule_to_update.id)
response = await client.patch(
url,
json=schemas.actions.DeploymentScheduleUpdate(active=False).model_dump(
exclude_unset=True
),
)
assert response.status_code == status.HTTP_404_NOT_FOUND
assert b"Deployment" in response.content
async def test_404_non_existent_schedule(
self,
deployment,
client: AsyncClient,
schedules_url: Callable[..., str],
):
url = schedules_url(deployment.id, schedule_id=uuid4())
response = await client.patch(
url,
json=schemas.actions.DeploymentScheduleUpdate(active=False).model_dump(
exclude_unset=True
),
)
assert response.status_code == status.HTTP_404_NOT_FOUND
assert b"Schedule" in response.content
async def test_updating_schedule_removes_scheduled_runs(
self,
db: PrefectDBInterface,
get_server_session: AsyncSessionGetter,
client: AsyncClient,
deployment_with_schedules,
schedules_url: Callable[..., str],
schedule_to_update: schemas.core.DeploymentSchedule,
scheduled_flow_runs,
):
assert schedule_to_update.active is True
url = schedules_url(
deployment_with_schedules.id, schedule_id=schedule_to_update.id
)
response = await client.patch(
url,
json=schemas.actions.DeploymentScheduleUpdate(active=False).model_dump(
exclude_unset=True
),
)
assert response.status_code == status.HTTP_204_NO_CONTENT
async with get_server_session() as session:
result = await session.execute(
sa.select(db.FlowRun).where(
db.FlowRun.deployment_id == deployment_with_schedules.id,
db.FlowRun.auto_scheduled.is_(True),
)
)
flow_runs = result.scalars().all()
# Deleting the schedule should remove all scheduled runs
assert len(flow_runs) == 0
| TestUpdateDeploymentSchedule |
python | pytorch__pytorch | test/dynamo/cpython/3_13/test_set.py | {
"start": 57294,
"end": 58031
} | class ____:
def test_copy(self):
dup = self.set.copy()
dup_list = sorted(dup, key=repr)
set_list = sorted(self.set, key=repr)
self.assertEqual(len(dup_list), len(set_list))
for i in range(len(dup_list)):
self.assertTrue(dup_list[i] is set_list[i])
def test_deep_copy(self):
dup = copy.deepcopy(self.set)
##print type(dup), repr(dup)
dup_list = sorted(dup, key=repr)
set_list = sorted(self.set, key=repr)
self.assertEqual(len(dup_list), len(set_list))
for i in range(len(dup_list)):
self.assertEqual(dup_list[i], set_list[i])
#------------------------------------------------------------------------------
| _TestCopying |
python | mwaskom__seaborn | seaborn/_marks/line.py | {
"start": 4102,
"end": 7156
} | class ____(Mark):
"""
A faster but less-flexible mark for drawing many paths.
See also
--------
Path : A mark connecting data points in the order they appear.
Examples
--------
.. include:: ../docstrings/objects.Paths.rst
"""
color: MappableColor = Mappable("C0")
alpha: MappableFloat = Mappable(1)
linewidth: MappableFloat = Mappable(rc="lines.linewidth")
linestyle: MappableString = Mappable(rc="lines.linestyle")
_sort: ClassVar[bool] = False
def __post_init__(self):
# LineCollection artists have a capstyle property but don't source its value
# from the rc, so we do that manually here. Unfortunately, because we add
# only one LineCollection, we have the use the same capstyle for all lines
# even when they are dashed. It's a slight inconsistency, but looks fine IMO.
self.artist_kws.setdefault("capstyle", mpl.rcParams["lines.solid_capstyle"])
def _plot(self, split_gen, scales, orient):
line_data = {}
for keys, data, ax in split_gen(keep_na=not self._sort):
if ax not in line_data:
line_data[ax] = {
"segments": [],
"colors": [],
"linewidths": [],
"linestyles": [],
}
segments = self._setup_segments(data, orient)
line_data[ax]["segments"].extend(segments)
n = len(segments)
vals = resolve_properties(self, keys, scales)
vals["color"] = resolve_color(self, keys, scales=scales)
line_data[ax]["colors"].extend([vals["color"]] * n)
line_data[ax]["linewidths"].extend([vals["linewidth"]] * n)
line_data[ax]["linestyles"].extend([vals["linestyle"]] * n)
for ax, ax_data in line_data.items():
lines = mpl.collections.LineCollection(**ax_data, **self.artist_kws)
# Handle datalim update manually
# https://github.com/matplotlib/matplotlib/issues/23129
ax.add_collection(lines, autolim=False)
if ax_data["segments"]:
xy = np.concatenate(ax_data["segments"])
ax.update_datalim(xy)
def _legend_artist(self, variables, value, scales):
key = resolve_properties(self, {v: value for v in variables}, scales)
artist_kws = self.artist_kws.copy()
capstyle = artist_kws.pop("capstyle")
artist_kws["solid_capstyle"] = capstyle
artist_kws["dash_capstyle"] = capstyle
return mpl.lines.Line2D(
[], [],
color=key["color"],
linewidth=key["linewidth"],
linestyle=key["linestyle"],
**artist_kws,
)
def _setup_segments(self, data, orient):
if self._sort:
data = data.sort_values(orient, kind="mergesort")
# Column stack to avoid block consolidation
xy = np.column_stack([data["x"], data["y"]])
return [xy]
@document_properties
@dataclass
| Paths |
python | MorvanZhou__Reinforcement-learning-with-tensorflow | contents/9_Deep_Deterministic_Policy_Gradient_DDPG/DDPG.py | {
"start": 920,
"end": 4166
} | class ____(object):
def __init__(self, sess, action_dim, action_bound, learning_rate, replacement):
self.sess = sess
self.a_dim = action_dim
self.action_bound = action_bound
self.lr = learning_rate
self.replacement = replacement
self.t_replace_counter = 0
with tf.variable_scope('Actor'):
# input s, output a
self.a = self._build_net(S, scope='eval_net', trainable=True)
# input s_, output a, get a_ for critic
self.a_ = self._build_net(S_, scope='target_net', trainable=False)
self.e_params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='Actor/eval_net')
self.t_params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='Actor/target_net')
if self.replacement['name'] == 'hard':
self.t_replace_counter = 0
self.hard_replace = [tf.assign(t, e) for t, e in zip(self.t_params, self.e_params)]
else:
self.soft_replace = [tf.assign(t, (1 - self.replacement['tau']) * t + self.replacement['tau'] * e)
for t, e in zip(self.t_params, self.e_params)]
def _build_net(self, s, scope, trainable):
with tf.variable_scope(scope):
init_w = tf.random_normal_initializer(0., 0.3)
init_b = tf.constant_initializer(0.1)
net = tf.layers.dense(s, 30, activation=tf.nn.relu,
kernel_initializer=init_w, bias_initializer=init_b, name='l1',
trainable=trainable)
with tf.variable_scope('a'):
actions = tf.layers.dense(net, self.a_dim, activation=tf.nn.tanh, kernel_initializer=init_w,
bias_initializer=init_b, name='a', trainable=trainable)
scaled_a = tf.multiply(actions, self.action_bound, name='scaled_a') # Scale output to -action_bound to action_bound
return scaled_a
def learn(self, s): # batch update
self.sess.run(self.train_op, feed_dict={S: s})
if self.replacement['name'] == 'soft':
self.sess.run(self.soft_replace)
else:
if self.t_replace_counter % self.replacement['rep_iter_a'] == 0:
self.sess.run(self.hard_replace)
self.t_replace_counter += 1
def choose_action(self, s):
s = s[np.newaxis, :] # single state
return self.sess.run(self.a, feed_dict={S: s})[0] # single action
def add_grad_to_graph(self, a_grads):
with tf.variable_scope('policy_grads'):
# ys = policy;
# xs = policy's parameters;
# a_grads = the gradients of the policy to get more Q
# tf.gradients will calculate dys/dxs with a initial gradients for ys, so this is dq/da * da/dparams
self.policy_grads = tf.gradients(ys=self.a, xs=self.e_params, grad_ys=a_grads)
with tf.variable_scope('A_train'):
opt = tf.train.AdamOptimizer(-self.lr) # (- learning rate) for ascent policy
self.train_op = opt.apply_gradients(zip(self.policy_grads, self.e_params))
############################### Critic ####################################
| Actor |
python | ipython__ipython | IPython/terminal/pt_inputhooks/wx.py | {
"start": 1411,
"end": 1576
} | class ____(wx.Timer):
def __init__(self, func):
self.func = func
wx.Timer.__init__(self)
def Notify(self):
self.func()
| EventLoopTimer |
python | apache__airflow | providers/google/tests/unit/google/cloud/transfers/test_s3_to_gcs.py | {
"start": 11857,
"end": 20920
} | class ____:
@mock.patch("airflow.providers.google.cloud.transfers.s3_to_gcs.CloudDataTransferServiceHook")
@mock.patch("airflow.providers.google.cloud.transfers.s3_to_gcs.S3Hook")
@mock.patch("airflow.providers.google.cloud.transfers.s3_to_gcs.GCSHook")
def test_execute_deferrable(self, mock_gcs_hook, mock_s3_hook, mock_transfer_hook):
mock_gcs_hook.return_value.project_id = PROJECT_ID
mock_s3_super_hook = mock.MagicMock()
mock_s3_super_hook.list_keys.return_value = MOCK_FILES
mock_s3_hook.conn_config = mock.MagicMock(
aws_access_key_id=AWS_ACCESS_KEY_ID,
aws_secret_access_key=AWS_SECRET_ACCESS_KEY,
)
mock_create_transfer_job = mock.MagicMock()
mock_create_transfer_job.return_value = dict(name=TRANSFER_JOB_ID_0)
mock_transfer_hook.return_value.create_transfer_job = mock_create_transfer_job
operator = S3ToGCSOperator(
task_id=TASK_ID,
bucket=S3_BUCKET,
prefix=S3_PREFIX,
delimiter=S3_DELIMITER,
gcp_conn_id=GCS_CONN_ID,
dest_gcs=GCS_PATH_PREFIX,
aws_conn_id=AWS_CONN_ID,
replace=True,
deferrable=True,
)
operator.hook = mock_s3_super_hook
with pytest.raises(TaskDeferred) as exception_info:
operator.execute(None)
mock_s3_hook.assert_called_once_with(aws_conn_id=AWS_CONN_ID, verify=operator.verify)
mock_s3_super_hook.list_keys.assert_called_once_with(
bucket_name=S3_BUCKET, prefix=S3_PREFIX, delimiter=S3_DELIMITER, apply_wildcard=False
)
mock_create_transfer_job.assert_called_once()
assert hasattr(exception_info.value, "trigger")
trigger = exception_info.value.trigger
assert trigger.project_id == PROJECT_ID
assert trigger.job_names == [TRANSFER_JOB_ID_0]
assert trigger.poll_interval == operator.poll_interval
assert hasattr(exception_info.value, "method_name")
assert exception_info.value.method_name == "execute_complete"
@mock.patch("airflow.providers.google.cloud.transfers.s3_to_gcs.S3Hook")
@mock.patch("airflow.providers.google.cloud.transfers.s3_to_gcs.GCSHook")
def test_transfer_files_async(
self,
mock_s3_hook,
mock_gcs_hook,
):
mock_s3_hook.conn_config = mock.MagicMock(
aws_access_key_id=AWS_ACCESS_KEY_ID,
aws_secret_access_key=AWS_SECRET_ACCESS_KEY,
)
mock_gcs_hook.project_id = PROJECT_ID
expected_job_names = [TRANSFER_JOB_ID_0]
expected_method_name = "execute_complete"
operator = S3ToGCSOperator(
task_id=TASK_ID,
bucket=S3_BUCKET,
prefix=S3_PREFIX,
delimiter=S3_DELIMITER,
gcp_conn_id=GCS_CONN_ID,
dest_gcs=GCS_PATH_PREFIX,
)
with mock.patch.object(operator, "submit_transfer_jobs") as mock_submit_transfer_jobs:
mock_submit_transfer_jobs.return_value = expected_job_names
with pytest.raises(TaskDeferred) as exception_info:
operator.transfer_files_async(files=MOCK_FILES, gcs_hook=mock_gcs_hook, s3_hook=mock_s3_hook)
mock_submit_transfer_jobs.assert_called_once_with(
files=MOCK_FILES, gcs_hook=mock_gcs_hook, s3_hook=mock_s3_hook
)
assert hasattr(exception_info.value, "trigger")
trigger = exception_info.value.trigger
assert trigger.project_id == PROJECT_ID
assert trigger.job_names == expected_job_names
assert trigger.poll_interval == operator.poll_interval
assert hasattr(exception_info.value, "method_name")
assert exception_info.value.method_name == expected_method_name
@pytest.mark.parametrize("invalid_poll_interval", [-5, 0])
def test_init_error_polling_interval(self, invalid_poll_interval):
operator = None
expected_error_message = "Invalid value for poll_interval. Expected value greater than 0"
with pytest.raises(ValueError, match=expected_error_message):
operator = S3ToGCSOperator(
task_id=TASK_ID,
bucket=S3_BUCKET,
prefix=S3_PREFIX,
delimiter=S3_DELIMITER,
gcp_conn_id=GCS_CONN_ID,
dest_gcs=GCS_PATH_PREFIX,
poll_interval=invalid_poll_interval,
)
assert operator is None
def test_transfer_files_async_error_no_files(self):
operator = S3ToGCSOperator(
task_id=TASK_ID,
bucket=S3_BUCKET,
prefix=S3_PREFIX,
delimiter=S3_DELIMITER,
gcp_conn_id=GCS_CONN_ID,
dest_gcs=GCS_PATH_PREFIX,
)
expected_error_message = "List of transferring files cannot be empty"
with pytest.raises(ValueError, match=expected_error_message):
operator.transfer_files_async(files=[], gcs_hook=mock.MagicMock(), s3_hook=mock.MagicMock())
@pytest.mark.parametrize(
("file_names", "chunks", "expected_job_names"),
[
(MOCK_FILES, [MOCK_FILES], [TRANSFER_JOB_ID_0]),
(
[f"path/to/file{i}" for i in range(2000)],
[
[f"path/to/file{i}" for i in range(1000)],
[f"path/to/file{i}" for i in range(1000, 2000)],
],
TRANSFER_JOBS,
),
],
)
@mock.patch("airflow.providers.google.cloud.transfers.s3_to_gcs.S3Hook")
@mock.patch("airflow.providers.google.cloud.transfers.s3_to_gcs.GCSHook")
def test_submit_transfer_jobs(
self,
mock_s3_hook,
mock_gcs_hook,
file_names,
chunks,
expected_job_names,
):
mock_s3_hook.conn_config = mock.MagicMock(
aws_access_key_id=AWS_ACCESS_KEY_ID,
aws_secret_access_key=AWS_SECRET_ACCESS_KEY,
)
mock_gcs_hook.project_id = PROJECT_ID
operator = S3ToGCSOperator(
task_id=TASK_ID,
bucket=S3_BUCKET,
prefix=S3_PREFIX,
delimiter=S3_DELIMITER,
gcp_conn_id=GCS_CONN_ID,
dest_gcs=GCS_PATH_PREFIX,
)
now_time = utcnow()
with time_machine.travel(now_time):
with mock.patch.object(operator, "get_transfer_hook") as mock_get_transfer_hook:
mock_create_transfer_job = mock.MagicMock(
side_effect=[dict(name=job_name) for job_name in expected_job_names]
)
mock_get_transfer_hook.return_value = mock.MagicMock(
create_transfer_job=mock_create_transfer_job
)
job_names = operator.submit_transfer_jobs(
files=file_names,
gcs_hook=mock_gcs_hook,
s3_hook=mock_s3_hook,
)
mock_get_transfer_hook.assert_called_once()
mock_create_transfer_job.assert_called()
assert job_names == expected_job_names
@mock.patch(
"airflow.providers.google.cloud.transfers.s3_to_gcs.S3ToGCSOperator.log", new_callable=PropertyMock
)
def test_execute_complete_success(self, mock_log):
expected_event_message = "Event message (success)"
event = {
"status": "success",
"message": expected_event_message,
}
operator = S3ToGCSOperator(task_id=TASK_ID, bucket=S3_BUCKET)
operator.execute_complete(context=mock.MagicMock(), event=event)
mock_log.return_value.info.assert_called_once_with(
"%s completed with response %s ", TASK_ID, event["message"]
)
@mock.patch(
"airflow.providers.google.cloud.transfers.s3_to_gcs.S3ToGCSOperator.log", new_callable=PropertyMock
)
def test_execute_complete_error(self, mock_log):
expected_event_message = "Event error message"
event = {
"status": "error",
"message": expected_event_message,
}
operator = S3ToGCSOperator(task_id=TASK_ID, bucket=S3_BUCKET)
with pytest.raises(AirflowException, match=expected_event_message):
operator.execute_complete(context=mock.MagicMock(), event=event)
mock_log.return_value.info.assert_not_called()
@pytest.mark.db_test
def test_get_transfer_hook(self):
operator = S3ToGCSOperator(
task_id=TASK_ID,
bucket=S3_BUCKET,
prefix=S3_PREFIX,
delimiter=S3_DELIMITER,
gcp_conn_id=GCS_CONN_ID,
dest_gcs=GCS_PATH_PREFIX,
google_impersonation_chain=IMPERSONATION_CHAIN,
)
transfer_hook = operator.get_transfer_hook()
assert isinstance(transfer_hook, CloudDataTransferServiceHook)
assert transfer_hook.gcp_conn_id == GCS_CONN_ID
assert transfer_hook.impersonation_chain == IMPERSONATION_CHAIN
| TestS3ToGoogleCloudStorageOperatorDeferrable |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 117847,
"end": 118302
} | class ____(sgqlc.types.Input):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("column_id", "after_column_id", "client_mutation_id")
column_id = sgqlc.types.Field(sgqlc.types.non_null(ID), graphql_name="columnId")
after_column_id = sgqlc.types.Field(ID, graphql_name="afterColumnId")
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
| MoveProjectColumnInput |
python | huggingface__transformers | src/transformers/models/unispeech/modular_unispeech.py | {
"start": 2967,
"end": 3051
} | class ____(Wav2Vec2PositionalConvEmbedding):
pass
| UniSpeechPositionalConvEmbedding |
python | huggingface__transformers | src/transformers/models/sam2_video/modeling_sam2_video.py | {
"start": 51515,
"end": 56112
} | class ____(nn.Module):
def __init__(self, config: Sam2VideoPromptEncoderConfig):
super().__init__()
self.shared_embedding = Sam2VideoPositionalEmbedding(config)
self.mask_embed = Sam2VideoMaskEmbedding(config)
self.no_mask_embed = nn.Embedding(1, config.hidden_size)
self.image_embedding_size = (config.image_size // config.patch_size, config.image_size // config.patch_size)
self.mask_input_size = (4 * config.image_size // config.patch_size, 4 * config.image_size // config.patch_size)
self.input_image_size = config.image_size
self.point_embed = nn.Embedding(config.num_point_embeddings, config.hidden_size)
self.hidden_size = config.hidden_size
self.not_a_point_embed = nn.Embedding(1, config.hidden_size)
def _embed_points(self, points: torch.Tensor, labels: torch.Tensor, pad: bool) -> torch.Tensor:
"""Embeds point prompts."""
points = points + 0.5 # Shift to center of pixel
if pad:
points = torch.nn.functional.pad(points, (0, 0, 0, 1), mode="constant", value=0)
labels = torch.nn.functional.pad(labels, (0, 1), mode="constant", value=-1)
input_shape = (self.input_image_size, self.input_image_size)
point_embedding = self.shared_embedding(points, input_shape)
# torch.where and expanding the labels tensor is required by the ONNX export
point_embedding = torch.where(labels[..., None] == -1, self.not_a_point_embed.weight, point_embedding)
# This is required for the ONNX export. The dtype, device need to be explicitly
# specified as otherwise torch.onnx.export interprets as double
point_embedding = torch.where(
labels[..., None] != -10,
point_embedding,
torch.zeros_like(point_embedding),
)
# Add point embeddings for labels >= 0
point_embedding = point_embedding + self.point_embed(labels.clamp(min=0)) * (labels >= 0).unsqueeze(-1)
return point_embedding
def _embed_boxes(self, boxes: torch.Tensor) -> torch.Tensor:
"""Embeds box prompts."""
boxes = boxes + 0.5 # Shift to center of pixel
coords = boxes.view(*boxes.shape[:2], 2, 2)
# add padding point for consistency with the original implementation
coords = torch.nn.functional.pad(coords, (0, 0, 0, 1), mode="constant", value=0)
corner_embedding = self.shared_embedding(coords, (self.input_image_size, self.input_image_size))
corner_embedding[:, :, 0, :] += self.point_embed.weight[2]
corner_embedding[:, :, 1, :] += self.point_embed.weight[3]
corner_embedding[:, :, 2, :] = self.not_a_point_embed.weight.expand_as(corner_embedding[:, :, 2, :])
return corner_embedding
def forward(
self,
input_points: Optional[tuple[torch.Tensor, torch.Tensor]],
input_labels: Optional[torch.Tensor],
input_boxes: Optional[torch.Tensor],
input_masks: Optional[torch.Tensor],
) -> tuple[torch.Tensor, torch.Tensor]:
"""
Embeds different types of prompts, returning both sparse and dense embeddings.
Args:
points (`torch.Tensor`, *optional*):
point coordinates and labels to embed.
boxes (`torch.Tensor`, *optional*):
boxes to embed
masks (`torch.Tensor`, *optional*):
masks to embed
"""
sparse_embeddings = None
batch_size = 1
if input_points is not None:
batch_size = input_points.shape[0]
if input_labels is None:
raise ValueError("If points are provided, labels must also be provided.")
point_embeddings = self._embed_points(input_points, input_labels, pad=(input_boxes is None))
sparse_embeddings = point_embeddings
if input_boxes is not None:
batch_size = input_boxes.shape[0]
box_embeddings = self._embed_boxes(input_boxes)
if sparse_embeddings is None:
sparse_embeddings = box_embeddings
else:
sparse_embeddings = torch.cat([sparse_embeddings, box_embeddings], dim=2)
if input_masks is not None:
dense_embeddings = self.mask_embed(input_masks)
else:
dense_embeddings = self.no_mask_embed.weight.reshape(1, -1, 1, 1).expand(
batch_size, -1, self.image_embedding_size[0], self.image_embedding_size[1]
)
return sparse_embeddings, dense_embeddings
| Sam2VideoPromptEncoder |
python | doocs__leetcode | solution/3200-3299/3258.Count Substrings That Satisfy K-Constraint I/Solution.py | {
"start": 0,
"end": 342
} | class ____:
def countKConstraintSubstrings(self, s: str, k: int) -> int:
cnt = [0, 0]
ans = l = 0
for r, x in enumerate(map(int, s)):
cnt[x] += 1
while cnt[0] > k and cnt[1] > k:
cnt[int(s[l])] -= 1
l += 1
ans += r - l + 1
return ans
| Solution |
python | pytorch__pytorch | test/distributed/test_dynamo_distributed.py | {
"start": 19190,
"end": 55176
} | class ____(DynamoDistributedMultiProcTestCase):
"""
Note: MultiProcTestCase spawns processes per test and is slow.
Prefer MultiThreadedTestCase for most tests. Perhaps use this one
sparingly for integration tests.
"""
device_type = (
acc.type if (acc := torch.accelerator.current_accelerator()) else "cpu"
)
@skip_if_lt_x_gpu(2)
@config.patch(optimize_ddp=False, enable_compiler_collectives=True)
def test_ddp_baseline_aot_eager_multiprocess(self):
with _dynamo_dist_per_rank_init(self.rank, self.world_size):
self.assertFalse(config.optimize_ddp)
m, inputs, correct_outputs = get_model(f"{self.device_type}:{self.rank}")
m = DDP(m, device_ids=[self.rank])
m = torch.compile(m, backend="aot_eager")
outputs = m(inputs)
self.assertTrue(same(correct_outputs, outputs))
def _test_hf_bert_ddp_inductor(self, static_graph):
with _dynamo_dist_per_rank_init(self.rank, self.world_size):
model, inputs = get_hf_bert(self.rank)
model = DDP(model, static_graph=static_graph)
run_hf_bert_ddp(self, model, inputs, "inductor")
@skip_if_lt_x_gpu(2)
@import_transformers_or_skip()
@unittest.skipIf(not HAS_GPU, "Inductor+gpu needs triton and recent GPU arch")
@config.patch(optimize_ddp=True, enable_compiler_collectives=True)
@patch.object(torch._inductor.config, "fallback_random", True)
def test_hf_bert_ddp_inductor(self):
self._test_hf_bert_ddp_inductor(static_graph=False)
@skip_if_lt_x_gpu(2)
@import_transformers_or_skip()
@unittest.skipIf(not HAS_GPU, "Inductor+gpu needs triton and recent GPU arch")
@config.patch(optimize_ddp=True, enable_compiler_collectives=True)
@patch.object(torch._inductor.config, "fallback_random", True)
def test_hf_bert_ddp_inductor_static_graph(self):
self._test_hf_bert_ddp_inductor(static_graph=True)
def _test_hf_bert_aot_eager(self, static_graph):
with _dynamo_dist_per_rank_init(self.rank, self.world_size):
model, inputs = get_hf_bert(self.rank)
model = DDP(model, static_graph=static_graph)
run_hf_bert_ddp(self, model, inputs, "aot_eager")
@skip_if_lt_x_gpu(2)
@import_transformers_or_skip()
@config.patch(optimize_ddp=True, enable_compiler_collectives=True)
def test_hf_bert_ddp_aot_eager(self):
self._test_hf_bert_aot_eager(static_graph=False)
@skip_if_lt_x_gpu(2)
@import_transformers_or_skip()
@config.patch(optimize_ddp=True, enable_compiler_collectives=True)
def test_hf_bert_ddp_aot_eager_static_graph(self):
self._test_hf_bert_aot_eager(static_graph=True)
@skip_if_lt_x_gpu(2)
@unittest.skipIf(not HAS_GPU, "Inductor+gpu needs triton and recent GPU arch")
@config.patch(optimize_ddp=False, enable_compiler_collectives=True)
def test_ddp_activation_checkpointing(self):
from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import (
apply_activation_checkpointing,
checkpoint_wrapper,
CheckpointImpl,
)
class MyModel(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.fc1 = torch.nn.Linear(64, 32)
self.fc2 = torch.nn.Linear(32, 16)
self.fc3 = torch.nn.Linear(16, 8)
def forward(self, inp):
return self.fc3(self.fc2(self.fc1(inp)))
with _dynamo_dist_per_rank_init(self.rank, self.world_size):
self.assertFalse(config.optimize_ddp)
model = MyModel().to(device=self.device_type)
# Activation checkpointing for Linear layers.
non_reentrant_wrapper = functools.partial(
checkpoint_wrapper,
checkpoint_impl=CheckpointImpl.NO_REENTRANT,
)
check_fn = lambda submodule: isinstance( # noqa: E731
submodule, torch.nn.Linear
)
apply_activation_checkpointing(
model, checkpoint_wrapper_fn=non_reentrant_wrapper, check_fn=check_fn
)
model = DDP(model)
x = torch.randn(10, 64).to(self.device_type)
correct_outputs = model(x)
opt_model = torch.compile(model)
outputs = opt_model(x)
self.assertTrue(same(correct_outputs, outputs))
@config.patch(enable_compiler_collectives=True)
@skip_if_lt_x_gpu(1)
def test_fsdp_aot_eager(self):
with _dynamo_dist_per_rank_init(self.rank, self.world_size):
# Test with basic FSDP wrapping (outer wrap around whole model)
m, inputs, correct_outputs = get_model(f"{self.device_type}:{self.rank}")
fsdp_m = FSDP(m, use_orig_params=True)
fsdp_m = torch.compile(fsdp_m, backend="aot_eager")
outputs = fsdp_m(inputs)
self.assertTrue(same(correct_outputs, outputs))
# Test with recursive wrapping, nested FSDP around each Linear
m, inputs, correct_outputs = get_model(f"{self.device_type}:{self.rank}")
fsdp_m = FSDP(
m,
auto_wrap_policy=functools.partial(
transformer_auto_wrap_policy, transformer_layer_cls=(nn.Linear,)
),
use_orig_params=True,
)
fsdp_m = torch.compile(fsdp_m, backend="aot_eager")
outputs = fsdp_m(inputs)
self.assertTrue(same(correct_outputs, outputs))
@skip_if_lt_x_gpu(2)
@unittest.skipIf(not HAS_GPU, "Inductor+gpu needs triton and recent GPU arch")
@requires_cuda_and_triton
def test_ddp_optimizer_cudagraph(self):
class Net(nn.Module):
def __init__(self):
super().__init__()
# need a large channel to trigger ddp optimizer split module
self.CHANNELS = 640
self.convi = nn.Conv2d(46, self.CHANNELS, 3, padding=1, bias=False)
self.convp = nn.Conv2d(
self.CHANNELS, self.CHANNELS, 1, padding=0, bias=False
)
self.bni = nn.BatchNorm2d(self.CHANNELS)
def forward(self, bitmap_channels):
x = self.convi(bitmap_channels)
x = self.bni(x)
x = self.convp(x)
return x
with _dynamo_dist_per_rank_init(self.rank, self.world_size):
net = Net().to(self.rank)
optimizer = torch.optim.SGD(
net.parameters(),
lr=5e-2,
)
net = DDP(net, device_ids=[self.rank])
opt_net = torch.compile(net, mode="reduce-overhead")
opt_net.train()
for _ in range(10):
optimizer.zero_grad()
data = torch.randn((16, 46, 8, 8), dtype=torch.float32, device="cuda")
opt_net(data).sum().backward()
# 2 fwd and 2 bwd graph such that 4 graphs in total
graph_id = (
torch._inductor.cudagraph_trees.get_container(self.rank)
.tree_manager.new_graph_id()
.id
)
self.assertTrue(graph_id == 4)
@config.patch(enable_compiler_collectives=True)
@skip_if_lt_x_gpu(1)
def test_fsdp_setattr(self):
with _dynamo_dist_per_rank_init(self.rank, self.world_size):
# Test with basic FSDP wrapping (outer wrap around whole model)
from torch._dynamo.utils import counters
counters.clear()
m, inputs, correct_outputs = get_mutating_model(
f"{self.device_type}:{self.rank}"
)
fsdp_m = FSDP(m, use_orig_params=True)
fsdp_m = torch.compile(fsdp_m, backend="eager", fullgraph=False)
outputs = fsdp_m(inputs)
self.assertTrue(same(correct_outputs, outputs))
self.assertEqual(len(counters["graph_break"]), 1)
first_graph_break = list(counters["graph_break"].keys())[0] # noqa: RUF015
self.assertIn("setattr() on Tensor.requires_grad", first_graph_break)
@config.patch(inline_inbuilt_nn_modules=False)
@config.patch(enable_compiler_collectives=True)
@skip_if_lt_x_gpu(1)
def test_fsdp_unspecialized_forced_getattr_no_inline(self):
with _dynamo_dist_per_rank_init(self.rank, self.world_size):
# Test with basic FSDP wrapping (outer wrap around whole model)
from torch._dynamo.utils import counters
counters.clear()
m, inputs, correct_outputs = get_forced_getattr_module(
f"{self.device_type}:{self.rank}"
)
fsdp_m = FSDP(m, use_orig_params=True)
fsdp_m = torch.compile(fsdp_m, backend="eager", fullgraph=False)
outputs = fsdp_m(inputs)
self.assertTrue(same(correct_outputs, outputs))
@config.patch(enable_compiler_collectives=True)
@skip_if_lt_x_gpu(1)
def test_fsdp_unspecialized_forced_getattr_inline(self):
with _dynamo_dist_per_rank_init(self.rank, self.world_size):
# Test with basic FSDP wrapping (outer wrap around whole model)
from torch._dynamo.utils import counters
counters.clear()
m, inputs, correct_outputs = get_forced_getattr_module(
f"{self.device_type}:{self.rank}"
)
fsdp_m = FSDP(m, use_orig_params=True)
fsdp_m = torch.compile(fsdp_m, backend="eager", fullgraph=False)
outputs = fsdp_m(inputs)
self.assertTrue(same(correct_outputs, outputs))
@config.patch(enable_compiler_collectives=True)
@skip_if_lt_x_gpu(1)
@unittest.skipIf(not HAS_GPU, "Inductor+gpu needs triton and recent GPU arch")
def test_fsdp_inductor(self):
with _dynamo_dist_per_rank_init(self.rank, self.world_size):
# Test with basic FSDP wrapping (outer wrap around whole model)
m, inputs, correct_outputs = get_model(f"{self.device_type}:{self.rank}")
fsdp_m = FSDP(m, use_orig_params=True)
fsdp_m = torch.compile(fsdp_m, backend="inductor")
outputs = fsdp_m(inputs)
self.assertTrue(same(correct_outputs, outputs))
# Test with recursive wrapping, nested FSDP around each Linear
m, inputs, correct_outputs = get_model(f"{self.device_type}:{self.rank}")
fsdp_m = FSDP(
m,
auto_wrap_policy=functools.partial(
transformer_auto_wrap_policy, transformer_layer_cls=(nn.Linear,)
),
use_orig_params=True,
)
fsdp_m = torch.compile(fsdp_m, backend="inductor")
outputs = fsdp_m(inputs)
self.assertTrue(same(correct_outputs, outputs))
@config.patch(enable_compiler_collectives=True)
@skip_if_lt_x_gpu(1)
@unittest.skipIf(not HAS_GPU, "Inductor+gpu needs triton and recent GPU arch")
def test_fsdp_activation_checkpointing(self):
with _dynamo_dist_per_rank_init(self.rank, self.world_size):
model, inputs = get_toy_model_for_activation_checkpointing(
f"{self.device_type}:{self.rank}"
)
is_inner = lambda module: isinstance(module, ToyInnerModel) # noqa: E731
wrap_policy = functools.partial(lambda_auto_wrap_policy, lambda_fn=is_inner)
model = apply_fsdp_with_checkpointing(model, wrap_policy, is_inner)
correct_outputs = model(inputs)
cnt = torch._dynamo.testing.CompileCounterWithBackend("inductor")
opt_model = torch.compile(model, backend=cnt)
outputs = opt_model(inputs)
self.assertTrue(same(correct_outputs, outputs))
# Each FSDP module is a separate graph
self.assertEqual(cnt.frame_count, 2)
self.assertTrue(
find_first_node(cnt.graphs[0], tag_activation_checkpoint) is not None
)
@import_transformers_or_skip()
@unittest.skipIf(not HAS_GPU, "Inductor+gpu needs triton and recent GPU arch")
# TODO(whc) Investigate why cudagraphs breaks inductor+fsdp for hf_bert
@patch.object(torch._inductor.config.triton, "cudagraphs", False)
@patch.object(torch._inductor.config, "fallback_random", True)
@config.patch(enable_compiler_collectives=True)
@unittest.skipIf(
PLATFORM_SUPPORTS_FLASH_ATTENTION or PLATFORM_SUPPORTS_MEM_EFF_ATTENTION,
"Inaccurate results with fused SDPA kernels",
)
def test_hf_bert_fsdp(self):
def apply_fsdp(model, wrap_policy):
model = FSDP(
copy.deepcopy(model), auto_wrap_policy=wrap_policy, use_orig_params=True
)
return model
with _dynamo_dist_per_rank_init(self.rank, self.world_size):
for wrap_policy, test_instance in (
(None, "FSDP without recursive wrapping"),
):
print(f"Running hf_bert test for {test_instance}")
model, inputs = get_hf_bert(self.rank)
reset_rng_state()
eager_model = apply_fsdp(model, wrap_policy)
correct_outputs = eager_model(**inputs)
correct_loss = correct_outputs.loss
correct_loss.backward()
reset_rng_state()
opt_model = apply_fsdp(model, wrap_policy)
opt_model = torch.compile(opt_model, backend="inductor")
opt_outputs = opt_model(**inputs)
opt_loss = opt_outputs.loss
opt_loss.backward()
inputs_flat = [inputs[k] for k in inputs]
correct_results = collect_results(
eager_model, correct_outputs.logits, correct_loss, inputs_flat
)
opt_results = collect_results(
opt_model, opt_outputs.logits, opt_loss, inputs_flat
)
self.assertTrue(same(correct_results, opt_results))
    @import_transformers_or_skip()
    @unittest.skipIf(not HAS_GPU, "Inductor+gpu needs triton and recent GPU arch")
    # TODO(whc) Investigate why cudagraphs breaks inductor+fsdp for hf_bert
    @patch.object(torch._inductor.config.triton, "cudagraphs", False)
    @patch.object(torch._inductor.config, "fallback_random", True)
    @config.patch(guard_nn_modules=True, enable_compiler_collectives=True)
    def test_hf_bert_fsdp_activation_checkpointing(self):
        # Same eager-vs-compiled FSDP comparison as test_hf_bert_fsdp, but
        # with activation checkpointing applied to each BertLayer.
        from transformers.models.bert.modeling_bert import BertLayer

        with _dynamo_dist_per_rank_init(self.rank, self.world_size):
            for wrap_policy, test_instance in (
                (
                    functools.partial(
                        transformer_auto_wrap_policy, transformer_layer_cls=(BertLayer,)
                    ),
                    "FSDP with recursive wrapping BertLayer instances",
                ),
            ):
                print(
                    f"Running hf_bert_activation_checkpointing test for {test_instance}"
                )
                model, inputs = get_hf_bert(self.rank)
                # Checkpoint exactly the BertLayer submodules.
                check_fn = lambda submodule: isinstance(  # noqa: E731
                    submodule, BertLayer
                )
                reset_rng_state()
                eager_model = apply_fsdp_with_checkpointing(
                    model, wrap_policy, check_fn
                )
                correct_outputs = eager_model(**inputs)
                correct_loss = correct_outputs.loss
                correct_loss.backward()
                reset_rng_state()
                opt_model = apply_fsdp_with_checkpointing(model, wrap_policy, check_fn)
                opt_model = torch.compile(opt_model, backend="inductor")
                opt_outputs = opt_model(**inputs)
                opt_loss = opt_outputs.loss
                opt_loss.backward()
                inputs_flat = [inputs[k] for k in inputs]
                correct_results = collect_results(
                    eager_model, correct_outputs.logits, correct_loss, inputs_flat
                )
                opt_results = collect_results(
                    opt_model, opt_outputs.logits, opt_loss, inputs_flat
                )
                self.assertTrue(same(correct_results, opt_results))
    @unittest.skipIf(not HAS_GPU, "Inductor+gpu needs triton and recent GPU arch")
    @config.patch(enable_compiler_collectives=True)
    def test_compiler_collectives_automatic_dynamic_tensor(self):
        # Ranks see different batch sizes; compiler collectives must keep the
        # recompile count identical across ranks (automatic dynamic shapes).
        with _dynamo_dist_per_rank_init(self.rank, self.world_size):

            class SimpleModel(nn.Module):
                def __init__(self, input_size, output_size):
                    super().__init__()
                    self.linear = nn.Linear(input_size, output_size)

                def forward(self, x):
                    return self.linear(x)

            torch._dynamo.utils.clear_compilation_metrics()
            model = SimpleModel(10, 2).to(self.rank)
            model.forward = torch.compile(model.forward)
            ddp_model = DDP(model, device_ids=[self.rank])
            loss_fn = nn.CrossEntropyLoss()
            optimizer = optim.SGD(ddp_model.parameters(), lr=0.001)

            def B(s):
                # Batch of size s: inputs plus integer class labels.
                return [torch.randn(s, 10), torch.randint(0, 2, (s,))]

            # Deliberately different batch-size sequences per rank.
            if self.rank == 0:
                dataloader = [B(5), B(8), B(6)]
            else:
                dataloader = [B(6), B(6), B(3)]
            for data, labels in dataloader:
                data, labels = data.to(self.rank), labels.to(self.rank)
                optimizer.zero_grad()
                output = ddp_model(data)
                loss = loss_fn(output, labels)
                loss.backward()
                optimizer.step()
            metrics = torch._dynamo.utils.get_compilation_metrics()
            # Number of compiles same on all nodes
            res = [None] * self.world_size
            torch.distributed.all_gather_object(res, len(metrics))
            for r in res[1:]:
                self.assertEqual(res[0], r)
    @unittest.skipIf(not HAS_GPU, "Inductor+gpu needs triton and recent GPU arch")
    @config.patch(enable_compiler_collectives=True)
    def test_compiler_collectives_automatic_dynamic_scalar(self):
        # Same invariant as the tensor variant, but the divergent input is a
        # plain Python int that becomes a dynamic scalar.
        with _dynamo_dist_per_rank_init(self.rank, self.world_size):
            torch._dynamo.utils.clear_compilation_metrics()
            # TODO: This should be possible to do inside the function, but
            device = f"{self.device_type}:{self.rank}"

            @torch.compile()
            def f(x, y):
                return x + torch.ones(y, device=device).sum()

            # Scalar inputs diverge across ranks.
            if self.rank == 0:
                dataloader = [3, 3, 7]
            else:
                dataloader = [3, 4, 9]
            for data in dataloader:
                f(torch.randn(5, device=self.rank), data)
            metrics = torch._dynamo.utils.get_compilation_metrics()
            # Number of compiles same on all nodes
            res = [None] * self.world_size
            torch.distributed.all_gather_object(res, len(metrics))
            for r in res[1:]:
                self.assertEqual(res[0], r)
    @unittest.skipIf(not HAS_GPU, "Inductor+gpu needs triton and recent GPU arch")
    @config.patch(enable_compiler_collectives=True)
    def test_compiler_collectives_automatic_dynamic_speculation_divergence(self):
        # Ranks diverge on the very first call (4 vs 3); compiler collectives
        # must still converge to the same number of compiles everywhere.
        with _dynamo_dist_per_rank_init(self.rank, self.world_size):
            torch._dynamo.utils.clear_compilation_metrics()

            @torch.compile()
            def f(x, y):
                zx = x.shape  # noqa: F841
                zy = y.shape  # noqa: F841
                return x.sum() + y.sum()

            if self.rank == 0:
                dataloader = [4, 4]
            else:
                dataloader = [3, 4]
            for data in dataloader:
                f(
                    torch.randn(data, device=self.rank),
                    torch.randn(data, device=self.rank),
                )
            metrics = torch._dynamo.utils.get_compilation_metrics()
            # Number of compiles same on all nodes
            res = [None] * self.world_size
            torch.distributed.all_gather_object(res, len(metrics))
            for r in res[1:]:
                self.assertEqual(res[0], r)
    @unittest.skipIf(not HAS_GPU, "Inductor+gpu needs triton and recent GPU arch")
    @config.patch(enable_compiler_collectives=True)
    def test_compiler_collectives_graph_break_empty_graph_still_collective(self):
        # The print() forces a graph break, so the first graph is empty;
        # collectives must still fire for it or ranks would deadlock/diverge.
        with _dynamo_dist_per_rank_init(self.rank, self.world_size):
            torch._dynamo.utils.clear_compilation_metrics()

            @torch.compile()
            def f(x, y):
                z = y  # noqa: F841
                print("woof")
                zx = x.shape  # noqa: F841
                zy = y.shape  # noqa: F841
                return x.sum() + y.sum()

            if self.rank == 0:
                dataloader = [5, 5, 6]
            else:
                dataloader = [3, 4, 5]
            for data in dataloader:
                f(
                    torch.randn(data, device=self.rank),
                    torch.randn(data, device=self.rank),
                )
            metrics = torch._dynamo.utils.get_compilation_metrics()
            # Number of compiles same on all nodes
            res = [None] * self.world_size
            torch.distributed.all_gather_object(res, len(metrics))
            for r in res[1:]:
                self.assertEqual(res[0], r)
    @unittest.skipIf(not HAS_GPU, "Inductor+gpu needs triton and recent GPU arch")
    @config.patch(enable_compiler_collectives=True)
    def test_compiler_collectives_dim_mismatch(self):
        # Ranks disagree on tensor *rank* (2-D vs 1-D), not just sizes;
        # compile counts must still agree across ranks.
        with _dynamo_dist_per_rank_init(self.rank, self.world_size):
            torch._dynamo.utils.clear_compilation_metrics()

            @torch.compile()
            def f(x, y):
                zx = x.shape  # noqa: F841
                zy = y.shape  # noqa: F841
                return x.sum() + y.sum()

            if self.rank == 0:
                dataloader = [[4, 2]]
            else:
                dataloader = [[3]]
            for data in dataloader:
                f(
                    torch.randn(data, device=self.rank),
                    torch.randn(data, device=self.rank),
                )
            metrics = torch._dynamo.utils.get_compilation_metrics()
            res = [None] * self.world_size
            torch.distributed.all_gather_object(res, len(metrics))
            for r in res[1:]:
                self.assertEqual(res[0], r)
    @unittest.skipIf(not HAS_GPU, "Inductor+gpu needs triton and recent GPU arch")
    @config.patch(enable_compiler_collectives=True)
    def test_compiler_collectives_missing_source(self):
        # Each rank indexes a different list element, so some tensors have no
        # guard source on other ranks; compile counts must still match.
        with _dynamo_dist_per_rank_init(self.rank, self.world_size):
            torch._dynamo.utils.clear_compilation_metrics()

            @torch.compile()
            def f(rank, xs):
                return xs[rank].sum()

            xs = []
            for _ in range(self.world_size):
                xs.append(torch.randn(10, device=self.rank))
            f(self.rank, xs)
            metrics = torch._dynamo.utils.get_compilation_metrics()
            res = [None] * self.world_size
            torch.distributed.all_gather_object(res, len(metrics))
            for r in res[1:]:
                self.assertEqual(res[0], r)
    @unittest.skipIf(not HAS_GPU, "Inductor+gpu needs triton and recent GPU arch")
    @config.patch(enable_compiler_collectives=True)
    def test_compiler_collectives_scalar_missing_source(self):
        # Scalar analogue of test_compiler_collectives_missing_source:
        # each rank specializes on a different int from the list.
        with _dynamo_dist_per_rank_init(self.rank, self.world_size):
            torch._dynamo.utils.clear_compilation_metrics()

            @torch.compile()
            def f(rank, xs):
                return torch.tensor(xs[rank], device=self.rank)

            xs = []
            for i in range(self.world_size):
                xs.append(10 + i)
            f(self.rank, xs)
            metrics = torch._dynamo.utils.get_compilation_metrics()
            res = [None] * self.world_size
            torch.distributed.all_gather_object(res, len(metrics))
            for r in res[1:]:
                self.assertEqual(res[0], r)
    @unittest.skipIf(not HAS_GPU, "Inductor+gpu needs triton and recent GPU arch")
    @config.patch(enable_compiler_collectives=True)
    def test_compiler_collectives_type_mismatch(self):
        # Ranks pass arguments of different *types* (tensor vs int); compile
        # counts must still agree.
        with _dynamo_dist_per_rank_init(self.rank, self.world_size):
            torch._dynamo.utils.clear_compilation_metrics()

            @torch.compile()
            def f(x):
                if isinstance(x, int):
                    return torch.tensor(x, device=self.rank)
                else:
                    return x.sum()

            if self.rank == 0:
                x = torch.randn(10, device=self.rank)
            else:
                x = 12
            f(x)
            # This deadlocks, I guess we don't support this
            """
            if self.rank == 0:
                x = torch.randn(12, device=self.rank)
            else:
                x = 10
            f(x)
            """
            metrics = torch._dynamo.utils.get_compilation_metrics()
            res = [None] * self.world_size
            torch.distributed.all_gather_object(res, len(metrics))
            for r in res[1:]:
                self.assertEqual(res[0], r)
    @unittest.skipIf(not HAS_GPU, "Inductor+gpu needs triton and recent GPU arch")
    @enable_guard_collectives()
    def test_guard_collective(self):
        # A guard failure on one rank must propagate via the guard collective
        # so that every rank ends up with the same number of compiles.
        with _dynamo_dist_per_rank_init(self.rank, self.world_size):
            torch._dynamo.utils.clear_compilation_metrics()

            @torch.compile()
            def f(x):
                return x.sum()

            x = torch.randn(10, device=self.rank)
            f(x)

            if self.rank == 0:
                x = torch.randn(10, device=self.rank)
            else:
                x = torch.randn(12, device=self.rank)  # recompile on one rank
            f(x)

            metrics = torch._dynamo.utils.get_compilation_metrics()
            res = [None] * self.world_size
            torch.distributed.all_gather_object(res, len(metrics))
            for r in res[1:]:
                self.assertEqual(res[0], r)
    @unittest.skipIf(not HAS_GPU, "Inductor+gpu needs triton and recent GPU arch")
    @patch.object(torch._dynamo.config, "enable_compiler_collectives", True)
    @patch.object(torch._inductor.config, "max_autotune_gemm", True)
    @patch.object(torch._inductor.config, "distributed_max_autotune_gemm", True)
    def test_multiproc_autotune(self):
        # Distributed GEMM max-autotune: all ranks autotune cooperatively and
        # must report identical compile counts.
        with _dynamo_dist_per_rank_init(self.rank, self.world_size):
            torch._dynamo.utils.clear_compilation_metrics()

            @torch.compile()
            def f(a, b, c):
                # Three matmuls exercise several autotuned GEMM shapes.
                res = (
                    torch.sum((a @ b) + 1.0)
                    + torch.sum(torch.relu(b @ c))
                    + torch.sum(c @ a)
                )
                return res

            a = torch.randn(1024, 1024, device=self.rank, dtype=torch.bfloat16)
            b = torch.randn(1024, 2048, device=self.rank, dtype=torch.bfloat16)
            c = torch.randn(2048, 1024, device=self.rank, dtype=torch.bfloat16)
            try:
                f(a, b, c)
            except Exception:
                log.exception("Caught exception running f")
                raise

            metrics = torch._dynamo.utils.get_compilation_metrics()
            res = [None] * self.world_size
            torch.distributed.all_gather_object(res, len(metrics))
            for r in res[1:]:
                self.assertEqual(res[0], r)
            print(f"Result from {self.rank} is {f(a, b, c)}")
    @unittest.skipIf(not HAS_GPU, "Inductor+gpu needs triton and recent GPU arch")
    @patch.object(torch._dynamo.config, "enable_compiler_collectives", True)
    @patch.object(torch._inductor.config, "max_autotune_gemm", True)
    @patch.object(torch._inductor.config, "distributed_max_autotune_gemm", True)
    def test_multiproc_autotune_dynamic_shapes(self):
        # Distributed max-autotune with all dims marked dynamic: a second call
        # with different sizes must NOT trigger a recompile.
        with _dynamo_dist_per_rank_init(self.rank, self.world_size):
            torch._dynamo.utils.clear_compilation_metrics()

            @torch.compile()
            def f(a, b, c):
                res = (
                    torch.sum((a @ b) + 1.0)
                    + torch.sum(torch.relu(b @ c))
                    + torch.sum(c @ a)
                )
                return res

            a = torch.randn(1024, 1024, device=self.rank, dtype=torch.bfloat16)
            b = torch.randn(1024, 2048, device=self.rank, dtype=torch.bfloat16)
            c = torch.randn(2048, 1024, device=self.rank, dtype=torch.bfloat16)

            # Mark tensors as dynamic on dimension 0
            torch._dynamo.mark_dynamic(a, 0)
            torch._dynamo.mark_dynamic(a, 1)
            torch._dynamo.mark_dynamic(b, 0)
            torch._dynamo.mark_dynamic(b, 1)
            torch._dynamo.mark_dynamic(c, 0)
            torch._dynamo.mark_dynamic(c, 1)

            try:
                f(a, b, c)
            except Exception:
                log.exception("Caught exception running f")
                raise

            metrics = torch._dynamo.utils.get_compilation_metrics()
            res = [None] * self.world_size
            torch.distributed.all_gather_object(res, len(metrics))
            for r in res[1:]:
                self.assertEqual(res[0], r)
            print(f"Result from {self.rank} is {f(a, b, c)}")

            # Store the initial compilation count
            initial_compile_count = len(metrics)

            # # Test with different sizes to ensure dynamic shapes work without recompilation
            a2 = torch.randn(512, 512, device=self.rank, dtype=torch.bfloat16)
            b2 = torch.randn(512, 2048, device=self.rank, dtype=torch.bfloat16)
            c2 = torch.randn(2048, 512, device=self.rank, dtype=torch.bfloat16)

            try:
                result2 = f(a2, b2, c2)
                print(f"Result2 from {self.rank} is {result2}")
            except Exception:
                log.exception("Caught exception running f with different sizes")
                raise

            # Verify no recompilation occurred
            metrics_after = torch._dynamo.utils.get_compilation_metrics()
            final_compile_count = len(metrics_after)
            self.assertEqual(
                initial_compile_count,
                final_compile_count,
                "Expected no recompilation with dynamic shapes",
            )

            # Verify all ranks have the same compilation count
            res_after = [None] * self.world_size
            torch.distributed.all_gather_object(res_after, final_compile_count)
            for r in res_after[1:]:
                self.assertEqual(res_after[0], r)
    @unittest.skipIf(not HAS_GPU, "Inductor+gpu needs triton and recent GPU arch")
    def test_get_pg_attr(self):
        # fullgraph compile must trace _rank_not_in_group for both a real
        # process group and the NON_GROUP_MEMBER sentinel.
        with _dynamo_dist_per_rank_init(self.rank, self.world_size):
            pg = dist.distributed_c10d._get_default_group()
            device = f"{self.device_type}:{self.rank}"

            @torch.compile(fullgraph=True)
            def f(x):
                if dist.distributed_c10d._rank_not_in_group(pg):
                    return x + 1
                else:
                    return x - 1

            x = torch.ones(4, device=device)
            self.assertEqual(f(x), x - 1)

            pg = dist.distributed_c10d.GroupMember.NON_GROUP_MEMBER
            self.assertEqual(f(x), x + 1)
    @skipIfXpu  # ProcessGroupXCCL doesn't support _set_default_timeout yet.
    @unittest.skipIf(not HAS_GPU, "Inductor+gpu needs triton and recent GPU arch")
    @patch.object(torch._inductor.config, "fx_graph_cache", False)
    @patch.object(torch._inductor.config, "fx_graph_remote_cache", False)
    def test_asymmetric_compilation(self):
        # Rank 0 compiles much slower (comptime.sleep) than the collective
        # timeout; the test checks the other ranks survive the skew.
        from torch._dynamo.comptime import comptime

        with _dynamo_dist_per_rank_init(self.rank, self.world_size):
            torch._dynamo.utils.clear_compilation_metrics()

            device = f"{self.device_type}:{self.rank}"
            pg = dist.distributed_c10d._get_default_group()
            cnt = torch._dynamo.testing.CompileCounter()
            sleep_time = 5

            @torch.compile(backend=cnt)
            def f(x):
                if self.rank == 0:
                    # Only rank 0 stalls during compilation.
                    comptime.sleep(sleep_time)
                y = 2 * x
                return y.sum()

            backend = pg._get_backend(torch.device(device))
            # Timeout deliberately shorter than rank 0's compile stall.
            backend._set_default_timeout(timedelta(seconds=sleep_time - 2))
            x = torch.ones(4, device=device)

            # NCCL startup is lazy
            w = pg.allreduce(x)
            w.wait()

            f(x)
            if self.rank != 0:
                # test fails with NCCL timeout without this line
                dist.distributed_c10d._add_ephemeral_timeout_for_all_pgs(
                    timedelta(seconds=sleep_time)
                )

            w = pg.allreduce(x)
            w.wait()
            torch.accelerator.synchronize(device)

            metrics = torch._dynamo.utils.get_compilation_metrics()
            # Number of compiles same on all nodes
            res = [None] * self.world_size
            torch.distributed.all_gather_object(res, len(metrics))
            for r in res[1:]:
                self.assertEqual(res[0], r)
    @skipIfXpu  # ProcessGroupXCCL doesn't support _set_default_timeout yet.
    @unittest.skipIf(not HAS_GPU, "Inductor+gpu needs triton and recent GPU arch")
    @patch.object(torch._inductor.config, "fx_graph_cache", True)
    @patch.object(torch._inductor.config, "fx_graph_remote_cache", False)
    @patch.object(torch._inductor.config, "sleep_sec_TESTING_ONLY", 10)
    def test_asymmetric_compilation_with_fx_cache(self):
        # After a dynamo reset, rank 0 recompiles with a fresh cache (miss)
        # while other ranks hit the fx graph cache; both paths must complete.
        from torch._dynamo.utils import counters
        from torch._inductor.utils import fresh_cache

        with fresh_cache(), _dynamo_dist_per_rank_init(self.rank, self.world_size):
            torch._dynamo.utils.clear_compilation_metrics()

            device = f"{self.device_type}:{self.rank}"
            pg = dist.distributed_c10d._get_default_group()

            @torch.compile
            def f(x):
                y = 2 * x
                return y.sum()

            backend = pg._get_backend(torch.device(device))
            backend._set_default_timeout(timedelta(seconds=5))
            counters.clear()

            x = torch.ones(4, device=device)

            f(x)
            # First compile is necessarily a cache miss.
            self.assertEqual(counters["inductor"]["fxgraph_cache_miss"], 1)
            self.assertEqual(counters["inductor"]["fxgraph_cache_hit"], 0)
            self.assertEqual(counters["inductor"]["fxgraph_cache_bypass"], 0)

            w = pg.allreduce(x)
            w.wait()
            torch.accelerator.synchronize(device)
            torch._dynamo.reset()

            if self.rank == 0:
                with fresh_cache():
                    f(x)
                self.assertEqual(counters["inductor"]["fxgraph_cache_miss"], 2)
                self.assertEqual(counters["inductor"]["fxgraph_cache_hit"], 0)
                self.assertEqual(counters["inductor"]["fxgraph_cache_bypass"], 0)
            else:
                f(x)
                self.assertEqual(counters["inductor"]["fxgraph_cache_miss"], 1)
                self.assertEqual(counters["inductor"]["fxgraph_cache_hit"], 1)
                self.assertEqual(counters["inductor"]["fxgraph_cache_bypass"], 0)

            w = pg.allreduce(x)
            w.wait()
            torch.accelerator.synchronize(device)
@requires_accelerator_dist_backend(["nccl", "xccl"])
@unittest.skipUnless(torch.accelerator.is_available(), "Requires accelerator")
| TestMultiProc |
python | paramiko__paramiko | paramiko/agent.py | {
"start": 13070,
"end": 15877
class ____(PKey):
    """
    Private key held in a local SSH agent. This type of key can be used for
    authenticating to a remote server (signing). Most other key operations
    work as expected.

    .. versionchanged:: 3.2
        Added the ``comment`` kwarg and attribute.

    .. versionchanged:: 3.2
        Added the ``.inner_key`` attribute holding a reference to the 'real'
        key instance this key is a proxy for, if one was obtainable, else None.
    """

    def __init__(self, agent, blob, comment=""):
        # ``agent`` routes signing requests back to the SSH agent;
        # ``blob`` is the wire-format public key the agent handed us.
        self.agent = agent
        self.blob = blob
        self.comment = comment
        msg = Message(blob)
        # First field of a key blob is the algorithm name, e.g. "ssh-rsa".
        self.name = msg.get_text()
        self._logger = get_logger(__file__)
        self.inner_key = None
        try:
            self.inner_key = PKey.from_type_string(
                key_type=self.name, key_bytes=blob
            )
        except UnknownKeyType:
            # Log, but don't explode, since inner_key is a best-effort thing.
            err = "Unable to derive inner_key for agent key of type {!r}"
            self.log(DEBUG, err.format(self.name))

    def log(self, *args, **kwargs):
        # Thin convenience wrapper over the module logger.
        return self._logger.log(*args, **kwargs)

    def asbytes(self):
        # Prefer inner_key.asbytes, since that will differ for eg RSA-CERT
        return self.inner_key.asbytes() if self.inner_key else self.blob

    def get_name(self):
        return self.name

    def get_bits(self):
        # Have to work around PKey's default get_bits being crap
        if self.inner_key is not None:
            return self.inner_key.get_bits()
        return super().get_bits()

    def __getattr__(self, name):
        """
        Proxy any un-implemented methods/properties to the inner_key.
        """
        if self.inner_key is None:  # nothing to proxy to
            raise AttributeError(name)
        return getattr(self.inner_key, name)

    @property
    def _fields(self):
        # Used for equality/hashing; fall back to (name, blob) when there is
        # no concrete inner key to delegate to.
        fallback = [self.get_name(), self.blob]
        return self.inner_key._fields if self.inner_key else fallback

    def sign_ssh_data(self, data, algorithm=None):
        msg = Message()
        msg.add_byte(cSSH2_AGENTC_SIGN_REQUEST)
        # NOTE: this used to be just self.blob, which is not entirely right for
        # RSA-CERT 'keys' - those end up always degrading to ssh-rsa type
        # signatures, for reasons probably internal to OpenSSH's agent code,
        # even if everything else wants SHA2 (including our flag map).
        msg.add_string(self.asbytes())
        msg.add_string(data)
        # Unknown algorithms map to flag 0 (agent picks its default).
        msg.add_int(ALGORITHM_FLAG_MAP.get(algorithm, 0))
        ptype, result = self.agent._send_message(msg)
        if ptype != SSH2_AGENT_SIGN_RESPONSE:
            raise SSHException("key cannot be used for signing")
        return result.get_binary()
| AgentKey |
python | pytorch__pytorch | torch/utils/data/datapipes/iter/sharding.py | {
"start": 701,
"end": 3587
class ____(_ShardingIterDataPipe):
    r"""
    Wrapper that allows DataPipe to be sharded (functional name: ``sharding_filter``).

    Once ``apply_sharding`` has been called, each of the `n` DataPipe instances
    (spread across workers) yields every `n`-th element of the wrapped pipe.

    Args:
        source_datapipe: Iterable DataPipe that will be sharded
    """

    def __init__(
        self, source_datapipe: IterDataPipe, sharding_group_filter=None
    ) -> None:
        self.source_datapipe = source_datapipe
        self.sharding_group_filter = sharding_group_filter
        # Maps sharding-group id -> (num_of_instances, instance_id).
        self.groups: dict[int, tuple[int, int]] = {}
        self.num_of_instances = 1
        self.instance_id = 0
        self._update_num_of_instances()

    def apply_sharding(
        self, num_of_instances, instance_id, sharding_group=SHARDING_PRIORITIES.DEFAULT
    ):
        if instance_id >= num_of_instances:
            raise ValueError(
                f"instance_id({instance_id}) should be smaller than num_of_instances({num_of_instances})"
            )
        # DEFAULT may not be combined with any explicit priority group.
        has_default = SHARDING_PRIORITIES.DEFAULT in self.groups
        if sharding_group == SHARDING_PRIORITIES.DEFAULT:
            if self.groups and not has_default:
                raise RuntimeError(
                    "ShardingFilter cannot mix DEFAULT and non DEFAULT groups"
                )
        elif has_default:
            raise RuntimeError(
                "ShardingFilter cannot mix DEFAULT and non DEFAULT groups"
            )
        self.groups[sharding_group] = (num_of_instances, instance_id)
        self._update_num_of_instances()

    def _update_num_of_instances(self) -> None:
        # Fold all (optionally filtered) groups, highest-priority last, into a
        # single (num_of_instances, instance_id) pair.
        wanted = self.sharding_group_filter
        selected = [
            self.groups[group]
            for group in sorted(self.groups.keys())
            if wanted is None or group == wanted
        ]
        total, position = 1, 0
        for group_total, group_position in reversed(selected):
            position += total * group_position
            total *= group_total
        self.num_of_instances = total
        self.instance_id = position

    def __iter__(self):
        # Round-robin: keep only every num_of_instances-th element, offset by
        # this instance's id.
        for index, element in enumerate(self.source_datapipe):
            if index % self.num_of_instances == self.instance_id:
                yield element

    def __len__(self) -> int:
        if not isinstance(self.source_datapipe, Sized):
            raise TypeError(f"{type(self).__name__} instance doesn't have valid length")
        whole, leftover = divmod(len(self.source_datapipe), self.num_of_instances)
        # Early instance ids absorb one extra element each from the remainder.
        return whole + (1 if self.instance_id < leftover else 0)
| ShardingFilterIterDataPipe |
python | modin-project__modin | modin/config/envvars.py | {
"start": 11349,
"end": 13373
class ____(EnvironmentVariableDisallowingExecutionAndBackendBothSet, type=str):
    """Engine to run on a single node of distribution."""

    @classmethod
    def put(cls, value: str) -> None:
        """
        Set the storage format value.

        Parameters
        ----------
        value : str
            Storage format value to set.
        """
        value = cls.normalize(value)
        # Backend.put() will set StorageFormat.
        Backend.put(
            Backend.get_backend_for_execution(
                Execution(engine=Engine.get(), storage_format=value)
            )
        )

    @classmethod
    def get(cls) -> str:
        """
        Get the storage format value.

        Returns
        -------
        str
            Storage format value.
        """
        # We have to override get() because StorageFormat may need to get its
        # value from the OS's environment variables for Backend or StorageFormat.
        cls._warn_if_deprecated()
        # First, check if we've already set the engine value.
        if cls._value is not _UNSET:
            return cls._value
        storage_format_config_value = cls._get_value_from_config()
        backend_config_value = Backend._get_value_from_config()
        # If StorageFormat is in the OS's configuration, use the configured
        # StorageFormat value. Otherwise, use the Backend config value if that
        # exists. If it doesn't, fall back to the default StorageFormat value.
        cls._value = (
            storage_format_config_value
            if storage_format_config_value is not _UNSET
            else (
                Backend.get_execution_for_backend(backend_config_value).storage_format
                if backend_config_value is not _UNSET
                else cls._get_default()
            )
        )
        return cls._value

    # Environment variable name and the set of accepted values.
    varname = "MODIN_STORAGE_FORMAT"
    default = "Pandas"
    choices = ("Pandas", "Native")


# Pairing of a storage format with an execution engine.
Execution = namedtuple("Execution", ["storage_format", "engine"])
python | fsspec__filesystem_spec | fsspec/implementations/dask.py | {
"start": 527,
"end": 3932
class ____(AbstractFileSystem):
    """View files accessible to a worker as any other remote file-system

    When instances are run on the worker, uses the real filesystem. When
    run on the client, they call the worker to provide information or data.

    **Warning** this implementation is experimental, and read-only for now.
    """

    def __init__(
        self, target_protocol=None, target_options=None, fs=None, client=None, **kwargs
    ):
        super().__init__(**kwargs)
        # Exactly one of (fs) or (target_protocol) must be supplied.
        if not (fs is None) ^ (target_protocol is None):
            raise ValueError(
                "Please provide one of filesystem instance (fs) or"
                " target_protocol, not both"
            )
        self.target_protocol = target_protocol
        self.target_options = target_options
        self.worker = None
        self.client = client
        self.fs = fs
        self._determine_worker()

    @staticmethod
    def _get_kwargs_from_urls(path):
        # Derive a scheduler address ("host:port") from the URL, if present.
        so = infer_storage_options(path)
        if "host" in so and "port" in so:
            return {"client": f"{so['host']}:{so['port']}"}
        else:
            return {}

    def _determine_worker(self):
        # Decide once whether we're running inside a dask worker: if so, use
        # the real filesystem directly; otherwise proxy calls via dask.delayed.
        if _in_worker():
            self.worker = True
            if self.fs is None:
                self.fs = filesystem(
                    self.target_protocol, **(self.target_options or {})
                )
        else:
            self.worker = False
            self.client = _get_client(self.client)
            # Delayed proxy of self; each method call computes on a worker.
            self.rfs = dask.delayed(self)

    def mkdir(self, *args, **kwargs):
        if self.worker:
            self.fs.mkdir(*args, **kwargs)
        else:
            self.rfs.mkdir(*args, **kwargs).compute()

    def rm(self, *args, **kwargs):
        if self.worker:
            self.fs.rm(*args, **kwargs)
        else:
            self.rfs.rm(*args, **kwargs).compute()

    def copy(self, *args, **kwargs):
        if self.worker:
            self.fs.copy(*args, **kwargs)
        else:
            self.rfs.copy(*args, **kwargs).compute()

    def mv(self, *args, **kwargs):
        if self.worker:
            self.fs.mv(*args, **kwargs)
        else:
            self.rfs.mv(*args, **kwargs).compute()

    def ls(self, *args, **kwargs):
        if self.worker:
            return self.fs.ls(*args, **kwargs)
        else:
            return self.rfs.ls(*args, **kwargs).compute()

    def _open(
        self,
        path,
        mode="rb",
        block_size=None,
        autocommit=True,
        cache_options=None,
        **kwargs,
    ):
        # On the worker open the real file; on the client return a DaskFile
        # whose reads are fetched from the worker on demand.
        if self.worker:
            return self.fs._open(
                path,
                mode=mode,
                block_size=block_size,
                autocommit=autocommit,
                cache_options=cache_options,
                **kwargs,
            )
        else:
            return DaskFile(
                fs=self,
                path=path,
                mode=mode,
                block_size=block_size,
                autocommit=autocommit,
                cache_options=cache_options,
                **kwargs,
            )

    def fetch_range(self, path, mode, start, end):
        # Read bytes [start, end) of the file, proxying to a worker if needed.
        if self.worker:
            with self._open(path, mode) as f:
                f.seek(start)
                return f.read(end - start)
        else:
            return self.rfs.fetch_range(path, mode, start, end).compute()
python | readthedocs__readthedocs.org | readthedocs/rtd_tests/tests/test_privacy_urls.py | {
"start": 812,
"end": 5736
class ____:
    # Mixin for URL access-control tests: walks every named URL pattern and
    # asserts each view responds with the expected status/attributes for the
    # logged-in user defined by the concrete subclass.

    # Fallback kwargs used to reverse URL patterns (subclasses extend).
    default_kwargs = {}
    # Per-view (or per-path) expected response attributes.
    response_data = {}
    # Per-view (or per-path) request kwargs/data overrides.
    request_data = {}
    # Objects that must never leak into any rendered template context.
    context_data = []
    default_status_code = 200

    def login(self):
        # Subclasses log in as the user whose access is being tested.
        raise NotImplementedError

    def is_admin(self):
        raise NotImplementedError

    def get_url_path_ctx(self):
        # Optional format() context for keys of ``response_data``.
        return {}

    def _test_cache_poisoning(self, path):
        # Test for cache poisoning in URLs,
        # to avoid problems like GHSA-7fcx-wwr3-99jv.
        original_path = path
        if not path.endswith("/"):
            path += "/"
        path += "lib.js"
        r = self.client.head(path)
        self.assertNotEqual(
            r.status_code,
            200,
            f"Path vulnerable to cache poisoning. path={original_path}",
        )

    def assertResponse(self, path, name=None, method=None, data=None, **kwargs):
        # Issue one request against ``path`` and compare the response against
        # the expectations registered in request_data/response_data.
        self.login()
        if method is None:
            method = self.client.get
        if data is None:
            data = {}

        # Get view specific query data
        request_data = self.request_data.get(path, {}).copy()
        if not request_data:
            request_data = self.request_data.get(name, {}).copy()
        if "data" in request_data:
            data.update(request_data["data"])
            del request_data["data"]

        response = method(path, data=data)

        # Get response specific test data
        response_data = self.response_data.get(path, {}).copy()
        if not response_data:
            response_data = self.response_data.get(name, {}).copy()

        response_attrs = {
            "status_code": response_data.pop("status_code", self.default_status_code),
        }
        response_attrs.update(kwargs)
        response_attrs.update(response_data)
        if self.context_data and getattr(response, "context"):
            self._test_context(response)
        for key, val in list(response_attrs.items()):
            resp_val = getattr(response, key)
            self.assertEqual(
                resp_val,
                val,
                (
                    "Attribute mismatch for view {view} ({path}): "
                    "{key} != {expected} (got {value})".format(
                        view=name,
                        path=path,
                        key=key,
                        expected=val,
                        value=resp_val,
                    )
                ),
            )

        self._test_cache_poisoning(path)

        return response

    def _test_context(self, response):
        """
        Allow for testing the template context rendered to verify no data leakage.

        Usage::

            def setUp(self):
                self.context_data.append(self.pip)
        """
        for key in list(response.context.keys()):
            obj = response.context[key]
            for not_obj in self.context_data:
                if isinstance(obj, (list, set, tuple)):
                    self.assertNotIn(not_obj, obj)
                    print("{} not in {}".format(not_obj, obj))
                else:
                    self.assertNotEqual(not_obj, obj)
                    print("{} is not {}".format(not_obj, obj))

    def _test_url(self, urlpatterns):
        # Reverse every *named* pattern with kwargs from request_data /
        # default_kwargs, then assert on each resulting path.
        deconstructed_urls = extract_views_from_urlpatterns(urlpatterns)
        added_kwargs = {}

        # we need to format urls with proper ids
        url_ctx = self.get_url_path_ctx()
        if url_ctx:
            self.response_data = {
                url.format(**url_ctx): data for url, data in self.response_data.items()
            }

        for view, regex, namespace, name in deconstructed_urls:
            # Skip URL and views that are not named
            if not name:
                continue

            request_data = self.request_data.get(name, {}).copy()
            for key in list(re.compile(regex).groupindex.keys()):
                if key in list(request_data.keys()):
                    added_kwargs[key] = request_data[key]
                    continue
                if key not in self.default_kwargs:
                    raise Exception(
                        "URL argument not in test kwargs. Please add `%s`" % key
                    )
                added_kwargs[key] = self.default_kwargs[key]
            path = reverse(name, kwargs=added_kwargs)
            self.assertResponse(path=path, name=name)
            added_kwargs = {}

    def setUp(self):
        # Previous Fixtures
        self.owner = create_user(username="owner", password="test")
        self.tester = create_user(username="tester", password="test")
        self.pip = get(
            Project,
            slug="pip",
            users=[self.owner],
            privacy_level="public",
            main_language_project=None,
        )
        self.private = get(
            Project,
            slug="private",
            privacy_level="private",
            main_language_project=None,
        )
python | huggingface__transformers | src/transformers/models/granitemoehybrid/modular_granitemoehybrid.py | {
"start": 4228,
"end": 6993
class ____(GraniteMoeSharedDecoderLayer):
    # Decoder layer that mixes token-mixing mechanisms per layer: either a
    # mamba block or standard attention, chosen by config.layers_block_type,
    # followed by a (possibly absent) MoE plus a shared dense MLP.

    def __init__(self, config: GraniteMoeHybridConfig, layer_idx: int):
        super().__init__(config, layer_idx)
        self.shared_mlp = GraniteMoeHybridMLP(config)
        # Either attention or mamba will be initialized, depending on the layer type.
        self.self_attn = None
        self.mamba = None

        if config.layers_block_type[layer_idx] == "mamba":
            self.mamba = GraniteMoeHybridMambaLayer(config, layer_idx)
        else:
            self.self_attn = GraniteMoeHybridAttention(config, layer_idx)
        self.layer_type = config.layers_block_type[layer_idx]

        # Allow non-MoE (dense)
        self.block_sparse_moe = GraniteMoeHybridMoE(config) if config.num_local_experts > 0 else None

        # Accept 0 experts: skip MoE if num_local_experts == 0
        self.has_experts = getattr(config, "num_local_experts", 0) > 0

    @auto_docstring
    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        past_key_values: Optional[Cache] = None,
        use_cache: Optional[bool] = False,
        cache_position: Optional[torch.LongTensor] = None,
        position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]] = None,
        **kwargs: Unpack[GraniteFlashAttentionKwargs],
    ) -> tuple[torch.FloatTensor, Optional[tuple[torch.FloatTensor, torch.FloatTensor]]]:
        # NOTE(review): hidden_states is presumably (batch, seq, hidden) as in
        # other HF decoder layers — not verifiable from this chunk alone.
        residual = hidden_states
        hidden_states = self.input_layernorm(hidden_states)

        # Token mixing: mamba or attention, depending on this layer's type.
        if self.mamba is not None:
            hidden_states = self.mamba(
                hidden_states=hidden_states,
                cache_position=cache_position,
                cache_params=past_key_values,
                attention_mask=attention_mask,
                **kwargs,
            )
        else:
            hidden_states, _ = self.self_attn(
                hidden_states=hidden_states,
                attention_mask=attention_mask,
                past_key_values=past_key_values,
                use_cache=use_cache,
                cache_position=cache_position,
                position_embeddings=position_embeddings,
                **kwargs,
            )
        # Residual connection scaled by the model's residual multiplier.
        hidden_states = residual + hidden_states * self.residual_multiplier

        residual = hidden_states
        hidden_states = self.post_attention_layernorm(hidden_states)
        # MoE output (if experts exist) is added to the shared dense MLP path.
        if self.has_experts:
            moe_hidden_states = self.block_sparse_moe(hidden_states)
            hidden_states = moe_hidden_states + self.shared_mlp(hidden_states)
        else:
            hidden_states = self.shared_mlp(hidden_states)
        hidden_states = residual + hidden_states * self.residual_multiplier

        return hidden_states
python | matplotlib__matplotlib | lib/matplotlib/_type1font.py | {
"start": 3614,
"end": 3714
class ____(_Token):
    kind = 'binary'

    def value(self):
        # Everything after the one-byte type marker is the payload.
        payload = self.raw[1:]
        return payload
python | Lightning-AI__lightning | src/lightning/pytorch/tuner/lr_finder.py | {
"start": 19144,
"end": 20378
class ____(LRScheduler):
    """Exponentially increases the learning rate between two boundaries over a number of iterations.

    Arguments:
        optimizer: wrapped optimizer.
        end_lr: the final learning rate.
        num_iter: the number of iterations over which the test occurs.
        last_epoch: the index of last epoch. Default: -1.
    """

    def __init__(self, optimizer: torch.optim.Optimizer, end_lr: float, num_iter: int, last_epoch: int = -1):
        self.end_lr = end_lr
        self.num_iter = num_iter
        super().__init__(optimizer, last_epoch)

    @override
    def get_lr(self) -> list[float]:
        # Fraction of the sweep completed after this step.
        progress = (self.last_epoch + 1) / self.num_iter
        if self.last_epoch <= 0:
            # First step: report the untouched base learning rates.
            rates = list(self.base_lrs)
        else:
            # Geometric interpolation from each base lr towards end_lr.
            rates = [base * (self.end_lr / base) ** progress for base in self.base_lrs]
        self._lr = rates
        return rates

    @property
    def lr(self) -> Union[float, list[float]]:
        return self._lr
def _try_loop_run(trainer: "pl.Trainer", params: dict[str, Any]) -> None:
loop = trainer.fit_loop
loop.load_state_dict(deepcopy(params["loop_state_dict"]))
loop.restarting = False
loop.run()
| _ExponentialLR |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/dataclassSlots1.py | {
"start": 282,
"end": 461
} | class ____:
x: int
def __init__(self):
self.x = 3
# This should generate an error because "y" is not in slots.
self.y = 3
@dataclass(slots=False)
| B |
python | apache__airflow | task-sdk/src/airflow/sdk/execution_time/comms.py | {
"start": 10271,
"end": 10575
} | class ____(BaseModel):
model_config = ConfigDict(arbitrary_types_allowed=True)
ti: TaskInstance
dag_rel_path: str
bundle_info: BundleInfo
start_date: datetime
ti_context: TIRunContext
sentry_integration: str
type: Literal["StartupDetails"] = "StartupDetails"
| StartupDetails |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/operators/vertex_ai/pipeline_job.py | {
"start": 11518,
"end": 14784
} | class ____(GoogleCloudBaseOperator):
"""
Get a Pipeline job.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param region: Required. The ID of the Google Cloud region that the service belongs to.
:param pipeline_job_id: Required. The ID of the PipelineJob resource.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
:param gcp_conn_id: The connection ID to use connecting to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields = [
"region",
"pipeline_job_id",
"project_id",
"impersonation_chain",
]
operator_extra_links = (VertexAIPipelineJobLink(),)
def __init__(
self,
*,
project_id: str,
region: str,
pipeline_job_id: str,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.region = region
self.project_id = project_id
self.pipeline_job_id = pipeline_job_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
@property
def extra_links_params(self) -> dict[str, Any]:
return {
"region": self.region,
"project_id": self.project_id,
}
def execute(self, context: Context):
hook = PipelineJobHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
try:
self.log.info("Get Pipeline job: %s", self.pipeline_job_id)
result = hook.get_pipeline_job(
project_id=self.project_id,
region=self.region,
pipeline_job_id=self.pipeline_job_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
VertexAIPipelineJobLink.persist(context=context, pipeline_id=self.pipeline_job_id)
self.log.info("Pipeline job was gotten.")
return types.PipelineJob.to_dict(result)
except NotFound:
self.log.info("The Pipeline job %s does not exist.", self.pipeline_job_id)
| GetPipelineJobOperator |
python | pennersr__django-allauth | tests/apps/socialaccount/providers/tumblr/tests.py | {
"start": 239,
"end": 1100
} | class ____(OAuthTestsMixin, TestCase):
provider_id = TumblrProvider.id
def get_mocked_response(self):
return [
MockedResponse(
HTTPStatus.OK,
"""
{
"meta": {
"status": 200,
"msg": "OK"
},
"response": {
"user": {
"following": 263,
"default_post_format": "html",
"name": "derekg",
"likes": 606,
"blogs": [
{
"name": "derekg",
"title": "Derek Gottfrid",
"url": "http://derekg.org/",
"tweet": "auto",
"primary": true,
"followers": 33004929
},
{
"name": "ihatehipstrz",
"title": "I Hate Hipstrz"
}
]
}
} }
""",
)
]
def get_expected_to_str(self):
return "derekg"
| TumblrTests |
python | django-extensions__django-extensions | tests/test_compat.py | {
"start": 142,
"end": 975
} | class ____(TestCase):
@override_settings(TEMPLATES=None)
def test_should_return_None_by_default_if_TEMPLATES_setting_is_none(self):
self.assertIsNone(get_template_setting("template_key"))
@override_settings(TEMPLATES=None)
def test_should_return_default_if_TEMPLATES_setting_is_none(self):
self.assertEqual(get_template_setting("template_key", "test"), "test")
@override_settings(
TEMPLATES=[
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": ["templates"],
"APP_DIRS": True,
}
]
)
def test_should_return_value_for_key(self):
self.assertEqual(
get_template_setting("BACKEND"),
"django.template.backends.django.DjangoTemplates",
)
| CompatTests |
python | ansible__ansible | test/lib/ansible_test/_internal/host_configs.py | {
"start": 14082,
"end": 14968
} | class ____(RemoteConfig, NetworkConfig):
"""Configuration for a remote network host."""
collection: t.Optional[str] = None
connection: t.Optional[str] = None
def get_defaults(self, context: HostContext) -> NetworkRemoteCompletionConfig:
"""Return the default settings."""
return filter_completion(network_completion()).get(self.name) or NetworkRemoteCompletionConfig(
name=self.name,
placeholder=True,
)
def apply_defaults(self, context: HostContext, defaults: CompletionConfig) -> None:
"""Apply default settings."""
assert isinstance(defaults, NetworkRemoteCompletionConfig)
super().apply_defaults(context, defaults)
self.collection = self.collection or defaults.collection
self.connection = self.connection or defaults.connection
@dataclasses.dataclass
| NetworkRemoteConfig |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/definitions/asset_selection.py | {
"start": 44027,
"end": 44729
} | class ____(AssetSelection):
"""Used to represent a UI asset selection by changed in branch metadata. This should not be resolved against
an in-process asset graph.
"""
selected_changed_in_branch: Optional[str]
def resolve_inner(
self, asset_graph: BaseAssetGraph, allow_missing: bool
) -> AbstractSet[AssetKey]:
"""This should not be invoked in user code."""
raise NotImplementedError
def to_selection_str(self) -> str:
if self.selected_changed_in_branch is None:
return "changed_in_branch:<null>"
return f'changed_in_branch:"{self.selected_changed_in_branch}"'
@whitelist_for_serdes
@record
| ChangedInBranchAssetSelection |
python | hynek__structlog | src/structlog/_output.py | {
"start": 6211,
"end": 8617
} | class ____:
r"""
Writes bytes into a file.
Args:
file: File to print to. (default: `sys.stdout`\ ``.buffer``)
Useful if you follow `current logging best practices
<logging-best-practices>` together with a formatter that returns bytes
(e.g. `orjson <https://github.com/ijl/orjson>`_).
.. versionadded:: 20.2.0
"""
__slots__ = ("_file", "_flush", "_lock", "_write")
def __init__(self, file: BinaryIO | None = None):
self._file = file or sys.stdout.buffer
self._write = self._file.write
self._flush = self._file.flush
self._lock = _get_lock_for_file(self._file)
def __getstate__(self) -> str:
"""
Our __getattr__ magic makes this necessary.
"""
if self._file is sys.stdout.buffer:
return "stdout"
if self._file is sys.stderr.buffer:
return "stderr"
raise PicklingError(
"Only BytesLoggers to sys.stdout and sys.stderr can be pickled."
)
def __setstate__(self, state: Any) -> None:
"""
Our __getattr__ magic makes this necessary.
"""
if state == "stdout":
self._file = sys.stdout.buffer
else:
self._file = sys.stderr.buffer
self._write = self._file.write
self._flush = self._file.flush
self._lock = _get_lock_for_file(self._file)
def __deepcopy__(self, memodict: dict[str, object]) -> BytesLogger:
"""
Create a new BytesLogger with the same attributes. Similar to pickling.
"""
if self._file not in (sys.stdout.buffer, sys.stderr.buffer):
raise copy.error(
"Only BytesLoggers to sys.stdout and sys.stderr "
"can be deepcopied."
)
newself = self.__class__(self._file)
newself._write = newself._file.write
newself._flush = newself._file.flush
newself._lock = _get_lock_for_file(newself._file)
return newself
def __repr__(self) -> str:
return f"<BytesLogger(file={self._file!r})>"
def msg(self, message: bytes) -> None:
"""
Write *message*.
"""
with self._lock:
self._write(message + b"\n")
self._flush()
log = debug = info = warn = warning = msg
fatal = failure = err = error = critical = exception = msg
| BytesLogger |
python | ray-project__ray | release/ray_release/exception.py | {
"start": 3862,
"end": 3944
} | class ____(ReleaseTestError):
exit_code = ExitCode.ANYSCALE_ERROR
| JobBrokenError |
python | pyqtgraph__pyqtgraph | pyqtgraph/widgets/TreeWidget.py | {
"start": 8940,
"end": 13168
} | class ____(QtWidgets.QTreeWidgetItem):
"""
TreeWidgetItem that keeps track of its own widgets and expansion state.
* Widgets may be added to columns before the item is added to a tree.
* Expanded state may be set before item is added to a tree.
* Adds setCheked and isChecked methods.
* Adds addChildren, insertChildren, and takeChildren methods.
"""
def __init__(self, *args):
QtWidgets.QTreeWidgetItem.__init__(self, *args)
self._widgets = {} # col: widget
self._tree = None
self._expanded = False
def setChecked(self, column, checked):
self.setCheckState(column, QtCore.Qt.CheckState.Checked if checked else QtCore.Qt.CheckState.Unchecked)
def isChecked(self, col):
return self.checkState(col) == QtCore.Qt.CheckState.Checked
def setExpanded(self, exp):
self._expanded = exp
QtWidgets.QTreeWidgetItem.setExpanded(self, exp)
def isExpanded(self):
return self._expanded
def setWidget(self, column, widget):
if column in self._widgets:
self.removeWidget(column)
self._widgets[column] = widget
tree = self.treeWidget()
if tree is None:
return
else:
tree.setItemWidget(self, column, widget)
def removeWidget(self, column):
del self._widgets[column]
tree = self.treeWidget()
if tree is None:
return
tree.removeItemWidget(self, column)
def treeWidgetChanged(self):
tree = self.treeWidget()
if self._tree is tree:
return
self._tree = self.treeWidget()
if tree is None:
return
for col, widget in self._widgets.items():
tree.setItemWidget(self, col, widget)
QtWidgets.QTreeWidgetItem.setExpanded(self, self._expanded)
def childItems(self):
return [self.child(i) for i in range(self.childCount())]
def addChild(self, child):
QtWidgets.QTreeWidgetItem.addChild(self, child)
TreeWidget.informTreeWidgetChange(child)
def addChildren(self, childs):
QtWidgets.QTreeWidgetItem.addChildren(self, childs)
for child in childs:
TreeWidget.informTreeWidgetChange(child)
def insertChild(self, index, child):
QtWidgets.QTreeWidgetItem.insertChild(self, index, child)
TreeWidget.informTreeWidgetChange(child)
def insertChildren(self, index, childs):
QtWidgets.QTreeWidgetItem.addChildren(self, index, childs)
for child in childs:
TreeWidget.informTreeWidgetChange(child)
def removeChild(self, child):
QtWidgets.QTreeWidgetItem.removeChild(self, child)
TreeWidget.informTreeWidgetChange(child)
def takeChild(self, index):
child = QtWidgets.QTreeWidgetItem.takeChild(self, index)
TreeWidget.informTreeWidgetChange(child)
return child
def takeChildren(self):
childs = QtWidgets.QTreeWidgetItem.takeChildren(self)
for child in childs:
TreeWidget.informTreeWidgetChange(child)
return childs
def setData(self, column, role, value):
# credit: ekhumoro
# http://stackoverflow.com/questions/13662020/how-to-implement-itemchecked-and-itemunchecked-signals-for-qtreewidget-in-pyqt4
checkstate = self.checkState(column)
text = self.text(column)
QtWidgets.QTreeWidgetItem.setData(self, column, role, value)
treewidget = self.treeWidget()
if treewidget is None:
return
if (role == QtCore.Qt.ItemDataRole.CheckStateRole and checkstate != self.checkState(column)):
treewidget.sigItemCheckStateChanged.emit(self, column)
elif (role in (QtCore.Qt.ItemDataRole.DisplayRole, QtCore.Qt.ItemDataRole.EditRole) and text != self.text(column)):
treewidget.sigItemTextChanged.emit(self, column)
def itemClicked(self, col):
"""Called when this item is clicked on.
Override this method to react to user clicks.
"""
| TreeWidgetItem |
python | astropy__astropy | astropy/io/votable/exceptions.py | {
"start": 23855,
"end": 24314
} | class ____(VOTableSpecWarning):
"""
The ``COOSYS`` element was deprecated in VOTABLE version 1.2 in
favor of a reference to the Space-Time Coordinate (STC) data
model (see `utype
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#sec:utype>`__
and the IVOA note `referencing STC in VOTable
<http://ivoa.net/Documents/latest/VOTableSTC.html>`__.
"""
message_template = "COOSYS deprecated in VOTable 1.2"
| W27 |
python | allegroai__clearml | clearml/backend_interface/base.py | {
"start": 6315,
"end": 7832
} | class ____(InterfaceBase):
def __init__(self, id: str, session: Session = None, log: logging.Logger = None, **kwargs: Any) -> None:
super(IdObjectBase, self).__init__(session, log, **kwargs)
self._data = None
self._id = None
self.id = self.normalize_id(id)
@property
def id(self) -> Any:
return self._id
@id.setter
def id(self, value: str) -> None:
should_reload = value is not None and self._id is not None and value != self._id
self._id = value
if should_reload:
self.reload()
@property
def data(self) -> Any:
if self._data is None:
self.reload()
return self._data
@abc.abstractmethod
def _reload(self) -> None:
pass
def reload(self) -> None:
if not self.id and not self._offline_mode:
raise ValueError("Failed reloading %s: missing id" % type(self).__name__)
# noinspection PyBroadException
try:
self._data = self._reload()
except Exception as ex:
self.log.error("Failed reloading {} {}".format(type(self).__name__.lower(), self.id))
self.log.debug("Failed reloading {} {}: {}".format(type(self).__name__.lower(), self.id, ex))
@classmethod
def normalize_id(cls, id: str) -> str:
return id.strip() if id else None
@classmethod
def resolve_id(cls, obj: Any) -> Any:
if isinstance(obj, cls):
return obj.id
return obj
| IdObjectBase |
python | apache__airflow | providers/google/tests/unit/google/cloud/operators/test_managed_kafka.py | {
"start": 2772,
"end": 3966
} | class ____:
@mock.patch(MANAGED_KAFKA_PATH.format("types.Cluster.to_dict"))
@mock.patch(MANAGED_KAFKA_PATH.format("ManagedKafkaHook"))
def test_execute(self, mock_hook, to_dict_mock):
op = ManagedKafkaCreateClusterOperator(
task_id=TASK_ID,
gcp_conn_id=GCP_CONN_ID,
impersonation_chain=IMPERSONATION_CHAIN,
location=GCP_LOCATION,
project_id=GCP_PROJECT,
cluster=TEST_CLUSTER,
cluster_id=TEST_CLUSTER_ID,
request_id=None,
retry=RETRY,
timeout=TIMEOUT,
metadata=METADATA,
)
op.execute(context={"ti": mock.MagicMock(), "task": mock.MagicMock()})
mock_hook.assert_called_once_with(gcp_conn_id=GCP_CONN_ID, impersonation_chain=IMPERSONATION_CHAIN)
mock_hook.return_value.create_cluster.assert_called_once_with(
location=GCP_LOCATION,
project_id=GCP_PROJECT,
cluster=TEST_CLUSTER,
cluster_id=TEST_CLUSTER_ID,
request_id=None,
retry=RETRY,
timeout=TIMEOUT,
metadata=METADATA,
)
| TestManagedKafkaCreateClusterOperator |
python | PrefectHQ__prefect | tests/deployment/test_steps.py | {
"start": 1235,
"end": 8892
} | class ____:
async def test_run_step_runs_importable_functions(self):
output = await run_step(
{
"prefect.deployments.steps.run_shell_script": {
"script": "echo 'this is a test'",
}
},
)
assert isinstance(output, dict)
assert output == {
"stdout": "this is a test",
"stderr": "",
}
async def test_run_step_errors_with_improper_format(self):
with pytest.raises(ValueError, match="unexpected"):
await run_step(
{
"prefect.deployments.steps.run_shell_script": {
"script": "echo 'this is a test'"
},
"jedi": 0,
}
)
async def test_run_step_resolves_block_document_references_before_running(self):
await Secret(value="echo 'I am a secret!'").save(name="test-secret")
output = await run_step(
{
"prefect.deployments.steps.run_shell_script": {
"script": "{{ prefect.blocks.secret.test-secret }}",
}
}
)
assert isinstance(output, dict)
assert output == {
"stdout": "I am a secret!",
"stderr": "",
}
async def test_run_step_resolves_environment_variables_before_running(
self, monkeypatch
):
monkeypatch.setenv("TEST_ENV_VAR", "test_value")
output = await run_step(
{
"prefect.deployments.steps.run_shell_script": {
"script": 'echo "{{ $TEST_ENV_VAR }}"',
}
}
)
assert isinstance(output, dict)
assert output == {
"stdout": "test_value",
"stderr": "",
}
async def test_run_step_resolves_variables_before_running(self, variables):
output = await run_step(
{
"prefect.deployments.steps.run_shell_script": {
"script": (
"echo '{{ prefect.variables.test_variable_1 }}:{{"
" prefect.variables.test_variable_2 }}'"
),
}
}
)
assert isinstance(output, dict)
assert output == {
"stdout": "test_value_1:test_value_2",
"stderr": "",
}
async def test_run_step_runs_async_functions(self):
output = await run_step(
{
"anyio.run_process": {
"command": ["echo", "hello world"],
}
}
)
assert output.returncode == 0
assert output.stdout.decode().strip() == "hello world"
async def test_requirement_installation_successful(self, monkeypatch):
"""
Test that the function attempts to install the package and succeeds.
"""
import_module_mock = MagicMock()
monkeypatch.setattr(
"prefect.deployments.steps.core.import_module", import_module_mock
)
monkeypatch.setattr(subprocess, "check_call", MagicMock())
import_object_mock = MagicMock(side_effect=[ImportError, lambda x: x])
monkeypatch.setattr(
"prefect.deployments.steps.core.import_object", import_object_mock
)
await run_step(
{"test_module.test_function": {"requires": "test-package>=1.0.0", "x": 1}}
)
import_module_mock.assert_called_once_with("test_package")
assert (
import_object_mock.call_count == 2
) # once before and once after installation
subprocess.check_call.assert_called_once_with(
[uv.find_uv_bin(), "pip", "install", "test-package>=1.0.0"],
stdout=sys.stdout,
stderr=sys.stderr,
)
@pytest.mark.parametrize(
"package,expected",
[
("prefect-aws", "prefect[aws]"),
("prefect-gcp", "prefect[gcp]"),
("prefect-azure", "prefect[azure]"),
("prefect-docker", "prefect[docker]"),
("prefect-kubernetes", "prefect[kubernetes]"),
],
)
async def test_requirement_installation_uses_prefect_extras(
self, monkeypatch, package, expected
):
import_module_mock = MagicMock()
monkeypatch.setattr(
"prefect.deployments.steps.core.import_module", import_module_mock
)
monkeypatch.setattr(subprocess, "check_call", MagicMock())
import_object_mock = MagicMock(side_effect=[ImportError, lambda x: x])
monkeypatch.setattr(
"prefect.deployments.steps.core.import_object", import_object_mock
)
await run_step({"test_module.test_function": {"requires": package, "x": 1}})
import_module_mock.assert_called_once_with(package.replace("-", "_"))
assert (
import_object_mock.call_count == 2
) # once before and once after installation
subprocess.check_call.assert_called_once_with(
[uv.find_uv_bin(), "pip", "install", expected],
stdout=sys.stdout,
stderr=sys.stderr,
)
async def test_install_multiple_requirements(self, monkeypatch):
"""
Test that passing multiple requirements installs all of them.
"""
import_module_mock = MagicMock(side_effect=[None, ImportError])
monkeypatch.setattr(
"prefect.deployments.steps.core.import_module", import_module_mock
)
monkeypatch.setattr(subprocess, "check_call", MagicMock())
import_object_mock = MagicMock(side_effect=[lambda x: x])
monkeypatch.setattr(
"prefect.deployments.steps.core.import_object", import_object_mock
)
await run_step(
{
"test_module.test_function": {
"requires": ["test-package>=1.0.0", "another"],
"x": 1,
}
}
)
import_module_mock.assert_has_calls([call("test_package"), call("another")])
subprocess.check_call.assert_called_once_with(
[uv.find_uv_bin(), "pip", "install", "test-package>=1.0.0", "another"],
stdout=sys.stdout,
stderr=sys.stderr,
)
async def test_requirement_installation_failure(self, monkeypatch, caplog):
"""
Test that the function logs a warning if it fails to install the package.
"""
# Mocking the import_module function to always raise ImportError
monkeypatch.setattr(
"prefect.deployments.steps.core.import_module",
MagicMock(side_effect=ImportError),
)
# Mock subprocess.check_call to simulate failed package installation
monkeypatch.setattr(
subprocess,
"check_call",
MagicMock(side_effect=subprocess.CalledProcessError(1, ["pip"])),
)
with pytest.raises(ImportError):
await run_step(
{
"test_module.test_function": {
"requires": "nonexistent-package>=1.0.0"
}
}
)
assert subprocess.check_call.called
record = next(
(
record
for record in caplog.records
if "Unable to install required packages" in record.message
),
None,
)
assert record is not None, "No warning was logged"
assert record.levelname == "WARNING"
| TestRunStep |
python | dagster-io__dagster | python_modules/dagster-graphql/dagster_graphql/schema/entity_key.py | {
"start": 355,
"end": 702
} | class ____(graphene.ObjectType):
name = graphene.NonNull(graphene.String)
assetKey = graphene.NonNull(GrapheneAssetKey)
class Meta:
name = "AssetCheckhandle"
def __init__(self, handle: AssetCheckKey):
super().__init__(name=handle.name, assetKey=GrapheneAssetKey(path=handle.asset_key.path))
| GrapheneAssetCheckHandle |
python | cython__cython | pyximport/pyximport.py | {
"start": 11579,
"end": 13606
} | class ____(ExtensionFileLoader):
def __init__(self, filename, pyxbuild_dir, inplace, language_level):
module_name = os.path.splitext(os.path.basename(filename))[0]
super().__init__(module_name, filename)
self._pyxbuild_dir = pyxbuild_dir
self._inplace = inplace
self._language_level = language_level
def create_module(self, spec):
try:
so_path = build_module(spec.name, pyxfilename=spec.origin, pyxbuild_dir=self._pyxbuild_dir,
inplace=self._inplace, language_level=self._language_level)
self.path = so_path
spec.origin = so_path
return super().create_module(spec)
except Exception as failure_exc:
_debug("Failed to load extension module: %r" % failure_exc)
if pyxargs.load_py_module_on_import_failure and spec.origin.endswith(PY_EXT):
spec = importlib.util.spec_from_file_location(spec.name, spec.origin,
loader=SourceFileLoader(spec.name, spec.origin))
mod = importlib.util.module_from_spec(spec)
assert mod.__file__ in (spec.origin, spec.origin + 'c', spec.origin + 'o'), (mod.__file__, spec.origin)
return mod
else:
tb = sys.exc_info()[2]
import traceback
exc = ImportError("Building module %s failed: %s" % (
spec.name, traceback.format_exception_only(*sys.exc_info()[:2])))
raise exc.with_traceback(tb)
def exec_module(self, module):
try:
return super().exec_module(module)
except Exception as failure_exc:
import traceback
_debug("Failed to load extension module: %r" % failure_exc)
raise ImportError("Executing module %s failed %s" % (
module.__file__, traceback.format_exception_only(*sys.exc_info()[:2])))
#install args
| PyxImportLoader |
python | pytest-dev__pytest-django | pytest_django/fixtures.py | {
"start": 23340,
"end": 23946
} | class ____(Protocol):
"""The type of the `django_capture_on_commit_callbacks` fixture."""
def __call__(
self,
*,
using: str = ...,
execute: bool = ...,
) -> AbstractContextManager[list[Callable[[], Any]]]:
pass # pragma: no cover
@pytest.fixture
def django_capture_on_commit_callbacks() -> DjangoCaptureOnCommitCallbacks:
"""Captures transaction.on_commit() callbacks for the given database connection."""
from django.test import TestCase
return TestCase.captureOnCommitCallbacks # type: ignore[no-any-return]
| DjangoCaptureOnCommitCallbacks |
python | wandb__wandb | tests/system_tests/test_core/test_torch_full.py | {
"start": 1825,
"end": 2297
} | class ____(nn.Module):
def __init__(self):
super().__init__()
self.params = nn.ParameterList(
[nn.Parameter(torch.ones(10, 10)) for i in range(10)]
)
self.otherparam = nn.Parameter(torch.Tensor(5))
def forward(self, x):
# ParameterList can act as an iterable, or be indexed using ints
for i, p in enumerate(self.params):
x = self.params[i // 2].mm(x) + p.mm(x)
return x
| ParameterModule |
python | dagster-io__dagster | python_modules/libraries/dagster-dbt/dagster_dbt/cloud_v2/run_handler.py | {
"start": 831,
"end": 2449
} | class ____:
"""Handles the process of a dbt Cloud job run."""
job_id: int
run_id: int
args: Sequence[str]
client: DbtCloudWorkspaceClient
@classmethod
def run(
cls, job_id: int, args: Sequence[str], client: DbtCloudWorkspaceClient
) -> "DbtCloudJobRunHandler":
run_details = client.trigger_job_run(job_id, steps_override=[" ".join(["dbt", *args])])
dbt_cloud_run = DbtCloudRun.from_run_details(run_details=run_details)
return DbtCloudJobRunHandler(
job_id=job_id,
run_id=dbt_cloud_run.id,
args=args,
client=client,
)
def wait(self, timeout: Optional[float] = None) -> DbtCloudRun:
run_details = self.client.poll_run(run_id=self.run_id, poll_timeout=timeout)
dbt_cloud_run = DbtCloudRun.from_run_details(run_details=run_details)
return dbt_cloud_run
def get_run_results(self) -> Mapping[str, Any]:
return self.client.get_run_results_json(run_id=self.run_id)
def get_manifest(self) -> Mapping[str, Any]:
return self.client.get_run_manifest_json(run_id=self.run_id)
def list_run_artifacts(self) -> Sequence[str]:
return self.client.list_run_artifacts(run_id=self.run_id)
def get_completed_at_timestamp(result: Mapping[str, Any]) -> float:
# result["timing"] is a list of events in run_results.json
# For successful models and passing tests,
# the last item of that list includes the timing details of the execution.
return parser.parse(result["timing"][-1]["completed_at"]).timestamp()
@record
| DbtCloudJobRunHandler |
python | django__django | django/core/management/commands/test.py | {
"start": 287,
"end": 2367
} | class ____(BaseCommand):
help = "Discover and run tests in the specified modules or the current directory."
# DiscoverRunner runs the checks after databases are set up.
requires_system_checks = []
test_runner = None
def run_from_argv(self, argv):
"""
Pre-parse the command line to extract the value of the --testrunner
option. This allows a test runner to define additional command line
arguments.
"""
self.test_runner = get_command_line_option(argv, "--testrunner")
super().run_from_argv(argv)
def add_arguments(self, parser):
parser.add_argument(
"args",
metavar="test_label",
nargs="*",
help=(
"Module paths to test; can be modulename, modulename.TestCase or "
"modulename.TestCase.test_method"
),
)
parser.add_argument(
"--noinput",
"--no-input",
action="store_false",
dest="interactive",
help="Tells Django to NOT prompt the user for input of any kind.",
)
parser.add_argument(
"--testrunner",
help="Tells Django to use specified test runner class instead of "
"the one specified by the TEST_RUNNER setting.",
)
test_runner_class = get_runner(settings, self.test_runner)
if hasattr(test_runner_class, "add_arguments"):
test_runner_class.add_arguments(parser)
def handle(self, *test_labels, **options):
TestRunner = get_runner(settings, options["testrunner"])
time_keeper = TimeKeeper() if options.get("timing", False) else NullTimeKeeper()
parallel = options.get("parallel")
if parallel == "auto":
options["parallel"] = get_max_test_processes()
test_runner = TestRunner(**options)
with time_keeper.timed("Total run"):
failures = test_runner.run_tests(test_labels)
time_keeper.print_results()
if failures:
sys.exit(1)
| Command |
python | google__pytype | pytype/typegraph/cfg_utils_test.py | {
"start": 2183,
"end": 7547
} | class ____(unittest.TestCase):
"""Test variable-product utilities."""
def setUp(self):
super().setUp()
self.prog = cfg.Program()
self.current_location = self.prog.NewCFGNode()
def test_complexity_limit(self):
limit = cfg_utils.ComplexityLimit(5)
limit.inc()
limit.inc(2)
limit.inc()
self.assertRaises(cfg_utils.TooComplexError, limit.inc)
def test_variable_product(self):
u1 = self.prog.NewVariable([1, 2], [], self.current_location)
u2 = self.prog.NewVariable([3, 4], [], self.current_location)
product = cfg_utils.variable_product([u1, u2])
pairs = [[a.data for a in d] for d in product]
self.assertCountEqual(
pairs,
[
[1, 3],
[1, 4],
[2, 3],
[2, 4],
],
)
def test_deep_variable_product_raises(self):
x1, x2 = (DummyValue(i + 1) for i in range(2))
v1 = self.prog.NewVariable([x1, x2], [], self.current_location)
v2 = self.prog.NewVariable([x1, x2], [], self.current_location)
v3 = self.prog.NewVariable([x1, x2], [], self.current_location)
v4 = self.prog.NewVariable([x1, x2], [], self.current_location)
v5 = self.prog.NewVariable([x1, x2], [], self.current_location)
v6 = self.prog.NewVariable([x1, x2], [], self.current_location)
v7 = self.prog.NewVariable([x1, x2], [], self.current_location)
v8 = self.prog.NewVariable([x1, x2], [], self.current_location)
self.assertRaises(
cfg_utils.TooComplexError,
cfg_utils.deep_variable_product,
[v1, v2, v3, v4, v5, v6, v7, v8],
256,
)
def test_deep_variable_product_raises2(self):
x1, x2, x3, x4 = (DummyValue(i + 1) for i in range(4))
v1 = self.prog.NewVariable([x1, x2], [], self.current_location)
v2 = self.prog.NewVariable([x1, x2], [], self.current_location)
v3 = self.prog.NewVariable([x3, x4], [], self.current_location)
v4 = self.prog.NewVariable([x3, x4], [], self.current_location)
x1.set_parameters([v3])
x2.set_parameters([v4])
self.assertRaises(
cfg_utils.TooComplexError, cfg_utils.deep_variable_product, [v1, v2], 4
)
def test_variable_product_dict_raises(self):
values = [DummyValue(i + 1) for i in range(4)]
v1 = self.prog.NewVariable(values, [], self.current_location)
v2 = self.prog.NewVariable(values, [], self.current_location)
v3 = self.prog.NewVariable(values, [], self.current_location)
v4 = self.prog.NewVariable(values, [], self.current_location)
variabledict = {"v1": v1, "v2": v2, "v3": v3, "v4": v4}
self.assertRaises(
cfg_utils.TooComplexError,
cfg_utils.variable_product_dict,
variabledict,
4,
)
def test_deep_variable_product(self):
x1, x2, x3, x4, x5, x6 = (DummyValue(i + 1) for i in range(6))
v1 = self.prog.NewVariable([x1, x2], [], self.current_location)
v2 = self.prog.NewVariable([x3], [], self.current_location)
v3 = self.prog.NewVariable([x4, x5], [], self.current_location)
v4 = self.prog.NewVariable([x6], [], self.current_location)
x1.set_parameters([v2, v3])
product = cfg_utils.deep_variable_product([v1, v4])
rows = [{a.data for a in row} for row in product]
self.assertCountEqual(
rows,
[
{x1, x3, x4, x6},
{x1, x3, x5, x6},
{x2, x6},
],
)
def test_deep_variable_product_with_empty_variables(self):
x1 = DummyValue(1)
v1 = self.prog.NewVariable([x1], [], self.current_location)
v2 = self.prog.NewVariable([], [], self.current_location)
x1.set_parameters([v2])
product = cfg_utils.deep_variable_product([v1])
rows = [{a.data for a in row} for row in product]
self.assertCountEqual(rows, [{x1}])
def test_deep_variable_product_with_empty_top_layer(self):
x1 = DummyValue(1)
v1 = self.prog.NewVariable([x1], [], self.current_location)
v2 = self.prog.NewVariable([], [], self.current_location)
product = cfg_utils.deep_variable_product([v1, v2])
rows = [{a.data for a in row} for row in product]
self.assertCountEqual(rows, [{x1}])
def test_deep_variable_product_with_cycle(self):
x1, x2, x3, x4, x5, x6 = (DummyValue(i + 1) for i in range(6))
v1 = self.prog.NewVariable([x1, x2], [], self.current_location)
v2 = self.prog.NewVariable([x3], [], self.current_location)
v3 = self.prog.NewVariable([x4, x5], [], self.current_location)
v4 = self.prog.NewVariable([x6], [], self.current_location)
x1.set_parameters([v2, v3])
x5.set_parameters([v1])
product = cfg_utils.deep_variable_product([v1, v4])
rows = [{a.data for a in row} for row in product]
self.assertCountEqual(
rows,
[
{x1, x3, x4, x6},
{x1, x2, x3, x5, x6},
{x1, x3, x5, x6},
{x2, x6},
],
)
def test_variable_product_dict(self):
u1 = self.prog.NewVariable([1, 2], [], self.current_location)
u2 = self.prog.NewVariable([3, 4], [], self.current_location)
product = cfg_utils.variable_product_dict({"a": u1, "b": u2})
pairs = [{k: a.data for k, a in d.items()} for d in product]
self.assertCountEqual(
pairs,
[
{"a": 1, "b": 3},
{"a": 1, "b": 4},
{"a": 2, "b": 3},
{"a": 2, "b": 4},
],
)
| VariableProductTest |
python | sympy__sympy | sympy/physics/secondquant.py | {
"start": 10190,
"end": 11384
} | class ____(BosonicOperator, Creator):
"""
Bosonic creation operator.
"""
op_symbol = 'b+'
def _dagger_(self):
return AnnihilateBoson(self.state)
def apply_operator(self, state):
"""
Apply state to self if self is not symbolic and state is a FockStateKet, else
multiply self by state.
Examples
========
>>> from sympy.physics.secondquant import B, Dagger, BKet
>>> from sympy.abc import x, y, n
>>> Dagger(B(x)).apply_operator(y)
y*CreateBoson(x)
>>> B(0).apply_operator(BKet((n,)))
sqrt(n)*FockStateBosonKet((n - 1,))
"""
if not self.is_symbolic and isinstance(state, FockStateKet):
element = self.state
amp = sqrt(state[element] + 1)
return amp*state.up(element)
else:
return Mul(self, state)
def __repr__(self):
return "CreateBoson(%s)" % self.state
def _latex(self, printer):
if self.state is S.Zero:
return "{b^\\dagger_{0}}"
else:
return "{b^\\dagger_{%s}}" % printer._print(self.state)
B = AnnihilateBoson
Bd = CreateBoson
| CreateBoson |
python | getsentry__sentry | tests/snuba/sessions/test_sessions.py | {
"start": 45597,
"end": 55511
} | class ____(TestCase, BaseMetricsTestCase):
backend = MetricsReleaseHealthBackend()
def setUp(self) -> None:
super().setUp()
# now_dt should be set to 17:40 of some day not in the future and (system time - now_dt)
# must be less than 90 days for the metrics DB TTL
ONE_DAY_AGO = timezone.now() - timedelta(days=1)
self.now_dt = ONE_DAY_AGO.replace(hour=17, minute=40, second=0)
self._5_min_ago_dt = self.now_dt - timedelta(minutes=5)
self._30_min_ago_dt = self.now_dt - timedelta(minutes=30)
self._1_h_ago_dt = self.now_dt - timedelta(hours=1)
self._2_h_ago_dt = self.now_dt - timedelta(hours=2)
self._3_h_ago_dt = self.now_dt - timedelta(hours=3)
self.now = self.now_dt.timestamp()
self._5_min_ago = self._5_min_ago_dt.timestamp()
self._30_min_ago = self._30_min_ago_dt.timestamp()
self._1_h_ago = self._1_h_ago_dt.timestamp()
self._2_h_ago = self._2_h_ago_dt.timestamp()
self._3_h_ago = self._3_h_ago_dt.timestamp()
def test_no_sessions(self) -> None:
"""
Tests that when there are no sessions the function behaves and returns 0
"""
actual = self.backend.get_project_sessions_count(
project_id=self.project.id,
environment_id=None,
rollup=60,
start=self._30_min_ago_dt,
end=self.now_dt,
)
assert 0 == actual
def test_sessions_in_environment(self) -> None:
"""
Tests that it correctly picks up the sessions for the selected environment
in the selected time, not counting other environments and other times
"""
prod_env = self.create_environment(name="production", project=self.project)
self.bulk_store_sessions(
[
self.build_session(
environment="development", received=self._5_min_ago, started=self._5_min_ago
),
self.build_session(
environment="production", received=self._5_min_ago, started=self._5_min_ago
),
self.build_session(
environment="production", received=self._5_min_ago, started=self._5_min_ago
),
self.build_session(
environment="production", received=self._2_h_ago, started=self._2_h_ago
),
]
)
prod_session_count = self.backend.get_project_sessions_count(
project_id=self.project.id,
environment_id=prod_env.id,
rollup=60,
start=self._1_h_ago_dt,
end=self.now_dt,
)
assert prod_session_count == 2
def test_environment_without_sessions(self) -> None:
"""
We should get zero sessions, even if the environment name has not been indexed
by the metrics indexer.
"""
env_without_sessions = self.create_environment(
name="this_has_no_sessions", project=self.project
)
self.bulk_store_sessions(
[
self.build_session(
environment="production",
received=self._5_min_ago,
started=self._5_min_ago,
),
self.build_session(
environment=None, received=self._5_min_ago, started=self._5_min_ago
),
]
)
count_env_all = self.backend.get_project_sessions_count(
project_id=self.project.id,
environment_id=None,
rollup=60,
start=self._1_h_ago_dt,
end=self.now_dt,
)
assert count_env_all == 2
count_env_new = self.backend.get_project_sessions_count(
project_id=self.project.id,
environment_id=env_without_sessions.id,
rollup=60,
start=self._1_h_ago_dt,
end=self.now_dt,
)
assert count_env_new == 0
def test_sessions_in_all_environments(self) -> None:
"""
When the environment is not specified sessions from all environments are counted
"""
self.bulk_store_sessions(
[
self.build_session(
environment="development", received=self._5_min_ago, started=self._5_min_ago
),
self.build_session(
environment="production", received=self._5_min_ago, started=self._5_min_ago
),
self.build_session(
environment="production", received=self._5_min_ago, started=self._5_min_ago
),
self.build_session(
environment="production", received=self._2_h_ago, started=self._2_h_ago
),
self.build_session(
environment="development", received=self._2_h_ago, started=self._2_h_ago
),
]
)
actual = self.backend.get_project_sessions_count(
project_id=self.project.id,
environment_id=None,
rollup=60,
start=self._1_h_ago_dt,
end=self.now_dt,
)
assert actual == 3
def test_sessions_from_multiple_projects(self) -> None:
"""
Only sessions from the specified project are considered
"""
self.project_2 = self.create_project()
self.bulk_store_sessions(
[
self.build_session(
environment="development", received=self._5_min_ago, started=self._5_min_ago
),
self.build_session(
environment="production", received=self._5_min_ago, started=self._5_min_ago
),
self.build_session(
environment="production",
received=self._5_min_ago,
project_id=self.project_2.id,
started=self._5_min_ago,
),
]
)
actual = self.backend.get_project_sessions_count(
project_id=self.project.id,
environment_id=None,
rollup=60,
start=self._1_h_ago_dt,
end=self.now_dt,
)
assert actual == 2
def test_sessions_per_project_no_sessions(self) -> None:
"""
Tests that no sessions are returned
"""
self.project_2 = self.create_project()
actual = self.backend.get_num_sessions_per_project(
project_ids=[self.project.id, self.project_2.id],
environment_ids=None,
start=self._30_min_ago_dt,
end=self.now_dt,
)
assert [] == actual
def test_sesions_per_project_multiple_projects(self) -> None:
dev_env = self.create_environment(name="development", project=self.project)
prod_env = self.create_environment(name="production", project=self.project)
dev = "development"
prod = "production"
test = "test"
project_1 = self.project
project_2 = self.create_project()
project_3 = self.create_project()
self.bulk_store_sessions(
[
# counted in p1
self.build_session(
environment=dev, received=self._5_min_ago, started=self._5_min_ago
),
self.build_session(
environment=prod, received=self._5_min_ago, started=self._5_min_ago
),
self.build_session(
environment=dev, received=self._30_min_ago, started=self._30_min_ago
),
# ignored in p1
# ignored env
self.build_session(
environment=test, received=self._30_min_ago, started=self._30_min_ago
),
# too old
self.build_session(environment=prod, received=self._3_h_ago, started=self._3_h_ago),
# counted in p2
self.build_session(
environment=dev,
received=self._5_min_ago,
project_id=project_2.id,
started=self._5_min_ago,
),
# ignored in p2
# ignored env
self.build_session(
environment=test,
received=self._5_min_ago,
project_id=project_2.id,
started=self._5_min_ago,
),
# too old
self.build_session(
environment=prod,
received=self._3_h_ago,
project_id=project_2.id,
started=self._3_h_ago,
),
# ignored p3
self.build_session(
environment=dev,
received=self._5_min_ago,
project_id=project_3.id,
started=self._5_min_ago,
),
]
)
actual = self.backend.get_num_sessions_per_project(
project_ids=[project_1.id, project_2.id],
environment_ids=[dev_env.id, prod_env.id],
start=self._2_h_ago_dt,
end=self.now_dt,
)
assert set(actual) == {(project_1.id, 3), (project_2.id, 1)}
eids_tests: tuple[list[int] | None, ...] = ([], None)
for eids in eids_tests:
actual = self.backend.get_num_sessions_per_project(
project_ids=[project_1.id, project_2.id],
environment_ids=eids,
start=self._2_h_ago_dt,
end=self.now_dt,
)
assert set(actual) == {(project_1.id, 4), (project_2.id, 2)}
| CheckNumberOfSessions |
python | pytorch__pytorch | test/inductor/test_torchinductor_strided_blocks.py | {
"start": 50739,
"end": 51067
} | class ____(BlockDescriptorTestBase):
device = "cpu"
test_torchinductor.copy_tests(
CommonTemplate,
TritonBlockPointerTestCPU,
"cpu",
xfail_prop="_expected_failure_triton_cpu",
)
@unittest.skipIf(not HAS_GPU, "requires triton GPU backend")
@config.patch("triton.use_block_ptr", True)
| TritonBlockPointerTestCPU |
python | pypa__setuptools | setuptools/_distutils/tests/test_build_clib.py | {
"start": 240,
"end": 4331
} | class ____(support.TempdirManager):
def test_check_library_dist(self):
pkg_dir, dist = self.create_dist()
cmd = build_clib(dist)
# 'libraries' option must be a list
with pytest.raises(DistutilsSetupError):
cmd.check_library_list('foo')
# each element of 'libraries' must a 2-tuple
with pytest.raises(DistutilsSetupError):
cmd.check_library_list(['foo1', 'foo2'])
# first element of each tuple in 'libraries'
# must be a string (the library name)
with pytest.raises(DistutilsSetupError):
cmd.check_library_list([(1, 'foo1'), ('name', 'foo2')])
# library name may not contain directory separators
with pytest.raises(DistutilsSetupError):
cmd.check_library_list(
[('name', 'foo1'), ('another/name', 'foo2')],
)
# second element of each tuple must be a dictionary (build info)
with pytest.raises(DistutilsSetupError):
cmd.check_library_list(
[('name', {}), ('another', 'foo2')],
)
# those work
libs = [('name', {}), ('name', {'ok': 'good'})]
cmd.check_library_list(libs)
def test_get_source_files(self):
pkg_dir, dist = self.create_dist()
cmd = build_clib(dist)
# "in 'libraries' option 'sources' must be present and must be
# a list of source filenames
cmd.libraries = [('name', {})]
with pytest.raises(DistutilsSetupError):
cmd.get_source_files()
cmd.libraries = [('name', {'sources': 1})]
with pytest.raises(DistutilsSetupError):
cmd.get_source_files()
cmd.libraries = [('name', {'sources': ['a', 'b']})]
assert cmd.get_source_files() == ['a', 'b']
cmd.libraries = [('name', {'sources': ('a', 'b')})]
assert cmd.get_source_files() == ['a', 'b']
cmd.libraries = [
('name', {'sources': ('a', 'b')}),
('name2', {'sources': ['c', 'd']}),
]
assert cmd.get_source_files() == ['a', 'b', 'c', 'd']
def test_build_libraries(self):
pkg_dir, dist = self.create_dist()
cmd = build_clib(dist)
class FakeCompiler:
def compile(*args, **kw):
pass
create_static_lib = compile
cmd.compiler = FakeCompiler()
# build_libraries is also doing a bit of typo checking
lib = [('name', {'sources': 'notvalid'})]
with pytest.raises(DistutilsSetupError):
cmd.build_libraries(lib)
lib = [('name', {'sources': list()})]
cmd.build_libraries(lib)
lib = [('name', {'sources': tuple()})]
cmd.build_libraries(lib)
def test_finalize_options(self):
pkg_dir, dist = self.create_dist()
cmd = build_clib(dist)
cmd.include_dirs = 'one-dir'
cmd.finalize_options()
assert cmd.include_dirs == ['one-dir']
cmd.include_dirs = None
cmd.finalize_options()
assert cmd.include_dirs == []
cmd.distribution.libraries = 'WONTWORK'
with pytest.raises(DistutilsSetupError):
cmd.finalize_options()
@pytest.mark.skipif('platform.system() == "Windows"')
def test_run(self):
pkg_dir, dist = self.create_dist()
cmd = build_clib(dist)
foo_c = os.path.join(pkg_dir, 'foo.c')
self.write_file(foo_c, 'int main(void) { return 1;}\n')
cmd.libraries = [('foo', {'sources': [foo_c]})]
build_temp = os.path.join(pkg_dir, 'build')
os.mkdir(build_temp)
cmd.build_temp = build_temp
cmd.build_clib = build_temp
# Before we run the command, we want to make sure
# all commands are present on the system.
ccmd = missing_compiler_executable()
if ccmd is not None:
self.skipTest(f'The {ccmd!r} command is not found')
# this should work
cmd.run()
# let's check the result
assert 'libfoo.a' in os.listdir(build_temp)
| TestBuildCLib |
python | google__jax | tests/mesh_utils_test.py | {
"start": 28986,
"end": 33209
} | class ____(test_util.JaxTestCase):
def test_get_prime_factors(self):
self.assertEqual(mesh_utils._get_prime_factors(1), []) # 1 has no factor.
self.assertEqual(mesh_utils._get_prime_factors(2), [2])
self.assertEqual(mesh_utils._get_prime_factors(4), [2, 2])
self.assertEqual(mesh_utils._get_prime_factors(8), [2, 2, 2])
self.assertEqual(mesh_utils._get_prime_factors(6), [2, 3])
self.assertEqual(mesh_utils._get_prime_factors(16), [2, 2, 2, 2])
self.assertEqual(mesh_utils._get_prime_factors(12), [2, 2, 3])
self.assertEqual(mesh_utils._get_prime_factors(121), [11, 11]) # square
self.assertEqual(mesh_utils._get_prime_factors(43), [43]) # prime
@parameterized.named_parameters(
(
'2x2x1',
[2, 2, 1],
[1, 2, 1],
4,
[], # infeasible
),
(
'12x4x4',
[12, 4, 4],
[2, 2, 1],
6,
[[6, 1, 1], [3, 2, 1], [3, 1, 2]],
),
(
'4x4x8',
[4, 4, 8],
[2, 2, 2],
4,
[[2, 2, 1], [2, 1, 2], [1, 2, 2], [1, 1, 4]],
),
)
def test_enumerate_feasible_axis_assignments(
self,
physical_mesh_shape,
assigned_physical_mesh_shape,
logical_axis_size,
expected_assignments,
):
assignment = int64_array([list(assigned_physical_mesh_shape)]).T
self.assertArraysEqual(
list(
mesh_utils._enumerate_feasible_logical_axis_assignments(
physical_mesh_shape,
assignment,
logical_axis_size=logical_axis_size,
)
),
[int64_array(a) for a in expected_assignments],
)
@parameterized.named_parameters(
(
'2x2x1',
[2, 2, 1],
[1, 2, 2, 1],
[
[1, 2, 1, 1],
[1, 1, 2, 1],
[1, 1, 1, 1],
],
),
(
'4x4x4',
[4, 4, 4],
[2, 1, 32],
[
[1, 1, 4],
[2, 1, 2],
[1, 1, 4],
],
),
(
'12x4x8',
[12, 4, 8],
[2, 8, 24],
[
[2, 2, 3],
[1, 2, 4],
[1, 2, 2],
],
),
)
def test_generate_logical_mesh(
self,
physical_mesh_shape,
logical_mesh_shape,
assignment,
):
assignment = np.array(assignment, dtype=np.int64)
physical_mesh = get_int_mesh(physical_mesh_shape)
logical_mesh = mesh_utils._generate_logical_mesh(
physical_mesh, logical_mesh_shape, assignment
)
self.assertEqual(logical_mesh.shape, tuple(logical_mesh_shape))
# We check that the logical mesh is assigned correctly using the following
# consistency check, which transforms the logical mesh back to physical
# mesh.
transpose = (
np.arange(assignment.size).reshape(assignment.shape).T.reshape([-1])
)
self.assertArraysEqual(
physical_mesh.reshape([-1]),
logical_mesh.reshape(np.reshape(assignment.T, [-1]))
.transpose(transpose)
.reshape([-1]),
)
def test_prefer_assignment_whole_axis_size(self):
self.assertTrue(
mesh_utils._prefer_first_logical_axis_assignment(
int64_array([1, 2, 1]),
int64_array([1, 1, 2]),
physical_mesh_shape=[2, 2, 4],
assignment=int64_array([[1, 1, 1]]).T,
)
)
def test_prefer_assignment_more_whole_axes(self):
# This entails the original implementation already.
self.assertTrue(
mesh_utils._prefer_first_logical_axis_assignment(
int64_array([4, 4, 1]),
int64_array([1, 1, 16]),
physical_mesh_shape=[4, 4, 16],
assignment=int64_array([[1, 1, 1]]).T,
)
)
def test_prefer_assignment_avoid_already_assigned(self):
self.assertTrue(
mesh_utils._prefer_first_logical_axis_assignment(
int64_array([2, 1]),
int64_array([1, 2]),
physical_mesh_shape=[2, 4],
assignment=int64_array([[1, 2]]).T,
)
)
if __name__ == '__main__':
absltest.main()
| SplitAxesDeviceMeshCreationTest |
python | sphinx-doc__sphinx | sphinx/ext/autodoc/directive.py | {
"start": 1343,
"end": 1690
} | class ____(dict[str, Callable[[str], str]]): # NoQA: FURB189
"""An option_spec allows any options."""
def __bool__(self) -> bool:
"""Behaves like some options are defined."""
return True
def __getitem__(self, _key: str) -> Callable[[str], str]:
return lambda x: x
# Retained: legacy class-based
| DummyOptionSpec |
python | dagster-io__dagster | python_modules/libraries/dagster-cloud-cli/dagster_cloud_cli/core/pex_builder/deps.py | {
"start": 6550,
"end": 16840
} | class ____(Exception):
def __init__(self, proc: subprocess.CompletedProcess):
self.proc = proc
self.stdout = proc.stdout.decode("utf-8")
self.stderr = proc.stderr.decode("utf-8")
lines = self.stdout.splitlines() + self.stderr.splitlines()
self.dependency_failure_lines = [
line
for line in lines
if "No matching distribution" in line
or "ResolutionImpossible" in line
or "No pre-built wheel was available" in line
]
def format_error(self) -> str:
lines = []
lines.append("Dependency build failure details:\n")
lines.append("Command:\n" + util.indent(" ".join(self.proc.args)))
if self.stdout:
lines.append("\nOutput:\n" + util.indent(self.stdout))
if self.stderr:
lines.append("\nError:\n" + util.indent(self.stderr))
return "".join(lines)
def build_deps_from_requirements(
requirements: DepsRequirements,
output_directory: str,
build_method: BuildMethod,
) -> tuple[str, str]:
os.makedirs(output_directory, exist_ok=True)
deps_requirements_filename = f"deps-requirements-{requirements.hash}.txt"
deps_requirements_path = os.path.join(output_directory, deps_requirements_filename)
tmp_pex_filename = f"deps-from-{requirements.hash}.pex"
tmp_pex_path = os.path.join(output_directory, tmp_pex_filename)
with open(deps_requirements_path, "w", encoding="utf-8") as deps_requirements_file:
deps_requirements_file.write(requirements.requirements_txt)
ui.print(
f"Building project dependencies for Python {requirements.python_version}, "
f"writing to {output_directory}",
)
def build_in_docker() -> None:
proc = docker_runner.run_dagster_cloud(
map_folders={"/output": output_directory},
run_args=[
"serverless",
"build-python-deps",
f"/output/{deps_requirements_filename}",
f"/output/{tmp_pex_filename}",
json.dumps(requirements.pex_flags),
],
env={"PEX_VERBOSE": None}, # pass through this env, if set
)
if proc.returncode:
ui.error("Failed to build dependencies using docker")
if proc.stdout:
ui.error(proc.stdout.decode("utf-8"))
if proc.stderr:
ui.error(proc.stderr.decode("utf-8"))
sys.exit(1)
if build_method in [BuildMethod.DOCKER_FALLBACK, BuildMethod.LOCAL]:
try:
build_deps_from_requirements_file(
deps_requirements_path,
output_pex_path=tmp_pex_path,
pex_flags=requirements.pex_flags,
)
except DepsBuildFailure as err:
if build_method == BuildMethod.DOCKER_FALLBACK and err.dependency_failure_lines:
ui.warn(
"Failed to build dependencies in current environment:"
f"{''.join(err.dependency_failure_lines)}"
)
ui.warn("Falling back to build in a docker environment")
build_in_docker()
else:
raise ui.error("Failed to build dependencies:\n" + err.format_error())
else:
ui.print("Building project dependencies in a docker build environment")
build_in_docker()
pex_info = util.get_pex_info(tmp_pex_path)
pex_hash = pex_info["pex_hash"]
final_pex_path = os.path.join(output_directory, f"deps-{pex_hash}.pex")
os.rename(tmp_pex_path, final_pex_path)
ui.print(f"Wrote deps pex: {final_pex_path}")
distribution_names = pex_info["distributions"].keys()
# the distributions are named something like 'dagster-1.0.14-py3-none-any.whl'
# and 'dagster_cloud-1.1.7-py3-none-any.whl'
dep_names = ["dagster", "dagster_cloud"]
dep_versions = {}
for name in distribution_names:
for dep_name in dep_names:
pattern = re.compile(f"{dep_name}-(.+?)-py")
match = pattern.match(name)
if match:
dep_versions[dep_name] = match.group(1)
break
for dep_name in dep_names:
if dep_name not in dep_versions:
raise ValueError(f"The {dep_name} package dependency was expected but not found.")
return final_pex_path, dep_versions["dagster"]
def build_deps_from_requirements_file(
deps_requirements_path: str,
output_pex_path: str,
pex_flags: list[str],
) -> None:
"""Attempts to build a pex file from a requirements file and raises DepsBuildFailure on failure."""
# We try different sets of build flags and use the first one that works
try_flags = TRY_FLAGS.copy()
while try_flags:
add_on_flags = try_flags.pop(0)
pex_flags = pex_flags + add_on_flags
logging.debug(f"Running pex with {' '.join(pex_flags)}")
proc = util.build_pex(
sources_directories=[],
requirements_filepaths=[deps_requirements_path],
pex_flags=pex_flags,
output_pex_path=output_pex_path,
)
if proc.returncode:
if try_flags:
ui.warn(proc.stderr.decode("utf-8"))
ui.warn("Will retry building deps with a different resolution mechanism")
else:
raise DepsBuildFailure(proc)
else:
break
def get_requirements_txt_deps(code_directory: str) -> list[str]:
requirements_path = os.path.join(code_directory, "requirements.txt")
if not os.path.exists(requirements_path):
return []
# combine multi-line strings into a single string
combined_lines = []
current_line = ""
with open(requirements_path, encoding="utf-8") as file:
for line in file:
stripped_line = line.rstrip()
if stripped_line.endswith("\\"):
current_line += stripped_line[:-1]
else:
current_line += stripped_line
combined_lines.append(current_line)
current_line = ""
# Add any remaining content if the last line ends with a backslash
if current_line:
combined_lines.append(current_line)
lines = []
for raw_line in combined_lines:
# https://pip.pypa.io/en/stable/reference/requirements-file-format/#comments
line = re.sub(r"(^#|\s#).*", "", raw_line)
line = line.strip()
# remove current dir from the deps
if line in {"", "."}:
continue
lines.append(line)
return lines
def get_setup_py_deps(code_directory: str, python_interpreter: str) -> list[str]:
setup_py_path = os.path.join(code_directory, "setup.py")
if not os.path.exists(setup_py_path):
return []
lines = []
# write out egg_info files and load as distribution
with tempfile.TemporaryDirectory() as temp_dir:
proc = subprocess.run(
[python_interpreter, setup_py_path, "egg_info", f"--egg-base={temp_dir}"],
capture_output=True,
check=False,
cwd=code_directory,
)
if proc.returncode:
raise ValueError(
"Error running setup.py egg_info: "
+ proc.stdout.decode("utf-8")
+ proc.stderr.decode("utf-8")
)
dists = list(importlib.metadata.distributions(path=[temp_dir]))
if len(dists) != 1:
raise ValueError(f"Could not find distribution for {setup_py_path}")
dist = dists[0]
for requirement in dist.requires or []:
lines.append(requirement)
return lines
def get_pyproject_toml_deps(code_directory: str) -> list[str]:
pyproject_path = os.path.join(code_directory, "pyproject.toml")
if not os.path.exists(pyproject_path):
return []
try:
with open(pyproject_path, "rb") as file:
pyproject_data = tomllib.load(file)
except Exception as e:
raise ValueError(f"Error parsing pyproject.toml: {e}")
lines = []
# Handle dependencies in [project] section (PEP 621)
project_section = pyproject_data.get("project", {})
dependencies = project_section.get("dependencies", [])
for dep in dependencies:
lines.append(str(dep))
# Handle optional dependencies in [project.optional-dependencies]
optional_deps = project_section.get("optional-dependencies", {})
for group_deps in optional_deps.values():
for dep in group_deps:
lines.append(str(dep))
# Handle legacy [tool.poetry.dependencies] for Poetry projects
poetry_section = pyproject_data.get("tool", {}).get("poetry", {})
poetry_deps = poetry_section.get("dependencies", {})
for dep_name, dep_spec in poetry_deps.items():
if dep_name == "python":
continue # Skip python version constraint
if isinstance(dep_spec, str):
lines.append(f"{dep_name}{dep_spec}")
elif isinstance(dep_spec, dict):
version_spec = dep_spec.get("version", "")
if version_spec:
lines.append(f"{dep_name}{version_spec}")
else:
# Handle complex dependency specs (git, path, etc.)
lines.append(dep_name)
# Handle legacy [tool.poetry.dev-dependencies] (older Poetry format)
poetry_dev_deps = poetry_section.get("dev-dependencies", {})
for dep_name, dep_spec in poetry_dev_deps.items():
if isinstance(dep_spec, str):
lines.append(f"{dep_name}{dep_spec}")
elif isinstance(dep_spec, dict):
version_spec = dep_spec.get("version", "")
if version_spec:
lines.append(f"{dep_name}{version_spec}")
else:
lines.append(dep_name)
return lines
@click.command()
@click.argument("project_dir", type=click.Path(exists=True))
@click.argument("build_output_dir", type=click.Path(exists=False))
@util.python_version_option()
def deps_main(project_dir, build_output_dir, python_version):
deps_pex_path, dagster_version = build_deps_pex(
project_dir, build_output_dir, util.parse_python_version(python_version)
)
if __name__ == "__main__":
deps_main()
| DepsBuildFailure |
python | realpython__materials | python-langgraph/graphs/notice_extraction.py | {
"start": 420,
"end": 4557
} | class ____(TypedDict):
notice_message: str
notice_email_extract: NoticeEmailExtract | None
escalation_text_criteria: str
escalation_dollar_criteria: float
requires_escalation: bool
escalation_emails: list[EmailStr] | None
follow_ups: dict[str, bool] | None
current_follow_up: str | None
workflow = StateGraph(GraphState)
def parse_notice_message_node(state: GraphState) -> GraphState:
"""Use the notice parser chain to extract fields from the notice"""
LOGGER.info("Parsing notice...")
notice_email_extract = NOTICE_PARSER_CHAIN.invoke(
{"message": state["notice_message"]}
)
state["notice_email_extract"] = notice_email_extract
return state
def check_escalation_status_node(state: GraphState) -> GraphState:
"""Determine whether a notice needs escalation"""
LOGGER.info("Determining escalation status...")
text_check = ESCALATION_CHECK_CHAIN.invoke(
{
"escalation_criteria": state["escalation_text_criteria"],
"message": state["notice_message"],
}
).needs_escalation
if (
text_check
or state["notice_email_extract"].max_potential_fine
>= state["escalation_dollar_criteria"]
):
state["requires_escalation"] = True
else:
state["requires_escalation"] = False
return state
def send_escalation_email_node(state: GraphState) -> GraphState:
"""Send an escalation email"""
send_escalation_email(
notice_email_extract=state["notice_email_extract"],
escalation_emails=state["escalation_emails"],
)
return state
def create_legal_ticket_node(state: GraphState) -> GraphState:
"""Node to create a legal ticket"""
follow_up = create_legal_ticket(
current_follow_ups=state.get("follow_ups"),
notice_email_extract=state["notice_email_extract"],
)
state["current_follow_up"] = follow_up
return state
def answer_follow_up_question_node(state: GraphState) -> GraphState:
"""Answer follow-up questions about the notice using
BINARY_QUESTION_CHAIN"""
if state["current_follow_up"]:
question = state["current_follow_up"] + " " + state["notice_message"]
answer = BINARY_QUESTION_CHAIN.invoke({"question": question})
if state.get("follow_ups"):
state["follow_ups"][state["current_follow_up"]] = answer
else:
state["follow_ups"] = {state["current_follow_up"]: answer}
return state
def route_escalation_status_edge(state: GraphState) -> str:
"""Determine whether to send an escalation email or create
a legal ticket"""
if state["requires_escalation"]:
LOGGER.info("Escalation needed!")
return "send_escalation_email"
LOGGER.info("No escalation needed")
return "create_legal_ticket"
def route_follow_up_edge(state: GraphState) -> str:
"""Determine whether a follow-up question is required"""
if state.get("current_follow_up"):
return "answer_follow_up_question"
return END
workflow.add_node("parse_notice_message", parse_notice_message_node)
workflow.add_node("check_escalation_status", check_escalation_status_node)
workflow.add_node("send_escalation_email", send_escalation_email_node)
workflow.add_node("create_legal_ticket", create_legal_ticket_node)
workflow.add_node("answer_follow_up_question", answer_follow_up_question_node)
workflow.add_edge(START, "parse_notice_message")
workflow.add_edge("parse_notice_message", "check_escalation_status")
workflow.add_conditional_edges(
"check_escalation_status",
route_escalation_status_edge,
{
"send_escalation_email": "send_escalation_email",
"create_legal_ticket": "create_legal_ticket",
},
)
workflow.add_conditional_edges(
"create_legal_ticket",
route_follow_up_edge,
{
"answer_follow_up_question": "answer_follow_up_question",
END: END,
},
)
workflow.add_edge("send_escalation_email", "create_legal_ticket")
workflow.add_edge("answer_follow_up_question", "create_legal_ticket")
NOTICE_EXTRACTION_GRAPH = workflow.compile()
| GraphState |
python | spyder-ide__spyder | installers-conda/build_conda_pkgs.py | {
"start": 2314,
"end": 8550
} | class ____:
"""Base class for building a conda package for conda-based installer"""
name = None
norm = True
source = None
feedstock = None
feedstock_branch = None
def __init__(self, data=None, debug=False):
data = {} if data is None else data
self.logger = logger.getChild(self.__class__.__name__)
self.debug = debug
self._bld_src = BUILD / self.name
self._fdstk_path = BUILD / self.feedstock.split("/")[-1]
self._patchfile = self._fdstk_path / "recipe" / "version.patch"
self._get_source()
self._get_version()
self.data = {'version': self.version}
self.data.update(data)
self.recipe_append = {}
self.recipe_clobber = {}
self._recipe_patched = False
def _get_source(self):
"""Clone source and feedstock for building"""
BUILD.mkdir(exist_ok=True)
self._cleanup_build()
if self.source == HERE.parent:
self._bld_src = self.source
self.repo = Repo(self.source)
else:
# Determine source and commit
if self.source is not None:
remote = self.source
commit = 'HEAD'
else:
cfg = ConfigParser()
cfg.read(EXTDEPS / self.name / '.gitrepo')
remote = cfg['subrepo']['remote']
commit = cfg['subrepo']['commit']
# Clone from source
kwargs = dict(to_path=self._bld_src)
self.logger.info("Cloning source...")
self.repo = Repo.clone_from(remote, **kwargs)
self.repo.git.checkout(commit)
# Clone feedstock
self.logger.info("Cloning feedstock...")
kwargs = dict(to_path=self._fdstk_path)
if self.feedstock_branch:
kwargs.update(branch=self.feedstock_branch)
feedstock_repo = Repo.clone_from(self.feedstock, **kwargs)
self.logger.info(
f"Feedstock branch: {feedstock_repo.active_branch.name}"
)
def _cleanup_build(self, debug=False):
"""Remove cloned source and feedstock repositories"""
if debug:
self.logger.info("Keeping cloned source and feedstock")
return
for src in [self._bld_src, self._fdstk_path]:
if src.exists() and src != HERE.parent:
self.logger.info(f"Removing {src}...")
rmtree(src)
def _get_version(self):
"""Get source version using setuptools_scm"""
v = get_version(self._bld_src, normalize=self.norm)
self.version = v.lstrip('v').split('+')[0]
def _patch_source(self):
pass
def _patch_conda_build_config(self):
file = self._fdstk_path / "recipe" / "conda_build_config.yaml"
if not file.exists():
return
contents = yaml.load(file.read_text())
file.rename(file.parent / ("_" + file.name)) # copy of original
pyver = sys.version_info
contents['python'] = [f"{pyver.major}.{pyver.minor}.* *_cpython"]
yaml.dump(contents, file)
self.logger.info(
f"Patched 'conda_build_config.yaml' contents:\n{file.read_text()}"
)
def _patch_meta(self):
file = self._fdstk_path / "recipe" / "meta.yaml"
meta = file.read_text()
# Replace jinja variable values
for k, v in self.data.items():
meta = re.sub(f".*set {k} =.*", f'{{% set {k} = "{v}" %}}', meta)
# Remove temporary patches
meta = re.sub(r"^\s*- temp-.+\.patch\n", "", meta, flags=re.MULTILINE)
file.rename(file.parent / ("_" + file.name)) # keep copy of original
file.write_text(meta)
self.logger.info(f"Patched 'meta.yaml' contents:\n{file.read_text()}")
def _add_recipe_append(self):
if self._patchfile.exists():
self.recipe_append.update(
{"source": {"patches": [self._patchfile.name]}}
)
if self.recipe_append:
file = self._fdstk_path / "recipe" / "recipe_append.yaml"
yaml.dump(self.recipe_append, file)
self.logger.info(
f"'recipe_append.yaml' contents:\n{file.read_text()}"
)
else:
self.logger.info("Skipping 'recipe_append.yaml'.")
def _add_recipe_clobber(self):
self.recipe_clobber.update({
"source": {
"url": None,
"sha256": None,
"path": self._bld_src.as_posix()},
})
if self.recipe_clobber:
file = self._fdstk_path / "recipe" / "recipe_clobber.yaml"
yaml.dump(self.recipe_clobber, file)
self.logger.info(
f"'recipe_clobber.yaml' contents:\n{file.read_text()}"
)
else:
self.logger.info("Skipping 'recipe_clobber.yaml'.")
def patch_recipe(self):
"""
Patch conda build recipe
1. Patch conda_build_config.yaml
2. Patch meta.yaml
3. Add recipe_append.yaml
4. Add recipe_clobber.yaml
"""
if self._recipe_patched:
return
self._patch_conda_build_config()
self._patch_meta()
self._add_recipe_append()
self._add_recipe_clobber()
self._recipe_patched = True
def build(self):
"""
Build the conda package.
1. Patch source
2. Patch the recipe
3. Build the package
4. Remove cloned repositories
"""
t0 = time()
try:
self._patch_source()
self.patch_recipe()
self.logger.info("Building conda package "
f"{self.name}={self.version}...")
check_call([
"conda", "build",
"--skip-existing", "--build-id-pat={n}",
"--no-test", "--no-anaconda-upload",
str(self._fdstk_path / "recipe")
])
finally:
self._recipe_patched = False
self._cleanup_build(self.debug)
elapse = timedelta(seconds=int(time() - t0))
self.logger.info(f"Build time = {elapse}")
| BuildCondaPkg |
python | PyCQA__pylint | tests/functional/i/invalid/invalid_exceptions/invalid_exceptions_caught.py | {
"start": 449,
"end": 540
} | class ____(socket.error):
"""Not an exception for Python 2, but one in 3."""
| SkipException |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 476791,
"end": 477516
} | class ____(sgqlc.types.Type):
"""Parameters to be used for the branch_name_pattern rule"""
__schema__ = github_schema
__field_names__ = ("name", "negate", "operator", "pattern")
name = sgqlc.types.Field(String, graphql_name="name")
"""How this rule will appear to users."""
negate = sgqlc.types.Field(sgqlc.types.non_null(Boolean), graphql_name="negate")
"""If true, the rule will fail if the pattern matches."""
operator = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="operator")
"""The operator to use for matching."""
pattern = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="pattern")
"""The pattern to match with."""
| BranchNamePatternParameters |
python | keras-team__keras | keras/src/layers/preprocessing/image_preprocessing/random_hue.py | {
"start": 233,
"end": 6466
} | class ____(BaseImagePreprocessingLayer):
"""Randomly adjusts the hue on given images.
This layer will randomly increase/reduce the hue for the input RGB
images.
The image hue is adjusted by converting the image(s) to HSV and rotating the
hue channel (H) by delta. The image is then converted back to RGB.
**Note:** This layer is safe to use inside a `tf.data` or `grain` pipeline
(independently of which backend you're using).
Args:
factor: A single float or a tuple of two floats.
`factor` controls the extent to which the
image hue is impacted. `factor=0.0` makes this layer perform a
no-op operation, while a value of `1.0` performs the most aggressive
contrast adjustment available. If a tuple is used, a `factor` is
sampled between the two values for every image augmented. If a
single float is used, a value between `0.0` and the passed float is
sampled. In order to ensure the value is always the same, please
pass a tuple with two identical floats: `(0.5, 0.5)`.
value_range: the range of values the incoming images will have.
Represented as a two-number tuple written `[low, high]`. This is
typically either `[0, 1]` or `[0, 255]` depending on how your
preprocessing pipeline is set up.
seed: Integer. Used to create a random seed.
Example:
```python
(images, labels), _ = keras.datasets.cifar10.load_data()
random_hue = keras.layers.RandomHue(factor=0.5, value_range=[0, 1])
images = keras.ops.cast(images, "float32")
augmented_images_batch = random_hue(images[:8])
```
"""
_USE_BASE_FACTOR = True
_FACTOR_BOUNDS = (0, 1)
def __init__(
self,
factor,
value_range=(0, 255),
data_format=None,
seed=None,
**kwargs,
):
super().__init__(data_format=data_format, **kwargs)
self._set_factor(factor)
self.value_range = value_range
self.seed = seed
self.generator = self.backend.random.SeedGenerator(seed)
def get_random_transformation(self, data, training=True, seed=None):
if isinstance(data, dict):
images = data["images"]
else:
images = data
images_shape = self.backend.shape(images)
rank = len(images_shape)
if rank == 3:
batch_size = 1
elif rank == 4:
batch_size = images_shape[0]
else:
raise ValueError(
"Expected the input image to be rank 3 or 4. Received "
f"inputs.shape={images_shape}"
)
if seed is None:
seed = self._get_seed_generator(self.backend._backend)
invert = self.backend.random.uniform((batch_size,), seed=seed)
invert = self.backend.numpy.where(
invert > 0.5,
-self.backend.numpy.ones_like(invert),
self.backend.numpy.ones_like(invert),
)
factor = self.backend.random.uniform(
(batch_size,),
minval=self.factor[0],
maxval=self.factor[1],
seed=seed,
)
return {"factor": invert * factor * 0.5}
def transform_images(self, images, transformation=None, training=True):
def _apply_random_hue(images, transformation):
images = self.backend.cast(images, self.compute_dtype)
images = self._transform_value_range(
images, self.value_range, (0, 1)
)
adjust_factors = transformation["factor"]
adjust_factors = self.backend.cast(adjust_factors, images.dtype)
adjust_factors = self.backend.numpy.expand_dims(adjust_factors, -1)
adjust_factors = self.backend.numpy.expand_dims(adjust_factors, -1)
images = self.backend.image.rgb_to_hsv(
images, data_format=self.data_format
)
if self.data_format == "channels_first":
h_channel = images[:, 0, :, :] + adjust_factors
h_channel = self.backend.numpy.where(
h_channel > 1.0, h_channel - 1.0, h_channel
)
h_channel = self.backend.numpy.where(
h_channel < 0.0, h_channel + 1.0, h_channel
)
images = self.backend.numpy.stack(
[h_channel, images[:, 1, :, :], images[:, 2, :, :]], axis=1
)
else:
h_channel = images[..., 0] + adjust_factors
h_channel = self.backend.numpy.where(
h_channel > 1.0, h_channel - 1.0, h_channel
)
h_channel = self.backend.numpy.where(
h_channel < 0.0, h_channel + 1.0, h_channel
)
images = self.backend.numpy.stack(
[h_channel, images[..., 1], images[..., 2]], axis=-1
)
images = self.backend.image.hsv_to_rgb(
images, data_format=self.data_format
)
images = self.backend.numpy.clip(images, 0, 1)
images = self._transform_value_range(
images, (0, 1), self.value_range
)
images = self.backend.cast(images, self.compute_dtype)
return images
if training:
images = _apply_random_hue(images, transformation)
return images
def transform_labels(self, labels, transformation, training=True):
return labels
def transform_segmentation_masks(
self, segmentation_masks, transformation, training=True
):
return segmentation_masks
def transform_bounding_boxes(
self, bounding_boxes, transformation, training=True
):
return bounding_boxes
def get_config(self):
config = super().get_config()
config.update(
{
"factor": self.factor,
"value_range": self.value_range,
"seed": self.seed,
}
)
return config
def compute_output_shape(self, input_shape):
return input_shape
| RandomHue |
python | python-markdown__markdown | markdown/treeprocessors.py | {
"start": 1898,
"end": 2614
} | class ____(util.Processor):
"""
`Treeprocessor`s are run on the `ElementTree` object before serialization.
Each `Treeprocessor` implements a `run` method that takes a pointer to an
`Element` and modifies it as necessary.
`Treeprocessors` must extend `markdown.Treeprocessor`.
"""
def run(self, root: etree.Element) -> etree.Element | None:
"""
Subclasses of `Treeprocessor` should implement a `run` method, which
takes a root `Element`. This method can return another `Element`
object, and the existing root `Element` will be replaced, or it can
modify the current tree and return `None`.
"""
pass # pragma: no cover
| Treeprocessor |
python | ApeWorX__ape | tests/functional/test_transaction.py | {
"start": 17136,
"end": 18931
} | class ____:
@pytest.mark.parametrize(
"address",
(
"0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266",
HexBytes("0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266"),
),
)
def test_address(self, address):
actual = AccessList(address=address, storageKeys=[])
assert actual.address == "0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266"
@pytest.mark.parametrize("storage_key", (123, HexBytes(123), "0x0123"))
def test_storage_keys(self, storage_key, zero_address):
actual = AccessList(address=zero_address, storageKeys=[storage_key])
assert actual.storage_keys == [HexBytes(storage_key)]
def test_override_annotated_fields():
"""
This test is to prove that a user may use an `int` for a base-class
when the API field is described as a `HexInt`.
"""
class MyTransaction(TransactionAPI):
@property
def txn_hash(self) -> HexBytes:
return HexBytes("")
def serialize_transaction(self) -> bytes:
return b""
chain_id: Optional[int] = None # The base type is `Optional[HexInt]`.
chain_id = 123123123123123123123123123123
tx_type = 120
my_tx = MyTransaction.model_validate({"chain_id": chain_id, "type": tx_type})
assert my_tx.chain_id == chain_id
assert my_tx.type == tx_type
@pytest.mark.parametrize("key", ("gas_limit", "gas"))
def test_gas(ethereum, key):
kwargs = {key: 123}
tx = ethereum.create_transaction(**kwargs)
assert tx.gas_limit == 123
# Show the `gas` alias works.
assert tx.gas == 123
@pytest.mark.parametrize("val", (2, "0x2"))
def test_gas_setter(ethereum, val):
tx = ethereum.create_transaction()
tx.gas = val
assert tx.gas_limit == 2
assert tx.gas == 2
| TestAccessList |
python | pytorch__pytorch | test/dynamo/test_subclasses.py | {
"start": 10010,
"end": 12046
} | class ____(torch.Tensor):
"""
Class used to verify guarding on the subclass metadata
"""
@staticmethod
def __new__(cls, a, constant):
shape = a.shape
kwargs = {}
kwargs["strides"] = a.stride()
kwargs["storage_offset"] = a.storage_offset()
kwargs["device"] = a.device
kwargs["layout"] = a.layout
kwargs["requires_grad"] = a.requires_grad
kwargs["dtype"] = a.dtype
out = torch.Tensor._make_wrapper_subclass(cls, shape, **kwargs)
return out
def __init__(self, a, constant):
self.a = a
self.constant = constant
def __repr__(self):
a_repr = repr(self.a)
return f"CtxSubclassTensor({a_repr})"
def __tensor_flatten__(self):
return ["a"], (self.constant,)
@staticmethod
def __tensor_unflatten__(inner_tensors, meta, sizes, strides):
constant = meta[0]
a = inner_tensors["a"]
return CtxSubclassTensor(a, constant)
@classmethod
def __torch_dispatch__(cls, func, types, args, kwargs):
if kwargs is None:
kwargs = {}
biggest_constant = max(
[
x.constant
for x in pytree.tree_flatten(args)[0]
if isinstance(x, CtxSubclassTensor)
]
)
args_a = pytree.tree_map(
lambda x: x.a if isinstance(x, CtxSubclassTensor) else x, args
)
kwargs_a = pytree.tree_map(
lambda x: x.a if isinstance(x, CtxSubclassTensor) else x, kwargs
)
out_a = func(*args_a, **kwargs_a)
out = pytree.tree_map(
lambda x: (
CtxSubclassTensor(x, biggest_constant)
if isinstance(x, torch.Tensor)
else x
),
out_a,
)
if func == torch.ops.aten.mul.Tensor:
out = out + out.constant
return return_and_correct_aliasing(func, args, kwargs, out)
def func(a):
return a.sin()
| CtxSubclassTensor |
python | spyder-ide__spyder | spyder/plugins/variableexplorer/confpage.py | {
"start": 387,
"end": 1967
} | class ____(PluginConfigPage):
def setup_page(self):
filter_group = QGroupBox(_("Filter"))
filter_data = [
('exclude_private',
_("Exclude private references")),
('exclude_capitalized',
_("Exclude capitalized references")),
('exclude_uppercase',
_("Exclude all-uppercase references")),
('exclude_unsupported',
_("Exclude unsupported data types")),
('exclude_callables_and_modules',
_("Exclude callables and modules"))
]
filter_boxes = [
self.create_checkbox(
text,
option,
tip='Excludes variables that fit the description'
)
for option, text in filter_data
]
display_group = QGroupBox(_("Display"))
display_data = [('minmax', _("Show arrays min/max"), '')]
display_boxes = [self.create_checkbox(text, option, tip=tip)
for option, text, tip in display_data]
filter_layout = QVBoxLayout()
for box in filter_boxes:
filter_layout.addWidget(box)
filter_group.setLayout(filter_layout)
display_layout = QVBoxLayout()
for box in display_boxes:
display_layout.addWidget(box)
display_group.setLayout(display_layout)
vlayout = QVBoxLayout()
vlayout.addWidget(filter_group)
vlayout.addWidget(display_group)
vlayout.addStretch(1)
self.setLayout(vlayout)
| VariableExplorerConfigPage |
python | langchain-ai__langchain | libs/langchain_v1/tests/unit_tests/agents/middleware/core/test_wrap_model_call.py | {
"start": 44012,
"end": 46420
} | class ____:
"""Test sync/async interoperability."""
def test_sync_invoke_with_only_async_middleware_raises_error(self) -> None:
"""Test that sync invoke with only async middleware raises error."""
class AsyncOnlyMiddleware(AgentMiddleware):
async def awrap_model_call(
self,
request: ModelRequest,
handler: Callable[[ModelRequest], Awaitable[AIMessage]],
) -> AIMessage:
return await handler(request)
agent = create_agent(
model=FakeToolCallingModel(),
tools=[],
system_prompt="You are a helpful assistant.",
middleware=[AsyncOnlyMiddleware()],
)
with pytest.raises(NotImplementedError):
agent.invoke({"messages": [HumanMessage("hello")]})
def test_sync_invoke_with_mixed_middleware(self) -> None:
"""Test that sync invoke works with mixed sync/async middleware when sync versions exist."""
calls = []
class MixedMiddleware(AgentMiddleware):
def before_model(self, state, runtime) -> None:
calls.append("MixedMiddleware.before_model")
async def abefore_model(self, state, runtime) -> None:
calls.append("MixedMiddleware.abefore_model")
def wrap_model_call(
self,
request: ModelRequest,
handler: Callable[[ModelRequest], AIMessage],
) -> AIMessage:
calls.append("MixedMiddleware.wrap_model_call")
return handler(request)
async def awrap_model_call(
self,
request: ModelRequest,
handler: Callable[[ModelRequest], Awaitable[AIMessage]],
) -> AIMessage:
calls.append("MixedMiddleware.awrap_model_call")
return await handler(request)
agent = create_agent(
model=FakeToolCallingModel(),
tools=[],
system_prompt="You are a helpful assistant.",
middleware=[MixedMiddleware()],
)
result = agent.invoke({"messages": [HumanMessage("hello")]})
# In sync mode, only sync methods should be called
assert calls == [
"MixedMiddleware.before_model",
"MixedMiddleware.wrap_model_call",
]
| TestSyncAsyncInterop |
python | getsentry__sentry | src/sentry/notifications/models/notificationaction.py | {
"start": 7121,
"end": 12250
} | class ____(AbstractNotificationAction):
"""
Generic notification action model to programmatically route depending on the trigger (or source) for the notification
"""
__relocation_scope__ = {RelocationScope.Global, RelocationScope.Organization}
__repr__ = sane_repr("id", "trigger_type", "service_type", "target_display")
_trigger_types: tuple[tuple[int, str], ...] = ActionTrigger.as_choices()
_registry: MutableMapping[str, type[ActionRegistration]] = {}
organization = FlexibleForeignKey("sentry.Organization")
projects = models.ManyToManyField("sentry.Project", through=NotificationActionProject)
# The type of trigger which controls when the actions will go off (e.g. 'spike-protection')
trigger_type = models.SmallIntegerField(choices=_trigger_types)
class Meta:
app_label = "notifications"
db_table = "sentry_notificationaction"
@classmethod
def register_action(cls, trigger_type: int, service_type: int, target_type: int):
"""
Register a new trigger/service/target combination for NotificationActions.
For example, allowing audit-logs (trigger) to fire actions to slack (service) channels (target)
:param trigger_type: The registered trigger_type integer value saved to the database
:param service_type: The service_type integer value which must exist on ActionService
:param target_type: The target_type integer value which must exist on ActionTarget
:param registration: A subclass of `ActionRegistration`.
"""
def inner(registration: type[ActionRegistrationT]) -> type[ActionRegistrationT]:
if trigger_type not in dict(ActionTrigger.as_choices()):
raise AttributeError(
f"Trigger type of {trigger_type} is not registered. Modify ActionTrigger."
)
if service_type not in dict(ActionService.as_choices()):
raise AttributeError(
f"Service type of {service_type} is not registered. Modify ActionService."
)
if target_type not in dict(ActionTarget.as_choices()):
raise AttributeError(
f"Target type of {target_type} is not registered. Modify ActionTarget."
)
key = cls.get_registry_key(trigger_type, service_type, target_type)
if cls._registry.get(key) is not None:
raise AttributeError(
f"Existing registration found for trigger:{trigger_type}, service:{service_type}, target:{target_type}."
)
cls._registry[key] = registration
return registration
return inner
@classmethod
def get_trigger_types(cls):
return cls._trigger_types
@classmethod
def get_trigger_text(self, trigger_type: int) -> str:
return dict(NotificationAction.get_trigger_types())[trigger_type]
@classmethod
def get_registry_key(self, trigger_type: int, service_type: int, target_type: int) -> str:
return f"{trigger_type}:{service_type}:{target_type}"
@classmethod
def get_registry(cls) -> Mapping[str, type[ActionRegistration]]:
return cls._registry
@classmethod
def get_registration(
cls, trigger_type: int, service_type: int, target_type: int
) -> type[ActionRegistration] | None:
key = cls.get_registry_key(trigger_type, service_type, target_type)
return cls._registry.get(key)
def get_audit_log_data(self) -> dict[str, str]:
"""
Returns audit log data for NOTIFICATION_ACTION_ADD, NOTIFICATION_ACTION_EDIT
and NOTIFICATION_ACTION_REMOVE events
"""
return {"trigger": NotificationAction.get_trigger_text(self.trigger_type)}
def fire(self, *args, **kwargs):
registration = NotificationAction.get_registration(
self.trigger_type, self.service_type, self.target_type
)
if registration:
logger.info(
"fire_action",
extra={
"action_id": self.id,
"trigger": NotificationAction.get_trigger_text(self.trigger_type),
"service": ActionService.get_name(self.service_type),
"target": ActionTarget.get_name(self.target_type),
},
)
return registration(action=self).fire(*args, **kwargs)
else:
logger.error(
"missing_registration",
extra={
"id": self.id,
"service_type": self.service_type,
"trigger_type": self.trigger_type,
"target_type": self.target_type,
},
)
def get_relocation_scope(self) -> RelocationScope:
if self.integration_id is not None or self.sentry_app_id is not None:
# TODO(getsentry/team-ospo#188): this should be extension scope once that gets added.
return RelocationScope.Global
return RelocationScope.Organization
| NotificationAction |
python | scikit-learn__scikit-learn | sklearn/covariance/_shrunk_covariance.py | {
"start": 23335,
"end": 28079
} | class ____(EmpiricalCovariance):
"""Oracle Approximating Shrinkage Estimator.
Read more in the :ref:`User Guide <shrunk_covariance>`.
Parameters
----------
store_precision : bool, default=True
Specify if the estimated precision is stored.
assume_centered : bool, default=False
If True, data will not be centered before computation.
Useful when working with data whose mean is almost, but not exactly
zero.
If False (default), data will be centered before computation.
Attributes
----------
covariance_ : ndarray of shape (n_features, n_features)
Estimated covariance matrix.
location_ : ndarray of shape (n_features,)
Estimated location, i.e. the estimated mean.
precision_ : ndarray of shape (n_features, n_features)
Estimated pseudo inverse matrix.
(stored only if store_precision is True)
shrinkage_ : float
coefficient in the convex combination used for the computation
of the shrunk estimate. Range is [0, 1].
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
See Also
--------
EllipticEnvelope : An object for detecting outliers in
a Gaussian distributed dataset.
EmpiricalCovariance : Maximum likelihood covariance estimator.
GraphicalLasso : Sparse inverse covariance estimation
with an l1-penalized estimator.
GraphicalLassoCV : Sparse inverse covariance with cross-validated
choice of the l1 penalty.
LedoitWolf : LedoitWolf Estimator.
MinCovDet : Minimum Covariance Determinant
(robust estimator of covariance).
ShrunkCovariance : Covariance estimator with shrinkage.
Notes
-----
The regularised covariance is:
(1 - shrinkage) * cov + shrinkage * mu * np.identity(n_features),
where mu = trace(cov) / n_features and shrinkage is given by the OAS formula
(see [1]_).
The shrinkage formulation implemented here differs from Eq. 23 in [1]_. In
the original article, formula (23) states that 2/p (p being the number of
features) is multiplied by Trace(cov*cov) in both the numerator and
denominator, but this operation is omitted because for a large p, the value
of 2/p is so small that it doesn't affect the value of the estimator.
References
----------
.. [1] :arxiv:`"Shrinkage algorithms for MMSE covariance estimation.",
Chen, Y., Wiesel, A., Eldar, Y. C., & Hero, A. O.
IEEE Transactions on Signal Processing, 58(10), 5016-5029, 2010.
<0907.4698>`
Examples
--------
>>> import numpy as np
>>> from sklearn.covariance import OAS
>>> from sklearn.datasets import make_gaussian_quantiles
>>> real_cov = np.array([[.8, .3],
... [.3, .4]])
>>> rng = np.random.RandomState(0)
>>> X = rng.multivariate_normal(mean=[0, 0],
... cov=real_cov,
... size=500)
>>> oas = OAS().fit(X)
>>> oas.covariance_
array([[0.7533, 0.2763],
[0.2763, 0.3964]])
>>> oas.precision_
array([[ 1.7833, -1.2431 ],
[-1.2431, 3.3889]])
>>> oas.shrinkage_
np.float64(0.0195)
See also :ref:`sphx_glr_auto_examples_covariance_plot_covariance_estimation.py`
and :ref:`sphx_glr_auto_examples_covariance_plot_lw_vs_oas.py`
for more detailed examples.
"""
@_fit_context(prefer_skip_nested_validation=True)
def fit(self, X, y=None):
"""Fit the Oracle Approximating Shrinkage covariance model to X.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data, where `n_samples` is the number of samples
and `n_features` is the number of features.
y : Ignored
Not used, present for API consistency by convention.
Returns
-------
self : object
Returns the instance itself.
"""
X = validate_data(self, X)
# Not calling the parent object to fit, to avoid computing the
# covariance matrix (and potentially the precision)
if self.assume_centered:
self.location_ = np.zeros(X.shape[1])
else:
self.location_ = X.mean(0)
covariance, shrinkage = _oas(X - self.location_, assume_centered=True)
self.shrinkage_ = shrinkage
self._set_covariance(covariance)
return self
| OAS |
python | pytorch__pytorch | test/dynamo/test_sets.py | {
"start": 21501,
"end": 21632
} | class ____(_SetBase, _BaseSetTests):
thetype = set
def test_in_frozenset(self):
super().test_in_frozenset()
| SetTests |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.