language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | urllib3__urllib3 | src/urllib3/exceptions.py | {
"start": 5933,
"end": 6197
} | class ____(LocationValueError):
"""Raised when a URL input has an unsupported scheme."""
def __init__(self, scheme: str):
message = f"Not supported URL scheme {scheme}"
super().__init__(message)
self.scheme = scheme
| URLSchemeUnknown |
python | Netflix__metaflow | test/unit/inheritance/flows/comprehensive_diamond_base.py | {
"start": 685,
"end": 924
} | class ____(FlowSpec):
"""Second branch: different parameters and config"""
param_b = Parameter("param_b", help="Parameter from BaseB", default=50)
config_b = Config("config_b", default_value={"branch": "B", "weight": 2.5})
| BaseB |
python | getsentry__sentry | src/sentry/seer/endpoints/project_seer_preferences.py | {
"start": 2440,
"end": 2799
} | class ____(CamelSnakeSerializer):
repositories = RepositorySerializer(many=True, required=True)
automated_run_stopping_point = serializers.CharField(required=False, allow_null=True)
automation_handoff = SeerAutomationHandoffConfigurationSerializer(
required=False, allow_null=True
)
@region_silo_endpoint
| ProjectSeerPreferencesSerializer |
python | kamyu104__LeetCode-Solutions | Python/minimize-xor.py | {
"start": 59,
"end": 604
} | class ____(object):
def minimizeXor(self, num1, num2):
"""
:type num1: int
:type num2: int
:rtype: int
"""
def popcount(x):
return bin(x)[2:].count('1')
cnt1, cnt2 = popcount(num1), popcount(num2)
result = num1
cnt = abs(cnt1-cnt2)
expect = 1 if cnt1 >= cnt2 else 0
i = 0
while cnt:
if ((num1>>i)&1) == expect:
cnt -= 1
result ^= 1<<i
i += 1
return result
| Solution |
python | pypa__pipenv | pipenv/vendor/tomlkit/items.py | {
"start": 5893,
"end": 7923
} | class ____(Enum):
# Single Line Basic
SLB = '"'
# Multi Line Basic
MLB = '"""'
# Single Line Literal
SLL = "'"
# Multi Line Literal
MLL = "'''"
@classmethod
def select(cls, literal=False, multiline=False) -> StringType:
return {
(False, False): cls.SLB,
(False, True): cls.MLB,
(True, False): cls.SLL,
(True, True): cls.MLL,
}[(literal, multiline)]
@property
def escaped_sequences(self) -> Collection[str]:
# https://toml.io/en/v1.0.0#string
escaped_in_basic = CONTROL_CHARS | {"\\"}
allowed_in_multiline = {"\n", "\r"}
return {
StringType.SLB: escaped_in_basic | {'"'},
StringType.MLB: (escaped_in_basic | {'"""'}) - allowed_in_multiline,
StringType.SLL: (),
StringType.MLL: (),
}[self]
@property
def invalid_sequences(self) -> Collection[str]:
# https://toml.io/en/v1.0.0#string
forbidden_in_literal = CONTROL_CHARS - {"\t"}
allowed_in_multiline = {"\n", "\r"}
return {
StringType.SLB: (),
StringType.MLB: (),
StringType.SLL: forbidden_in_literal | {"'"},
StringType.MLL: (forbidden_in_literal | {"'''"}) - allowed_in_multiline,
}[self]
@property
def unit(self) -> str:
return self.value[0]
def is_basic(self) -> bool:
return self in {StringType.SLB, StringType.MLB}
def is_literal(self) -> bool:
return self in {StringType.SLL, StringType.MLL}
def is_singleline(self) -> bool:
return self in {StringType.SLB, StringType.SLL}
def is_multiline(self) -> bool:
return self in {StringType.MLB, StringType.MLL}
def toggle(self) -> StringType:
return {
StringType.SLB: StringType.MLB,
StringType.MLB: StringType.SLB,
StringType.SLL: StringType.MLL,
StringType.MLL: StringType.SLL,
}[self]
| StringType |
python | getsentry__sentry | src/sentry/models/importchunk.py | {
"start": 3163,
"end": 3569
} | class ____(BaseImportChunk):
"""
Records the pk mapping for the successful import of instances of a model that lives in the
region silo.
"""
__relocation_scope__ = RelocationScope.Excluded
class Meta:
app_label = "sentry"
db_table = "sentry_regionimportchunk"
unique_together = (("import_uuid", "model", "min_ordinal"),)
@control_silo_model
| RegionImportChunk |
python | huggingface__transformers | src/transformers/models/musicgen/modeling_musicgen.py | {
"start": 4261,
"end": 7414
} | class ____(nn.Module):
"""This module produces sinusoidal positional embeddings of any length."""
def __init__(self, num_positions: int, embedding_dim: int):
super().__init__()
self.embedding_dim = embedding_dim
self.make_weights(num_positions, embedding_dim)
def make_weights(self, num_embeddings: int, embedding_dim: int):
emb_weights = self.get_embedding(num_embeddings, embedding_dim)
if hasattr(self, "weights"):
# in forward put the weights on the correct dtype and device of the param
emb_weights = emb_weights.to(dtype=self.weights.dtype, device=self.weights.device)
self.register_buffer("weights", emb_weights, persistent=False)
@staticmethod
def get_embedding(num_embeddings: int, embedding_dim: int):
"""
Build sinusoidal embeddings. This matches the implementation in tensor2tensor, but differs slightly from the
description in Section 3.5 of "Attention Is All You Need".
"""
half_dim = embedding_dim // 2
emb = math.log(10000) / (half_dim - 1)
emb = torch.exp(torch.arange(half_dim, dtype=torch.int64).float() * -emb)
emb = torch.arange(num_embeddings, dtype=torch.int64).float().unsqueeze(1) * emb.unsqueeze(0)
emb = torch.cat([torch.cos(emb), torch.sin(emb)], dim=1).view(num_embeddings, -1)
if embedding_dim % 2 == 1:
# zero pad
emb = torch.cat([emb, torch.zeros(num_embeddings, 1)], dim=1)
return emb.to(torch.get_default_dtype())
@torch.no_grad()
def forward(self, input_ids: torch.Tensor, past_key_values_length: int = 0):
bsz, codebooks, seq_len = input_ids.size()
# Create the position ids from the input token ids.
position_ids = (torch.arange(seq_len) + past_key_values_length).to(input_ids.device)
# expand embeddings if needed
if seq_len > self.weights.size(0):
self.make_weights(seq_len, self.embedding_dim)
return self.weights.index_select(0, position_ids.view(-1)).detach()
# Copied from transformers.models.bert.modeling_bert.eager_attention_forward
def eager_attention_forward(
module: nn.Module,
query: torch.Tensor,
key: torch.Tensor,
value: torch.Tensor,
attention_mask: Optional[torch.Tensor],
scaling: Optional[float] = None,
dropout: float = 0.0,
**kwargs: Unpack[TransformersKwargs],
):
if scaling is None:
scaling = query.size(-1) ** -0.5
# Take the dot product between "query" and "key" to get the raw attention scores.
attn_weights = torch.matmul(query, key.transpose(2, 3)) * scaling
if attention_mask is not None:
attention_mask = attention_mask[:, :, :, : key.shape[-2]]
attn_weights = attn_weights + attention_mask
attn_weights = nn.functional.softmax(attn_weights, dim=-1)
attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)
attn_output = torch.matmul(attn_weights, value)
attn_output = attn_output.transpose(1, 2).contiguous()
return attn_output, attn_weights
| MusicgenSinusoidalPositionalEmbedding |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 912432,
"end": 914273
} | class ____(sgqlc.types.Type):
"""A group of emoji reactions to a particular piece of content."""
__schema__ = github_schema
__field_names__ = ("content", "created_at", "reactors", "subject", "viewer_has_reacted")
content = sgqlc.types.Field(sgqlc.types.non_null(ReactionContent), graphql_name="content")
"""Identifies the emoji reaction."""
created_at = sgqlc.types.Field(DateTime, graphql_name="createdAt")
"""Identifies when the reaction was created."""
reactors = sgqlc.types.Field(
sgqlc.types.non_null("ReactorConnection"),
graphql_name="reactors",
args=sgqlc.types.ArgDict(
(
("after", sgqlc.types.Arg(String, graphql_name="after", default=None)),
("before", sgqlc.types.Arg(String, graphql_name="before", default=None)),
("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)),
("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)),
)
),
)
"""Reactors to the reaction subject with the emotion represented by
this reaction group.
Arguments:
* `after` (`String`): Returns the elements in the list that come
after the specified cursor.
* `before` (`String`): Returns the elements in the list that come
before the specified cursor.
* `first` (`Int`): Returns the first _n_ elements from the list.
* `last` (`Int`): Returns the last _n_ elements from the list.
"""
subject = sgqlc.types.Field(sgqlc.types.non_null(Reactable), graphql_name="subject")
"""The subject that was reacted to."""
viewer_has_reacted = sgqlc.types.Field(sgqlc.types.non_null(Boolean), graphql_name="viewerHasReacted")
"""Whether or not the authenticated user has left a reaction on the
subject.
"""
| ReactionGroup |
python | ultrajson__ultrajson | tests/fuzz.py | {
"start": 2316,
"end": 6296
} | class ____(argparse.Action):
def __call__(self, parser, namespace, values, option_string=None):
values = tuple(map(int, re.findall("[^, ]+", values)))
setattr(namespace, self.dest, values)
parser = argparse.ArgumentParser(
epilog=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter
)
parser.add_argument(
"--seed",
default=range(100),
action=RangeOption,
dest="seeds",
help="A seed or range of seeds (in the form start:end[:step]) "
"to initialise the randomizer.",
)
parser.add_argument(
"--indent",
default=(0, 1, 2, 3, 4, 5, 12, 100, 1000),
action=ListOption,
help="A comma separated sequence of indentation lengths to test.",
)
parser.add_argument(
"--ensure_ascii",
default=(0, 1),
action=ListOption,
help="Sets the ensure_ascii option to ujson.dumps(). "
"May be 0 or 1 or 0,1 to testboth.",
)
parser.add_argument(
"--encode_html_chars",
default=(0, 1),
action=ListOption,
help="Sets the encode_html_chars option to ujson.dumps(). "
"May be 0 or 1 or 0,1 to test both.",
)
parser.add_argument(
"--escape_forward_slashes",
default=(0, 1),
action=ListOption,
help="Sets the escape_forward_slashes option to ujson.dumps(). "
"May be 0 or 1 or 0,1 to test both.",
)
parser.add_argument(
"--dump-python",
action="store_true",
help="Print the randomly generated object as a Python literal and exit.",
)
parser.add_argument(
"--dump-json",
action="store_true",
help="Print the randomly generated object in JSON format and exit.",
)
def cli(args=None):
options = dict(parser.parse_args(args)._get_kwargs())
if options.pop("dump_json"):
print(json.dumps(random_object(options["seeds"][0]), indent=2))
elif options.pop("dump_python"):
pprint(random_object(options["seeds"][0]))
else:
fuzz(**options)
def fuzz(seeds, **options):
try:
for seed in seeds:
data = random_object(seed)
for permutation in itertools.product(*options.values()):
_options = dict(zip(options.keys(), permutation))
print(f"--seed {seed}", *(f"--{k} {v}" for (k, v) in _options.items()))
data_objects = collect_all_objects(data)
# Exclude ints because they get referenced by the lists below.
data_objects = [o for o in data_objects if not isinstance(o, int)]
gc.collect()
data_ref_counts_before = [sys.getrefcount(o) for o in data_objects]
ujson.dumps(data, **_options)
gc.collect()
data_ref_counts_after = [sys.getrefcount(o) for o in data_objects]
if data_ref_counts_before != data_ref_counts_after:
for o, before, after in zip(
data_objects, data_ref_counts_before, data_ref_counts_after
):
if before != after:
print(f"Ref count of {o!r} went from {before} to {after}")
raise ValueError("ref counts changed")
except KeyboardInterrupt:
pass
def collect_all_objects(obj):
"""Given an object, return a list of all objects referenced by it."""
if hasattr(sys, "pypy_version_info"):
# PyPy's GC works differently (no ref counting), so this wouldn't be useful.
# Simply returning an empty list effectively disables the refcount test.
return []
def _inner(o):
yield o
if isinstance(o, list):
for v in o:
yield from _inner(v)
elif isinstance(o, dict):
for k, v in o.items():
yield from _inner(k)
yield from _inner(v)
out = []
seen = set()
for o in _inner(obj):
if id(o) not in seen:
seen.add(id(o))
out.append(o)
return out
if __name__ == "__main__":
cli()
| ListOption |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/pseudoGeneric2.py | {
"start": 403,
"end": 1032
} | class ____(Handler):
def __init__(self, a, b="hello", level=NOTSET):
super().__init__(level)
self._foo_a = a
self._foo_b = b
@property
def value_a(self):
return self._foo_a
@property
def value_b(self):
return self._foo_b
a1 = ClassA(27)
reveal_type(a1.value_a, expected_text="int")
reveal_type(a1.value_b, expected_text="str")
a2 = ClassA("hello", "27")
reveal_type(a2.value_a, expected_text="str")
reveal_type(a2.value_b, expected_text="str")
# This should generate an error because a pseudo-generic
# class is not actually generic.
a3: ClassA[int, str, int]
| ClassA |
python | tensorflow__tensorflow | tensorflow/python/distribute/coordinator/cluster_coordinator_test.py | {
"start": 41819,
"end": 59316
} | class ____(test.TestCase, parameterized.TestCase):
@classmethod
def setUpClass(cls):
super(StrategyIntegrationTest, cls).setUpClass()
cls.coordinator = make_coordinator(num_workers=1, num_ps=1)
cls.strategy = cls.coordinator.strategy
def testRunNotUsedWithClusterCoordinatorSchedule(self):
@def_function.function
def input_fn():
return dataset_ops.DatasetV2.range(1, 3)
with self.strategy.scope():
v = variables.Variable(initial_value=1, dtype=dtypes.int64)
def replica_fn(input_tensor):
return input_tensor + v, input_tensor - v
@def_function.function
def worker_fn(iterator):
return self.strategy.run(replica_fn, args=(next(iterator),))
per_worker_dataset = self.coordinator.create_per_worker_dataset(input_fn)
@contextlib.contextmanager
def _assert_logs_usage_warning():
with self.assertLogs(level='WARNING') as logs:
yield
self.assertIn(
'A `tf.distribute.experimental.ParameterServerStrategy` method is '
'invoked without using `ClusterCoordinator.schedule`. If you are not '
'tracing a tf.function, this method is possibly executed on the '
'coordinator, which can be slow. To properly dispatch functions to '
'run on workers, methods like `run` or `reduce` should be used '
'within a function passed to `tf.distribute.experimental.coordinator.'
'ClusterCoordinator.schedule`.', logs.output[0])
with _assert_logs_usage_warning():
# Invoking `run` without `coordinator.schedule` should result in a
# warning.
self.strategy.run(
replica_fn, args=(constant_op.constant(1, dtype=dtypes.int64),))
# A proper `schedule` should succeed.
rv = self.coordinator.schedule(worker_fn, args=(iter(per_worker_dataset),))
with _assert_logs_usage_warning():
# Invoking `run` without `coordinator.schedule` again should result in a
# warning.
self.strategy.run(
replica_fn, args=(constant_op.constant(1, dtype=dtypes.int64),))
all_results = [(2, 0)] * self.strategy.num_replicas_in_sync
expected_result = []
for i in range(self.strategy.num_replicas_in_sync):
expected_result.append(all_results[i])
self.assertAllEqual(
tuple(expected_result),
self.strategy.experimental_local_results(rv.fetch()))
def testBasicVariableAssignment(self):
self.strategy.extended._variable_count = 0
with self.strategy.scope():
v1 = variables.Variable(initial_value=0.0)
v2 = variables.Variable(initial_value=1.0)
self.assertEqual(self.strategy.extended._variable_count, 2)
@def_function.function
def worker_fn():
v1.assign_add(0.1)
v2.assign_sub(0.2)
return v1.read_value() / v2.read_value()
results = self.coordinator.schedule(worker_fn)
logging.info('Results of experimental_run_v2: %f',
self.coordinator.fetch(results))
self.assertAlmostEqual(v1.read_value().numpy(), 0.1, delta=1e-6)
self.assertAlmostEqual(v2.read_value().numpy(), 0.8, delta=1e-6)
def testRunAndReduce(self):
self.assertFalse(distribute_lib.in_cross_replica_context())
with self.strategy.scope():
self.assertTrue(distribute_lib.in_cross_replica_context())
v = variables.Variable(initial_value=1.)
expected_result = (4. * self.strategy.num_replicas_in_sync,
2. * self.strategy.num_replicas_in_sync)
@def_function.function
def worker_fn(input_tensor):
def replica_fn(input_tensor):
# Within `replica_fn`, it has to be in a replica context.
self.assertFalse(
distribute_lib.in_cross_replica_context())
return input_tensor + v, input_tensor - v
run_result = self.strategy.run(replica_fn, args=(input_tensor,))
reduced_result = self.strategy.reduce('SUM', run_result, axis=None)
check_ops.assert_equal_v2(reduced_result, expected_result)
return reduced_result
# Asserting scheduling in scope has the expected behavior.
result = self.coordinator.schedule(
worker_fn, args=(constant_op.constant(3.),))
self.assertIsInstance(result, coordinator_lib.RemoteValue)
self.assertEqual(result.fetch(), expected_result)
# Asserting scheduling out of scope has the expected behavior.
result = self.coordinator.schedule(
worker_fn, args=(constant_op.constant(3.),))
self.assertEqual(result.fetch(), expected_result)
def testRunAndReduceWithAssignAdd(self):
self.assertFalse(distribute_lib.in_cross_replica_context())
with self.strategy.scope():
self.assertTrue(distribute_lib.in_cross_replica_context())
v = variables.Variable(initial_value=1.)
v1 = variables.Variable(
initial_value=0.,
aggregation=variable_scope.VariableAggregation.ONLY_FIRST_REPLICA)
expected_result = (4. * self.strategy.num_replicas_in_sync,
2. * self.strategy.num_replicas_in_sync)
@def_function.function
def worker_fn(input_tensor):
def replica_fn(input_tensor):
# Within `replica_fn`, it has to be in a replica context.
self.assertFalse(
distribute_lib.in_cross_replica_context())
v1.assign_add(input_tensor)
return input_tensor + v, input_tensor - v
run_result = self.strategy.run(replica_fn, args=(input_tensor,))
reduced_result = self.strategy.reduce('SUM', run_result, axis=None)
check_ops.assert_equal_v2(reduced_result, expected_result)
return reduced_result
# Asserting scheduling in scope has the expected behavior.
result = self.coordinator.schedule(
worker_fn, args=(constant_op.constant(3.),))
self.assertIsInstance(result, coordinator_lib.RemoteValue)
self.assertEqual(result.fetch(), expected_result)
# Asserting scheduling out of scope has the expected behavior.
result = self.coordinator.schedule(
worker_fn, args=(constant_op.constant(3.),))
self.assertEqual(result.fetch(), expected_result)
self.assertEqual(v1, 6.)
def testVariableAggregation(self):
self.assertFalse(distribute_lib.in_cross_replica_context())
with self.strategy.scope():
self.assertTrue(distribute_lib.in_cross_replica_context())
v = variables.Variable(
initial_value=1.,
aggregation=variable_scope.VariableAggregation.SUM)
@def_function.function
def worker_fn():
def replica_fn():
value = math_ops.cast(
distribute_lib.get_replica_context()
.replica_id_in_sync_group + 1, v.dtype)
v.assign(value)
self.strategy.run(replica_fn)
self.coordinator.schedule(worker_fn)
self.coordinator.join()
expected_result = 0.
for i in range(self.strategy.num_replicas_in_sync):
expected_result = expected_result + i + 1
self.assertEqual(v, expected_result)
def testVariableCaching(self):
self.assertFalse(distribute_lib.in_cross_replica_context())
with self.strategy.scope():
self.assertTrue(distribute_lib.in_cross_replica_context())
v = variables.Variable(
initial_value=1.,
aggregation=variable_scope.VariableAggregation.ONLY_FIRST_REPLICA)
# Test read value inside caching scope
with distribute_utils.cache_variable_reads():
v.read_value() # Reads value 1.0
v.assign(constant_op.constant(5.0)) # v changes to 5.0
self.assertEqual(v.read_value(), 1.0) # should be cached 1.0 value.
# Reset v to 2.0
v.assign(2.0)
# Test convert to tensor value inside caching scope
with distribute_utils.cache_variable_reads():
t = v * 3.0
self.assertEqual(t, 6.0)
v.assign(3.0)
t1 = v * 3.0
self.assertEqual(t1, 6.0) # should be cached 2.0 * 3.0 value.
# Reset v to 1.0
v.assign(1.0)
# Verify caching scope inside tf.function
@def_function.function
def worker_fn():
with distribute_utils.cache_variable_reads():
def replica_fn():
t = v.read_value() # Reads value 1.0
v.assign(constant_op.constant(5.0)) # v changes to 5.0
t = v.read_value() # should return 1.0
return t # Should be 1.0 instead of 5.0
return self.strategy.run(replica_fn)
result = self.coordinator.schedule(worker_fn)
result = result.fetch()
expected_result = 1.
self.assertEqual(result, expected_result)
# Verify that v.read_value works as expected outside of scope.
v.assign(4.0)
self.assertEqual(v.read_value(), 4.0)
v.assign(constant_op.constant(2.0)) # v changes to 2.0
# Check with scope outside of tf function and check that cache is reset
@def_function.function
def worker_fn1():
def replica_fn():
t = v.read_value() # Reads value 2.0 ==> Should be cached
v.assign(constant_op.constant(5.0)) # v changes to 5.0
t = v.read_value() # should return cached value 2.0
return t # Should be 2.0 instead of 5.0
return self.strategy.run(replica_fn)
with distribute_utils.cache_variable_reads():
result = self.coordinator.schedule(worker_fn1)
result = result.fetch()
expected_result = 2.
self.assertEqual(result, expected_result)
# Verify scope nesting is not permitted.
with self.assertRaises(ValueError):
with distribute_utils.cache_variable_reads():
with distribute_utils.cache_variable_reads():
v.read_value()
@parameterized.parameters(True, False)
def testDistributedDatasetInsidePerWorkerDatasetFn(self, from_function):
if from_function:
def per_worker_dataset_fn():
dataset_fn = lambda _: dataset_ops.DatasetV2.range(1, 11).batch(4)
return self.strategy.distribute_datasets_from_function(dataset_fn)
else:
def per_worker_dataset_fn():
dataset = dataset_ops.DatasetV2.range(1, 11).batch(4)
return self.strategy.experimental_distribute_dataset(dataset)
@def_function.function
def worker_fn(iterator):
return self.strategy.experimental_local_results(next(iterator))
per_worker_dataset = self.coordinator.create_per_worker_dataset(
per_worker_dataset_fn)
result = self.coordinator.schedule(
worker_fn, args=(iter(per_worker_dataset),))
result = result.fetch()
expected_result = array_ops.split(
math_ops.range(1., 5.),
num_or_size_splits=self.strategy.num_replicas_in_sync,
axis=0)
self.assertAllEqual(result, (expected_result))
@parameterized.parameters(True, False)
def testPassDistributedDatasetToCreatePerWorkerDataset(self, from_function):
if from_function:
dataset_fn = lambda _: dataset_ops.DatasetV2.range(1, 11).batch(4)
distributed_dataset = self.strategy.distribute_datasets_from_function(
dataset_fn)
else:
dataset = dataset_ops.DatasetV2.range(1, 11).batch(4)
distributed_dataset = self.strategy.experimental_distribute_dataset(
dataset)
@def_function.function
def worker_fn(iterator):
return self.strategy.experimental_local_results(next(iterator))
per_worker_dataset = self.coordinator.create_per_worker_dataset(
distributed_dataset)
result = self.coordinator.schedule(
worker_fn, args=(iter(per_worker_dataset),))
result = result.fetch()
expected_result = array_ops.split(
math_ops.range(1., 5.),
num_or_size_splits=self.strategy.num_replicas_in_sync,
axis=0)
self.assertAllEqual(result, (expected_result))
def testDistributeDatasetsFromFunction(self):
def per_worker_dataset_fn():
def input_worker_device_fn(input_context):
self.assertIsNotNone(input_context)
return dataset_ops.DatasetV2.range(1, 11).batch(1)
return self.strategy.distribute_datasets_from_function(
input_worker_device_fn)
@def_function.function
def worker_fn(iterator):
result = self.strategy.experimental_local_results(next(iterator))
return result
distributed_dataset = self.coordinator.create_per_worker_dataset(
per_worker_dataset_fn)
result = self.coordinator.schedule(
worker_fn, args=(iter(distributed_dataset),))
result = result.fetch()
expected_result = []
for i in range(self.strategy.num_replicas_in_sync):
expected_result.append([1 + i])
self.assertAllEqual(result, expected_result)
def testAsyncScheduleWithDistributedDataset(self):
def input_fn():
dataset = dataset_ops.DatasetV2.from_tensor_slices([2.]).repeat().batch(
self.strategy.num_replicas_in_sync)
return self.strategy.experimental_distribute_dataset(dataset)
with self.strategy.scope():
v = variables.Variable(initial_value=[0], dtype=dtypes.float32)
# TODO(yuefengz): the following tf.function has a return value which is None
# in its structured_outputs.
@def_function.function
def worker_fn(iterator):
x = next(iterator)
# Reduce to convert PerReplica values to single value
reduced_value = self.strategy.reduce('MEAN', x, axis=None)
v.assign_add(reduced_value)
distributed_dataset = self.coordinator.create_per_worker_dataset(input_fn)
iterator = iter(distributed_dataset)
# Verifying joining without any scheduling doesn't hang.
self.coordinator.join()
self.assertAllEqual(v.read_value(), (0,))
for _ in range(5):
self.coordinator.schedule(worker_fn, args=(iterator,))
self.coordinator.join()
# With 5 addition it should be 2*5 = 10.
self.assertAllEqual(
self.strategy.experimental_local_results(v.read_value()), ([[10]]))
for _ in range(5):
self.coordinator.schedule(worker_fn, args=(iterator,))
# Verifying multiple join is fine.
self.coordinator.join()
self.coordinator.join()
self.coordinator.join()
self.assertTrue(self.coordinator.done())
# Likewise, it's now 20.
self.assertAllEqual(
self.strategy.experimental_local_results(v.read_value()), ([[20]]))
def testInputFunctionWithMapWithDistributedDataset(self):
self._map_fn_tracing_count = 0
def input_fn():
def map_fn(x):
self._map_fn_tracing_count += 1
return x + 10
dataset = dataset_ops.DatasetV2.range(0, 10).batch(
self.strategy.num_replicas_in_sync).map(map_fn)
return self.strategy.experimental_distribute_dataset(dataset)
@def_function.function
def worker_fn(iterator):
return next(iterator)
distributed_dataset = self.coordinator.create_per_worker_dataset(input_fn)
result = self.coordinator.schedule(
worker_fn, args=(iter(distributed_dataset),))
expected_result = array_ops.split(
math_ops.range(10., 10. + self.strategy.num_replicas_in_sync),
num_or_size_splits=self.strategy.num_replicas_in_sync,
axis=0)
self.assertAllEqual(
self.strategy.experimental_local_results(result.fetch()),
tuple(expected_result))
self.assertEqual(self._map_fn_tracing_count, 1)
def testPerWorkerDistributeDatasetsElementSpec(self):
def per_worker_dataset_fn():
return self.strategy.distribute_datasets_from_function(
lambda _: dataset_ops.DatasetV2.from_tensor_slices([1, 2]))
dataset = dataset_ops.DatasetV2.from_tensor_slices([1, 2])
per_worker_distribute_dataset = self.coordinator.create_per_worker_dataset(
per_worker_dataset_fn)
self.assertAllEqual(
# Converts to PerReplicaSpec when num_replicas_in_sync are > 1
input_lib._create_distributed_tensor_spec(self.strategy,
dataset.element_spec),
per_worker_distribute_dataset.element_spec)
def testPerWorkerDistributedIteratorTypeSpec(self):
self._tracing_count = 0
def per_worker_dataset_fn():
self._tracing_count += 1
return self.strategy.distribute_datasets_from_function(
lambda _: dataset_ops.DatasetV2.range(1, 2))
@def_function.function
def worker_fn(iterator):
return next(iterator)
distributed_iterator = iter(
self.coordinator.create_per_worker_dataset(per_worker_dataset_fn))
worker_fn.get_concrete_function(distributed_iterator)
self.coordinator.schedule(worker_fn, args=(distributed_iterator,))
self.assertEqual(self._tracing_count, 1)
def testPerWorkerDatasetBuild(self):
# Test that we can use Dataset type as input to worker_fn.
@def_function.function
def worker_fn(dataset):
return next(iter(dataset))
dataset_vals = [1, 2]
dataset = dataset_ops.DatasetV2.from_tensor_slices(dataset_vals)
per_worker_dataset = self.coordinator.create_per_worker_dataset(dataset)
per_worker_dataset = per_worker_dataset.build()
result = self.coordinator.schedule(worker_fn, args=(per_worker_dataset,))
self.coordinator.join()
result = result.fetch()
self.assertEqual(result, dataset_vals[0])
# Test that the build() output type specs match the input Dataset spec.
for value in per_worker_dataset._values:
self.assertEqual(value._type_spec, dataset._type_spec)
if __name__ == '__main__':
v2_compat.enable_v2_behavior()
test.main()
| StrategyIntegrationTest |
python | airbytehq__airbyte | airbyte-ci/connectors/connectors_qa/src/connectors_qa/checks/metadata.py | {
"start": 6859,
"end": 9325
} | class ____(MetadataCheck):
"""
Verify that _if_ the most recent connector version has a breaking change,
it's deadline is at least a week in the future.
"""
name = "Breaking change deadline should be a week in the future"
description = "If the connector version has a breaking change, the deadline field must be set to at least a week in the future."
runs_on_released_connectors = False
minimum_days_until_deadline = 7
def _run(self, connector: Connector) -> CheckResult:
# fetch the current branch version of the connector first.
# we'll try and see if there are any breaking changes associated
# with it next.
current_version = connector.version
if current_version is None:
return self.fail(
connector=connector,
message="Can't verify breaking changes deadline: connector version is not defined.",
)
breaking_changes = connector.metadata.get("releases", {}).get("breakingChanges")
if not breaking_changes:
return self.pass_(
connector=connector,
message="No breaking changes found on this connector.",
)
current_version_breaking_changes = breaking_changes.get(current_version)
if not current_version_breaking_changes:
return self.pass_(
connector=connector,
message="No breaking changes found for the current version.",
)
upgrade_deadline = current_version_breaking_changes.get("upgradeDeadline")
if not upgrade_deadline:
return self.fail(
connector=connector,
message=f"No upgrade deadline found for the breaking changes in {current_version}.",
)
upgrade_deadline_datetime = datetime.strptime(upgrade_deadline, "%Y-%m-%d")
one_week_from_now = datetime.utcnow() + timedelta(days=self.minimum_days_until_deadline)
if upgrade_deadline_datetime <= one_week_from_now:
return self.fail(
connector=connector,
message=f"The upgrade deadline for the breaking changes in {current_version} is less than {self.minimum_days_until_deadline} days from today. Please extend the deadline",
)
return self.pass_(connector=connector, message="The upgrade deadline is set to at least a week in the future")
| ValidateBreakingChangesDeadlines |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 251584,
"end": 252129
} | class ____(sgqlc.types.Input):
"""Autogenerated input type of MarkPullRequestReadyForReview"""
__schema__ = github_schema
__field_names__ = ("pull_request_id", "client_mutation_id")
pull_request_id = sgqlc.types.Field(sgqlc.types.non_null(ID), graphql_name="pullRequestId")
"""ID of the pull request to be marked as ready for review."""
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
"""A unique identifier for the client performing the mutation."""
| MarkPullRequestReadyForReviewInput |
python | pandas-dev__pandas | pandas/io/formats/string.py | {
"start": 369,
"end": 6708
} | class ____:
"""Formatter for string representation of a dataframe."""
def __init__(self, fmt: DataFrameFormatter, line_width: int | None = None) -> None:
self.fmt = fmt
self.adj = fmt.adj
self.frame = fmt.frame
self.line_width = line_width
def to_string(self) -> str:
text = self._get_string_representation()
if self.fmt.should_show_dimensions:
text = f"{text}{self.fmt.dimensions_info}"
return text
def _get_strcols(self) -> list[list[str]]:
strcols = self.fmt.get_strcols()
if self.fmt.is_truncated:
strcols = self._insert_dot_separators(strcols)
return strcols
def _get_string_representation(self) -> str:
if self.fmt.frame.empty:
return self._empty_info_line
strcols = self._get_strcols()
if self.line_width is None:
# no need to wrap around just print the whole frame
return self.adj.adjoin(1, *strcols)
if self._need_to_wrap_around:
return self._join_multiline(strcols)
return self._fit_strcols_to_terminal_width(strcols)
@property
def _empty_info_line(self) -> str:
return (
f"Empty {type(self.frame).__name__}\n"
f"Columns: {pprint_thing(self.frame.columns)}\n"
f"Index: {pprint_thing(self.frame.index)}"
)
@property
def _need_to_wrap_around(self) -> bool:
return bool(self.fmt.max_cols is None or self.fmt.max_cols > 0)
def _insert_dot_separators(self, strcols: list[list[str]]) -> list[list[str]]:
str_index = self.fmt._get_formatted_index(self.fmt.tr_frame)
index_length = len(str_index)
if self.fmt.is_truncated_horizontally:
strcols = self._insert_dot_separator_horizontal(strcols, index_length)
if self.fmt.is_truncated_vertically:
strcols = self._insert_dot_separator_vertical(strcols, index_length)
return strcols
@property
def _adjusted_tr_col_num(self) -> int:
return self.fmt.tr_col_num + 1 if self.fmt.index else self.fmt.tr_col_num
def _insert_dot_separator_horizontal(
self, strcols: list[list[str]], index_length: int
) -> list[list[str]]:
strcols.insert(self._adjusted_tr_col_num, [" ..."] * index_length)
return strcols
def _insert_dot_separator_vertical(
self, strcols: list[list[str]], index_length: int
) -> list[list[str]]:
n_header_rows = index_length - len(self.fmt.tr_frame)
row_num = self.fmt.tr_row_num
for ix, col in enumerate(strcols):
cwidth = self.adj.len(col[row_num])
if self.fmt.is_truncated_horizontally:
is_dot_col = ix == self._adjusted_tr_col_num
else:
is_dot_col = False
if cwidth > 3 or is_dot_col:
dots = "..."
else:
dots = ".."
if ix == 0 and self.fmt.index:
dot_mode = "left"
elif is_dot_col:
cwidth = 4
dot_mode = "right"
else:
dot_mode = "right"
dot_str = self.adj.justify([dots], cwidth, mode=dot_mode)[0]
col.insert(row_num + n_header_rows, dot_str)
return strcols
def _join_multiline(self, strcols_input: Iterable[list[str]]) -> str:
lwidth = self.line_width
adjoin_width = 1
strcols = list(strcols_input)
if self.fmt.index:
idx = strcols.pop(0)
lwidth -= np.array([self.adj.len(x) for x in idx]).max() + adjoin_width
col_widths = [
np.array([self.adj.len(x) for x in col]).max() if len(col) > 0 else 0
for col in strcols
]
assert lwidth is not None
col_bins = _binify(col_widths, lwidth)
nbins = len(col_bins)
str_lst = []
start = 0
for i, end in enumerate(col_bins):
row = strcols[start:end]
if self.fmt.index:
row.insert(0, idx)
if nbins > 1:
nrows = len(row[-1])
if end <= len(strcols) and i < nbins - 1:
row.append([" \\"] + [" "] * (nrows - 1))
else:
row.append([" "] * nrows)
str_lst.append(self.adj.adjoin(adjoin_width, *row))
start = end
return "\n\n".join(str_lst)
def _fit_strcols_to_terminal_width(self, strcols: list[list[str]]) -> str:
from pandas import Series
lines = self.adj.adjoin(1, *strcols).split("\n")
max_len = Series(lines).str.len().max()
# plus truncate dot col
width, _ = get_terminal_size()
dif = max_len - width
# '+ 1' to avoid too wide repr (GH PR #17023)
adj_dif = dif + 1
col_lens = Series([Series(ele).str.len().max() for ele in strcols])
n_cols = len(col_lens)
counter = 0
while adj_dif > 0 and n_cols > 1:
counter += 1
mid = round(n_cols / 2)
mid_ix = col_lens.index[mid]
col_len = col_lens[mid_ix]
# adjoin adds one
adj_dif -= col_len + 1
col_lens = col_lens.drop(mid_ix)
n_cols = len(col_lens)
# subtract index column
max_cols_fitted = n_cols - self.fmt.index
# GH-21180. Ensure that we print at least two.
max_cols_fitted = max(max_cols_fitted, 2)
self.fmt.max_cols_fitted = max_cols_fitted
# Call again _truncate to cut frame appropriately
# and then generate string representation
self.fmt.truncate()
strcols = self._get_strcols()
return self.adj.adjoin(1, *strcols)
def _binify(cols: list[int], line_width: int) -> list[int]:
adjoin_width = 1
bins = []
curr_width = 0
i_last_column = len(cols) - 1
for i, w in enumerate(cols):
w_adjoined = w + adjoin_width
curr_width += w_adjoined
if i_last_column == i:
wrap = curr_width + 1 > line_width and i > 0
else:
wrap = curr_width + 2 > line_width and i > 0
if wrap:
bins.append(i)
curr_width = w_adjoined
bins.append(len(cols))
return bins
| StringFormatter |
python | streamlit__streamlit | lib/streamlit/runtime/scriptrunner_utils/exceptions.py | {
"start": 1125,
"end": 1563
} | class ____(ScriptControlException):
"""Silently stop and rerun the user's script."""
def __init__(self, rerun_data: RerunData) -> None:
"""Construct a RerunException.
Parameters
----------
rerun_data : RerunData
The RerunData that should be used to rerun the script
"""
self.rerun_data = rerun_data
def __repr__(self) -> str:
return repr_(self)
| RerunException |
python | pandas-dev__pandas | asv_bench/benchmarks/replace.py | {
"start": 42,
"end": 507
} | class ____:
params = [True, False]
param_names = ["inplace"]
def setup(self, inplace):
N = 10**6
rng = pd.date_range("1/1/2000", periods=N, freq="min")
data = np.random.randn(N)
data[::2] = np.nan
self.ts = pd.Series(data, index=rng)
def time_fillna(self, inplace):
self.ts.fillna(0.0, inplace=inplace)
def time_replace(self, inplace):
self.ts.replace(np.nan, 0.0, inplace=inplace)
| FillNa |
python | Textualize__textual | tests/test_layout_resolve.py | {
"start": 135,
"end": 3338
} | class ____(NamedTuple):
size: int | None = None
fraction: int = 1
min_size: int = 1
def test_empty():
assert layout_resolve(10, []) == []
def test_total_zero():
assert layout_resolve(0, [Edge(10)]) == [10]
def test_single():
# One edge fixed size
assert layout_resolve(100, [Edge(10)]) == [10]
# One edge fraction of 1
assert layout_resolve(100, [Edge(None, 1)]) == [100]
# One edge fraction 3
assert layout_resolve(100, [Edge(None, 2)]) == [100]
# One edge, fraction1, min size 20
assert layout_resolve(100, [Edge(None, 1, 20)]) == [100]
# One edge fraction 1, min size 120
assert layout_resolve(100, [Edge(None, 1, 120)]) == [120]
def test_two():
# Two edges fixed size
assert layout_resolve(100, [Edge(10), Edge(20)]) == [10, 20]
# Two edges, fixed size of one exceeds total
assert layout_resolve(100, [Edge(120), Edge(None, 1)]) == [120, 1]
# Two edges, fraction 1 each
assert layout_resolve(100, [Edge(None, 1), Edge(None, 1)]) == [50, 50]
# Two edges, one with fraction 2, one with fraction 1
# Note first value is rounded down, second is rounded up
assert layout_resolve(100, [Edge(None, 2), Edge(None, 1)]) == [66, 34]
# Two edges, both with fraction 2
assert layout_resolve(100, [Edge(None, 2), Edge(None, 2)]) == [50, 50]
# Two edges, one with fraction 3, one with fraction 1
assert layout_resolve(100, [Edge(None, 3), Edge(None, 1)]) == [75, 25]
# Two edges, one with fraction 3, one with fraction 1, second with min size of 30
assert layout_resolve(100, [Edge(None, 3), Edge(None, 1, 30)]) == [70, 30]
# Two edges, one with fraction 1 and min size 30, one with fraction 3
assert layout_resolve(100, [Edge(None, 1, 30), Edge(None, 3)]) == [30, 70]
@pytest.mark.parametrize(
"size, edges, result",
[
(10, [Edge(8), Edge(None, 0, 2), Edge(4)], [8, 2, 4]),
(10, [Edge(None, 1), Edge(None, 1), Edge(None, 1)], [3, 3, 4]),
(10, [Edge(5), Edge(None, 1), Edge(None, 1)], [5, 2, 3]),
(10, [Edge(None, 2), Edge(None, 1), Edge(None, 1)], [5, 2, 3]),
(10, [Edge(None, 2), Edge(3), Edge(None, 1)], [4, 3, 3]),
(
10,
[Edge(None, 2), Edge(None, 1), Edge(None, 1), Edge(None, 1)],
[4, 2, 2, 2],
),
(
10,
[Edge(None, 4), Edge(None, 1), Edge(None, 1), Edge(None, 1)],
[5, 2, 1, 2],
),
(2, [Edge(None, 1), Edge(None, 1), Edge(None, 1)], [1, 1, 1]),
(
2,
[
Edge(None, 1, min_size=5),
Edge(None, 1, min_size=4),
Edge(None, 1, min_size=3),
],
[5, 4, 3],
),
(
18,
[
Edge(None, 1, min_size=1),
Edge(3),
Edge(None, 1, min_size=1),
Edge(4),
Edge(None, 1, min_size=1),
Edge(5),
Edge(None, 1, min_size=1),
],
[1, 3, 2, 4, 1, 5, 2],
),
],
)
def test_multiple(size, edges, result):
assert layout_resolve(size, edges) == result
| Edge |
python | openai__openai-python | src/openai/types/moderation_create_params.py | {
"start": 410,
"end": 1032
} | class ____(TypedDict, total=False):
input: Required[Union[str, SequenceNotStr[str], Iterable[ModerationMultiModalInputParam]]]
"""Input (or inputs) to classify.
Can be a single string, an array of strings, or an array of multi-modal input
objects similar to other models.
"""
model: Union[str, ModerationModel]
"""The content moderation model you would like to use.
Learn more in
[the moderation guide](https://platform.openai.com/docs/guides/moderation), and
learn about available models
[here](https://platform.openai.com/docs/models#moderation).
"""
| ModerationCreateParams |
python | keras-team__keras | keras/src/layers/convolutional/conv_test.py | {
"start": 26432,
"end": 34802
} | class ____(testing.TestCase):
@parameterized.parameters(
{
"filters": 5,
"kernel_size": 2,
"strides": 1,
"padding": "valid",
"data_format": "channels_last",
"dilation_rate": 1,
"groups": 1,
},
{
"filters": 6,
"kernel_size": 2,
"strides": 1,
"padding": "same",
"data_format": "channels_last",
"dilation_rate": (2,),
"groups": 2,
},
{
"filters": 6,
"kernel_size": 2,
"strides": 1,
"padding": "causal",
"data_format": "channels_last",
"dilation_rate": (2,),
"groups": 2,
},
{
"filters": 6,
"kernel_size": (2,),
"strides": (2,),
"padding": "valid",
"data_format": "channels_last",
"dilation_rate": 1,
"groups": 2,
},
{
"filters": 6,
"kernel_size": (2,),
"strides": (2,),
"padding": "valid",
"data_format": "channels_first",
"dilation_rate": 1,
"groups": 2,
},
)
def test_conv1d(
self,
filters,
kernel_size,
strides,
padding,
data_format,
dilation_rate,
groups,
):
layer = layers.Conv1D(
filters=filters,
kernel_size=kernel_size,
strides=strides,
padding=padding,
data_format=data_format,
dilation_rate=dilation_rate,
groups=groups,
)
inputs = np.random.normal(size=[2, 8, 4])
layer.build(input_shape=inputs.shape)
kernel_shape = layer.kernel.shape
kernel_weights = np.random.normal(size=kernel_shape)
bias_weights = np.random.normal(size=(filters,))
layer.kernel.assign(kernel_weights)
layer.bias.assign(bias_weights)
outputs = layer(inputs)
expected = np_conv1d(
inputs,
kernel_weights,
bias_weights,
strides=strides,
padding=padding,
data_format=data_format,
dilation_rate=dilation_rate,
groups=groups,
)
self.assertAllClose(outputs, expected)
@parameterized.parameters(
{
"filters": 5,
"kernel_size": 2,
"strides": 1,
"padding": "valid",
"data_format": "channels_last",
"dilation_rate": 1,
"groups": 1,
},
{
"filters": 4,
"kernel_size": 3,
"strides": 2,
"padding": "same",
"data_format": "channels_last",
"dilation_rate": 1,
"groups": 1,
},
{
"filters": 6,
"kernel_size": 2,
"strides": 1,
"padding": "same",
"data_format": "channels_last",
"dilation_rate": (2, 2),
"groups": 2,
},
{
"filters": 6,
"kernel_size": 2,
"strides": 1,
"padding": "same",
"data_format": "channels_last",
"dilation_rate": (2, 3),
"groups": 2,
},
{
"filters": 6,
"kernel_size": (4, 3),
"strides": (2, 1),
"padding": "valid",
"data_format": "channels_last",
"dilation_rate": (1, 1),
"groups": 2,
},
{
"filters": 6,
"kernel_size": (4, 3),
"strides": (2, 1),
"padding": "valid",
"data_format": "channels_first",
"dilation_rate": (1, 1),
"groups": 2,
},
)
def test_conv2d(
self,
filters,
kernel_size,
strides,
padding,
data_format,
dilation_rate,
groups,
):
layer = layers.Conv2D(
filters=filters,
kernel_size=kernel_size,
strides=strides,
padding=padding,
data_format=data_format,
dilation_rate=dilation_rate,
groups=groups,
)
inputs = np.random.normal(size=[2, 8, 8, 4])
layer.build(input_shape=inputs.shape)
kernel_shape = layer.kernel.shape
kernel_weights = np.random.normal(size=kernel_shape)
bias_weights = np.random.normal(size=(filters,))
layer.kernel.assign(kernel_weights)
layer.bias.assign(bias_weights)
outputs = layer(inputs)
expected = np_conv2d(
inputs,
kernel_weights,
bias_weights,
strides=strides,
padding=padding,
data_format=data_format,
dilation_rate=dilation_rate,
groups=groups,
)
self.assertAllClose(outputs, expected, rtol=5e-4)
@parameterized.parameters(
{
"filters": 5,
"kernel_size": 2,
"strides": 1,
"padding": "valid",
"data_format": "channels_last",
"dilation_rate": 1,
"groups": 1,
},
{
"filters": 6,
"kernel_size": 2,
"strides": 1,
"padding": "same",
"data_format": "channels_last",
"dilation_rate": (2, 2, 2),
"groups": 2,
},
{
"filters": 6,
"kernel_size": 2,
"strides": 1,
"padding": "same",
"data_format": "channels_last",
"dilation_rate": (2, 3, 4),
"groups": 2,
},
{
"filters": 6,
"kernel_size": (2, 2, 3),
"strides": (2, 1, 2),
"padding": "valid",
"data_format": "channels_last",
"dilation_rate": (1, 1, 1),
"groups": 2,
},
{
"filters": 6,
"kernel_size": (2, 2, 3),
"strides": (2, 1, 2),
"padding": "valid",
"data_format": "channels_first",
"dilation_rate": (1, 1, 1),
"groups": 2,
},
)
def test_conv3d(
self,
filters,
kernel_size,
strides,
padding,
data_format,
dilation_rate,
groups,
):
layer = layers.Conv3D(
filters=filters,
kernel_size=kernel_size,
strides=strides,
padding=padding,
data_format=data_format,
dilation_rate=dilation_rate,
groups=groups,
)
inputs = np.random.normal(size=[2, 8, 8, 8, 4])
layer.build(input_shape=inputs.shape)
kernel_shape = layer.kernel.shape
kernel_weights = np.random.normal(size=kernel_shape)
bias_weights = np.random.normal(size=(filters,))
layer.kernel.assign(kernel_weights)
layer.bias.assign(bias_weights)
outputs = layer(inputs)
expected = np_conv3d(
inputs,
kernel_weights,
bias_weights,
strides=strides,
padding=padding,
data_format=data_format,
dilation_rate=dilation_rate,
groups=groups,
)
self.assertAllClose(outputs, expected, rtol=1e-3)
def test_conv_constraints(self):
layer = layers.Conv2D(
filters=4,
kernel_size=3,
kernel_constraint="non_neg",
)
layer.build((None, 5, 5, 3))
self.assertIsInstance(layer.kernel.constraint, constraints.NonNeg)
layer = layers.Conv2D(
filters=4,
kernel_size=3,
bias_constraint="non_neg",
)
layer.build((None, 5, 5, 3))
self.assertIsInstance(layer.bias.constraint, constraints.NonNeg)
def test_conv_raises_exception_on_zero_dims(self):
x = np.random.rand(3, 4, 4, 4)
l = layers.Conv2D(6, [5, 5], 1, "valid")
# The exception type can vary across backends (e.g., ValueError,
# tf.errors.InvalidArgumentError, RuntimeError).
with self.assertRaises(Exception):
l(x)
| ConvCorrectnessTest |
python | gevent__gevent | src/gevent/_tracer.py | {
"start": 5296,
"end": 6455
} | class ____(_HubTracer):
# A greenlet tracer that records the maximum time between switches,
# not including time spent in the hub.
def __init__(self, hub, max_blocking_time):
_HubTracer.__init__(self, hub, max_blocking_time)
self.last_switch = perf_counter()
self.max_blocking = 0
def _trace(self, event, args):
old_active = self.active_greenlet
GreenletTracer._trace(self, event, args)
if old_active is not self.hub and old_active is not None:
# If we're switching out of the hub, the blocking
# time doesn't count.
switched_at = perf_counter()
self.max_blocking = max(self.max_blocking,
switched_at - self.last_switch)
def did_block_hub(self, hub):
if self.max_blocking == 0:
# We never switched. Check the time now
self.max_blocking = perf_counter() - self.last_switch
if self.max_blocking > self.max_blocking_time:
return True, self.active_greenlet
from gevent._util import import_c_accel
import_c_accel(globals(), 'gevent.__tracer')
| MaxSwitchTracer |
python | astral-sh__uv | scripts/benchmark/src/benchmark/resolver.py | {
"start": 9891,
"end": 11762
} | class ____(Suite):
def __init__(self, *, python: str, path: str | None = None) -> None:
self.python = python
self.name = path or "pip-sync"
self.path = path or "pip-sync"
def resolve_cold(self, requirements_file: str, *, cwd: str) -> Command | None: ...
def resolve_warm(self, requirements_file: str, *, cwd: str) -> Command | None: ...
def resolve_incremental(
self, requirements_file: str, *, cwd: str
) -> Command | None: ...
def resolve_noop(self, requirements_file: str, *, cwd: str) -> Command | None: ...
def install_cold(self, requirements_file: str, *, cwd: str) -> Command | None:
cache_dir = os.path.join(cwd, ".cache")
venv_dir = os.path.join(cwd, ".venv")
return Command(
name=f"{self.name} ({Benchmark.INSTALL_COLD.value})",
prepare=f"rm -rf {cache_dir} && virtualenv --clear -p {self.python} {venv_dir}",
command=[
self.path,
os.path.abspath(requirements_file),
"--pip-args",
f"--cache-dir {cache_dir}",
"--python-executable",
os.path.join(venv_dir, "bin", "python"),
],
)
def install_warm(self, requirements_file: str, *, cwd: str) -> Command | None:
cache_dir = os.path.join(cwd, ".cache")
venv_dir = os.path.join(cwd, ".venv")
return Command(
name=f"{self.name} ({Benchmark.INSTALL_WARM.value})",
prepare=f"virtualenv --clear -p {self.python} {venv_dir}",
command=[
self.path,
os.path.abspath(requirements_file),
"--pip-args",
f"--cache-dir {cache_dir}",
"--python-executable",
os.path.join(venv_dir, "bin", "python"),
],
)
| PipSync |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 492329,
"end": 492949
} | class ____(sgqlc.types.relay.Connection):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("edges", "nodes", "page_info", "total_count")
edges = sgqlc.types.Field(
sgqlc.types.list_of("PinnableItemEdge"), graphql_name="edges"
)
nodes = sgqlc.types.Field(sgqlc.types.list_of("PinnableItem"), graphql_name="nodes")
page_info = sgqlc.types.Field(
sgqlc.types.non_null(PageInfo), graphql_name="pageInfo"
)
total_count = sgqlc.types.Field(
sgqlc.types.non_null(Int), graphql_name="totalCount"
)
| PinnableItemConnection |
python | pennersr__django-allauth | allauth/socialaccount/admin.py | {
"start": 819,
"end": 1022
} | class ____(admin.ModelAdmin):
form = SocialAppForm
list_display = (
"name",
"provider",
)
filter_horizontal = ("sites",) if app_settings.SITES_ENABLED else ()
| SocialAppAdmin |
python | spack__spack | lib/spack/spack/util/ctest_log_parser.py | {
"start": 9574,
"end": 12263
} | class ____(LogEvent):
"""LogEvent subclass for build warnings."""
def chunks(xs, n):
"""Divide xs into n approximately-even chunks."""
chunksize = int(math.ceil(len(xs) / n))
return [xs[i : i + chunksize] for i in range(0, len(xs), chunksize)]
@contextmanager
def _time(times, i):
start = time.time()
yield
end = time.time()
times[i] += end - start
def _match(matches, exceptions, line):
"""True if line matches a regex in matches and none in exceptions."""
return any(m.search(line) for m in matches) and not any(e.search(line) for e in exceptions)
def _profile_match(matches, exceptions, line, match_times, exc_times):
"""Profiled version of match().
Timing is expensive so we have two whole functions. This is much
longer because we have to break up the ``any()`` calls.
"""
for i, m in enumerate(matches):
with _time(match_times, i):
if m.search(line):
break
else:
return False
for i, m in enumerate(exceptions):
with _time(exc_times, i):
if m.search(line):
return False
else:
return True
def _parse(lines, offset, profile):
def compile(regex_array):
return [re.compile(regex) for regex in regex_array]
error_matches = compile(_error_matches)
error_exceptions = compile(_error_exceptions)
warning_matches = compile(_warning_matches)
warning_exceptions = compile(_warning_exceptions)
file_line_matches = compile(_file_line_matches)
matcher, _ = _match, []
timings = []
if profile:
matcher = _profile_match
timings = [
[0.0] * len(error_matches),
[0.0] * len(error_exceptions),
[0.0] * len(warning_matches),
[0.0] * len(warning_exceptions),
]
errors = []
warnings = []
for i, line in enumerate(lines):
# use CTest's regular expressions to scrape the log for events
if matcher(error_matches, error_exceptions, line, *timings[:2]):
event = BuildError(line.strip(), offset + i + 1)
errors.append(event)
elif matcher(warning_matches, warning_exceptions, line, *timings[2:]):
event = BuildWarning(line.strip(), offset + i + 1)
warnings.append(event)
else:
continue
# get file/line number for each event, if possible
for flm in file_line_matches:
match = flm.search(line)
if match:
event.source_file, event.source_line_no = match.groups()
return errors, warnings, timings
def _parse_unpack(args):
return _parse(*args)
| BuildWarning |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/definitions/asset_health/asset_check_health.py | {
"start": 9334,
"end": 12915
} | class ____:
num_not_executed_checks: int
total_num_checks: int
AssetHealthCheckMetadata = Union[
AssetHealthCheckDegradedMetadata,
AssetHealthCheckWarningMetadata,
AssetHealthCheckUnknownMetadata,
]
async def get_asset_check_status_and_metadata(
context: "BaseWorkspaceRequestContext",
asset_key: AssetKey,
) -> tuple[AssetHealthStatus, Optional["AssetHealthCheckMetadata"]]:
"""Converts an AssetCheckHealthState object to a AssetHealthStatus and the metadata
needed to power the UIs and alerting.
"""
asset_check_health_state = await AssetCheckHealthState.gen(context, asset_key)
# captures streamline disabled or consumer state doesn't exist
if asset_check_health_state is None:
if context.instance.streamline_read_asset_health_required("asset-check-health"):
return AssetHealthStatus.UNKNOWN, None
# Note - this will only compute check health if there is a definition for the asset and checks in the
# asset graph. If check results are reported for assets or checks that are not in the asset graph, those
# results will not be picked up. If we add storage methods to get all check results for an asset by
# asset key, rather than by check keys, we could compute check health for the asset in this case.
if not context.asset_graph.has(
asset_key
) or not context.asset_graph.get_check_keys_for_assets({asset_key}):
return AssetHealthStatus.NOT_APPLICABLE, None
remote_check_nodes = context.asset_graph.get_checks_for_asset(asset_key)
asset_check_health_state = await AssetCheckHealthState.compute_for_asset_checks(
{remote_check_node.asset_check.key for remote_check_node in remote_check_nodes},
context,
)
if asset_check_health_state.health_status == AssetHealthStatus.HEALTHY:
return AssetHealthStatus.HEALTHY, None
if asset_check_health_state.health_status == AssetHealthStatus.WARNING:
return (
AssetHealthStatus.WARNING,
AssetHealthCheckWarningMetadata(
num_warning_checks=len(asset_check_health_state.warning_checks),
total_num_checks=len(asset_check_health_state.all_checks),
),
)
if asset_check_health_state.health_status == AssetHealthStatus.DEGRADED:
return (
AssetHealthStatus.DEGRADED,
AssetHealthCheckDegradedMetadata(
num_failed_checks=len(asset_check_health_state.failing_checks),
num_warning_checks=len(asset_check_health_state.warning_checks),
total_num_checks=len(asset_check_health_state.all_checks),
),
)
if asset_check_health_state.health_status == AssetHealthStatus.UNKNOWN:
return (
AssetHealthStatus.UNKNOWN,
AssetHealthCheckUnknownMetadata(
num_not_executed_checks=len(asset_check_health_state.all_checks)
- len(asset_check_health_state.passing_checks)
- len(asset_check_health_state.failing_checks)
- len(asset_check_health_state.warning_checks),
total_num_checks=len(asset_check_health_state.all_checks),
),
)
elif asset_check_health_state.health_status == AssetHealthStatus.NOT_APPLICABLE:
return AssetHealthStatus.NOT_APPLICABLE, None
else:
check.failed(
f"Unexpected asset check health status: {asset_check_health_state.health_status}"
)
| AssetHealthCheckUnknownMetadata |
python | allegroai__clearml | clearml/backend_api/services/v2_20/tasks.py | {
"start": 325835,
"end": 327405
} | class ____(Response):
"""
Response of tasks.get_configuration_names endpoint.
:param configurations: Names of task configuration items (keyed by task ID)
:type configurations: dict
"""
_service = "tasks"
_action = "get_configuration_names"
_version = "2.20"
_schema = {
"definitions": {},
"properties": {
"configurations": {
"description": "Names of task configuration items (keyed by task ID)",
"properties": {
"names": {
"description": "Configuration names",
"items": {"type": "string"},
"type": "array",
},
"task": {"description": "Task ID", "type": "string"},
},
"type": ["object", "null"],
}
},
"type": "object",
}
def __init__(self, configurations: Optional[dict] = None, **kwargs: Any) -> None:
super(GetConfigurationNamesResponse, self).__init__(**kwargs)
self.configurations = configurations
@schema_property("configurations")
def configurations(self) -> Optional[dict]:
return self._property_configurations
@configurations.setter
def configurations(self, value: Optional[dict]) -> None:
if value is None:
self._property_configurations = None
return
self.assert_isinstance(value, "configurations", (dict,))
self._property_configurations = value
| GetConfigurationNamesResponse |
python | ipython__ipython | IPython/core/prefilter.py | {
"start": 12733,
"end": 13942
} | class ____(Configurable):
"""Transform a line of user input."""
priority = Integer(100).tag(config=True)
# Transformers don't currently use shell or prefilter_manager, but as we
# move away from checkers and handlers, they will need them.
shell = Instance('IPython.core.interactiveshell.InteractiveShellABC', allow_none=True)
prefilter_manager = Instance('IPython.core.prefilter.PrefilterManager', allow_none=True)
enabled = Bool(True).tag(config=True)
def __init__(self, shell=None, prefilter_manager=None, **kwargs):
super(PrefilterTransformer, self).__init__(
shell=shell, prefilter_manager=prefilter_manager, **kwargs
)
self.prefilter_manager.register_transformer(self)
def transform(self, line, continue_prompt):
"""Transform a line, returning the new one."""
return None
def __repr__(self):
return "<%s(priority=%r, enabled=%r)>" % (
self.__class__.__name__, self.priority, self.enabled)
#-----------------------------------------------------------------------------
# Prefilter checkers
#-----------------------------------------------------------------------------
| PrefilterTransformer |
python | pandas-dev__pandas | pandas/core/groupby/ops.py | {
"start": 18522,
"end": 34746
} | class ____:
"""
This is an internal Grouper class, which actually holds
the generated groups
Parameters
----------
axis : Index
groupings : Sequence[Grouping]
all the grouping instances to handle in this grouper
for example for grouper list to groupby, need to pass the list
sort : bool, default True
whether this grouper will give sorted result or not
"""
axis: Index
def __init__(
self,
axis: Index,
groupings: list[grouper.Grouping],
sort: bool = True,
dropna: bool = True,
) -> None:
assert isinstance(axis, Index), axis
self.axis = axis
self._groupings = groupings
self._sort = sort
self.dropna = dropna
@property
def groupings(self) -> list[grouper.Grouping]:
return self._groupings
def __iter__(self) -> Iterator[Hashable]:
return iter(self.indices)
@property
def nkeys(self) -> int:
return len(self.groupings)
def get_iterator(self, data: NDFrameT) -> Iterator[tuple[Hashable, NDFrameT]]:
"""
Groupby iterator
Returns
-------
Generator yielding sequence of (name, subsetted object)
for each group
"""
splitter = self._get_splitter(data)
# TODO: Would be more efficient to skip unobserved for transforms
keys = self.result_index
yield from zip(keys, splitter, strict=True)
@final
def _get_splitter(self, data: NDFrame) -> DataSplitter:
"""
Returns
-------
Generator yielding subsetted objects
"""
if isinstance(data, Series):
klass: type[DataSplitter] = SeriesSplitter
else:
# i.e. DataFrame
klass = FrameSplitter
return klass(
data,
self.ngroups,
sorted_ids=self._sorted_ids,
sort_idx=self.result_ilocs,
)
@cache_readonly
def indices(self) -> dict[Hashable, npt.NDArray[np.intp]]:
"""dict {group name -> group indices}"""
if len(self.groupings) == 1 and isinstance(self.result_index, CategoricalIndex):
# This shows unused categories in indices GH#38642
return self.groupings[0].indices
codes_list = [ping.codes for ping in self.groupings]
return get_indexer_dict(codes_list, self.levels)
@final
@cache_readonly
def result_ilocs(self) -> npt.NDArray[np.intp]:
"""
Get the original integer locations of result_index in the input.
"""
# Original indices are where group_index would go via sorting.
# But when dropna is true, we need to remove null values while accounting for
# any gaps that then occur because of them.
ids = self.ids
if self.has_dropped_na:
mask = np.where(ids >= 0)
# Count how many gaps are caused by previous null values for each position
null_gaps = np.cumsum(ids == -1)[mask]
ids = ids[mask]
result = get_group_index_sorter(ids, self.ngroups)
if self.has_dropped_na:
# Shift by the number of prior null gaps
result += np.take(null_gaps, result)
return result
@property
def codes(self) -> list[npt.NDArray[np.signedinteger]]:
return [ping.codes for ping in self.groupings]
@property
def levels(self) -> list[Index]:
if len(self.groupings) > 1:
# mypy doesn't know result_index must be a MultiIndex
return list(self.result_index.levels) # type: ignore[attr-defined]
else:
return [self.result_index]
@property
def names(self) -> list[Hashable]:
return [ping.name for ping in self.groupings]
@final
def size(self) -> Series:
"""
Compute group sizes.
"""
ids = self.ids
ngroups = self.ngroups
out: np.ndarray | list
if ngroups:
out = np.bincount(ids[ids != -1], minlength=ngroups)
else:
out = []
return Series(out, index=self.result_index, dtype="int64", copy=False)
@cache_readonly
def groups(self) -> dict[Hashable, Index]:
"""dict {group name -> group labels}"""
if len(self.groupings) == 1:
return self.groupings[0].groups
result_index, ids = self.result_index_and_ids
values = result_index._values
categories = Categorical.from_codes(ids, categories=range(len(result_index)))
result = {
# mypy is not aware that group has to be an integer
values[group]: self.axis.take(axis_ilocs) # type: ignore[call-overload]
for group, axis_ilocs in categories._reverse_indexer().items()
}
return result
@final
@cache_readonly
def is_monotonic(self) -> bool:
# return if my group orderings are monotonic
return Index(self.ids).is_monotonic_increasing
@final
@cache_readonly
def has_dropped_na(self) -> bool:
"""
Whether grouper has null value(s) that are dropped.
"""
return bool((self.ids < 0).any())
@cache_readonly
def codes_info(self) -> npt.NDArray[np.intp]:
# return the codes of items in original grouped axis
return self.ids
@final
@cache_readonly
def ngroups(self) -> int:
return len(self.result_index)
@property
def result_index(self) -> Index:
return self.result_index_and_ids[0]
@property
def ids(self) -> npt.NDArray[np.intp]:
return self.result_index_and_ids[1]
@cache_readonly
def result_index_and_ids(self) -> tuple[Index, npt.NDArray[np.intp]]:
levels = [Index._with_infer(ping.uniques) for ping in self.groupings]
obs = [
ping._observed or not ping._passed_categorical for ping in self.groupings
]
sorts = [ping._sort for ping in self.groupings]
# When passed a categorical grouping, keep all categories
for k, (ping, level) in enumerate(zip(self.groupings, levels, strict=True)):
if ping._passed_categorical:
levels[k] = level.set_categories(ping._orig_cats)
if len(self.groupings) == 1:
result_index = levels[0]
result_index.name = self.names[0]
ids = ensure_platform_int(self.codes[0])
elif all(obs):
result_index, ids = self._ob_index_and_ids(
levels, self.codes, self.names, sorts
)
elif not any(obs):
result_index, ids = self._unob_index_and_ids(levels, self.codes, self.names)
else:
# Combine unobserved and observed parts
names = self.names
codes = [ping.codes for ping in self.groupings]
ob_indices = [idx for idx, ob in enumerate(obs) if ob]
unob_indices = [idx for idx, ob in enumerate(obs) if not ob]
ob_index, ob_ids = self._ob_index_and_ids(
levels=[levels[idx] for idx in ob_indices],
codes=[codes[idx] for idx in ob_indices],
names=[names[idx] for idx in ob_indices],
sorts=[sorts[idx] for idx in ob_indices],
)
unob_index, unob_ids = self._unob_index_and_ids(
levels=[levels[idx] for idx in unob_indices],
codes=[codes[idx] for idx in unob_indices],
names=[names[idx] for idx in unob_indices],
)
result_index_codes = np.concatenate(
[
np.tile(unob_index.codes, len(ob_index)),
np.repeat(ob_index.codes, len(unob_index), axis=1),
],
axis=0,
)
_, index = np.unique(unob_indices + ob_indices, return_index=True)
result_index = MultiIndex(
levels=list(unob_index.levels) + list(ob_index.levels),
codes=result_index_codes,
names=list(unob_index.names) + list(ob_index.names),
).reorder_levels(index)
ids = len(unob_index) * ob_ids + unob_ids
if any(sorts):
# Sort result_index and recode ids using the new order
n_levels = len(sorts)
drop_levels = [
n_levels - idx
for idx, sort in enumerate(reversed(sorts), 1)
if not sort
]
if len(drop_levels) > 0:
sorter = result_index._drop_level_numbers(drop_levels).argsort()
else:
sorter = result_index.argsort()
result_index = result_index.take(sorter)
_, index = np.unique(sorter, return_index=True)
ids = ensure_platform_int(ids)
ids = index.take(ids)
else:
# Recode ids and reorder result_index with observed groups up front,
# unobserved at the end
ids, uniques = compress_group_index(ids, sort=False)
ids = ensure_platform_int(ids)
taker = np.concatenate(
[uniques, np.delete(np.arange(len(result_index)), uniques)]
)
result_index = result_index.take(taker)
return result_index, ids
@property
def observed_grouper(self) -> BaseGrouper:
if all(ping._observed for ping in self.groupings):
return self
return self._observed_grouper
@cache_readonly
def _observed_grouper(self) -> BaseGrouper:
groupings = [ping.observed_grouping for ping in self.groupings]
grouper = BaseGrouper(self.axis, groupings, sort=self._sort, dropna=self.dropna)
return grouper
def _ob_index_and_ids(
self,
levels: list[Index],
codes: list[npt.NDArray[np.intp]],
names: list[Hashable],
sorts: list[bool],
) -> tuple[MultiIndex, npt.NDArray[np.intp]]:
consistent_sorting = all(sorts[0] == sort for sort in sorts[1:])
sort_in_compress = sorts[0] if consistent_sorting else False
shape = tuple(len(level) for level in levels)
group_index = get_group_index(codes, shape, sort=True, xnull=True)
ob_ids, obs_group_ids = compress_group_index(group_index, sort=sort_in_compress)
ob_ids = ensure_platform_int(ob_ids)
ob_index_codes = decons_obs_group_ids(
ob_ids, obs_group_ids, shape, codes, xnull=True
)
ob_index = MultiIndex(
levels=levels,
codes=ob_index_codes,
names=names,
verify_integrity=False,
)
if not consistent_sorting and len(ob_index) > 0:
# Sort by the levels where the corresponding sort argument is True
n_levels = len(sorts)
drop_levels = [
n_levels - idx
for idx, sort in enumerate(reversed(sorts), 1)
if not sort
]
if len(drop_levels) > 0:
sorter = ob_index._drop_level_numbers(drop_levels).argsort()
else:
sorter = ob_index.argsort()
ob_index = ob_index.take(sorter)
_, index = np.unique(sorter, return_index=True)
ob_ids = np.where(ob_ids == -1, -1, index.take(ob_ids))
ob_ids = ensure_platform_int(ob_ids)
return ob_index, ob_ids
def _unob_index_and_ids(
self,
levels: list[Index],
codes: list[npt.NDArray[np.intp]],
names: list[Hashable],
) -> tuple[MultiIndex, npt.NDArray[np.intp]]:
shape = tuple(len(level) for level in levels)
unob_ids = get_group_index(codes, shape, sort=True, xnull=True)
unob_index = MultiIndex.from_product(levels, names=names)
unob_ids = ensure_platform_int(unob_ids)
return unob_index, unob_ids
@final
def get_group_levels(self) -> Generator[Index]:
# Note: only called from _insert_inaxis_grouper, which
# is only called for BaseGrouper, never for BinGrouper
result_index = self.result_index
if len(self.groupings) == 1:
yield result_index
else:
for level in range(result_index.nlevels - 1, -1, -1):
yield result_index.get_level_values(level)
# ------------------------------------------------------------
# Aggregation functions
@final
def _cython_operation(
self,
kind: str,
values,
how: str,
axis: AxisInt,
min_count: int = -1,
**kwargs,
) -> ArrayLike:
"""
Returns the values of a cython operation.
"""
assert kind in ["transform", "aggregate"]
cy_op = WrappedCythonOp(kind=kind, how=how, has_dropped_na=self.has_dropped_na)
return cy_op.cython_operation(
values=values,
axis=axis,
min_count=min_count,
comp_ids=self.ids,
ngroups=self.ngroups,
**kwargs,
)
@final
def agg_series(
self, obj: Series, func: Callable, preserve_dtype: bool = False
) -> ArrayLike:
"""
Parameters
----------
obj : Series
func : function taking a Series and returning a scalar-like
preserve_dtype : bool
Whether the aggregation is known to be dtype-preserving.
Returns
-------
np.ndarray or ExtensionArray
"""
result = self._aggregate_series_pure_python(obj, func)
return obj.array._cast_pointwise_result(result)
@final
def _aggregate_series_pure_python(
self, obj: Series, func: Callable
) -> npt.NDArray[np.object_]:
result = np.empty(self.ngroups, dtype="O")
initialized = False
splitter = self._get_splitter(obj)
for i, group in enumerate(splitter):
res = func(group)
res = extract_result(res)
if not initialized:
# We only do this validation on the first iteration
check_result_array(res, group.dtype)
initialized = True
result[i] = res
return result
@final
def apply_groupwise(
self, f: Callable, data: DataFrame | Series
) -> tuple[list, bool]:
mutated = False
splitter = self._get_splitter(data)
group_keys = self.result_index
result_values = []
# This calls DataSplitter.__iter__
zipped = zip(group_keys, splitter, strict=True)
for key, group in zipped:
# Pinning name is needed for
# test_group_apply_once_per_group,
# test_inconsistent_return_type, test_set_group_name,
# test_group_name_available_in_inference_pass,
# test_groupby_multi_timezone
object.__setattr__(group, "name", key)
# group might be modified
group_axes = group.axes
res = f(group)
if not mutated and not _is_indexed_like(res, group_axes):
mutated = True
result_values.append(res)
# getattr pattern for __name__ is needed for functools.partial objects
if len(group_keys) == 0 and getattr(f, "__name__", None) in [
"skew",
"kurt",
"sum",
"prod",
]:
# If group_keys is empty, then no function calls have been made,
# so we will not have raised even if this is an invalid dtype.
# So do one dummy call here to raise appropriate TypeError.
f(data.iloc[:0])
return result_values, mutated
# ------------------------------------------------------------
# Methods for sorting subsets of our GroupBy's object
@final
@cache_readonly
def _sorted_ids(self) -> npt.NDArray[np.intp]:
result = self.ids.take(self.result_ilocs)
if getattr(self, "dropna", True):
# BinGrouper has no dropna
result = result[result >= 0]
return result
| BaseGrouper |
python | getsentry__sentry | tests/sentry/dashboards/endpoints/test_organization_dashboard_details.py | {
"start": 168375,
"end": 170963
} | class ____(OrganizationDashboardDetailsTestCase):
def setUp(self) -> None:
super().setUp()
# Create two additional users
self.user_1 = self.create_user(email="user1@example.com")
self.user_2 = self.create_user(email="user2@example.com")
self.create_member(user=self.user_1, organization=self.organization)
self.create_member(user=self.user_2, organization=self.organization)
# Both users have favorited the dashboard
self.dashboard.favorited_by = [self.user_1.id, self.user_2.id]
def url(self, dashboard_id):
return reverse(
"sentry-api-0-organization-dashboard-favorite",
kwargs={
"organization_id_or_slug": self.organization.slug,
"dashboard_id": dashboard_id,
},
)
# PUT tests
def test_favorite_dashboard(self) -> None:
assert self.user.id not in self.dashboard.favorited_by
self.login_as(user=self.user)
response = self.do_request("put", self.url(self.dashboard.id), data={"isFavorited": "true"})
assert response.status_code == 204
assert self.user.id in self.dashboard.favorited_by
def test_unfavorite_dashboard(self) -> None:
assert self.user_1.id in self.dashboard.favorited_by
self.login_as(user=self.user_1)
response = self.do_request("put", self.url(self.dashboard.id), data={"isFavorited": False})
assert response.status_code == 204
assert self.user_1.id not in self.dashboard.favorited_by
def test_favorite_dashboard_no_dashboard_edit_access(self) -> None:
DashboardPermissions.objects.create(is_editable_by_everyone=False, dashboard=self.dashboard)
self.login_as(user=self.user_2)
dashboard_detail_url = reverse(
"sentry-api-0-organization-dashboard-details",
kwargs={
"organization_id_or_slug": self.organization.slug,
"dashboard_id": self.dashboard.id,
},
)
response = self.do_request("put", dashboard_detail_url, data={"title": "New Dashboard 9"})
# assert user cannot edit dashboard
assert response.status_code == 403
# assert if user can edit the favorite status of the dashboard
assert self.user_2.id in self.dashboard.favorited_by
response = self.do_request("put", self.url(self.dashboard.id), data={"isFavorited": False})
assert response.status_code == 204
assert self.user_2.id not in self.dashboard.favorited_by
| OrganizationDashboardFavoriteTest |
python | google__pytype | pytype/tests/test_quick1.py | {
"start": 63,
"end": 2536
} | class ____(test_base.BaseTest):
"""Tests for --quick."""
def test_max_depth(self):
ty = self.Infer(
"""
class Foo:
def __init__(self, elements):
assert all(e for e in elements)
self.elements = elements
def bar(self):
return self.elements
""",
quick=True,
)
self.assertTypesMatchPytd(
ty,
"""
from typing import Any
class Foo:
elements = ... # type: Any
def __init__(self, elements: Any) -> None: ...
def bar(self) -> Any: ...
""",
)
def test_closure(self):
ty = self.Infer(
"""
def f():
class A: pass
return {A: A()}
""",
quick=True,
maximum_depth=1,
)
self.assertTypesMatchPytd(
ty,
"""
def f() -> dict: ...
""",
)
def test_init(self):
# Tests that it's possible for --quick to handle this case with a large
# enough maximum depth, even though it can't currently due to
# QUICK_INFER_MAXIMUM_DEPTH being 1.
ty = self.Infer(
"""
class A:
def __init__(self):
self.real_init()
def real_init(self):
self.x = 42
def f(self):
return self.x
def f():
return A().f()
""",
quick=True,
maximum_depth=2,
)
self.assertTypesMatchPytd(
ty,
"""
from typing import Any
class A:
x = ... # type: int
def __init__(self) -> None: ...
def real_init(self) -> None: ...
def f(self) -> int: ...
def f() -> Any: ...
""",
)
def test_analyze_annotated_max_depth(self):
# --output with --analyze-annotated has the same max depth as --check.
_, errors = self.InferWithErrors(
"""
def make_greeting(user_id):
return 'hello, user' + user_id # unsupported-operands[e]
def print_greeting():
print(make_greeting(0))
""",
quick=True,
)
self.assertErrorRegexes(errors, {"e": r"str.*int"})
def test_max_depth_and_property(self):
self.Check(
"""
class C:
def __init__(self):
self.f()
def f(self):
pass
@property
def x(self) -> int:
return 0
def g(self):
assert_type(self.x, int)
""",
quick=True,
maximum_depth=1,
)
if __name__ == "__main__":
test_base.main()
| QuickTest |
python | pallets__werkzeug | examples/coolmagic/utils.py | {
"start": 1461,
"end": 1806
} | class ____(BaseRequest):
"""
The concrete request object used in the WSGI application.
It has some helper functions that can be used to build URLs.
"""
charset = "utf-8"
def __init__(self, environ, url_adapter):
super().__init__(environ)
self.url_adapter = url_adapter
local.request = self
| Request |
python | spack__spack | lib/spack/spack/test/install.py | {
"start": 12854,
"end": 22874
} | class ____(spack.error.SpackError):
pass
def test_uninstall_by_spec_errors(mutable_database):
"""Test exceptional cases with the uninstall command."""
# Try to uninstall a spec that has not been installed
spec = spack.concretize.concretize_one("dependent-install")
with pytest.raises(InstallError, match="is not installed"):
PackageBase.uninstall_by_spec(spec)
# Try an unforced uninstall of a spec with dependencies
rec = mutable_database.get_record("mpich")
with pytest.raises(PackageStillNeededError, match="Cannot uninstall"):
PackageBase.uninstall_by_spec(rec.spec)
@pytest.mark.disable_clean_stage_check
def test_nosource_pkg_install(install_mockery, mock_fetch, mock_packages, capfd, ensure_debug):
"""Test install phases with the nosource package."""
spec = spack.concretize.concretize_one("nosource")
pkg = spec.package
# Make sure install works even though there is no associated code.
PackageInstaller([pkg], explicit=True).install()
out = capfd.readouterr()
assert "Installing dependency-install" in out[0]
# Make sure a warning for missing code is issued
assert "Missing a source id for nosource" in out[1]
@pytest.mark.disable_clean_stage_check
def test_nosource_bundle_pkg_install(
install_mockery, mock_fetch, mock_packages, capfd, ensure_debug
):
"""Test install phases with the nosource-bundle package."""
spec = spack.concretize.concretize_one("nosource-bundle")
pkg = spec.package
# Make sure install works even though there is no associated code.
PackageInstaller([pkg], explicit=True).install()
out = capfd.readouterr()
assert "Installing dependency-install" in out[0]
# Make sure a warning for missing code is *not* issued
assert "Missing a source id for nosource" not in out[1]
def test_nosource_pkg_install_post_install(install_mockery, mock_fetch, mock_packages):
"""Test install phases with the nosource package with post-install."""
spec = spack.concretize.concretize_one("nosource-install")
pkg = spec.package
# Make sure both the install and post-install package methods work.
PackageInstaller([pkg], explicit=True).install()
# Ensure the file created in the package's `install` method exists.
install_txt = os.path.join(spec.prefix, "install.txt")
assert os.path.isfile(install_txt)
# Ensure the file created in the package's `post-install` method exists.
post_install_txt = os.path.join(spec.prefix, "post-install.txt")
assert os.path.isfile(post_install_txt)
def test_pkg_build_paths(install_mockery):
# Get a basic concrete spec for the trivial install package.
spec = spack.concretize.concretize_one("trivial-install-test-package")
assert spec.package.log_path.endswith(_spack_build_logfile)
assert spec.package.env_path.endswith(_spack_build_envfile)
def test_pkg_install_paths(install_mockery):
# Get a basic concrete spec for the trivial install package.
spec = spack.concretize.concretize_one("trivial-install-test-package")
log_path = os.path.join(spec.prefix, ".spack", _spack_build_logfile + ".gz")
assert spec.package.install_log_path == log_path
env_path = os.path.join(spec.prefix, ".spack", _spack_build_envfile)
assert spec.package.install_env_path == env_path
args_path = os.path.join(spec.prefix, ".spack", _spack_configure_argsfile)
assert spec.package.install_configure_args_path == args_path
# Backward compatibility checks
log_dir = os.path.dirname(log_path)
fs.mkdirp(log_dir)
with fs.working_dir(log_dir):
# Start with the older of the previous install log filenames
older_log = "build.out"
fs.touch(older_log)
assert spec.package.install_log_path.endswith(older_log)
# Now check the newer install log filename
last_log = "build.txt"
fs.rename(older_log, last_log)
assert spec.package.install_log_path.endswith(last_log)
# Check the old install environment file
last_env = "build.env"
fs.rename(last_log, last_env)
assert spec.package.install_env_path.endswith(last_env)
# Cleanup
shutil.rmtree(log_dir)
def test_log_install_without_build_files(install_mockery):
"""Test the installer log function when no build files are present."""
# Get a basic concrete spec for the trivial install package.
spec = spack.concretize.concretize_one("trivial-install-test-package")
# Attempt installing log without the build log file
with pytest.raises(OSError, match="No such file or directory"):
spack.installer.log(spec.package)
def test_log_install_with_build_files(install_mockery, monkeypatch):
"""Test the installer's log function when have build files."""
config_log = "config.log"
# Retain the original function for use in the monkey patch that is used
# to raise an exception under the desired condition for test coverage.
orig_install_fn = fs.install
def _install(src, dest):
orig_install_fn(src, dest)
if src.endswith(config_log):
raise Exception("Mock log install error")
monkeypatch.setattr(fs, "install", _install)
spec = spack.concretize.concretize_one("trivial-install-test-package")
# Set up mock build files and try again to include archive failure
log_path = spec.package.log_path
log_dir = os.path.dirname(log_path)
fs.mkdirp(log_dir)
with fs.working_dir(log_dir):
fs.touch(log_path)
fs.touch(spec.package.env_path)
fs.touch(spec.package.env_mods_path)
fs.touch(spec.package.configure_args_path)
install_path = os.path.dirname(spec.package.install_log_path)
fs.mkdirp(install_path)
source = spec.package.stage.source_path
config = os.path.join(source, "config.log")
fs.touchp(config)
monkeypatch.setattr(
type(spec.package), "archive_files", ["missing", "..", config], raising=False
)
spack.installer.log(spec.package)
assert os.path.exists(spec.package.install_log_path)
assert os.path.exists(spec.package.install_env_path)
assert os.path.exists(spec.package.install_configure_args_path)
archive_dir = os.path.join(install_path, "archived-files")
source_dir = os.path.dirname(source)
rel_config = os.path.relpath(config, source_dir)
assert os.path.exists(os.path.join(archive_dir, rel_config))
assert not os.path.exists(os.path.join(archive_dir, "missing"))
expected_errs = ["OUTSIDE SOURCE PATH", "FAILED TO ARCHIVE"] # for '..' # for rel_config
with open(os.path.join(archive_dir, "errors.txt"), "r", encoding="utf-8") as fd:
for ln, expected in zip(fd, expected_errs):
assert expected in ln
# Cleanup
shutil.rmtree(log_dir)
def test_unconcretized_install(install_mockery, mock_fetch, mock_packages):
"""Test attempts to perform install phases with unconcretized spec."""
spec = Spec("trivial-install-test-package")
pkg_cls = spack.repo.PATH.get_pkg_class(spec.name)
with pytest.raises(ValueError, match="must have a concrete spec"):
PackageInstaller([pkg_cls(spec)], explicit=True).install()
with pytest.raises(ValueError, match="only patch concrete packages"):
pkg_cls(spec).do_patch()
def test_install_error():
try:
msg = "test install error"
long_msg = "this is the long version of test install error"
raise InstallError(msg, long_msg=long_msg)
except Exception as exc:
assert exc.__class__.__name__ == "InstallError"
assert exc.message == msg
assert exc.long_message == long_msg
@pytest.mark.disable_clean_stage_check
def test_empty_install_sanity_check_prefix(
monkeypatch, install_mockery, mock_fetch, mock_packages
):
"""Test empty install triggers sanity_check_prefix."""
spec = spack.concretize.concretize_one("failing-empty-install")
with pytest.raises(spack.build_environment.ChildError, match="Nothing was installed"):
PackageInstaller([spec.package], explicit=True).install()
def test_install_from_binary_with_missing_patch_succeeds(
temporary_store: spack.store.Store, mutable_config, tmp_path: pathlib.Path, mock_packages
):
"""If a patch is missing in the local package repository, but was present when building and
pushing the package to a binary cache, installation from that binary cache shouldn't error out
because of the missing patch."""
# Create a spec s with non-existing patches
s = spack.concretize.concretize_one("trivial-install-test-package")
patches = ["a" * 64]
s_dict = s.to_dict()
s_dict["spec"]["nodes"][0]["patches"] = patches
s_dict["spec"]["nodes"][0]["parameters"]["patches"] = patches
s = Spec.from_dict(s_dict)
# Create an install dir for it
os.makedirs(os.path.join(s.prefix, ".spack"))
with open(os.path.join(s.prefix, ".spack", "spec.json"), "w", encoding="utf-8") as f:
s.to_json(f)
# And register it in the database
temporary_store.db.add(s, explicit=True)
# Push it to a binary cache
mirror = spack.mirrors.mirror.Mirror.from_local_path(str(tmp_path / "my_build_cache"))
with binary_distribution.make_uploader(mirror=mirror) as uploader:
uploader.push_or_raise([s])
# Now re-install it.
s.package.do_uninstall()
assert not temporary_store.db.query_local_by_spec_hash(s.dag_hash())
# Source install: fails, we don't have the patch.
with pytest.raises(spack.error.SpecError, match="Couldn't find patch for package"):
PackageInstaller([s.package], explicit=True).install()
# Binary install: succeeds, we don't need the patch.
spack.mirrors.utils.add(mirror)
PackageInstaller(
[s.package],
explicit=True,
root_policy="cache_only",
dependencies_policy="cache_only",
unsigned=True,
).install()
assert temporary_store.db.query_local_by_spec_hash(s.dag_hash())
| MockInstallError |
python | redis__redis-py | redis/commands/policies.py | {
"start": 6189,
"end": 7349
} | class ____(PolicyResolver):
"""
Base class for policy resolvers.
"""
def __init__(
self, policies: PolicyRecords, fallback: Optional[PolicyResolver] = None
) -> None:
self._policies = policies
self._fallback = fallback
def resolve(self, command_name: str) -> Optional[CommandPolicies]:
parts = command_name.split(".")
if len(parts) > 2:
raise ValueError(f"Wrong command or module name: {command_name}")
module, command = parts if len(parts) == 2 else ("core", parts[0])
if self._policies.get(module, None) is None:
if self._fallback is not None:
return self._fallback.resolve(command_name)
else:
return None
if self._policies.get(module).get(command, None) is None:
if self._fallback is not None:
return self._fallback.resolve(command_name)
else:
return None
return self._policies.get(module).get(command)
@abstractmethod
def with_fallback(self, fallback: "PolicyResolver") -> "PolicyResolver":
pass
| BasePolicyResolver |
python | run-llama__llama_index | llama-index-core/llama_index/core/schema.py | {
"start": 28582,
"end": 30763
} | class ____(TextNode):
"""
Node with reference to any object.
This can include other indices, query engines, retrievers.
This can also include other nodes (though this is overlapping with `relationships`
on the Node class).
"""
index_id: str
obj: Any = None
def dict(self, **kwargs: Any) -> Dict[str, Any]:
from llama_index.core.storage.docstore.utils import doc_to_json
data = super().dict(**kwargs)
try:
if self.obj is None:
data["obj"] = None
elif isinstance(self.obj, BaseNode):
data["obj"] = doc_to_json(self.obj)
elif isinstance(self.obj, BaseModel):
data["obj"] = self.obj.model_dump()
else:
data["obj"] = json.dumps(self.obj)
except Exception:
raise ValueError("IndexNode obj is not serializable: " + str(self.obj))
return data
@classmethod
def from_text_node(
cls,
node: TextNode,
index_id: str,
) -> IndexNode:
"""Create index node from text node."""
# copy all attributes from text node, add index id
return cls(
**node.dict(),
index_id=index_id,
)
# TODO: return type here not supported by current mypy version
@classmethod
def from_dict(cls, data: Dict[str, Any], **kwargs: Any) -> Self: # type: ignore
output = super().from_dict(data, **kwargs)
obj = data.get("obj")
parsed_obj = None
if isinstance(obj, str):
parsed_obj = TextNode(text=obj)
elif isinstance(obj, dict):
from llama_index.core.storage.docstore.utils import json_to_doc
# check if its a node, else assume stringable
try:
parsed_obj = json_to_doc(obj) # type: ignore[assignment]
except Exception:
parsed_obj = TextNode(text=str(obj))
output.obj = parsed_obj
return output
@classmethod
def get_type(cls) -> str:
return ObjectType.INDEX
@classmethod
def class_name(cls) -> str:
return "IndexNode"
| IndexNode |
python | sphinx-doc__sphinx | sphinx/builders/html/_build_info.py | {
"start": 370,
"end": 2642
} | class ____:
"""buildinfo file manipulator.
HTMLBuilder and its family are storing their own envdata to ``.buildinfo``.
This class is a manipulator for the file.
"""
@classmethod
def load(cls: type[BuildInfo], filename: Path, /) -> BuildInfo:
content = filename.read_text(encoding='utf-8')
lines = content.splitlines()
version = lines[0].rstrip()
if version != '# Sphinx build info version 1':
msg = __('failed to read broken build info file (unknown version)')
raise ValueError(msg)
if not lines[2].startswith('config: '):
msg = __('failed to read broken build info file (missing config entry)')
raise ValueError(msg)
if not lines[3].startswith('tags: '):
msg = __('failed to read broken build info file (missing tags entry)')
raise ValueError(msg)
build_info = BuildInfo()
build_info.config_hash = lines[2].removeprefix('config: ').strip()
build_info.tags_hash = lines[3].removeprefix('tags: ').strip()
return build_info
def __init__(
self,
config: Config | None = None,
tags: Tags | None = None,
config_categories: Set[_ConfigRebuild] = frozenset(),
) -> None:
self.config_hash = ''
self.tags_hash = ''
if config:
values = {c.name: c.value for c in config.filter(config_categories)}
self.config_hash = stable_hash(values)
if tags:
self.tags_hash = stable_hash(sorted(tags))
def __eq__(self, other: BuildInfo) -> bool: # type: ignore[override]
return (
self.config_hash == other.config_hash and self.tags_hash == other.tags_hash
)
def __hash__(self) -> int:
return hash((self.config_hash, self.tags_hash))
def dump(self, filename: Path, /) -> None:
build_info = (
'# Sphinx build info version 1\n'
'# This file records the configuration used when building these files. '
'When it is not found, a full rebuild will be done.\n'
f'config: {self.config_hash}\n'
f'tags: {self.tags_hash}\n'
)
filename.write_text(build_info, encoding='utf-8')
| BuildInfo |
python | falconry__falcon | examples/recipes/request_id_middleware.py | {
"start": 87,
"end": 414
} | class ____:
def process_request(self, req, resp):
request_id = str(uuid4())
ctx.request_id = request_id
# It may also be helpful to include the ID in the response
def process_response(self, req, resp, resource, req_succeeded):
resp.set_header('X-Request-ID', ctx.request_id)
| RequestIDMiddleware |
python | dagster-io__dagster | python_modules/dagster/dagster_tests/storage_tests/test_db_io_manager.py | {
"start": 661,
"end": 1211
} | class ____(DbTypeHandler[int]):
def __init__(self):
self.handle_input_calls = []
self.handle_output_calls = []
def handle_output(self, context: OutputContext, table_slice: TableSlice, obj: int, connection):
self.handle_output_calls.append((context, table_slice, obj))
def load_input(self, context: InputContext, table_slice: TableSlice, connection) -> int:
self.handle_input_calls.append((context, table_slice))
return 7
@property
def supported_types(self):
return [int]
| IntHandler |
python | walkccc__LeetCode | solutions/775. Global and Local Inversions/775-2.py | {
"start": 0,
"end": 172
} | class ____:
def isIdealPermutation(self, nums: list[int]) -> bool:
for i, num in enumerate(nums):
if abs(num - i) > 1:
return False
return True
| Solution |
python | urllib3__urllib3 | test/with_dummyserver/test_socketlevel.py | {
"start": 78281,
"end": 79040
} | class ____(SocketDummyServerTestCase):
def _test_okay_header_parsing(self, header: bytes) -> None:
self.start_response_handler(
(b"HTTP/1.1 200 OK\r\n" b"Content-Length: 0\r\n") + header + b"\r\n\r\n"
)
with HTTPConnectionPool(self.host, self.port, retries=False) as pool:
with LogRecorder() as logs:
pool.request("GET", "/")
for record in logs:
assert "Failed to parse headers" not in record.msg
def test_header_text_plain(self) -> None:
self._test_okay_header_parsing(b"Content-type: text/plain")
def test_header_message_rfc822(self) -> None:
self._test_okay_header_parsing(b"Content-type: message/rfc822")
| TestHeaderParsingContentType |
python | facebook__pyre-check | client/language_server/features.py | {
"start": 578,
"end": 973
} | class ____(enum.Enum):
ENABLED = "enabled"
DISABLED = "disabled"
@staticmethod
def from_enabled(enabled: bool) -> _Availability:
return _Availability.ENABLED if enabled else _Availability.DISABLED
def is_enabled(self) -> bool:
return self == _Availability.ENABLED
def is_disabled(self) -> bool:
return self == _Availability.DISABLED
| _Availability |
python | rapidsai__cudf | python/cudf_polars/cudf_polars/typing/__init__.py | {
"start": 4737,
"end": 4830
} | class ____(TypedDict):
kind: Literal["duration"]
time_unit: str
| _DurationDataTypeHeader |
python | huggingface__transformers | src/transformers/models/regnet/modeling_regnet.py | {
"start": 7781,
"end": 9341
} | class ____(nn.Module):
def __init__(self, config: RegNetConfig):
super().__init__()
self.stages = nn.ModuleList([])
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
RegNetStage(
config,
config.embedding_size,
config.hidden_sizes[0],
stride=2 if config.downsample_in_first_stage else 1,
depth=config.depths[0],
)
)
in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
for (in_channels, out_channels), depth in zip(in_out_channels, config.depths[1:]):
self.stages.append(RegNetStage(config, in_channels, out_channels, depth=depth))
def forward(
self, hidden_state: Tensor, output_hidden_states: bool = False, return_dict: bool = True
) -> BaseModelOutputWithNoAttention:
hidden_states = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
hidden_states = hidden_states + (hidden_state,)
hidden_state = stage_module(hidden_state)
if output_hidden_states:
hidden_states = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None)
return BaseModelOutputWithNoAttention(last_hidden_state=hidden_state, hidden_states=hidden_states)
@auto_docstring
| RegNetEncoder |
python | apache__airflow | providers/google/src/airflow/providers/google/common/hooks/operation_helpers.py | {
"start": 1213,
"end": 2965
} | class ____:
"""Helper class to work with `operation.Operation` objects."""
@staticmethod
def wait_for_operation_result(
operation: Operation,
timeout: int | None = None,
polling: Retry | None = None,
retry: Retry | None = None,
) -> Message:
"""
Wait for long-lasting operation result to be retrieved.
For advance usage please check the docs on:
:class:`google.api_core.future.polling.PollingFuture`
:class:`google.api_core.retry.Retry`
:param operation: The initial operation to get result from.
:param timeout: How long (in seconds) to wait for the operation to complete.
If None, wait indefinitely. Overrides polling.timeout if both specified.
:param polling: How often and for how long to call polling RPC periodically.
:param retry: How to retry the operation polling if error occurs.
"""
try:
return operation.result(timeout=timeout, polling=polling, retry=retry)
except GoogleAPICallError as ex:
raise AirflowException("Google API error on operation result call") from ex
except Exception:
error = operation.exception(timeout=timeout)
raise AirflowException(error)
def wait_for_operation(
self,
operation: Operation,
timeout: float | int | None = None,
):
"""
Legacy method name wrapper.
Intended to use with existing hooks/operators, until the proper deprecation and replacement provided.
"""
if isinstance(timeout, float):
timeout = int(timeout)
return self.wait_for_operation_result(operation=operation, timeout=timeout)
| OperationHelper |
python | kamyu104__LeetCode-Solutions | Python/maximum-strong-pair-xor-i.py | {
"start": 1985,
"end": 4077
} | class ____(object):
def maximumStrongPairXor(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
class Trie(object):
def __init__(self, bit_length):
self.__nodes = []
self.__mins = []
self.__maxs = []
self.__new_node()
self.__bit_length = bit_length
def __new_node(self):
self.__nodes.append([-1]*2)
self.__mins.append(float("inf"))
self.__maxs.append(float("-inf"))
return len(self.__nodes)-1
def insert(self, num):
curr = 0
for i in reversed(xrange(self.__bit_length)):
x = num>>i
if self.__nodes[curr][x&1] == -1:
self.__nodes[curr][x&1] = self.__new_node()
curr = self.__nodes[curr][x&1]
self.__mins[curr] = min(self.__mins[curr], num)
self.__maxs[curr] = max(self.__maxs[curr], num)
def query(self, num):
result = curr = 0
for i in reversed(xrange(self.__bit_length)):
result <<= 1
x = num>>i
y = (result|1)^x
assert(x != y)
if (self.__nodes[curr][y&1] != -1 and
((x > y and num <= 2*self.__maxs[self.__nodes[curr][y&1]]) or
(x < y and self.__mins[self.__nodes[curr][y&1]] <= 2*num))):
result |= 1
curr = self.__nodes[curr][y&1]
else:
curr = self.__nodes[curr][1^(y&1)]
return result
trie = Trie(max(nums).bit_length())
result = 0
for num in nums:
trie.insert(num)
result = max(result, trie.query(num))
return result
# Time: O(nlogr), r = max(nums)
# Space: O(n)
# bit manipulation, greedy, dp
| Solution2 |
python | celery__celery | t/integration/test_tasks.py | {
"start": 2118,
"end": 20015
} | class ____:
def test_simple_call(self):
"""Tests direct simple call of task"""
assert add(1, 1) == 2
assert add(1, 1, z=1) == 3
@flaky
def test_basic_task(self, manager):
"""Tests basic task call"""
results = []
# Tests calling task only with args
for i in range(10):
results.append([i + i, add.delay(i, i)])
for expected, result in results:
value = result.get(timeout=10)
assert value == expected
assert result.status == 'SUCCESS'
assert result.ready() is True
assert result.successful() is True
results = []
# Tests calling task with args and kwargs
for i in range(10):
results.append([3*i, add.delay(i, i, z=i)])
for expected, result in results:
value = result.get(timeout=10)
assert value == expected
assert result.status == 'SUCCESS'
assert result.ready() is True
assert result.successful() is True
@flaky
@pytest.mark.skip(reason="Broken test")
def test_multiprocess_producer(self, manager):
"""Testing multiple processes calling tasks."""
set_multiprocessing_start_method()
from multiprocessing import Pool
pool = Pool(20)
ret = pool.map(_producer, range(120))
assert list(ret) == list(range(120))
@flaky
@pytest.mark.skip(reason="Broken test")
def test_multithread_producer(self, manager):
"""Testing multiple threads calling tasks."""
set_multiprocessing_start_method()
from multiprocessing.pool import ThreadPool
pool = ThreadPool(20)
ret = pool.map(_producer, range(120))
assert list(ret) == list(range(120))
@flaky
def test_ignore_result(self, manager):
"""Testing calling task with ignoring results."""
result = add.apply_async((1, 2), ignore_result=True)
assert result.get() is None
# We wait since it takes a bit of time for the result to be
# persisted in the result backend.
time.sleep(1)
assert result.result is None
@flaky
def test_pydantic_annotations(self, manager):
"""Tests task call with Pydantic model serialization."""
results = []
# Tests calling task only with args
for i in range(10):
results.append([i + i, add_pydantic.delay({'x': i, 'y': i})])
for expected, result in results:
value = result.get(timeout=10)
assert value == {'result': expected}
assert result.status == 'SUCCESS'
assert result.ready() is True
assert result.successful() is True
@flaky
def test_pydantic_string_annotations(self, manager):
"""Tests task call with string-annotated Pydantic model."""
results = []
# Tests calling task only with args
for i in range(10):
results.append([i + i, add_pydantic_string_annotations.delay({'x': i, 'y': i})])
for expected, result in results:
value = result.get(timeout=10)
assert value == {'result': expected}
assert result.status == 'SUCCESS'
assert result.ready() is True
assert result.successful() is True
@flaky
def test_timeout(self, manager):
"""Testing timeout of getting results from tasks."""
result = sleeping.delay(10)
with pytest.raises(celery.exceptions.TimeoutError):
result.get(timeout=5)
@pytest.mark.timeout(60)
@flaky
def test_expired(self, manager):
"""Testing expiration of task."""
# Fill the queue with tasks which took > 1 sec to process
for _ in range(4):
sleeping.delay(2)
# Execute task with expiration = 1 sec
result = add.apply_async((1, 1), expires=1)
with pytest.raises(celery.exceptions.TaskRevokedError):
result.get()
assert result.status == 'REVOKED'
assert result.ready() is True
assert result.failed() is False
assert result.successful() is False
# Fill the queue with tasks which took > 1 sec to process
for _ in range(4):
sleeping.delay(2)
# Execute task with expiration at now + 1 sec
result = add.apply_async((1, 1), expires=datetime.now(timezone.utc) + timedelta(seconds=1))
with pytest.raises(celery.exceptions.TaskRevokedError):
result.get()
assert result.status == 'REVOKED'
assert result.ready() is True
assert result.failed() is False
assert result.successful() is False
@flaky
def test_eta(self, manager):
"""Tests tasks scheduled at some point in future."""
start = time.perf_counter()
# Schedule task to be executed in 3 seconds
result = add.apply_async((1, 1), countdown=3)
time.sleep(1)
assert result.status == 'PENDING'
assert result.ready() is False
assert result.get() == 2
end = time.perf_counter()
assert result.status == 'SUCCESS'
assert result.ready() is True
# Difference between calling the task and result must be bigger than 3 secs
assert (end - start) > 3
start = time.perf_counter()
# Schedule task to be executed at time now + 3 seconds
result = add.apply_async((2, 2), eta=datetime.now(timezone.utc) + timedelta(seconds=3))
time.sleep(1)
assert result.status == 'PENDING'
assert result.ready() is False
assert result.get() == 4
end = time.perf_counter()
assert result.status == 'SUCCESS'
assert result.ready() is True
# Difference between calling the task and result must be bigger than 3 secs
assert (end - start) > 3
@flaky
def test_fail(self, manager):
"""Tests that the failing task propagates back correct exception."""
result = fail.delay()
with pytest.raises(ExpectedException):
result.get(timeout=5)
assert result.status == 'FAILURE'
assert result.ready() is True
assert result.failed() is True
assert result.successful() is False
@flaky
def test_revoked(self, manager):
"""Testing revoking of task"""
# Fill the queue with tasks to fill the queue
for _ in range(4):
sleeping.delay(2)
# Execute task and revoke it
result = add.apply_async((1, 1))
result.revoke()
with pytest.raises(celery.exceptions.TaskRevokedError):
result.get()
assert result.status == 'REVOKED'
assert result.ready() is True
assert result.failed() is False
assert result.successful() is False
def test_revoked_by_headers_simple_canvas(self, manager):
"""Testing revoking of task using a stamped header"""
target_monitoring_id = uuid4().hex
class MonitoringIdStampingVisitor(StampingVisitor):
def on_signature(self, sig, **headers) -> dict:
return {'monitoring_id': target_monitoring_id}
for monitoring_id in [target_monitoring_id, uuid4().hex, 4242, None]:
stamped_task = add.si(1, 1)
stamped_task.stamp(visitor=MonitoringIdStampingVisitor())
result = stamped_task.freeze()
result.revoke_by_stamped_headers(headers={'monitoring_id': [monitoring_id]})
stamped_task.apply_async()
if monitoring_id == target_monitoring_id:
with pytest.raises(celery.exceptions.TaskRevokedError):
result.get()
assert result.status == 'REVOKED'
assert result.ready() is True
assert result.failed() is False
assert result.successful() is False
else:
assert result.get() == 2
assert result.status == 'SUCCESS'
assert result.ready() is True
assert result.failed() is False
assert result.successful() is True
# Clear the set of revoked stamps in the worker state.
# This step is performed in each iteration of the loop to ensure that only tasks
# stamped with a specific monitoring ID will be revoked.
# For subsequent iterations with different monitoring IDs, the revoked stamps will
# not match the task's stamps, allowing those tasks to proceed successfully.
worker_state.revoked_stamps.clear()
@pytest.mark.timeout(20)
@pytest.mark.flaky(reruns=2)
def test_revoked_by_headers_complex_canvas(self, manager, subtests):
"""Testing revoking of task using a stamped header"""
try:
manager.app.backend.ensure_chords_allowed()
except NotImplementedError as e:
raise pytest.skip(e.args[0])
for monitoring_id in ["4242", [1234, uuid4().hex]]:
# Try to purge the queue before we start
# to attempt to avoid interference from other tests
manager.wait_until_idle()
target_monitoring_id = isinstance(monitoring_id, list) and monitoring_id[0] or monitoring_id
class MonitoringIdStampingVisitor(StampingVisitor):
def on_signature(self, sig, **headers) -> dict:
return {'monitoring_id': target_monitoring_id, 'stamped_headers': ['monitoring_id']}
stamped_task = sleeping.si(4)
stamped_task.stamp(visitor=MonitoringIdStampingVisitor())
result = stamped_task.freeze()
canvas = [
group([stamped_task]),
chord(group([stamped_task]), sleeping.si(2)),
chord(group([sleeping.si(2)]), stamped_task),
chain(stamped_task),
group([sleeping.si(2), stamped_task, sleeping.si(2)]),
chord([sleeping.si(2), stamped_task], sleeping.si(2)),
chord([sleeping.si(2), sleeping.si(2)], stamped_task),
chain(sleeping.si(2), stamped_task),
chain(sleeping.si(2), group([sleeping.si(2), stamped_task, sleeping.si(2)])),
chain(sleeping.si(2), group([sleeping.si(2), stamped_task]), sleeping.si(2)),
chain(sleeping.si(2), group([sleeping.si(2), sleeping.si(2)]), stamped_task),
]
result.revoke_by_stamped_headers(headers={'monitoring_id': monitoring_id})
for sig in canvas:
sig_result = sig.apply_async()
with subtests.test(msg='Testing if task was revoked'):
with pytest.raises(celery.exceptions.TaskRevokedError):
sig_result.get()
assert result.status == 'REVOKED'
assert result.ready() is True
assert result.failed() is False
assert result.successful() is False
worker_state.revoked_stamps.clear()
@flaky
def test_revoke_by_stamped_headers_no_match(self, manager):
response = manager.app.control.revoke_by_stamped_headers(
{"myheader": ["myvalue"]},
terminate=False,
reply=True,
)
expected_response = "headers {'myheader': ['myvalue']} flagged as revoked, but not terminated"
assert response[0][list(response[0].keys())[0]]["ok"] == expected_response
@flaky
def test_wrong_arguments(self, manager):
"""Tests that proper exceptions are raised when task is called with wrong arguments."""
with pytest.raises(TypeError):
add(5)
with pytest.raises(TypeError):
add(5, 5, wrong_arg=5)
with pytest.raises(TypeError):
add.delay(5)
with pytest.raises(TypeError):
add.delay(5, wrong_arg=5)
# Tasks with typing=False are not checked but execution should fail
result = add_not_typed.delay(5)
with pytest.raises(TypeError):
result.get(timeout=5)
assert result.status == 'FAILURE'
result = add_not_typed.delay(5, wrong_arg=5)
with pytest.raises(TypeError):
result.get(timeout=5)
assert result.status == 'FAILURE'
@pytest.mark.xfail(
condition=TEST_BACKEND == "rpc",
reason="Retry failed on rpc backend",
strict=False,
)
def test_retry(self, manager):
"""Tests retrying of task."""
# Tests when max. retries is reached
result = retry.delay()
tik = time.monotonic()
while time.monotonic() < tik + 5:
status = result.status
if status != 'PENDING':
break
time.sleep(0.1)
else:
raise AssertionError("Timeout while waiting for the task to be retried")
assert status == 'RETRY'
with pytest.raises(ExpectedException):
result.get()
assert result.status == 'FAILURE'
# Tests when task is retried but after returns correct result
result = retry.delay(return_value='bar')
tik = time.monotonic()
while time.monotonic() < tik + 5:
status = result.status
if status != 'PENDING':
break
time.sleep(0.1)
else:
raise AssertionError("Timeout while waiting for the task to be retried")
assert status == 'RETRY'
assert result.get() == 'bar'
assert result.status == 'SUCCESS'
def test_retry_with_unpickleable_exception(self, manager):
"""Test a task that retries with an unpickleable exception.
We expect to be able to fetch the result (exception) correctly.
"""
job = retry_unpickleable.delay(
"foo",
"bar",
retry_kwargs={"countdown": 10, "max_retries": 1},
)
# Wait for the task to raise the Retry exception
tik = time.monotonic()
while time.monotonic() < tik + 5:
status = job.status
if status != 'PENDING':
break
time.sleep(0.1)
else:
raise AssertionError("Timeout while waiting for the task to be retried")
assert status == 'RETRY'
# Get the exception
res = job.result
assert job.status == 'RETRY' # make sure that it wasn't completed yet
# Check it. Accept both the dedicated wrapper and plain Exception
# (some environments may return a stringified Exception instead).
if isinstance(res, UnpickleableExceptionWrapper):
assert res.exc_cls_name == "UnpickleableException"
assert res.exc_args == ("foo",)
else:
# Fallback: ensure the exception string mentions the class and argument
res_str = str(res)
assert "UnpickleableException" in res_str
assert "foo" in res_str
job.revoke()
def test_fail_with_unpickleable_exception(self, manager):
"""Test a task that fails with an unpickleable exception.
We expect to be able to fetch the result (exception) correctly.
"""
result = fail_unpickleable.delay("foo", "bar")
# Accept either the dedicated wrapper exception or a plain Exception
# whose string contains the class name and args (some backends
# may return a stringified exception).
try:
result.get()
pytest.fail("Expected an exception when getting result")
except UnpickleableExceptionWrapper as exc_wrapper:
assert exc_wrapper.exc_cls_name == "UnpickleableException"
assert exc_wrapper.exc_args == ("foo",)
except Exception as exc:
# Fallback: ensure the exception string mentions the class and argument
exc_str = str(exc)
assert "UnpickleableException" in exc_str
assert "foo" in exc_str
assert result.status == 'FAILURE'
# Requires investigation why it randomly succeeds/fails
@pytest.mark.skip(reason="Randomly fails")
def test_task_accepted(self, manager, sleep=1):
r1 = sleeping.delay(sleep)
sleeping.delay(sleep)
manager.assert_accepted([r1.id])
@flaky
def test_task_retried_once(self, manager):
res = retry_once.delay()
assert res.get(timeout=TIMEOUT) == 1 # retried once
@flaky
def test_task_retried_once_with_expires(self, manager):
res = retry_once.delay(expires=60)
assert res.get(timeout=TIMEOUT) == 1 # retried once
@flaky
def test_task_retried_priority(self, manager):
res = retry_once_priority.apply_async(priority=7)
assert res.get(timeout=TIMEOUT) == 7 # retried once with priority 7
@flaky
def test_task_retried_headers(self, manager):
res = retry_once_headers.apply_async(headers={'x-test-header': 'test-value'})
headers = res.get(timeout=TIMEOUT)
assert headers is not None # retried once with headers
assert 'x-test-header' in headers # retry keeps custom headers
@flaky
def test_unicode_task(self, manager):
manager.join(
group(print_unicode.s() for _ in range(5))(),
timeout=TIMEOUT, propagate=True,
)
@flaky
def test_properties(self, celery_session_worker):
res = return_properties.apply_async(app_id="1234")
assert res.get(timeout=TIMEOUT)["app_id"] == "1234"
@flaky
def test_soft_time_limit_exceeding_time_limit(self):
with pytest.raises(ValueError, match='soft_time_limit must be less than or equal to time_limit'):
result = soft_time_limit_must_exceed_time_limit.apply_async()
result.get(timeout=5)
assert result.status == 'FAILURE'
| test_tasks |
python | gevent__gevent | src/greentest/3.9/test_signal.py | {
"start": 20428,
"end": 23669
} | class ____(unittest.TestCase):
def readpipe_interrupted(self, interrupt):
"""Perform a read during which a signal will arrive. Return True if the
read is interrupted by the signal and raises an exception. Return False
if it returns normally.
"""
# use a subprocess to have only one thread, to have a timeout on the
# blocking read and to not touch signal handling in this process
code = """if 1:
import errno
import os
import signal
import sys
interrupt = %r
r, w = os.pipe()
def handler(signum, frame):
1 / 0
signal.signal(signal.SIGALRM, handler)
if interrupt is not None:
signal.siginterrupt(signal.SIGALRM, interrupt)
print("ready")
sys.stdout.flush()
# run the test twice
try:
for loop in range(2):
# send a SIGALRM in a second (during the read)
signal.alarm(1)
try:
# blocking call: read from a pipe without data
os.read(r, 1)
except ZeroDivisionError:
pass
else:
sys.exit(2)
sys.exit(3)
finally:
os.close(r)
os.close(w)
""" % (interrupt,)
with spawn_python('-c', code) as process:
try:
# wait until the child process is loaded and has started
first_line = process.stdout.readline()
stdout, stderr = process.communicate(timeout=support.SHORT_TIMEOUT)
except subprocess.TimeoutExpired:
process.kill()
return False
else:
stdout = first_line + stdout
exitcode = process.wait()
if exitcode not in (2, 3):
raise Exception("Child error (exit code %s): %r"
% (exitcode, stdout))
return (exitcode == 3)
def test_without_siginterrupt(self):
# If a signal handler is installed and siginterrupt is not called
# at all, when that signal arrives, it interrupts a syscall that's in
# progress.
interrupted = self.readpipe_interrupted(None)
self.assertTrue(interrupted)
def test_siginterrupt_on(self):
# If a signal handler is installed and siginterrupt is called with
# a true value for the second argument, when that signal arrives, it
# interrupts a syscall that's in progress.
interrupted = self.readpipe_interrupted(True)
self.assertTrue(interrupted)
def test_siginterrupt_off(self):
# If a signal handler is installed and siginterrupt is called with
# a false value for the second argument, when that signal arrives, it
# does not interrupt a syscall that's in progress.
interrupted = self.readpipe_interrupted(False)
self.assertFalse(interrupted)
@unittest.skipIf(sys.platform == "win32", "Not valid on Windows")
| SiginterruptTest |
python | PyCQA__pyflakes | pyflakes/checker.py | {
"start": 13284,
"end": 13333
} | class ____(Definition):
pass
| FunctionDefinition |
python | mlflow__mlflow | mlflow/server/graphql/autogenerated_graphql_schema.py | {
"start": 1429,
"end": 1727
} | class ____(graphene.ObjectType):
key = graphene.String()
value = graphene.Float()
timestamp = LongString()
step = LongString()
dataset_name = graphene.String()
dataset_digest = graphene.String()
model_id = graphene.String()
run_id = graphene.String()
| MlflowModelMetric |
python | sqlalchemy__sqlalchemy | test/orm/test_options.py | {
"start": 42927,
"end": 45051
} | class ____(PathTest, QueryTest):
@classmethod
def setup_test_class(cls):
def some_col_opt_only(self, key, opts):
return self._set_column_strategy((key,), None, opts)
strategy_options._AbstractLoad.some_col_opt_only = some_col_opt_only
def some_col_opt_strategy(loadopt, key, opts):
return loadopt._set_column_strategy(
(key,), {"deferred": True, "instrument": True}, opts
)
strategy_options._AbstractLoad.some_col_opt_strategy = (
some_col_opt_strategy
)
def _assert_attrs(self, opts, expected):
User = self.classes.User
s = fixture_session()
q1 = s.query(User).options(*opts)
attr = q1._compile_context().attributes
key = (
"loader",
tuple(inspect(User)._path_registry[User.name.property]),
)
eq_(attr[key].local_opts, expected)
def test_single_opt_only(self):
User = self.classes.User
opt = strategy_options.Load(User).some_col_opt_only(
User.name, {"foo": "bar"}
)
self._assert_attrs([opt], {"foo": "bar"})
def test_bound_multiple_opt_only(self):
User = self.classes.User
opts = [
Load(User)
.some_col_opt_only(User.name, {"foo": "bar"})
.some_col_opt_only(User.name, {"bat": "hoho"})
]
self._assert_attrs(opts, {"foo": "bar", "bat": "hoho"})
def test_bound_strat_opt_recvs_from_optonly(self):
User = self.classes.User
opts = [
Load(User)
.some_col_opt_only(User.name, {"foo": "bar"})
.some_col_opt_strategy(User.name, {"bat": "hoho"})
]
self._assert_attrs(opts, {"foo": "bar", "bat": "hoho"})
def test_bound_opt_only_adds_to_strat(self):
User = self.classes.User
opts = [
Load(User)
.some_col_opt_strategy(User.name, {"bat": "hoho"})
.some_col_opt_only(User.name, {"foo": "bar"})
]
self._assert_attrs(opts, {"foo": "bar", "bat": "hoho"})
| LocalOptsTest |
python | sphinx-doc__sphinx | tests/roots/test-directive-code/target.py | {
"start": 322,
"end": 357
} | class ____:
def quux(self):
pass
| Qux |
python | python__mypy | mypy/nodes.py | {
"start": 29753,
"end": 36906
} | class ____(FuncItem, SymbolNode, Statement):
"""Function definition.
This is a non-lambda function defined using 'def'.
"""
__slots__ = (
"_name",
"is_decorated",
"is_conditional",
"abstract_status",
"original_def",
"is_trivial_body",
"is_trivial_self",
"has_self_attr_def",
"is_mypy_only",
# Present only when a function is decorated with @typing.dataclass_transform or similar
"dataclass_transform_spec",
"docstring",
"deprecated",
"original_first_arg",
)
__match_args__ = ("name", "arguments", "type", "body")
# Note that all __init__ args must have default values
def __init__(
self,
name: str = "", # Function name
arguments: list[Argument] | None = None,
body: Block | None = None,
typ: mypy.types.FunctionLike | None = None,
type_args: list[TypeParam] | None = None,
) -> None:
super().__init__(arguments, body, typ, type_args)
self._name = name
self.is_decorated = False
self.is_conditional = False # Defined conditionally (within block)?
self.abstract_status = NOT_ABSTRACT
# Is this an abstract method with trivial body?
# Such methods can't be called via super().
self.is_trivial_body = False
# Original conditional definition
self.original_def: None | FuncDef | Var | Decorator = None
# Definitions that appear in if TYPE_CHECKING are marked with this flag.
self.is_mypy_only = False
self.dataclass_transform_spec: DataclassTransformSpec | None = None
self.docstring: str | None = None
self.deprecated: str | None = None
# This is used to simplify bind_self() logic in trivial cases (which are
# the majority). In cases where self is not annotated and there are no Self
# in the signature we can simply drop the first argument.
self.is_trivial_self = False
# Keep track of functions where self attributes are defined.
self.has_self_attr_def = False
# This is needed because for positional-only arguments the name is set to None,
# but we sometimes still want to show it in error messages.
if arguments:
self.original_first_arg: str | None = arguments[0].variable.name
else:
self.original_first_arg = None
@property
def name(self) -> str:
return self._name
def accept(self, visitor: StatementVisitor[T]) -> T:
return visitor.visit_func_def(self)
def serialize(self) -> JsonDict:
# We're deliberating omitting arguments and storing only arg_names and
# arg_kinds for space-saving reasons (arguments is not used in later
# stages of mypy).
# TODO: After a FuncDef is deserialized, the only time we use `arg_names`
# and `arg_kinds` is when `type` is None and we need to infer a type. Can
# we store the inferred type ahead of time?
return {
".class": "FuncDef",
"name": self._name,
"fullname": self._fullname,
"arg_names": self.arg_names,
"arg_kinds": [int(x.value) for x in self.arg_kinds],
"type": None if self.type is None else self.type.serialize(),
"flags": get_flags(self, FUNCDEF_FLAGS),
"abstract_status": self.abstract_status,
# TODO: Do we need expanded, original_def?
"dataclass_transform_spec": (
None
if self.dataclass_transform_spec is None
else self.dataclass_transform_spec.serialize()
),
"deprecated": self.deprecated,
"original_first_arg": self.original_first_arg,
}
@classmethod
def deserialize(cls, data: JsonDict) -> FuncDef:
assert data[".class"] == "FuncDef"
body = Block([])
ret = FuncDef(
data["name"],
[],
body,
(
None
if data["type"] is None
else cast(mypy.types.FunctionLike, mypy.types.deserialize_type(data["type"]))
),
)
ret._fullname = data["fullname"]
set_flags(ret, data["flags"])
# NOTE: ret.info is set in the fixup phase.
ret.arg_names = data["arg_names"]
ret.original_first_arg = data.get("original_first_arg")
ret.arg_kinds = [ARG_KINDS[x] for x in data["arg_kinds"]]
ret.abstract_status = data["abstract_status"]
ret.dataclass_transform_spec = (
DataclassTransformSpec.deserialize(data["dataclass_transform_spec"])
if data["dataclass_transform_spec"] is not None
else None
)
ret.deprecated = data["deprecated"]
# Leave these uninitialized so that future uses will trigger an error
del ret.arguments
del ret.max_pos
del ret.min_args
return ret
def write(self, data: WriteBuffer) -> None:
write_tag(data, FUNC_DEF)
write_str(data, self._name)
mypy.types.write_type_opt(data, self.type)
write_str(data, self._fullname)
write_flags(data, self, FUNCDEF_FLAGS)
write_str_opt_list(data, self.arg_names)
write_int_list(data, [int(ak.value) for ak in self.arg_kinds])
write_int(data, self.abstract_status)
if self.dataclass_transform_spec is None:
write_tag(data, LITERAL_NONE)
else:
self.dataclass_transform_spec.write(data)
write_str_opt(data, self.deprecated)
write_str_opt(data, self.original_first_arg)
write_tag(data, END_TAG)
@classmethod
def read(cls, data: ReadBuffer) -> FuncDef:
name = read_str(data)
typ: mypy.types.FunctionLike | None = None
tag = read_tag(data)
if tag != LITERAL_NONE:
typ = mypy.types.read_function_like(data, tag)
ret = FuncDef(name, [], Block([]), typ)
ret._fullname = read_str(data)
read_flags(data, ret, FUNCDEF_FLAGS)
# NOTE: ret.info is set in the fixup phase.
ret.arg_names = read_str_opt_list(data)
ret.arg_kinds = [ARG_KINDS[ak] for ak in read_int_list(data)]
ret.abstract_status = read_int(data)
tag = read_tag(data)
if tag != LITERAL_NONE:
assert tag == DT_SPEC
ret.dataclass_transform_spec = DataclassTransformSpec.read(data)
ret.deprecated = read_str_opt(data)
ret.original_first_arg = read_str_opt(data)
# Leave these uninitialized so that future uses will trigger an error
del ret.arguments
del ret.max_pos
del ret.min_args
assert read_tag(data) == END_TAG
return ret
# All types that are both SymbolNodes and FuncBases. See the FuncBase
# docstring for the rationale.
# See https://github.com/python/mypy/pull/13607#issuecomment-1236357236
# TODO: we want to remove this at some point and just use `FuncBase` ideally.
SYMBOL_FUNCBASE_TYPES: Final = (OverloadedFuncDef, FuncDef)
| FuncDef |
python | apache__airflow | providers/amazon/src/airflow/providers/amazon/aws/transfers/exasol_to_s3.py | {
"start": 1241,
"end": 4466
} | class ____(BaseOperator):
"""
Export data from Exasol database to AWS S3 bucket.
:param query_or_table: the sql statement to be executed or table name to export
:param key: S3 key that will point to the file
:param bucket_name: Name of the bucket in which to store the file
:param replace: A flag to decide whether or not to overwrite the key
if it already exists. If replace is False and the key exists, an
error will be raised.
:param encrypt: If True, the file will be encrypted on the server-side
by S3 and will be stored in an encrypted form while at rest in S3.
:param gzip: If True, the file will be compressed locally
:param acl_policy: String specifying the canned ACL policy for the file being
uploaded to the S3 bucket.
:param query_params: Query parameters passed to underlying ``export_to_file``
method of :class:`~pyexasol.connection.ExaConnection`.
:param export_params: Extra parameters passed to underlying ``export_to_file``
method of :class:`~pyexasol.connection.ExaConnection`.
"""
template_fields: Sequence[str] = ("query_or_table", "key", "bucket_name", "query_params", "export_params")
template_fields_renderers = {"query_or_table": "sql", "query_params": "json", "export_params": "json"}
template_ext: Sequence[str] = (".sql",)
ui_color = "#ededed"
def __init__(
self,
*,
query_or_table: str,
key: str,
bucket_name: str | None = None,
replace: bool = False,
encrypt: bool = False,
gzip: bool = False,
acl_policy: str | None = None,
query_params: dict | None = None,
export_params: dict | None = None,
exasol_conn_id: str = "exasol_default",
aws_conn_id: str | None = "aws_default",
**kwargs,
) -> None:
super().__init__(**kwargs)
self.query_or_table = query_or_table
self.key = key
self.bucket_name = bucket_name
self.replace = replace
self.encrypt = encrypt
self.gzip = gzip
self.acl_policy = acl_policy
self.query_params = query_params
self.export_params = export_params
self.exasol_conn_id = exasol_conn_id
self.aws_conn_id = aws_conn_id
def execute(self, context: Context):
exasol_hook = ExasolHook(exasol_conn_id=self.exasol_conn_id)
s3_hook = S3Hook(aws_conn_id=self.aws_conn_id)
with NamedTemporaryFile("w+") as file:
exasol_hook.export_to_file(
filename=file.name,
query_or_table=self.query_or_table,
export_params=self.export_params,
query_params=self.query_params,
)
file.flush()
self.log.info("Uploading the data as %s", self.key)
s3_hook.load_file(
filename=file.name,
key=self.key,
bucket_name=self.bucket_name,
replace=self.replace,
encrypt=self.encrypt,
gzip=self.gzip,
acl_policy=self.acl_policy,
)
self.log.info("Data uploaded")
return self.key
| ExasolToS3Operator |
python | apache__airflow | devel-common/src/tests_common/test_utils/mock_operators.py | {
"start": 2216,
"end": 2529
} | class ____(BaseOperator):
"""
Empty test operator without extra operator link.
Example of an operator that has no extra Operator link.
An operator link would be added to this operator via Airflow plugin.
"""
operator_extra_links = ()
@attr.s(auto_attribs=True)
| EmptyNoExtraLinkTestOperator |
python | django__django | tests/requests_tests/test_data_upload_settings.py | {
"start": 6634,
"end": 8175
} | class ____(SimpleTestCase):
def setUp(self):
payload = FakePayload(
"\r\n".join(
[
"--boundary",
(
'Content-Disposition: form-data; name="name1"; '
'filename="name1.txt"'
),
"",
"value1",
"--boundary",
(
'Content-Disposition: form-data; name="name2"; '
'filename="name2.txt"'
),
"",
"value2",
"--boundary--",
]
)
)
self.request = WSGIRequest(
{
"REQUEST_METHOD": "POST",
"CONTENT_TYPE": "multipart/form-data; boundary=boundary",
"CONTENT_LENGTH": len(payload),
"wsgi.input": payload,
}
)
def test_number_exceeded(self):
with self.settings(DATA_UPLOAD_MAX_NUMBER_FILES=1):
with self.assertRaisesMessage(TooManyFilesSent, TOO_MANY_FILES_MSG):
self.request._load_post_and_files()
def test_number_not_exceeded(self):
with self.settings(DATA_UPLOAD_MAX_NUMBER_FILES=2):
self.request._load_post_and_files()
def test_no_limit(self):
with self.settings(DATA_UPLOAD_MAX_NUMBER_FILES=None):
self.request._load_post_and_files()
| DataUploadMaxNumberOfFilesMultipartPost |
python | ray-project__ray | rllib/models/tf/layers/multi_head_attention.py | {
"start": 410,
"end": 2257
} | class ____(tf.keras.layers.Layer if tf else object):
"""A multi-head attention layer described in [1]."""
def __init__(self, out_dim: int, num_heads: int, head_dim: int, **kwargs):
super().__init__(**kwargs)
# No bias or non-linearity.
self._num_heads = num_heads
self._head_dim = head_dim
self._qkv_layer = tf.keras.layers.Dense(
3 * num_heads * head_dim, use_bias=False
)
self._linear_layer = tf.keras.layers.TimeDistributed(
tf.keras.layers.Dense(out_dim, use_bias=False)
)
if log_once("multi_head_attention"):
deprecation_warning(
old="rllib.models.tf.layers.MultiHeadAttention",
)
def call(self, inputs: TensorType) -> TensorType:
L = tf.shape(inputs)[1] # length of segment
H = self._num_heads # number of attention heads
D = self._head_dim # attention head dimension
qkv = self._qkv_layer(inputs)
queries, keys, values = tf.split(qkv, 3, -1)
queries = queries[:, -L:] # only query based on the segment
queries = tf.reshape(queries, [-1, L, H, D])
keys = tf.reshape(keys, [-1, L, H, D])
values = tf.reshape(values, [-1, L, H, D])
score = tf.einsum("bihd,bjhd->bijh", queries, keys)
score = score / D**0.5
# causal mask of the same length as the sequence
mask = tf.sequence_mask(tf.range(1, L + 1), dtype=score.dtype)
mask = mask[None, :, :, None]
masked_score = score * mask + 1e30 * (mask - 1.0)
wmat = tf.nn.softmax(masked_score, axis=2)
out = tf.einsum("bijh,bjhd->bihd", wmat, values)
shape = tf.concat([tf.shape(out)[:2], [H * D]], axis=0)
out = tf.reshape(out, shape)
return self._linear_layer(out)
| MultiHeadAttention |
python | doocs__leetcode | solution/1600-1699/1685.Sum of Absolute Differences in a Sorted Array/Solution.py | {
"start": 0,
"end": 289
} | class ____:
def getSumAbsoluteDifferences(self, nums: List[int]) -> List[int]:
ans = []
s, t = sum(nums), 0
for i, x in enumerate(nums):
v = x * i - t + s - t - x * (len(nums) - i)
ans.append(v)
t += x
return ans
| Solution |
python | Textualize__textual | src/textual/_event_broker.py | {
"start": 158,
"end": 999
} | class ____(NamedTuple):
"""Information for event handler."""
modifiers: set[str]
action: Any
def extract_handler_actions(event_name: str, meta: dict[str, Any]) -> HandlerArguments:
"""Extract action from meta dict.
Args:
event_name: Event to check from.
meta: Meta information (stored in Rich Style)
Raises:
NoHandler: If no handler is found.
Returns:
Action information.
"""
event_path = event_name.split(".")
for key, value in meta.items():
if key.startswith("@"):
name_args = key[1:].split(".")
if name_args[: len(event_path)] == event_path:
modifiers = name_args[len(event_path) :]
return HandlerArguments(set(modifiers), value)
raise NoHandler(f"No handler for {event_name!r}")
| HandlerArguments |
python | pytorch__pytorch | test/dynamo/test_repros.py | {
"start": 26402,
"end": 26632
} | class ____(list):
def __call__(self, x):
for processor in self:
x = processor(x)
return x
def clear(self):
pass # this prevents RestrictedListSubclassVariable from kicking in
| CustomList1 |
python | numba__numba | numba/core/typing/mathdecl.py | {
"start": 3190,
"end": 3523
} | class ____(ConcreteTemplate):
cases = [
signature(types.float64, types.float64, types.int64),
signature(types.float64, types.float64, types.uint64),
signature(types.float32, types.float32, types.float32),
signature(types.float64, types.float64, types.float64),
]
@infer_global(math.gcd)
| Math_pow |
python | plotly__plotly.py | _plotly_utils/basevalidators.py | {
"start": 25244,
"end": 29786
} | class ____(BaseValidator):
"""
"integer": {
"description": "An integer or an integer inside a string. When
applicable, values greater (less) than `max`
(`min`) are coerced to the `dflt`.",
"requiredOpts": [],
"otherOpts": [
"dflt",
"min",
"max",
"extras",
"arrayOk"
]
},
"""
def __init__(
self,
plotly_name,
parent_name,
min=None,
max=None,
extras=None,
array_ok=False,
**kwargs,
):
super(IntegerValidator, self).__init__(
plotly_name=plotly_name, parent_name=parent_name, **kwargs
)
# Handle min
if min is None and max is not None:
# Max was specified, so make min -inf
self.min_val = -sys.maxsize - 1
else:
self.min_val = min
# Handle max
if max is None and min is not None:
# Min was specified, so make min inf
self.max_val = sys.maxsize
else:
self.max_val = max
if min is not None or max is not None:
self.has_min_max = True
else:
self.has_min_max = False
self.extras = extras if extras is not None else []
self.array_ok = array_ok
def description(self):
desc = """\
The '{plotly_name}' property is a integer and may be specified as:""".format(
plotly_name=self.plotly_name
)
if not self.has_min_max:
desc = (
desc
+ """
- An int (or float that will be cast to an int)"""
)
else:
desc = desc + (
"""
- An int (or float that will be cast to an int)
in the interval [{min_val}, {max_val}]""".format(
min_val=self.min_val, max_val=self.max_val
)
)
# Extras
if self.extras:
desc = desc + (
"""
OR exactly one of {extras} (e.g. '{eg_extra}')"""
).format(extras=self.extras, eg_extra=self.extras[-1])
if self.array_ok:
desc = (
desc
+ """
- A tuple, list, or one-dimensional numpy array of the above"""
)
return desc
def validate_coerce(self, v):
if is_none_or_typed_array_spec(v):
pass
elif v in self.extras:
return v
elif self.array_ok and is_homogeneous_array(v):
np = get_module("numpy")
v_array = copy_to_readonly_numpy_array(
v, kind=("i", "u"), force_numeric=True
)
if v_array.dtype.kind not in ["i", "u"]:
self.raise_invalid_val(v)
# Check min/max
if self.has_min_max:
v_valid = np.logical_and(
self.min_val <= v_array, v_array <= self.max_val
)
if not np.all(v_valid):
# Grab up to the first 10 invalid values
v_invalid = np.logical_not(v_valid)
some_invalid_els = np.array(v, dtype="object")[v_invalid][
:10
].tolist()
self.raise_invalid_elements(some_invalid_els)
v = v_array
elif self.array_ok and is_simple_array(v):
# Check integer type
invalid_els = [
e for e in v if not isinstance(e, int) and e not in self.extras
]
if invalid_els:
self.raise_invalid_elements(invalid_els[:10])
# Check min/max
if self.has_min_max:
invalid_els = [
e
for e in v
if not (isinstance(e, int) and self.min_val <= e <= self.max_val)
and e not in self.extras
]
if invalid_els:
self.raise_invalid_elements(invalid_els[:10])
v = to_scalar_or_list(v)
else:
# Check int
if not isinstance(v, int):
# don't let int() cast strings to ints
self.raise_invalid_val(v)
# Check min/max
if self.has_min_max:
if not (self.min_val <= v <= self.max_val):
self.raise_invalid_val(v)
return v
| IntegerValidator |
python | rq__rq | tests/test_job.py | {
"start": 758,
"end": 34796
} | class ____(RQTestCase):
def test_unicode(self):
"""Unicode in job description [issue405]"""
job = Job.create(
'myfunc',
args=[12, '☃'],
kwargs=dict(snowman='☃', null=None),
connection=self.connection,
)
self.assertEqual(
job.description,
"myfunc(12, '☃', null=None, snowman='☃')",
)
def test_create_empty_job(self):
"""Creation of new empty jobs."""
job = Job(uuid4().hex, connection=self.connection)
job.description = 'test job'
# Jobs have a random UUID and a creation date
self.assertIsNotNone(job.id)
self.assertIsNotNone(job.created_at)
self.assertEqual(str(job), f'<Job {job.id}: test job>')
# ...and nothing else
self.assertEqual(job.origin, '')
self.assertIsNone(job.enqueued_at)
self.assertIsNone(job.started_at)
self.assertIsNone(job.ended_at)
self.assertIsNone(job.result)
self.assertIsNone(job._exc_info)
with self.assertRaises(DeserializationError):
job.func
with self.assertRaises(DeserializationError):
job.instance
with self.assertRaises(DeserializationError):
job.args
with self.assertRaises(DeserializationError):
job.kwargs
def test_create_param_errors(self):
"""Creation of jobs may result in errors"""
self.assertRaises(TypeError, Job.create, fixtures.say_hello, args='string')
self.assertRaises(TypeError, Job.create, fixtures.say_hello, kwargs='string')
self.assertRaises(TypeError, Job.create, func=42)
def test_create_typical_job(self):
"""Creation of jobs for function calls."""
job = Job.create(func=fixtures.some_calculation, args=(3, 4), kwargs=dict(z=2), connection=self.connection)
# Jobs have a random UUID
self.assertIsNotNone(job.id)
self.assertIsNotNone(job.created_at)
self.assertIsNotNone(job.description)
self.assertIsNone(job.instance)
# Job data is set...
self.assertEqual(job.func, fixtures.some_calculation)
self.assertEqual(job.args, (3, 4))
self.assertEqual(job.kwargs, {'z': 2})
# ...but metadata is not
self.assertEqual(job.origin, '')
self.assertIsNone(job.enqueued_at)
self.assertIsNone(job.result)
def test_create_instance_method_job(self):
"""Creation of jobs for instance methods."""
n = fixtures.Number(2)
job = Job.create(func=n.div, args=(4,), connection=self.connection)
# Job data is set
self.assertEqual(job.func, n.div)
self.assertEqual(job.instance, n)
self.assertEqual(job.args, (4,))
def test_create_job_with_serializer(self):
"""Creation of jobs with serializer for instance methods."""
# Test using json serializer
n = fixtures.Number(2)
job = Job.create(func=n.div, args=(4,), serializer=json, connection=self.connection)
self.assertIsNotNone(job.serializer)
self.assertEqual(job.func, n.div)
self.assertEqual(job.instance, n)
self.assertEqual(job.args, (4,))
def test_create_job_from_string_function(self):
"""Creation of jobs using string specifier."""
job = Job.create(func='tests.fixtures.say_hello', args=('World',), connection=self.connection)
# Job data is set
self.assertEqual(job.func, fixtures.say_hello)
self.assertIsNone(job.instance)
self.assertEqual(job.args, ('World',))
def test_create_job_from_callable_class(self):
"""Creation of jobs using a callable class specifier."""
kallable = fixtures.CallableObject()
job = Job.create(func=kallable, connection=self.connection)
self.assertEqual(job.func, kallable.__call__)
self.assertEqual(job.instance, kallable)
def test_job_properties_set_data_property(self):
"""Data property gets derived from the job tuple."""
job = Job(id=uuid4().hex, connection=self.connection)
job.func_name = 'foo'
fname, instance, args, kwargs = loads(job.data)
self.assertEqual(fname, job.func_name)
self.assertEqual(instance, None)
self.assertEqual(args, ())
self.assertEqual(kwargs, {})
def test_data_property_sets_job_properties(self):
"""Job tuple gets derived lazily from data property."""
job = Job(id=uuid4().hex, connection=self.connection)
job.data = dumps(('foo', None, (1, 2, 3), {'bar': 'qux'}))
self.assertEqual(job.func_name, 'foo')
self.assertEqual(job.instance, None)
self.assertEqual(job.args, (1, 2, 3))
self.assertEqual(job.kwargs, {'bar': 'qux'})
def test_save(self): # noqa
"""Storing jobs."""
job = Job.create(func=fixtures.some_calculation, args=(3, 4), kwargs=dict(z=2), connection=self.connection)
# Saving creates a Redis hash
self.assertEqual(self.connection.exists(job.key), False)
job.save()
self.assertEqual(self.connection.type(job.key), b'hash')
# Saving writes pickled job data
unpickled_data = loads(zlib.decompress(self.connection.hget(job.key, 'data')))
self.assertEqual(unpickled_data[0], 'tests.fixtures.some_calculation')
def test_fetch(self):
"""Fetching jobs."""
# Prepare test
self.connection.hset(
'rq:job:some_id', 'data', "(S'tests.fixtures.some_calculation'\nN(I3\nI4\nt(dp1\nS'z'\nI2\nstp2\n."
)
self.connection.hset('rq:job:some_id', 'created_at', '2012-02-07T22:13:24.123456Z')
# Fetch returns a job
job = Job.fetch('some_id', connection=self.connection)
self.assertEqual(job.id, 'some_id')
self.assertEqual(job.func_name, 'tests.fixtures.some_calculation')
self.assertIsNone(job.instance)
self.assertEqual(job.args, (3, 4))
self.assertEqual(job.kwargs, dict(z=2))
self.assertEqual(job.created_at, datetime(2012, 2, 7, 22, 13, 24, 123456, tzinfo=timezone.utc))
# Job.fetch also works with execution IDs
queue = Queue(connection=self.connection)
job = queue.enqueue(fixtures.say_hello)
worker = Worker([queue], connection=self.connection)
worker.prepare_execution(job=job)
worker.prepare_job_execution(job=job)
execution = worker.execution
self.assertEqual(Job.fetch(execution.composite_key, self.connection), job) # type: ignore
self.assertEqual(Job.fetch(job.id, self.connection), job)
def test_fetch_many(self):
"""Fetching many jobs at once."""
data = {
'func': fixtures.some_calculation,
'args': (3, 4),
'kwargs': dict(z=2),
'connection': self.connection,
}
job = Job.create(**data)
job.save()
job2 = Job.create(**data)
job2.save()
jobs = Job.fetch_many([job.id, job2.id, 'invalid_id'], self.connection)
self.assertEqual(jobs, [job, job2, None])
# Job.fetch_many also works with execution IDs
queue = Queue(connection=self.connection)
job = queue.enqueue(fixtures.say_hello)
worker = Worker([queue], connection=self.connection)
worker.prepare_execution(job=job)
worker.prepare_job_execution(job=job)
execution = worker.execution
self.assertEqual(Job.fetch_many([execution.composite_key], self.connection), [job]) # type: ignore
self.assertEqual(Job.fetch_many([job.id], self.connection), [job])
def test_persistence_of_empty_jobs(self): # noqa
"""Storing empty jobs."""
job = Job(id='foo', connection=self.connection)
with self.assertRaises(ValueError):
job.save()
def test_persistence_of_typical_jobs(self):
"""Storing typical jobs."""
job = Job.create(func=fixtures.some_calculation, args=(3, 4), kwargs=dict(z=2), connection=self.connection)
job.save()
stored_date = self.connection.hget(job.key, 'created_at').decode('utf-8')
self.assertEqual(stored_date, utcformat(job.created_at))
# ... and no other keys are stored
self.assertEqual(
{
b'created_at',
b'data',
b'description',
b'ended_at',
b'last_heartbeat',
b'started_at',
b'worker_name',
b'success_callback_name',
b'failure_callback_name',
b'stopped_callback_name',
b'group_id',
b'status',
},
set(self.connection.hkeys(job.key)),
)
self.assertEqual(job.last_heartbeat, None)
self.assertEqual(job.last_heartbeat, None)
ts = now()
job.heartbeat(ts, 0)
self.assertEqual(job.last_heartbeat, ts)
def test_persistence_of_parent_job(self):
"""Storing jobs with parent job, either instance or key."""
parent_job = Job.create(func=fixtures.some_calculation, connection=self.connection)
parent_job.save()
job = Job.create(func=fixtures.some_calculation, depends_on=parent_job, connection=self.connection)
job.save()
stored_job = Job.fetch(job.id, connection=self.connection)
self.assertEqual(stored_job._dependency_id, parent_job.id)
self.assertEqual(stored_job._dependency_ids, [parent_job.id])
self.assertEqual(stored_job.dependency.id, parent_job.id)
self.assertEqual(stored_job.dependency, parent_job)
job = Job.create(func=fixtures.some_calculation, depends_on=parent_job.id, connection=self.connection)
job.save()
stored_job = Job.fetch(job.id, connection=self.connection)
self.assertEqual(stored_job._dependency_id, parent_job.id)
self.assertEqual(stored_job._dependency_ids, [parent_job.id])
self.assertEqual(stored_job.dependency.id, parent_job.id)
self.assertEqual(stored_job.dependency, parent_job)
def test_persistence_of_callbacks(self):
"""Storing jobs with success and/or failure callbacks."""
job = Job.create(
func=fixtures.some_calculation,
on_success=Callback(fixtures.say_hello, timeout=10),
on_failure=fixtures.say_pid,
on_stopped=fixtures.say_hello,
connection=self.connection,
) # deprecated callable
job.save()
stored_job = Job.fetch(job.id, connection=self.connection)
self.assertEqual(fixtures.say_hello, stored_job.success_callback)
self.assertEqual(10, stored_job.success_callback_timeout)
self.assertEqual(fixtures.say_pid, stored_job.failure_callback)
self.assertEqual(fixtures.say_hello, stored_job.stopped_callback)
self.assertEqual(CALLBACK_TIMEOUT, stored_job.failure_callback_timeout)
self.assertEqual(CALLBACK_TIMEOUT, stored_job.stopped_callback_timeout)
# None(s)
job = Job.create(func=fixtures.some_calculation, on_failure=None, connection=self.connection)
job.save()
stored_job = Job.fetch(job.id, connection=self.connection)
self.assertIsNone(stored_job.success_callback)
self.assertEqual(CALLBACK_TIMEOUT, job.success_callback_timeout) # timeout should be never none
self.assertEqual(CALLBACK_TIMEOUT, stored_job.success_callback_timeout)
self.assertIsNone(stored_job.failure_callback)
self.assertEqual(CALLBACK_TIMEOUT, job.failure_callback_timeout) # timeout should be never none
self.assertEqual(CALLBACK_TIMEOUT, stored_job.failure_callback_timeout)
self.assertEqual(CALLBACK_TIMEOUT, job.stopped_callback_timeout) # timeout should be never none
self.assertIsNone(stored_job.stopped_callback)
def test_store_then_fetch(self):
"""Store, then fetch."""
job = Job.create(
func=fixtures.some_calculation, timeout=3600, args=(3, 4), kwargs=dict(z=2), connection=self.connection
)
job.save()
job2 = Job.fetch(job.id, connection=self.connection)
self.assertEqual(job.func, job2.func)
self.assertEqual(job.args, job2.args)
self.assertEqual(job.kwargs, job2.kwargs)
self.assertEqual(job.timeout, job2.timeout)
# Mathematical equation
self.assertEqual(job, job2)
def test_fetching_can_fail(self):
"""Fetching fails for non-existing jobs."""
with self.assertRaises(NoSuchJobError):
Job.fetch('b4a44d44-da16-4620-90a6-798e8cd72ca0', connection=self.connection)
def test_fetching_unreadable_data(self):
"""Fetching succeeds on unreadable data, but lazy props fail."""
# Set up
job = Job.create(func=fixtures.some_calculation, args=(3, 4), kwargs=dict(z=2), connection=self.connection)
job.save()
# Just replace the data hkey with some random noise
self.connection.hset(job.key, 'data', 'this is no pickle string')
job.refresh()
for attr in ('func_name', 'instance', 'args', 'kwargs'):
with self.assertRaises(Exception):
getattr(job, attr)
def test_job_is_unimportable(self):
"""Jobs that cannot be imported throw exception on access."""
job = Job.create(func=fixtures.say_hello, args=('Lionel',), connection=self.connection)
job.save()
# Now slightly modify the job to make it unimportable (this is
# equivalent to a worker not having the most up-to-date source code
# and unable to import the function)
job_data = job.data
unimportable_data = job_data.replace(b'say_hello', b'nay_hello')
self.connection.hset(job.key, 'data', zlib.compress(unimportable_data))
job.refresh()
with self.assertRaises(ValueError):
job.func # accessing the func property should fail
def test_compressed_exc_info_handling(self):
"""Jobs handle both compressed and uncompressed exc_info"""
exception_string = 'Some exception'
job = Job.create(func=fixtures.say_hello, args=('Lionel',), connection=self.connection)
job._exc_info = exception_string
job.save()
# exc_info is stored in compressed format
exc_info = self.connection.hget(job.key, 'exc_info')
self.assertEqual(as_text(zlib.decompress(exc_info)), exception_string)
job.refresh()
self.assertEqual(job.exc_info, exception_string)
# Uncompressed exc_info is also handled
self.connection.hset(job.key, 'exc_info', exception_string)
job.refresh()
self.assertEqual(job.exc_info, exception_string)
def test_compressed_job_data_handling(self):
"""Jobs handle both compressed and uncompressed data"""
job = Job.create(func=fixtures.say_hello, args=('Lionel',), connection=self.connection)
job.save()
# Job data is stored in compressed format
job_data = job.data
self.assertEqual(zlib.compress(job_data), self.connection.hget(job.key, 'data'))
self.connection.hset(job.key, 'data', job_data)
job.refresh()
self.assertEqual(job.data, job_data)
def test_custom_meta_is_persisted(self):
"""Additional meta data on jobs are stored persisted correctly."""
job = Job.create(func=fixtures.say_hello, args=('Lionel',), connection=self.connection)
job.meta['foo'] = 'bar'
job.save()
raw_data = self.connection.hget(job.key, 'meta')
self.assertEqual(loads(raw_data)['foo'], 'bar')
job2 = Job.fetch(job.id, connection=self.connection)
self.assertEqual(job2.meta['foo'], 'bar')
def test_get_meta(self):
"""Test get_meta() function"""
job = Job.create(func=fixtures.say_hello, args=('Lionel',), connection=self.connection)
job.meta['foo'] = 'bar'
job.save()
self.assertEqual(job.get_meta()['foo'], 'bar')
# manually write different data in meta
self.connection.hset(job.key, 'meta', dumps({'fee': 'boo'}))
# check if refresh=False keeps old data
self.assertEqual(job.get_meta(False)['foo'], 'bar')
# check if meta is updated
self.assertEqual(job.get_meta()['fee'], 'boo')
def test_custom_meta_is_rewriten_by_save_meta(self):
"""New meta data can be stored by save_meta."""
job = Job.create(func=fixtures.say_hello, args=('Lionel',), connection=self.connection)
job.save()
serialized = job.to_dict()
job.meta['foo'] = 'bar'
job.save_meta()
raw_meta = self.connection.hget(job.key, 'meta')
self.assertEqual(loads(raw_meta)['foo'], 'bar')
job2 = Job.fetch(job.id, connection=self.connection)
self.assertEqual(job2.meta['foo'], 'bar')
# nothing else was changed
serialized2 = job2.to_dict()
serialized2.pop('meta')
self.assertDictEqual(serialized, serialized2)
def test_unpickleable_result(self):
"""Unpickleable job result doesn't crash job.save() and job.refresh()"""
job = Job.create(func=fixtures.say_hello, args=('Lionel',), connection=self.connection)
job._result = queue.Queue()
job.save()
self.assertEqual(self.connection.hget(job.key, 'result').decode('utf-8'), 'Unserializable return value')
job = Job.fetch(job.id, connection=self.connection)
self.assertEqual(job.result, 'Unserializable return value')
def test_result_ttl_is_persisted(self):
"""Ensure that job's result_ttl is set properly"""
job = Job.create(func=fixtures.say_hello, args=('Lionel',), result_ttl=10, connection=self.connection)
job.save()
Job.fetch(job.id, connection=self.connection)
self.assertEqual(job.result_ttl, 10)
job = Job.create(func=fixtures.say_hello, args=('Lionel',), connection=self.connection)
job.save()
Job.fetch(job.id, connection=self.connection)
self.assertEqual(job.result_ttl, None)
def test_failure_ttl_is_persisted(self):
"""Ensure job.failure_ttl is set and restored properly"""
job = Job.create(func=fixtures.say_hello, args=('Lionel',), failure_ttl=15, connection=self.connection)
job.save()
Job.fetch(job.id, connection=self.connection)
self.assertEqual(job.failure_ttl, 15)
job = Job.create(func=fixtures.say_hello, args=('Lionel',), connection=self.connection)
job.save()
Job.fetch(job.id, connection=self.connection)
self.assertEqual(job.failure_ttl, None)
def test_description_is_persisted(self):
"""Ensure that job's custom description is set properly"""
job = Job.create(
func=fixtures.say_hello, args=('Lionel',), description='Say hello!', connection=self.connection
)
job.save()
Job.fetch(job.id, connection=self.connection)
self.assertEqual(job.description, 'Say hello!')
# Ensure job description is constructed from function call string
job = Job.create(func=fixtures.say_hello, args=('Lionel',), connection=self.connection)
job.save()
Job.fetch(job.id, connection=self.connection)
self.assertEqual(job.description, "tests.fixtures.say_hello('Lionel')")
def test_prepare_for_execution(self):
"""job.prepare_for_execution works properly"""
job = Job.create(func=fixtures.say_hello, connection=self.connection)
job.save()
with self.connection.pipeline() as pipeline:
job.prepare_for_execution('worker_name', pipeline)
pipeline.execute()
job.refresh()
self.assertEqual(job.worker_name, 'worker_name')
self.assertEqual(job.get_status(), JobStatus.STARTED)
self.assertIsNotNone(job.last_heartbeat)
self.assertIsNotNone(job.started_at)
def test_job_status_always_exists(self):
"""Job status is always guaranteed to exist, defaulting to CREATED."""
job = Job.create(func=fixtures.say_hello, connection=self.connection)
self.assertEqual(job.get_status(refresh=False), JobStatus.CREATED)
job.save()
self.assertEqual(job.get_status(refresh=True), JobStatus.CREATED)
def test_get_status_fails_when_job_deleted_from_redis(self):
"""get_status() raises InvalidJobOperation when job hash is deleted from Redis."""
job = Job.create(func=fixtures.say_hello, connection=self.connection)
job.save()
# Delete the job hash from Redis
self.connection.delete(job.key)
# Now get_status with refresh should raise an exception
self.assertRaises(InvalidJobOperation, job.get_status, refresh=True)
def test_job_access_outside_job_fails(self):
"""The current job is accessible only within a job context."""
self.assertIsNone(get_current_job())
def test_job_access_within_job_function(self):
"""The current job is accessible within the job function."""
q = Queue(connection=self.connection)
job = q.enqueue(fixtures.access_self)
w = Worker([q])
w.work(burst=True)
# access_self calls get_current_job() and executes successfully
self.assertEqual(job.get_status(), JobStatus.FINISHED)
def test_job_access_within_synchronous_job_function(self):
queue = Queue(is_async=False, connection=self.connection)
queue.enqueue(fixtures.access_self)
def test_job_async_status_finished(self):
queue = Queue(is_async=False, connection=self.connection)
job = queue.enqueue(fixtures.say_hello)
self.assertEqual(job.result, 'Hi there, Stranger!')
self.assertEqual(job.get_status(), JobStatus.FINISHED)
def test_enqueue_job_async_status_finished(self):
queue = Queue(is_async=False, connection=self.connection)
job = Job.create(func=fixtures.say_hello, connection=self.connection)
job = queue.enqueue_job(job)
self.assertEqual(job.result, 'Hi there, Stranger!')
self.assertEqual(job.get_status(), JobStatus.FINISHED)
def test_get_result_ttl(self):
"""Getting job result TTL."""
job_result_ttl = 1
default_ttl = 2
job = Job.create(func=fixtures.say_hello, result_ttl=job_result_ttl, connection=self.connection)
job.save()
self.assertEqual(job.get_result_ttl(default_ttl=default_ttl), job_result_ttl)
job = Job.create(func=fixtures.say_hello, connection=self.connection)
job.save()
self.assertEqual(job.get_result_ttl(default_ttl=default_ttl), default_ttl)
def test_get_job_ttl(self):
"""Getting job TTL."""
ttl = 1
job = Job.create(func=fixtures.say_hello, ttl=ttl, connection=self.connection)
job.save()
self.assertEqual(job.get_ttl(), ttl)
job = Job.create(func=fixtures.say_hello, connection=self.connection)
job.save()
self.assertEqual(job.get_ttl(), None)
def test_ttl_via_enqueue(self):
ttl = 1
queue = Queue(connection=self.connection)
job = queue.enqueue(fixtures.say_hello, ttl=ttl)
self.assertEqual(job.get_ttl(), ttl)
def test_never_expire_during_execution(self):
"""Test what happens when job expires during execution"""
ttl = 1
queue = Queue(connection=self.connection)
job = queue.enqueue(fixtures.long_running_job, args=(2,), ttl=ttl)
self.assertEqual(job.get_ttl(), ttl)
job.save()
job.perform()
self.assertEqual(job.get_ttl(), ttl)
self.assertTrue(job.exists(job.id, connection=self.connection))
self.assertEqual(job.result, 'Done sleeping...')
def test_cleanup(self):
"""Test that jobs and results are expired properly."""
job = Job.create(func=fixtures.say_hello, connection=self.connection, status=JobStatus.QUEUED)
job.save()
# Jobs with negative TTLs don't expire
job.cleanup(ttl=-1)
self.assertEqual(self.connection.ttl(job.key), -1)
# Jobs with positive TTLs are eventually deleted
job.cleanup(ttl=100)
self.assertEqual(self.connection.ttl(job.key), 100)
# Jobs with 0 TTL are immediately deleted
job.cleanup(ttl=0)
self.assertRaises(NoSuchJobError, Job.fetch, job.id, self.connection)
def test_job_get_position(self):
queue = Queue(connection=self.connection)
job = queue.enqueue(fixtures.say_hello)
job2 = queue.enqueue(fixtures.say_hello)
job3 = Job(uuid4().hex, connection=self.connection)
self.assertEqual(0, job.get_position())
self.assertEqual(1, job2.get_position())
self.assertEqual(None, job3.get_position())
def test_job_delete_removes_itself_from_registries(self):
"""job.delete() should remove itself from job registries"""
job = Job.create(
func=fixtures.say_hello,
status=JobStatus.FAILED,
connection=self.connection,
origin='default',
serializer=JSONSerializer,
)
job.save()
registry = FailedJobRegistry(connection=self.connection, serializer=JSONSerializer)
registry.add(job, 500)
job.delete()
self.assertNotIn(job, registry)
job = Job.create(
func=fixtures.say_hello,
status=JobStatus.STOPPED,
connection=self.connection,
origin='default',
serializer=JSONSerializer,
)
job.save()
registry = FailedJobRegistry(connection=self.connection, serializer=JSONSerializer)
registry.add(job, 500)
job.delete()
self.assertNotIn(job, registry)
job = Job.create(
func=fixtures.say_hello,
status=JobStatus.FINISHED,
connection=self.connection,
origin='default',
serializer=JSONSerializer,
)
job.save()
registry = FinishedJobRegistry(connection=self.connection, serializer=JSONSerializer)
registry.add(job, 500)
job.delete()
self.assertNotIn(job, registry)
job = Job.create(
func=fixtures.say_hello,
status=JobStatus.STARTED,
connection=self.connection,
origin='default',
serializer=JSONSerializer,
)
job.save()
registry = StartedJobRegistry(connection=self.connection, serializer=JSONSerializer)
with self.connection.pipeline() as pipe:
# this will also add the execution to the registry
Execution.create(job, ttl=500, pipeline=pipe)
pipe.execute()
job.delete()
self.assertNotIn(job, registry)
job = Job.create(
func=fixtures.say_hello,
status=JobStatus.DEFERRED,
connection=self.connection,
origin='default',
serializer=JSONSerializer,
)
job.save()
registry = DeferredJobRegistry(connection=self.connection, serializer=JSONSerializer)
registry.add(job, 500)
job.delete()
self.assertNotIn(job, registry)
job = Job.create(
func=fixtures.say_hello,
status=JobStatus.SCHEDULED,
connection=self.connection,
origin='default',
serializer=JSONSerializer,
)
job.save()
registry = ScheduledJobRegistry(connection=self.connection, serializer=JSONSerializer)
registry.add(job, 500)
job.delete()
self.assertNotIn(job, registry)
def test_job_delete_execution_registry(self):
"""job.delete() also deletes ExecutionRegistry and all job executions"""
queue = Queue(connection=self.connection)
job = queue.enqueue(fixtures.say_hello)
worker = Worker([queue], connection=self.connection)
execution = worker.prepare_execution(job=job)
self.assertTrue(self.connection.exists(job.execution_registry.key))
self.assertTrue(self.connection.exists(execution.key))
job.delete()
self.assertFalse(self.connection.exists(job.execution_registry.key))
self.assertFalse(self.connection.exists(execution.key))
def test_create_job_with_id(self):
"""test creating jobs with a custom ID"""
queue = Queue(connection=self.connection)
job = queue.enqueue(fixtures.say_hello, job_id='1234')
self.assertEqual(job.id, '1234')
job.perform()
self.assertRaises(TypeError, queue.enqueue, fixtures.say_hello, job_id=1234)
def test_create_job_with_invalid_id(self):
"""test creating jobs with a custom invalid ID (with character :)"""
queue = Queue(connection=self.connection)
with self.assertRaises(ValueError):
queue.enqueue(fixtures.say_hello, job_id='1234:4321')
def test_create_job_with_async(self):
"""test creating jobs with async function"""
queue = Queue(connection=self.connection)
async_job = queue.enqueue(fixtures.say_hello_async, job_id='async_job')
sync_job = queue.enqueue(fixtures.say_hello, job_id='sync_job')
self.assertEqual(async_job.id, 'async_job')
self.assertEqual(sync_job.id, 'sync_job')
async_task_result = async_job.perform()
sync_task_result = sync_job.perform()
self.assertEqual(sync_task_result, async_task_result)
def test_get_call_string_unicode(self):
"""test call string with unicode keyword arguments"""
queue = Queue(connection=self.connection)
job = queue.enqueue(fixtures.echo, arg_with_unicode=fixtures.UnicodeStringObject())
self.assertIsNotNone(job.get_call_string())
job.perform()
def test_create_job_from_static_method(self):
"""test creating jobs with static method"""
queue = Queue(connection=self.connection)
job = queue.enqueue(fixtures.ClassWithAStaticMethod.static_method)
self.assertIsNotNone(job.get_call_string())
job.perform()
def test_create_job_with_ttl_should_have_ttl_after_enqueued(self):
"""test creating jobs with ttl and checks if get_jobs returns it properly [issue502]"""
queue = Queue(connection=self.connection)
queue.enqueue(fixtures.say_hello, job_id='1234', ttl=10)
job = queue.get_jobs()[0]
self.assertEqual(job.ttl, 10)
def test_create_job_with_ttl_should_expire(self):
"""test if a job created with ttl expires [issue502]"""
queue = Queue(connection=self.connection)
queue.enqueue(fixtures.say_hello, job_id='1234', ttl=1)
time.sleep(1.1)
self.assertEqual(0, len(queue.get_jobs()))
def test_create_and_cancel_job(self):
"""Ensure job.cancel() works properly"""
queue = Queue(connection=self.connection)
job = queue.enqueue(fixtures.say_hello)
self.assertEqual(1, len(queue.get_jobs()))
cancel_job(job.id, connection=self.connection)
self.assertEqual(0, len(queue.get_jobs()))
registry = CanceledJobRegistry(connection=self.connection, queue=queue)
self.assertIn(job, registry)
self.assertEqual(job.get_status(), JobStatus.CANCELED)
# If job is deleted, it's also removed from CanceledJobRegistry
job.delete()
self.assertNotIn(job, registry)
def test_create_and_cancel_job_fails_already_canceled(self):
"""Ensure job.cancel() fails on already canceled job"""
queue = Queue(connection=self.connection)
job = queue.enqueue(fixtures.say_hello, job_id='fake_job_id')
self.assertEqual(1, len(queue.get_jobs()))
# First cancel should be fine
cancel_job(job.id, connection=self.connection)
self.assertEqual(0, len(queue.get_jobs()))
registry = CanceledJobRegistry(connection=self.connection, queue=queue)
self.assertIn(job, registry)
self.assertEqual(job.get_status(), JobStatus.CANCELED)
# Second cancel should fail
self.assertRaisesRegex(
InvalidJobOperation,
r'Cannot cancel already canceled job: fake_job_id',
cancel_job,
job.id,
connection=self.connection,
)
def test_create_and_cancel_job_with_serializer(self):
"""test creating and using cancel_job (with serializer) deletes job properly"""
queue = Queue(connection=self.connection, serializer=JSONSerializer)
job = queue.enqueue(fixtures.say_hello)
self.assertEqual(1, len(queue.get_jobs()))
cancel_job(job.id, serializer=JSONSerializer, connection=self.connection)
self.assertEqual(0, len(queue.get_jobs()))
def test_key_for_should_return_prefixed_job_id(self):
"""test redis key to store job hash under"""
job_id = 'random'
key = Job.key_for(job_id=job_id)
assert key == Job.redis_job_namespace_prefix + job_id
@min_redis_version((5, 0, 0))
def test_blocking_result_fetch(self):
# Ensure blocking waits for the time to run the job, but not right up until the timeout.
job_sleep_seconds = 2
block_seconds = 5
queue_name = 'test_blocking_queue'
q = Queue(queue_name, connection=self.connection)
job = q.enqueue(fixtures.long_running_job, job_sleep_seconds)
started_at = time.time()
fixtures.start_worker_process(queue_name, connection=self.connection, burst=True)
result = job.latest_result(timeout=block_seconds)
blocked_for = time.time() - started_at
self.assertEqual(job.get_status(), JobStatus.FINISHED)
self.assertIsNotNone(result)
self.assertGreaterEqual(blocked_for, job_sleep_seconds)
self.assertLess(blocked_for, block_seconds)
| TestJob |
python | getsentry__sentry | src/sentry/workflow_engine/migrations/0074_safe_delete_actiongroupstatus.py | {
"start": 240,
"end": 1554
} | class ____(CheckedMigration):
# This flag is used to mark that a migration shouldn't be automatically run in production.
# This should only be used for operations where it's safe to run the migration after your
# code has deployed. So this should not be used for most operations that alter the schema
# of a table.
# Here are some things that make sense to mark as post deployment:
# - Large data migrations. Typically we want these to be run manually so that they can be
# monitored and not block the deploy for a long period of time while they run.
# - Adding indexes to large tables. Since this can take a long time, we'd generally prefer to
# run this outside deployments so that we don't block them. Note that while adding an index
# is a schema change, it's completely safe to run the operation after the code has deployed.
# Once deployed, run these manually via: https://develop.sentry.dev/database-migrations/#migration-deployment
is_post_deployment = False
dependencies = [
("sentry", "0932_update_grouptombstone_with_auto_now_add"),
("workflow_engine", "0073_safe_pending_delete_actiongroupstatus"),
]
operations = [
SafeDeleteModel(name="ActionGroupStatus", deletion_action=DeletionAction.DELETE),
]
| Migration |
python | joke2k__faker | faker/providers/person/lt_LT/__init__.py | {
"start": 44,
"end": 4643
} | class ____(PersonProvider):
formats = (
"{{first_name}} {{last_name}}",
"{{first_name}} {{last_name}}",
"{{last_name}}, {{first_name}}",
)
first_names = (
"Tomas",
"Lukas",
"Mantas",
"Deividas",
"Arnas",
"Artūras",
"Karolis",
"Dovydas",
"Dominykas",
"Darius",
"Edvinas",
"Jonas",
"Martynas",
"Kajus",
"Donatas",
"Andrius",
"Matas",
"Rokas",
"Augustas",
"Danielius",
"Mindaugas",
"Paulius",
"Marius",
"Armandas",
"Edgaras",
"Jokūbas",
"Nedas",
"Tadas",
"Nerijus",
"Simonas",
"Vytautas",
"Artūras",
"Robertas",
"Eimantas",
"Arijus",
"Nojus",
"Egidijus",
"Aurimas",
"Emilis",
"Laurynas",
"Edvardas",
"Joris",
"Pijus",
"Erikas",
"Domas",
"Vilius",
"Evaldas",
"Justinas",
"Aleksandras",
"Kristupas",
"Gabrielius",
"Benas",
"Gytis",
"Arminas",
"Vakris",
"Tautvydas",
"Domantas",
"Justas",
"Markas",
"Antanas",
"Arūnas",
"Ernestas",
"Aronas",
"Vaidas",
"Ąžuolas",
"Titas",
"Giedrius",
"Ignas",
"Povilas",
"Saulius",
"Julius",
"Arvydas",
"Kęstutis",
"Rytis",
"Aistis",
"Gediminas",
"Algirdas",
"Naglis",
"Irmantas",
"Rolandas",
"Aivaras",
"Simas",
"Faustas",
"Ramūnas",
"Šarūnas",
"Gustas",
"Tajus",
"Dainius",
"Arnoldas",
"Linas",
"Rojus",
"Adomas",
"Žygimantas",
"Ričardas",
"Orestas",
"Kipras",
"Juozas",
"Audrius",
"Romualdas",
"Petras",
"Eleonora",
"Raminta",
"Dovilė",
"Sandra",
"Dominyka",
"Ana",
"Erika",
"Kristina",
"Gintarė",
"Rūta",
"Edita",
"Karina",
"Živilė",
"Jolanta",
"Radvilė",
"Ramunė",
"Svetlana",
"Ugnė",
"Eglė",
"Viktorija",
"Justina",
"Brigita",
"Rasa",
"Marija",
"Giedrė",
"Iveta",
"Sonata",
"Vitalija",
"Adrija",
"Goda",
"Paulina",
"Kornelija",
"Liepa",
"Vakarė",
"Milda",
"Meda",
"Vaida",
"Izabelė",
"Jovita",
"Irma",
"Žemyna",
"Leila",
"Rimantė",
"Mantė",
"Rytė",
"Perla",
"Greta",
"Monika",
"Ieva",
"Indrė",
"Ema",
"Aurelija",
"Smiltė",
"Ingrida",
"Simona",
"Amelija",
"Sigita",
"Olivija",
"Laurita",
"Jorūnė",
"Leticija",
"Vigilija",
"Medėja",
"Laura",
"Agnė",
"Evelina",
"Kotryna",
"Lėja",
"Aušra",
"Neringa",
"Gerda",
"Jurgita",
"Rusnė",
"Aušrinė",
"Rita",
"Elena",
"Ineta",
"Ligita",
"Vasarė",
"Vėjūnė",
"Ignė",
"Gytė",
"Ariana",
"Arielė",
"Vytė",
"Eidvilė",
"Karolina",
"Miglė",
"Viltė",
"Jolanta",
"Enrika",
"Aurėja",
"Vanesa",
"Darija",
"Reda",
"Milana",
"Rugilė",
"Diana",
)
last_names = (
"Kazlauskas",
"Jankauskas",
"Petrauskas",
"Pocius",
"Stankevičius",
"Vsiliauskas",
"Žukauskas",
"Butkus",
"Paulauskas",
"Urbonas",
"Kavaliauskas",
"Sakalauskas",
"Žukauskas",
"Akelis",
"Ambrasas",
"Kairys",
"Kalvaitis",
"Kalvelis",
"Kalvėnas",
"Kaupas",
"Kiška",
"Gagys",
"Gailius",
"Gailys",
"Gaižauskas",
"Gaičiūnas",
"Galdikas",
"Gintalas",
"Ginzburgas",
"Grinius",
"Gronskis",
"Nagys",
"Naujokas",
"Narušis",
"Nausėda",
"Poška",
"Povilonis",
)
| Provider |
python | getsentry__sentry | src/sentry/api/paginator.py | {
"start": 8493,
"end": 9164
} | class ____(BasePaginator):
multiplier = 1000
def get_item_key(self, item, for_prev=False):
value = getattr(item, self.key)
value = float(value.strftime("%s.%f")) * self.multiplier
return int(math.floor(value) if self._is_asc(for_prev) else math.ceil(value))
def value_from_cursor(self, cursor):
return datetime.fromtimestamp(float(cursor.value) / self.multiplier).replace(
tzinfo=timezone.utc
)
# TODO(dcramer): previous cursors are too complex at the moment for many things
# and are only useful for polling situations. The OffsetPaginator ignores them
# entirely and uses standard paging
| DateTimePaginator |
python | joke2k__faker | faker/providers/date_time/fr_CA/__init__.py | {
"start": 71,
"end": 202
} | class ____(FRFRProvider):
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
| Provider |
python | getsentry__sentry | src/sentry/metrics/base.py | {
"start": 677,
"end": 2628
} | class ____(local):
def __init__(self, prefix: str | None = None) -> None:
if prefix is None:
prefix = settings.SENTRY_METRICS_PREFIX
self.prefix = prefix
def _get_key(self, key: str) -> str:
if self.prefix:
return f"{self.prefix}{key}"
return key
def _should_sample(self, sample_rate: float) -> bool:
return sample_rate >= 1 or random() >= 1 - sample_rate
def incr(
self,
key: str,
instance: str | None = None,
tags: Tags | None = None,
amount: float | int = 1,
sample_rate: float = 1,
unit: str | None = None,
stacklevel: int = 0,
) -> None:
raise NotImplementedError
def timing(
self,
key: str,
value: float,
instance: str | None = None,
tags: Tags | None = None,
sample_rate: float = 1,
stacklevel: int = 0,
) -> None:
raise NotImplementedError
def gauge(
self,
key: str,
value: float,
instance: str | None = None,
tags: Tags | None = None,
sample_rate: float = 1,
unit: str | None = None,
stacklevel: int = 0,
) -> None:
raise NotImplementedError
def distribution(
self,
key: str,
value: float,
instance: str | None = None,
tags: Tags | None = None,
sample_rate: float = 1,
unit: str | None = None,
stacklevel: int = 0,
) -> None:
raise NotImplementedError
def event(
self,
title: str,
message: str,
alert_type: str | None = None,
aggregation_key: str | None = None,
source_type_name: str | None = None,
priority: str | None = None,
instance: str | None = None,
tags: Tags | None = None,
stacklevel: int = 0,
) -> None:
raise NotImplementedError
| MetricsBackend |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/exc.py | {
"start": 11215,
"end": 11362
} | class ____(InvalidRequestError):
"""Raised by ``ForeignKey`` to indicate a reference cannot be resolved."""
table_name: str
| NoReferenceError |
python | getsentry__sentry | tests/sentry/sentry_apps/api/bases/test_sentryapps.py | {
"start": 5074,
"end": 5773
} | class ____(TestCase):
def setUp(self) -> None:
self.endpoint = SentryAppBaseEndpoint()
self.request = drf_request_from_request(self.make_request(user=self.user, method="GET"))
self.sentry_app = self.create_sentry_app(name="foo", organization=self.organization)
def test_retrieves_sentry_app(self) -> None:
args, kwargs = self.endpoint.convert_args(self.request, self.sentry_app.slug)
assert kwargs["sentry_app"].id == self.sentry_app.id
def test_raises_when_sentry_app_not_found(self) -> None:
with pytest.raises(SentryAppError):
self.endpoint.convert_args(self.request, "notanapp")
@control_silo_test
| SentryAppBaseEndpointTest |
python | google__jax | jax/_src/interpreters/ad.py | {
"start": 25277,
"end": 25875
} | class ____(GradAccum):
aval: core.AbstractValue
ref: AbstractRef | None
def __init__(self, aval, ref=None):
self.aval = aval
self.ref = ref
def accum(self, x):
assert x is not Zero
if isinstance(x, Zero) or x is None:
return
elif self.ref is None:
self.ref = core.new_ref(x)
else:
self.ref.addupdate(x)
def freeze(self):
if self.ref is None:
return Zero(self.aval)
else:
return core.freeze(self.ref)
def inst(self):
if self.ref is None:
self.ref = core.new_ref(zeros_like_aval(self.aval))
return self
| RefAccum |
python | openai__openai-python | src/openai/cli/_api/files.py | {
"start": 1314,
"end": 1383
} | class ____(BaseModel):
file: str
purpose: str
| CLIFileCreateArgs |
python | pennersr__django-allauth | allauth/socialaccount/providers/tumblr_oauth2/provider.py | {
"start": 375,
"end": 825
} | class ____(OAuth2Provider):
id = "tumblr_oauth2"
name = "Tumblr"
account_class = TumblrAccount
oauth2_adapter_class = TumblrOAuth2Adapter
def extract_uid(self, data):
return data["name"]
def extract_common_fields(self, data):
return dict(
first_name=data.get("name"),
)
def get_default_scope(self):
return ["read"]
provider_classes = [TumblrOAuth2Provider]
| TumblrOAuth2Provider |
python | django__django | tests/update/models.py | {
"start": 732,
"end": 791
} | class ____(C):
a = models.ForeignKey(A, models.CASCADE)
| D |
python | getsentry__sentry | tests/sentry/uptime/consumers/test_results_consumer.py | {
"start": 2481,
"end": 56516
} | class ____(ConfigPusherTestMixin, metaclass=abc.ABCMeta):
__test__ = Abstract(__module__, __qualname__)
@property
@abc.abstractmethod
def strategy_processing_mode(self) -> Literal["batched-parallel", "parallel", "serial"]:
pass
def setUp(self) -> None:
super().setUp()
self.partition = Partition(Topic("test"), 0)
self.subscription = self.create_uptime_subscription(
subscription_id=uuid.uuid4().hex, interval_seconds=300, region_slugs=["default"]
)
self.detector = self.create_uptime_detector(
uptime_subscription=self.subscription,
downtime_threshold=2,
recovery_threshold=2,
owner=self.user,
)
def send_result(
self, result: CheckResult, consumer: ProcessingStrategy[KafkaPayload] | None = None
):
codec = kafka_definition.get_topic_codec(kafka_definition.Topic.UPTIME_RESULTS)
message = Message(
BrokerValue(
KafkaPayload(None, codec.encode(result), []),
self.partition,
1,
datetime.now(),
)
)
with self.feature(UptimeDomainCheckFailure.build_ingest_feature_name()):
if consumer is None:
factory = UptimeResultsStrategyFactory(
consumer_group="test", mode=self.strategy_processing_mode
)
commit = mock.Mock()
consumer = factory.create_with_partitions(commit, {self.partition: 0})
consumer.submit(message)
def decode_trace_item(self, payload_value: bytes) -> TraceItem:
"""Helper to decode a TraceItem from the produced Kafka payload."""
codec = get_topic_codec(KafkaTopic.SNUBA_ITEMS)
return codec.decode(payload_value)
def test(self) -> None:
fingerprint = build_detector_fingerprint_component(self.detector).encode("utf-8")
hashed_fingerprint = md5(fingerprint).hexdigest()
with (
self.feature("organizations:uptime"),
mock.patch("sentry.uptime.consumers.results_consumer.metrics") as metrics,
):
# First processed result does NOT create an occurrence since we
# have not yet met the active threshold
self.send_result(
self.create_uptime_result(
self.subscription.subscription_id,
scheduled_check_time=datetime.now() - timedelta(minutes=5),
)
)
metrics.incr.assert_has_calls(
[
call(
"uptime.result_processor.handle_result_for_project",
tags={
"status_reason": CHECKSTATUSREASONTYPE_TIMEOUT,
"status": CHECKSTATUS_FAILURE,
"mode": "auto_detected_active",
"uptime_region": "us-west",
"host_provider": "TEST",
},
sample_rate=1.0,
),
]
)
assert not Group.objects.filter(grouphash__hash=hashed_fingerprint).exists()
metrics.incr.reset_mock()
# Second processed result DOES create an occurrence since we met
# the threshold
self.send_result(
self.create_uptime_result(
self.subscription.subscription_id,
scheduled_check_time=datetime.now() - timedelta(minutes=4),
)
)
metrics.incr.assert_has_calls(
[
call(
"uptime.result_processor.handle_result_for_project",
tags={
"status_reason": CHECKSTATUSREASONTYPE_TIMEOUT,
"status": CHECKSTATUS_FAILURE,
"mode": "auto_detected_active",
"uptime_region": "us-west",
"host_provider": "TEST",
},
sample_rate=1.0,
),
]
)
group = Group.objects.get(grouphash__hash=hashed_fingerprint)
assert group.issue_type == UptimeDomainCheckFailure
assignee = group.get_assignee()
assert assignee and (assignee.id == self.user.id)
self.detector.refresh_from_db()
detector_state = self.detector.detectorstate_set.first()
assert detector_state is not None
assert detector_state.priority_level == DetectorPriorityLevel.HIGH
assert detector_state.is_triggered
# Issue is resolved
with self.feature("organizations:uptime"):
# First processed result does NOT resolve since we have not yet met
# the recovery threshold
self.send_result(
self.create_uptime_result(
self.subscription.subscription_id,
status=CHECKSTATUS_SUCCESS,
scheduled_check_time=datetime.now() - timedelta(minutes=3),
)
)
assert not Group.objects.filter(
grouphash__hash=hashed_fingerprint, status=GroupStatus.RESOLVED
).exists()
# Issue is resolved once the threshold is met
self.send_result(
self.create_uptime_result(
self.subscription.subscription_id,
status=CHECKSTATUS_SUCCESS,
scheduled_check_time=datetime.now() - timedelta(minutes=2),
)
)
assert Group.objects.filter(
grouphash__hash=hashed_fingerprint, status=GroupStatus.RESOLVED
).exists()
def test_does_nothing_when_missing_detector(self) -> None:
features = [
"organizations:uptime",
]
self.detector.delete()
result = self.create_uptime_result(
self.subscription.subscription_id,
scheduled_check_time=datetime.now() - timedelta(minutes=5),
)
with (
self.feature(features),
mock.patch("sentry.remote_subscriptions.consumers.result_consumer.logger") as logger,
mock.patch(
"sentry.uptime.consumers.results_consumer.delete_uptime_subscription"
) as mock_delete_uptime_subscription,
):
# Does not produce an error
self.send_result(result)
assert not logger.exception.called
mock_delete_uptime_subscription.assert_called_with(self.subscription)
def test_no_create_issues_option(self) -> None:
self.detector.config.update({"downtime_threshold": 1, "recovery_threshold": 1})
self.detector.save()
result = self.create_uptime_result(self.subscription.subscription_id)
with (
self.options({"uptime.create-issues": False}),
self.feature("organizations:uptime"),
mock.patch("sentry.uptime.consumers.results_consumer.metrics") as metrics,
):
self.send_result(result)
metrics.incr.assert_has_calls(
[
call(
"uptime.result_processor.handle_result_for_project",
tags={
"status_reason": CHECKSTATUSREASONTYPE_TIMEOUT,
"status": CHECKSTATUS_FAILURE,
"mode": "auto_detected_active",
"uptime_region": "us-west",
"host_provider": "TEST",
},
sample_rate=1.0,
)
]
)
fingerprint = build_detector_fingerprint_component(self.detector).encode("utf-8")
hashed_fingerprint = md5(fingerprint).hexdigest()
with pytest.raises(Group.DoesNotExist):
Group.objects.get(grouphash__hash=hashed_fingerprint)
self.detector.refresh_from_db()
detector_state = self.detector.detectorstate_set.first()
assert detector_state is not None
assert detector_state.priority_level == DetectorPriorityLevel.HIGH
assert detector_state.is_triggered
def test_no_subscription(self) -> None:
features = [
"organizations:uptime",
]
subscription_id = uuid.uuid4().hex
result = self.create_uptime_result(subscription_id, uptime_region="default")
with (
mock.patch("sentry.uptime.consumers.results_consumer.metrics") as metrics,
self.feature(features),
):
self.send_result(result)
metrics.incr.assert_has_calls(
[
call(
"uptime.result_processor.subscription_not_found",
tags={"uptime_region": "default"},
sample_rate=1.0,
)
]
)
self.assert_redis_config(
"default", UptimeSubscription(subscription_id=subscription_id), "delete", None
)
def test_organization_feature_disabled(self) -> None:
"""
Tests that we do not process results for disabled project subscriptions
"""
# Second disabled project subscription
result = self.create_uptime_result(self.subscription.subscription_id)
with (
mock.patch("sentry.uptime.consumers.results_consumer.metrics") as metrics,
self.feature({"organizations:uptime": False}),
):
self.send_result(result)
handle_result_calls = [
c
for c in metrics.incr.mock_calls
if c[1][0] == "uptime.result_processor.handle_result_for_project"
]
assert len(handle_result_calls) == 0
metrics.incr.assert_has_calls(
[
call("uptime.result_processor.dropped_no_feature"),
]
)
def test_missed_check_false_positive(self) -> None:
result = self.create_uptime_result(self.subscription.subscription_id)
# Pretend we got a result 3500 seconds ago (nearly an hour); the subscription
# has an interval of 300 seconds, which we're going to say was just recently
# changed. Verify we don't emit any metrics recording of a missed check
get_cluster().set(
build_last_update_key(self.detector),
int(result["scheduled_check_time_ms"]) - (3500 * 1000),
)
get_cluster().set(
build_last_seen_interval_key(self.detector),
3600 * 1000,
)
with (
mock.patch("sentry.uptime.consumers.results_consumer.logger") as logger,
self.feature("organizations:uptime"),
):
self.send_result(result)
logger.info.assert_any_call(
"uptime.result_processor.false_num_missing_check",
extra={**result},
)
def test_missed_check_updated_interval(self) -> None:
result = self.create_uptime_result(self.subscription.subscription_id)
# Pretend we got a result 3500 seconds ago (nearly an hour); the subscription
# has an interval of 300 seconds, which we're going to say was just recently
# changed. Verify we don't emit any metrics recording of a missed check
get_cluster().set(
build_last_update_key(self.detector),
int(result["scheduled_check_time_ms"]) - (3500 * 1000),
)
get_cluster().set(
build_last_seen_interval_key(self.detector),
3600 * 1000,
)
with (
mock.patch("sentry.uptime.consumers.results_consumer.logger") as logger,
self.feature("organizations:uptime"),
):
self.send_result(result)
logger.info.assert_any_call(
"uptime.result_processor.false_num_missing_check",
extra={**result},
)
# Send another check that should now be classified as a miss
result = self.create_uptime_result(self.subscription.subscription_id)
result["scheduled_check_time_ms"] = int(result["scheduled_check_time_ms"]) + (600 * 1000)
result["actual_check_time_ms"] = result["scheduled_check_time_ms"]
with (
mock.patch("sentry.uptime.consumers.results_consumer.logger") as logger,
self.feature("organizations:uptime"),
):
self.send_result(result)
logger.info.assert_any_call(
"uptime.result_processor.num_missing_check",
extra={"num_missed_checks": 1, **result},
)
@mock.patch("sentry.uptime.consumers.eap_producer._eap_items_producer.produce")
def test_no_missed_check_for_disabled(self, mock_produce: MagicMock) -> None:
result = self.create_uptime_result(self.subscription.subscription_id)
# Pretend we got a result 900 seconds ago; the subscription
# has an interval of 300 seconds. We've missed two checks.
last_update_time = int(result["scheduled_check_time_ms"]) - (900 * 1000)
get_cluster().set(
build_last_update_key(self.detector),
last_update_time,
)
get_cluster().set(
build_last_seen_interval_key(self.detector),
300 * 1000,
)
# Enabling and disabling should clear the last_update_time, and we
# will not produce any synthetic checks
disable_uptime_detector(self.detector)
enable_uptime_detector(self.detector)
with (self.feature("organizations:uptime"),):
self.send_result(result)
assert mock_produce.call_count == 1
check = self.decode_trace_item(mock_produce.call_args_list[0].args[1].value)
assert check.attributes["check_status"].string_value == "failure"
@mock.patch("sentry.uptime.consumers.eap_producer._eap_items_producer.produce")
def test_missed_check_true_positive(self, mock_produce: MagicMock) -> None:
result = self.create_uptime_result(self.subscription.subscription_id)
# Pretend we got a result 900 seconds ago; the subscription
# has an interval of 300 seconds. We've missed two checks.
last_update_time = int(result["scheduled_check_time_ms"]) - (900 * 1000)
get_cluster().set(
build_last_update_key(self.detector),
last_update_time,
)
get_cluster().set(
build_last_seen_interval_key(self.detector),
300 * 1000,
)
with (
mock.patch("sentry.uptime.consumers.results_consumer.logger") as logger,
self.feature("organizations:uptime"),
):
self.send_result(result)
assert mock_produce.call_count == 3
synth_1 = self.decode_trace_item(mock_produce.call_args_list[0].args[1].value)
synth_2 = self.decode_trace_item(mock_produce.call_args_list[1].args[1].value)
synth_3 = self.decode_trace_item(mock_produce.call_args_list[2].args[1].value)
assert synth_1.attributes["check_status"].string_value == "missed_window"
assert synth_2.attributes["check_status"].string_value == "missed_window"
assert synth_3.attributes["check_status"].string_value == "failure"
assert (
synth_1.attributes["scheduled_check_time_us"].int_value
== (last_update_time + 300 * 1000) * 1000
)
assert (
synth_2.attributes["scheduled_check_time_us"].int_value
== (last_update_time + 600 * 1000) * 1000
)
logger.info.assert_any_call(
"uptime.result_processor.num_missing_check",
extra={"num_missed_checks": 2, **result},
)
def test_skip_already_processed(self) -> None:
features = [
"organizations:uptime",
]
result = self.create_uptime_result(self.subscription.subscription_id)
get_cluster().set(
build_last_update_key(self.detector),
int(result["scheduled_check_time_ms"]),
)
with (
mock.patch("sentry.uptime.consumers.results_consumer.metrics") as metrics,
self.feature(features),
):
self.send_result(result)
metrics.incr.assert_has_calls(
[
call(
"uptime.result_processor.handle_result_for_project",
tags={
"status_reason": CHECKSTATUSREASONTYPE_TIMEOUT,
"status": CHECKSTATUS_FAILURE,
"mode": "auto_detected_active",
"uptime_region": "us-west",
"host_provider": "TEST",
},
sample_rate=1.0,
),
call(
"uptime.result_processor.skipping_already_processed_update",
tags={
"status": CHECKSTATUS_FAILURE,
"mode": "auto_detected_active",
"uptime_region": "us-west",
"host_provider": "TEST",
},
sample_rate=1.0,
),
]
)
fingerprint = build_detector_fingerprint_component(self.detector).encode("utf-8")
hashed_fingerprint = md5(fingerprint).hexdigest()
with pytest.raises(Group.DoesNotExist):
Group.objects.get(grouphash__hash=hashed_fingerprint)
def test_skip_shadow_region(self) -> None:
features = [
"organizations:uptime",
]
region_name = "shadow"
self.create_uptime_subscription_region(
self.subscription, region_name, UptimeSubscriptionRegion.RegionMode.SHADOW
)
result = self.create_uptime_result(
self.subscription.subscription_id,
scheduled_check_time=datetime.now() - timedelta(minutes=5),
uptime_region=region_name,
)
with (
mock.patch("sentry.uptime.consumers.results_consumer.metrics") as metrics,
self.feature(features),
):
self.send_result(result)
metrics.incr.assert_has_calls(
[
call(
"uptime.result_processor.dropped_shadow_result",
sample_rate=1.0,
tags={
"status": CHECKSTATUS_FAILURE,
"host_provider": "TEST",
"uptime_region": "shadow",
},
),
]
)
fingerprint = build_detector_fingerprint_component(self.detector).encode("utf-8")
hashed_fingerprint = md5(fingerprint).hexdigest()
with pytest.raises(Group.DoesNotExist):
Group.objects.get(grouphash__hash=hashed_fingerprint)
def test_missed(self) -> None:
features = [
"organizations:uptime",
]
result = self.create_uptime_result(
self.subscription.subscription_id, status=CHECKSTATUS_MISSED_WINDOW
)
with (
mock.patch("sentry.uptime.consumers.results_consumer.metrics") as metrics,
mock.patch("sentry.uptime.consumers.results_consumer.logger") as logger,
self.feature(features),
):
self.send_result(result)
metrics.incr.assert_called_once_with(
"uptime.result_processor.handle_result_for_project",
tags={
"status": CHECKSTATUS_MISSED_WINDOW,
"mode": "auto_detected_active",
"status_reason": CHECKSTATUSREASONTYPE_TIMEOUT,
"uptime_region": "us-west",
"host_provider": "TEST",
},
sample_rate=1.0,
)
logger.info.assert_any_call(
"handle_result_for_project.missed",
extra={"project_id": self.project.id, **result},
)
fingerprint = build_detector_fingerprint_component(self.detector).encode("utf-8")
hashed_fingerprint = md5(fingerprint).hexdigest()
with pytest.raises(Group.DoesNotExist):
Group.objects.get(grouphash__hash=hashed_fingerprint)
def test_disallowed(self) -> None:
features = [
"organizations:uptime",
"organizations:uptime-create-issues",
]
result = self.create_uptime_result(
self.subscription.subscription_id, status=CHECKSTATUS_DISALLOWED_BY_ROBOTS
)
with (
mock.patch("sentry.uptime.consumers.results_consumer.logger") as logger,
self.feature(features),
):
assert self.detector.enabled
self.send_result(result)
logger.info.assert_any_call(
"disallowed_by_robots",
extra={**result},
)
self.detector.refresh_from_db()
assert not self.detector.enabled
def test_onboarding_failure(self) -> None:
features = [
"organizations:uptime",
]
# Update detector mode configuration
self.detector.update(
config={
**self.detector.config,
"mode": UptimeMonitorMode.AUTO_DETECTED_ONBOARDING.value,
}
)
result = self.create_uptime_result(
self.subscription.subscription_id,
status=CHECKSTATUS_FAILURE,
scheduled_check_time=datetime.now() - timedelta(minutes=5),
)
redis = get_cluster()
key = build_onboarding_failure_key(self.detector)
assert redis.get(key) is None
with (
mock.patch("sentry.uptime.consumers.results_consumer.metrics") as metrics,
self.feature(features),
):
self.send_result(result)
metrics.incr.assert_has_calls(
[
call(
"uptime.result_processor.handle_result_for_project",
tags={
"status": CHECKSTATUS_FAILURE,
"mode": "auto_detected_onboarding",
"status_reason": CHECKSTATUSREASONTYPE_TIMEOUT,
"uptime_region": "us-west",
"host_provider": "TEST",
},
sample_rate=1.0,
),
]
)
assert redis.get(key) == "1"
fingerprint = build_detector_fingerprint_component(self.detector).encode("utf-8")
hashed_fingerprint = md5(fingerprint).hexdigest()
with pytest.raises(Group.DoesNotExist):
Group.objects.get(grouphash__hash=hashed_fingerprint)
result = self.create_uptime_result(
self.subscription.subscription_id,
status=CHECKSTATUS_FAILURE,
scheduled_check_time=datetime.now() - timedelta(minutes=4),
)
with (
mock.patch("sentry.quotas.backend.remove_seat") as mock_remove_seat,
mock.patch("sentry.uptime.consumers.results_consumer.metrics") as consumer_metrics,
mock.patch("sentry.uptime.autodetect.result_handler.metrics") as onboarding_metrics,
mock.patch(
"sentry.uptime.autodetect.result_handler.ONBOARDING_FAILURE_THRESHOLD", new=2
),
self.tasks(),
self.feature(features),
):
remove_call_vals = []
def capture_remove_seat(data_category, seat_object):
remove_call_vals.append((data_category, seat_object.id))
mock_remove_seat.side_effect = capture_remove_seat
self.send_result(result)
consumer_metrics.incr.assert_has_calls(
[
call(
"uptime.result_processor.handle_result_for_project",
tags={
"status": CHECKSTATUS_FAILURE,
"mode": "auto_detected_onboarding",
"status_reason": CHECKSTATUSREASONTYPE_TIMEOUT,
"uptime_region": "us-west",
"host_provider": "TEST",
},
sample_rate=1.0,
),
]
)
onboarding_metrics.incr.assert_has_calls(
[
call(
"uptime.result_processor.autodetection.failed_onboarding",
tags={
"failure_reason": CHECKSTATUSREASONTYPE_TIMEOUT,
"uptime_region": "us-west",
"host_provider": "TEST",
"status": CHECKSTATUS_FAILURE,
},
sample_rate=1.0,
),
]
)
assert not redis.exists(key)
assert is_failed_url(self.subscription.url)
# XXX: Since project_subscription is mutable, the delete sets the id to null. So we're unable
# to compare the calls directly. Instead, we add a side effect to the mock so that it keeps track of
# the values we want to check.
assert remove_call_vals == [(DataCategory.UPTIME, self.detector.id)]
fingerprint = build_detector_fingerprint_component(self.detector).encode("utf-8")
hashed_fingerprint = md5(fingerprint).hexdigest()
with pytest.raises(Group.DoesNotExist):
Group.objects.get(grouphash__hash=hashed_fingerprint)
with pytest.raises(UptimeSubscription.DoesNotExist):
self.subscription.refresh_from_db()
# Detector should be marked for pending deletion when subscription is removed
self.detector.refresh_from_db()
assert self.detector.status == ObjectStatus.PENDING_DELETION
def test_onboarding_success_ongoing(self) -> None:
features = [
"organizations:uptime",
]
self.detector.update(
config={
**self.detector.config,
"mode": UptimeMonitorMode.AUTO_DETECTED_ONBOARDING.value,
},
date_added=datetime.now(timezone.utc)
- (ONBOARDING_MONITOR_PERIOD + timedelta(minutes=5)),
)
result = self.create_uptime_result(
self.subscription.subscription_id,
status=CHECKSTATUS_SUCCESS,
scheduled_check_time=datetime.now() - timedelta(minutes=5),
)
redis = get_cluster()
key = build_onboarding_failure_key(self.detector)
assert redis.get(key) is None
with (
mock.patch("sentry.uptime.consumers.results_consumer.metrics") as metrics,
self.feature(features),
):
self.send_result(result)
metrics.incr.assert_has_calls(
[
call(
"uptime.result_processor.handle_result_for_project",
tags={
"status_reason": CHECKSTATUSREASONTYPE_TIMEOUT,
"status": CHECKSTATUS_SUCCESS,
"mode": "auto_detected_onboarding",
"uptime_region": "us-west",
"host_provider": "TEST",
},
sample_rate=1.0,
),
]
)
assert not redis.exists(key)
fingerprint = build_detector_fingerprint_component(self.detector).encode("utf-8")
hashed_fingerprint = md5(fingerprint).hexdigest()
with pytest.raises(Group.DoesNotExist):
Group.objects.get(grouphash__hash=hashed_fingerprint)
def test_onboarding_success_graduate(self) -> None:
features = [
"organizations:uptime",
]
self.detector.update(
config={
**self.detector.config,
"mode": UptimeMonitorMode.AUTO_DETECTED_ONBOARDING.value,
},
date_added=datetime.now(timezone.utc)
- (ONBOARDING_MONITOR_PERIOD + timedelta(minutes=5)),
)
result = self.create_uptime_result(
self.subscription.subscription_id,
status=CHECKSTATUS_SUCCESS,
scheduled_check_time=datetime.now() - timedelta(minutes=2),
)
redis = get_cluster()
key = build_onboarding_failure_key(self.detector)
assert redis.get(key) is None
with (
mock.patch("sentry.uptime.consumers.results_consumer.metrics") as consumer_metrics,
mock.patch("sentry.uptime.autodetect.result_handler.metrics") as onboarding_metrics,
mock.patch(
"sentry.uptime.autodetect.result_handler.send_auto_detected_notifications"
) as mock_email_task,
self.tasks(),
self.feature(features),
):
self.send_result(result)
consumer_metrics.incr.assert_has_calls(
[
call(
"uptime.result_processor.handle_result_for_project",
tags={
"status_reason": CHECKSTATUSREASONTYPE_TIMEOUT,
"status": CHECKSTATUS_SUCCESS,
"mode": "auto_detected_onboarding",
"uptime_region": "us-west",
"host_provider": "TEST",
},
sample_rate=1.0,
),
]
)
onboarding_metrics.incr.assert_has_calls(
[
call(
"uptime.result_processor.autodetection.graduated_onboarding",
tags={
"status": CHECKSTATUS_SUCCESS,
"uptime_region": "us-west",
"host_provider": "TEST",
},
sample_rate=1.0,
),
]
)
mock_email_task.delay.assert_called_once_with(self.detector.id)
assert not redis.exists(key)
fingerprint = build_detector_fingerprint_component(self.detector).encode("utf-8")
hashed_fingerprint = md5(fingerprint).hexdigest()
with pytest.raises(Group.DoesNotExist):
Group.objects.get(grouphash__hash=hashed_fingerprint)
self.detector.refresh_from_db()
assert self.detector.config["mode"] == UptimeMonitorMode.AUTO_DETECTED_ACTIVE.value
self.subscription.refresh_from_db()
assert self.subscription.interval_seconds == int(
AUTO_DETECTED_ACTIVE_SUBSCRIPTION_INTERVAL.total_seconds()
)
assert self.subscription.url == self.subscription.url
    def test_onboarding_graduation_no_seat_available(self) -> None:
        """
        Test that when an onboarding monitor tries to graduate to active status
        but no seat is available, the detector is deleted.
        """
        features = [
            "organizations:uptime",
        ]
        # Age the detector past the onboarding window so the next successful
        # check result triggers the graduation path.
        self.detector.update(
            config={
                **self.detector.config,
                "mode": UptimeMonitorMode.AUTO_DETECTED_ONBOARDING.value,
            },
            date_added=datetime.now(timezone.utc)
            - (ONBOARDING_MONITOR_PERIOD + timedelta(minutes=5)),
        )
        result = self.create_uptime_result(
            self.subscription.subscription_id,
            status=CHECKSTATUS_SUCCESS,
            scheduled_check_time=datetime.now() - timedelta(minutes=2),
        )
        redis = get_cluster()
        key = build_onboarding_failure_key(self.detector)
        assert redis.get(key) is None
        with (
            mock.patch("sentry.uptime.consumers.results_consumer.metrics") as consumer_metrics,
            mock.patch("sentry.uptime.autodetect.result_handler.metrics") as onboarding_metrics,
            mock.patch("sentry.uptime.autodetect.result_handler.logger") as onboarding_logger,
            # Simulate seat-quota exhaustion: graduation fails because no seat
            # can be assigned for the would-be active monitor.
            mock.patch(
                "sentry.uptime.autodetect.result_handler.update_uptime_detector",
                side_effect=UptimeMonitorNoSeatAvailable(
                    SeatAssignmentResult(assignable=False, reason="Testing")
                ),
            ),
            self.tasks(),
            self.feature(features),
        ):
            self.send_result(result)
            consumer_metrics.incr.assert_has_calls(
                [
                    call(
                        "uptime.result_processor.handle_result_for_project",
                        tags={
                            "status_reason": CHECKSTATUSREASONTYPE_TIMEOUT,
                            "status": CHECKSTATUS_SUCCESS,
                            "mode": "auto_detected_onboarding",
                            "uptime_region": "us-west",
                            "host_provider": "TEST",
                        },
                        sample_rate=1.0,
                    ),
                ]
            )
            onboarding_metrics.incr.assert_called_once_with(
                "uptime.result_processor.autodetection.graduated_onboarding_no_seat",
                tags={
                    "status": CHECKSTATUS_SUCCESS,
                    "uptime_region": "us-west",
                    "host_provider": "TEST",
                },
                sample_rate=1.0,
            )
            onboarding_logger.info.assert_called_once_with(
                "uptime_onboarding_graduated_no_seat",
                extra={
                    "project_id": self.detector.project_id,
                    "url": self.subscription.url,
                    **result,
                },
            )
            # With no seat: detector is marked for deletion, its subscription
            # and the onboarding-failure redis key are removed.
            self.detector.refresh_from_db()
            assert self.detector.status == ObjectStatus.PENDING_DELETION
            with pytest.raises(UptimeSubscription.DoesNotExist):
                self.subscription.refresh_from_db()
            assert not redis.exists(key)
            # No issue group should ever have been created for this detector.
            fingerprint = build_detector_fingerprint_component(self.detector).encode("utf-8")
            hashed_fingerprint = md5(fingerprint).hexdigest()
            with pytest.raises(Group.DoesNotExist):
                Group.objects.get(grouphash__hash=hashed_fingerprint)
    def test_parallel(self) -> None:
        """
        Validates that the batched-parallel consumer buffers results until the
        batch is full, and only then dispatches each buffered result to the
        result processor.
        """
        factory = UptimeResultsStrategyFactory(
            consumer_group="test",
            mode="batched-parallel",
            max_batch_size=3,
            max_workers=1,
        )
        consumer = factory.create_with_partitions(mock.Mock(), {self.partition: 0})

        with mock.patch.object(type(factory.result_processor), "__call__") as mock_processor_call:
            subscription_2 = self.create_uptime_subscription(
                subscription_id=uuid.uuid4().hex, interval_seconds=300, url="http://santry.io"
            )
            result_1 = self.create_uptime_result(
                self.subscription.subscription_id,
                scheduled_check_time=datetime.now() - timedelta(minutes=5),
            )
            self.send_result(result_1, consumer=consumer)
            result_2 = self.create_uptime_result(
                self.subscription.subscription_id,
                scheduled_check_time=datetime.now() - timedelta(minutes=4),
            )
            self.send_result(result_2, consumer=consumer)
            # This will fill the batch
            result_3 = self.create_uptime_result(
                subscription_2.subscription_id,
                scheduled_check_time=datetime.now() - timedelta(minutes=4),
            )
            self.send_result(result_3, consumer=consumer)
            # Should be no calls yet, since we didn't send the batch
            assert mock_processor_call.call_count == 0
            # One more causes the previous batch to send
            self.send_result(
                self.create_uptime_result(
                    subscription_2.subscription_id,
                    scheduled_check_time=datetime.now() - timedelta(minutes=3),
                ),
                consumer=consumer,
            )
            assert mock_processor_call.call_count == 3
            mock_processor_call.assert_has_calls(
                [call("uptime", result_1), call("uptime", result_2), call("uptime", result_3)]
            )
    @mock.patch(
        "sentry.remote_subscriptions.consumers.result_consumer.ResultsStrategyFactory.process_group"
    )
    def test_parallel_grouping(self, mock_process_group: MagicMock) -> None:
        """
        Validates that the batched-parallel consumer groups buffered results
        by their subscription before handing each group to process_group.
        """
        factory = UptimeResultsStrategyFactory(
            consumer_group="test",
            mode="batched-parallel",
            max_batch_size=3,
            max_workers=1,
        )
        consumer = factory.create_with_partitions(mock.Mock(), {self.partition: 0})

        subscription_2 = self.create_uptime_subscription(
            subscription_id=uuid.uuid4().hex, interval_seconds=300, url="http://santry.io"
        )
        # Two results for the first subscription, one for the second.
        result_1 = self.create_uptime_result(
            self.subscription.subscription_id,
            scheduled_check_time=datetime.now() - timedelta(minutes=5),
        )
        self.send_result(result_1, consumer=consumer)
        result_2 = self.create_uptime_result(
            self.subscription.subscription_id,
            scheduled_check_time=datetime.now() - timedelta(minutes=4),
        )
        self.send_result(result_2, consumer=consumer)
        # This will fill the batch
        result_3 = self.create_uptime_result(
            subscription_2.subscription_id,
            scheduled_check_time=datetime.now() - timedelta(minutes=4),
        )
        self.send_result(result_3, consumer=consumer)
        # Should be no calls yet, since we didn't send the batch
        assert mock_process_group.call_count == 0
        # One more causes the previous batch to send
        self.send_result(result_3, consumer=consumer)
        assert mock_process_group.call_count == 2
        group_1 = mock_process_group.mock_calls[0].args[0]
        group_2 = mock_process_group.mock_calls[1].args[0]
        assert group_1 == [result_1, result_2]
        assert group_2 == [result_3]
    def test_provider_stats(self) -> None:
        """
        Validates host_provider metric tagging: with the patched
        TOTAL_PROVIDERS_TO_INCLUDE_AS_TAGS limit of 1, only the most common
        provider name is emitted as a tag value; other providers appear
        bucketed as "other".
        """
        features = [
            "organizations:uptime",
        ]
        subscription = self.create_uptime_subscription(
            subscription_id=uuid.uuid4().hex,
            host_provider_name="test_provider",
        )
        self.create_uptime_detector(self.project, uptime_subscription=subscription)
        # Second subscription with the same provider makes "test_provider"
        # the most common provider name.
        self.create_uptime_subscription(
            subscription_id=uuid.uuid4().hex,
            host_provider_name="test_provider",
        )
        with (
            self.feature(features),
            mock.patch("sentry.uptime.consumers.results_consumer.metrics") as metrics,
            mock.patch(
                "sentry.uptime.consumers.results_consumer.TOTAL_PROVIDERS_TO_INCLUDE_AS_TAGS",
                new=1,
            ),
        ):
            self.send_result(
                self.create_uptime_result(
                    subscription.subscription_id,
                    scheduled_check_time=datetime.now() - timedelta(minutes=5),
                )
            )
            self.send_result(
                self.create_uptime_result(
                    self.subscription.subscription_id,
                    scheduled_check_time=datetime.now() - timedelta(minutes=4),
                )
            )
            metrics.incr.assert_has_calls(
                [
                    call(
                        "uptime.result_processor.handle_result_for_project",
                        tags={
                            "status_reason": CHECKSTATUSREASONTYPE_TIMEOUT,
                            "status": CHECKSTATUS_FAILURE,
                            "mode": "auto_detected_active",
                            "uptime_region": "us-west",
                            "host_provider": "test_provider",
                        },
                        sample_rate=1.0,
                    ),
                    call(
                        "uptime.result_processor.handle_result_for_project",
                        tags={
                            "status_reason": CHECKSTATUSREASONTYPE_TIMEOUT,
                            "status": CHECKSTATUS_FAILURE,
                            "mode": "auto_detected_active",
                            "uptime_region": "us-west",
                            "host_provider": "other",
                        },
                        sample_rate=1.0,
                    ),
                ]
            )
    @mock.patch("sentry.uptime.consumers.eap_producer._eap_items_producer.produce")
    def test_produces_snuba_uptime_results(self, mock_produce: MagicMock) -> None:
        """
        Validates that the consumer produces a message to Snuba's Kafka topic for uptime check results
        """
        result = self.create_uptime_result(
            self.subscription.subscription_id,
            status=CHECKSTATUS_SUCCESS,
            scheduled_check_time=datetime.now() - timedelta(minutes=5),
        )
        self.send_result(result)
        mock_produce.assert_called_once()
        assert mock_produce.call_args.args[0].name == "snuba-items"
        trace_item = self.decode_trace_item(mock_produce.call_args.args[1].value)
        assert trace_item.organization_id == self.project.organization_id
        assert trace_item.project_id == self.project.id
        # incident_status 0 == not in an incident (check succeeded)
        assert trace_item.attributes["incident_status"].int_value == 0
        assert trace_item.retention_days == 90
    @mock.patch("sentry.uptime.consumers.eap_producer._eap_items_producer.produce")
    def test_produces_snuba_uptime_results_in_incident(self, mock_produce: MagicMock) -> None:
        """
        Validates that a failed check, with downtime/recovery thresholds of 1,
        is produced to Snuba's Kafka topic flagged as being in an incident.
        """
        # With both thresholds at 1, a single failure is enough to flag an incident.
        self.detector.config.update({"downtime_threshold": 1, "recovery_threshold": 1})
        self.detector.save()
        result = self.create_uptime_result(
            self.subscription.subscription_id,
            status=CHECKSTATUS_FAILURE,
            scheduled_check_time=datetime.now() - timedelta(minutes=5),
        )
        self.send_result(result)
        mock_produce.assert_called_once()
        assert mock_produce.call_args.args[0].name == "snuba-items"
        trace_item = self.decode_trace_item(mock_produce.call_args.args[1].value)
        # incident_status 1 == in an incident
        assert trace_item.attributes["incident_status"].int_value == 1
    @mock.patch("sentry.uptime.consumers.eap_producer._eap_items_producer.produce")
    def test_produces_eap_uptime_results(self, mock_produce: MagicMock) -> None:
        """
        Validates that the consumer produces TraceItems to EAP's Kafka topic for uptime check results
        """
        result = self.create_uptime_result(
            self.subscription.subscription_id,
            status=CHECKSTATUS_SUCCESS,
            scheduled_check_time=datetime.now() - timedelta(minutes=5),
        )
        self.send_result(result)
        mock_produce.assert_called_once()
        assert "snuba-items" in mock_produce.call_args.args[0].name
        payload = mock_produce.call_args.args[1]
        assert payload.key is None
        assert payload.headers == []
        # The produced payload must decode to exactly the TraceItems that the
        # converter emits for this result.
        expected_trace_items = convert_uptime_result_to_trace_items(
            self.project, result, IncidentStatus.NO_INCIDENT
        )
        codec = get_topic_codec(KafkaTopic.SNUBA_ITEMS)
        assert [codec.decode(payload.value)] == expected_trace_items
    def run_check_and_update_region_test(
        self,
        sub: UptimeSubscription,
        regions: list[str],
        region_overrides: dict[str, UptimeSubscriptionRegion.RegionMode],
        expected_regions_before: dict[str, UptimeSubscriptionRegion.RegionMode],
        expected_regions_after: dict[str, UptimeSubscriptionRegion.RegionMode],
        expected_config_updates: list[
            tuple[str, str | None, UptimeSubscriptionRegion.RegionMode | None]
        ],
        current_minute: int = 5,
    ) -> None:
        """
        Drive one check result through the consumer at a frozen wall-clock
        minute and assert the subscription's region assignments and redis
        config updates before/after processing.

        :param sub: subscription under test
        :param regions: slugs of all configured checker regions
        :param region_overrides: per-region mode overrides applied via options
        :param expected_regions_before: region -> mode mapping expected before processing
        :param expected_regions_after: region -> mode mapping expected after processing
        :param expected_config_updates: (region, action, mode) redis config updates to assert
        :param current_minute: minute-of-hour to freeze time at; callers vary this
            to land inside or outside the region sync window
        """
        region_configs = [
            UptimeRegionConfig(slug=slug, name=slug, config_redis_key_prefix=slug)
            for slug in regions
        ]
        with (
            override_settings(UPTIME_REGIONS=region_configs),
            override_options({"uptime.checker-regions-mode-override": region_overrides}),
            self.tasks(),
            freeze_time((datetime.now() - timedelta(hours=1)).replace(minute=current_minute)),
            # pin random.random so any probabilistic behaviour is deterministic
            mock.patch("random.random", return_value=1),
        ):
            result = self.create_uptime_result(
                sub.subscription_id,
                scheduled_check_time=datetime.now(),
            )
            assert {
                r.region_slug: UptimeSubscriptionRegion.RegionMode(r.mode)
                for r in sub.regions.all()
            } == expected_regions_before
            self.send_result(result)
            sub.refresh_from_db()
            assert {
                r.region_slug: UptimeSubscriptionRegion.RegionMode(r.mode)
                for r in sub.regions.all()
            } == expected_regions_after
            for expected_region, expected_action, expected_mode in expected_config_updates:
                self.assert_redis_config(expected_region, sub, expected_action, expected_mode)
            assert sub.status == UptimeSubscription.Status.ACTIVE.value
    def test_check_and_update_regions(self) -> None:
        """New active regions are only picked up in the subscription's sync window."""
        sub = self.create_uptime_subscription(
            subscription_id=uuid.UUID(int=5).hex,
            region_slugs=["region1"],
        )
        self.create_uptime_detector(uptime_subscription=sub)
        # Minute 4: outside the sync window — no region changes, no config writes.
        self.run_check_and_update_region_test(
            sub,
            ["region1", "region2"],
            {},
            {"region1": UptimeSubscriptionRegion.RegionMode.ACTIVE},
            {"region1": UptimeSubscriptionRegion.RegionMode.ACTIVE},
            [],
            4,
        )
        # Minute 5: in the window — region2 is added and both configs upserted.
        self.run_check_and_update_region_test(
            sub,
            ["region1", "region2"],
            {},
            {"region1": UptimeSubscriptionRegion.RegionMode.ACTIVE},
            {
                "region1": UptimeSubscriptionRegion.RegionMode.ACTIVE,
                "region2": UptimeSubscriptionRegion.RegionMode.ACTIVE,
            },
            [
                ("region1", "upsert", UptimeSubscriptionRegion.RegionMode.ACTIVE),
                ("region2", "upsert", UptimeSubscriptionRegion.RegionMode.ACTIVE),
            ],
            5,
        )
    def test_check_and_update_regions_active_shadow(self) -> None:
        """A region overridden to SHADOW mode has its checker config upserted as SHADOW."""
        sub = self.create_uptime_subscription(
            subscription_id=uuid.UUID(int=5).hex,
            region_slugs=["region1", "region2"],
        )
        self.create_uptime_detector(uptime_subscription=sub)
        # DB rows stay ACTIVE for both regions; only the redis config for
        # region2 is written in SHADOW mode.
        self.run_check_and_update_region_test(
            sub,
            ["region1", "region2"],
            {"region2": UptimeSubscriptionRegion.RegionMode.SHADOW},
            {
                "region1": UptimeSubscriptionRegion.RegionMode.ACTIVE,
                "region2": UptimeSubscriptionRegion.RegionMode.ACTIVE,
            },
            {
                "region1": UptimeSubscriptionRegion.RegionMode.ACTIVE,
                "region2": UptimeSubscriptionRegion.RegionMode.SHADOW,
            },
            [
                ("region1", "upsert", UptimeSubscriptionRegion.RegionMode.ACTIVE),
                ("region2", "upsert", UptimeSubscriptionRegion.RegionMode.SHADOW),
            ],
            5,
        )
    def test_check_and_update_regions_larger_interval(self) -> None:
        """Region sync windows scale with the subscription's check interval."""
        # Create subscription with only one region
        hour_sub = self.create_uptime_subscription(
            subscription_id=uuid.UUID(int=4).hex,
            region_slugs=["region1"],
            interval_seconds=UptimeSubscription.IntervalSeconds.ONE_HOUR,
        )
        self.create_uptime_detector(uptime_subscription=hour_sub)
        self.run_check_and_update_region_test(
            hour_sub,
            ["region1", "region2"],
            {},
            {"region1": UptimeSubscriptionRegion.RegionMode.ACTIVE},
            {
                "region1": UptimeSubscriptionRegion.RegionMode.ACTIVE,
                "region2": UptimeSubscriptionRegion.RegionMode.ACTIVE,
            },
            [
                ("region1", "upsert", UptimeSubscriptionRegion.RegionMode.ACTIVE),
                ("region2", "upsert", UptimeSubscriptionRegion.RegionMode.ACTIVE),
            ],
            37,
        )
        five_min_sub = self.create_uptime_subscription(
            subscription_id=uuid.UUID(int=6).hex,
            region_slugs=["region1"],
            interval_seconds=UptimeSubscription.IntervalSeconds.FIVE_MINUTES,
        )
        self.create_uptime_detector(uptime_subscription=five_min_sub)
        # Minutes outside the five-minute sub's sync window: no changes expected.
        self.run_check_and_update_region_test(
            five_min_sub,
            ["region1", "region2"],
            {},
            {"region1": UptimeSubscriptionRegion.RegionMode.ACTIVE},
            {"region1": UptimeSubscriptionRegion.RegionMode.ACTIVE},
            [],
            current_minute=6,
        )
        self.run_check_and_update_region_test(
            five_min_sub,
            ["region1", "region2"],
            {},
            {"region1": UptimeSubscriptionRegion.RegionMode.ACTIVE},
            {"region1": UptimeSubscriptionRegion.RegionMode.ACTIVE},
            [],
            current_minute=35,
        )
        self.run_check_and_update_region_test(
            five_min_sub,
            ["region1", "region2"],
            {},
            {"region1": UptimeSubscriptionRegion.RegionMode.ACTIVE},
            {"region1": UptimeSubscriptionRegion.RegionMode.ACTIVE},
            [],
            current_minute=49,
        )
        # Minute 30 lands in the window: region2 is added and configs upserted.
        self.run_check_and_update_region_test(
            five_min_sub,
            ["region1", "region2"],
            {},
            {"region1": UptimeSubscriptionRegion.RegionMode.ACTIVE},
            {
                "region1": UptimeSubscriptionRegion.RegionMode.ACTIVE,
                "region2": UptimeSubscriptionRegion.RegionMode.ACTIVE,
            },
            [
                ("region1", "upsert", UptimeSubscriptionRegion.RegionMode.ACTIVE),
                ("region2", "upsert", UptimeSubscriptionRegion.RegionMode.ACTIVE),
            ],
            current_minute=30,
        )
        # Make sure it works any time within the valid window
        five_min_sub = self.create_uptime_subscription(
            subscription_id=uuid.UUID(int=66).hex,
            region_slugs=["region1"],
            interval_seconds=UptimeSubscription.IntervalSeconds.FIVE_MINUTES,
        )
        self.create_uptime_detector(uptime_subscription=five_min_sub)
        self.run_check_and_update_region_test(
            five_min_sub,
            ["region1", "region2"],
            {},
            {"region1": UptimeSubscriptionRegion.RegionMode.ACTIVE},
            {
                "region1": UptimeSubscriptionRegion.RegionMode.ACTIVE,
                "region2": UptimeSubscriptionRegion.RegionMode.ACTIVE,
            },
            [
                ("region1", "upsert", UptimeSubscriptionRegion.RegionMode.ACTIVE),
                ("region2", "upsert", UptimeSubscriptionRegion.RegionMode.ACTIVE),
            ],
            current_minute=34,
        )
    def test_check_and_update_regions_removes_disabled(self) -> None:
        """Regions overridden to INACTIVE are removed (config deleted) during sync."""
        sub = self.create_uptime_subscription(
            subscription_id=uuid.UUID(int=5).hex,
            region_slugs=["region1", "region2"],
        )
        self.create_uptime_detector(uptime_subscription=sub)
        # Minute 4: outside the sync window — the override has no effect yet.
        self.run_check_and_update_region_test(
            sub,
            ["region1", "region2"],
            {"region2": UptimeSubscriptionRegion.RegionMode.INACTIVE},
            {
                "region1": UptimeSubscriptionRegion.RegionMode.ACTIVE,
                "region2": UptimeSubscriptionRegion.RegionMode.ACTIVE,
            },
            {
                "region1": UptimeSubscriptionRegion.RegionMode.ACTIVE,
                "region2": UptimeSubscriptionRegion.RegionMode.ACTIVE,
            },
            [],
            current_minute=4,
        )
        # Minute 5: in the window — region2 is dropped and its config deleted.
        self.run_check_and_update_region_test(
            sub,
            ["region1", "region2"],
            {"region2": UptimeSubscriptionRegion.RegionMode.INACTIVE},
            {
                "region1": UptimeSubscriptionRegion.RegionMode.ACTIVE,
                "region2": UptimeSubscriptionRegion.RegionMode.ACTIVE,
            },
            {"region1": UptimeSubscriptionRegion.RegionMode.ACTIVE},
            [
                ("region1", "upsert", UptimeSubscriptionRegion.RegionMode.ACTIVE),
                ("region2", "delete", None),
            ],
            current_minute=5,
        )
@thread_leak_allowlist(reason="uptime consumers", issue=97045)
| ProcessResultTest |
python | ansible__ansible | test/units/cli/test_galaxy.py | {
"start": 10956,
"end": 14600
} | class ____(object):
expected_role_dirs = ('defaults', 'files', 'handlers', 'meta', 'tasks', 'templates', 'vars', 'tests')
    @classmethod
    def setUpRole(cls, role_name, galaxy_args=None, skeleton_path=None, use_explicit_type=False):
        """
        Generate a role named *role_name* into a fresh temp dir by running
        `ansible-galaxy [role] init`, recording the paths on the class for
        the assertion methods to use.
        """
        if galaxy_args is None:
            galaxy_args = []

        if skeleton_path is not None:
            cls.role_skeleton_path = skeleton_path
            galaxy_args += ['--role-skeleton', skeleton_path]

        # Make temp directory for testing
        cls.test_dir = tempfile.mkdtemp()
        cls.role_dir = os.path.join(cls.test_dir, role_name)
        cls.role_name = role_name

        # create role using default skeleton
        args = ['ansible-galaxy']
        if use_explicit_type:
            args += ['role']
        args += ['init', '-c', '--offline'] + galaxy_args + ['--init-path', cls.test_dir, cls.role_name]

        gc = GalaxyCLI(args=args)
        gc.run()
        cls.gc = gc

        # Fall back to the CLI's default skeleton when none was supplied.
        if skeleton_path is None:
            cls.role_skeleton_path = gc.galaxy.default_role_skeleton_path

        # DTFIX-FUTURE: use a proper fixture for all of this
        from ansible.utils.collection_loader._collection_finder import _AnsibleCollectionFinder
        _AnsibleCollectionFinder._remove()
    @classmethod
    def tearDownRole(cls):
        """Remove the temporary directory created by setUpRole."""
        shutil.rmtree(cls.test_dir, ignore_errors=True)
def test_metadata(self):
with open(os.path.join(self.role_dir, 'meta', 'main.yml'), 'r') as mf:
metadata = yaml.safe_load(mf)
self.assertIn('galaxy_info', metadata, msg='unable to find galaxy_info in metadata')
self.assertIn('dependencies', metadata, msg='unable to find dependencies in metadata')
def test_readme(self):
readme_path = os.path.join(self.role_dir, 'README.md')
self.assertTrue(os.path.exists(readme_path), msg='Readme doesn\'t exist')
def test_main_ymls(self):
need_main_ymls = set(self.expected_role_dirs) - set(['meta', 'tests', 'files', 'templates'])
for d in need_main_ymls:
main_yml = os.path.join(self.role_dir, d, 'main.yml')
self.assertTrue(os.path.exists(main_yml))
if self.role_name == 'delete_me_skeleton':
expected_string = "---\n# {0} file for {1}".format(d, self.role_name)
else:
expected_string = "#SPDX-License-Identifier: MIT-0\n---\n# {0} file for {1}".format(d, self.role_name)
with open(main_yml, 'r') as f:
self.assertEqual(expected_string, f.read().strip())
def test_role_dirs(self):
for d in self.expected_role_dirs:
self.assertTrue(os.path.isdir(os.path.join(self.role_dir, d)), msg="Expected role subdirectory {0} doesn't exist".format(d))
def test_readme_contents(self):
with open(os.path.join(self.role_dir, 'README.md'), 'r') as readme:
contents = readme.read()
with open(os.path.join(self.role_skeleton_path, 'README.md'), 'r') as f:
expected_contents = f.read()
self.assertEqual(expected_contents, contents, msg='README.md does not match expected')
def test_test_yml(self):
with open(os.path.join(self.role_dir, 'tests', 'test.yml'), 'r') as f:
test_playbook = yaml.safe_load(f)
print(test_playbook)
self.assertEqual(len(test_playbook), 1)
self.assertEqual(test_playbook[0]['hosts'], 'localhost')
self.assertEqual(test_playbook[0]['remote_user'], 'root')
self.assertListEqual(test_playbook[0]['roles'], [self.role_name], msg='The list of roles included in the test play doesn\'t match')
| ValidRoleTests |
python | ansible__ansible | lib/ansible/vars/manager.py | {
"start": 3880,
"end": 28306
} | class ____:
_ALLOWED = frozenset(['plugins_by_group', 'groups_plugins_play', 'groups_plugins_inventory', 'groups_inventory',
'all_plugins_play', 'all_plugins_inventory', 'all_inventory'])
_PLAY_HOSTS_DEPRECATED_TAG = _tags.Deprecated(
msg='The `play_hosts` magic variable is deprecated.',
version='2.23',
deprecator=_deprecator.ANSIBLE_CORE_DEPRECATOR,
help_text='Use `ansible_play_batch` instead.',
)
    def __init__(self, loader: DataLoader | None = None, inventory: InventoryManager | None = None, version_info: dict[str, str] | None = None) -> None:
        """Initialize per-host caches, option/extra vars and the pluggable fact cache."""
        # per-host caches: set_fact/register results and include_vars results
        self._nonpersistent_fact_cache: defaultdict[str, dict] = defaultdict(dict)
        self._vars_cache: defaultdict[str, dict] = defaultdict(dict)
        self._inventory = inventory
        self._loader = loader
        self._hostvars: HostVars | None = None
        self._options_vars = load_options_vars(version_info)

        # If the basedir is specified as the empty string then it results in cwd being used.
        # This is not a safe location to load vars from.
        # (missing basedir => the False sentinel => still considered safe)
        basedir = self._options_vars.get('basedir', False)
        self.safe_basedir = bool(basedir is False or basedir)

        # load extra vars
        self._extra_vars = load_extra_vars(loader=self._loader)

        # load fact cache
        try:
            self._fact_cache = cache_loader.get(C.CACHE_PLUGIN)
        except Exception as ex:
            # bad cache plugin is not fatal error
            # fallback to builtin memory cache plugin
            display.error_as_warning(None, ex)
            self._fact_cache = cache_loader.get('ansible.builtin.memory')  # use FQCN to ensure the builtin version is used
    @property
    def extra_vars(self):
        """The extra vars loaded at construction time (read-only view)."""
        return self._extra_vars
    def set_inventory(self, inventory):
        """Replace the inventory used for subsequent get_vars() calls."""
        self._inventory = inventory
    def get_vars(
        self,
        play: Play | None = None,
        host: Host | None = None,
        task: Task | None = None,
        include_hostvars: bool = True,
        use_cache: bool = True,
        _hosts: list[str] | None = None,
        _hosts_all: list[str] | None = None,
        stage: str = 'task',
    ) -> dict[str, t.Any]:
        """
        Returns the variables, with optional "context" given via the parameters
        for the play, host, and task (which could possibly result in different
        sets of variables being returned due to the additional context).

        The order of precedence is:
        - play->roles->get_default_vars (if there is a play context)
        - group_vars_files[host] (if there is a host context)
        - host->get_vars (if there is a host context)
        - fact_cache[host] (if there is a host context)
        - play vars (if there is a play context)
        - play vars_files (if there's no host context, ignore
          file names that cannot be templated)
        - task->get_vars (if there is a task context)
        - vars_cache[host] (if there is a host context)
        - extra vars

        ``_hosts`` and ``_hosts_all`` should be considered private args, with only internal trusted callers relying
        on the functionality they provide. These arguments may be removed at a later date without a deprecation
        period and without warning.
        """
        # NOTE(review): ``use_cache`` is not referenced anywhere in this body.
        display.debug("in VariableManager get_vars()")

        all_vars: dict[str, t.Any] = dict()
        magic_variables = self._get_magic_variables(
            play=play,
            host=host,
            task=task,
            include_hostvars=include_hostvars,
            _hosts=_hosts,
            _hosts_all=_hosts_all,
        )

        def _combine_and_track(data, new_data, source):
            # FIXME: this no longer does any tracking, only a slight optimization for empty new_data
            if new_data == {}:
                return data
            return combine_vars(data, new_data)

        # default for all cases
        basedirs = []
        if self.safe_basedir:  # avoid adhoc/console loading cwd
            basedirs = [self._loader.get_basedir()]

        if play:
            # get role defaults (lowest precedence)
            for role in play.roles:
                if role.public:
                    all_vars = _combine_and_track(all_vars, role.get_default_vars(), "role '%s' defaults" % role.name)

        if task:
            # set basedirs
            if C.PLAYBOOK_VARS_ROOT == 'all':  # should be default
                basedirs = task.get_search_path()
            elif C.PLAYBOOK_VARS_ROOT in ('bottom', 'playbook_dir'):  # only option in 2.4.0
                basedirs = [task.get_search_path()[0]]
            elif C.PLAYBOOK_VARS_ROOT != 'top':
                # preserves default basedirs, only option pre 2.3
                raise AnsibleError('Unknown playbook vars logic: %s' % C.PLAYBOOK_VARS_ROOT)

            # if we have a task in this context, and that task has a role, make
            # sure it sees its defaults above any other roles, as we previously
            # (v1) made sure each task had a copy of its roles default vars
            # TODO: investigate why we need play or include_role check?
            if task._role is not None and (play or task.action in C._ACTION_INCLUDE_ROLE):
                all_vars = _combine_and_track(all_vars, task._role.get_default_vars(dep_chain=task.get_dep_chain()), "role '%s' defaults" % task._role.name)

        if host:
            # THE 'all' group and the rest of groups for a host, used below
            all_group = self._inventory.groups.get('all')
            host_groups = sort_groups([g for g in host.get_groups() if g.name != 'all'])

            # internal functions that actually do the work
            def _plugins_inventory(entities):
                """ merges all entities by inventory source """
                return get_vars_from_inventory_sources(self._loader, self._inventory._sources, entities, stage)

            def _plugins_play(entities):
                """ merges all entities adjacent to play """
                data = {}
                for path in basedirs:
                    data = _combine_and_track(data, get_vars_from_path(self._loader, path, entities, stage), "path '%s'" % path)
                return data

            # configurable functions that are sortable via config, remember to add to _ALLOWED if expanding this list
            def all_inventory():
                return all_group.get_vars()

            def all_plugins_inventory():
                return _plugins_inventory([all_group])

            def all_plugins_play():
                return _plugins_play([all_group])

            def groups_inventory():
                """ gets group vars from inventory """
                return get_group_vars(host_groups)

            def groups_plugins_inventory():
                """ gets plugin sources from inventory for groups """
                return _plugins_inventory(host_groups)

            def groups_plugins_play():
                """ gets plugin sources from play for groups """
                return _plugins_play(host_groups)

            def plugins_by_groups():
                """
                merges all plugin sources by group,
                This should be used instead, NOT in combination with the other groups_plugins* functions
                """
                # NOTE(review): this function appears unreachable and broken —
                # _ALLOWED lists 'plugins_by_group' (no trailing 's') so the
                # precedence loop below can never call it, and data[group] is
                # read before it is ever assigned (KeyError on first use).
                # Confirm upstream before relying on this path.
                data = {}
                for group in host_groups:
                    data[group] = _combine_and_track(data[group], _plugins_inventory(group), "inventory group_vars for '%s'" % group)
                    data[group] = _combine_and_track(data[group], _plugins_play(group), "playbook group_vars for '%s'" % group)
                return data

            # Merge groups as per precedence config
            # only allow to call the functions we want exposed
            for entry in C.VARIABLE_PRECEDENCE:
                if entry in self._ALLOWED:
                    display.debug('Calling %s to load vars for %s' % (entry, host.name))
                    all_vars = _combine_and_track(all_vars, locals()[entry](), "group vars, precedence entry '%s'" % entry)
                else:
                    display.warning('Ignoring unknown variable precedence entry: %s' % (entry))

            # host vars, from inventory, inventory adjacent and play adjacent via plugins
            all_vars = _combine_and_track(all_vars, host.get_vars(), "host vars for '%s'" % host)
            all_vars = _combine_and_track(all_vars, _plugins_inventory([host]), "inventory host_vars for '%s'" % host)
            all_vars = _combine_and_track(all_vars, _plugins_play([host]), "playbook host_vars for '%s'" % host)

            # finally, the facts caches for this host, if they exist
            try:
                try:
                    facts = self._fact_cache.get(host.name)
                except KeyError:
                    facts = {}

                all_vars |= namespace_facts(facts)

                inject, origin = C.config.get_config_value_and_origin('INJECT_FACTS_AS_VARS')

                # push facts to main namespace
                if inject:
                    if origin == 'default':
                        clean_top = {k: _deprecate_top_level_fact(v) for k, v in clean_facts(facts).items()}
                    else:
                        clean_top = clean_facts(facts)

                    all_vars = _combine_and_track(all_vars, clean_top, "facts")
                else:
                    # always 'promote' ansible_local, even if empty
                    all_vars = _combine_and_track(all_vars, {'ansible_local': facts.get('ansible_local', {})}, "facts")
            except KeyError:
                pass

        if play:
            all_vars = _combine_and_track(all_vars, play.get_vars(), "play vars")

            vars_files = play.get_vars_files()
            for vars_file_item in vars_files:
                # create a set of temporary vars here, which incorporate the extra
                # and magic vars so we can properly template the vars_files entries
                # NOTE: this makes them depend on host vars/facts so things like
                #       ansible_facts['os_distribution'] can be used, ala include_vars.
                #       Consider DEPRECATING this in the future, since we have include_vars ...
                temp_vars = combine_vars(all_vars, self._extra_vars)
                temp_vars = combine_vars(temp_vars, magic_variables)
                templar = TemplateEngine(loader=self._loader, variables=temp_vars)

                # we assume each item in the list is itself a list, as we
                # support "conditional includes" for vars_files, which mimics
                # the with_first_found mechanism.
                vars_file_list = vars_file_item
                if not isinstance(vars_file_list, list):
                    vars_file_list = [vars_file_list]

                # now we iterate through the (potential) files, and break out
                # as soon as we read one from the list. If none are found, we
                # raise an error, which is silently ignored at this point.
                try:
                    for vars_file in vars_file_list:
                        vars_file = templar.template(vars_file)
                        if not (isinstance(vars_file, str)):
                            raise AnsibleParserError(
                                message=f"Invalid `vars_files` value of type {native_type_name(vars_file)!r}.",
                                obj=vars_file,
                                help_text="A `vars_files` value should either be a string or list of strings.",
                            )
                        try:
                            play_search_stack = play.get_search_path()
                            found_file = self._loader.path_dwim_relative_stack(play_search_stack, 'vars', vars_file)
                            data = preprocess_vars(self._loader.load_from_file(found_file, unsafe=True, cache='vaulted', trusted_as_template=True))
                            if data is not None:
                                for item in data:
                                    all_vars = _combine_and_track(all_vars, item, "play vars_files from '%s'" % vars_file)
                            display.vvv(f"Read `vars_file` {found_file!r}.")
                            break
                        except AnsibleFileNotFound:
                            # we continue on loader failures
                            continue
                        except AnsibleParserError:
                            raise
                        except AnsibleUndefinedVariable:
                            raise
                        except Exception as ex:
                            raise AnsibleParserError(f"Error reading `vars_files` file {vars_file!r}.", obj=vars_file) from ex
                except AnsibleUndefinedVariable as ex:
                    if host is not None:
                        try:
                            facts = self._fact_cache.get(host.name)
                        except KeyError:
                            pass
                        else:
                            # only fatal if facts were actually gathered for this host
                            if facts.get('module_setup') and task is not None:
                                raise AnsibleUndefinedVariable("an undefined variable was found when attempting to template the vars_files item '%s'"
                                                               % vars_file_item, obj=vars_file_item) from ex
                    display.warning("skipping vars_file item due to an undefined variable", obj=vars_file_item)
                    continue

            # We now merge in all exported vars from all roles in the play (very high precedence)
            for role in play.roles:
                if role.public:
                    all_vars = _combine_and_track(all_vars, role.get_vars(include_params=False, only_exports=True), "role '%s' exported vars" % role.name)

        # next, we merge in the vars from the role, which will specifically
        # follow the role dependency chain, and then we merge in the tasks
        # vars (which will look at parent blocks/task includes)
        if task:
            if task._role:
                all_vars = _combine_and_track(all_vars, task._role.get_vars(task.get_dep_chain(), include_params=False, only_exports=False),
                                              "role '%s' all vars" % task._role.name)
            all_vars = _combine_and_track(all_vars, task.get_vars(), "task vars")

        # next, we merge in the vars cache (include vars) and nonpersistent
        # facts cache (set_fact/register), in that order
        if host:
            # include_vars non-persistent cache
            all_vars = _combine_and_track(all_vars, self._vars_cache.get(host.get_name(), dict()), "include_vars")
            # fact non-persistent cache
            all_vars = _combine_and_track(all_vars, self._nonpersistent_fact_cache.get(host.name, dict()), "set_fact")

        # next, we merge in role params and task include params
        if task:
            # special case for include tasks, where the include params
            # may be specified in the vars field for the task, which should
            # have higher precedence than the vars/np facts above
            if task._role:
                all_vars = _combine_and_track(all_vars, task._role.get_role_params(task.get_dep_chain()), "role params")
            all_vars = _combine_and_track(all_vars, task.get_include_params(), "include params")

        # extra vars
        all_vars = _combine_and_track(all_vars, self._extra_vars, "extra vars")

        # before we add 'reserved vars', check we didn't add any reserved vars
        warn_if_reserved(all_vars)

        # magic variables
        all_vars = _combine_and_track(all_vars, magic_variables, "magic vars")

        # special case for the 'environment' magic variable, as someone
        # may have set it as a variable and we don't want to stomp on it
        if task:
            all_vars['environment'] = task.environment

        # 'vars' magic var
        if task or play:
            all_vars['vars'] = _DEPRECATE_VARS.tag({})
            for k, v in all_vars.items():
                # has to be copy, otherwise recursive ref
                all_vars['vars'][k] = _DEPRECATE_VARS.tag(v)

        display.debug("done with get_vars()")

        return all_vars
def _facts_gathered_for_host(self, hostname) -> bool:
try:
facts = self._fact_cache.get(hostname)
except KeyError:
facts = {}
return bool(facts.get('_ansible_facts_gathered', False))
def _get_magic_variables(self, play, host, task, include_hostvars, _hosts=None, _hosts_all=None):
"""
Returns a dictionary of so-called "magic" variables in Ansible,
which are special variables we set internally for use.
"""
variables = {}
variables['playbook_dir'] = self._loader.get_basedir()
variables['ansible_playbook_python'] = sys.executable
variables['ansible_config_file'] = C.CONFIG_FILE
if play:
# using role_cache as play.roles only has 'public' roles for vars exporting
dependency_role_names = list({d.get_name() for r in play.roles for d in r.get_all_dependencies()})
play_role_names = [r.get_name() for r in play.roles]
# ansible_role_names includes all role names, dependent or directly referenced by the play
variables['ansible_role_names'] = list(set(dependency_role_names + play_role_names))
# ansible_play_role_names includes the names of all roles directly referenced by this play
# roles that are implicitly referenced via dependencies are not listed.
variables['ansible_play_role_names'] = play_role_names
# ansible_dependent_role_names includes the names of all roles that are referenced via dependencies
# dependencies that are also explicitly named as roles are included in this list
variables['ansible_dependent_role_names'] = dependency_role_names
# TODO: data tagging!!! DEPRECATED: role_names should be deprecated in favor of ansible_ prefixed ones
variables['role_names'] = variables['ansible_play_role_names']
variables['ansible_play_name'] = play.get_name()
if task:
if task._role:
variables['role_name'] = task._role.get_name(include_role_fqcn=False)
variables['role_path'] = task._role._role_path
variables['role_uuid'] = str(task._role._uuid)
variables['ansible_collection_name'] = task._role._role_collection
variables['ansible_role_name'] = task._role.get_name()
if self._inventory is not None:
variables['groups'] = self._inventory.get_groups_dict()
if play:
# add the list of hosts in the play, as adjusted for limit/filters
if not _hosts_all:
if not play.finalized and TemplateEngine().is_template(play.hosts):
pattern = 'all'
else:
pattern = play.hosts or 'all'
_hosts_all = [h.name for h in self._inventory.get_hosts(pattern=pattern, ignore_restrictions=True)]
if not _hosts:
_hosts = [h.name for h in self._inventory.get_hosts()]
variables['ansible_play_hosts_all'] = _hosts_all[:]
variables['ansible_play_hosts'] = [x for x in variables['ansible_play_hosts_all'] if x not in play._removed_hosts]
variables['ansible_play_batch'] = [x for x in _hosts if x not in play._removed_hosts]
# use a static tag instead of `deprecate_value` to avoid stackwalk in a hot code path
variables['play_hosts'] = self._PLAY_HOSTS_DEPRECATED_TAG.tag(variables['ansible_play_batch'])
# Set options vars
for option, option_value in self._options_vars.items():
variables[option] = option_value
if self._hostvars is not None and include_hostvars:
variables['hostvars'] = self._hostvars
return variables
def get_delegated_vars_and_hostname(self, templar, task, variables):
"""Get the delegated_vars for an individual task invocation, which may be in the context
of an individual loop iteration.
Not used directly be VariableManager, but used primarily within TaskExecutor
"""
delegated_vars = {}
delegated_host_name = ... # sentinel value distinct from empty/None, which are errors
if task.delegate_to:
try:
delegated_host_name = templar.template(task.delegate_to)
except AnsibleValueOmittedError:
pass
# bypass for unspecified value/omit
if delegated_host_name is ...:
return delegated_vars, None
if not delegated_host_name:
raise AnsibleError('Empty hostname produced from delegate_to: "%s"' % task.delegate_to)
delegated_host = self._inventory.get_host(delegated_host_name)
if delegated_host is None:
for h in self._inventory.get_hosts(ignore_limits=True, ignore_restrictions=True):
# check if the address matches, or if both the delegated_to host
# and the current host are in the list of localhost aliases
if h.address == delegated_host_name:
delegated_host = h
break
else:
delegated_host = Host(name=delegated_host_name)
delegated_vars['ansible_delegated_vars'] = {
delegated_host_name: self.get_vars(
play=task.get_play(),
host=delegated_host,
task=task,
include_hostvars=True,
)
}
delegated_vars['ansible_delegated_vars'][delegated_host_name]['inventory_hostname'] = variables.get('inventory_hostname')
return delegated_vars, delegated_host_name
def clear_facts(self, hostname):
"""
Clears the facts for a host
"""
try:
self._fact_cache.delete(hostname)
except KeyError:
pass
def set_host_facts(self, host, facts):
"""
Sets or updates the given facts for a host in the fact cache.
"""
if not isinstance(facts, Mapping):
raise AnsibleAssertionError("the type of 'facts' to set for host_facts should be a Mapping but is a %s" % type(facts))
warn_if_reserved(facts)
try:
host_cache = self._fact_cache.get(host)
except KeyError:
# We get to set this as new
host_cache = facts
else:
if not isinstance(host_cache, MutableMapping):
raise TypeError('The object retrieved for {0} must be a MutableMapping but was'
' a {1}'.format(host, type(host_cache)))
# Update the existing facts
host_cache |= facts
# Save the facts back to the backing store
self._fact_cache.set(host, host_cache)
def set_nonpersistent_facts(self, host, facts):
"""
Sets or updates the given facts for a host in the fact cache.
"""
if not isinstance(facts, Mapping):
raise AnsibleAssertionError("the type of 'facts' to set for nonpersistent_facts should be a Mapping but is a %s" % type(facts))
warn_if_reserved(facts)
try:
self._nonpersistent_fact_cache[host] |= facts
except KeyError:
self._nonpersistent_fact_cache[host] = facts
def set_host_variable(self, host, varname, value):
"""
Sets a value in the vars_cache for a host.
"""
warn_if_reserved([varname])
if host not in self._vars_cache:
self._vars_cache[host] = dict()
if varname in self._vars_cache[host] and isinstance(self._vars_cache[host][varname], MutableMapping) and isinstance(value, MutableMapping):
self._vars_cache[host] = combine_vars(self._vars_cache[host], {varname: value})
else:
self._vars_cache[host][varname] = value
| VariableManager |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/protocol19.py | {
"start": 365,
"end": 413
} | class ____(Protocol):
y: int
@dataclass
| ProtoB |
python | facebook__pyre-check | source/interprocedural_analyses/taint/test/integration/class_interval.py | {
"start": 1144,
"end": 1200
} | class ____(A2):
def m0(self, x):
self.m1(x)
| B2 |
python | django__django | django/db/backends/base/operations.py | {
"start": 364,
"end": 31833
} | class ____:
"""
Encapsulate backend-specific differences, such as the way a backend
performs ordering or calculates the ID of a recently-inserted row.
"""
compiler_module = "django.db.models.sql.compiler"
# Integer field safe ranges by `internal_type` as documented
# in docs/ref/models/fields.txt.
integer_field_ranges = {
"SmallIntegerField": (-32768, 32767),
"IntegerField": (-2147483648, 2147483647),
"BigIntegerField": (-9223372036854775808, 9223372036854775807),
"PositiveBigIntegerField": (0, 9223372036854775807),
"PositiveSmallIntegerField": (0, 32767),
"PositiveIntegerField": (0, 2147483647),
"SmallAutoField": (-32768, 32767),
"AutoField": (-2147483648, 2147483647),
"BigAutoField": (-9223372036854775808, 9223372036854775807),
}
set_operators = {
"union": "UNION",
"intersection": "INTERSECT",
"difference": "EXCEPT",
}
# Mapping of Field.get_internal_type() (typically the model field's class
# name) to the data type to use for the Cast() function, if different from
# DatabaseWrapper.data_types.
cast_data_types = {}
# CharField data type if the max_length argument isn't provided.
cast_char_field_without_max_length = None
# Start and end points for window expressions.
PRECEDING = "PRECEDING"
FOLLOWING = "FOLLOWING"
UNBOUNDED_PRECEDING = "UNBOUNDED " + PRECEDING
UNBOUNDED_FOLLOWING = "UNBOUNDED " + FOLLOWING
CURRENT_ROW = "CURRENT ROW"
# Prefix for EXPLAIN queries, or None EXPLAIN isn't supported.
explain_prefix = None
def __init__(self, connection):
self.connection = connection
self._cache = None
def __del__(self):
del self.connection
def autoinc_sql(self, table, column):
"""
Return any SQL needed to support auto-incrementing primary keys, or
None if no SQL is necessary.
This SQL is executed when a table is created.
"""
return None
def bulk_batch_size(self, fields, objs):
"""
Return the maximum allowed batch size for the backend. The fields
are the fields going to be inserted in the batch, the objs contains
all the objects to be inserted.
"""
return len(objs)
def format_for_duration_arithmetic(self, sql):
raise NotImplementedError(
"subclasses of BaseDatabaseOperations may require a "
"format_for_duration_arithmetic() method."
)
def cache_key_culling_sql(self):
"""
Return an SQL query that retrieves the first cache key greater than the
n smallest.
This is used by the 'db' cache backend to determine where to start
culling.
"""
cache_key = self.quote_name("cache_key")
return f"SELECT {cache_key} FROM %s ORDER BY {cache_key} LIMIT 1 OFFSET %%s"
def unification_cast_sql(self, output_field):
"""
Given a field instance, return the SQL that casts the result of a union
to that type. The resulting string should contain a '%s' placeholder
for the expression being cast.
"""
return "%s"
def date_extract_sql(self, lookup_type, sql, params):
"""
Given a lookup_type of 'year', 'month', or 'day', return the SQL that
extracts a value from the given date field field_name.
"""
raise NotImplementedError(
"subclasses of BaseDatabaseOperations may require a date_extract_sql() "
"method"
)
def date_trunc_sql(self, lookup_type, sql, params, tzname=None):
"""
Given a lookup_type of 'year', 'month', or 'day', return the SQL that
truncates the given date or datetime field field_name to a date object
with only the given specificity.
If `tzname` is provided, the given value is truncated in a specific
timezone.
"""
raise NotImplementedError(
"subclasses of BaseDatabaseOperations may require a date_trunc_sql() "
"method."
)
def datetime_cast_date_sql(self, sql, params, tzname):
"""
Return the SQL to cast a datetime value to date value.
"""
raise NotImplementedError(
"subclasses of BaseDatabaseOperations may require a "
"datetime_cast_date_sql() method."
)
def datetime_cast_time_sql(self, sql, params, tzname):
"""
Return the SQL to cast a datetime value to time value.
"""
raise NotImplementedError(
"subclasses of BaseDatabaseOperations may require a "
"datetime_cast_time_sql() method"
)
def datetime_extract_sql(self, lookup_type, sql, params, tzname):
"""
Given a lookup_type of 'year', 'month', 'day', 'hour', 'minute', or
'second', return the SQL that extracts a value from the given
datetime field field_name.
"""
raise NotImplementedError(
"subclasses of BaseDatabaseOperations may require a datetime_extract_sql() "
"method"
)
def datetime_trunc_sql(self, lookup_type, sql, params, tzname):
"""
Given a lookup_type of 'year', 'month', 'day', 'hour', 'minute', or
'second', return the SQL that truncates the given datetime field
field_name to a datetime object with only the given specificity.
"""
raise NotImplementedError(
"subclasses of BaseDatabaseOperations may require a datetime_trunc_sql() "
"method"
)
def time_trunc_sql(self, lookup_type, sql, params, tzname=None):
"""
Given a lookup_type of 'hour', 'minute' or 'second', return the SQL
that truncates the given time or datetime field field_name to a time
object with only the given specificity.
If `tzname` is provided, the given value is truncated in a specific
timezone.
"""
raise NotImplementedError(
"subclasses of BaseDatabaseOperations may require a time_trunc_sql() method"
)
def time_extract_sql(self, lookup_type, sql, params):
"""
Given a lookup_type of 'hour', 'minute', or 'second', return the SQL
that extracts a value from the given time field field_name.
"""
return self.date_extract_sql(lookup_type, sql, params)
def deferrable_sql(self):
"""
Return the SQL to make a constraint "initially deferred" during a
CREATE TABLE statement.
"""
return ""
def distinct_sql(self, fields, params):
"""
Return an SQL DISTINCT clause which removes duplicate rows from the
result set. If any fields are given, only check the given fields for
duplicates.
"""
if fields:
raise NotSupportedError(
"DISTINCT ON fields is not supported by this database backend"
)
else:
return ["DISTINCT"], []
def force_group_by(self):
"""
Return a GROUP BY clause to use with a HAVING clause when no grouping
is specified.
"""
return []
def force_no_ordering(self):
"""
Return a list used in the "ORDER BY" clause to force no ordering at
all. Return an empty list to include nothing in the ordering.
"""
return []
def for_update_sql(self, nowait=False, skip_locked=False, of=(), no_key=False):
"""
Return the FOR UPDATE SQL clause to lock rows for an update operation.
"""
return "FOR%s UPDATE%s%s%s" % (
" NO KEY" if no_key else "",
" OF %s" % ", ".join(of) if of else "",
" NOWAIT" if nowait else "",
" SKIP LOCKED" if skip_locked else "",
)
def _get_limit_offset_params(self, low_mark, high_mark):
offset = low_mark or 0
if high_mark is not None:
return (high_mark - offset), offset
elif offset:
return self.connection.ops.no_limit_value(), offset
return None, offset
def limit_offset_sql(self, low_mark, high_mark):
"""Return LIMIT/OFFSET SQL clause."""
limit, offset = self._get_limit_offset_params(low_mark, high_mark)
return " ".join(
sql
for sql in (
("LIMIT %d" % limit) if limit else None,
("OFFSET %d" % offset) if offset else None,
)
if sql
)
def fk_on_delete_sql(self, operation):
"""
Return the SQL to make an ON DELETE statement.
"""
if operation in ["CASCADE", "SET NULL", "SET DEFAULT"]:
return f" ON DELETE {operation}"
if operation == "":
return ""
raise NotImplementedError(f"ON DELETE {operation} is not supported.")
def bulk_insert_sql(self, fields, placeholder_rows):
placeholder_rows_sql = (", ".join(row) for row in placeholder_rows)
values_sql = ", ".join([f"({sql})" for sql in placeholder_rows_sql])
return f"VALUES {values_sql}"
def last_executed_query(self, cursor, sql, params):
"""
Return a string of the query last executed by the given cursor, with
placeholders replaced with actual values.
`sql` is the raw query containing placeholders and `params` is the
sequence of parameters. These are used by default, but this method
exists for database backends to provide a better implementation
according to their own quoting schemes.
"""
# Convert params to contain string values.
def to_string(s):
return force_str(s, strings_only=True, errors="replace")
if isinstance(params, (list, tuple)):
u_params = tuple(to_string(val) for val in params)
elif params is None:
u_params = ()
else:
u_params = {to_string(k): to_string(v) for k, v in params.items()}
return "QUERY = %r - PARAMS = %r" % (sql, u_params)
def last_insert_id(self, cursor, table_name, pk_name):
"""
Given a cursor object that has just performed an INSERT statement into
a table that has an auto-incrementing ID, return the newly created ID.
`pk_name` is the name of the primary-key column.
"""
return cursor.lastrowid
def lookup_cast(self, lookup_type, internal_type=None):
"""
Return the string to use in a query when performing lookups
("contains", "like", etc.). It should contain a '%s' placeholder for
the column being searched against.
"""
return "%s"
def max_in_list_size(self):
"""
Return the maximum number of items that can be passed in a single 'IN'
list condition, or None if the backend does not impose a limit.
"""
return None
def max_name_length(self):
"""
Return the maximum length of table and column names, or None if there
is no limit.
"""
return None
def no_limit_value(self):
"""
Return the value to use for the LIMIT when we are wanting "LIMIT
infinity". Return None if the limit clause can be omitted in this case.
"""
raise NotImplementedError(
"subclasses of BaseDatabaseOperations may require a no_limit_value() method"
)
def pk_default_value(self):
"""
Return the value to use during an INSERT statement to specify that
the field should use its default value.
"""
return "DEFAULT"
def prepare_sql_script(self, sql):
"""
Take an SQL script that may contain multiple lines and return a list
of statements to feed to successive cursor.execute() calls.
Since few databases are able to process raw SQL scripts in a single
cursor.execute() call and PEP 249 doesn't talk about this use case,
the default implementation is conservative.
"""
return [
sqlparse.format(statement, strip_comments=True)
for statement in sqlparse.split(sql)
if statement
]
def process_clob(self, value):
"""
Return the value of a CLOB column, for backends that return a locator
object that requires additional processing.
"""
return value
def returning_columns(self, fields):
"""
For backends that support returning columns as part of an insert or
update query, return the SQL and params to append to the query.
The returned fragment should contain a format string to hold the
appropriate column.
"""
if not fields:
return "", ()
columns = [
"%s.%s"
% (
self.quote_name(field.model._meta.db_table),
self.quote_name(field.column),
)
for field in fields
]
return "RETURNING %s" % ", ".join(columns), ()
def fetch_returned_rows(self, cursor, returning_params):
"""
Given a cursor object for a DML query with a RETURNING statement,
return the selected returning rows of tuples.
"""
return cursor.fetchall()
def compiler(self, compiler_name):
"""
Return the SQLCompiler class corresponding to the given name,
in the namespace corresponding to the `compiler_module` attribute
on this backend.
"""
if self._cache is None:
self._cache = import_module(self.compiler_module)
return getattr(self._cache, compiler_name)
def quote_name(self, name):
"""
Return a quoted version of the given table, index, or column name. Do
not quote the given name if it's already been quoted.
"""
raise NotImplementedError(
"subclasses of BaseDatabaseOperations may require a quote_name() method"
)
def regex_lookup(self, lookup_type):
"""
Return the string to use in a query when performing regular expression
lookups (using "regex" or "iregex"). It should contain a '%s'
placeholder for the column being searched against.
If the feature is not supported (or part of it is not supported), raise
NotImplementedError.
"""
raise NotImplementedError(
"subclasses of BaseDatabaseOperations may require a regex_lookup() method"
)
def savepoint_create_sql(self, sid):
"""
Return the SQL for starting a new savepoint. Only required if the
"uses_savepoints" feature is True. The "sid" parameter is a string
for the savepoint id.
"""
return "SAVEPOINT %s" % self.quote_name(sid)
def savepoint_commit_sql(self, sid):
"""
Return the SQL for committing the given savepoint.
"""
return "RELEASE SAVEPOINT %s" % self.quote_name(sid)
def savepoint_rollback_sql(self, sid):
"""
Return the SQL for rolling back the given savepoint.
"""
return "ROLLBACK TO SAVEPOINT %s" % self.quote_name(sid)
def set_time_zone_sql(self):
"""
Return the SQL that will set the connection's time zone.
Return '' if the backend doesn't support time zones.
"""
return ""
def sql_flush(self, style, tables, *, reset_sequences=False, allow_cascade=False):
"""
Return a list of SQL statements required to remove all data from
the given database tables (without actually removing the tables
themselves).
The `style` argument is a Style object as returned by either
color_style() or no_style() in django.core.management.color.
If `reset_sequences` is True, the list includes SQL statements required
to reset the sequences.
The `allow_cascade` argument determines whether truncation may cascade
to tables with foreign keys pointing the tables being truncated.
PostgreSQL requires a cascade even if these tables are empty.
"""
raise NotImplementedError(
"subclasses of BaseDatabaseOperations must provide an sql_flush() method"
)
def execute_sql_flush(self, sql_list):
"""Execute a list of SQL statements to flush the database."""
with transaction.atomic(
using=self.connection.alias,
savepoint=self.connection.features.can_rollback_ddl,
):
with self.connection.cursor() as cursor:
for sql in sql_list:
cursor.execute(sql)
def sequence_reset_by_name_sql(self, style, sequences):
"""
Return a list of the SQL statements required to reset sequences
passed in `sequences`.
The `style` argument is a Style object as returned by either
color_style() or no_style() in django.core.management.color.
"""
return []
def sequence_reset_sql(self, style, model_list):
"""
Return a list of the SQL statements required to reset sequences for
the given models.
The `style` argument is a Style object as returned by either
color_style() or no_style() in django.core.management.color.
"""
return [] # No sequence reset required by default.
def start_transaction_sql(self):
"""Return the SQL statement required to start a transaction."""
return "BEGIN;"
def end_transaction_sql(self, success=True):
"""Return the SQL statement required to end a transaction."""
if not success:
return "ROLLBACK;"
return "COMMIT;"
def tablespace_sql(self, tablespace, inline=False):
"""
Return the SQL that will be used in a query to define the tablespace.
Return '' if the backend doesn't support tablespaces.
If `inline` is True, append the SQL to a row; otherwise append it to
the entire CREATE TABLE or CREATE INDEX statement.
"""
return ""
def prep_for_like_query(self, x):
"""Prepare a value for use in a LIKE query."""
return str(x).replace("\\", "\\\\").replace("%", r"\%").replace("_", r"\_")
# Same as prep_for_like_query(), but called for "iexact" matches, which
# need not necessarily be implemented using "LIKE" in the backend.
prep_for_iexact_query = prep_for_like_query
def validate_autopk_value(self, value):
"""
Certain backends do not accept some values for "serial" fields
(for example zero in MySQL). Raise a ValueError if the value is
invalid, otherwise return the validated value.
"""
return value
def adapt_unknown_value(self, value):
"""
Transform a value to something compatible with the backend driver.
This method only depends on the type of the value. It's designed for
cases where the target type isn't known, such as .raw() SQL queries.
As a consequence it may not work perfectly in all circumstances.
"""
if isinstance(value, datetime.datetime): # must be before date
return self.adapt_datetimefield_value(value)
elif isinstance(value, datetime.date):
return self.adapt_datefield_value(value)
elif isinstance(value, datetime.time):
return self.adapt_timefield_value(value)
elif isinstance(value, decimal.Decimal):
return self.adapt_decimalfield_value(value)
else:
return value
def adapt_integerfield_value(self, value, internal_type):
return value
def adapt_datefield_value(self, value):
"""
Transform a date value to an object compatible with what is expected
by the backend driver for date columns.
"""
if value is None:
return None
return str(value)
def adapt_datetimefield_value(self, value):
"""
Transform a datetime value to an object compatible with what is
expected by the backend driver for datetime columns.
"""
if value is None:
return None
return str(value)
def adapt_durationfield_value(self, value):
"""
Transform a timedelta value into an object compatible with what is
expected by the backend driver for duration columns (by default,
an integer of microseconds).
"""
if value is None:
return None
return duration_microseconds(value)
def adapt_timefield_value(self, value):
"""
Transform a time value to an object compatible with what is expected
by the backend driver for time columns.
"""
if value is None:
return None
if timezone.is_aware(value):
raise ValueError("Django does not support timezone-aware times.")
return str(value)
def adapt_decimalfield_value(self, value, max_digits=None, decimal_places=None):
"""
Transform a decimal.Decimal value to an object compatible with what is
expected by the backend driver for decimal (numeric) columns.
"""
return value
def adapt_ipaddressfield_value(self, value):
"""
Transform a string representation of an IP address into the expected
type for the backend driver.
"""
return value or None
def adapt_json_value(self, value, encoder):
return json.dumps(value, cls=encoder)
def year_lookup_bounds_for_date_field(self, value, iso_year=False):
"""
Return a two-elements list with the lower and upper bound to be used
with a BETWEEN operator to query a DateField value using a year
lookup.
`value` is an int, containing the looked-up year.
If `iso_year` is True, return bounds for ISO-8601 week-numbering years.
"""
if iso_year:
first = datetime.date.fromisocalendar(value, 1, 1)
second = datetime.date.fromisocalendar(
value + 1, 1, 1
) - datetime.timedelta(days=1)
else:
first = datetime.date(value, 1, 1)
second = datetime.date(value, 12, 31)
first = self.adapt_datefield_value(first)
second = self.adapt_datefield_value(second)
return [first, second]
def year_lookup_bounds_for_datetime_field(self, value, iso_year=False):
"""
Return a two-elements list with the lower and upper bound to be used
with a BETWEEN operator to query a DateTimeField value using a year
lookup.
`value` is an int, containing the looked-up year.
If `iso_year` is True, return bounds for ISO-8601 week-numbering years.
"""
if iso_year:
first = datetime.datetime.fromisocalendar(value, 1, 1)
second = datetime.datetime.fromisocalendar(
value + 1, 1, 1
) - datetime.timedelta(microseconds=1)
else:
first = datetime.datetime(value, 1, 1)
second = datetime.datetime(value, 12, 31, 23, 59, 59, 999999)
if settings.USE_TZ:
tz = timezone.get_current_timezone()
first = timezone.make_aware(first, tz)
second = timezone.make_aware(second, tz)
first = self.adapt_datetimefield_value(first)
second = self.adapt_datetimefield_value(second)
return [first, second]
def get_db_converters(self, expression):
"""
Return a list of functions needed to convert field data.
Some field types on some backends do not provide data in the correct
format, this is the hook for converter functions.
"""
return []
def convert_durationfield_value(self, value, expression, connection):
if value is not None:
return datetime.timedelta(0, 0, value)
def check_expression_support(self, expression):
"""
Check that the backend supports the provided expression.
This is used on specific backends to rule out known expressions
that have problematic or nonexistent implementations. If the
expression has a known problem, the backend should raise
NotSupportedError.
"""
pass
def conditional_expression_supported_in_where_clause(self, expression):
"""
Return True, if the conditional expression is supported in the WHERE
clause.
"""
return True
def combine_expression(self, connector, sub_expressions):
"""
Combine a list of subexpressions into a single expression, using
the provided connecting operator. This is required because operators
can vary between backends (e.g., Oracle with %% and &) and between
subexpression types (e.g., date expressions).
"""
conn = " %s " % connector
return conn.join(sub_expressions)
def combine_duration_expression(self, connector, sub_expressions):
return self.combine_expression(connector, sub_expressions)
def binary_placeholder_sql(self, value):
"""
Some backends require special syntax to insert binary content (MySQL
for example uses '_binary %s').
"""
return "%s"
def modify_insert_params(self, placeholder, params):
"""
Allow modification of insert parameters. Needed for Oracle Spatial
backend due to #10888.
"""
return params
def integer_field_range(self, internal_type):
"""
Given an integer field internal type (e.g. 'PositiveIntegerField'),
return a tuple of the (min_value, max_value) form representing the
range of the column type bound to the field.
"""
return self.integer_field_ranges[internal_type]
def subtract_temporals(self, internal_type, lhs, rhs):
if self.connection.features.supports_temporal_subtraction:
lhs_sql, lhs_params = lhs
rhs_sql, rhs_params = rhs
return "(%s - %s)" % (lhs_sql, rhs_sql), (*lhs_params, *rhs_params)
raise NotSupportedError(
"This backend does not support %s subtraction." % internal_type
)
def window_frame_value(self, value):
if isinstance(value, int):
if value == 0:
return self.CURRENT_ROW
elif value < 0:
return "%d %s" % (abs(value), self.PRECEDING)
else:
return "%d %s" % (value, self.FOLLOWING)
def window_frame_rows_start_end(self, start=None, end=None):
"""
Return SQL for start and end points in an OVER clause window frame.
"""
if isinstance(start, int) and isinstance(end, int) and start > end:
raise ValueError("start cannot be greater than end.")
if start is not None and not isinstance(start, int):
raise ValueError(
f"start argument must be an integer, zero, or None, but got '{start}'."
)
if end is not None and not isinstance(end, int):
raise ValueError(
f"end argument must be an integer, zero, or None, but got '{end}'."
)
start_ = self.window_frame_value(start) or self.UNBOUNDED_PRECEDING
end_ = self.window_frame_value(end) or self.UNBOUNDED_FOLLOWING
return start_, end_
def window_frame_range_start_end(self, start=None, end=None):
if (start is not None and not isinstance(start, int)) or (
isinstance(start, int) and start > 0
):
raise ValueError(
"start argument must be a negative integer, zero, or None, "
"but got '%s'." % start
)
if (end is not None and not isinstance(end, int)) or (
isinstance(end, int) and end < 0
):
raise ValueError(
"end argument must be a positive integer, zero, or None, but got '%s'."
% end
)
start_ = self.window_frame_value(start) or self.UNBOUNDED_PRECEDING
end_ = self.window_frame_value(end) or self.UNBOUNDED_FOLLOWING
features = self.connection.features
if features.only_supports_unbounded_with_preceding_and_following and (
(start and start < 0) or (end and end > 0)
):
raise NotSupportedError(
"%s only supports UNBOUNDED together with PRECEDING and "
"FOLLOWING." % self.connection.display_name
)
return start_, end_
def explain_query_prefix(self, format=None, **options):
if not self.connection.features.supports_explaining_query_execution:
raise NotSupportedError(
"This backend does not support explaining query execution."
)
if format:
supported_formats = self.connection.features.supported_explain_formats
normalized_format = format.upper()
if normalized_format not in supported_formats:
msg = "%s is not a recognized format." % normalized_format
if supported_formats:
msg += " Allowed formats: %s" % ", ".join(sorted(supported_formats))
else:
msg += (
f" {self.connection.display_name} does not support any formats."
)
raise ValueError(msg)
if options:
raise ValueError("Unknown options: %s" % ", ".join(sorted(options.keys())))
return self.explain_prefix
def insert_statement(self, on_conflict=None):
return "INSERT INTO"
def on_conflict_suffix_sql(self, fields, on_conflict, update_fields, unique_fields):
return ""
def prepare_join_on_clause(self, lhs_table, lhs_field, rhs_table, rhs_field):
lhs_expr = Col(lhs_table, lhs_field)
rhs_expr = Col(rhs_table, rhs_field)
return lhs_expr, rhs_expr
def format_debug_sql(self, sql):
# Hook for backends (e.g. NoSQL) to customize formatting.
return sqlparse.format(sql, reindent=True, keyword_case="upper")
def format_json_path_numeric_index(self, num):
"""
Hook for backends to customize array indexing in JSON paths.
"""
return "[%s]" % num
def compile_json_path(self, key_transforms, include_root=True):
"""
Hook for backends to customize all aspects of JSON path construction.
"""
path = ["$"] if include_root else []
for key_transform in key_transforms:
try:
num = int(key_transform)
except ValueError: # Non-integer.
path.append(".")
path.append(json.dumps(key_transform))
else:
if (
num < 0
and not self.connection.features.supports_json_negative_indexing
):
raise NotSupportedError(
"Using negative JSON array indices is not supported on this "
"database backend."
)
path.append(self.format_json_path_numeric_index(num))
return "".join(path)
| BaseDatabaseOperations |
python | numpy__numpy | numpy/ma/tests/test_core.py | {
"start": 194464,
"end": 200761
} | class ____:
def _create_data(self):
ilist = [1, 2, 3, 4, 5]
flist = [1.1, 2.2, 3.3, 4.4, 5.5]
slist = ['one', 'two', 'three', 'four', 'five']
ddtype = [('a', int), ('b', float), ('c', '|S8')]
mdtype = [('a', bool), ('b', bool), ('c', bool)]
mask = [0, 1, 0, 0, 1]
base = array(list(zip(ilist, flist, slist)), mask=mask, dtype=ddtype)
return {"base": base, "mask": mask, "ddtype": ddtype, "mdtype": mdtype}
def test_set_records_masks(self):
data = self._create_data()
base = data['base']
mdtype = data['mdtype']
# Set w/ nomask or masked
base.mask = nomask
assert_equal_records(base._mask, np.zeros(base.shape, dtype=mdtype))
base.mask = masked
assert_equal_records(base._mask, np.ones(base.shape, dtype=mdtype))
# Set w/ simple boolean
base.mask = False
assert_equal_records(base._mask, np.zeros(base.shape, dtype=mdtype))
base.mask = True
assert_equal_records(base._mask, np.ones(base.shape, dtype=mdtype))
# Set w/ list
base.mask = [0, 0, 0, 1, 1]
assert_equal_records(base._mask,
np.array([(x, x, x) for x in [0, 0, 0, 1, 1]],
dtype=mdtype))
def test_set_record_element(self):
# Check setting an element of a record)
base = self._create_data()['base']
(base_a, base_b, base_c) = (base['a'], base['b'], base['c'])
base[0] = (pi, pi, 'pi')
assert_equal(base_a.dtype, int)
assert_equal(base_a._data, [3, 2, 3, 4, 5])
assert_equal(base_b.dtype, float)
assert_equal(base_b._data, [pi, 2.2, 3.3, 4.4, 5.5])
assert_equal(base_c.dtype, '|S8')
assert_equal(base_c._data,
[b'pi', b'two', b'three', b'four', b'five'])
def test_set_record_slice(self):
base = self._create_data()['base']
(base_a, base_b, base_c) = (base['a'], base['b'], base['c'])
base[:3] = (pi, pi, 'pi')
assert_equal(base_a.dtype, int)
assert_equal(base_a._data, [3, 3, 3, 4, 5])
assert_equal(base_b.dtype, float)
assert_equal(base_b._data, [pi, pi, pi, 4.4, 5.5])
assert_equal(base_c.dtype, '|S8')
assert_equal(base_c._data,
[b'pi', b'pi', b'pi', b'four', b'five'])
def test_mask_element(self):
"Check record access"
base = self._create_data()['base']
base[0] = masked
for n in ('a', 'b', 'c'):
assert_equal(base[n].mask, [1, 1, 0, 0, 1])
assert_equal(base[n]._data, base._data[n])
def test_getmaskarray(self):
# Test getmaskarray on flexible dtype
ndtype = [('a', int), ('b', float)]
test = empty(3, dtype=ndtype)
assert_equal(getmaskarray(test),
np.array([(0, 0), (0, 0), (0, 0)],
dtype=[('a', '|b1'), ('b', '|b1')]))
test[:] = masked
assert_equal(getmaskarray(test),
np.array([(1, 1), (1, 1), (1, 1)],
dtype=[('a', '|b1'), ('b', '|b1')]))
def test_view(self):
# Test view w/ flexible dtype
iterator = list(zip(np.arange(10), np.random.rand(10)))
data = np.array(iterator)
a = array(iterator, dtype=[('a', float), ('b', float)])
a.mask[0] = (1, 0)
controlmask = np.array([1] + 19 * [0], dtype=bool)
# Transform globally to simple dtype
test = a.view(float)
assert_equal(test, data.ravel())
assert_equal(test.mask, controlmask)
# Transform globally to dty
test = a.view((float, 2))
assert_equal(test, data)
assert_equal(test.mask, controlmask.reshape(-1, 2))
def test_getitem(self):
ndtype = [('a', float), ('b', float)]
a = array(list(zip(np.random.rand(10), np.arange(10))), dtype=ndtype)
a.mask = np.array(list(zip([0, 0, 0, 0, 0, 0, 0, 0, 1, 1],
[1, 0, 0, 0, 0, 0, 0, 0, 1, 0])),
dtype=[('a', bool), ('b', bool)])
def _test_index(i):
assert_equal(type(a[i]), mvoid)
assert_equal_records(a[i]._data, a._data[i])
assert_equal_records(a[i]._mask, a._mask[i])
assert_equal(type(a[i, ...]), MaskedArray)
assert_equal_records(a[i, ...]._data, a._data[i, ...])
assert_equal_records(a[i, ...]._mask, a._mask[i, ...])
_test_index(1) # No mask
_test_index(0) # One element masked
_test_index(-2) # All element masked
def test_setitem(self):
# Issue 4866: check that one can set individual items in [record][col]
# and [col][record] order
ndtype = np.dtype([('a', float), ('b', int)])
ma = np.ma.MaskedArray([(1.0, 1), (2.0, 2)], dtype=ndtype)
ma['a'][1] = 3.0
assert_equal(ma['a'], np.array([1.0, 3.0]))
ma[1]['a'] = 4.0
assert_equal(ma['a'], np.array([1.0, 4.0]))
# Issue 2403
mdtype = np.dtype([('a', bool), ('b', bool)])
# soft mask
control = np.array([(False, True), (True, True)], dtype=mdtype)
a = np.ma.masked_all((2,), dtype=ndtype)
a['a'][0] = 2
assert_equal(a.mask, control)
a = np.ma.masked_all((2,), dtype=ndtype)
a[0]['a'] = 2
assert_equal(a.mask, control)
# hard mask
control = np.array([(True, True), (True, True)], dtype=mdtype)
a = np.ma.masked_all((2,), dtype=ndtype)
a.harden_mask()
a['a'][0] = 2
assert_equal(a.mask, control)
a = np.ma.masked_all((2,), dtype=ndtype)
a.harden_mask()
a[0]['a'] = 2
assert_equal(a.mask, control)
def test_setitem_scalar(self):
# 8510
mask_0d = np.ma.masked_array(1, mask=True)
arr = np.ma.arange(3)
arr[0] = mask_0d
assert_array_equal(arr.mask, [True, False, False])
def test_element_len(self):
data = self._create_data()
# check that len() works for mvoid (Github issue #576)
for rec in data['base']:
assert_equal(len(rec), len(data['ddtype']))
| TestMaskedFields |
python | Pylons__pyramid | tests/test_i18n.py | {
"start": 7495,
"end": 7918
} | class ____(unittest.TestCase):
def setUp(self):
testing.setUp()
def tearDown(self):
testing.tearDown()
def _callFUT(self, request):
from pyramid.i18n import get_localizer
return get_localizer(request)
def test_it(self):
request = DummyRequest()
request.localizer = 'localizer'
self.assertEqual(self._callFUT(request), 'localizer')
| Test_get_localizer |
python | django__django | tests/transactions/tests.py | {
"start": 13917,
"end": 17465
} | class ____(TransactionTestCase):
available_apps = ["transactions"]
forbidden_atomic_msg = "This is forbidden when an 'atomic' block is active."
def test_atomic_prevents_setting_autocommit(self):
autocommit = transaction.get_autocommit()
with transaction.atomic():
with self.assertRaisesMessage(
transaction.TransactionManagementError, self.forbidden_atomic_msg
):
transaction.set_autocommit(not autocommit)
# Make sure autocommit wasn't changed.
self.assertEqual(connection.autocommit, autocommit)
def test_atomic_prevents_calling_transaction_methods(self):
with transaction.atomic():
with self.assertRaisesMessage(
transaction.TransactionManagementError, self.forbidden_atomic_msg
):
transaction.commit()
with self.assertRaisesMessage(
transaction.TransactionManagementError, self.forbidden_atomic_msg
):
transaction.rollback()
def test_atomic_prevents_queries_in_broken_transaction(self):
r1 = Reporter.objects.create(first_name="Archibald", last_name="Haddock")
with transaction.atomic():
r2 = Reporter(first_name="Cuthbert", last_name="Calculus", id=r1.id)
with self.assertRaises(IntegrityError):
r2.save(force_insert=True)
# The transaction is marked as needing rollback.
msg = (
"An error occurred in the current transaction. You can't "
"execute queries until the end of the 'atomic' block."
)
with self.assertRaisesMessage(
transaction.TransactionManagementError, msg
) as cm:
r2.save(force_update=True)
self.assertIsInstance(cm.exception.__cause__, IntegrityError)
self.assertEqual(Reporter.objects.get(pk=r1.pk).last_name, "Haddock")
@skipIfDBFeature("atomic_transactions")
def test_atomic_allows_queries_after_fixing_transaction(self):
r1 = Reporter.objects.create(first_name="Archibald", last_name="Haddock")
with transaction.atomic():
r2 = Reporter(first_name="Cuthbert", last_name="Calculus", id=r1.id)
with self.assertRaises(IntegrityError):
r2.save(force_insert=True)
# Mark the transaction as no longer needing rollback.
transaction.set_rollback(False)
r2.save(force_update=True)
self.assertEqual(Reporter.objects.get(pk=r1.pk).last_name, "Calculus")
@skipUnlessDBFeature("test_db_allows_multiple_connections")
def test_atomic_prevents_queries_in_broken_transaction_after_client_close(self):
with transaction.atomic():
Reporter.objects.create(first_name="Archibald", last_name="Haddock")
connection.close()
# The connection is closed and the transaction is marked as
# needing rollback. This will raise an InterfaceError on databases
# that refuse to create cursors on closed connections (PostgreSQL)
# and a TransactionManagementError on other databases.
with self.assertRaises(Error):
Reporter.objects.create(first_name="Cuthbert", last_name="Calculus")
# The connection is usable again .
self.assertEqual(Reporter.objects.count(), 0)
@skipUnlessDBFeature("uses_savepoints")
@skipUnless(connection.vendor == "mysql", "MySQL-specific behaviors")
| AtomicErrorsTests |
python | huggingface__transformers | src/transformers/models/glm4_moe/modeling_glm4_moe.py | {
"start": 9037,
"end": 12660
} | class ____(nn.Module):
"""Multi-headed attention from 'Attention Is All You Need' paper"""
def __init__(self, config: Glm4MoeConfig, layer_idx: Optional[int] = None):
super().__init__()
self.config = config
self.layer_idx = layer_idx
self.head_dim = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads)
self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads
self.scaling = self.head_dim**-0.5
self.rope_parameters = config.rope_parameters
self.attention_dropout = config.attention_dropout
self.is_causal = True
self.q_proj = nn.Linear(
config.hidden_size, config.num_attention_heads * self.head_dim, bias=config.attention_bias
)
self.k_proj = nn.Linear(
config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias
)
self.v_proj = nn.Linear(
config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias
)
self.o_proj = nn.Linear(config.num_attention_heads * self.head_dim, config.hidden_size, bias=False)
self.use_qk_norm = config.use_qk_norm
if self.use_qk_norm:
self.q_norm = Glm4MoeRMSNorm(self.head_dim, eps=config.rms_norm_eps)
self.k_norm = Glm4MoeRMSNorm(self.head_dim, eps=config.rms_norm_eps)
def forward(
self,
hidden_states: torch.Tensor,
position_embeddings: tuple[torch.Tensor, torch.Tensor],
attention_mask: Optional[torch.Tensor],
past_key_values: Optional[Cache] = None,
cache_position: Optional[torch.LongTensor] = None,
**kwargs: Unpack[FlashAttentionKwargs],
) -> tuple[torch.Tensor, Optional[torch.Tensor]]:
input_shape = hidden_states.shape[:-1]
hidden_shape = (*input_shape, -1, self.head_dim)
query_states = self.q_proj(hidden_states).view(hidden_shape)
key_states = self.k_proj(hidden_states).view(hidden_shape)
value_states = self.v_proj(hidden_states).view(hidden_shape)
if self.use_qk_norm: # main diff from Llama
query_states = self.q_norm(query_states)
key_states = self.k_norm(key_states)
query_states = query_states.transpose(1, 2)
key_states = key_states.transpose(1, 2)
value_states = value_states.transpose(1, 2)
cos, sin = position_embeddings
query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
if past_key_values is not None:
# sin and cos are specific to RoPE models; position_ids needed for the static cache
cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs)
attention_interface: Callable = eager_attention_forward
if self.config._attn_implementation != "eager":
attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
attn_output, attn_weights = attention_interface(
self,
query_states,
key_states,
value_states,
attention_mask,
dropout=0.0 if not self.training else self.attention_dropout,
scaling=self.scaling,
**kwargs,
)
attn_output = attn_output.reshape(*input_shape, -1).contiguous()
attn_output = self.o_proj(attn_output)
return attn_output, attn_weights
| Glm4MoeAttention |
python | PyCQA__pylint | tests/functional/b/bugfix_local_scope_metaclass_1177.py | {
"start": 487,
"end": 1021
} | class ____:
class Meta3(type):
pass
class Class3(metaclass=Meta3):
pass
instance = Class3()
def mixed_scopes():
class ClassM(metaclass=Meta):
pass
return ClassM
def imported_and_nested_scope1():
class ClassImp1(metaclass=ImportedMetaclass):
pass
class ClassImp2(metaclass=ImportedMetaclass):
pass
return ClassImp1, ClassImp2
def imported_and_nested_scope2():
class ClassImp3(metaclass=ImportedMetaclass2):
pass
return ClassImp3
| ClassScope |
python | scikit-learn__scikit-learn | sklearn/linear_model/_coordinate_descent.py | {
"start": 79029,
"end": 89779
} | class ____(RegressorMixin, LinearModelCV):
"""Elastic Net model with iterative fitting along a regularization path.
See glossary entry for :term:`cross-validation estimator`.
Read more in the :ref:`User Guide <elastic_net>`.
Parameters
----------
l1_ratio : float or list of float, default=0.5
Float between 0 and 1 passed to ElasticNet (scaling between
l1 and l2 penalties). For ``l1_ratio = 0``
the penalty is an L2 penalty. For ``l1_ratio = 1`` it is an L1 penalty.
For ``0 < l1_ratio < 1``, the penalty is a combination of L1 and L2
This parameter can be a list, in which case the different
values are tested by cross-validation and the one giving the best
prediction score is used. Note that a good choice of list of
values for l1_ratio is often to put more values close to 1
(i.e. Lasso) and less close to 0 (i.e. Ridge), as in ``[.1, .5, .7,
.9, .95, .99, 1]``.
eps : float, default=1e-3
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``.
n_alphas : int, default=100
Number of alphas along the regularization path, used for each l1_ratio.
.. deprecated:: 1.7
`n_alphas` was deprecated in 1.7 and will be removed in 1.9. Use `alphas`
instead.
alphas : array-like or int, default=None
Values of alphas to test along the regularization path, used for each l1_ratio.
If int, `alphas` values are generated automatically.
If array-like, list of alpha values to use.
.. versionchanged:: 1.7
`alphas` accepts an integer value which removes the need to pass
`n_alphas`.
.. deprecated:: 1.7
`alphas=None` was deprecated in 1.7 and will be removed in 1.9, at which
point the default value will be set to 100.
fit_intercept : bool, default=True
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(i.e. data is expected to be centered).
precompute : 'auto', bool or array-like of shape \
(n_features, n_features), default='auto'
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
max_iter : int, default=1000
The maximum number of iterations.
tol : float, default=1e-4
The tolerance for the optimization: if the updates are smaller or equal to
``tol``, the optimization code checks the dual gap for optimality and continues
until it is smaller or equal to ``tol``.
cv : int, cross-validation generator or iterable, default=None
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 5-fold cross-validation,
- int, to specify the number of folds.
- :term:`CV splitter`,
- An iterable yielding (train, test) splits as arrays of indices.
For int/None inputs, :class:`~sklearn.model_selection.KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
.. versionchanged:: 0.22
``cv`` default value if None changed from 3-fold to 5-fold.
copy_X : bool, default=True
If ``True``, X will be copied; else, it may be overwritten.
verbose : bool or int, default=0
Amount of verbosity.
n_jobs : int, default=None
Number of CPUs to use during the cross validation.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
positive : bool, default=False
When set to ``True``, forces the coefficients to be positive.
random_state : int, RandomState instance, default=None
The seed of the pseudo random number generator that selects a random
feature to update. Used when ``selection`` == 'random'.
Pass an int for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
selection : {'cyclic', 'random'}, default='cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
Attributes
----------
alpha_ : float
The amount of penalization chosen by cross validation.
l1_ratio_ : float
The compromise between l1 and l2 penalization chosen by
cross validation.
coef_ : ndarray of shape (n_features,) or (n_targets, n_features)
Parameter vector (w in the cost function formula).
intercept_ : float or ndarray of shape (n_targets, n_features)
Independent term in the decision function.
mse_path_ : ndarray of shape (n_l1_ratio, n_alpha, n_folds)
Mean square error for the test set on each fold, varying l1_ratio and
alpha.
alphas_ : ndarray of shape (n_alphas,) or (n_l1_ratio, n_alphas)
The grid of alphas used for fitting, for each l1_ratio.
dual_gap_ : float
The dual gaps at the end of the optimization for the optimal alpha.
n_iter_ : int
Number of iterations run by the coordinate descent solver to reach
the specified tolerance for the optimal alpha.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
See Also
--------
enet_path : Compute elastic net path with coordinate descent.
ElasticNet : Linear regression with combined L1 and L2 priors as regularizer.
Notes
-----
In `fit`, once the best parameters `l1_ratio` and `alpha` are found through
cross-validation, the model is fit again using the entire training set.
To avoid unnecessary memory duplication the `X` argument of the `fit`
method should be directly passed as a Fortran-contiguous numpy array.
The parameter `l1_ratio` corresponds to alpha in the glmnet R package
while alpha corresponds to the lambda parameter in glmnet.
More specifically, the optimization objective is::
1 / (2 * n_samples) * ||y - Xw||^2_2
+ alpha * l1_ratio * ||w||_1
+ 0.5 * alpha * (1 - l1_ratio) * ||w||^2_2
If you are interested in controlling the L1 and L2 penalty
separately, keep in mind that this is equivalent to::
a * L1 + b * L2
for::
alpha = a + b and l1_ratio = a / (a + b).
For an example, see
:ref:`examples/linear_model/plot_lasso_model_selection.py
<sphx_glr_auto_examples_linear_model_plot_lasso_model_selection.py>`.
The underlying coordinate descent solver uses gap safe screening rules to speedup
fitting time, see :ref:`User Guide on coordinate descent <coordinate_descent>`.
Examples
--------
>>> from sklearn.linear_model import ElasticNetCV
>>> from sklearn.datasets import make_regression
>>> X, y = make_regression(n_features=2, random_state=0)
>>> regr = ElasticNetCV(cv=5, random_state=0)
>>> regr.fit(X, y)
ElasticNetCV(cv=5, random_state=0)
>>> print(regr.alpha_)
0.199
>>> print(regr.intercept_)
0.398
>>> print(regr.predict([[0, 0]]))
[0.398]
"""
_parameter_constraints: dict = {
**LinearModelCV._parameter_constraints,
"l1_ratio": [Interval(Real, 0, 1, closed="both"), "array-like"],
}
path = staticmethod(enet_path)
def __init__(
self,
*,
l1_ratio=0.5,
eps=1e-3,
n_alphas="deprecated",
alphas="warn",
fit_intercept=True,
precompute="auto",
max_iter=1000,
tol=1e-4,
cv=None,
copy_X=True,
verbose=0,
n_jobs=None,
positive=False,
random_state=None,
selection="cyclic",
):
self.l1_ratio = l1_ratio
self.eps = eps
self.n_alphas = n_alphas
self.alphas = alphas
self.fit_intercept = fit_intercept
self.precompute = precompute
self.max_iter = max_iter
self.tol = tol
self.cv = cv
self.copy_X = copy_X
self.verbose = verbose
self.n_jobs = n_jobs
self.positive = positive
self.random_state = random_state
self.selection = selection
def _get_estimator(self):
return ElasticNet()
def _is_multitask(self):
return False
def fit(self, X, y, sample_weight=None, **params):
"""Fit ElasticNet model with coordinate descent.
Fit is on grid of alphas and best alpha estimated by cross-validation.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training data. Pass directly as Fortran-contiguous data
to avoid unnecessary memory duplication. If y is mono-output,
X can be sparse. Note that large sparse matrices and arrays
requiring `int64` indices are not accepted.
y : array-like of shape (n_samples,)
Target values.
sample_weight : float or array-like of shape (n_samples,), \
default=None
Sample weights used for fitting and evaluation of the weighted
mean squared error of each cv-fold. Note that the cross validated
MSE that is finally used to find the best model is the unweighted
mean over the (weighted) MSEs of each test fold.
**params : dict, default=None
Parameters to be passed to the CV splitter.
.. versionadded:: 1.4
Only available if `enable_metadata_routing=True`,
which can be set by using
``sklearn.set_config(enable_metadata_routing=True)``.
See :ref:`Metadata Routing User Guide <metadata_routing>` for
more details.
Returns
-------
self : object
Returns an instance of fitted model.
"""
return super().fit(X, y, sample_weight=sample_weight, **params)
###############################################################################
# Multi Task ElasticNet and Lasso models (with joint feature selection)
| ElasticNetCV |
python | pytorch__pytorch | test/torch_np/numpy_tests/linalg/test_linalg.py | {
"start": 56636,
"end": 57005
} | class ____(TestCase):
def test_intmin(self):
# Non-regression test: p-norm of signed integer would previously do
# float cast and abs in the wrong order.
x = np.array([-(2**31)], dtype=np.int32)
old_assert_almost_equal(norm(x, ord=3), 2**31, decimal=5)
# Separate definitions so we can use them for matrix tests.
| TestNorm_NonSystematic |
python | great-expectations__great_expectations | contrib/great_expectations_semantic_types_expectations/great_expectations_semantic_types_expectations/expectations/expect_column_values_to_be_valid_udp_port.py | {
"start": 1692,
"end": 4052
} | class ____(ColumnMapExpectation):
"""Expect column values to be valid UDP port numbers."""
# These examples will be shown in the public gallery.
# They will also be executed as unit tests for your Expectation.
examples = [
{
"data": {
"well_formed_udp_port": [
"7", # Echo Protocol
"37", # Time Protocol
"53", # Domain Name System (DNS)
"520", # Routing Information Protocol (RIP)
"623", # ASF-RMCP & IPMI Remote Management Protocol
],
"malformed_udp_port": [
"",
"0",
"8",
"65536",
"This is not a port",
],
},
"tests": [
{
"title": "basic_positive_test",
"exact_match_out": False,
"include_in_gallery": True,
"in": {"column": "well_formed_udp_port"},
"out": {"success": True},
},
{
"title": "basic_negative_test",
"exact_match_out": False,
"include_in_gallery": True,
"in": {"column": "malformed_udp_port"},
"out": {"success": False},
},
],
}
]
# This is the id string of the Metric used by this Expectation.
# For most Expectations, it will be the same as the `condition_metric_name` defined in your Metric class above.
map_metric = "column_values.valid_udp_port"
# This is a list of parameter names that can affect whether the Expectation evaluates to True or False
success_keys = ("mostly",)
# This dictionary contains default values for any parameters that should have default values
default_kwarg_values = {}
# This object contains metadata for display in the public Gallery
library_metadata = {
"maturity": "experimental",
"tags": ["experimental", "hackathon", "typed-entities"],
"contributors": [
"@voidforall",
],
}
if __name__ == "__main__":
ExpectColumnValuesToBeValidUdpPort().print_diagnostic_checklist()
| ExpectColumnValuesToBeValidUdpPort |
python | PrefectHQ__prefect | src/prefect/server/database/orm_models.py | {
"start": 48065,
"end": 49886
} | class ____(Base):
@declared_attr.directive
def __tablename__(cls) -> str:
return "event_resources"
__table_args__: Any = (
sa.Index(
"ix_event_resources__resource_id__occurred",
"resource_id",
"occurred",
),
)
occurred: Mapped[DateTime]
resource_id: Mapped[str] = mapped_column(sa.Text())
resource_role: Mapped[str] = mapped_column(sa.Text())
resource: Mapped[dict[str, Any]] = mapped_column(sa_JSON)
event_id: Mapped[uuid.UUID]
# These are temporary until we've migrated all the references to the new,
# non-ORM names
ORMFlow = Flow
ORMFlowRunState = FlowRunState
ORMTaskRunState = TaskRunState
ORMArtifact = Artifact
ORMArtifactCollection = ArtifactCollection
ORMTaskRunStateCache = TaskRunStateCache
ORMRun = Run
ORMFlowRun = FlowRun
ORMTaskRun = TaskRun
ORMDeploymentSchedule = DeploymentSchedule
ORMDeployment = Deployment
ORMLog = Log
ORMConcurrencyLimit = ConcurrencyLimit
ORMConcurrencyLimitV2 = ConcurrencyLimitV2
ORMBlockType = BlockType
ORMBlockSchema = BlockSchema
ORMBlockSchemaReference = BlockSchemaReference
ORMBlockDocument = BlockDocument
ORMBlockDocumentReference = BlockDocumentReference
ORMConfiguration = Configuration
ORMSavedSearch = SavedSearch
ORMWorkQueue = WorkQueue
ORMWorkPool = WorkPool
ORMWorker = Worker
ORMAgent = Agent
ORMVariable = Variable
ORMFlowRunInput = FlowRunInput
ORMCsrfToken = CsrfToken
ORMAutomation = Automation
ORMAutomationBucket = AutomationBucket
ORMAutomationRelatedResource = AutomationRelatedResource
ORMCompositeTriggerChildFiring = CompositeTriggerChildFiring
ORMAutomationEventFollower = AutomationEventFollower
ORMEvent = Event
ORMEventResource = EventResource
_UpsertColumns = Iterable[Union[str, "sa.Column[Any]", roles.DDLConstraintColumnRole]]
| EventResource |
python | scipy__scipy | benchmarks/benchmarks/go_benchmark_functions/go_funcs_M.py | {
"start": 6936,
"end": 8160
} | class ____(Benchmark):
r"""
Mishra 2 objective function.
This class defines the Mishra 2 [1]_ global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\text{Mishra02}}({x}) = (1 + x_n)^{x_n}
with
.. math::
x_n = n - \sum_{i=1}^{n-1} \frac{(x_i + x_{i+1})}{2}
Here, :math:`n` represents the number of dimensions and
:math:`x_i \in [0, 1]` for :math:`i = 1, ..., n`.
*Global optimum*: :math:`f(x) = 2` for :math:`x_i = 1`
for :math:`i = 1, ..., n`
.. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
For Global Optimization Problems Int. Journal of Mathematical Modelling
and Numerical Optimisation, 2013, 4, 150-194.
"""
change_dimensionality = True
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self._bounds = list(zip([0.0] * self.N,
[1.0 + 1e-9] * self.N))
self.global_optimum = [[1.0 for _ in range(self.N)]]
self.fglob = 2.0
def fun(self, x, *args):
self.nfev += 1
xn = self.N - sum((x[:-1] + x[1:]) / 2.0)
return (1 + xn) ** xn
| Mishra02 |
python | streamlit__streamlit | lib/tests/streamlit/runtime/download_data_util_test.py | {
"start": 868,
"end": 5994
} | class ____(unittest.TestCase):
def test_str_is_converted_to_bytes_and_text_plain(self):
"""Strings are encoded to bytes and inferred as text/plain."""
data_as_bytes, mime = convert_data_to_bytes_and_infer_mime(
"hello", unsupported_error=RuntimeError("unsupported")
)
assert data_as_bytes == b"hello"
assert mime == "text/plain"
def test_text_io_wrapper_is_converted_to_bytes_and_text_plain(self):
"""io.TextIOWrapper is read fully and inferred as text/plain."""
content = "Line 1\nLine 2"
fd, path = tempfile.mkstemp(text=True)
os.close(fd)
try:
with open(path, "w", encoding="utf-8") as f:
f.write(content)
with open(path, encoding="utf-8") as text_io:
data_as_bytes, mime = convert_data_to_bytes_and_infer_mime(
text_io, unsupported_error=RuntimeError("unsupported")
)
assert data_as_bytes == content.encode("utf-8")
assert mime == "text/plain"
finally:
try:
os.unlink(path)
except FileNotFoundError:
pass
def test_bytes_passthrough_and_octet_stream(self):
"""Bytes are returned as-is, with application/octet-stream."""
payload = b"\x00\x01\x02"
data_as_bytes, mime = convert_data_to_bytes_and_infer_mime(
payload, unsupported_error=RuntimeError("unsupported")
)
assert data_as_bytes == payload
assert mime == "application/octet-stream"
def test_bytesio_rewinds_and_reads_all(self):
"""BytesIO is rewound and read fully."""
payload = b"abcdef"
bio = io.BytesIO(payload)
bio.seek(3) # simulate prior read
data_as_bytes, mime = convert_data_to_bytes_and_infer_mime(
bio, unsupported_error=RuntimeError("unsupported")
)
assert data_as_bytes == payload
assert mime == "application/octet-stream"
def test_buffered_reader_rewinds_and_reads_all(self):
"""BufferedReader (rb open) is rewound and read fully."""
fd, path = tempfile.mkstemp()
os.close(fd)
payload = b"\x10\x20\x30\x40"
try:
with open(path, "wb") as f:
f.write(payload)
with open(path, "rb") as f:
f.read(2) # simulate prior read
data_as_bytes, mime = convert_data_to_bytes_and_infer_mime(
f, unsupported_error=RuntimeError("unsupported")
)
assert data_as_bytes == payload
assert mime == "application/octet-stream"
finally:
try:
os.unlink(path)
except FileNotFoundError:
pass
def test_raw_io_base_fileio_rewinds_and_reads_all(self):
"""FileIO (RawIOBase) is rewound and read fully."""
fd, path = tempfile.mkstemp()
os.close(fd)
payload = b"\xaa\xbb\xcc"
try:
with open(path, "wb") as f:
f.write(payload)
with io.FileIO(path, "rb") as raw: # type: ignore[arg-type]
raw.read(1) # simulate prior read
data_as_bytes, mime = convert_data_to_bytes_and_infer_mime(
raw, unsupported_error=RuntimeError("unsupported")
)
assert data_as_bytes == payload
assert mime == "application/octet-stream"
finally:
try:
os.unlink(path)
except FileNotFoundError:
pass
def test_raw_io_base_empty_file_returns_empty_bytes(self):
"""Empty RawIOBase should return empty bytes and application/octet-stream."""
fd, path = tempfile.mkstemp()
os.close(fd)
try:
# Ensure empty file
with io.FileIO(path, "rb") as raw: # type: ignore[arg-type]
data_as_bytes, mime = convert_data_to_bytes_and_infer_mime(
raw, unsupported_error=RuntimeError("unsupported")
)
assert data_as_bytes == b""
assert mime == "application/octet-stream"
finally:
try:
os.unlink(path)
except FileNotFoundError:
pass
def test_unsupported_type_raises_given_exception(self):
"""Unsupported types raise the provided exception."""
with pytest.raises(RuntimeError, match="custom unsupported"):
convert_data_to_bytes_and_infer_mime(
["not", "supported"],
unsupported_error=RuntimeError("custom unsupported"),
)
def test_supported_type_ignores_unsupported_error_and_returns_normally(self):
"""Supported types do not raise, even if unsupported_error is provided."""
data_as_bytes, mime = convert_data_to_bytes_and_infer_mime(
b"ok", unsupported_error=RuntimeError("should not raise")
)
assert data_as_bytes == b"ok"
assert mime == "application/octet-stream"
| ConvertDataToBytesAndInferMimeTest |
python | chroma-core__chroma | chromadb/test/ef/test_multimodal_ef.py | {
"start": 504,
"end": 6173
} | class ____(EmbeddingFunction[Embeddable]):
def __init__(self) -> None:
self._hef = hashing_embedding_function(dim=10, dtype=np.float64)
def __call__(self, input: Embeddable) -> Embeddings:
to_texts = [str(i) for i in input]
embeddings = np.array(self._hef(to_texts))
# Normalize the embeddings
# This is so we can generate random unit vectors and have them be close to the embeddings
embeddings /= np.linalg.norm(embeddings, axis=1, keepdims=True) # type: ignore[misc]
return cast(Embeddings, embeddings.tolist())
def random_image() -> Image:
return np.random.randint(0, 255, size=(10, 10, 3), dtype=np.int64)
def random_document() -> Document:
return str(random_image())
@pytest.fixture
def multimodal_collection(
default_ef: EmbeddingFunction[Embeddable] = hashing_multimodal_ef(),
) -> Generator[chromadb.Collection, None, None]:
settings = Settings()
if os.environ.get("CHROMA_INTEGRATION_TEST_ONLY"):
host = os.environ.get("CHROMA_SERVER_HOST", "localhost")
port = int(os.environ.get("CHROMA_SERVER_HTTP_PORT", 0))
settings.chroma_api_impl = "chromadb.api.fastapi.FastAPI"
settings.chroma_server_http_port = port
settings.chroma_server_host = host
client = chromadb.Client(settings=settings)
collection = client.create_collection(
name="multimodal_collection", embedding_function=default_ef
)
yield collection
client.clear_system_cache()
# Test adding and querying of a multimodal collection consisting of images and documents
def test_multimodal(
multimodal_collection: chromadb.Collection,
default_ef: EmbeddingFunction[Embeddable] = hashing_multimodal_ef(),
n_examples: int = 10,
n_query_results: int = 3,
) -> None:
# Fix numpy's random seed for reproducibility
random_state = np.random.get_state()
np.random.seed(0)
image_ids = [str(i) for i in range(n_examples)]
images = [random_image() for _ in range(n_examples)]
image_embeddings = default_ef(images)
document_ids = [str(i) for i in range(n_examples, 2 * n_examples)]
documents = [random_document() for _ in range(n_examples)]
document_embeddings = default_ef(documents)
# Trying to add a document and an image at the same time should fail
with pytest.raises(
ValueError,
# This error string may be in any order
match=r"Exactly one of (images|documents|uris)(?:, (images|documents|uris))?(?:, (images|documents|uris))? must be provided in add\.",
):
multimodal_collection.add(
ids=image_ids[0], documents=documents[0], images=images[0]
)
# Add some documents
multimodal_collection.add(ids=document_ids, documents=documents)
# Add some images
multimodal_collection.add(ids=image_ids, images=images)
# get() should return all the documents and images
# ids corresponding to images should not have documents
get_result = multimodal_collection.get(include=["documents"])
assert len(get_result["ids"]) == len(document_ids) + len(image_ids)
for i, id in enumerate(get_result["ids"]):
assert id in document_ids or id in image_ids
assert get_result["documents"] is not None
if id in document_ids:
assert get_result["documents"][i] == documents[document_ids.index(id)]
if id in image_ids:
assert get_result["documents"][i] is None
# Generate a random query image
query_image = random_image()
query_image_embedding = default_ef([query_image])
image_neighbor_indices, _ = _exact_distances(
query_image_embedding, image_embeddings + document_embeddings
)
# Get the ids of the nearest neighbors
nearest_image_neighbor_ids = [
image_ids[i] if i < n_examples else document_ids[i % n_examples]
for i in image_neighbor_indices[0][:n_query_results]
]
# Generate a random query document
query_document = random_document()
query_document_embedding = default_ef([query_document])
document_neighbor_indices, _ = _exact_distances(
query_document_embedding, image_embeddings + document_embeddings
)
nearest_document_neighbor_ids = [
image_ids[i] if i < n_examples else document_ids[i % n_examples]
for i in document_neighbor_indices[0][:n_query_results]
]
# Querying with both images and documents should fail
with pytest.raises(ValueError):
multimodal_collection.query(
query_images=[query_image], query_texts=[query_document]
)
# Query with images
query_result = multimodal_collection.query(
query_images=[query_image], n_results=n_query_results, include=["documents"]
)
assert query_result["ids"][0] == nearest_image_neighbor_ids
# Query with documents
query_result = multimodal_collection.query(
query_texts=[query_document], n_results=n_query_results, include=["documents"]
)
assert query_result["ids"][0] == nearest_document_neighbor_ids
np.random.set_state(random_state)
@pytest.mark.xfail
def test_multimodal_update_with_image(
multimodal_collection: chromadb.Collection,
) -> None:
# Updating an entry with an existing document should remove the documentß
document = random_document()
image = random_image()
id = "0"
multimodal_collection.add(ids=id, documents=document)
multimodal_collection.update(ids=id, images=image)
get_result = multimodal_collection.get(ids=id, include=["documents"])
assert get_result["documents"] is not None
assert get_result["documents"][0] is None
| hashing_multimodal_ef |
python | coleifer__peewee | tests/pwiz_integration.py | {
"start": 449,
"end": 742
} | class ____(TestModel):
user = ForeignKeyField(User)
text = TextField(index=True)
data = IntegerField(default=0)
misc = IntegerField(default=0)
class Meta:
indexes = (
(('user', 'text'), True),
(('user', 'data', 'misc'), False),
)
| Note |
python | ray-project__ray | python/ray/_private/gcs_utils.py | {
"start": 2080,
"end": 4270
} | class ____:
def __init__(self, gcs_address: Optional[str] = None, aio: bool = False):
self._gcs_address = gcs_address
self._aio = aio
@property
def address(self):
return self._gcs_address
def connect(self):
# GCS server uses a cached port, so it should use the same port after
# restarting. This means GCS address should stay the same for the
# lifetime of the Ray cluster.
self._channel = create_gcs_channel(self._gcs_address, self._aio)
def channel(self):
return self._channel
def cleanup_redis_storage(
host: str,
port: int,
password: str,
use_ssl: bool,
storage_namespace: str,
username: Optional[str] = None,
):
"""This function is used to cleanup the GCS storage in Redis.
It supports Redis in cluster and non-cluster modes.
Args:
host: The Redis host address.
port: The Redis port.
username: The Redis username.
password: The Redis password.
use_ssl: Whether to encrypt the connection.
storage_namespace: The namespace of the storage to be deleted.
"""
from ray._raylet import del_key_prefix_from_storage # type: ignore
if not isinstance(host, str):
raise ValueError("Host must be a string")
if username is None:
username = ""
if not isinstance(username, str):
raise ValueError("Username must be a string")
if not isinstance(password, str):
raise ValueError("Password must be a string")
if port < 0:
raise ValueError(f"Invalid port: {port}")
if not isinstance(use_ssl, bool):
raise TypeError("use_ssl must be a boolean")
if not isinstance(storage_namespace, str):
raise ValueError("storage namespace must be a string")
# Right now, GCS stores all data in multiple hashes with keys prefixed by
# storage_namespace. So we only need to delete the specific key prefix to cleanup
# the cluster's data.
# Note this deletes all keys with prefix `RAY{key_prefix}@`, not `{key_prefix}`.
return del_key_prefix_from_storage(
host, port, username, password, use_ssl, storage_namespace
)
| GcsChannel |
python | python-excel__xlwt | xlwt/BIFFRecords.py | {
"start": 78829,
"end": 79299
} | class ____(BiffRecord):
"""
This record is part of the Page Settings Block. It specifies if the
sheet is centred vertically when printed.
Record VCENTER, BIFF3-BIFF8:
Offset Size Contents
0 2 0 = Print sheet aligned at top page border
1 = Print sheet vertically centred
"""
_REC_ID = 0x0084
def __init__(self, is_vert_center):
self._rec_data = pack('<H', is_vert_center)
| VCenterRecord |
python | dagster-io__dagster | python_modules/dagster/dagster/_config/pythonic_config/resource.py | {
"start": 27138,
"end": 27379
} | class ____(NamedTuple):
nested_partial_resources: dict[str, Any]
config_schema: DagsterField
resource_fn: Callable[[InitResourceContext], Any]
description: Optional[str]
nested_resources: dict[str, Any]
| PartialResourceState |
python | allegroai__clearml | examples/frameworks/fire/fire_grouping_cmd.py | {
"start": 195,
"end": 401
} | class ____(object):
def __init__(self):
self.other = Other()
def run(self):
return "Ingesting! Nom nom nom..."
def hello(self, hello_str):
return hello_str
| IngestionStage |
python | pydata__xarray | xarray/tests/test_rolling.py | {
"start": 17256,
"end": 20019
} | class ____:
@pytest.mark.parametrize("dim", ["time", "x"])
@pytest.mark.parametrize(
"window_type, window",
[["span", 5], ["alpha", 0.5], ["com", 0.5], ["halflife", 5]],
)
@pytest.mark.parametrize("backend", ["numpy"], indirect=True)
@pytest.mark.parametrize("func", ["mean", "sum", "var", "std"])
def test_rolling_exp_runs(self, da, dim, window_type, window, func) -> None:
da = da.where(da > 0.2)
rolling_exp = da.rolling_exp(window_type=window_type, **{dim: window})
result = getattr(rolling_exp, func)()
assert isinstance(result, DataArray)
@pytest.mark.parametrize("dim", ["time", "x"])
@pytest.mark.parametrize(
"window_type, window",
[["span", 5], ["alpha", 0.5], ["com", 0.5], ["halflife", 5]],
)
@pytest.mark.parametrize("backend", ["numpy"], indirect=True)
def test_rolling_exp_mean_pandas(self, da, dim, window_type, window) -> None:
da = da.isel(a=0).where(lambda x: x > 0.2)
result = da.rolling_exp(window_type=window_type, **{dim: window}).mean()
assert isinstance(result, DataArray)
pandas_array = da.to_pandas()
assert pandas_array.index.name == "time"
if dim == "x":
pandas_array = pandas_array.T
expected = xr.DataArray(
pandas_array.ewm(**{window_type: window}).mean()
).transpose(*da.dims)
assert_allclose(expected.variable, result.variable)
@pytest.mark.parametrize("backend", ["numpy"], indirect=True)
@pytest.mark.parametrize("func", ["mean", "sum"])
def test_rolling_exp_keep_attrs(self, da, func) -> None:
attrs = {"attrs": "da"}
da.attrs = attrs
# Equivalent of `da.rolling_exp(time=10).mean`
rolling_exp_func = getattr(da.rolling_exp(time=10), func)
# attrs are kept per default
result = rolling_exp_func()
assert result.attrs == attrs
# discard attrs
result = rolling_exp_func(keep_attrs=False)
assert result.attrs == {}
# test discard attrs using global option
with set_options(keep_attrs=False):
result = rolling_exp_func()
assert result.attrs == {}
# keyword takes precedence over global option
with set_options(keep_attrs=False):
result = rolling_exp_func(keep_attrs=True)
assert result.attrs == attrs
with set_options(keep_attrs=True):
result = rolling_exp_func(keep_attrs=False)
assert result.attrs == {}
with pytest.warns(
UserWarning,
match="Passing ``keep_attrs`` to ``rolling_exp`` has no effect.",
):
da.rolling_exp(time=10, keep_attrs=True)
| TestDataArrayRollingExp |
python | h5py__h5py | h5py/tests/test_dataset.py | {
"start": 30523,
"end": 30914
} | class ____(BaseDataset):
"""
Feature: Datasets can use the fletcher32 filter
"""
def test_fletcher32(self):
""" Enable fletcher32 filter """
dset = self.f.create_dataset(make_name(), (20, 30), fletcher32=True)
self.assertTrue(dset.fletcher32)
@ut.skipIf('scaleoffset' not in h5py.filters.encode, "SCALEOFFSET is not installed")
| TestCreateFletcher32 |
python | ray-project__ray | python/ray/serve/tests/test_cli_3.py | {
"start": 1427,
"end": 1605
} | class ____:
def __init__(self):
raise RuntimeError("Intentionally failing.")
constructor_failure_node = ConstructorFailure.bind()
@serve.deployment
| ConstructorFailure |
python | getsentry__sentry | tests/acceptance/chartcuterie/test_chart_renderer.py | {
"start": 127,
"end": 859
} | class ____(AcceptanceTestCase):
def test_debug_renders(self) -> None:
options = {
"chart-rendering.enabled": True,
"system.url-prefix": self.browser.live_server_url,
}
with self.options(options):
self.browser.get("debug/charts/chart-renderer/")
self.wait_for_loading()
images = self.browser.elements(selector="img")
assert len(images) > 0
for image in images:
src = image.get_attribute("src")
resp = self.client.get(src)
# Ensure our chart images actually look like pngs
assert resp.status_code == 200
assert close_streaming_response(resp)[:4] == b"\x89PNG"
| TestChartRenderer |
python | readthedocs__readthedocs.org | readthedocs/search/api/v2/serializers.py | {
"start": 2414,
"end": 6495
} | class ____(serializers.Serializer):
"""
Page serializer.
If ``projects`` is passed in the constructor, the serializer
will pre-generate a cache with that information,
this is to avoid querying the database again for each result.
:param projects: A list of tuples of project and version.
"""
type = serializers.CharField(default="page", source=None, read_only=True)
project = serializers.CharField()
project_alias = serializers.SerializerMethodField()
version = serializers.CharField()
title = serializers.CharField()
path = serializers.SerializerMethodField()
domain = serializers.SerializerMethodField()
highlights = PageHighlightSerializer(source="meta.highlight", default=dict)
blocks = SectionSearchSerializer(source="meta.inner_hits.sections", many=True, default=list)
def __init__(self, *args, projects=None, **kwargs):
if projects:
context = kwargs.setdefault("context", {})
context["projects_data"] = {
project.slug: self._build_project_data(project, version=version)
for project, version in projects
}
super().__init__(*args, **kwargs)
def _build_project_data(self, project, version):
"""Build a `ProjectData` object given a project and its version."""
# NOTE: re-using the resolver doesn't help here,
# as this method is called just once per project,
# re-using the resolver is useful when resolving the same project multiple times.
url = Resolver().resolve_version(project, version)
project_alias = None
if project.parent_relationship:
project_alias = project.parent_relationship.alias
version_data = VersionData(
slug=version.slug,
docs_url=url,
)
return ProjectData(
alias=project_alias,
version=version_data,
)
def _get_project_data(self, obj):
"""
Get and cache the project data.
Try to get the data from the ``projects_data`` context,
and fallback to get it from the database.
If the result is fetched from the database,
it's cached into ``projects_data``.
"""
project_data = self.context.get("projects_data", {}).get(obj.project)
if project_data:
return project_data
version = (
Version.objects.filter(project__slug=obj.project, slug=obj.version)
.select_related("project")
.first()
)
if version:
project = version.project
projects_data = self.context.setdefault("projects_data", {})
projects_data[obj.project] = self._build_project_data(project, version=version)
return projects_data[obj.project]
return None
def get_project_alias(self, obj):
project_data = self._get_project_data(obj)
if project_data:
return project_data.alias
return None
def get_domain(self, obj):
full_path = self._get_full_path(obj)
if full_path:
parsed = urlparse(full_path)
return f"{parsed.scheme}://{parsed.netloc}"
return None
def get_path(self, obj):
full_path = self._get_full_path(obj)
if full_path:
parsed = urlparse(full_path)
return parsed.path
return None
def _get_full_path(self, obj):
project_data = self._get_project_data(obj)
if project_data:
docs_url = project_data.version.docs_url
path = obj.full_path
# Generate an appropriate link for the doctypes that use htmldir,
# and always end it with / so it goes directly to proxito.
# For a generic doctype we just strip the index.html part if it exists.
if obj.doctype in {SPHINX_HTMLDIR, MKDOCS, GENERIC}:
path = re.sub("(^|/)index.html$", "/", path)
return docs_url.rstrip("/") + "/" + path.lstrip("/")
return None
| PageSearchSerializer |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.