language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | huggingface__transformers | src/transformers/models/nanochat/modular_nanochat.py | {
"start": 1894,
"end": 4323
} | class ____(Qwen3Attention):
def __init__(self, config: NanoChatConfig, layer_idx: int):
super().__init__(config, layer_idx)
del self.sliding_window
del self.layer_type
self.q_norm = NanoChatRMSNorm(eps=config.rms_norm_eps)
self.k_norm = NanoChatRMSNorm(eps=config.rms_norm_eps)
def forward(
self,
hidden_states: torch.Tensor,
position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]] = None,
attention_mask: Optional[torch.Tensor] = None,
past_key_values: Optional[Cache] = None,
cache_position: Optional[torch.LongTensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> tuple[torch.Tensor, Optional[torch.Tensor]]:
input_shape = hidden_states.shape[:-1]
hidden_shape = (*input_shape, -1, self.head_dim)
query_states = self.q_proj(hidden_states).view(hidden_shape).transpose(1, 2)
key_states = self.k_proj(hidden_states).view(hidden_shape).transpose(1, 2)
value_states = self.v_proj(hidden_states).view(hidden_shape).transpose(1, 2)
cos, sin = position_embeddings
query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
# RoPE -> Norm (instead of usual Norm -> RoPE)
query_states = self.q_norm(query_states)
key_states = self.k_norm(key_states)
if past_key_values is not None:
# sin and cos are specific to RoPE models; cache_position needed for the static cache
cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs)
attention_interface: Callable = eager_attention_forward
if self.config._attn_implementation != "eager":
attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
attn_output, attn_weights = attention_interface(
self,
query_states,
key_states,
value_states,
attention_mask,
dropout=0.0 if not self.training else self.attention_dropout,
scaling=self.scaling,
**kwargs,
)
attn_output = attn_output.reshape(*input_shape, -1).contiguous()
attn_output = self.o_proj(attn_output)
return attn_output, attn_weights
| NanoChatAttention |
python | tensorflow__tensorflow | tensorflow/python/training/gradient_descent.py | {
"start": 1133,
"end": 3408
} | class ____(optimizer.Optimizer):
"""Optimizer that implements the gradient descent algorithm.
"""
def __init__(self, learning_rate, use_locking=False, name="GradientDescent"):
"""Construct a new gradient descent optimizer.
Args:
learning_rate: A Tensor or a floating point value. The learning
rate to use.
use_locking: If True use locks for update operations.
name: Optional name prefix for the operations created when applying
gradients. Defaults to "GradientDescent".
@compatibility(eager)
When eager execution is enabled, `learning_rate` can be a callable that
takes no arguments and returns the actual value to use. This can be useful
for changing these values across different invocations of optimizer
functions.
@end_compatibility
"""
super(GradientDescentOptimizer, self).__init__(use_locking, name)
self._learning_rate = learning_rate
self._learning_rate_tensor = None
def _apply_dense(self, grad, var):
return gen_training_ops.apply_gradient_descent(
var,
math_ops.cast(self._learning_rate_tensor, var.dtype.base_dtype),
grad,
use_locking=self._use_locking).op
def _resource_apply_dense(self, grad, handle):
return gen_training_ops.resource_apply_gradient_descent(
handle.handle, math_ops.cast(self._learning_rate_tensor,
grad.dtype.base_dtype),
grad, use_locking=self._use_locking)
def _resource_apply_sparse_duplicate_indices(self, grad, handle, indices):
return resource_variable_ops.resource_scatter_add(
handle.handle,
indices,
-grad * math_ops.cast(self._learning_rate_tensor,
grad.dtype.base_dtype))
def _apply_sparse_duplicate_indices(self, grad, var):
delta = indexed_slices.IndexedSlices(
grad.values *
math_ops.cast(self._learning_rate_tensor, var.dtype.base_dtype),
grad.indices, grad.dense_shape)
return var.scatter_sub(delta, use_locking=self._use_locking)
def _prepare(self):
learning_rate = self._call_if_callable(self._learning_rate)
self._learning_rate_tensor = ops.convert_to_tensor(
learning_rate, name="learning_rate")
| GradientDescentOptimizer |
python | google__jax | jax/experimental/pallas/ops/tpu/ragged_paged_attention/kernel.py | {
"start": 1254,
"end": 31883
} | class ____:
"""Descriptor for async copy of multiple K/V pages from HBM."""
def __init__(
self,
pages_hbm_ref, # [total_num_pages, page_size, num_combined_kv_heads_per_blk, head_dim]
vmem_buf, # [num_kv_pages_per_blk, page_size, num_combined_kv_heads_per_blk, head_dim]
sem,
page_indices_ref, # i32[max_num_seqs, pages_per_seq]
metadata, # [seq_idx, start_page_idx, end_page_idx]
):
self._vmem_buf = vmem_buf
seq_id, start_page_idx, end_page_idx = metadata
self._async_copies = []
# TODO(jevinjiang): Only fetch dynamic shape in need! This will insert
# a bunch of if-ops. Check the performance when we have benchmarking setup.
for i in range(vmem_buf.shape[0]):
page_idx = start_page_idx + i
page_idx = jax.lax.select(page_idx < end_page_idx, page_idx, 0)
self._async_copies.append(
pltpu.make_async_copy(
pages_hbm_ref.at[page_indices_ref[seq_id, page_idx]],
vmem_buf.at[i],
sem,
)
)
def start(self):
"""Starts the async copies."""
for async_copy in self._async_copies:
async_copy.start()
def wait(self):
for async_copy in self._async_copies:
async_copy.wait()
return self._vmem_buf
def ref_ragged_paged_attention(
queries: jax.Array, # [max_num_batched_tokens, num_q_heads, head_dim]
kv_pages: jax.Array, # [total_num_pages, page_size, num_combined_kv_heads, head_dim]
kv_lens: jax.Array, # i32[max_num_seqs]
page_indices: jax.Array, # i32[max_num_seqs, pages_per_seq]
cu_q_lens: jax.Array, # i32[max_num_seqs + 1]
num_seqs: jax.Array, # i32[1],
*,
sm_scale: float = 1.0,
sliding_window: int | None = None,
soft_cap: float | None = None,
mask_value: float | None = DEFAULT_MASK_VALUE,
k_scale: float | None = None,
v_scale: float | None = None,
):
static_validate_inputs(
queries,
kv_pages,
kv_lens,
page_indices,
cu_q_lens,
num_seqs,
sm_scale=sm_scale,
k_scale=k_scale,
v_scale=v_scale,
sliding_window=sliding_window,
soft_cap=soft_cap,
mask_value=mask_value,
)
if mask_value is None:
mask_value = DEFAULT_MASK_VALUE
_, _, num_combined_kv_heads, head_dim = kv_pages.shape
assert num_combined_kv_heads % 2 == 0
num_kv_heads = num_combined_kv_heads // 2
num_q_heads = queries.shape[1]
assert num_q_heads % num_kv_heads == 0
num_query_per_kv = num_q_heads // num_kv_heads
outputs = []
for i in range(num_seqs[0]):
q_start = cu_q_lens[i]
q_end = cu_q_lens[i + 1]
q_len = q_end - q_start
kv_len = kv_lens[i]
indices = page_indices[i]
q = queries[q_start:q_end]
k = kv_pages[indices, :, 0::2, :].reshape(-1, num_kv_heads, head_dim)[
:kv_len
]
v = kv_pages[indices, :, 1::2, :].reshape(-1, num_kv_heads, head_dim)[
:kv_len
]
if k_scale is not None:
k = k.astype(jnp.float32) * k_scale
k = k.astype(q.dtype)
if v_scale is not None:
v = v.astype(jnp.float32) * v_scale
v = v.astype(q.dtype)
k = jnp.repeat(k, num_query_per_kv, axis=1)
v = jnp.repeat(v, num_query_per_kv, axis=1)
attn = jnp.einsum("qhd,khd->hqk", q, k, preferred_element_type=jnp.float32)
attn *= sm_scale
q_span = (kv_len - q_len) + jax.lax.broadcasted_iota(
jnp.int32, attn.shape, 1
)
kv_span = jax.lax.broadcasted_iota(jnp.int32, attn.shape, 2)
mask = q_span < kv_span
if sliding_window is not None:
mask = jnp.logical_or(mask, q_span - sliding_window >= kv_span)
if soft_cap is not None:
attn = soft_cap * jnp.tanh(attn / soft_cap)
attn += jnp.where(mask, mask_value, 0.0)
attn = jax.nn.softmax(attn, axis=-1).astype(v.dtype)
out = jnp.einsum("hqk,khd->qhd", attn, v).astype(queries.dtype)
outputs.append(out)
return jnp.concatenate(outputs, axis=0)
# Expect to run these checks during runtime.
def dynamic_validate_inputs(
q: jax.Array, # [max_num_batched_tokens, num_q_heads, head_dim]
kv_pages: jax.Array, # [total_num_pages, page_size, num_combined_kv_heads, head_dim]
kv_lens: jax.Array, # i32[max_num_seqs]
page_indices: jax.Array, # i32[max_num_seqs, pages_per_seq]
cu_q_lens: jax.Array, # i32[max_num_seqs + 1]
num_seqs: jax.Array, # i32[1]
*,
# These inputs are optional. If not specified, we will not validate them.
sm_scale: float | None = None,
sliding_window: int | None = None,
soft_cap: float | None = None,
mask_value: float | None = None,
k_scale: float | None = None,
v_scale: float | None = None,
# Kernel tuning params.
num_kv_pages_per_block: int | None = None,
num_queries_per_block: int | None = None,
vmem_limit_bytes: int | None = None,
):
static_validate_inputs(
q,
kv_pages,
kv_lens,
page_indices,
cu_q_lens,
num_seqs,
sm_scale=sm_scale,
sliding_window=sliding_window,
soft_cap=soft_cap,
mask_value=mask_value,
k_scale=k_scale,
v_scale=v_scale,
num_kv_pages_per_block=num_kv_pages_per_block,
num_queries_per_block=num_queries_per_block,
vmem_limit_bytes=vmem_limit_bytes,
)
max_num_batched_tokens = q.shape[0]
page_size = kv_pages.shape[1]
max_num_seqs, pages_per_seq = page_indices.shape
if num_seqs[0] > max_num_seqs:
raise ValueError(f"{num_seqs[0]=} must be less or equal to {max_num_seqs=}")
max_kv_len = jnp.max(kv_lens)
min_pages_per_seq = pl.cdiv(max_kv_len, page_size)
if pages_per_seq < min_pages_per_seq:
raise ValueError(
f"{pages_per_seq=} must be greater or equal to"
f" {min_pages_per_seq=} given {max_kv_len=} and {page_size=}."
)
if cu_q_lens[num_seqs[0]] > max_num_batched_tokens:
raise ValueError(
f"Total q tokens {cu_q_lens[num_seqs[0]]} must be less or equal to"
f" {max_num_batched_tokens=}."
)
for i in range(num_seqs[0]):
q_len = cu_q_lens[i + 1] - cu_q_lens[i]
kv_len = kv_lens[i]
if q_len > kv_len:
raise ValueError(
f"{q_len=} must be less or equal to {kv_len=} at sequence {i}."
)
# Expect to run these checks during compile time.
def static_validate_inputs(
q: jax.Array, # [max_num_batched_tokens, num_q_heads, head_dim]
kv_pages: jax.Array, # [total_num_pages, page_size, num_combined_kv_heads, head_dim]
kv_lens: jax.Array, # i32[max_num_seqs]
page_indices: jax.Array, # i32[max_num_seqs, pages_per_seq]
cu_q_lens: jax.Array, # i32[max_num_seqs + 1]
num_seqs: jax.Array, # i32[1]
*,
# These inputs are optional. If not specified, we will not validate them.
sm_scale: float | None = None,
sliding_window: int | None = None,
soft_cap: float | None = None,
mask_value: float | None = None,
k_scale: float | None = None,
v_scale: float | None = None,
# Kernel tuning params.
num_kv_pages_per_block: int | None = None,
num_queries_per_block: int | None = None,
vmem_limit_bytes: int | None = None,
):
_, num_q_heads, head_dim = q.shape
_, _, num_combined_kv_heads, head_dim_k = kv_pages.shape
assert num_combined_kv_heads % 2 == 0
assert isinstance(k_scale, float) or k_scale is None
assert isinstance(v_scale, float) or v_scale is None
num_kv_heads = num_combined_kv_heads // 2
max_num_seqs, pages_per_seq = page_indices.shape
if num_seqs.shape != (1,):
raise ValueError(f"{num_seqs.shape=} must be (1,)")
if head_dim_k != head_dim:
raise ValueError(
f"Q head_dim {head_dim} must be the same as that of K/V {head_dim_k}."
)
if kv_lens.shape != (max_num_seqs,):
raise ValueError(
f"Expected {kv_lens.shape=} to be ({max_num_seqs},) where"
" `max_num_seqs` is `page_indices.shape[0]`."
)
if cu_q_lens.shape != (max_num_seqs + 1,):
raise ValueError(
f"Expected {cu_q_lens.shape=} to be ({max_num_seqs + 1},) where"
" `max_num_seqs` is `page_indices.shape[0]`."
)
if (
kv_lens.dtype != jnp.int32
or page_indices.dtype != jnp.int32
or cu_q_lens.dtype != jnp.int32
):
raise ValueError(
"The dtype of `kv_lens`, `page_indices`, and `cu_q_lens` must be"
f" int32. Got {kv_lens.dtype=}, {page_indices.dtype=},"
f" {cu_q_lens.dtype=}."
)
if num_q_heads % num_kv_heads != 0:
raise ValueError(f"{num_q_heads=} must be divisible by {num_kv_heads=}")
if sliding_window is not None and sliding_window <= 0:
raise ValueError(f"{sliding_window=} must be positive.")
if soft_cap is not None and soft_cap == 0.0:
raise ValueError(f"{soft_cap=} must not be 0.0.")
if (
num_kv_pages_per_block is not None
and not 0 < num_kv_pages_per_block <= pages_per_seq
):
raise ValueError(
f"{num_kv_pages_per_block=} must be in range (0, {pages_per_seq}]."
)
if num_queries_per_block is not None and num_queries_per_block <= 0:
raise ValueError(f"{num_queries_per_block=} must be positive.")
if vmem_limit_bytes is not None and vmem_limit_bytes <= 0:
raise ValueError(f"{vmem_limit_bytes=} must be positive.")
del sm_scale # No constraints on sm_scale.
del mask_value # No consstraints on mask_value.
def ragged_paged_attention_kernel(
# Prefetch
kv_lens_ref, # [max_num_seqs]
page_indices_ref, # [max_num_seqs, pages_per_seq]
cu_q_lens_ref, # [max_num_seqs + 1]
seq_buf_idx_ref,
# TODO(jevinjiang): if OOM in SMEM, consider pack to other scalar refs.
num_seqs_ref,
# Input
q_ref, # [num_q_per_blk, num_q_heads_per_blk, head_dim]
kv_pages_hbm_ref, # [total_num_pages, page_size, num_combined_kv_heads, head_dim]
# Output
o_ref, # [num_q_per_blk, num_q_heads_per_blk, head_dim]
# Scratch
kv_bufs, # [2, num_kv_pages_per_blk, page_size, num_combined_kv_heads_per_blk, head_dim]
sems, # [2, 2]
l_ref, # [num_kv_heads_per_blk, num_q_per_blk * num_q_heads_per_kv_head, 128]
m_ref, # [num_kv_heads_per_blk, num_q_per_blk * num_q_heads_per_kv_head, 128]
acc_ref, # [num_q_per_blk, num_q_heads_per_blk, head_dim]
*,
sm_scale: float,
sliding_window: int | None = None,
soft_cap: float | None = None,
mask_value: float | None = DEFAULT_MASK_VALUE,
k_scale: float | None = None,
v_scale: float | None = None,
):
if mask_value is None:
mask_value = DEFAULT_MASK_VALUE
num_q_per_blk, num_q_heads_per_blk, head_dim = q_ref.shape
pages_per_seq = page_indices_ref.shape[-1]
num_seqs = num_seqs_ref[0]
_, num_kv_pages_per_blk, page_size, num_combined_kv_heads_per_blk, _ = (
kv_bufs.shape
)
num_kv_heads_per_blk = num_combined_kv_heads_per_blk // 2
num_kv_per_blk = num_kv_pages_per_blk * page_size
num_q_heads_per_kv_head = num_q_heads_per_blk // num_kv_heads_per_blk
heads_blk_idx, q_blk_idx = (
pl.program_id(0),
pl.program_id(1),
)
num_heads_blks = pl.num_programs(0)
init_seq_idx = seq_buf_idx_ref[0]
init_buf_idx = seq_buf_idx_ref[1]
q_len_start = q_blk_idx * num_q_per_blk
q_len_end = q_len_start + num_q_per_blk
def create_kv_async_copy_descriptors(
heads_blk_idx, seq_idx, kv_blk_idx, buf_idx
):
start_kv_page_idx = kv_blk_idx * num_kv_pages_per_blk
end_kv_page_idx = jnp.minimum(
pages_per_seq, pl.cdiv(kv_lens_ref[seq_idx], page_size)
)
metadata = (seq_idx, start_kv_page_idx, end_kv_page_idx)
heads_start = heads_blk_idx * num_combined_kv_heads_per_blk
async_copy_kv = MultiPageAsyncCopyDescriptor(
kv_pages_hbm_ref.at[
:, :, pl.ds(heads_start, num_combined_kv_heads_per_blk), :
],
kv_bufs.at[buf_idx],
sems.at[buf_idx],
page_indices_ref,
metadata,
)
return async_copy_kv
# TODO(jevinjiang): Add these to Mosaic:
# 1. Support arbitrary strided load/store for int4 and int8 dtype.
# 2. Support arbitrary strided load/store for any last dimension.
def strided_load_kv(ref, start, step):
packing = get_dtype_packing(ref.dtype)
if packing == 1:
return [ref[start::step, :]], [ref[start + 1 :: step, :]]
assert packing in (2, 4, 8)
assert step % packing == 0
k_list, v_list = [], []
b_start = start // packing
b_step = step // packing
b_ref = ref.bitcast(jnp.uint32)
b = b_ref[b_start::b_step, :]
# TODO(chengjiyao): use the general strided loading logic for bf16 after
# fixing the issue in mosaic's infer vector layout pass
if ref.dtype == jnp.bfloat16:
bk = b << 16
bv = b & jnp.uint32(0xFFFF0000)
k = pltpu.bitcast(bk, jnp.float32).astype(jnp.bfloat16)
v = pltpu.bitcast(bv, jnp.float32).astype(jnp.bfloat16)
k_list.append(k)
v_list.append(v)
else:
bitwidth = 32 // packing
bitcast_dst_dtype = jnp.dtype(f"uint{bitwidth}")
for i in range(0, packing, 2):
bk = b >> (i * bitwidth)
k = pltpu.bitcast(bk.astype(bitcast_dst_dtype), ref.dtype)
k_list.append(k)
bv = b >> ((i + 1) * bitwidth)
v = pltpu.bitcast(bv.astype(bitcast_dst_dtype), ref.dtype)
v_list.append(v)
return k_list, v_list
def fold_on_2nd_minor(vec):
assert vec.dtype == jnp.bfloat16 or vec.dtype == jnp.float32
assert len(vec.shape) >= 2
last_dim = vec.shape[-1]
packing = get_dtype_packing(vec.dtype)
if vec.shape[-2] % packing != 0:
vec = vec.astype(jnp.float32)
return vec.reshape(-1, last_dim)
@pl.when(heads_blk_idx + q_blk_idx == 0)
def prefetch_first_kv_blk():
async_copy_kv = create_kv_async_copy_descriptors(
heads_blk_idx, init_seq_idx, 0, init_buf_idx
)
async_copy_kv.start()
def is_cur_q_blk_needed(q_states):
done, cur_seq_idx, _ = q_states
should_run = jnp.logical_and(q_len_start < cu_q_lens_ref[num_seqs],
cur_seq_idx < num_seqs)
return jnp.logical_and(done == 0, should_run)
def compute_with_cur_q_blk(q_states):
done, cur_seq_idx, cur_buf_idx = q_states
q_start = cu_q_lens_ref[cur_seq_idx]
q_end = cu_q_lens_ref[cur_seq_idx + 1]
q_len = q_end - q_start
kv_len = kv_lens_ref[cur_seq_idx]
def get_next_prefetch_ids(
heads_blk_idx, cur_seq_idx, kv_blk_idx, cur_buf_idx
):
next_kv_blk_idx = kv_blk_idx + 1
is_last_kv_blk = next_kv_blk_idx * num_kv_per_blk >= kv_len
next_kv_blk_idx = lax.select(
is_last_kv_blk,
0,
next_kv_blk_idx,
)
is_cur_seq_end_in_cur_q_blk = q_end <= q_len_end
next_seq_idx = lax.select(
is_last_kv_blk,
lax.select(is_cur_seq_end_in_cur_q_blk, cur_seq_idx + 1, cur_seq_idx),
cur_seq_idx,
)
is_last_seq = next_seq_idx == num_seqs
next_seq_idx = lax.select(
is_last_seq,
0,
next_seq_idx,
)
next_heads_blk_idx = lax.select(
is_last_seq,
heads_blk_idx + 1,
heads_blk_idx,
)
next_buf_idx = lax.select(cur_buf_idx == 0, 1, 0)
return next_heads_blk_idx, next_seq_idx, next_kv_blk_idx, next_buf_idx
def flash_attention(
q, # [num_q_per_blk * num_q_heads_per_kv_head, head_dim]
k, # [num_kv_per_blk, head_dim]
v, # [num_kv_per_blk, head_dim]
head_l_ref, # [num_q_per_blk * num_q_heads_per_kv_head, 128]
head_m_ref, # [num_q_per_blk * num_q_heads_per_kv_head, 128]
head_acc_ref, # [num_q_per_blk, num_q_heads_per_kv_head, head_dim]
*,
kv_blk_idx,
):
assert q.shape == (
num_q_per_blk * num_q_heads_per_kv_head,
head_dim,
)
assert (
k.shape
== v.shape
== (
num_kv_per_blk,
head_dim,
)
)
assert k.dtype == v.dtype
assert (
head_m_ref.shape
== head_l_ref.shape
== (
num_q_per_blk * num_q_heads_per_kv_head,
128,
)
)
assert head_acc_ref.shape == (
num_q_per_blk,
num_q_heads_per_kv_head,
head_dim,
)
kv_len_start = kv_blk_idx * num_kv_per_blk
def masked_store(ref, val, start, end, group=1):
iota = lax.broadcasted_iota(jnp.int32, ref.shape, 0) // group
pltpu.store(ref, val, mask=jnp.logical_and(iota >= start, iota < end))
def load_with_init(ref, init_val):
return jnp.where(
kv_blk_idx == 0, jnp.full_like(ref, init_val), ref[...]
)
# kv lens will be contracting dim, we should mask out the NaNs.
kv_mask = (
lax.broadcasted_iota(jnp.int32, k.shape, 0) < kv_len - kv_len_start
)
k = jnp.where(kv_mask, k.astype(jnp.float32), 0).astype(k.dtype)
v = jnp.where(kv_mask, v.astype(jnp.float32), 0).astype(v.dtype)
qk = (
jnp.einsum("nd,md->nm", q, k, preferred_element_type=jnp.float32)
* sm_scale
)
store_start = jnp.maximum(q_start - q_len_start, 0)
store_end = jnp.minimum(q_end - q_len_start, num_q_per_blk)
row_ids = (
(kv_len - q_len)
+ q_len_start
- q_start
+ jax.lax.broadcasted_iota(
jnp.int32,
(num_q_per_blk * num_q_heads_per_kv_head, num_kv_per_blk),
0,
)
// num_q_heads_per_kv_head
)
col_ids = kv_len_start + jax.lax.broadcasted_iota(
jnp.int32,
(num_q_per_blk * num_q_heads_per_kv_head, num_kv_per_blk),
1,
)
causal_mask = row_ids < col_ids
if sliding_window is not None:
causal_mask = jnp.logical_or(causal_mask,
row_ids - sliding_window >= col_ids)
if soft_cap is not None:
qk = soft_cap * jnp.tanh(qk / soft_cap)
qk += jnp.where(causal_mask, mask_value, 0.0)
m_curr = jnp.max(qk, axis=1, keepdims=True)
s_curr = jnp.exp(qk - m_curr)
qkv = jnp.dot(s_curr, v, preferred_element_type=jnp.float32)
lm_store_shape = head_m_ref.shape
m_curr = jnp.broadcast_to(m_curr, lm_store_shape)
l_curr = jnp.broadcast_to(
s_curr.sum(axis=1, keepdims=True), lm_store_shape
)
m_prev = load_with_init(head_m_ref, -jnp.inf)
l_prev = load_with_init(head_l_ref, 0.0)
m_next = jnp.maximum(m_prev, m_curr)
masked_store(
head_m_ref, m_next, store_start, store_end, num_q_heads_per_kv_head
)
alpha = jnp.exp(m_prev - m_next)
beta = jnp.exp(m_curr - m_next)
l_alpha = alpha * l_prev
l_next = l_alpha + beta * l_curr
l_next_safe = jnp.where(l_next == 0.0, 1.0, l_next)
masked_store(
head_l_ref,
l_next_safe,
store_start,
store_end,
num_q_heads_per_kv_head,
)
def broadcast_to_shape(arr, shape):
if arr.shape == shape:
return arr
assert len(arr.shape) == len(shape)
assert arr.shape[0] == shape[0]
assert shape[1] % arr.shape[1] == 0
# no-op concatenation.
return jnp.concatenate(
[arr for _ in range(shape[1] // arr.shape[1])], axis=1
)
o_curr = load_with_init(head_acc_ref, 0.0).reshape(-1, head_dim)
l_alpha = broadcast_to_shape(l_alpha, qkv.shape)
beta = broadcast_to_shape(beta, qkv.shape)
l_next_safe = broadcast_to_shape(l_next_safe, qkv.shape)
out = lax.div(
l_alpha * o_curr + beta * qkv,
l_next_safe,
)
masked_store(
head_acc_ref,
out.reshape(head_acc_ref.shape),
store_start,
store_end,
)
def is_valid_kv_blk_in_cur_seq(kv_states):
kv_blk_idx, _ = kv_states
return kv_blk_idx * num_kv_per_blk < kv_len
def compute_with_kv_blk_in_cur_seq(kv_states):
kv_blk_idx, cur_buf_idx = kv_states
next_heads_blk_idx, next_seq_idx, next_kv_blk_idx, next_buf_idx = (
get_next_prefetch_ids(
heads_blk_idx, cur_seq_idx, kv_blk_idx, cur_buf_idx
)
)
@pl.when(next_heads_blk_idx < num_heads_blks)
def prefetch_next_kv_blk():
# TODO(jevinjiang): reuse the same buffer if it is already prefetched!
# TODO(jevinjiang): only fetch effective dynamic size to hold kv_len and
# DMA to fixed size buffer!
next_async_copy_kv = create_kv_async_copy_descriptors(
next_heads_blk_idx, next_seq_idx, next_kv_blk_idx, next_buf_idx
)
next_async_copy_kv.start()
cur_async_copy_kv = create_kv_async_copy_descriptors(
heads_blk_idx, cur_seq_idx, kv_blk_idx, cur_buf_idx
)
kv_ref = cur_async_copy_kv.wait().reshape(
num_kv_pages_per_blk * page_size * num_combined_kv_heads_per_blk,
head_dim,
)
kv_packing = get_dtype_packing(kv_ref.dtype)
# NOTE: kv_packing is divided by 2 because k and v are packed together.
kv_load_step = max(1, kv_packing // 2)
for kv_head_chunk_idx in range(0, num_kv_heads_per_blk, kv_load_step):
k_list, v_list = strided_load_kv(
kv_ref, kv_head_chunk_idx * 2, num_combined_kv_heads_per_blk
)
for step_idx in range(kv_load_step):
k = k_list[step_idx]
v = v_list[step_idx]
if k_scale is not None:
# NOTE: Conversion between arbitrary data types is not supported.
# That's why it is converted to float32 first.
k = k.astype(jnp.float32) * k_scale
k = k.astype(q_ref.dtype)
if v_scale is not None:
v = v.astype(jnp.float32) * v_scale
v = v.astype(q_ref.dtype)
kv_head_idx = kv_head_chunk_idx + step_idx
q_head_idx = kv_head_idx * num_q_heads_per_kv_head
# TODO(jevinjiang): extra handling for packed type that can start at
# unaligned position!
q = fold_on_2nd_minor(
q_ref[:, q_head_idx : q_head_idx + num_q_heads_per_kv_head, :]
)
flash_attention(
q,
k,
v,
l_ref.at[kv_head_idx],
m_ref.at[kv_head_idx],
acc_ref.at[
:, q_head_idx : q_head_idx + num_q_heads_per_kv_head, :
],
kv_blk_idx=kv_blk_idx,
)
return kv_blk_idx + 1, next_buf_idx
_, next_buf_idx = lax.while_loop(
is_valid_kv_blk_in_cur_seq,
compute_with_kv_blk_in_cur_seq,
(0, cur_buf_idx), # (kv_blk_idx, buf_idx)
)
next_seq_idx = lax.select(q_end <= q_len_end, cur_seq_idx + 1, cur_seq_idx)
done = lax.select(q_end < q_len_end, done, 1)
return done, next_seq_idx, next_buf_idx
_, seq_idx, buf_idx = lax.while_loop(
is_cur_q_blk_needed,
compute_with_cur_q_blk,
(0, init_seq_idx, init_buf_idx), # (done, seq_idx, buf_idx)
)
# Reset seq_idx for next kv_heads_blk if run out of seqs!
seq_buf_idx_ref[0] = lax.select(seq_idx < num_seqs, seq_idx, 0)
seq_buf_idx_ref[1] = buf_idx
o_ref[...] = acc_ref[...].astype(q_ref.dtype)
def get_dtype_packing(dtype):
bits = dtypes.itemsize_bits(dtype)
return 32 // bits
def get_min_heads_per_blk(
num_q_heads, num_combined_kv_heads, q_dtype, kv_dtype
):
q_packing = get_dtype_packing(q_dtype)
kv_packing = get_dtype_packing(kv_dtype)
def can_be_xla_fully_tiled(x, packing):
if x % packing != 0:
return False
x //= packing
return x in (1, 2, 4, 8) or x % 8 == 0
# TODO(jevinjiang): support unaligned number of heads!
if not can_be_xla_fully_tiled(num_combined_kv_heads, kv_packing):
raise ValueError(
f"Not implemented: {num_combined_kv_heads=} can not be XLA fully tiled."
)
assert num_combined_kv_heads % 2 == 0
num_kv_heads = num_combined_kv_heads // 2
assert num_q_heads % num_kv_heads == 0
ratio = num_q_heads // num_kv_heads
# TODO(jevinjiang): we can choose smaller tiling for packed type if large
# second minor tiling is not on.
max_combined_kv_tiling = 8 * kv_packing
min_combined_kv_heads = (
max_combined_kv_tiling
if num_combined_kv_heads % max_combined_kv_tiling == 0
else num_combined_kv_heads
)
min_q_heads = min_combined_kv_heads // 2 * ratio
if can_be_xla_fully_tiled(min_q_heads, q_packing):
return min_q_heads, min_combined_kv_heads
return num_q_heads, num_combined_kv_heads
@functools.partial(
jax.jit,
static_argnames=[
"sm_scale",
"mask_value",
"num_kv_pages_per_block",
"num_queries_per_block",
"vmem_limit_bytes",
"sliding_window",
"soft_cap",
"k_scale",
"v_scale",
],
)
def ragged_paged_attention(
q: jax.Array, # [max_num_batched_tokens, num_q_heads, head_dim]
# TODO(jevinjiang): create a write_to_kv_cache kernel!
kv_pages: jax.Array, # [total_num_pages, page_size, num_combined_kv_heads, head_dim]
kv_lens: jax.Array, # i32[max_num_seqs]
page_indices: jax.Array, # i32[max_num_seqs, pages_per_seq]
cu_q_lens: jax.Array, # i32[max_num_seqs + 1]
num_seqs: jax.Array, # i32[1]
*,
sm_scale: float = 1.0,
sliding_window: int | None = None,
soft_cap: float | None = None,
mask_value: float | None = DEFAULT_MASK_VALUE,
k_scale: float | None = None,
v_scale: float | None = None,
num_kv_pages_per_block: int | None = None,
num_queries_per_block: int | None = None,
vmem_limit_bytes: int | None = None,
):
"""Ragged paged attention that supports mixed prefill and decode.
Args:
q: concatenated all sequences' queries.
kv_pages: paged KV cache. Normally in HBM.
kv_lens: padded kv lengths. Only the first num_seqs values are valid.
page_indices: the first index indicates which page to use in the kv cache
for each sequence. Only the first num_seqs values are valid.
cu_q_lens: the cumulative sum of the effective query lengths. Similar to
kv_lens, only the first num_seqs+1 values are valid.
num_seqs: the dynamic number of sequences.
sm_scale: the softmax scale which will be applied to the Q@K^T.
sliding_window: the sliding window size for the attention.
soft_cap: the logit soft cap for the attention.
mask_value: mask value for causal mask.
k_scale: the scale for the key cache.
v_scale: the scale for the value cache.
num_kv_pages_per_block: number of kv pages to be processed in one flash
attention block in the pallas kernel.
num_queries_per_block: number of kv pages to be processed in one flash
attention block in the pallas kernel.
vmem_limit_bytes: the vmem limit for the pallas kernel.
Returns:
The output of the attention.
"""
static_validate_inputs(
q,
kv_pages,
kv_lens,
page_indices,
cu_q_lens,
num_seqs,
sm_scale=sm_scale,
sliding_window=sliding_window,
soft_cap=soft_cap,
mask_value=mask_value,
k_scale=k_scale,
v_scale=v_scale,
num_kv_pages_per_block=num_kv_pages_per_block,
num_queries_per_block=num_queries_per_block,
vmem_limit_bytes=vmem_limit_bytes,
)
if mask_value is None:
mask_value = DEFAULT_MASK_VALUE
num_q_tokens, num_q_heads, head_dim = q.shape
_, page_size, num_combined_kv_heads, _ = kv_pages.shape
assert num_combined_kv_heads % 2 == 0
num_kv_heads = num_combined_kv_heads // 2
_, pages_per_seq = page_indices.shape
num_q_heads_per_blk, num_combined_kv_heads_per_blk = get_min_heads_per_blk(
num_q_heads, num_combined_kv_heads, q.dtype, kv_pages.dtype
)
num_q_per_blk = num_queries_per_block
num_kv_pages_per_blk = num_kv_pages_per_block
if num_q_per_blk is None or num_kv_pages_per_blk is None:
num_kv_pages_per_blk, num_q_per_blk = get_tuned_block_sizes(
q.dtype,
kv_pages.dtype,
num_q_heads_per_blk,
num_combined_kv_heads_per_blk // 2,
head_dim,
page_size,
num_q_tokens,
pages_per_seq,
)
num_q_heads_per_kv_head = num_q_heads // num_kv_heads
num_q_blks = pl.cdiv(num_q_tokens, num_q_per_blk)
assert num_combined_kv_heads_per_blk % 2 == 0
num_kv_heads_per_blk = num_combined_kv_heads_per_blk // 2
assert num_q_heads_per_blk % num_q_heads_per_kv_head == 0
num_heads_blks = num_q_heads // num_q_heads_per_blk
grid = (num_heads_blks, num_q_blks)
def q_index_map(heads_blk_idx, q_blk_idx, *_):
return (q_blk_idx, heads_blk_idx, 0)
q_block_spec = pl.BlockSpec(
(num_q_per_blk, num_q_heads_per_blk, head_dim),
q_index_map,
)
in_specs = [
q_block_spec,
pl.BlockSpec(memory_space=pltpu.ANY),
]
out_specs = q_block_spec
lm_scratch = pltpu.VMEM(
# TODO(jevinjiang): use 128 instead of 1 is due to Mosaic does not support
# unaligned slicing!
(num_kv_heads_per_blk, num_q_per_blk * num_q_heads_per_kv_head, 128),
jnp.float32,
)
acc_scratch = pltpu.VMEM(
(num_q_per_blk, num_q_heads_per_blk, head_dim),
jnp.float32,
)
double_buf_scratch = pltpu.VMEM(
(
2, # For double buffering during DMA copies.
num_kv_pages_per_blk,
page_size,
num_combined_kv_heads_per_blk,
head_dim,
),
kv_pages.dtype,
)
scratch_shapes = [
double_buf_scratch, # kv_bufs
pltpu.SemaphoreType.DMA((2,)), # Semaphores for double buffers.
lm_scratch, # l_ref
lm_scratch, # m_ref
acc_scratch,
]
scalar_prefetches = (
kv_lens,
page_indices,
cu_q_lens,
jnp.array((0, 0), jnp.int32), # seq_idx, buf_idx
num_seqs,
)
kernel = pl.pallas_call(
functools.partial(
ragged_paged_attention_kernel,
sm_scale=sm_scale,
sliding_window=sliding_window,
soft_cap=soft_cap,
mask_value=mask_value,
k_scale=k_scale,
v_scale=v_scale,
),
grid_spec=pltpu.PrefetchScalarGridSpec(
num_scalar_prefetch=len(scalar_prefetches),
in_specs=in_specs,
out_specs=out_specs,
grid=grid,
scratch_shapes=scratch_shapes,
),
compiler_params=pltpu.CompilerParams(
dimension_semantics=(
"arbitrary",
"arbitrary",
),
vmem_limit_bytes=vmem_limit_bytes,
),
out_shape=jax.ShapeDtypeStruct(shape=q.shape, dtype=q.dtype),
name="ragged_paged_attention_kernel",
)
return kernel(*scalar_prefetches, q, kv_pages)
| MultiPageAsyncCopyDescriptor |
python | kamyu104__LeetCode-Solutions | Python/interleaving-string.py | {
"start": 1667,
"end": 2439
} | class ____(object):
# @return a boolean
def isInterleave(self, s1, s2, s3):
self.match = {}
if len(s1) + len(s2) != len(s3):
return False
return self.isInterleaveRecu(s1, s2, s3, 0, 0, 0)
def isInterleaveRecu(self, s1, s2, s3, a, b, c):
if repr([a, b]) in self.match.keys():
return self.match[repr([a, b])]
if c == len(s3):
return True
result = False
if a < len(s1) and s1[a] == s3[c]:
result = result or self.isInterleaveRecu(s1, s2, s3, a + 1, b, c + 1)
if b < len(s2) and s2[b] == s3[c]:
result = result or self.isInterleaveRecu(s1, s2, s3, a, b + 1, c + 1)
self.match[repr([a, b])] = result
return result
| Solution3 |
python | facebook__pyre-check | client/commands/tests/validate_models_test.py | {
"start": 379,
"end": 3417
} | class ____(testslide.TestCase):
def test_parse_response(self) -> None:
def assert_parsed(
payload: object, expected: Iterable[error.ModelVerificationError]
) -> None:
self.assertEqual(
validate_models.parse_validation_errors_response(payload),
list(expected),
)
def assert_not_parsed(payload: object) -> None:
with self.assertRaises(daemon_query.InvalidQueryResponse):
validate_models.parse_validation_errors_response(payload)
assert_not_parsed(42)
assert_not_parsed("derp")
assert_not_parsed({})
assert_not_parsed({"no_response": 42})
assert_not_parsed({"response": 42})
assert_not_parsed({"response": {"errors": 42}})
assert_parsed({"response": {}}, expected=[])
assert_parsed({"response": {"errors": []}}, expected=[])
with tempfile.TemporaryDirectory() as root:
root_path = Path(root).resolve()
with setup.switch_working_directory(root_path):
assert_parsed(
{
"response": {
"errors": [
{
"line": 1,
"column": 1,
"stop_line": 2,
"stop_column": 2,
"path": str(root_path / "test.py"),
"description": "Some description",
"code": 1001,
},
{
"line": 3,
"column": 3,
"stop_line": 4,
"stop_column": 4,
"path": None,
"description": "Some description",
"code": 1001,
},
]
}
},
expected=[
error.ModelVerificationError(
line=3,
column=3,
stop_line=4,
stop_column=4,
path=None,
description="Some description",
code=1001,
),
error.ModelVerificationError(
line=1,
column=1,
stop_line=2,
stop_column=2,
path=Path("test.py"),
description="Some description",
code=1001,
),
],
)
| ValidateModelsTest |
python | tensorflow__tensorflow | tensorflow/python/keras/layers/dense_attention.py | {
"start": 14387,
"end": 20834
} | class ____(BaseDenseAttention):
"""Additive attention layer, a.k.a. Bahdanau-style attention.
Inputs are `query` tensor of shape `[batch_size, Tq, dim]`, `value` tensor of
shape `[batch_size, Tv, dim]` and `key` tensor of shape
`[batch_size, Tv, dim]`. The calculation follows the steps:
1. Reshape `query` and `value` into shapes `[batch_size, Tq, 1, dim]`
and `[batch_size, 1, Tv, dim]` respectively.
2. Calculate scores with shape `[batch_size, Tq, Tv]` as a non-linear
sum: `scores = tf.reduce_sum(tf.tanh(query + value), axis=-1)`
3. Use scores to calculate a distribution with shape
`[batch_size, Tq, Tv]`: `distribution = tf.nn.softmax(scores)`.
4. Use `distribution` to create a linear combination of `value` with
shape `[batch_size, Tq, dim]`:
`return tf.matmul(distribution, value)`.
Args:
use_scale: If `True`, will create a variable to scale the attention scores.
causal: Boolean. Set to `True` for decoder self-attention. Adds a mask such
that position `i` cannot attend to positions `j > i`. This prevents the
flow of information from the future towards the past.
dropout: Float between 0 and 1. Fraction of the units to drop for the
attention scores.
Call Args:
inputs: List of the following tensors:
* query: Query `Tensor` of shape `[batch_size, Tq, dim]`.
* value: Value `Tensor` of shape `[batch_size, Tv, dim]`.
* key: Optional key `Tensor` of shape `[batch_size, Tv, dim]`. If not
given, will use `value` for both `key` and `value`, which is the
most common case.
mask: List of the following tensors:
* query_mask: A boolean mask `Tensor` of shape `[batch_size, Tq]`.
If given, the output will be zero at the positions where
`mask==False`.
* value_mask: A boolean mask `Tensor` of shape `[batch_size, Tv]`.
If given, will apply the mask such that values at positions where
`mask==False` do not contribute to the result.
training: Python boolean indicating whether the layer should behave in
training mode (adding dropout) or in inference mode (no dropout).
return_attention_scores: bool, it `True`, returns the attention scores
(after masking and softmax) as an additional output argument.
Output:
Attention outputs of shape `[batch_size, Tq, dim]`.
[Optional] Attention scores after masking and softmax with shape
`[batch_size, Tq, Tv]`.
The meaning of `query`, `value` and `key` depend on the application. In the
case of text similarity, for example, `query` is the sequence embeddings of
the first piece of text and `value` is the sequence embeddings of the second
piece of text. `key` is usually the same tensor as `value`.
Here is a code example for using `AdditiveAttention` in a CNN+Attention
network:
```python
# Variable-length int sequences.
query_input = tf.keras.Input(shape=(None,), dtype='int32')
value_input = tf.keras.Input(shape=(None,), dtype='int32')
# Embedding lookup.
token_embedding = tf.keras.layers.Embedding(max_tokens, dimension)
# Query embeddings of shape [batch_size, Tq, dimension].
query_embeddings = token_embedding(query_input)
# Value embeddings of shape [batch_size, Tv, dimension].
value_embeddings = token_embedding(value_input)
# CNN layer.
cnn_layer = tf.keras.layers.Conv1D(
filters=100,
kernel_size=4,
# Use 'same' padding so outputs have the same shape as inputs.
padding='same')
# Query encoding of shape [batch_size, Tq, filters].
query_seq_encoding = cnn_layer(query_embeddings)
# Value encoding of shape [batch_size, Tv, filters].
value_seq_encoding = cnn_layer(value_embeddings)
# Query-value attention of shape [batch_size, Tq, filters].
query_value_attention_seq = tf.keras.layers.AdditiveAttention()(
[query_seq_encoding, value_seq_encoding])
# Reduce over the sequence axis to produce encodings of shape
# [batch_size, filters].
query_encoding = tf.keras.layers.GlobalAveragePooling1D()(
query_seq_encoding)
query_value_attention = tf.keras.layers.GlobalAveragePooling1D()(
query_value_attention_seq)
# Concatenate query and document encodings to produce a DNN input layer.
input_layer = tf.keras.layers.Concatenate()(
[query_encoding, query_value_attention])
# Add DNN layers, and create Model.
# ...
```
"""
def __init__(self, use_scale=True, **kwargs):
super(AdditiveAttention, self).__init__(**kwargs)
self.use_scale = use_scale
def build(self, input_shape):
v_shape = tensor_shape.TensorShape(input_shape[1])
dim = v_shape[-1]
if isinstance(dim, tensor_shape.Dimension):
dim = dim.value
if self.use_scale:
self.scale = self.add_weight(
name='scale',
shape=[dim],
initializer=init_ops.glorot_uniform_initializer(),
dtype=self.dtype,
trainable=True)
else:
self.scale = None
super(AdditiveAttention, self).build(input_shape)
def _calculate_scores(self, query, key):
"""Calculates attention scores as a nonlinear sum of query and key.
Args:
query: Query tensor of shape `[batch_size, Tq, dim]`.
key: Key tensor of shape `[batch_size, Tv, dim]`.
Returns:
Tensor of shape `[batch_size, Tq, Tv]`.
"""
# Reshape tensors to enable broadcasting.
# Reshape into [batch_size, Tq, 1, dim].
q_reshaped = array_ops.expand_dims(query, axis=-2)
# Reshape into [batch_size, 1, Tv, dim].
k_reshaped = array_ops.expand_dims(key, axis=-3)
if self.use_scale:
scale = self.scale
else:
scale = 1.
return math_ops.reduce_sum(
scale * math_ops.tanh(q_reshaped + k_reshaped), axis=-1)
def get_config(self):
config = {'use_scale': self.use_scale}
base_config = super(AdditiveAttention, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def _lower_triangular_mask(shape):
"""Creates a lower-triangular boolean mask over the last 2 dimensions."""
row_index = math_ops.cumsum(
array_ops.ones(shape=shape, dtype=dtypes.int32), axis=-2)
col_index = math_ops.cumsum(
array_ops.ones(shape=shape, dtype=dtypes.int32), axis=-1)
return math_ops.greater_equal(row_index, col_index)
def _merge_masks(x, y):
if x is None:
return y
if y is None:
return x
return math_ops.logical_and(x, y)
| AdditiveAttention |
python | gevent__gevent | src/greentest/3.10/test_ssl.py | {
"start": 79791,
"end": 81474
} | class ____(unittest.TestCase):
def test_private_init(self):
bio = ssl.MemoryBIO()
with self.assertRaisesRegex(TypeError, "public constructor"):
ssl.SSLObject(bio, bio)
def test_unwrap(self):
client_ctx, server_ctx, hostname = testing_context()
c_in = ssl.MemoryBIO()
c_out = ssl.MemoryBIO()
s_in = ssl.MemoryBIO()
s_out = ssl.MemoryBIO()
client = client_ctx.wrap_bio(c_in, c_out, server_hostname=hostname)
server = server_ctx.wrap_bio(s_in, s_out, server_side=True)
# Loop on the handshake for a bit to get it settled
for _ in range(5):
try:
client.do_handshake()
except ssl.SSLWantReadError:
pass
if c_out.pending:
s_in.write(c_out.read())
try:
server.do_handshake()
except ssl.SSLWantReadError:
pass
if s_out.pending:
c_in.write(s_out.read())
# Now the handshakes should be complete (don't raise WantReadError)
client.do_handshake()
server.do_handshake()
# Now if we unwrap one side unilaterally, it should send close-notify
# and raise WantReadError:
with self.assertRaises(ssl.SSLWantReadError):
client.unwrap()
# But server.unwrap() does not raise, because it reads the client's
# close-notify:
s_in.write(c_out.read())
server.unwrap()
# And now that the client gets the server's close-notify, it doesn't
# raise either.
c_in.write(s_out.read())
client.unwrap()
| SSLObjectTests |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/constrainedTypeVar17.py | {
"start": 630,
"end": 712
} | class ____:
def write(self, __buffer: ReadableBuffer) -> int: ...
| BufferedWriter |
python | ansible__ansible | test/units/playbook/test_helpers.py | {
"start": 15917,
"end": 17417
} | class ____(unittest.TestCase, MixinForMocks):
def setUp(self):
self._setup()
def test_ds_not_list(self):
ds = {}
mock_play = MagicMock(name='MockPlay')
self.assertRaises(AssertionError, helpers.load_list_of_blocks,
ds, mock_play, parent_block=None, role=None, task_include=None, use_handlers=False, variable_manager=None, loader=None)
def test_empty_block(self):
ds = [{}]
mock_play = MagicMock(name='MockPlay')
self.assertRaisesRegex(errors.AnsibleParserError,
"no module/action detected in task",
helpers.load_list_of_blocks,
ds, mock_play,
parent_block=None,
role=None,
task_include=None,
use_handlers=False,
variable_manager=None,
loader=None)
def test_block_unknown_action(self):
ds = [{'action': 'foo', 'collections': []}]
mock_play = MagicMock(name='MockPlay')
res = helpers.load_list_of_blocks(ds, mock_play, parent_block=None, role=None, task_include=None, use_handlers=False, variable_manager=None,
loader=None)
self.assertIsInstance(res, list)
for block in res:
self.assertIsInstance(block, Block)
| TestLoadListOfBlocks |
python | getsentry__sentry | src/sentry/preprod/api/models/project_preprod_build_details_models.py | {
"start": 1062,
"end": 1371
} | class ____(BaseModel):
head_sha: str | None = None
base_sha: str | None = None
provider: str | None = None
head_repo_name: str | None = None
base_repo_name: str | None = None
head_ref: str | None = None
base_ref: str | None = None
pr_number: int | None = None
| BuildDetailsVcsInfo |
python | django__django | django/db/models/fields/related.py | {
"start": 50560,
"end": 54116
} | class ____(ForeignKey):
"""
A OneToOneField is essentially the same as a ForeignKey, with the exception
that it always carries a "unique" constraint with it and the reverse
relation always returns the object pointed to (since there will only ever
be one), rather than returning a list.
"""
# Field flags
many_to_many = False
many_to_one = False
one_to_many = False
one_to_one = True
related_accessor_class = ReverseOneToOneDescriptor
forward_related_accessor_class = ForwardOneToOneDescriptor
rel_class = OneToOneRel
description = _("One-to-one relationship")
def __init__(self, to, on_delete, to_field=None, **kwargs):
kwargs["unique"] = True
super().__init__(to, on_delete, to_field=to_field, **kwargs)
def deconstruct(self):
name, path, args, kwargs = super().deconstruct()
if "unique" in kwargs:
del kwargs["unique"]
return name, path, args, kwargs
def formfield(self, **kwargs):
if self.remote_field.parent_link:
return None
return super().formfield(**kwargs)
def save_form_data(self, instance, data):
if isinstance(data, self.remote_field.model):
setattr(instance, self.name, data)
else:
setattr(instance, self.attname, data)
# Remote field object must be cleared otherwise Model.save()
# will reassign attname using the related object pk.
if data is None:
setattr(instance, self.name, data)
def _check_unique(self, **kwargs):
# Override ForeignKey since check isn't applicable here.
return []
def create_many_to_many_intermediary_model(field, klass):
from django.db import models
def set_managed(model, related, through):
through._meta.managed = model._meta.managed or related._meta.managed
to_model = resolve_relation(klass, field.remote_field.model)
name = "%s_%s" % (klass._meta.object_name, field.name)
lazy_related_operation(set_managed, klass, to_model, name)
to = make_model_tuple(to_model)[1]
from_ = klass._meta.model_name
if to == from_:
to = "to_%s" % to
from_ = "from_%s" % from_
meta = type(
"Meta",
(),
{
"db_table": field._get_m2m_db_table(klass._meta),
"auto_created": klass,
"app_label": klass._meta.app_label,
"db_tablespace": klass._meta.db_tablespace,
"unique_together": (from_, to),
"verbose_name": _("%(from)s-%(to)s relationship")
% {"from": from_, "to": to},
"verbose_name_plural": _("%(from)s-%(to)s relationships")
% {"from": from_, "to": to},
"apps": field.model._meta.apps,
},
)
# Construct and return the new class.
return type(
name,
(models.Model,),
{
"Meta": meta,
"__module__": klass.__module__,
from_: models.ForeignKey(
klass,
related_name="%s+" % name,
db_tablespace=field.db_tablespace,
db_constraint=field.remote_field.db_constraint,
on_delete=CASCADE,
),
to: models.ForeignKey(
to_model,
related_name="%s+" % name,
db_tablespace=field.db_tablespace,
db_constraint=field.remote_field.db_constraint,
on_delete=CASCADE,
),
},
)
| OneToOneField |
python | pandas-dev__pandas | pandas/tests/series/test_arithmetic.py | {
"start": 822,
"end": 6223
} | class ____:
@pytest.mark.parametrize(
"ts",
[
(lambda x: x, lambda x: x * 2, False),
(lambda x: x, lambda x: x[::2], False),
(lambda x: x, lambda x: 5, True),
(
lambda x: Series(range(10), dtype=np.float64),
lambda x: Series(range(10), dtype=np.float64),
True,
),
],
)
@pytest.mark.parametrize(
"opname", ["add", "sub", "mul", "floordiv", "truediv", "pow"]
)
def test_flex_method_equivalence(self, opname, ts):
# check that Series.{opname} behaves like Series.__{opname}__,
tser = Series(
np.arange(20, dtype=np.float64),
index=date_range("2020-01-01", periods=20),
name="ts",
)
series = ts[0](tser)
other = ts[1](tser)
check_reverse = ts[2]
op = getattr(Series, opname)
alt = getattr(operator, opname)
result = op(series, other)
expected = alt(series, other)
tm.assert_almost_equal(result, expected)
if check_reverse:
rop = getattr(Series, "r" + opname)
result = rop(series, other)
expected = alt(other, series)
tm.assert_almost_equal(result, expected)
def test_flex_method_subclass_metadata_preservation(self, all_arithmetic_operators):
# GH 13208
class MySeries(Series):
_metadata = ["x"]
@property
def _constructor(self):
return MySeries
opname = all_arithmetic_operators
op = getattr(Series, opname)
m = MySeries([1, 2, 3], name="test")
m.x = 42
result = op(m, 1)
assert result.x == 42
def test_flex_add_scalar_fill_value(self):
# GH12723
ser = Series([0, 1, np.nan, 3, 4, 5])
exp = ser.fillna(0).add(2)
res = ser.add(2, fill_value=0)
tm.assert_series_equal(res, exp)
pairings = [(Series.div, operator.truediv, 1), (Series.rdiv, ops.rtruediv, 1)]
for op in ["add", "sub", "mul", "pow", "truediv", "floordiv"]:
fv = 0
lop = getattr(Series, op)
lequiv = getattr(operator, op)
rop = getattr(Series, "r" + op)
# bind op at definition time...
requiv = lambda x, y, op=op: getattr(operator, op)(y, x)
pairings.append((lop, lequiv, fv))
pairings.append((rop, requiv, fv))
@pytest.mark.parametrize("op, equiv_op, fv", pairings)
def test_operators_combine(self, op, equiv_op, fv):
def _check_fill(meth, op, a, b, fill_value=0):
exp_index = a.index.union(b.index)
a = a.reindex(exp_index)
b = b.reindex(exp_index)
amask = isna(a)
bmask = isna(b)
exp_values = []
for i in range(len(exp_index)):
with np.errstate(all="ignore"):
if amask[i]:
if bmask[i]:
exp_values.append(np.nan)
continue
exp_values.append(op(fill_value, b[i]))
elif bmask[i]:
if amask[i]:
exp_values.append(np.nan)
continue
exp_values.append(op(a[i], fill_value))
else:
exp_values.append(op(a[i], b[i]))
result = meth(a, b, fill_value=fill_value)
expected = Series(exp_values, exp_index)
tm.assert_series_equal(result, expected)
a = Series([np.nan, 1.0, 2.0, 3.0, np.nan], index=np.arange(5))
b = Series([np.nan, 1, np.nan, 3, np.nan, 4.0], index=np.arange(6))
result = op(a, b)
exp = equiv_op(a, b)
tm.assert_series_equal(result, exp)
_check_fill(op, equiv_op, a, b, fill_value=fv)
# should accept axis=0 or axis='rows'
op(a, b, axis=0)
@pytest.mark.parametrize("kind", ["datetime", "timedelta"])
def test_rhs_extension_array_sub_with_fill_value(self, kind):
# GH:62467
if kind == "datetime":
left = Series(
[pd.Timestamp("2025-08-20"), pd.Timestamp("2025-08-21")],
dtype=np.dtype("datetime64[ns]"),
)
else:
left = Series(
[Timedelta(days=1), Timedelta(days=2)],
dtype=np.dtype("timedelta64[ns]"),
)
right = (
left._values
) # DatetimeArray or TimedeltaArray which is an ExtensionArray
result = left.sub(right, fill_value=left.iloc[0])
expected = Series(np.zeros(len(left), dtype=np.dtype("timedelta64[ns]")))
tm.assert_series_equal(result, expected)
def test_flex_disallows_dataframe(self):
# GH#46179
df = pd.DataFrame(
{2010: [1], 2020: [3]},
index=pd.MultiIndex.from_product([["a"], ["b"]], names=["scen", "mod"]),
)
ser = Series(
[10.0, 20.0, 30.0],
index=pd.MultiIndex.from_product(
[["a"], ["b"], [0, 1, 2]], names=["scen", "mod", "id"]
),
)
msg = "Series.add does not support a DataFrame `other`"
with pytest.raises(TypeError, match=msg):
ser.add(df, axis=0)
| TestSeriesFlexArithmetic |
python | huggingface__transformers | src/transformers/models/wav2vec2/modeling_wav2vec2.py | {
"start": 23208,
"end": 24576
} | class ____(GradientCheckpointingLayer):
def __init__(self, config):
super().__init__()
self.attention = Wav2Vec2Attention(
embed_dim=config.hidden_size,
num_heads=config.num_attention_heads,
dropout=config.attention_dropout,
is_decoder=False,
config=config,
)
self.dropout = nn.Dropout(config.hidden_dropout)
self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.feed_forward = Wav2Vec2FeedForward(config)
self.final_layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
def forward(self, hidden_states, attention_mask=None, output_attentions=False):
attn_residual = hidden_states
hidden_states, attn_weights, _ = self.attention(
hidden_states, attention_mask=attention_mask, output_attentions=output_attentions
)
hidden_states = self.dropout(hidden_states)
hidden_states = attn_residual + hidden_states
hidden_states = self.layer_norm(hidden_states)
hidden_states = hidden_states + self.feed_forward(hidden_states)
hidden_states = self.final_layer_norm(hidden_states)
outputs = (hidden_states,)
if output_attentions:
outputs += (attn_weights,)
return outputs
| Wav2Vec2EncoderLayer |
python | ray-project__ray | python/ray/autoscaler/v2/tests/test_node_provider.py | {
"start": 2414,
"end": 3917
} | class ____(CloudInstanceProviderTesterBase):
def __init__(self, **kwargs):
self.config_reader = FileConfigReader(
get_test_config_path("test_ray_complex.yaml"), skip_content_hash=True
)
self.config = self.config_reader.get_cached_autoscaling_config()
self.ray_session = None
os.environ["RAY_FAKE_CLUSTER"] = "1"
provider_config = self.config.get_provider_config()
# This is a bit hacky but we need a fake head node.
self.ray_session = ray.init()
provider_config["gcs_address"] = self.ray_session.address_info["gcs_address"]
provider_config["head_node_id"] = self.ray_session.address_info["node_id"]
provider_config["launch_multiple"] = True
self.base_provider = FakeMultiNodeProvider(
provider_config,
cluster_name="test",
)
provider = NodeProviderAdapter(
self.base_provider,
self.config_reader,
)
super().__init__(provider, self.config)
def get_non_terminated(self):
nodes = self.inner_provider.get_non_terminated()
nodes.pop(self.ray_session.address_info["node_id"], None)
return nodes
def shutdown(self):
ray.shutdown()
def _add_creation_error(self, e: Exception):
self.base_provider._test_set_creation_error(e)
def _add_termination_errors(self, e: Exception):
self.base_provider._test_add_termination_errors(e)
| FakeMultiNodeProviderTester |
python | numba__numba | numba/tests/test_unicode.py | {
"start": 93279,
"end": 97319
} | class ____(BaseTest):
def test_ord(self):
pyfunc = ord_usecase
cfunc = njit(pyfunc)
for ex in UNICODE_EXAMPLES:
for a in ex:
self.assertPreciseEqual(pyfunc(a), cfunc(a))
def test_ord_invalid(self):
self.disable_leak_check()
pyfunc = ord_usecase
cfunc = njit(pyfunc)
# wrong number of chars
for func in (pyfunc, cfunc):
for ch in ('', 'abc'):
with self.assertRaises(TypeError) as raises:
func(ch)
self.assertIn('ord() expected a character',
str(raises.exception))
# wrong type
with self.assertRaises(TypingError) as raises:
cfunc(1.23)
self.assertIn(_header_lead, str(raises.exception))
def test_chr(self):
pyfunc = chr_usecase
cfunc = njit(pyfunc)
for ex in UNICODE_EXAMPLES:
for x in ex:
a = ord(x)
self.assertPreciseEqual(pyfunc(a), cfunc(a))
# test upper/lower bounds
for a in (0x0, _MAX_UNICODE):
self.assertPreciseEqual(pyfunc(a), cfunc(a))
def test_chr_invalid(self):
pyfunc = chr_usecase
cfunc = njit(pyfunc)
# value negative/>_MAX_UNICODE
for func in (pyfunc, cfunc):
for v in (-2, _MAX_UNICODE + 1):
with self.assertRaises(ValueError) as raises:
func(v)
self.assertIn("chr() arg not in range", str(raises.exception))
# wrong type
with self.assertRaises(TypingError) as raises:
cfunc('abc')
self.assertIn(_header_lead, str(raises.exception))
def test_unicode_type_mro(self):
# see issue #5635
def bar(x):
return True
@overload(bar)
def ol_bar(x):
ok = False
if isinstance(x, types.UnicodeType):
if isinstance(x, types.Hashable):
ok = True
return lambda x: ok
@njit
def foo(strinst):
return bar(strinst)
inst = "abc"
self.assertEqual(foo.py_func(inst), foo(inst))
self.assertIn(types.Hashable, types.unicode_type.__class__.__mro__)
def test_f_strings(self):
"""test f-string support, which requires bytecode handling
"""
# requires formatting (FORMAT_VALUE) and concatenation (BUILD_STRINGS)
def impl1(a):
return f"AA_{a + 3}_B"
# does not require concatenation
def impl2(a):
return f"{a + 2}"
# no expression
def impl3(a):
return f"ABC_{a}"
# format spec not allowed
def impl4(a):
return f"ABC_{a:0}"
# corner case: empty string
def impl5():
return f"" # noqa: F541
self.assertEqual(impl1(3), njit(impl1)(3))
self.assertEqual(impl2(2), njit(impl2)(2))
# string input
self.assertEqual(impl3("DE"), njit(impl3)("DE"))
# check output when there's no __str__ or __repr__ defined
list_arg = ["A", "B"]
got = njit(impl3)(list_arg)
expected = f"ABC_<object type:{typeof(list_arg)}>"
self.assertEqual(got, expected)
# check error when format spec provided
unsupported_errors = (UnsupportedError, UnsupportedBytecodeError)
with self.assertRaises(unsupported_errors) as raises:
njit(impl4)(["A", "B"])
if PYVERSION in ((3, 13), (3, 14)):
msg = "Use of unsupported opcode (FORMAT_WITH_SPEC)"
self.assertIn(msg, str(raises.exception))
elif PYVERSION in ((3, 10), (3, 11), (3, 12)):
msg = "format spec in f-strings not supported yet"
self.assertIn(msg, str(raises.exception))
else:
raise NotImplementedError(PYVERSION)
self.assertEqual(impl5(), njit(impl5)())
if __name__ == '__main__':
unittest.main()
| TestUnicodeAuxillary |
python | doocs__leetcode | solution/2300-2399/2319.Check if Matrix Is X-Matrix/Solution.py | {
"start": 0,
"end": 352
} | class ____:
def checkXMatrix(self, grid: List[List[int]]) -> bool:
for i, row in enumerate(grid):
for j, v in enumerate(row):
if i == j or i + j == len(grid) - 1:
if v == 0:
return False
elif v:
return False
return True
| Solution |
python | mkdocs__mkdocs | mkdocs/tests/config/config_options_tests.py | {
"start": 67163,
"end": 67275
} | class ____(Config):
enabled = c.Type(bool, default=True)
bar = c.Type(int, default=0)
| _EnabledPluginConfig |
python | python-openxml__python-docx | tests/test_table.py | {
"start": 23103,
"end": 30059
} | class ____:
"""Unit-test suite for `docx.table._Row` objects."""
@pytest.mark.parametrize(
("tr_cxml", "expected_value"),
[
("w:tr", 0),
("w:tr/w:trPr", 0),
("w:tr/w:trPr/w:gridAfter{w:val=0}", 0),
("w:tr/w:trPr/w:gridAfter{w:val=4}", 4),
],
)
def it_knows_its_grid_cols_after(self, tr_cxml: str, expected_value: int | None, parent_: Mock):
row = _Row(cast(CT_Row, element(tr_cxml)), parent_)
assert row.grid_cols_after == expected_value
@pytest.mark.parametrize(
("tr_cxml", "expected_value"),
[
("w:tr", 0),
("w:tr/w:trPr", 0),
("w:tr/w:trPr/w:gridBefore{w:val=0}", 0),
("w:tr/w:trPr/w:gridBefore{w:val=3}", 3),
],
)
def it_knows_its_grid_cols_before(
self, tr_cxml: str, expected_value: int | None, parent_: Mock
):
row = _Row(cast(CT_Row, element(tr_cxml)), parent_)
assert row.grid_cols_before == expected_value
@pytest.mark.parametrize(
("tr_cxml", "expected_value"),
[
("w:tr", None),
("w:tr/w:trPr", None),
("w:tr/w:trPr/w:trHeight", None),
("w:tr/w:trPr/w:trHeight{w:val=0}", 0),
("w:tr/w:trPr/w:trHeight{w:val=1440}", 914400),
],
)
def it_knows_its_height(self, tr_cxml: str, expected_value: int | None, parent_: Mock):
row = _Row(cast(CT_Row, element(tr_cxml)), parent_)
assert row.height == expected_value
@pytest.mark.parametrize(
("tr_cxml", "new_value", "expected_cxml"),
[
("w:tr", Inches(1), "w:tr/w:trPr/w:trHeight{w:val=1440}"),
("w:tr/w:trPr", Inches(1), "w:tr/w:trPr/w:trHeight{w:val=1440}"),
("w:tr/w:trPr/w:trHeight", Inches(1), "w:tr/w:trPr/w:trHeight{w:val=1440}"),
(
"w:tr/w:trPr/w:trHeight{w:val=1440}",
Inches(2),
"w:tr/w:trPr/w:trHeight{w:val=2880}",
),
("w:tr/w:trPr/w:trHeight{w:val=2880}", None, "w:tr/w:trPr/w:trHeight"),
("w:tr", None, "w:tr/w:trPr"),
("w:tr/w:trPr", None, "w:tr/w:trPr"),
("w:tr/w:trPr/w:trHeight", None, "w:tr/w:trPr/w:trHeight"),
],
)
def it_can_change_its_height(
self, tr_cxml: str, new_value: Length | None, expected_cxml: str, parent_: Mock
):
row = _Row(cast(CT_Row, element(tr_cxml)), parent_)
row.height = new_value
assert row._tr.xml == xml(expected_cxml)
@pytest.mark.parametrize(
("tr_cxml", "expected_value"),
[
("w:tr", None),
("w:tr/w:trPr", None),
("w:tr/w:trPr/w:trHeight{w:val=0, w:hRule=auto}", WD_ROW_HEIGHT.AUTO),
(
"w:tr/w:trPr/w:trHeight{w:val=1440, w:hRule=atLeast}",
WD_ROW_HEIGHT.AT_LEAST,
),
(
"w:tr/w:trPr/w:trHeight{w:val=2880, w:hRule=exact}",
WD_ROW_HEIGHT.EXACTLY,
),
],
)
def it_knows_its_height_rule(
self, tr_cxml: str, expected_value: WD_ROW_HEIGHT | None, parent_: Mock
):
row = _Row(cast(CT_Row, element(tr_cxml)), parent_)
assert row.height_rule == expected_value
@pytest.mark.parametrize(
("tr_cxml", "new_value", "expected_cxml"),
[
("w:tr", WD_ROW_HEIGHT.AUTO, "w:tr/w:trPr/w:trHeight{w:hRule=auto}"),
(
"w:tr/w:trPr",
WD_ROW_HEIGHT.AT_LEAST,
"w:tr/w:trPr/w:trHeight{w:hRule=atLeast}",
),
(
"w:tr/w:trPr/w:trHeight",
WD_ROW_HEIGHT.EXACTLY,
"w:tr/w:trPr/w:trHeight{w:hRule=exact}",
),
(
"w:tr/w:trPr/w:trHeight{w:val=1440, w:hRule=exact}",
WD_ROW_HEIGHT.AUTO,
"w:tr/w:trPr/w:trHeight{w:val=1440, w:hRule=auto}",
),
(
"w:tr/w:trPr/w:trHeight{w:val=1440, w:hRule=auto}",
None,
"w:tr/w:trPr/w:trHeight{w:val=1440}",
),
("w:tr", None, "w:tr/w:trPr"),
("w:tr/w:trPr", None, "w:tr/w:trPr"),
("w:tr/w:trPr/w:trHeight", None, "w:tr/w:trPr/w:trHeight"),
],
)
def it_can_change_its_height_rule(
self, tr_cxml: str, new_value: WD_ROW_HEIGHT | None, expected_cxml: str, parent_: Mock
):
row = _Row(cast(CT_Row, element(tr_cxml)), parent_)
row.height_rule = new_value
assert row._tr.xml == xml(expected_cxml)
@pytest.mark.parametrize(
("tbl_cxml", "row_idx", "expected_len"),
[
# -- cell corresponds to single layout-grid cell --
("w:tbl/w:tr/w:tc/w:p", 0, 1),
# -- cell has a horizontal span --
("w:tbl/w:tr/w:tc/(w:tcPr/w:gridSpan{w:val=2},w:p)", 0, 2),
# -- cell is in latter row of vertical span --
(
"w:tbl/(w:tr/w:tc/(w:tcPr/w:vMerge{w:val=restart},w:p),"
"w:tr/w:tc/(w:tcPr/w:vMerge,w:p))",
1,
1,
),
# -- cell both has horizontal span and is latter row of vertical span --
(
"w:tbl/(w:tr/w:tc/(w:tcPr/(w:gridSpan{w:val=2},w:vMerge{w:val=restart}),w:p),"
"w:tr/w:tc/(w:tcPr/(w:gridSpan{w:val=2},w:vMerge),w:p))",
1,
2,
),
],
)
def it_provides_access_to_its_cells(
self, tbl_cxml: str, row_idx: int, expected_len: int, parent_: Mock
):
tbl = cast(CT_Tbl, element(tbl_cxml))
tr = tbl.tr_lst[row_idx]
table = Table(tbl, parent_)
row = _Row(tr, table)
cells = row.cells
assert len(cells) == expected_len
assert all(type(c) is _Cell for c in cells)
def it_provides_access_to_the_table_it_belongs_to(self, parent_: Mock, table_: Mock):
parent_.table = table_
row = _Row(cast(CT_Row, element("w:tr")), parent_)
assert row.table is table_
def it_knows_its_index_in_table_to_help(self, parent_: Mock):
tbl = element("w:tbl/(w:tr,w:tr,w:tr)")
row = _Row(cast(CT_Row, tbl[1]), parent_)
assert row._index == 1
# fixtures -------------------------------------------------------
@pytest.fixture
def _index_prop_(self, request: FixtureRequest):
return property_mock(request, _Row, "_index")
@pytest.fixture
def parent_(self, request: FixtureRequest):
return instance_mock(request, Table)
@pytest.fixture
def table_(self, request: FixtureRequest):
return instance_mock(request, Table)
@pytest.fixture
def table_prop_(self, request: FixtureRequest, table_: Mock):
return property_mock(request, _Row, "table")
| Describe_Row |
python | numpy__numpy | numpy/_core/tests/test_scalarmath.py | {
"start": 30554,
"end": 32336
} | class ____:
def _test_abs_func(self, absfunc, test_dtype):
x = test_dtype(-1.5)
assert_equal(absfunc(x), 1.5)
x = test_dtype(0.0)
res = absfunc(x)
# assert_equal() checks zero signedness
assert_equal(res, 0.0)
x = test_dtype(-0.0)
res = absfunc(x)
assert_equal(res, 0.0)
x = test_dtype(np.finfo(test_dtype).max)
assert_equal(absfunc(x), x.real)
with warnings.catch_warnings():
warnings.simplefilter('ignore', UserWarning)
x = test_dtype(np.finfo(test_dtype).tiny)
assert_equal(absfunc(x), x.real)
x = test_dtype(np.finfo(test_dtype).min)
assert_equal(absfunc(x), -x.real)
@pytest.mark.parametrize("dtype", floating_types + complex_floating_types)
def test_builtin_abs(self, dtype):
if (
sys.platform == "cygwin" and dtype == np.clongdouble and
(
_pep440.parse(platform.release().split("-")[0])
< _pep440.Version("3.3.0")
)
):
pytest.xfail(
reason="absl is computed in double precision on cygwin < 3.3"
)
self._test_abs_func(abs, dtype)
@pytest.mark.parametrize("dtype", floating_types + complex_floating_types)
def test_numpy_abs(self, dtype):
if (
sys.platform == "cygwin" and dtype == np.clongdouble and
(
_pep440.parse(platform.release().split("-")[0])
< _pep440.Version("3.3.0")
)
):
pytest.xfail(
reason="absl is computed in double precision on cygwin < 3.3"
)
self._test_abs_func(np.abs, dtype)
| TestAbs |
python | huggingface__transformers | src/transformers/models/regnet/modeling_regnet.py | {
"start": 2085,
"end": 2959
} | class ____(nn.Module):
"""
RegNet Embeddings (stem) composed of a single aggressive convolution.
"""
def __init__(self, config: RegNetConfig):
super().__init__()
self.embedder = RegNetConvLayer(
config.num_channels, config.embedding_size, kernel_size=3, stride=2, activation=config.hidden_act
)
self.num_channels = config.num_channels
def forward(self, pixel_values):
num_channels = pixel_values.shape[1]
if num_channels != self.num_channels:
raise ValueError(
"Make sure that the channel dimension of the pixel values match with the one set in the configuration."
)
hidden_state = self.embedder(pixel_values)
return hidden_state
# Copied from transformers.models.resnet.modeling_resnet.ResNetShortCut with ResNet->RegNet
| RegNetEmbeddings |
python | dagster-io__dagster | python_modules/libraries/dagster-aws/dagster_aws/_stubs.py | {
"start": 796,
"end": 1112
} | class ____(TypedDict):
Key: str
LastModified: datetime
ETag: Optional[str]
ChecksumAlgorithm: Optional[list[ChecksumAlgorithmType]]
Size: Optional[int]
StorageClass: Optional[ObjectStorageClassType]
Owner: Optional[OwnerTypeDef]
RestoreStatus: Optional[RestoreStatusTypeDef]
| ObjectTypeDef |
python | ray-project__ray | rllib/utils/exploration/random_encoder.py | {
"start": 717,
"end": 3810
} | class ____:
"""Track moving mean, std and count."""
def __init__(self, epsilon: float = 1e-4, shape: Optional[List[int]] = None):
"""Initialize object.
Args:
epsilon: Initial count.
shape: Shape of the trackables mean and std.
"""
if not shape:
shape = []
self.mean = np.zeros(shape, dtype=np.float32)
self.var = np.ones(shape, dtype=np.float32)
self.count = epsilon
def __call__(self, inputs: np.ndarray) -> np.ndarray:
"""Normalize input batch using moving mean and std.
Args:
inputs: Input batch to normalize.
Returns:
Logarithmic scaled normalized output.
"""
batch_mean = np.mean(inputs, axis=0)
batch_var = np.var(inputs, axis=0)
batch_count = inputs.shape[0]
self.update_params(batch_mean, batch_var, batch_count)
return np.log(inputs / self.std + 1)
def update_params(
self, batch_mean: float, batch_var: float, batch_count: float
) -> None:
"""Update moving mean, std and count.
Args:
batch_mean: Input batch mean.
batch_var: Input batch variance.
batch_count: Number of cases in the batch.
"""
delta = batch_mean - self.mean
tot_count = self.count + batch_count
# This moving mean calculation is from reference implementation.
self.mean = self.mean + delta + batch_count / tot_count
m_a = self.var * self.count
m_b = batch_var * batch_count
M2 = m_a + m_b + np.power(delta, 2) * self.count * batch_count / tot_count
self.var = M2 / tot_count
self.count = tot_count
@property
def std(self) -> float:
"""Get moving standard deviation.
Returns:
Returns moving standard deviation.
"""
return np.sqrt(self.var)
@OldAPIStack
def update_beta(beta_schedule: str, beta: float, rho: float, step: int) -> float:
"""Update beta based on schedule and training step.
Args:
beta_schedule: Schedule for beta update.
beta: Initial beta.
rho: Schedule decay parameter.
step: Current training iteration.
Returns:
Updated beta as per input schedule.
"""
if beta_schedule == "linear_decay":
return beta * ((1.0 - rho) ** step)
return beta
@OldAPIStack
def compute_states_entropy(
obs_embeds: np.ndarray, embed_dim: int, k_nn: int
) -> np.ndarray:
"""Compute states entropy using K nearest neighbour method.
Args:
obs_embeds: Observation latent representation using
encoder model.
embed_dim: Embedding vector dimension.
k_nn: Number of nearest neighbour for K-NN estimation.
Returns:
Computed states entropy.
"""
obs_embeds_ = np.reshape(obs_embeds, [-1, embed_dim])
dist = np.linalg.norm(obs_embeds_[:, None, :] - obs_embeds_[None, :, :], axis=-1)
return dist.argsort(axis=-1)[:, :k_nn][:, -1].astype(np.float32)
@OldAPIStack
| _MovingMeanStd |
python | pandas-dev__pandas | pandas/tests/indexes/period/test_setops.py | {
"start": 248,
"end": 12547
} | class ____:
def test_union(self, sort):
# union
other1 = period_range("1/1/2000", freq="D", periods=5)
rng1 = period_range("1/6/2000", freq="D", periods=5)
expected1 = PeriodIndex(
[
"2000-01-06",
"2000-01-07",
"2000-01-08",
"2000-01-09",
"2000-01-10",
"2000-01-01",
"2000-01-02",
"2000-01-03",
"2000-01-04",
"2000-01-05",
],
freq="D",
)
rng2 = period_range("1/1/2000", freq="D", periods=5)
other2 = period_range("1/4/2000", freq="D", periods=5)
expected2 = period_range("1/1/2000", freq="D", periods=8)
rng3 = period_range("1/1/2000", freq="D", periods=5)
other3 = PeriodIndex([], freq="D")
expected3 = period_range("1/1/2000", freq="D", periods=5)
rng4 = period_range("2000-01-01 09:00", freq="h", periods=5)
other4 = period_range("2000-01-02 09:00", freq="h", periods=5)
expected4 = PeriodIndex(
[
"2000-01-01 09:00",
"2000-01-01 10:00",
"2000-01-01 11:00",
"2000-01-01 12:00",
"2000-01-01 13:00",
"2000-01-02 09:00",
"2000-01-02 10:00",
"2000-01-02 11:00",
"2000-01-02 12:00",
"2000-01-02 13:00",
],
freq="h",
)
rng5 = PeriodIndex(
["2000-01-01 09:01", "2000-01-01 09:03", "2000-01-01 09:05"], freq="min"
)
other5 = PeriodIndex(
["2000-01-01 09:01", "2000-01-01 09:05", "2000-01-01 09:08"], freq="min"
)
expected5 = PeriodIndex(
[
"2000-01-01 09:01",
"2000-01-01 09:03",
"2000-01-01 09:05",
"2000-01-01 09:08",
],
freq="min",
)
rng6 = period_range("2000-01-01", freq="M", periods=7)
other6 = period_range("2000-04-01", freq="M", periods=7)
expected6 = period_range("2000-01-01", freq="M", periods=10)
rng7 = period_range("2003-01-01", freq="Y", periods=5)
other7 = period_range("1998-01-01", freq="Y", periods=8)
expected7 = PeriodIndex(
[
"2003",
"2004",
"2005",
"2006",
"2007",
"1998",
"1999",
"2000",
"2001",
"2002",
],
freq="Y",
)
rng8 = PeriodIndex(
["1/3/2000", "1/2/2000", "1/1/2000", "1/5/2000", "1/4/2000"], freq="D"
)
other8 = period_range("1/6/2000", freq="D", periods=5)
expected8 = PeriodIndex(
[
"1/3/2000",
"1/2/2000",
"1/1/2000",
"1/5/2000",
"1/4/2000",
"1/6/2000",
"1/7/2000",
"1/8/2000",
"1/9/2000",
"1/10/2000",
],
freq="D",
)
for rng, other, expected in [
(rng1, other1, expected1),
(rng2, other2, expected2),
(rng3, other3, expected3),
(rng4, other4, expected4),
(rng5, other5, expected5),
(rng6, other6, expected6),
(rng7, other7, expected7),
(rng8, other8, expected8),
]:
result_union = rng.union(other, sort=sort)
if sort is None:
expected = expected.sort_values()
tm.assert_index_equal(result_union, expected)
def test_union_misc(self, sort):
index = period_range("1/1/2000", "1/20/2000", freq="D")
result = index[:-5].union(index[10:], sort=sort)
tm.assert_index_equal(result, index)
# not in order
result = _permute(index[:-5]).union(_permute(index[10:]), sort=sort)
if sort is False:
tm.assert_index_equal(result.sort_values(), index)
else:
tm.assert_index_equal(result, index)
# cast if different frequencies
index = period_range("1/1/2000", "1/20/2000", freq="D")
index2 = period_range("1/1/2000", "1/20/2000", freq="W-WED")
result = index.union(index2, sort=sort)
expected = index.astype(object).union(index2.astype(object), sort=sort)
tm.assert_index_equal(result, expected)
def test_intersection(self, sort):
index = period_range("1/1/2000", "1/20/2000", freq="D")
result = index[:-5].intersection(index[10:], sort=sort)
tm.assert_index_equal(result, index[10:-5])
# not in order
left = _permute(index[:-5])
right = _permute(index[10:])
result = left.intersection(right, sort=sort)
if sort is False:
tm.assert_index_equal(result.sort_values(), index[10:-5])
else:
tm.assert_index_equal(result, index[10:-5])
# cast if different frequencies
index = period_range("1/1/2000", "1/20/2000", freq="D")
index2 = period_range("1/1/2000", "1/20/2000", freq="W-WED")
result = index.intersection(index2, sort=sort)
expected = pd.Index([], dtype=object)
tm.assert_index_equal(result, expected)
index3 = period_range("1/1/2000", "1/20/2000", freq="2D")
result = index.intersection(index3, sort=sort)
tm.assert_index_equal(result, expected)
def test_intersection_cases(self, sort):
base = period_range("6/1/2000", "6/30/2000", freq="D", name="idx")
# if target has the same name, it is preserved
rng2 = period_range("5/15/2000", "6/20/2000", freq="D", name="idx")
expected2 = period_range("6/1/2000", "6/20/2000", freq="D", name="idx")
# if target name is different, it will be reset
rng3 = period_range("5/15/2000", "6/20/2000", freq="D", name="other")
expected3 = period_range("6/1/2000", "6/20/2000", freq="D", name=None)
rng4 = period_range("7/1/2000", "7/31/2000", freq="D", name="idx")
expected4 = PeriodIndex([], name="idx", freq="D")
for rng, expected in [
(rng2, expected2),
(rng3, expected3),
(rng4, expected4),
]:
result = base.intersection(rng, sort=sort)
tm.assert_index_equal(result, expected)
assert result.name == expected.name
assert result.freq == expected.freq
# non-monotonic
base = PeriodIndex(
["2011-01-05", "2011-01-04", "2011-01-02", "2011-01-03"],
freq="D",
name="idx",
)
rng2 = PeriodIndex(
["2011-01-04", "2011-01-02", "2011-02-02", "2011-02-03"],
freq="D",
name="idx",
)
expected2 = PeriodIndex(["2011-01-04", "2011-01-02"], freq="D", name="idx")
rng3 = PeriodIndex(
["2011-01-04", "2011-01-02", "2011-02-02", "2011-02-03"],
freq="D",
name="other",
)
expected3 = PeriodIndex(["2011-01-04", "2011-01-02"], freq="D", name=None)
rng4 = period_range("7/1/2000", "7/31/2000", freq="D", name="idx")
expected4 = PeriodIndex([], freq="D", name="idx")
for rng, expected in [
(rng2, expected2),
(rng3, expected3),
(rng4, expected4),
]:
result = base.intersection(rng, sort=sort)
if sort is None:
expected = expected.sort_values()
tm.assert_index_equal(result, expected)
assert result.name == expected.name
assert result.freq == "D"
# empty same freq
rng = date_range("6/1/2000", "6/15/2000", freq="min")
result = rng[0:0].intersection(rng)
assert len(result) == 0
result = rng.intersection(rng[0:0])
assert len(result) == 0
def test_difference(self, sort):
# diff
period_rng = ["1/3/2000", "1/2/2000", "1/1/2000", "1/5/2000", "1/4/2000"]
rng1 = PeriodIndex(period_rng, freq="D")
other1 = period_range("1/6/2000", freq="D", periods=5)
expected1 = rng1
rng2 = PeriodIndex(period_rng, freq="D")
other2 = period_range("1/4/2000", freq="D", periods=5)
expected2 = PeriodIndex(["1/3/2000", "1/2/2000", "1/1/2000"], freq="D")
rng3 = PeriodIndex(period_rng, freq="D")
other3 = PeriodIndex([], freq="D")
expected3 = rng3
period_rng = [
"2000-01-01 10:00",
"2000-01-01 09:00",
"2000-01-01 12:00",
"2000-01-01 11:00",
"2000-01-01 13:00",
]
rng4 = PeriodIndex(period_rng, freq="h")
other4 = period_range("2000-01-02 09:00", freq="h", periods=5)
expected4 = rng4
rng5 = PeriodIndex(
["2000-01-01 09:03", "2000-01-01 09:01", "2000-01-01 09:05"], freq="min"
)
other5 = PeriodIndex(["2000-01-01 09:01", "2000-01-01 09:05"], freq="min")
expected5 = PeriodIndex(["2000-01-01 09:03"], freq="min")
period_rng = [
"2000-02-01",
"2000-01-01",
"2000-06-01",
"2000-07-01",
"2000-05-01",
"2000-03-01",
"2000-04-01",
]
rng6 = PeriodIndex(period_rng, freq="M")
other6 = period_range("2000-04-01", freq="M", periods=7)
expected6 = PeriodIndex(["2000-02-01", "2000-01-01", "2000-03-01"], freq="M")
period_rng = ["2003", "2007", "2006", "2005", "2004"]
rng7 = PeriodIndex(period_rng, freq="Y")
other7 = period_range("1998-01-01", freq="Y", periods=8)
expected7 = PeriodIndex(["2007", "2006"], freq="Y")
for rng, other, expected in [
(rng1, other1, expected1),
(rng2, other2, expected2),
(rng3, other3, expected3),
(rng4, other4, expected4),
(rng5, other5, expected5),
(rng6, other6, expected6),
(rng7, other7, expected7),
]:
result_difference = rng.difference(other, sort=sort)
if sort is None and len(other):
# We dont sort (yet?) when empty GH#24959
expected = expected.sort_values()
tm.assert_index_equal(result_difference, expected)
def test_difference_freq(self, sort):
# GH14323: difference of Period MUST preserve frequency
# but the ability to union results must be preserved
index = period_range("20160920", "20160925", freq="D")
other = period_range("20160921", "20160924", freq="D")
expected = PeriodIndex(["20160920", "20160925"], freq="D")
idx_diff = index.difference(other, sort)
tm.assert_index_equal(idx_diff, expected)
tm.assert_attr_equal("freq", idx_diff, expected)
other = period_range("20160922", "20160925", freq="D")
idx_diff = index.difference(other, sort)
expected = PeriodIndex(["20160920", "20160921"], freq="D")
tm.assert_index_equal(idx_diff, expected)
tm.assert_attr_equal("freq", idx_diff, expected)
def test_intersection_equal_duplicates(self):
# GH#38302
idx = period_range("2011-01-01", periods=2)
idx_dup = idx.append(idx)
result = idx_dup.intersection(idx_dup)
tm.assert_index_equal(result, idx)
@pytest.mark.filterwarnings(r"ignore:PeriodDtype\[B\] is deprecated:FutureWarning")
def test_union_duplicates(self):
# GH#36289
idx = period_range("2011-01-01", periods=2)
idx_dup = idx.append(idx)
idx2 = period_range("2011-01-02", periods=2)
idx2_dup = idx2.append(idx2)
result = idx_dup.union(idx2_dup)
expected = PeriodIndex(
[
"2011-01-01",
"2011-01-01",
"2011-01-02",
"2011-01-02",
"2011-01-03",
"2011-01-03",
],
freq="D",
)
tm.assert_index_equal(result, expected)
| TestPeriodIndex |
python | scipy__scipy | scipy/sparse/tests/test_base.py | {
"start": 184659,
"end": 187928
} | class ____(sparse_test_class(minmax=False)):
spcreator = lil_array
math_dtypes = [np.int_, np.float64, np.complex128]
def test_dot(self):
A = zeros((10, 10), np.complex128)
A[0, 3] = 10
A[5, 6] = 20j
B = self.lil_container((10, 10), dtype=np.complex128)
B[0, 3] = 10
B[5, 6] = 20j
# TODO: properly handle this assertion on ppc64le
if platform.machine() != 'ppc64le':
assert_array_equal(A @ A.T, (B @ B.T).toarray())
assert_array_equal(A @ A.conjugate().T, (B @ B.conjugate().T).toarray())
def test_scalar_mul(self):
x = self.lil_container((3, 3))
x[0, 0] = 2
x = x*2
assert_equal(x[0, 0], 4)
x = x*0
assert_equal(x[0, 0], 0)
def test_truediv_scalar(self):
A = self.spcreator((3, 2))
A[0, 1] = -10
A[2, 0] = 20
assert_array_equal((A / 1j).toarray(), A.toarray() / 1j)
assert_array_equal((A / 9).toarray(), A.toarray() / 9)
def test_inplace_ops(self):
A = self.lil_container([[0, 2, 3], [4, 0, 6]])
B = self.lil_container([[0, 1, 0], [0, 2, 3]])
data = {'add': (B, A + B),
'sub': (B, A - B),
'mul': (3, A * 3)}
for op, (other, expected) in data.items():
result = A.copy()
getattr(result, f'__i{op}__')(other)
assert_array_equal(result.toarray(), expected.toarray())
# Ticket 1604.
A = self.lil_container((1, 3), dtype=np.dtype('float64'))
B = self.asdense([0.1, 0.1, 0.1])
A[0, :] += B
assert_array_equal(A[0, :].toarray(), B)
def test_lil_iteration(self):
row_data = [[1, 2, 3], [4, 5, 6]]
B = self.lil_container(array(row_data))
for r, row in enumerate(B):
assert_array_equal(row.toarray(), array(row_data[r], ndmin=row.ndim))
def test_lil_from_csr(self):
# Tests whether a LIL can be constructed from a CSR.
B = self.lil_container((10, 10))
B[0, 3] = 10
B[5, 6] = 20
B[8, 3] = 30
B[3, 8] = 40
B[8, 9] = 50
C = B.tocsr()
D = self.lil_container(C)
assert_array_equal(C.toarray(), D.toarray())
def test_fancy_indexing_lil(self):
M = self.asdense(arange(25).reshape(5, 5))
A = self.lil_container(M)
assert_equal(A[array([1, 2, 3]), 2:3].toarray(),
M[array([1, 2, 3]), 2:3])
def test_point_wise_multiply(self):
l = self.lil_container((4, 3))
l[0, 0] = 1
l[1, 1] = 2
l[2, 2] = 3
l[3, 1] = 4
m = self.lil_container((4, 3))
m[0, 0] = 1
m[0, 1] = 2
m[2, 2] = 3
m[3, 1] = 4
m[3, 2] = 4
assert_array_equal(l.multiply(m).toarray(),
m.multiply(l).toarray())
assert_array_equal(l.multiply(m).toarray(),
[[1, 0, 0],
[0, 0, 0],
[0, 0, 9],
[0, 16, 0]])
def test_lil_multiply_removal(self):
# Ticket #1427.
a = self.lil_container(np.ones((3, 3)))
a *= 2.
a[0, :] = 0
| TestLIL |
python | apache__airflow | airflow-ctl/src/airflowctl/api/operations.py | {
"start": 22327,
"end": 24243
} | class ____(BaseOperations):
"""Pool operations."""
def get(self, pool_name: str) -> PoolResponse | ServerResponseError:
"""Get a pool."""
try:
self.response = self.client.get(f"pools/{pool_name}")
return PoolResponse.model_validate_json(self.response.content)
except ServerResponseError as e:
raise e
def list(self) -> PoolCollectionResponse | ServerResponseError:
"""List all pools."""
return super().execute_list(path="pools", data_model=PoolCollectionResponse)
def create(self, pool: PoolBody) -> PoolResponse | ServerResponseError:
"""Create a pool."""
try:
self.response = self.client.post("pools", json=pool.model_dump(mode="json"))
return PoolResponse.model_validate_json(self.response.content)
except ServerResponseError as e:
raise e
def bulk(self, pools: BulkBodyPoolBody) -> BulkResponse | ServerResponseError:
"""CRUD multiple pools."""
try:
self.response = self.client.patch("pools", json=pools.model_dump(mode="json"))
return BulkResponse.model_validate_json(self.response.content)
except ServerResponseError as e:
raise e
def delete(self, pool: str) -> str | ServerResponseError:
"""Delete a pool."""
try:
self.client.delete(f"pools/{pool}")
return pool
except ServerResponseError as e:
raise e
def update(self, pool_body: PoolPatchBody) -> PoolResponse | ServerResponseError:
"""Update a pool."""
try:
self.response = self.client.patch(
f"pools/{pool_body.pool}", json=pool_body.model_dump(mode="json")
)
return PoolResponse.model_validate_json(self.response.content)
except ServerResponseError as e:
raise e
| PoolsOperations |
python | eriklindernoren__ML-From-Scratch | mlfromscratch/supervised_learning/regression.py | {
"start": 4271,
"end": 5694
} | class ____(Regression):
"""Linear regression model with a regularization factor which does both variable selection
and regularization. Model that tries to balance the fit of the model with respect to the training
data and the complexity of the model. A large regularization factor with decreases the variance of
the model and do para.
Parameters:
-----------
degree: int
The degree of the polynomial that the independent variable X will be transformed to.
reg_factor: float
The factor that will determine the amount of regularization and feature
shrinkage.
n_iterations: float
The number of training iterations the algorithm will tune the weights for.
learning_rate: float
The step length that will be used when updating the weights.
"""
def __init__(self, degree, reg_factor, n_iterations=3000, learning_rate=0.01):
self.degree = degree
self.regularization = l1_regularization(alpha=reg_factor)
super(LassoRegression, self).__init__(n_iterations,
learning_rate)
def fit(self, X, y):
X = normalize(polynomial_features(X, degree=self.degree))
super(LassoRegression, self).fit(X, y)
def predict(self, X):
X = normalize(polynomial_features(X, degree=self.degree))
return super(LassoRegression, self).predict(X)
| LassoRegression |
python | ZoranPandovski__al-go-rithms | data_structures/Tree/splay_tree/python/splay_tree.py | {
"start": 32,
"end": 190
} | class ____:
__slots__ = ('key','left','right')
def __init__(self, key):
self.key = key
self.left = None
self.right = None
| Node |
python | google__pytype | pytype/pytd/pytd_utils_test.py | {
"start": 10823,
"end": 12752
} | class ____(parser_test_base.ParserTest):
"""Test pytd_utils.Print."""
def test_smoke(self):
"""Smoketest for printing pytd."""
ast = self.Parse("""
from typing import Any, Union
c1 = ... # type: int
T = TypeVar('T')
class A(typing.Generic[T], object):
bar = ... # type: T
def foo(self, x: list[int], y: T) -> Union[list[T], float]:
raise ValueError()
X = TypeVar('X')
Y = TypeVar('Y')
def bar(x: Union[X, Y]) -> Any: ...
""")
pytd_utils.Print(ast)
def test_literal(self):
ast = self.Parse("""
from typing import Literal
x1: Literal[""]
x2: Literal[b""]
x3: Literal[0]
x4: Literal[True]
x5: Literal[None]
""")
ast = ast.Visit(visitors.LookupBuiltins(self.loader.builtins))
self.assertMultiLineEqual(
pytd_utils.Print(ast),
textwrap.dedent("""
from typing import Literal
x1: Literal['']
x2: Literal[b'']
x3: Literal[0]
x4: Literal[True]
x5: None
""").strip(),
)
def test_literal_union(self):
ast = self.Parse("""
from typing import Literal, Union
x: Union[Literal["x"], Literal["y"]]
""")
self.assertMultiLineEqual(
pytd_utils.Print(ast),
textwrap.dedent("""
from typing import Literal
x: Literal['x', 'y']
""").strip(),
)
def test_reuse_union_name(self):
src = """
import typing
from typing import Callable, Iterable
class Node: ...
class Union:
_predicates: tuple[Callable[[typing.Union[Iterable[Node], Node]], bool], ...]
def __init__(self, *predicates: Callable[[typing.Union[Iterable[Node], Node]], bool]) -> None: ...
"""
ast = self.Parse(src)
self.assertMultiLineEqual(
pytd_utils.Print(ast), textwrap.dedent(src).strip()
)
if __name__ == "__main__":
unittest.main()
| PrintTest |
python | django__django | django/db/models/functions/math.py | {
"start": 685,
"end": 1819
} | class ____(NumericOutputFieldMixin, Func):
function = "ATAN2"
arity = 2
def as_sqlite(self, compiler, connection, **extra_context):
if not getattr(
connection.ops, "spatialite", False
) or connection.ops.spatial_version >= (5, 0, 0):
return self.as_sql(compiler, connection)
# This function is usually ATan2(y, x), returning the inverse tangent
# of y / x, but it's ATan2(x, y) on SpatiaLite < 5.0.0.
# Cast integers to float to avoid inconsistent/buggy behavior if the
# arguments are mixed between integer and float or decimal.
# https://www.gaia-gis.it/fossil/libspatialite/tktview?name=0f72cca3a2
clone = self.copy()
clone.set_source_expressions(
[
(
Cast(expression, FloatField())
if isinstance(expression.output_field, IntegerField)
else expression
)
for expression in self.get_source_expressions()[::-1]
]
)
return clone.as_sql(compiler, connection, **extra_context)
| ATan2 |
python | chroma-core__chroma | chromadb/test/ef/test_custom_ef.py | {
"start": 1000,
"end": 3303
} | class ____(EmbeddingFunction[Embeddable]):
def __call__(self, input: Embeddable) -> Embeddings:
return cast(Embeddings, np.array([1, 2, 3]).tolist())
def __init__(self, *args: Any, **kwargs: Any) -> None:
pass
@staticmethod
def name() -> str:
return "custom_embedding_function_with_registration"
@staticmethod
def build_from_config(
config: dict[str, Any]
) -> "CustomEmbeddingFunctionWithRegistration":
return CustomEmbeddingFunctionWithRegistration()
def get_config(self) -> dict[str, Any]:
return {}
def test_legacy_custom_ef() -> None:
ef = LegacyCustomEmbeddingFunction()
result = ef(["test"])
# Check the structure: we expect a list with one NumPy array
assert isinstance(result, list), "Result should be a list"
assert len(result) == 1, "Result should contain exactly one element"
assert isinstance(result[0], np.ndarray), "Result element should be a NumPy array"
# Compare the contents of the array
expected = np.array([1, 2, 3], dtype=np.float32)
assert np.array_equal(
result[0], expected
), f"Arrays not equal: {result[0]} vs {expected}"
def test_custom_ef() -> None:
ef = CustomEmbeddingFunction()
result = ef(["test"])
# Same checks as above
assert isinstance(result, list), "Result should be a list"
assert len(result) == 1, "Result should contain exactly one element"
assert isinstance(result[0], np.ndarray), "Result element should be a NumPy array"
expected = np.array([1, 2, 3], dtype=np.float32)
assert np.array_equal(
result[0], expected
), f"Arrays not equal: {result[0]} vs {expected}"
def test_custom_ef_registration() -> None:
# check all 4 embedding functions for registration.
# LegacyCustomEmbeddingFunction should not be in known_embedding_functions
# CustomEmbeddingFunction should not be in known_embedding_functions
# CustomEmbeddingFunctionWithRegistration should be in known_embedding_functions
assert "legacy_custom_embedding_function" not in known_embedding_functions
assert "custom_embedding_function" not in known_embedding_functions
assert "custom_embedding_function_with_registration" in known_embedding_functions
| CustomEmbeddingFunctionWithRegistration |
python | sympy__sympy | sympy/utilities/codegen.py | {
"start": 13048,
"end": 13561
} | class ____:
"""Base class for all "outgoing" information from a routine.
Objects of this class stores a SymPy expression, and a SymPy object
representing a result variable that will be used in the generated code
only if necessary.
"""
def __init__(self, expr, result_var):
self.expr = expr
self.result_var = result_var
def __str__(self):
return "%s(%r, %r)" % (self.__class__.__name__, self.expr,
self.result_var)
__repr__ = __str__
| ResultBase |
python | plotly__plotly.py | plotly/graph_objs/mesh3d/_colorbar.py | {
"start": 233,
"end": 61447
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "mesh3d"
_path_str = "mesh3d.colorbar"
_valid_props = {
"bgcolor",
"bordercolor",
"borderwidth",
"dtick",
"exponentformat",
"labelalias",
"len",
"lenmode",
"minexponent",
"nticks",
"orientation",
"outlinecolor",
"outlinewidth",
"separatethousands",
"showexponent",
"showticklabels",
"showtickprefix",
"showticksuffix",
"thickness",
"thicknessmode",
"tick0",
"tickangle",
"tickcolor",
"tickfont",
"tickformat",
"tickformatstopdefaults",
"tickformatstops",
"ticklabeloverflow",
"ticklabelposition",
"ticklabelstep",
"ticklen",
"tickmode",
"tickprefix",
"ticks",
"ticksuffix",
"ticktext",
"ticktextsrc",
"tickvals",
"tickvalssrc",
"tickwidth",
"title",
"x",
"xanchor",
"xpad",
"xref",
"y",
"yanchor",
"ypad",
"yref",
}
@property
def bgcolor(self):
"""
Sets the color of padded area.
The 'bgcolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["bgcolor"]
@bgcolor.setter
def bgcolor(self, val):
self["bgcolor"] = val
@property
def bordercolor(self):
"""
Sets the axis line color.
The 'bordercolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["bordercolor"]
@bordercolor.setter
def bordercolor(self, val):
self["bordercolor"] = val
@property
def borderwidth(self):
"""
Sets the width (in px) or the border enclosing this color bar.
The 'borderwidth' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["borderwidth"]
@borderwidth.setter
def borderwidth(self, val):
self["borderwidth"] = val
@property
def dtick(self):
"""
Sets the step in-between ticks on this axis. Use with `tick0`.
Must be a positive number, or special strings available to
"log" and "date" axes. If the axis `type` is "log", then ticks
are set every 10^(n*dtick) where n is the tick number. For
example, to set a tick mark at 1, 10, 100, 1000, ... set dtick
to 1. To set tick marks at 1, 100, 10000, ... set dtick to 2.
To set tick marks at 1, 5, 25, 125, 625, 3125, ... set dtick to
log_10(5), or 0.69897000433. "log" has several special values;
"L<f>", where `f` is a positive number, gives ticks linearly
spaced in value (but not position). For example `tick0` = 0.1,
`dtick` = "L0.5" will put ticks at 0.1, 0.6, 1.1, 1.6 etc. To
show powers of 10 plus small digits between, use "D1" (all
digits) or "D2" (only 2 and 5). `tick0` is ignored for "D1" and
"D2". If the axis `type` is "date", then you must convert the
time to milliseconds. For example, to set the interval between
ticks to one day, set `dtick` to 86400000.0. "date" also has
special values "M<n>" gives ticks spaced by a number of months.
`n` must be a positive integer. To set ticks on the 15th of
every third month, set `tick0` to "2000-01-15" and `dtick` to
"M3". To set ticks every 4 years, set `dtick` to "M48"
The 'dtick' property accepts values of any type
Returns
-------
Any
"""
return self["dtick"]
@dtick.setter
def dtick(self, val):
self["dtick"] = val
@property
def exponentformat(self):
"""
Determines a formatting rule for the tick exponents. For
example, consider the number 1,000,000,000. If "none", it
appears as 1,000,000,000. If "e", 1e+9. If "E", 1E+9. If
"power", 1x10^9 (with 9 in a super script). If "SI", 1G. If
"B", 1B. "SI" uses prefixes from "femto" f (10^-15) to "tera" T
(10^12). *SI extended* covers instead the full SI range from
"quecto" q (10^-30) to "quetta" Q (10^30). If "SI" or *SI
extended* is used and the exponent is beyond the above ranges,
the formatting rule will automatically be switched to the power
notation.
The 'exponentformat' property is an enumeration that may be specified as:
- One of the following enumeration values:
['none', 'e', 'E', 'power', 'SI', 'B', 'SI extended']
Returns
-------
Any
"""
return self["exponentformat"]
@exponentformat.setter
def exponentformat(self, val):
self["exponentformat"] = val
@property
def labelalias(self):
"""
Replacement text for specific tick or hover labels. For example
using {US: 'USA', CA: 'Canada'} changes US to USA and CA to
Canada. The labels we would have shown must match the keys
exactly, after adding any tickprefix or ticksuffix. For
negative numbers the minus sign symbol used (U+2212) is wider
than the regular ascii dash. That means you need to use −1
instead of -1. labelalias can be used with any axis type, and
both keys (if needed) and values (if desired) can include html-
like tags or MathJax.
The 'labelalias' property accepts values of any type
Returns
-------
Any
"""
return self["labelalias"]
@labelalias.setter
def labelalias(self, val):
self["labelalias"] = val
@property
def len(self):
"""
Sets the length of the color bar This measure excludes the
padding of both ends. That is, the color bar length is this
length minus the padding on both ends.
The 'len' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["len"]
@len.setter
def len(self, val):
self["len"] = val
@property
def lenmode(self):
"""
Determines whether this color bar's length (i.e. the measure in
the color variation direction) is set in units of plot
"fraction" or in *pixels. Use `len` to set the value.
The 'lenmode' property is an enumeration that may be specified as:
- One of the following enumeration values:
['fraction', 'pixels']
Returns
-------
Any
"""
return self["lenmode"]
@lenmode.setter
def lenmode(self, val):
self["lenmode"] = val
@property
def minexponent(self):
"""
Hide SI prefix for 10^n if |n| is below this number. This only
has an effect when `tickformat` is "SI" or "B".
The 'minexponent' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["minexponent"]
@minexponent.setter
def minexponent(self, val):
self["minexponent"] = val
@property
def nticks(self):
"""
Specifies the maximum number of ticks for the particular axis.
The actual number of ticks will be chosen automatically to be
less than or equal to `nticks`. Has an effect only if
`tickmode` is set to "auto".
The 'nticks' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [0, 9223372036854775807]
Returns
-------
int
"""
return self["nticks"]
@nticks.setter
def nticks(self, val):
self["nticks"] = val
@property
def orientation(self):
"""
Sets the orientation of the colorbar.
The 'orientation' property is an enumeration that may be specified as:
- One of the following enumeration values:
['h', 'v']
Returns
-------
Any
"""
return self["orientation"]
@orientation.setter
def orientation(self, val):
self["orientation"] = val
@property
def outlinecolor(self):
"""
Sets the axis line color.
The 'outlinecolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["outlinecolor"]
@outlinecolor.setter
def outlinecolor(self, val):
self["outlinecolor"] = val
@property
def outlinewidth(self):
"""
Sets the width (in px) of the axis line.
The 'outlinewidth' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["outlinewidth"]
@outlinewidth.setter
def outlinewidth(self, val):
self["outlinewidth"] = val
@property
def separatethousands(self):
"""
If "true", even 4-digit integers are separated
The 'separatethousands' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["separatethousands"]
@separatethousands.setter
def separatethousands(self, val):
self["separatethousands"] = val
@property
def showexponent(self):
"""
If "all", all exponents are shown besides their significands.
If "first", only the exponent of the first tick is shown. If
"last", only the exponent of the last tick is shown. If "none",
no exponents appear.
The 'showexponent' property is an enumeration that may be specified as:
- One of the following enumeration values:
['all', 'first', 'last', 'none']
Returns
-------
Any
"""
return self["showexponent"]
@showexponent.setter
def showexponent(self, val):
self["showexponent"] = val
@property
def showticklabels(self):
    """
    Whether tick labels are drawn (bool).

    Returns
    -------
    bool
    """
    return self["showticklabels"]

@showticklabels.setter
def showticklabels(self, val):
    self["showticklabels"] = val
@property
def showtickprefix(self):
    """
    Which tick labels get the prefix: 'all', 'first', 'last', or
    'none' (prefixes hidden).

    Returns
    -------
    Any
    """
    return self["showtickprefix"]

@showtickprefix.setter
def showtickprefix(self, val):
    self["showtickprefix"] = val
@property
def showticksuffix(self):
    """
    Same as `showtickprefix` but for tick suffixes: 'all', 'first',
    'last', or 'none'.

    Returns
    -------
    Any
    """
    return self["showticksuffix"]

@showticksuffix.setter
def showticksuffix(self, val):
    self["showticksuffix"] = val
@property
def thickness(self):
    """
    Thickness of the color bar, excluding padding, ticks, and
    labels; a number in [0, inf].

    Returns
    -------
    int|float
    """
    return self["thickness"]

@thickness.setter
def thickness(self, val):
    self["thickness"] = val
@property
def thicknessmode(self):
    """
    Units of `thickness` (the measure in the constant-color
    direction): 'fraction' of the plot or 'pixels'.

    Returns
    -------
    Any
    """
    return self["thicknessmode"]

@thicknessmode.setter
def thicknessmode(self, val):
    self["thicknessmode"] = val
@property
def tick0(self):
    """
    Placement of the first tick; used with `dtick`. For 'log' axes
    supply the log of the starting tick (except with `dtick`='L<f>');
    for 'date' axes a date string; for 'category' axes the serial
    number of the category (counted from zero in appearance order).
    Accepts values of any type.

    Returns
    -------
    Any
    """
    return self["tick0"]

@tick0.setter
def tick0(self, val):
    self["tick0"] = val
@property
def tickangle(self):
    """
    Angle of the tick labels relative to horizontal, in degrees
    within [-180, 180]; e.g. -90 draws labels vertically. Values
    outside the range wrap to their equivalent (270 -> -90).

    Returns
    -------
    int|float
    """
    return self["tickangle"]

@tickangle.setter
def tickangle(self, val):
    self["tickangle"] = val
@property
def tickcolor(self):
    """
    Tick color. Accepts hex, rgb/rgba, hsl/hsla, hsv/hsva strings,
    or a named CSS color.

    Returns
    -------
    str
    """
    return self["tickcolor"]

@tickcolor.setter
def tickcolor(self, val):
    self["tickcolor"] = val
@property
def tickfont(self):
    """
    Tick label font. Accepts a
    :class:`plotly.graph_objs.mesh3d.colorbar.Tickfont` instance or
    a dict of properties for its constructor.

    Returns
    -------
    plotly.graph_objs.mesh3d.colorbar.Tickfont
    """
    return self["tickfont"]

@tickfont.setter
def tickfont(self, val):
    self["tickfont"] = val
@property
def tickformat(self):
    """
    Tick label formatting rule in d3 format mini-language. Numbers:
    https://github.com/d3/d3-format/tree/v1.4.5#d3-format; dates:
    https://github.com/d3/d3-time-format/tree/v2.2.3#locale_format.
    Two extensions to d3's date formatter: "%h" (half-year) and
    "%{n}f" (fractional seconds with n digits), e.g. *2016-10-13
    09:15:23.456* with "%H~%M~%S.%2f" renders "09~15~23.46".
    Accepts a string, or a number converted to one.

    Returns
    -------
    str
    """
    return self["tickformat"]

@tickformat.setter
def tickformat(self, val):
    self["tickformat"] = val
@property
def tickformatstops(self):
    """
    Tuple of Tickformatstop instances. Accepts a list/tuple of
    :class:`plotly.graph_objs.mesh3d.colorbar.Tickformatstop`
    instances or of dicts for their constructor.

    Returns
    -------
    tuple[plotly.graph_objs.mesh3d.colorbar.Tickformatstop]
    """
    return self["tickformatstops"]

@tickformatstops.setter
def tickformatstops(self, val):
    self["tickformatstops"] = val
@property
def tickformatstopdefaults(self):
    """
    Template default values for elements of
    mesh3d.colorbar.tickformatstops (used as
    layout.template.data.mesh3d.colorbar.tickformatstopdefaults).
    Accepts a
    :class:`plotly.graph_objs.mesh3d.colorbar.Tickformatstop`
    instance or a dict of properties for its constructor.

    Returns
    -------
    plotly.graph_objs.mesh3d.colorbar.Tickformatstop
    """
    return self["tickformatstopdefaults"]

@tickformatstopdefaults.setter
def tickformatstopdefaults(self, val):
    self["tickformatstopdefaults"] = val
@property
def ticklabeloverflow(self):
    """
    How tick labels overflowing the graph div or axis domain are
    handled: 'allow', 'hide past div', or 'hide past domain'.
    Defaults to *hide past domain* for inside labels, otherwise
    *hide past div*.

    Returns
    -------
    Any
    """
    return self["ticklabeloverflow"]

@ticklabeloverflow.setter
def ticklabeloverflow(self, val):
    self["ticklabeloverflow"] = val
@property
def ticklabelposition(self):
    """
    Where tick labels are drawn relative to the ticks. Left/right
    variants apply when `orientation` is 'h', top/bottom when 'v'.
    One of: 'outside', 'inside', 'outside top', 'inside top',
    'outside left', 'inside left', 'outside right', 'inside right',
    'outside bottom', 'inside bottom'.

    Returns
    -------
    Any
    """
    return self["ticklabelposition"]

@ticklabelposition.setter
def ticklabelposition(self, val):
    self["ticklabelposition"] = val
@property
def ticklabelstep(self):
    """
    Spacing of tick labels versus ticks: 1 (default) labels every
    tick, 2 every 2nd, n every nth; `tick0` picks which are shown.
    Not implemented for 'log'/'multicategory' axes or when
    `tickmode` is 'array'. An int in [1, 9223372036854775807]
    (floats are cast).

    Returns
    -------
    int
    """
    return self["ticklabelstep"]

@ticklabelstep.setter
def ticklabelstep(self, val):
    self["ticklabelstep"] = val
@property
def ticklen(self):
    """
    Tick length in px; a number in [0, inf].

    Returns
    -------
    int|float
    """
    return self["ticklen"]

@ticklen.setter
def ticklen(self, val):
    self["ticklen"] = val
@property
def tickmode(self):
    """
    Tick mode for this axis. 'auto': tick count via `nticks`;
    'linear': placement from `tick0` + step `dtick` (the default
    when both are provided); 'array': placement from `tickvals`
    with text from `ticktext` (the default when `tickvals` is
    provided).

    Returns
    -------
    Any
    """
    return self["tickmode"]

@tickmode.setter
def tickmode(self, val):
    self["tickmode"] = val
@property
def tickprefix(self):
    """
    Tick label prefix. Accepts a string, or a number converted to
    one.

    Returns
    -------
    str
    """
    return self["tickprefix"]

@tickprefix.setter
def tickprefix(self, val):
    self["tickprefix"] = val
@property
def ticks(self):
    """
    Whether and where ticks are drawn: '' (not drawn), 'outside',
    or 'inside' the axis lines.

    Returns
    -------
    Any
    """
    return self["ticks"]

@ticks.setter
def ticks(self, val):
    self["ticks"] = val
@property
def ticksuffix(self):
    """
    Tick label suffix. Accepts a string, or a number converted to
    one.

    Returns
    -------
    str
    """
    return self["ticksuffix"]

@ticksuffix.setter
def ticksuffix(self, val):
    self["ticksuffix"] = val
@property
def ticktext(self):
    """
    Text shown at the tick positions given by `tickvals`; only
    effective when `tickmode` is 'array'. Accepts a tuple, list,
    numpy array, or pandas Series.

    Returns
    -------
    numpy.ndarray
    """
    return self["ticktext"]

@ticktext.setter
def ticktext(self, val):
    self["ticktext"] = val
@property
def ticktextsrc(self):
    """
    Chart Studio Cloud source reference for `ticktext`. Accepts a
    string or a plotly.grid_objs.Column object.

    Returns
    -------
    str
    """
    return self["ticktextsrc"]

@ticktextsrc.setter
def ticktextsrc(self, val):
    self["ticktextsrc"] = val
@property
def tickvals(self):
    """
    Values at which ticks appear; only effective when `tickmode`
    is 'array'. Used with `ticktext`. Accepts a tuple, list, numpy
    array, or pandas Series.

    Returns
    -------
    numpy.ndarray
    """
    return self["tickvals"]

@tickvals.setter
def tickvals(self, val):
    self["tickvals"] = val
@property
def tickvalssrc(self):
    """
    Chart Studio Cloud source reference for `tickvals`. Accepts a
    string or a plotly.grid_objs.Column object.

    Returns
    -------
    str
    """
    return self["tickvalssrc"]

@tickvalssrc.setter
def tickvalssrc(self, val):
    self["tickvalssrc"] = val
@property
def tickwidth(self):
    """
    Tick width in px; a number in [0, inf].

    Returns
    -------
    int|float
    """
    return self["tickwidth"]

@tickwidth.setter
def tickwidth(self, val):
    self["tickwidth"] = val
@property
def title(self):
    """
    Colorbar title. Accepts a
    :class:`plotly.graph_objs.mesh3d.colorbar.Title` instance or a
    dict of properties for its constructor.

    Returns
    -------
    plotly.graph_objs.mesh3d.colorbar.Title
    """
    return self["title"]

@title.setter
def title(self, val):
    self["title"] = val
@property
def x(self):
    """
    X position of the color bar (plot fraction) relative to `xref`.
    With `xref` 'paper': defaults 1.02 ('v') / 0.5 ('h'); with
    'container': 1 ('v') / 0.5 ('h'). Must lie in [0, 1] for
    'container' and in [-2, 3] for 'paper'. A number.

    Returns
    -------
    int|float
    """
    return self["x"]

@x.setter
def x(self, val):
    self["x"] = val
@property
def xanchor(self):
    """
    Horizontal anchor binding `x` to the 'left', 'center', or
    'right' of the color bar. Defaults to 'left' when `orientation`
    is 'v' and 'center' when 'h'.

    Returns
    -------
    Any
    """
    return self["xanchor"]

@xanchor.setter
def xanchor(self, val):
    self["xanchor"] = val
@property
def xpad(self):
    """
    Padding (in px) along the x direction; a number in [0, inf].

    Returns
    -------
    int|float
    """
    return self["xpad"]

@xpad.setter
def xpad(self, val):
    self["xpad"] = val
@property
def xref(self):
    """
    Container that `x` refers to: 'container' spans the full plot
    `width`, 'paper' only the plotting area's width.

    Returns
    -------
    Any
    """
    return self["xref"]

@xref.setter
def xref(self, val):
    self["xref"] = val
@property
def y(self):
    """
    Y position of the color bar (plot fraction) relative to `yref`.
    With `yref` 'paper': defaults 0.5 ('v') / 1.02 ('h'); with
    'container': 0.5 ('v') / 1 ('h'). Must lie in [0, 1] for
    'container' and in [-2, 3] for 'paper'. A number.

    Returns
    -------
    int|float
    """
    return self["y"]

@y.setter
def y(self, val):
    self["y"] = val
@property
def yanchor(self):
    """
    Vertical anchor binding `y` to the 'top', 'middle', or 'bottom'
    of the color bar. Defaults to 'middle' when `orientation` is
    'v' and 'bottom' when 'h'.

    Returns
    -------
    Any
    """
    return self["yanchor"]

@yanchor.setter
def yanchor(self, val):
    self["yanchor"] = val
@property
def ypad(self):
    """
    Padding (in px) along the y direction; a number in [0, inf].

    Returns
    -------
    int|float
    """
    return self["ypad"]

@ypad.setter
def ypad(self, val):
    self["ypad"] = val
@property
def yref(self):
    """
    Container that `y` refers to: 'container' spans the full plot
    `height`, 'paper' only the plotting area's height.

    Returns
    -------
    Any
    """
    return self["yref"]

@yref.setter
def yref(self, val):
    self["yref"] = val
@property
def _prop_descriptions(self):
    # Plain-text description of every colorbar property, consumed when
    # building the generated __init__ docstring. NOTE: the triple-quoted
    # string below is the property's *return value* (runtime data, not a
    # docstring) — it must stay byte-identical to what the plotly code
    # generator emits; do not edit it by hand.
    return """\
    bgcolor
        Sets the color of padded area.
    bordercolor
        Sets the axis line color.
    borderwidth
        Sets the width (in px) or the border enclosing this
        color bar.
    dtick
        Sets the step in-between ticks on this axis. Use with
        `tick0`. Must be a positive number, or special strings
        available to "log" and "date" axes. If the axis `type`
        is "log", then ticks are set every 10^(n*dtick) where n
        is the tick number. For example, to set a tick mark at
        1, 10, 100, 1000, ... set dtick to 1. To set tick marks
        at 1, 100, 10000, ... set dtick to 2. To set tick marks
        at 1, 5, 25, 125, 625, 3125, ... set dtick to
        log_10(5), or 0.69897000433. "log" has several special
        values; "L<f>", where `f` is a positive number, gives
        ticks linearly spaced in value (but not position). For
        example `tick0` = 0.1, `dtick` = "L0.5" will put ticks
        at 0.1, 0.6, 1.1, 1.6 etc. To show powers of 10 plus
        small digits between, use "D1" (all digits) or "D2"
        (only 2 and 5). `tick0` is ignored for "D1" and "D2".
        If the axis `type` is "date", then you must convert the
        time to milliseconds. For example, to set the interval
        between ticks to one day, set `dtick` to 86400000.0.
        "date" also has special values "M<n>" gives ticks
        spaced by a number of months. `n` must be a positive
        integer. To set ticks on the 15th of every third month,
        set `tick0` to "2000-01-15" and `dtick` to "M3". To set
        ticks every 4 years, set `dtick` to "M48"
    exponentformat
        Determines a formatting rule for the tick exponents.
        For example, consider the number 1,000,000,000. If
        "none", it appears as 1,000,000,000. If "e", 1e+9. If
        "E", 1E+9. If "power", 1x10^9 (with 9 in a super
        script). If "SI", 1G. If "B", 1B. "SI" uses prefixes
        from "femto" f (10^-15) to "tera" T (10^12). *SI
        extended* covers instead the full SI range from
        "quecto" q (10^-30) to "quetta" Q (10^30). If "SI" or
        *SI extended* is used and the exponent is beyond the
        above ranges, the formatting rule will automatically be
        switched to the power notation.
    labelalias
        Replacement text for specific tick or hover labels. For
        example using {US: 'USA', CA: 'Canada'} changes US to
        USA and CA to Canada. The labels we would have shown
        must match the keys exactly, after adding any
        tickprefix or ticksuffix. For negative numbers the
        minus sign symbol used (U+2212) is wider than the
        regular ascii dash. That means you need to use −1
        instead of -1. labelalias can be used with any axis
        type, and both keys (if needed) and values (if desired)
        can include html-like tags or MathJax.
    len
        Sets the length of the color bar This measure excludes
        the padding of both ends. That is, the color bar length
        is this length minus the padding on both ends.
    lenmode
        Determines whether this color bar's length (i.e. the
        measure in the color variation direction) is set in
        units of plot "fraction" or in *pixels. Use `len` to
        set the value.
    minexponent
        Hide SI prefix for 10^n if |n| is below this number.
        This only has an effect when `tickformat` is "SI" or
        "B".
    nticks
        Specifies the maximum number of ticks for the
        particular axis. The actual number of ticks will be
        chosen automatically to be less than or equal to
        `nticks`. Has an effect only if `tickmode` is set to
        "auto".
    orientation
        Sets the orientation of the colorbar.
    outlinecolor
        Sets the axis line color.
    outlinewidth
        Sets the width (in px) of the axis line.
    separatethousands
        If "true", even 4-digit integers are separated
    showexponent
        If "all", all exponents are shown besides their
        significands. If "first", only the exponent of the
        first tick is shown. If "last", only the exponent of
        the last tick is shown. If "none", no exponents appear.
    showticklabels
        Determines whether or not the tick labels are drawn.
    showtickprefix
        If "all", all tick labels are displayed with a prefix.
        If "first", only the first tick is displayed with a
        prefix. If "last", only the last tick is displayed with
        a suffix. If "none", tick prefixes are hidden.
    showticksuffix
        Same as `showtickprefix` but for tick suffixes.
    thickness
        Sets the thickness of the color bar This measure
        excludes the size of the padding, ticks and labels.
    thicknessmode
        Determines whether this color bar's thickness (i.e. the
        measure in the constant color direction) is set in
        units of plot "fraction" or in "pixels". Use
        `thickness` to set the value.
    tick0
        Sets the placement of the first tick on this axis. Use
        with `dtick`. If the axis `type` is "log", then you
        must take the log of your starting tick (e.g. to set
        the starting tick to 100, set the `tick0` to 2) except
        when `dtick`=*L<f>* (see `dtick` for more info). If the
        axis `type` is "date", it should be a date string, like
        date data. If the axis `type` is "category", it should
        be a number, using the scale where each category is
        assigned a serial number from zero in the order it
        appears.
    tickangle
        Sets the angle of the tick labels with respect to the
        horizontal. For example, a `tickangle` of -90 draws the
        tick labels vertically.
    tickcolor
        Sets the tick color.
    tickfont
        Sets the color bar's tick label font
    tickformat
        Sets the tick label formatting rule using d3 formatting
        mini-languages which are very similar to those in
        Python. For numbers, see:
        https://github.com/d3/d3-format/tree/v1.4.5#d3-format.
        And for dates see: https://github.com/d3/d3-time-
        format/tree/v2.2.3#locale_format. We add two items to
        d3's date formatter: "%h" for half of the year as a
        decimal number as well as "%{n}f" for fractional
        seconds with n digits. For example, *2016-10-13
        09:15:23.456* with tickformat "%H~%M~%S.%2f" would
        display "09~15~23.46"
    tickformatstops
        A tuple of :class:`plotly.graph_objects.mesh3d.colorbar
        .Tickformatstop` instances or dicts with compatible
        properties
    tickformatstopdefaults
        When used in a template (as layout.template.data.mesh3d
        .colorbar.tickformatstopdefaults), sets the default
        property values to use for elements of
        mesh3d.colorbar.tickformatstops
    ticklabeloverflow
        Determines how we handle tick labels that would
        overflow either the graph div or the domain of the
        axis. The default value for inside tick labels is *hide
        past domain*. In other cases the default is *hide past
        div*.
    ticklabelposition
        Determines where tick labels are drawn relative to the
        ticks. Left and right options are used when
        `orientation` is "h", top and bottom when `orientation`
        is "v".
    ticklabelstep
        Sets the spacing between tick labels as compared to the
        spacing between ticks. A value of 1 (default) means
        each tick gets a label. A value of 2 means shows every
        2nd label. A larger value n means only every nth tick
        is labeled. `tick0` determines which labels are shown.
        Not implemented for axes with `type` "log" or
        "multicategory", or when `tickmode` is "array".
    ticklen
        Sets the tick length (in px).
    tickmode
        Sets the tick mode for this axis. If "auto", the number
        of ticks is set via `nticks`. If "linear", the
        placement of the ticks is determined by a starting
        position `tick0` and a tick step `dtick` ("linear" is
        the default value if `tick0` and `dtick` are provided).
        If "array", the placement of the ticks is set via
        `tickvals` and the tick text is `ticktext`. ("array" is
        the default value if `tickvals` is provided).
    tickprefix
        Sets a tick label prefix.
    ticks
        Determines whether ticks are drawn or not. If "", this
        axis' ticks are not drawn. If "outside" ("inside"),
        this axis' are drawn outside (inside) the axis lines.
    ticksuffix
        Sets a tick label suffix.
    ticktext
        Sets the text displayed at the ticks position via
        `tickvals`. Only has an effect if `tickmode` is set to
        "array". Used with `tickvals`.
    ticktextsrc
        Sets the source reference on Chart Studio Cloud for
        `ticktext`.
    tickvals
        Sets the values at which ticks on this axis appear.
        Only has an effect if `tickmode` is set to "array".
        Used with `ticktext`.
    tickvalssrc
        Sets the source reference on Chart Studio Cloud for
        `tickvals`.
    tickwidth
        Sets the tick width (in px).
    title
        :class:`plotly.graph_objects.mesh3d.colorbar.Title`
        instance or dict with compatible properties
    x
        Sets the x position with respect to `xref` of the color
        bar (in plot fraction). When `xref` is "paper",
        defaults to 1.02 when `orientation` is "v" and 0.5 when
        `orientation` is "h". When `xref` is "container",
        defaults to 1 when `orientation` is "v" and 0.5 when
        `orientation` is "h". Must be between 0 and 1 if `xref`
        is "container" and between "-2" and 3 if `xref` is
        "paper".
    xanchor
        Sets this color bar's horizontal position anchor. This
        anchor binds the `x` position to the "left", "center"
        or "right" of the color bar. Defaults to "left" when
        `orientation` is "v" and "center" when `orientation` is
        "h".
    xpad
        Sets the amount of padding (in px) along the x
        direction.
    xref
        Sets the container `x` refers to. "container" spans the
        entire `width` of the plot. "paper" refers to the width
        of the plotting area only.
    y
        Sets the y position with respect to `yref` of the color
        bar (in plot fraction). When `yref` is "paper",
        defaults to 0.5 when `orientation` is "v" and 1.02 when
        `orientation` is "h". When `yref` is "container",
        defaults to 0.5 when `orientation` is "v" and 1 when
        `orientation` is "h". Must be between 0 and 1 if `yref`
        is "container" and between "-2" and 3 if `yref` is
        "paper".
    yanchor
        Sets this color bar's vertical position anchor This
        anchor binds the `y` position to the "top", "middle" or
        "bottom" of the color bar. Defaults to "middle" when
        `orientation` is "v" and "bottom" when `orientation` is
        "h".
    ypad
        Sets the amount of padding (in px) along the y
        direction.
    yref
        Sets the container `y` refers to. "container" spans the
        entire `height` of the plot. "paper" refers to the
        height of the plotting area only.
    """
def __init__(
self,
arg=None,
bgcolor=None,
bordercolor=None,
borderwidth=None,
dtick=None,
exponentformat=None,
labelalias=None,
len=None,
lenmode=None,
minexponent=None,
nticks=None,
orientation=None,
outlinecolor=None,
outlinewidth=None,
separatethousands=None,
showexponent=None,
showticklabels=None,
showtickprefix=None,
showticksuffix=None,
thickness=None,
thicknessmode=None,
tick0=None,
tickangle=None,
tickcolor=None,
tickfont=None,
tickformat=None,
tickformatstops=None,
tickformatstopdefaults=None,
ticklabeloverflow=None,
ticklabelposition=None,
ticklabelstep=None,
ticklen=None,
tickmode=None,
tickprefix=None,
ticks=None,
ticksuffix=None,
ticktext=None,
ticktextsrc=None,
tickvals=None,
tickvalssrc=None,
tickwidth=None,
title=None,
x=None,
xanchor=None,
xpad=None,
xref=None,
y=None,
yanchor=None,
ypad=None,
yref=None,
**kwargs,
):
"""
Construct a new ColorBar object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.mesh3d.ColorBar`
bgcolor
Sets the color of padded area.
bordercolor
Sets the axis line color.
borderwidth
Sets the width (in px) or the border enclosing this
color bar.
dtick
Sets the step in-between ticks on this axis. Use with
`tick0`. Must be a positive number, or special strings
available to "log" and "date" axes. If the axis `type`
is "log", then ticks are set every 10^(n*dtick) where n
is the tick number. For example, to set a tick mark at
1, 10, 100, 1000, ... set dtick to 1. To set tick marks
at 1, 100, 10000, ... set dtick to 2. To set tick marks
at 1, 5, 25, 125, 625, 3125, ... set dtick to
log_10(5), or 0.69897000433. "log" has several special
values; "L<f>", where `f` is a positive number, gives
ticks linearly spaced in value (but not position). For
example `tick0` = 0.1, `dtick` = "L0.5" will put ticks
at 0.1, 0.6, 1.1, 1.6 etc. To show powers of 10 plus
small digits between, use "D1" (all digits) or "D2"
(only 2 and 5). `tick0` is ignored for "D1" and "D2".
If the axis `type` is "date", then you must convert the
time to milliseconds. For example, to set the interval
between ticks to one day, set `dtick` to 86400000.0.
"date" also has special values "M<n>" gives ticks
spaced by a number of months. `n` must be a positive
integer. To set ticks on the 15th of every third month,
set `tick0` to "2000-01-15" and `dtick` to "M3". To set
ticks every 4 years, set `dtick` to "M48"
exponentformat
Determines a formatting rule for the tick exponents.
For example, consider the number 1,000,000,000. If
"none", it appears as 1,000,000,000. If "e", 1e+9. If
"E", 1E+9. If "power", 1x10^9 (with 9 in a super
script). If "SI", 1G. If "B", 1B. "SI" uses prefixes
from "femto" f (10^-15) to "tera" T (10^12). *SI
extended* covers instead the full SI range from
"quecto" q (10^-30) to "quetta" Q (10^30). If "SI" or
*SI extended* is used and the exponent is beyond the
above ranges, the formatting rule will automatically be
switched to the power notation.
labelalias
Replacement text for specific tick or hover labels. For
example using {US: 'USA', CA: 'Canada'} changes US to
USA and CA to Canada. The labels we would have shown
must match the keys exactly, after adding any
tickprefix or ticksuffix. For negative numbers the
minus sign symbol used (U+2212) is wider than the
regular ascii dash. That means you need to use −1
instead of -1. labelalias can be used with any axis
type, and both keys (if needed) and values (if desired)
can include html-like tags or MathJax.
len
Sets the length of the color bar This measure excludes
the padding of both ends. That is, the color bar length
is this length minus the padding on both ends.
lenmode
Determines whether this color bar's length (i.e. the
measure in the color variation direction) is set in
units of plot "fraction" or in *pixels. Use `len` to
set the value.
minexponent
Hide SI prefix for 10^n if |n| is below this number.
This only has an effect when `tickformat` is "SI" or
"B".
nticks
Specifies the maximum number of ticks for the
particular axis. The actual number of ticks will be
chosen automatically to be less than or equal to
`nticks`. Has an effect only if `tickmode` is set to
"auto".
orientation
Sets the orientation of the colorbar.
outlinecolor
Sets the axis line color.
outlinewidth
Sets the width (in px) of the axis line.
separatethousands
If "true", even 4-digit integers are separated
showexponent
If "all", all exponents are shown besides their
significands. If "first", only the exponent of the
first tick is shown. If "last", only the exponent of
the last tick is shown. If "none", no exponents appear.
showticklabels
Determines whether or not the tick labels are drawn.
showtickprefix
If "all", all tick labels are displayed with a prefix.
If "first", only the first tick is displayed with a
prefix. If "last", only the last tick is displayed with
a suffix. If "none", tick prefixes are hidden.
showticksuffix
Same as `showtickprefix` but for tick suffixes.
thickness
Sets the thickness of the color bar This measure
excludes the size of the padding, ticks and labels.
thicknessmode
Determines whether this color bar's thickness (i.e. the
measure in the constant color direction) is set in
units of plot "fraction" or in "pixels". Use
`thickness` to set the value.
tick0
Sets the placement of the first tick on this axis. Use
with `dtick`. If the axis `type` is "log", then you
must take the log of your starting tick (e.g. to set
the starting tick to 100, set the `tick0` to 2) except
when `dtick`=*L<f>* (see `dtick` for more info). If the
axis `type` is "date", it should be a date string, like
date data. If the axis `type` is "category", it should
be a number, using the scale where each category is
assigned a serial number from zero in the order it
appears.
tickangle
Sets the angle of the tick labels with respect to the
horizontal. For example, a `tickangle` of -90 draws the
tick labels vertically.
tickcolor
Sets the tick color.
tickfont
Sets the color bar's tick label font
tickformat
Sets the tick label formatting rule using d3 formatting
mini-languages which are very similar to those in
Python. For numbers, see:
https://github.com/d3/d3-format/tree/v1.4.5#d3-format.
And for dates see: https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format. We add two items to
d3's date formatter: "%h" for half of the year as a
decimal number as well as "%{n}f" for fractional
seconds with n digits. For example, *2016-10-13
09:15:23.456* with tickformat "%H~%M~%S.%2f" would
display "09~15~23.46"
tickformatstops
A tuple of :class:`plotly.graph_objects.mesh3d.colorbar
.Tickformatstop` instances or dicts with compatible
properties
tickformatstopdefaults
When used in a template (as layout.template.data.mesh3d
.colorbar.tickformatstopdefaults), sets the default
property values to use for elements of
mesh3d.colorbar.tickformatstops
ticklabeloverflow
Determines how we handle tick labels that would
overflow either the graph div or the domain of the
axis. The default value for inside tick labels is *hide
past domain*. In other cases the default is *hide past
div*.
ticklabelposition
Determines where tick labels are drawn relative to the
ticks. Left and right options are used when
`orientation` is "h", top and bottom when `orientation`
is "v".
ticklabelstep
Sets the spacing between tick labels as compared to the
spacing between ticks. A value of 1 (default) means
each tick gets a label. A value of 2 means shows every
2nd label. A larger value n means only every nth tick
is labeled. `tick0` determines which labels are shown.
Not implemented for axes with `type` "log" or
"multicategory", or when `tickmode` is "array".
ticklen
Sets the tick length (in px).
tickmode
Sets the tick mode for this axis. If "auto", the number
of ticks is set via `nticks`. If "linear", the
placement of the ticks is determined by a starting
position `tick0` and a tick step `dtick` ("linear" is
the default value if `tick0` and `dtick` are provided).
If "array", the placement of the ticks is set via
`tickvals` and the tick text is `ticktext`. ("array" is
the default value if `tickvals` is provided).
tickprefix
Sets a tick label prefix.
ticks
Determines whether ticks are drawn or not. If "", this
axis' ticks are not drawn. If "outside" ("inside"),
this axis' are drawn outside (inside) the axis lines.
ticksuffix
Sets a tick label suffix.
ticktext
Sets the text displayed at the ticks position via
`tickvals`. Only has an effect if `tickmode` is set to
"array". Used with `tickvals`.
ticktextsrc
Sets the source reference on Chart Studio Cloud for
`ticktext`.
tickvals
Sets the values at which ticks on this axis appear.
Only has an effect if `tickmode` is set to "array".
Used with `ticktext`.
tickvalssrc
Sets the source reference on Chart Studio Cloud for
`tickvals`.
tickwidth
Sets the tick width (in px).
title
:class:`plotly.graph_objects.mesh3d.colorbar.Title`
instance or dict with compatible properties
x
Sets the x position with respect to `xref` of the color
bar (in plot fraction). When `xref` is "paper",
defaults to 1.02 when `orientation` is "v" and 0.5 when
`orientation` is "h". When `xref` is "container",
defaults to 1 when `orientation` is "v" and 0.5 when
`orientation` is "h". Must be between 0 and 1 if `xref`
is "container" and between "-2" and 3 if `xref` is
"paper".
xanchor
Sets this color bar's horizontal position anchor. This
anchor binds the `x` position to the "left", "center"
or "right" of the color bar. Defaults to "left" when
`orientation` is "v" and "center" when `orientation` is
"h".
xpad
Sets the amount of padding (in px) along the x
direction.
xref
Sets the container `x` refers to. "container" spans the
entire `width` of the plot. "paper" refers to the width
of the plotting area only.
y
Sets the y position with respect to `yref` of the color
bar (in plot fraction). When `yref` is "paper",
defaults to 0.5 when `orientation` is "v" and 1.02 when
`orientation` is "h". When `yref` is "container",
defaults to 0.5 when `orientation` is "v" and 1 when
`orientation` is "h". Must be between 0 and 1 if `yref`
is "container" and between "-2" and 3 if `yref` is
"paper".
yanchor
Sets this color bar's vertical position anchor This
anchor binds the `y` position to the "top", "middle" or
"bottom" of the color bar. Defaults to "middle" when
`orientation` is "v" and "bottom" when `orientation` is
"h".
ypad
Sets the amount of padding (in px) along the y
direction.
yref
Sets the container `y` refers to. "container" spans the
entire `height` of the plot. "paper" refers to the
height of the plotting area only.
Returns
-------
ColorBar
"""
super().__init__("colorbar")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.mesh3d.ColorBar
constructor must be a dict or
an instance of :class:`plotly.graph_objs.mesh3d.ColorBar`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("bgcolor", arg, bgcolor)
self._set_property("bordercolor", arg, bordercolor)
self._set_property("borderwidth", arg, borderwidth)
self._set_property("dtick", arg, dtick)
self._set_property("exponentformat", arg, exponentformat)
self._set_property("labelalias", arg, labelalias)
self._set_property("len", arg, len)
self._set_property("lenmode", arg, lenmode)
self._set_property("minexponent", arg, minexponent)
self._set_property("nticks", arg, nticks)
self._set_property("orientation", arg, orientation)
self._set_property("outlinecolor", arg, outlinecolor)
self._set_property("outlinewidth", arg, outlinewidth)
self._set_property("separatethousands", arg, separatethousands)
self._set_property("showexponent", arg, showexponent)
self._set_property("showticklabels", arg, showticklabels)
self._set_property("showtickprefix", arg, showtickprefix)
self._set_property("showticksuffix", arg, showticksuffix)
self._set_property("thickness", arg, thickness)
self._set_property("thicknessmode", arg, thicknessmode)
self._set_property("tick0", arg, tick0)
self._set_property("tickangle", arg, tickangle)
self._set_property("tickcolor", arg, tickcolor)
self._set_property("tickfont", arg, tickfont)
self._set_property("tickformat", arg, tickformat)
self._set_property("tickformatstops", arg, tickformatstops)
self._set_property("tickformatstopdefaults", arg, tickformatstopdefaults)
self._set_property("ticklabeloverflow", arg, ticklabeloverflow)
self._set_property("ticklabelposition", arg, ticklabelposition)
self._set_property("ticklabelstep", arg, ticklabelstep)
self._set_property("ticklen", arg, ticklen)
self._set_property("tickmode", arg, tickmode)
self._set_property("tickprefix", arg, tickprefix)
self._set_property("ticks", arg, ticks)
self._set_property("ticksuffix", arg, ticksuffix)
self._set_property("ticktext", arg, ticktext)
self._set_property("ticktextsrc", arg, ticktextsrc)
self._set_property("tickvals", arg, tickvals)
self._set_property("tickvalssrc", arg, tickvalssrc)
self._set_property("tickwidth", arg, tickwidth)
self._set_property("title", arg, title)
self._set_property("x", arg, x)
self._set_property("xanchor", arg, xanchor)
self._set_property("xpad", arg, xpad)
self._set_property("xref", arg, xref)
self._set_property("y", arg, y)
self._set_property("yanchor", arg, yanchor)
self._set_property("ypad", arg, ypad)
self._set_property("yref", arg, yref)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| ColorBar |
python | facebookresearch__faiss | tests/test_partition.py | {
"start": 5343,
"end": 6861
} | class ____(unittest.TestCase, PartitionTests):
def do_partition(self, n, q, maxval=65536, seed=None):
#seed = 1235
if seed is None:
for i in range(50):
self.do_partition(n, q, maxval, i + 1234)
rs = np.random.RandomState(seed)
vals = rs.randint(maxval, size=n).astype('uint16')
ids = (rs.permutation(n) + 12345).astype('int64')
dic = dict(zip(ids, vals))
sp = faiss.swig_ptr
vals_orig = vals.copy()
tab_a = faiss.AlignedTableUint16()
vals_inv = (65535 - vals).astype('uint16')
faiss.copy_array_to_AlignedTable(vals_inv, tab_a)
if isinstance(q, int):
faiss.CMin_uint16_partition_fuzzy(
tab_a.get(), sp(ids), n, q, q, None)
else:
q_min, q_max = q
q = pointer_to_minus1()
thresh2 = faiss.CMin_uint16_partition_fuzzy(
tab_a.get(), sp(ids), n,
q_min, q_max, sp(q)
)
q = q[0]
assert q_min <= q <= q_max
vals_inv = faiss.AlignedTable_to_array(tab_a)
vals = 65535 - vals_inv
o = vals_orig.argsort()
thresh = vals_orig[o[q]]
n_eq = (vals_orig[o[:q]] == thresh).sum()
for i in range(q):
self.assertEqual(vals[i], dic[ids[i]])
self.assertLessEqual(vals[i], thresh)
if vals[i] == thresh:
n_eq -= 1
self.assertEqual(n_eq, 0)
| TestPartitioningUint16Min |
python | google__python-fire | examples/diff/diff.py | {
"start": 1897,
"end": 3189
} | class ____(object):
"""Provides a simple interface to the difflib module.
The purpose of this simple interface is to offer a limited subset of the
difflib functionality as a command line interface.
"""
def __init__(self, fromfile, tofile):
self._fromfile = fromfile
self._tofile = tofile
self.fromdate = time.ctime(os.stat(fromfile).st_mtime)
self.todate = time.ctime(os.stat(tofile).st_mtime)
with open(fromfile) as f:
self.fromlines = f.readlines()
with open(tofile) as f:
self.tolines = f.readlines()
def unified_diff(self, lines=3):
return difflib.unified_diff(
self.fromlines, self.tolines, self._fromfile,
self._tofile, self.fromdate, self.todate, n=lines)
def ndiff(self):
return difflib.ndiff(self.fromlines, self.tolines)
def make_file(self, context=False, lines=3):
return difflib.HtmlDiff().make_file(
self.fromlines, self.tolines, self._fromfile, self._tofile,
context=context, numlines=lines)
def context_diff(self, lines=3):
return difflib.context_diff(
self.fromlines, self.tolines, self._fromfile,
self._tofile, self.fromdate, self.todate, n=lines)
def main():
fire.Fire(DiffLibWrapper, name='diff')
if __name__ == '__main__':
main()
| DiffLibWrapper |
python | dagster-io__dagster | python_modules/dagster-graphql/dagster_graphql/schema/partition_sets.py | {
"start": 17178,
"end": 18239
} | class ____(graphene.Enum):
TIME_WINDOW = "TIME_WINDOW"
STATIC = "STATIC"
MULTIPARTITIONED = "MULTIPARTITIONED"
DYNAMIC = "DYNAMIC"
class Meta:
name = "PartitionDefinitionType"
@classmethod
def from_partition_def_data(cls, partition_def_data):
check.inst_param(partition_def_data, "partition_def_data", PartitionsSnap)
if isinstance(partition_def_data, StaticPartitionsSnap):
return GraphenePartitionDefinitionType.STATIC
elif isinstance(partition_def_data, TimeWindowPartitionsSnap):
return GraphenePartitionDefinitionType.TIME_WINDOW
elif isinstance(partition_def_data, MultiPartitionsSnap):
return GraphenePartitionDefinitionType.MULTIPARTITIONED
elif isinstance(partition_def_data, DynamicPartitionsSnap):
return GraphenePartitionDefinitionType.DYNAMIC
else:
check.failed(
f"Invalid external partitions definition data type: {type(partition_def_data)}"
)
| GraphenePartitionDefinitionType |
python | rapidsai__cudf | python/cudf_polars/cudf_polars/typing/__init__.py | {
"start": 5001,
"end": 5300
} | class ____(TypedDict):
kind: Literal["struct"]
fields: list[_StructFieldHeader]
DataTypeHeader = (
_ScalarDataTypeHeader
| _DecimalDataTypeHeader
| _DatetimeDataTypeHeader
| _DurationDataTypeHeader
| _ListDataTypeHeader
| _StructDataTypeHeader
)
| _StructDataTypeHeader |
python | dateutil__dateutil | tests/test_rrule.py | {
"start": 394,
"end": 205083
} | class ____(unittest.TestCase):
def _rrulestr_reverse_test(self, rule):
"""
Call with an `rrule` and it will test that `str(rrule)` generates a
string which generates the same `rrule` as the input when passed to
`rrulestr()`
"""
rr_str = str(rule)
rrulestr_rrule = rrulestr(rr_str)
self.assertEqual(list(rule), list(rrulestr_rrule))
def testStrAppendRRULEToken(self):
# `_rrulestr_reverse_test` does not check if the "RRULE:" prefix
# property is appended properly, so give it a dedicated test
self.assertEqual(str(rrule(YEARLY,
count=5,
dtstart=datetime(1997, 9, 2, 9, 0))),
"DTSTART:19970902T090000\n"
"RRULE:FREQ=YEARLY;COUNT=5")
rr_str = (
'DTSTART:19970105T083000\nRRULE:FREQ=YEARLY;INTERVAL=2'
)
self.assertEqual(str(rrulestr(rr_str)), rr_str)
def testYearly(self):
self.assertEqual(list(rrule(YEARLY,
count=3,
dtstart=datetime(1997, 9, 2, 9, 0))),
[datetime(1997, 9, 2, 9, 0),
datetime(1998, 9, 2, 9, 0),
datetime(1999, 9, 2, 9, 0)])
def testYearlyInterval(self):
self.assertEqual(list(rrule(YEARLY,
count=3,
interval=2,
dtstart=datetime(1997, 9, 2, 9, 0))),
[datetime(1997, 9, 2, 9, 0),
datetime(1999, 9, 2, 9, 0),
datetime(2001, 9, 2, 9, 0)])
def testYearlyIntervalLarge(self):
self.assertEqual(list(rrule(YEARLY,
count=3,
interval=100,
dtstart=datetime(1997, 9, 2, 9, 0))),
[datetime(1997, 9, 2, 9, 0),
datetime(2097, 9, 2, 9, 0),
datetime(2197, 9, 2, 9, 0)])
def testYearlyByMonth(self):
self.assertEqual(list(rrule(YEARLY,
count=3,
bymonth=(1, 3),
dtstart=datetime(1997, 9, 2, 9, 0))),
[datetime(1998, 1, 2, 9, 0),
datetime(1998, 3, 2, 9, 0),
datetime(1999, 1, 2, 9, 0)])
def testYearlyByMonthDay(self):
self.assertEqual(list(rrule(YEARLY,
count=3,
bymonthday=(1, 3),
dtstart=datetime(1997, 9, 2, 9, 0))),
[datetime(1997, 9, 3, 9, 0),
datetime(1997, 10, 1, 9, 0),
datetime(1997, 10, 3, 9, 0)])
def testYearlyByMonthAndMonthDay(self):
self.assertEqual(list(rrule(YEARLY,
count=3,
bymonth=(1, 3),
bymonthday=(5, 7),
dtstart=datetime(1997, 9, 2, 9, 0))),
[datetime(1998, 1, 5, 9, 0),
datetime(1998, 1, 7, 9, 0),
datetime(1998, 3, 5, 9, 0)])
def testYearlyByWeekDay(self):
self.assertEqual(list(rrule(YEARLY,
count=3,
byweekday=(TU, TH),
dtstart=datetime(1997, 9, 2, 9, 0))),
[datetime(1997, 9, 2, 9, 0),
datetime(1997, 9, 4, 9, 0),
datetime(1997, 9, 9, 9, 0)])
def testYearlyByNWeekDay(self):
self.assertEqual(list(rrule(YEARLY,
count=3,
byweekday=(TU(1), TH(-1)),
dtstart=datetime(1997, 9, 2, 9, 0))),
[datetime(1997, 12, 25, 9, 0),
datetime(1998, 1, 6, 9, 0),
datetime(1998, 12, 31, 9, 0)])
def testYearlyByNWeekDayLarge(self):
self.assertEqual(list(rrule(YEARLY,
count=3,
byweekday=(TU(3), TH(-3)),
dtstart=datetime(1997, 9, 2, 9, 0))),
[datetime(1997, 12, 11, 9, 0),
datetime(1998, 1, 20, 9, 0),
datetime(1998, 12, 17, 9, 0)])
def testYearlyByMonthAndWeekDay(self):
self.assertEqual(list(rrule(YEARLY,
count=3,
bymonth=(1, 3),
byweekday=(TU, TH),
dtstart=datetime(1997, 9, 2, 9, 0))),
[datetime(1998, 1, 1, 9, 0),
datetime(1998, 1, 6, 9, 0),
datetime(1998, 1, 8, 9, 0)])
def testYearlyByMonthAndNWeekDay(self):
self.assertEqual(list(rrule(YEARLY,
count=3,
bymonth=(1, 3),
byweekday=(TU(1), TH(-1)),
dtstart=datetime(1997, 9, 2, 9, 0))),
[datetime(1998, 1, 6, 9, 0),
datetime(1998, 1, 29, 9, 0),
datetime(1998, 3, 3, 9, 0)])
def testYearlyByMonthAndNWeekDayLarge(self):
# This is interesting because the TH(-3) ends up before
# the TU(3).
self.assertEqual(list(rrule(YEARLY,
count=3,
bymonth=(1, 3),
byweekday=(TU(3), TH(-3)),
dtstart=datetime(1997, 9, 2, 9, 0))),
[datetime(1998, 1, 15, 9, 0),
datetime(1998, 1, 20, 9, 0),
datetime(1998, 3, 12, 9, 0)])
def testYearlyByMonthDayAndWeekDay(self):
self.assertEqual(list(rrule(YEARLY,
count=3,
bymonthday=(1, 3),
byweekday=(TU, TH),
dtstart=datetime(1997, 9, 2, 9, 0))),
[datetime(1998, 1, 1, 9, 0),
datetime(1998, 2, 3, 9, 0),
datetime(1998, 3, 3, 9, 0)])
def testYearlyByMonthAndMonthDayAndWeekDay(self):
self.assertEqual(list(rrule(YEARLY,
count=3,
bymonth=(1, 3),
bymonthday=(1, 3),
byweekday=(TU, TH),
dtstart=datetime(1997, 9, 2, 9, 0))),
[datetime(1998, 1, 1, 9, 0),
datetime(1998, 3, 3, 9, 0),
datetime(2001, 3, 1, 9, 0)])
def testYearlyByYearDay(self):
self.assertEqual(list(rrule(YEARLY,
count=4,
byyearday=(1, 100, 200, 365),
dtstart=datetime(1997, 9, 2, 9, 0))),
[datetime(1997, 12, 31, 9, 0),
datetime(1998, 1, 1, 9, 0),
datetime(1998, 4, 10, 9, 0),
datetime(1998, 7, 19, 9, 0)])
def testYearlyByYearDayNeg(self):
self.assertEqual(list(rrule(YEARLY,
count=4,
byyearday=(-365, -266, -166, -1),
dtstart=datetime(1997, 9, 2, 9, 0))),
[datetime(1997, 12, 31, 9, 0),
datetime(1998, 1, 1, 9, 0),
datetime(1998, 4, 10, 9, 0),
datetime(1998, 7, 19, 9, 0)])
def testYearlyByMonthAndYearDay(self):
self.assertEqual(list(rrule(YEARLY,
count=4,
bymonth=(4, 7),
byyearday=(1, 100, 200, 365),
dtstart=datetime(1997, 9, 2, 9, 0))),
[datetime(1998, 4, 10, 9, 0),
datetime(1998, 7, 19, 9, 0),
datetime(1999, 4, 10, 9, 0),
datetime(1999, 7, 19, 9, 0)])
def testYearlyByMonthAndYearDayNeg(self):
self.assertEqual(list(rrule(YEARLY,
count=4,
bymonth=(4, 7),
byyearday=(-365, -266, -166, -1),
dtstart=datetime(1997, 9, 2, 9, 0))),
[datetime(1998, 4, 10, 9, 0),
datetime(1998, 7, 19, 9, 0),
datetime(1999, 4, 10, 9, 0),
datetime(1999, 7, 19, 9, 0)])
def testYearlyByWeekNo(self):
self.assertEqual(list(rrule(YEARLY,
count=3,
byweekno=20,
dtstart=datetime(1997, 9, 2, 9, 0))),
[datetime(1998, 5, 11, 9, 0),
datetime(1998, 5, 12, 9, 0),
datetime(1998, 5, 13, 9, 0)])
def testYearlyByWeekNoAndWeekDay(self):
# That's a nice one. The first days of week number one
# may be in the last year.
self.assertEqual(list(rrule(YEARLY,
count=3,
byweekno=1,
byweekday=MO,
dtstart=datetime(1997, 9, 2, 9, 0))),
[datetime(1997, 12, 29, 9, 0),
datetime(1999, 1, 4, 9, 0),
datetime(2000, 1, 3, 9, 0)])
def testYearlyByWeekNoAndWeekDayLarge(self):
# Another nice test. The last days of week number 52/53
# may be in the next year.
self.assertEqual(list(rrule(YEARLY,
count=3,
byweekno=52,
byweekday=SU,
dtstart=datetime(1997, 9, 2, 9, 0))),
[datetime(1997, 12, 28, 9, 0),
datetime(1998, 12, 27, 9, 0),
datetime(2000, 1, 2, 9, 0)])
def testYearlyByWeekNoAndWeekDayLast(self):
self.assertEqual(list(rrule(YEARLY,
count=3,
byweekno=-1,
byweekday=SU,
dtstart=datetime(1997, 9, 2, 9, 0))),
[datetime(1997, 12, 28, 9, 0),
datetime(1999, 1, 3, 9, 0),
datetime(2000, 1, 2, 9, 0)])
def testYearlyByEaster(self):
self.assertEqual(list(rrule(YEARLY,
count=3,
byeaster=0,
dtstart=datetime(1997, 9, 2, 9, 0))),
[datetime(1998, 4, 12, 9, 0),
datetime(1999, 4, 4, 9, 0),
datetime(2000, 4, 23, 9, 0)])
def testYearlyByEasterPos(self):
self.assertEqual(list(rrule(YEARLY,
count=3,
byeaster=1,
dtstart=datetime(1997, 9, 2, 9, 0))),
[datetime(1998, 4, 13, 9, 0),
datetime(1999, 4, 5, 9, 0),
datetime(2000, 4, 24, 9, 0)])
def testYearlyByEasterNeg(self):
self.assertEqual(list(rrule(YEARLY,
count=3,
byeaster=-1,
dtstart=datetime(1997, 9, 2, 9, 0))),
[datetime(1998, 4, 11, 9, 0),
datetime(1999, 4, 3, 9, 0),
datetime(2000, 4, 22, 9, 0)])
def testYearlyByWeekNoAndWeekDay53(self):
self.assertEqual(list(rrule(YEARLY,
count=3,
byweekno=53,
byweekday=MO,
dtstart=datetime(1997, 9, 2, 9, 0))),
[datetime(1998, 12, 28, 9, 0),
datetime(2004, 12, 27, 9, 0),
datetime(2009, 12, 28, 9, 0)])
def testYearlyByHour(self):
self.assertEqual(list(rrule(YEARLY,
count=3,
byhour=(6, 18),
dtstart=datetime(1997, 9, 2, 9, 0))),
[datetime(1997, 9, 2, 18, 0),
datetime(1998, 9, 2, 6, 0),
datetime(1998, 9, 2, 18, 0)])
def testYearlyByMinute(self):
self.assertEqual(list(rrule(YEARLY,
count=3,
byminute=(6, 18),
dtstart=datetime(1997, 9, 2, 9, 0))),
[datetime(1997, 9, 2, 9, 6),
datetime(1997, 9, 2, 9, 18),
datetime(1998, 9, 2, 9, 6)])
def testYearlyBySecond(self):
self.assertEqual(list(rrule(YEARLY,
count=3,
bysecond=(6, 18),
dtstart=datetime(1997, 9, 2, 9, 0))),
[datetime(1997, 9, 2, 9, 0, 6),
datetime(1997, 9, 2, 9, 0, 18),
datetime(1998, 9, 2, 9, 0, 6)])
def testYearlyByHourAndMinute(self):
self.assertEqual(list(rrule(YEARLY,
count=3,
byhour=(6, 18),
byminute=(6, 18),
dtstart=datetime(1997, 9, 2, 9, 0))),
[datetime(1997, 9, 2, 18, 6),
datetime(1997, 9, 2, 18, 18),
datetime(1998, 9, 2, 6, 6)])
def testYearlyByHourAndSecond(self):
self.assertEqual(list(rrule(YEARLY,
count=3,
byhour=(6, 18),
bysecond=(6, 18),
dtstart=datetime(1997, 9, 2, 9, 0))),
[datetime(1997, 9, 2, 18, 0, 6),
datetime(1997, 9, 2, 18, 0, 18),
datetime(1998, 9, 2, 6, 0, 6)])
def testYearlyByMinuteAndSecond(self):
self.assertEqual(list(rrule(YEARLY,
count=3,
byminute=(6, 18),
bysecond=(6, 18),
dtstart=datetime(1997, 9, 2, 9, 0))),
[datetime(1997, 9, 2, 9, 6, 6),
datetime(1997, 9, 2, 9, 6, 18),
datetime(1997, 9, 2, 9, 18, 6)])
def testYearlyByHourAndMinuteAndSecond(self):
self.assertEqual(list(rrule(YEARLY,
count=3,
byhour=(6, 18),
byminute=(6, 18),
bysecond=(6, 18),
dtstart=datetime(1997, 9, 2, 9, 0))),
[datetime(1997, 9, 2, 18, 6, 6),
datetime(1997, 9, 2, 18, 6, 18),
datetime(1997, 9, 2, 18, 18, 6)])
def testYearlyBySetPos(self):
self.assertEqual(list(rrule(YEARLY,
count=3,
bymonthday=15,
byhour=(6, 18),
bysetpos=(3, -3),
dtstart=datetime(1997, 9, 2, 9, 0))),
[datetime(1997, 11, 15, 18, 0),
datetime(1998, 2, 15, 6, 0),
datetime(1998, 11, 15, 18, 0)])
def testMonthly(self):
self.assertEqual(list(rrule(MONTHLY,
count=3,
dtstart=datetime(1997, 9, 2, 9, 0))),
[datetime(1997, 9, 2, 9, 0),
datetime(1997, 10, 2, 9, 0),
datetime(1997, 11, 2, 9, 0)])
def testMonthlyInterval(self):
self.assertEqual(list(rrule(MONTHLY,
count=3,
interval=2,
dtstart=datetime(1997, 9, 2, 9, 0))),
[datetime(1997, 9, 2, 9, 0),
datetime(1997, 11, 2, 9, 0),
datetime(1998, 1, 2, 9, 0)])
def testMonthlyIntervalLarge(self):
self.assertEqual(list(rrule(MONTHLY,
count=3,
interval=18,
dtstart=datetime(1997, 9, 2, 9, 0))),
[datetime(1997, 9, 2, 9, 0),
datetime(1999, 3, 2, 9, 0),
datetime(2000, 9, 2, 9, 0)])
def testMonthlyByMonth(self):
self.assertEqual(list(rrule(MONTHLY,
count=3,
bymonth=(1, 3),
dtstart=datetime(1997, 9, 2, 9, 0))),
[datetime(1998, 1, 2, 9, 0),
datetime(1998, 3, 2, 9, 0),
datetime(1999, 1, 2, 9, 0)])
def testMonthlyByMonthDay(self):
self.assertEqual(list(rrule(MONTHLY,
count=3,
bymonthday=(1, 3),
dtstart=datetime(1997, 9, 2, 9, 0))),
[datetime(1997, 9, 3, 9, 0),
datetime(1997, 10, 1, 9, 0),
datetime(1997, 10, 3, 9, 0)])
def testMonthlyByMonthAndMonthDay(self):
self.assertEqual(list(rrule(MONTHLY,
count=3,
bymonth=(1, 3),
bymonthday=(5, 7),
dtstart=datetime(1997, 9, 2, 9, 0))),
[datetime(1998, 1, 5, 9, 0),
datetime(1998, 1, 7, 9, 0),
datetime(1998, 3, 5, 9, 0)])
def testMonthlyByWeekDay(self):
self.assertEqual(list(rrule(MONTHLY,
count=3,
byweekday=(TU, TH),
dtstart=datetime(1997, 9, 2, 9, 0))),
[datetime(1997, 9, 2, 9, 0),
datetime(1997, 9, 4, 9, 0),
datetime(1997, 9, 9, 9, 0)])
# Third Monday of the month
self.assertEqual(rrule(MONTHLY,
byweekday=(MO(+3)),
dtstart=datetime(1997, 9, 1)).between(datetime(1997, 9, 1),
datetime(1997, 12, 1)),
[datetime(1997, 9, 15, 0, 0),
datetime(1997, 10, 20, 0, 0),
datetime(1997, 11, 17, 0, 0)])
def testMonthlyByNWeekDay(self):
self.assertEqual(list(rrule(MONTHLY,
count=3,
byweekday=(TU(1), TH(-1)),
dtstart=datetime(1997, 9, 2, 9, 0))),
[datetime(1997, 9, 2, 9, 0),
datetime(1997, 9, 25, 9, 0),
datetime(1997, 10, 7, 9, 0)])
def testMonthlyByNWeekDayLarge(self):
self.assertEqual(list(rrule(MONTHLY,
count=3,
byweekday=(TU(3), TH(-3)),
dtstart=datetime(1997, 9, 2, 9, 0))),
[datetime(1997, 9, 11, 9, 0),
datetime(1997, 9, 16, 9, 0),
datetime(1997, 10, 16, 9, 0)])
def testMonthlyByMonthAndWeekDay(self):
self.assertEqual(list(rrule(MONTHLY,
count=3,
bymonth=(1, 3),
byweekday=(TU, TH),
dtstart=datetime(1997, 9, 2, 9, 0))),
[datetime(1998, 1, 1, 9, 0),
datetime(1998, 1, 6, 9, 0),
datetime(1998, 1, 8, 9, 0)])
def testMonthlyByMonthAndNWeekDay(self):
self.assertEqual(list(rrule(MONTHLY,
count=3,
bymonth=(1, 3),
byweekday=(TU(1), TH(-1)),
dtstart=datetime(1997, 9, 2, 9, 0))),
[datetime(1998, 1, 6, 9, 0),
datetime(1998, 1, 29, 9, 0),
datetime(1998, 3, 3, 9, 0)])
def testMonthlyByMonthAndNWeekDayLarge(self):
self.assertEqual(list(rrule(MONTHLY,
count=3,
bymonth=(1, 3),
byweekday=(TU(3), TH(-3)),
dtstart=datetime(1997, 9, 2, 9, 0))),
[datetime(1998, 1, 15, 9, 0),
datetime(1998, 1, 20, 9, 0),
datetime(1998, 3, 12, 9, 0)])
def testMonthlyByMonthDayAndWeekDay(self):
self.assertEqual(list(rrule(MONTHLY,
count=3,
bymonthday=(1, 3),
byweekday=(TU, TH),
dtstart=datetime(1997, 9, 2, 9, 0))),
[datetime(1998, 1, 1, 9, 0),
datetime(1998, 2, 3, 9, 0),
datetime(1998, 3, 3, 9, 0)])
def testMonthlyByMonthAndMonthDayAndWeekDay(self):
self.assertEqual(list(rrule(MONTHLY,
count=3,
bymonth=(1, 3),
bymonthday=(1, 3),
byweekday=(TU, TH),
dtstart=datetime(1997, 9, 2, 9, 0))),
[datetime(1998, 1, 1, 9, 0),
datetime(1998, 3, 3, 9, 0),
datetime(2001, 3, 1, 9, 0)])
def testMonthlyByYearDay(self):
self.assertEqual(list(rrule(MONTHLY,
count=4,
byyearday=(1, 100, 200, 365),
dtstart=datetime(1997, 9, 2, 9, 0))),
[datetime(1997, 12, 31, 9, 0),
datetime(1998, 1, 1, 9, 0),
datetime(1998, 4, 10, 9, 0),
datetime(1998, 7, 19, 9, 0)])
def testMonthlyByYearDayNeg(self):
self.assertEqual(list(rrule(MONTHLY,
count=4,
byyearday=(-365, -266, -166, -1),
dtstart=datetime(1997, 9, 2, 9, 0))),
[datetime(1997, 12, 31, 9, 0),
datetime(1998, 1, 1, 9, 0),
datetime(1998, 4, 10, 9, 0),
datetime(1998, 7, 19, 9, 0)])
def testMonthlyByMonthAndYearDay(self):
self.assertEqual(list(rrule(MONTHLY,
count=4,
bymonth=(4, 7),
byyearday=(1, 100, 200, 365),
dtstart=datetime(1997, 9, 2, 9, 0))),
[datetime(1998, 4, 10, 9, 0),
datetime(1998, 7, 19, 9, 0),
datetime(1999, 4, 10, 9, 0),
datetime(1999, 7, 19, 9, 0)])
def testMonthlyByMonthAndYearDayNeg(self):
self.assertEqual(list(rrule(MONTHLY,
count=4,
bymonth=(4, 7),
byyearday=(-365, -266, -166, -1),
dtstart=datetime(1997, 9, 2, 9, 0))),
[datetime(1998, 4, 10, 9, 0),
datetime(1998, 7, 19, 9, 0),
datetime(1999, 4, 10, 9, 0),
datetime(1999, 7, 19, 9, 0)])
def testMonthlyByWeekNo(self):
self.assertEqual(list(rrule(MONTHLY,
count=3,
byweekno=20,
dtstart=datetime(1997, 9, 2, 9, 0))),
[datetime(1998, 5, 11, 9, 0),
datetime(1998, 5, 12, 9, 0),
datetime(1998, 5, 13, 9, 0)])
def testMonthlyByWeekNoAndWeekDay(self):
# That's a nice one. The first days of week number one
# may be in the last year.
self.assertEqual(list(rrule(MONTHLY,
count=3,
byweekno=1,
byweekday=MO,
dtstart=datetime(1997, 9, 2, 9, 0))),
[datetime(1997, 12, 29, 9, 0),
datetime(1999, 1, 4, 9, 0),
datetime(2000, 1, 3, 9, 0)])
def testMonthlyByWeekNoAndWeekDayLarge(self):
# Another nice test. The last days of week number 52/53
# may be in the next year.
self.assertEqual(list(rrule(MONTHLY,
count=3,
byweekno=52,
byweekday=SU,
dtstart=datetime(1997, 9, 2, 9, 0))),
[datetime(1997, 12, 28, 9, 0),
datetime(1998, 12, 27, 9, 0),
datetime(2000, 1, 2, 9, 0)])
def testMonthlyByWeekNoAndWeekDayLast(self):
self.assertEqual(list(rrule(MONTHLY,
count=3,
byweekno=-1,
byweekday=SU,
dtstart=datetime(1997, 9, 2, 9, 0))),
[datetime(1997, 12, 28, 9, 0),
datetime(1999, 1, 3, 9, 0),
datetime(2000, 1, 2, 9, 0)])
def testMonthlyByWeekNoAndWeekDay53(self):
self.assertEqual(list(rrule(MONTHLY,
count=3,
byweekno=53,
byweekday=MO,
dtstart=datetime(1997, 9, 2, 9, 0))),
[datetime(1998, 12, 28, 9, 0),
datetime(2004, 12, 27, 9, 0),
datetime(2009, 12, 28, 9, 0)])
def testMonthlyByEaster(self):
self.assertEqual(list(rrule(MONTHLY,
count=3,
byeaster=0,
dtstart=datetime(1997, 9, 2, 9, 0))),
[datetime(1998, 4, 12, 9, 0),
datetime(1999, 4, 4, 9, 0),
datetime(2000, 4, 23, 9, 0)])
    # --- MONTHLY frequency: byeaster offsets, time-of-day filters, bysetpos ---
    # All tests share dtstart=1997-09-02 09:00 and pin exact expected datetimes.

    def testMonthlyByEasterPos(self):
        # byeaster=1: the day after Western Easter Sunday each year.
        self.assertEqual(list(rrule(MONTHLY,
                                    count=3,
                                    byeaster=1,
                                    dtstart=datetime(1997, 9, 2, 9, 0))),
                         [datetime(1998, 4, 13, 9, 0),
                          datetime(1999, 4, 5, 9, 0),
                          datetime(2000, 4, 24, 9, 0)])

    def testMonthlyByEasterNeg(self):
        # byeaster=-1: the day before Western Easter Sunday each year.
        self.assertEqual(list(rrule(MONTHLY,
                                    count=3,
                                    byeaster=-1,
                                    dtstart=datetime(1997, 9, 2, 9, 0))),
                         [datetime(1998, 4, 11, 9, 0),
                          datetime(1999, 4, 3, 9, 0),
                          datetime(2000, 4, 22, 9, 0)])

    def testMonthlyByHour(self):
        # byhour expands each monthly occurrence to the listed hours;
        # the first hit is 18:00 on dtstart's own day.
        self.assertEqual(list(rrule(MONTHLY,
                                    count=3,
                                    byhour=(6, 18),
                                    dtstart=datetime(1997, 9, 2, 9, 0))),
                         [datetime(1997, 9, 2, 18, 0),
                          datetime(1997, 10, 2, 6, 0),
                          datetime(1997, 10, 2, 18, 0)])

    def testMonthlyByMinute(self):
        self.assertEqual(list(rrule(MONTHLY,
                                    count=3,
                                    byminute=(6, 18),
                                    dtstart=datetime(1997, 9, 2, 9, 0))),
                         [datetime(1997, 9, 2, 9, 6),
                          datetime(1997, 9, 2, 9, 18),
                          datetime(1997, 10, 2, 9, 6)])

    def testMonthlyBySecond(self):
        self.assertEqual(list(rrule(MONTHLY,
                                    count=3,
                                    bysecond=(6, 18),
                                    dtstart=datetime(1997, 9, 2, 9, 0))),
                         [datetime(1997, 9, 2, 9, 0, 6),
                          datetime(1997, 9, 2, 9, 0, 18),
                          datetime(1997, 10, 2, 9, 0, 6)])

    def testMonthlyByHourAndMinute(self):
        # Combined byhour+byminute takes the cross-product of the two sets.
        self.assertEqual(list(rrule(MONTHLY,
                                    count=3,
                                    byhour=(6, 18),
                                    byminute=(6, 18),
                                    dtstart=datetime(1997, 9, 2, 9, 0))),
                         [datetime(1997, 9, 2, 18, 6),
                          datetime(1997, 9, 2, 18, 18),
                          datetime(1997, 10, 2, 6, 6)])

    def testMonthlyByHourAndSecond(self):
        self.assertEqual(list(rrule(MONTHLY,
                                    count=3,
                                    byhour=(6, 18),
                                    bysecond=(6, 18),
                                    dtstart=datetime(1997, 9, 2, 9, 0))),
                         [datetime(1997, 9, 2, 18, 0, 6),
                          datetime(1997, 9, 2, 18, 0, 18),
                          datetime(1997, 10, 2, 6, 0, 6)])

    def testMonthlyByMinuteAndSecond(self):
        self.assertEqual(list(rrule(MONTHLY,
                                    count=3,
                                    byminute=(6, 18),
                                    bysecond=(6, 18),
                                    dtstart=datetime(1997, 9, 2, 9, 0))),
                         [datetime(1997, 9, 2, 9, 6, 6),
                          datetime(1997, 9, 2, 9, 6, 18),
                          datetime(1997, 9, 2, 9, 18, 6)])

    def testMonthlyByHourAndMinuteAndSecond(self):
        self.assertEqual(list(rrule(MONTHLY,
                                    count=3,
                                    byhour=(6, 18),
                                    byminute=(6, 18),
                                    bysecond=(6, 18),
                                    dtstart=datetime(1997, 9, 2, 9, 0))),
                         [datetime(1997, 9, 2, 18, 6, 6),
                          datetime(1997, 9, 2, 18, 6, 18),
                          datetime(1997, 9, 2, 18, 18, 6)])

    def testMonthlyBySetPos(self):
        # bysetpos selects the 3rd and 3rd-from-last candidates out of the
        # four per-month occurrences produced by bymonthday x byhour.
        self.assertEqual(list(rrule(MONTHLY,
                                    count=3,
                                    bymonthday=(13, 17),
                                    byhour=(6, 18),
                                    bysetpos=(3, -3),
                                    dtstart=datetime(1997, 9, 2, 9, 0))),
                         [datetime(1997, 9, 13, 18, 0),
                          datetime(1997, 9, 17, 6, 0),
                          datetime(1997, 10, 13, 18, 0)])
    # --- WEEKLY frequency ---
    # Same dtstart (Tuesday 1997-09-02 09:00) throughout; each test pins the
    # first few generated datetimes for one combination of byxxx filters.

    def testWeekly(self):
        # Plain weekly recurrence: same weekday/time, 7 days apart.
        self.assertEqual(list(rrule(WEEKLY,
                                    count=3,
                                    dtstart=datetime(1997, 9, 2, 9, 0))),
                         [datetime(1997, 9, 2, 9, 0),
                          datetime(1997, 9, 9, 9, 0),
                          datetime(1997, 9, 16, 9, 0)])

    def testWeeklyInterval(self):
        self.assertEqual(list(rrule(WEEKLY,
                                    count=3,
                                    interval=2,
                                    dtstart=datetime(1997, 9, 2, 9, 0))),
                         [datetime(1997, 9, 2, 9, 0),
                          datetime(1997, 9, 16, 9, 0),
                          datetime(1997, 9, 30, 9, 0)])

    def testWeeklyIntervalLarge(self):
        # interval=20 weeks crosses a year boundary.
        self.assertEqual(list(rrule(WEEKLY,
                                    count=3,
                                    interval=20,
                                    dtstart=datetime(1997, 9, 2, 9, 0))),
                         [datetime(1997, 9, 2, 9, 0),
                          datetime(1998, 1, 20, 9, 0),
                          datetime(1998, 6, 9, 9, 0)])

    def testWeeklyByMonth(self):
        # bymonth restricts which months may contain occurrences.
        self.assertEqual(list(rrule(WEEKLY,
                                    count=3,
                                    bymonth=(1, 3),
                                    dtstart=datetime(1997, 9, 2, 9, 0))),
                         [datetime(1998, 1, 6, 9, 0),
                          datetime(1998, 1, 13, 9, 0),
                          datetime(1998, 1, 20, 9, 0)])

    def testWeeklyByMonthDay(self):
        self.assertEqual(list(rrule(WEEKLY,
                                    count=3,
                                    bymonthday=(1, 3),
                                    dtstart=datetime(1997, 9, 2, 9, 0))),
                         [datetime(1997, 9, 3, 9, 0),
                          datetime(1997, 10, 1, 9, 0),
                          datetime(1997, 10, 3, 9, 0)])

    def testWeeklyByMonthAndMonthDay(self):
        self.assertEqual(list(rrule(WEEKLY,
                                    count=3,
                                    bymonth=(1, 3),
                                    bymonthday=(5, 7),
                                    dtstart=datetime(1997, 9, 2, 9, 0))),
                         [datetime(1998, 1, 5, 9, 0),
                          datetime(1998, 1, 7, 9, 0),
                          datetime(1998, 3, 5, 9, 0)])

    def testWeeklyByWeekDay(self):
        self.assertEqual(list(rrule(WEEKLY,
                                    count=3,
                                    byweekday=(TU, TH),
                                    dtstart=datetime(1997, 9, 2, 9, 0))),
                         [datetime(1997, 9, 2, 9, 0),
                          datetime(1997, 9, 4, 9, 0),
                          datetime(1997, 9, 9, 9, 0)])

    def testWeeklyByNWeekDay(self):
        # Numbered weekdays (TU(1), TH(-1)) degrade to plain weekdays at
        # WEEKLY frequency, so the result matches testWeeklyByWeekDay.
        self.assertEqual(list(rrule(WEEKLY,
                                    count=3,
                                    byweekday=(TU(1), TH(-1)),
                                    dtstart=datetime(1997, 9, 2, 9, 0))),
                         [datetime(1997, 9, 2, 9, 0),
                          datetime(1997, 9, 4, 9, 0),
                          datetime(1997, 9, 9, 9, 0)])

    def testWeeklyByMonthAndWeekDay(self):
        # This test is interesting, because it crosses the year
        # boundary in a weekly period to find day '1' as a
        # valid recurrence.
        self.assertEqual(list(rrule(WEEKLY,
                                    count=3,
                                    bymonth=(1, 3),
                                    byweekday=(TU, TH),
                                    dtstart=datetime(1997, 9, 2, 9, 0))),
                         [datetime(1998, 1, 1, 9, 0),
                          datetime(1998, 1, 6, 9, 0),
                          datetime(1998, 1, 8, 9, 0)])

    def testWeeklyByMonthAndNWeekDay(self):
        self.assertEqual(list(rrule(WEEKLY,
                                    count=3,
                                    bymonth=(1, 3),
                                    byweekday=(TU(1), TH(-1)),
                                    dtstart=datetime(1997, 9, 2, 9, 0))),
                         [datetime(1998, 1, 1, 9, 0),
                          datetime(1998, 1, 6, 9, 0),
                          datetime(1998, 1, 8, 9, 0)])

    def testWeeklyByMonthDayAndWeekDay(self):
        self.assertEqual(list(rrule(WEEKLY,
                                    count=3,
                                    bymonthday=(1, 3),
                                    byweekday=(TU, TH),
                                    dtstart=datetime(1997, 9, 2, 9, 0))),
                         [datetime(1998, 1, 1, 9, 0),
                          datetime(1998, 2, 3, 9, 0),
                          datetime(1998, 3, 3, 9, 0)])

    def testWeeklyByMonthAndMonthDayAndWeekDay(self):
        # Triple conjunction is rarely satisfied — note the jump to 2001.
        self.assertEqual(list(rrule(WEEKLY,
                                    count=3,
                                    bymonth=(1, 3),
                                    bymonthday=(1, 3),
                                    byweekday=(TU, TH),
                                    dtstart=datetime(1997, 9, 2, 9, 0))),
                         [datetime(1998, 1, 1, 9, 0),
                          datetime(1998, 3, 3, 9, 0),
                          datetime(2001, 3, 1, 9, 0)])

    def testWeeklyByYearDay(self):
        self.assertEqual(list(rrule(WEEKLY,
                                    count=4,
                                    byyearday=(1, 100, 200, 365),
                                    dtstart=datetime(1997, 9, 2, 9, 0))),
                         [datetime(1997, 12, 31, 9, 0),
                          datetime(1998, 1, 1, 9, 0),
                          datetime(1998, 4, 10, 9, 0),
                          datetime(1998, 7, 19, 9, 0)])

    def testWeeklyByYearDayNeg(self):
        # Negative year days count back from December 31st; for a 365-day
        # year these mirror the positive values in testWeeklyByYearDay.
        self.assertEqual(list(rrule(WEEKLY,
                                    count=4,
                                    byyearday=(-365, -266, -166, -1),
                                    dtstart=datetime(1997, 9, 2, 9, 0))),
                         [datetime(1997, 12, 31, 9, 0),
                          datetime(1998, 1, 1, 9, 0),
                          datetime(1998, 4, 10, 9, 0),
                          datetime(1998, 7, 19, 9, 0)])

    def testWeeklyByMonthAndYearDay(self):
        self.assertEqual(list(rrule(WEEKLY,
                                    count=4,
                                    bymonth=(1, 7),
                                    byyearday=(1, 100, 200, 365),
                                    dtstart=datetime(1997, 9, 2, 9, 0))),
                         [datetime(1998, 1, 1, 9, 0),
                          datetime(1998, 7, 19, 9, 0),
                          datetime(1999, 1, 1, 9, 0),
                          datetime(1999, 7, 19, 9, 0)])

    def testWeeklyByMonthAndYearDayNeg(self):
        self.assertEqual(list(rrule(WEEKLY,
                                    count=4,
                                    bymonth=(1, 7),
                                    byyearday=(-365, -266, -166, -1),
                                    dtstart=datetime(1997, 9, 2, 9, 0))),
                         [datetime(1998, 1, 1, 9, 0),
                          datetime(1998, 7, 19, 9, 0),
                          datetime(1999, 1, 1, 9, 0),
                          datetime(1999, 7, 19, 9, 0)])

    def testWeeklyByWeekNo(self):
        # byweekno uses ISO week numbering.
        self.assertEqual(list(rrule(WEEKLY,
                                    count=3,
                                    byweekno=20,
                                    dtstart=datetime(1997, 9, 2, 9, 0))),
                         [datetime(1998, 5, 11, 9, 0),
                          datetime(1998, 5, 12, 9, 0),
                          datetime(1998, 5, 13, 9, 0)])

    def testWeeklyByWeekNoAndWeekDay(self):
        # That's a nice one. The first days of week number one
        # may be in the last year.
        self.assertEqual(list(rrule(WEEKLY,
                                    count=3,
                                    byweekno=1,
                                    byweekday=MO,
                                    dtstart=datetime(1997, 9, 2, 9, 0))),
                         [datetime(1997, 12, 29, 9, 0),
                          datetime(1999, 1, 4, 9, 0),
                          datetime(2000, 1, 3, 9, 0)])

    def testWeeklyByWeekNoAndWeekDayLarge(self):
        # Another nice test. The last days of week number 52/53
        # may be in the next year.
        self.assertEqual(list(rrule(WEEKLY,
                                    count=3,
                                    byweekno=52,
                                    byweekday=SU,
                                    dtstart=datetime(1997, 9, 2, 9, 0))),
                         [datetime(1997, 12, 28, 9, 0),
                          datetime(1998, 12, 27, 9, 0),
                          datetime(2000, 1, 2, 9, 0)])

    def testWeeklyByWeekNoAndWeekDayLast(self):
        # byweekno=-1 means the last ISO week of the year.
        self.assertEqual(list(rrule(WEEKLY,
                                    count=3,
                                    byweekno=-1,
                                    byweekday=SU,
                                    dtstart=datetime(1997, 9, 2, 9, 0))),
                         [datetime(1997, 12, 28, 9, 0),
                          datetime(1999, 1, 3, 9, 0),
                          datetime(2000, 1, 2, 9, 0)])

    def testWeeklyByWeekNoAndWeekDay53(self):
        # Only years that actually contain an ISO week 53 qualify.
        self.assertEqual(list(rrule(WEEKLY,
                                    count=3,
                                    byweekno=53,
                                    byweekday=MO,
                                    dtstart=datetime(1997, 9, 2, 9, 0))),
                         [datetime(1998, 12, 28, 9, 0),
                          datetime(2004, 12, 27, 9, 0),
                          datetime(2009, 12, 28, 9, 0)])

    def testWeeklyByEaster(self):
        # byeaster=0: Western Easter Sunday itself.
        self.assertEqual(list(rrule(WEEKLY,
                                    count=3,
                                    byeaster=0,
                                    dtstart=datetime(1997, 9, 2, 9, 0))),
                         [datetime(1998, 4, 12, 9, 0),
                          datetime(1999, 4, 4, 9, 0),
                          datetime(2000, 4, 23, 9, 0)])

    def testWeeklyByEasterPos(self):
        self.assertEqual(list(rrule(WEEKLY,
                                    count=3,
                                    byeaster=1,
                                    dtstart=datetime(1997, 9, 2, 9, 0))),
                         [datetime(1998, 4, 13, 9, 0),
                          datetime(1999, 4, 5, 9, 0),
                          datetime(2000, 4, 24, 9, 0)])

    def testWeeklyByEasterNeg(self):
        self.assertEqual(list(rrule(WEEKLY,
                                    count=3,
                                    byeaster=-1,
                                    dtstart=datetime(1997, 9, 2, 9, 0))),
                         [datetime(1998, 4, 11, 9, 0),
                          datetime(1999, 4, 3, 9, 0),
                          datetime(2000, 4, 22, 9, 0)])

    def testWeeklyByHour(self):
        self.assertEqual(list(rrule(WEEKLY,
                                    count=3,
                                    byhour=(6, 18),
                                    dtstart=datetime(1997, 9, 2, 9, 0))),
                         [datetime(1997, 9, 2, 18, 0),
                          datetime(1997, 9, 9, 6, 0),
                          datetime(1997, 9, 9, 18, 0)])

    def testWeeklyByMinute(self):
        self.assertEqual(list(rrule(WEEKLY,
                                    count=3,
                                    byminute=(6, 18),
                                    dtstart=datetime(1997, 9, 2, 9, 0))),
                         [datetime(1997, 9, 2, 9, 6),
                          datetime(1997, 9, 2, 9, 18),
                          datetime(1997, 9, 9, 9, 6)])

    def testWeeklyBySecond(self):
        self.assertEqual(list(rrule(WEEKLY,
                                    count=3,
                                    bysecond=(6, 18),
                                    dtstart=datetime(1997, 9, 2, 9, 0))),
                         [datetime(1997, 9, 2, 9, 0, 6),
                          datetime(1997, 9, 2, 9, 0, 18),
                          datetime(1997, 9, 9, 9, 0, 6)])

    def testWeeklyByHourAndMinute(self):
        self.assertEqual(list(rrule(WEEKLY,
                                    count=3,
                                    byhour=(6, 18),
                                    byminute=(6, 18),
                                    dtstart=datetime(1997, 9, 2, 9, 0))),
                         [datetime(1997, 9, 2, 18, 6),
                          datetime(1997, 9, 2, 18, 18),
                          datetime(1997, 9, 9, 6, 6)])

    def testWeeklyByHourAndSecond(self):
        self.assertEqual(list(rrule(WEEKLY,
                                    count=3,
                                    byhour=(6, 18),
                                    bysecond=(6, 18),
                                    dtstart=datetime(1997, 9, 2, 9, 0))),
                         [datetime(1997, 9, 2, 18, 0, 6),
                          datetime(1997, 9, 2, 18, 0, 18),
                          datetime(1997, 9, 9, 6, 0, 6)])

    def testWeeklyByMinuteAndSecond(self):
        self.assertEqual(list(rrule(WEEKLY,
                                    count=3,
                                    byminute=(6, 18),
                                    bysecond=(6, 18),
                                    dtstart=datetime(1997, 9, 2, 9, 0))),
                         [datetime(1997, 9, 2, 9, 6, 6),
                          datetime(1997, 9, 2, 9, 6, 18),
                          datetime(1997, 9, 2, 9, 18, 6)])

    def testWeeklyByHourAndMinuteAndSecond(self):
        self.assertEqual(list(rrule(WEEKLY,
                                    count=3,
                                    byhour=(6, 18),
                                    byminute=(6, 18),
                                    bysecond=(6, 18),
                                    dtstart=datetime(1997, 9, 2, 9, 0))),
                         [datetime(1997, 9, 2, 18, 6, 6),
                          datetime(1997, 9, 2, 18, 6, 18),
                          datetime(1997, 9, 2, 18, 18, 6)])

    def testWeeklyBySetPos(self):
        # bysetpos picks the 3rd and 3rd-from-last of each week's
        # byweekday x byhour candidate set.
        self.assertEqual(list(rrule(WEEKLY,
                                    count=3,
                                    byweekday=(TU, TH),
                                    byhour=(6, 18),
                                    bysetpos=(3, -3),
                                    dtstart=datetime(1997, 9, 2, 9, 0))),
                         [datetime(1997, 9, 2, 18, 0),
                          datetime(1997, 9, 4, 6, 0),
                          datetime(1997, 9, 9, 18, 0)])
    # --- DAILY frequency ---
    # Mirrors the WEEKLY battery above with a 1-day base period.

    def testDaily(self):
        self.assertEqual(list(rrule(DAILY,
                                    count=3,
                                    dtstart=datetime(1997, 9, 2, 9, 0))),
                         [datetime(1997, 9, 2, 9, 0),
                          datetime(1997, 9, 3, 9, 0),
                          datetime(1997, 9, 4, 9, 0)])

    def testDailyInterval(self):
        self.assertEqual(list(rrule(DAILY,
                                    count=3,
                                    interval=2,
                                    dtstart=datetime(1997, 9, 2, 9, 0))),
                         [datetime(1997, 9, 2, 9, 0),
                          datetime(1997, 9, 4, 9, 0),
                          datetime(1997, 9, 6, 9, 0)])

    def testDailyIntervalLarge(self):
        # 92-day stride crosses month and year boundaries.
        self.assertEqual(list(rrule(DAILY,
                                    count=3,
                                    interval=92,
                                    dtstart=datetime(1997, 9, 2, 9, 0))),
                         [datetime(1997, 9, 2, 9, 0),
                          datetime(1997, 12, 3, 9, 0),
                          datetime(1998, 3, 5, 9, 0)])

    def testDailyByMonth(self):
        self.assertEqual(list(rrule(DAILY,
                                    count=3,
                                    bymonth=(1, 3),
                                    dtstart=datetime(1997, 9, 2, 9, 0))),
                         [datetime(1998, 1, 1, 9, 0),
                          datetime(1998, 1, 2, 9, 0),
                          datetime(1998, 1, 3, 9, 0)])

    def testDailyByMonthDay(self):
        self.assertEqual(list(rrule(DAILY,
                                    count=3,
                                    bymonthday=(1, 3),
                                    dtstart=datetime(1997, 9, 2, 9, 0))),
                         [datetime(1997, 9, 3, 9, 0),
                          datetime(1997, 10, 1, 9, 0),
                          datetime(1997, 10, 3, 9, 0)])

    def testDailyByMonthAndMonthDay(self):
        self.assertEqual(list(rrule(DAILY,
                                    count=3,
                                    bymonth=(1, 3),
                                    bymonthday=(5, 7),
                                    dtstart=datetime(1997, 9, 2, 9, 0))),
                         [datetime(1998, 1, 5, 9, 0),
                          datetime(1998, 1, 7, 9, 0),
                          datetime(1998, 3, 5, 9, 0)])

    def testDailyByWeekDay(self):
        self.assertEqual(list(rrule(DAILY,
                                    count=3,
                                    byweekday=(TU, TH),
                                    dtstart=datetime(1997, 9, 2, 9, 0))),
                         [datetime(1997, 9, 2, 9, 0),
                          datetime(1997, 9, 4, 9, 0),
                          datetime(1997, 9, 9, 9, 0)])

    def testDailyByNWeekDay(self):
        # Numbered weekdays collapse to plain weekdays at DAILY frequency.
        self.assertEqual(list(rrule(DAILY,
                                    count=3,
                                    byweekday=(TU(1), TH(-1)),
                                    dtstart=datetime(1997, 9, 2, 9, 0))),
                         [datetime(1997, 9, 2, 9, 0),
                          datetime(1997, 9, 4, 9, 0),
                          datetime(1997, 9, 9, 9, 0)])

    def testDailyByMonthAndWeekDay(self):
        self.assertEqual(list(rrule(DAILY,
                                    count=3,
                                    bymonth=(1, 3),
                                    byweekday=(TU, TH),
                                    dtstart=datetime(1997, 9, 2, 9, 0))),
                         [datetime(1998, 1, 1, 9, 0),
                          datetime(1998, 1, 6, 9, 0),
                          datetime(1998, 1, 8, 9, 0)])

    def testDailyByMonthAndNWeekDay(self):
        self.assertEqual(list(rrule(DAILY,
                                    count=3,
                                    bymonth=(1, 3),
                                    byweekday=(TU(1), TH(-1)),
                                    dtstart=datetime(1997, 9, 2, 9, 0))),
                         [datetime(1998, 1, 1, 9, 0),
                          datetime(1998, 1, 6, 9, 0),
                          datetime(1998, 1, 8, 9, 0)])

    def testDailyByMonthDayAndWeekDay(self):
        self.assertEqual(list(rrule(DAILY,
                                    count=3,
                                    bymonthday=(1, 3),
                                    byweekday=(TU, TH),
                                    dtstart=datetime(1997, 9, 2, 9, 0))),
                         [datetime(1998, 1, 1, 9, 0),
                          datetime(1998, 2, 3, 9, 0),
                          datetime(1998, 3, 3, 9, 0)])

    def testDailyByMonthAndMonthDayAndWeekDay(self):
        self.assertEqual(list(rrule(DAILY,
                                    count=3,
                                    bymonth=(1, 3),
                                    bymonthday=(1, 3),
                                    byweekday=(TU, TH),
                                    dtstart=datetime(1997, 9, 2, 9, 0))),
                         [datetime(1998, 1, 1, 9, 0),
                          datetime(1998, 3, 3, 9, 0),
                          datetime(2001, 3, 1, 9, 0)])

    def testDailyByYearDay(self):
        self.assertEqual(list(rrule(DAILY,
                                    count=4,
                                    byyearday=(1, 100, 200, 365),
                                    dtstart=datetime(1997, 9, 2, 9, 0))),
                         [datetime(1997, 12, 31, 9, 0),
                          datetime(1998, 1, 1, 9, 0),
                          datetime(1998, 4, 10, 9, 0),
                          datetime(1998, 7, 19, 9, 0)])

    def testDailyByYearDayNeg(self):
        self.assertEqual(list(rrule(DAILY,
                                    count=4,
                                    byyearday=(-365, -266, -166, -1),
                                    dtstart=datetime(1997, 9, 2, 9, 0))),
                         [datetime(1997, 12, 31, 9, 0),
                          datetime(1998, 1, 1, 9, 0),
                          datetime(1998, 4, 10, 9, 0),
                          datetime(1998, 7, 19, 9, 0)])

    def testDailyByMonthAndYearDay(self):
        self.assertEqual(list(rrule(DAILY,
                                    count=4,
                                    bymonth=(1, 7),
                                    byyearday=(1, 100, 200, 365),
                                    dtstart=datetime(1997, 9, 2, 9, 0))),
                         [datetime(1998, 1, 1, 9, 0),
                          datetime(1998, 7, 19, 9, 0),
                          datetime(1999, 1, 1, 9, 0),
                          datetime(1999, 7, 19, 9, 0)])

    def testDailyByMonthAndYearDayNeg(self):
        self.assertEqual(list(rrule(DAILY,
                                    count=4,
                                    bymonth=(1, 7),
                                    byyearday=(-365, -266, -166, -1),
                                    dtstart=datetime(1997, 9, 2, 9, 0))),
                         [datetime(1998, 1, 1, 9, 0),
                          datetime(1998, 7, 19, 9, 0),
                          datetime(1999, 1, 1, 9, 0),
                          datetime(1999, 7, 19, 9, 0)])

    def testDailyByWeekNo(self):
        self.assertEqual(list(rrule(DAILY,
                                    count=3,
                                    byweekno=20,
                                    dtstart=datetime(1997, 9, 2, 9, 0))),
                         [datetime(1998, 5, 11, 9, 0),
                          datetime(1998, 5, 12, 9, 0),
                          datetime(1998, 5, 13, 9, 0)])

    def testDailyByWeekNoAndWeekDay(self):
        # That's a nice one. The first days of week number one
        # may be in the last year.
        self.assertEqual(list(rrule(DAILY,
                                    count=3,
                                    byweekno=1,
                                    byweekday=MO,
                                    dtstart=datetime(1997, 9, 2, 9, 0))),
                         [datetime(1997, 12, 29, 9, 0),
                          datetime(1999, 1, 4, 9, 0),
                          datetime(2000, 1, 3, 9, 0)])

    def testDailyByWeekNoAndWeekDayLarge(self):
        # Another nice test. The last days of week number 52/53
        # may be in the next year.
        self.assertEqual(list(rrule(DAILY,
                                    count=3,
                                    byweekno=52,
                                    byweekday=SU,
                                    dtstart=datetime(1997, 9, 2, 9, 0))),
                         [datetime(1997, 12, 28, 9, 0),
                          datetime(1998, 12, 27, 9, 0),
                          datetime(2000, 1, 2, 9, 0)])

    def testDailyByWeekNoAndWeekDayLast(self):
        self.assertEqual(list(rrule(DAILY,
                                    count=3,
                                    byweekno=-1,
                                    byweekday=SU,
                                    dtstart=datetime(1997, 9, 2, 9, 0))),
                         [datetime(1997, 12, 28, 9, 0),
                          datetime(1999, 1, 3, 9, 0),
                          datetime(2000, 1, 2, 9, 0)])

    def testDailyByWeekNoAndWeekDay53(self):
        self.assertEqual(list(rrule(DAILY,
                                    count=3,
                                    byweekno=53,
                                    byweekday=MO,
                                    dtstart=datetime(1997, 9, 2, 9, 0))),
                         [datetime(1998, 12, 28, 9, 0),
                          datetime(2004, 12, 27, 9, 0),
                          datetime(2009, 12, 28, 9, 0)])

    def testDailyByEaster(self):
        self.assertEqual(list(rrule(DAILY,
                                    count=3,
                                    byeaster=0,
                                    dtstart=datetime(1997, 9, 2, 9, 0))),
                         [datetime(1998, 4, 12, 9, 0),
                          datetime(1999, 4, 4, 9, 0),
                          datetime(2000, 4, 23, 9, 0)])

    def testDailyByEasterPos(self):
        self.assertEqual(list(rrule(DAILY,
                                    count=3,
                                    byeaster=1,
                                    dtstart=datetime(1997, 9, 2, 9, 0))),
                         [datetime(1998, 4, 13, 9, 0),
                          datetime(1999, 4, 5, 9, 0),
                          datetime(2000, 4, 24, 9, 0)])

    def testDailyByEasterNeg(self):
        self.assertEqual(list(rrule(DAILY,
                                    count=3,
                                    byeaster=-1,
                                    dtstart=datetime(1997, 9, 2, 9, 0))),
                         [datetime(1998, 4, 11, 9, 0),
                          datetime(1999, 4, 3, 9, 0),
                          datetime(2000, 4, 22, 9, 0)])

    def testDailyByHour(self):
        self.assertEqual(list(rrule(DAILY,
                                    count=3,
                                    byhour=(6, 18),
                                    dtstart=datetime(1997, 9, 2, 9, 0))),
                         [datetime(1997, 9, 2, 18, 0),
                          datetime(1997, 9, 3, 6, 0),
                          datetime(1997, 9, 3, 18, 0)])

    def testDailyByMinute(self):
        self.assertEqual(list(rrule(DAILY,
                                    count=3,
                                    byminute=(6, 18),
                                    dtstart=datetime(1997, 9, 2, 9, 0))),
                         [datetime(1997, 9, 2, 9, 6),
                          datetime(1997, 9, 2, 9, 18),
                          datetime(1997, 9, 3, 9, 6)])

    def testDailyBySecond(self):
        self.assertEqual(list(rrule(DAILY,
                                    count=3,
                                    bysecond=(6, 18),
                                    dtstart=datetime(1997, 9, 2, 9, 0))),
                         [datetime(1997, 9, 2, 9, 0, 6),
                          datetime(1997, 9, 2, 9, 0, 18),
                          datetime(1997, 9, 3, 9, 0, 6)])

    def testDailyByHourAndMinute(self):
        self.assertEqual(list(rrule(DAILY,
                                    count=3,
                                    byhour=(6, 18),
                                    byminute=(6, 18),
                                    dtstart=datetime(1997, 9, 2, 9, 0))),
                         [datetime(1997, 9, 2, 18, 6),
                          datetime(1997, 9, 2, 18, 18),
                          datetime(1997, 9, 3, 6, 6)])

    def testDailyByHourAndSecond(self):
        self.assertEqual(list(rrule(DAILY,
                                    count=3,
                                    byhour=(6, 18),
                                    bysecond=(6, 18),
                                    dtstart=datetime(1997, 9, 2, 9, 0))),
                         [datetime(1997, 9, 2, 18, 0, 6),
                          datetime(1997, 9, 2, 18, 0, 18),
                          datetime(1997, 9, 3, 6, 0, 6)])

    def testDailyByMinuteAndSecond(self):
        self.assertEqual(list(rrule(DAILY,
                                    count=3,
                                    byminute=(6, 18),
                                    bysecond=(6, 18),
                                    dtstart=datetime(1997, 9, 2, 9, 0))),
                         [datetime(1997, 9, 2, 9, 6, 6),
                          datetime(1997, 9, 2, 9, 6, 18),
                          datetime(1997, 9, 2, 9, 18, 6)])

    def testDailyByHourAndMinuteAndSecond(self):
        self.assertEqual(list(rrule(DAILY,
                                    count=3,
                                    byhour=(6, 18),
                                    byminute=(6, 18),
                                    bysecond=(6, 18),
                                    dtstart=datetime(1997, 9, 2, 9, 0))),
                         [datetime(1997, 9, 2, 18, 6, 6),
                          datetime(1997, 9, 2, 18, 6, 18),
                          datetime(1997, 9, 2, 18, 18, 6)])

    def testDailyBySetPos(self):
        # bysetpos over each day's byhour x byminute candidate set.
        self.assertEqual(list(rrule(DAILY,
                                    count=3,
                                    byhour=(6, 18),
                                    byminute=(15, 45),
                                    bysetpos=(3, -3),
                                    dtstart=datetime(1997, 9, 2, 9, 0))),
                         [datetime(1997, 9, 2, 18, 15),
                          datetime(1997, 9, 3, 6, 45),
                          datetime(1997, 9, 3, 18, 15)])
    # --- HOURLY frequency ---
    # Note: once a date-level filter (bymonth/bymonthday/...) matches, hourly
    # iteration restarts from 00:00 of the matching day, hence the 0/1/2-hour
    # expected values below.

    def testHourly(self):
        self.assertEqual(list(rrule(HOURLY,
                                    count=3,
                                    dtstart=datetime(1997, 9, 2, 9, 0))),
                         [datetime(1997, 9, 2, 9, 0),
                          datetime(1997, 9, 2, 10, 0),
                          datetime(1997, 9, 2, 11, 0)])

    def testHourlyInterval(self):
        self.assertEqual(list(rrule(HOURLY,
                                    count=3,
                                    interval=2,
                                    dtstart=datetime(1997, 9, 2, 9, 0))),
                         [datetime(1997, 9, 2, 9, 0),
                          datetime(1997, 9, 2, 11, 0),
                          datetime(1997, 9, 2, 13, 0)])

    def testHourlyIntervalLarge(self):
        # 769 hours is just over 32 days, so each step lands one month
        # and one hour later.
        self.assertEqual(list(rrule(HOURLY,
                                    count=3,
                                    interval=769,
                                    dtstart=datetime(1997, 9, 2, 9, 0))),
                         [datetime(1997, 9, 2, 9, 0),
                          datetime(1997, 10, 4, 10, 0),
                          datetime(1997, 11, 5, 11, 0)])

    def testHourlyByMonth(self):
        self.assertEqual(list(rrule(HOURLY,
                                    count=3,
                                    bymonth=(1, 3),
                                    dtstart=datetime(1997, 9, 2, 9, 0))),
                         [datetime(1998, 1, 1, 0, 0),
                          datetime(1998, 1, 1, 1, 0),
                          datetime(1998, 1, 1, 2, 0)])

    def testHourlyByMonthDay(self):
        self.assertEqual(list(rrule(HOURLY,
                                    count=3,
                                    bymonthday=(1, 3),
                                    dtstart=datetime(1997, 9, 2, 9, 0))),
                         [datetime(1997, 9, 3, 0, 0),
                          datetime(1997, 9, 3, 1, 0),
                          datetime(1997, 9, 3, 2, 0)])

    def testHourlyByMonthAndMonthDay(self):
        self.assertEqual(list(rrule(HOURLY,
                                    count=3,
                                    bymonth=(1, 3),
                                    bymonthday=(5, 7),
                                    dtstart=datetime(1997, 9, 2, 9, 0))),
                         [datetime(1998, 1, 5, 0, 0),
                          datetime(1998, 1, 5, 1, 0),
                          datetime(1998, 1, 5, 2, 0)])

    def testHourlyByWeekDay(self):
        self.assertEqual(list(rrule(HOURLY,
                                    count=3,
                                    byweekday=(TU, TH),
                                    dtstart=datetime(1997, 9, 2, 9, 0))),
                         [datetime(1997, 9, 2, 9, 0),
                          datetime(1997, 9, 2, 10, 0),
                          datetime(1997, 9, 2, 11, 0)])

    def testHourlyByNWeekDay(self):
        self.assertEqual(list(rrule(HOURLY,
                                    count=3,
                                    byweekday=(TU(1), TH(-1)),
                                    dtstart=datetime(1997, 9, 2, 9, 0))),
                         [datetime(1997, 9, 2, 9, 0),
                          datetime(1997, 9, 2, 10, 0),
                          datetime(1997, 9, 2, 11, 0)])

    def testHourlyByMonthAndWeekDay(self):
        self.assertEqual(list(rrule(HOURLY,
                                    count=3,
                                    bymonth=(1, 3),
                                    byweekday=(TU, TH),
                                    dtstart=datetime(1997, 9, 2, 9, 0))),
                         [datetime(1998, 1, 1, 0, 0),
                          datetime(1998, 1, 1, 1, 0),
                          datetime(1998, 1, 1, 2, 0)])

    def testHourlyByMonthAndNWeekDay(self):
        self.assertEqual(list(rrule(HOURLY,
                                    count=3,
                                    bymonth=(1, 3),
                                    byweekday=(TU(1), TH(-1)),
                                    dtstart=datetime(1997, 9, 2, 9, 0))),
                         [datetime(1998, 1, 1, 0, 0),
                          datetime(1998, 1, 1, 1, 0),
                          datetime(1998, 1, 1, 2, 0)])

    def testHourlyByMonthDayAndWeekDay(self):
        self.assertEqual(list(rrule(HOURLY,
                                    count=3,
                                    bymonthday=(1, 3),
                                    byweekday=(TU, TH),
                                    dtstart=datetime(1997, 9, 2, 9, 0))),
                         [datetime(1998, 1, 1, 0, 0),
                          datetime(1998, 1, 1, 1, 0),
                          datetime(1998, 1, 1, 2, 0)])

    def testHourlyByMonthAndMonthDayAndWeekDay(self):
        self.assertEqual(list(rrule(HOURLY,
                                    count=3,
                                    bymonth=(1, 3),
                                    bymonthday=(1, 3),
                                    byweekday=(TU, TH),
                                    dtstart=datetime(1997, 9, 2, 9, 0))),
                         [datetime(1998, 1, 1, 0, 0),
                          datetime(1998, 1, 1, 1, 0),
                          datetime(1998, 1, 1, 2, 0)])

    def testHourlyByYearDay(self):
        self.assertEqual(list(rrule(HOURLY,
                                    count=4,
                                    byyearday=(1, 100, 200, 365),
                                    dtstart=datetime(1997, 9, 2, 9, 0))),
                         [datetime(1997, 12, 31, 0, 0),
                          datetime(1997, 12, 31, 1, 0),
                          datetime(1997, 12, 31, 2, 0),
                          datetime(1997, 12, 31, 3, 0)])

    def testHourlyByYearDayNeg(self):
        self.assertEqual(list(rrule(HOURLY,
                                    count=4,
                                    byyearday=(-365, -266, -166, -1),
                                    dtstart=datetime(1997, 9, 2, 9, 0))),
                         [datetime(1997, 12, 31, 0, 0),
                          datetime(1997, 12, 31, 1, 0),
                          datetime(1997, 12, 31, 2, 0),
                          datetime(1997, 12, 31, 3, 0)])

    def testHourlyByMonthAndYearDay(self):
        self.assertEqual(list(rrule(HOURLY,
                                    count=4,
                                    bymonth=(4, 7),
                                    byyearday=(1, 100, 200, 365),
                                    dtstart=datetime(1997, 9, 2, 9, 0))),
                         [datetime(1998, 4, 10, 0, 0),
                          datetime(1998, 4, 10, 1, 0),
                          datetime(1998, 4, 10, 2, 0),
                          datetime(1998, 4, 10, 3, 0)])

    def testHourlyByMonthAndYearDayNeg(self):
        self.assertEqual(list(rrule(HOURLY,
                                    count=4,
                                    bymonth=(4, 7),
                                    byyearday=(-365, -266, -166, -1),
                                    dtstart=datetime(1997, 9, 2, 9, 0))),
                         [datetime(1998, 4, 10, 0, 0),
                          datetime(1998, 4, 10, 1, 0),
                          datetime(1998, 4, 10, 2, 0),
                          datetime(1998, 4, 10, 3, 0)])

    def testHourlyByWeekNo(self):
        self.assertEqual(list(rrule(HOURLY,
                                    count=3,
                                    byweekno=20,
                                    dtstart=datetime(1997, 9, 2, 9, 0))),
                         [datetime(1998, 5, 11, 0, 0),
                          datetime(1998, 5, 11, 1, 0),
                          datetime(1998, 5, 11, 2, 0)])

    def testHourlyByWeekNoAndWeekDay(self):
        self.assertEqual(list(rrule(HOURLY,
                                    count=3,
                                    byweekno=1,
                                    byweekday=MO,
                                    dtstart=datetime(1997, 9, 2, 9, 0))),
                         [datetime(1997, 12, 29, 0, 0),
                          datetime(1997, 12, 29, 1, 0),
                          datetime(1997, 12, 29, 2, 0)])

    def testHourlyByWeekNoAndWeekDayLarge(self):
        self.assertEqual(list(rrule(HOURLY,
                                    count=3,
                                    byweekno=52,
                                    byweekday=SU,
                                    dtstart=datetime(1997, 9, 2, 9, 0))),
                         [datetime(1997, 12, 28, 0, 0),
                          datetime(1997, 12, 28, 1, 0),
                          datetime(1997, 12, 28, 2, 0)])

    def testHourlyByWeekNoAndWeekDayLast(self):
        self.assertEqual(list(rrule(HOURLY,
                                    count=3,
                                    byweekno=-1,
                                    byweekday=SU,
                                    dtstart=datetime(1997, 9, 2, 9, 0))),
                         [datetime(1997, 12, 28, 0, 0),
                          datetime(1997, 12, 28, 1, 0),
                          datetime(1997, 12, 28, 2, 0)])

    def testHourlyByWeekNoAndWeekDay53(self):
        self.assertEqual(list(rrule(HOURLY,
                                    count=3,
                                    byweekno=53,
                                    byweekday=MO,
                                    dtstart=datetime(1997, 9, 2, 9, 0))),
                         [datetime(1998, 12, 28, 0, 0),
                          datetime(1998, 12, 28, 1, 0),
                          datetime(1998, 12, 28, 2, 0)])

    def testHourlyByEaster(self):
        self.assertEqual(list(rrule(HOURLY,
                                    count=3,
                                    byeaster=0,
                                    dtstart=datetime(1997, 9, 2, 9, 0))),
                         [datetime(1998, 4, 12, 0, 0),
                          datetime(1998, 4, 12, 1, 0),
                          datetime(1998, 4, 12, 2, 0)])

    def testHourlyByEasterPos(self):
        self.assertEqual(list(rrule(HOURLY,
                                    count=3,
                                    byeaster=1,
                                    dtstart=datetime(1997, 9, 2, 9, 0))),
                         [datetime(1998, 4, 13, 0, 0),
                          datetime(1998, 4, 13, 1, 0),
                          datetime(1998, 4, 13, 2, 0)])

    def testHourlyByEasterNeg(self):
        self.assertEqual(list(rrule(HOURLY,
                                    count=3,
                                    byeaster=-1,
                                    dtstart=datetime(1997, 9, 2, 9, 0))),
                         [datetime(1998, 4, 11, 0, 0),
                          datetime(1998, 4, 11, 1, 0),
                          datetime(1998, 4, 11, 2, 0)])

    def testHourlyByHour(self):
        self.assertEqual(list(rrule(HOURLY,
                                    count=3,
                                    byhour=(6, 18),
                                    dtstart=datetime(1997, 9, 2, 9, 0))),
                         [datetime(1997, 9, 2, 18, 0),
                          datetime(1997, 9, 3, 6, 0),
                          datetime(1997, 9, 3, 18, 0)])

    def testHourlyByMinute(self):
        self.assertEqual(list(rrule(HOURLY,
                                    count=3,
                                    byminute=(6, 18),
                                    dtstart=datetime(1997, 9, 2, 9, 0))),
                         [datetime(1997, 9, 2, 9, 6),
                          datetime(1997, 9, 2, 9, 18),
                          datetime(1997, 9, 2, 10, 6)])

    def testHourlyBySecond(self):
        self.assertEqual(list(rrule(HOURLY,
                                    count=3,
                                    bysecond=(6, 18),
                                    dtstart=datetime(1997, 9, 2, 9, 0))),
                         [datetime(1997, 9, 2, 9, 0, 6),
                          datetime(1997, 9, 2, 9, 0, 18),
                          datetime(1997, 9, 2, 10, 0, 6)])

    def testHourlyByHourAndMinute(self):
        self.assertEqual(list(rrule(HOURLY,
                                    count=3,
                                    byhour=(6, 18),
                                    byminute=(6, 18),
                                    dtstart=datetime(1997, 9, 2, 9, 0))),
                         [datetime(1997, 9, 2, 18, 6),
                          datetime(1997, 9, 2, 18, 18),
                          datetime(1997, 9, 3, 6, 6)])

    def testHourlyByHourAndSecond(self):
        self.assertEqual(list(rrule(HOURLY,
                                    count=3,
                                    byhour=(6, 18),
                                    bysecond=(6, 18),
                                    dtstart=datetime(1997, 9, 2, 9, 0))),
                         [datetime(1997, 9, 2, 18, 0, 6),
                          datetime(1997, 9, 2, 18, 0, 18),
                          datetime(1997, 9, 3, 6, 0, 6)])

    def testHourlyByMinuteAndSecond(self):
        self.assertEqual(list(rrule(HOURLY,
                                    count=3,
                                    byminute=(6, 18),
                                    bysecond=(6, 18),
                                    dtstart=datetime(1997, 9, 2, 9, 0))),
                         [datetime(1997, 9, 2, 9, 6, 6),
                          datetime(1997, 9, 2, 9, 6, 18),
                          datetime(1997, 9, 2, 9, 18, 6)])

    def testHourlyByHourAndMinuteAndSecond(self):
        self.assertEqual(list(rrule(HOURLY,
                                    count=3,
                                    byhour=(6, 18),
                                    byminute=(6, 18),
                                    bysecond=(6, 18),
                                    dtstart=datetime(1997, 9, 2, 9, 0))),
                         [datetime(1997, 9, 2, 18, 6, 6),
                          datetime(1997, 9, 2, 18, 6, 18),
                          datetime(1997, 9, 2, 18, 18, 6)])

    def testHourlyBySetPos(self):
        # bysetpos over each hour's byminute x bysecond candidate set.
        self.assertEqual(list(rrule(HOURLY,
                                    count=3,
                                    byminute=(15, 45),
                                    bysecond=(15, 45),
                                    bysetpos=(3, -3),
                                    dtstart=datetime(1997, 9, 2, 9, 0))),
                         [datetime(1997, 9, 2, 9, 15, 45),
                          datetime(1997, 9, 2, 9, 45, 15),
                          datetime(1997, 9, 2, 10, 15, 45)])
    # --- MINUTELY frequency ---

    def testMinutely(self):
        self.assertEqual(list(rrule(MINUTELY,
                                    count=3,
                                    dtstart=datetime(1997, 9, 2, 9, 0))),
                         [datetime(1997, 9, 2, 9, 0),
                          datetime(1997, 9, 2, 9, 1),
                          datetime(1997, 9, 2, 9, 2)])

    def testMinutelyInterval(self):
        self.assertEqual(list(rrule(MINUTELY,
                                    count=3,
                                    interval=2,
                                    dtstart=datetime(1997, 9, 2, 9, 0))),
                         [datetime(1997, 9, 2, 9, 0),
                          datetime(1997, 9, 2, 9, 2),
                          datetime(1997, 9, 2, 9, 4)])

    def testMinutelyIntervalLarge(self):
        # 1501 minutes = 1 day, 1 hour, 1 minute per step.
        self.assertEqual(list(rrule(MINUTELY,
                                    count=3,
                                    interval=1501,
                                    dtstart=datetime(1997, 9, 2, 9, 0))),
                         [datetime(1997, 9, 2, 9, 0),
                          datetime(1997, 9, 3, 10, 1),
                          datetime(1997, 9, 4, 11, 2)])

    def testMinutelyByMonth(self):
        self.assertEqual(list(rrule(MINUTELY,
                                    count=3,
                                    bymonth=(1, 3),
                                    dtstart=datetime(1997, 9, 2, 9, 0))),
                         [datetime(1998, 1, 1, 0, 0),
                          datetime(1998, 1, 1, 0, 1),
                          datetime(1998, 1, 1, 0, 2)])

    def testMinutelyByMonthDay(self):
        self.assertEqual(list(rrule(MINUTELY,
                                    count=3,
                                    bymonthday=(1, 3),
                                    dtstart=datetime(1997, 9, 2, 9, 0))),
                         [datetime(1997, 9, 3, 0, 0),
                          datetime(1997, 9, 3, 0, 1),
                          datetime(1997, 9, 3, 0, 2)])

    def testMinutelyByMonthAndMonthDay(self):
        self.assertEqual(list(rrule(MINUTELY,
                                    count=3,
                                    bymonth=(1, 3),
                                    bymonthday=(5, 7),
                                    dtstart=datetime(1997, 9, 2, 9, 0))),
                         [datetime(1998, 1, 5, 0, 0),
                          datetime(1998, 1, 5, 0, 1),
                          datetime(1998, 1, 5, 0, 2)])

    def testMinutelyByWeekDay(self):
        self.assertEqual(list(rrule(MINUTELY,
                                    count=3,
                                    byweekday=(TU, TH),
                                    dtstart=datetime(1997, 9, 2, 9, 0))),
                         [datetime(1997, 9, 2, 9, 0),
                          datetime(1997, 9, 2, 9, 1),
                          datetime(1997, 9, 2, 9, 2)])

    def testMinutelyByNWeekDay(self):
        self.assertEqual(list(rrule(MINUTELY,
                                    count=3,
                                    byweekday=(TU(1), TH(-1)),
                                    dtstart=datetime(1997, 9, 2, 9, 0))),
                         [datetime(1997, 9, 2, 9, 0),
                          datetime(1997, 9, 2, 9, 1),
                          datetime(1997, 9, 2, 9, 2)])

    def testMinutelyByMonthAndWeekDay(self):
        self.assertEqual(list(rrule(MINUTELY,
                                    count=3,
                                    bymonth=(1, 3),
                                    byweekday=(TU, TH),
                                    dtstart=datetime(1997, 9, 2, 9, 0))),
                         [datetime(1998, 1, 1, 0, 0),
                          datetime(1998, 1, 1, 0, 1),
                          datetime(1998, 1, 1, 0, 2)])

    def testMinutelyByMonthAndNWeekDay(self):
        self.assertEqual(list(rrule(MINUTELY,
                                    count=3,
                                    bymonth=(1, 3),
                                    byweekday=(TU(1), TH(-1)),
                                    dtstart=datetime(1997, 9, 2, 9, 0))),
                         [datetime(1998, 1, 1, 0, 0),
                          datetime(1998, 1, 1, 0, 1),
                          datetime(1998, 1, 1, 0, 2)])

    def testMinutelyByMonthDayAndWeekDay(self):
        self.assertEqual(list(rrule(MINUTELY,
                                    count=3,
                                    bymonthday=(1, 3),
                                    byweekday=(TU, TH),
                                    dtstart=datetime(1997, 9, 2, 9, 0))),
                         [datetime(1998, 1, 1, 0, 0),
                          datetime(1998, 1, 1, 0, 1),
                          datetime(1998, 1, 1, 0, 2)])

    def testMinutelyByMonthAndMonthDayAndWeekDay(self):
        self.assertEqual(list(rrule(MINUTELY,
                                    count=3,
                                    bymonth=(1, 3),
                                    bymonthday=(1, 3),
                                    byweekday=(TU, TH),
                                    dtstart=datetime(1997, 9, 2, 9, 0))),
                         [datetime(1998, 1, 1, 0, 0),
                          datetime(1998, 1, 1, 0, 1),
                          datetime(1998, 1, 1, 0, 2)])

    def testMinutelyByYearDay(self):
        self.assertEqual(list(rrule(MINUTELY,
                                    count=4,
                                    byyearday=(1, 100, 200, 365),
                                    dtstart=datetime(1997, 9, 2, 9, 0))),
                         [datetime(1997, 12, 31, 0, 0),
                          datetime(1997, 12, 31, 0, 1),
                          datetime(1997, 12, 31, 0, 2),
                          datetime(1997, 12, 31, 0, 3)])

    def testMinutelyByYearDayNeg(self):
        self.assertEqual(list(rrule(MINUTELY,
                                    count=4,
                                    byyearday=(-365, -266, -166, -1),
                                    dtstart=datetime(1997, 9, 2, 9, 0))),
                         [datetime(1997, 12, 31, 0, 0),
                          datetime(1997, 12, 31, 0, 1),
                          datetime(1997, 12, 31, 0, 2),
                          datetime(1997, 12, 31, 0, 3)])

    def testMinutelyByMonthAndYearDay(self):
        self.assertEqual(list(rrule(MINUTELY,
                                    count=4,
                                    bymonth=(4, 7),
                                    byyearday=(1, 100, 200, 365),
                                    dtstart=datetime(1997, 9, 2, 9, 0))),
                         [datetime(1998, 4, 10, 0, 0),
                          datetime(1998, 4, 10, 0, 1),
                          datetime(1998, 4, 10, 0, 2),
                          datetime(1998, 4, 10, 0, 3)])

    def testMinutelyByMonthAndYearDayNeg(self):
        self.assertEqual(list(rrule(MINUTELY,
                                    count=4,
                                    bymonth=(4, 7),
                                    byyearday=(-365, -266, -166, -1),
                                    dtstart=datetime(1997, 9, 2, 9, 0))),
                         [datetime(1998, 4, 10, 0, 0),
                          datetime(1998, 4, 10, 0, 1),
                          datetime(1998, 4, 10, 0, 2),
                          datetime(1998, 4, 10, 0, 3)])

    def testMinutelyByWeekNo(self):
        self.assertEqual(list(rrule(MINUTELY,
                                    count=3,
                                    byweekno=20,
                                    dtstart=datetime(1997, 9, 2, 9, 0))),
                         [datetime(1998, 5, 11, 0, 0),
                          datetime(1998, 5, 11, 0, 1),
                          datetime(1998, 5, 11, 0, 2)])

    def testMinutelyByWeekNoAndWeekDay(self):
        self.assertEqual(list(rrule(MINUTELY,
                                    count=3,
                                    byweekno=1,
                                    byweekday=MO,
                                    dtstart=datetime(1997, 9, 2, 9, 0))),
                         [datetime(1997, 12, 29, 0, 0),
                          datetime(1997, 12, 29, 0, 1),
                          datetime(1997, 12, 29, 0, 2)])

    def testMinutelyByWeekNoAndWeekDayLarge(self):
        self.assertEqual(list(rrule(MINUTELY,
                                    count=3,
                                    byweekno=52,
                                    byweekday=SU,
                                    dtstart=datetime(1997, 9, 2, 9, 0))),
                         [datetime(1997, 12, 28, 0, 0),
                          datetime(1997, 12, 28, 0, 1),
                          datetime(1997, 12, 28, 0, 2)])

    def testMinutelyByWeekNoAndWeekDayLast(self):
        self.assertEqual(list(rrule(MINUTELY,
                                    count=3,
                                    byweekno=-1,
                                    byweekday=SU,
                                    dtstart=datetime(1997, 9, 2, 9, 0))),
                         [datetime(1997, 12, 28, 0, 0),
                          datetime(1997, 12, 28, 0, 1),
                          datetime(1997, 12, 28, 0, 2)])

    def testMinutelyByWeekNoAndWeekDay53(self):
        self.assertEqual(list(rrule(MINUTELY,
                                    count=3,
                                    byweekno=53,
                                    byweekday=MO,
                                    dtstart=datetime(1997, 9, 2, 9, 0))),
                         [datetime(1998, 12, 28, 0, 0),
                          datetime(1998, 12, 28, 0, 1),
                          datetime(1998, 12, 28, 0, 2)])

    def testMinutelyByEaster(self):
        self.assertEqual(list(rrule(MINUTELY,
                                    count=3,
                                    byeaster=0,
                                    dtstart=datetime(1997, 9, 2, 9, 0))),
                         [datetime(1998, 4, 12, 0, 0),
                          datetime(1998, 4, 12, 0, 1),
                          datetime(1998, 4, 12, 0, 2)])

    def testMinutelyByEasterPos(self):
        self.assertEqual(list(rrule(MINUTELY,
                                    count=3,
                                    byeaster=1,
                                    dtstart=datetime(1997, 9, 2, 9, 0))),
                         [datetime(1998, 4, 13, 0, 0),
                          datetime(1998, 4, 13, 0, 1),
                          datetime(1998, 4, 13, 0, 2)])

    def testMinutelyByEasterNeg(self):
        self.assertEqual(list(rrule(MINUTELY,
                                    count=3,
                                    byeaster=-1,
                                    dtstart=datetime(1997, 9, 2, 9, 0))),
                         [datetime(1998, 4, 11, 0, 0),
                          datetime(1998, 4, 11, 0, 1),
                          datetime(1998, 4, 11, 0, 2)])

    def testMinutelyByHour(self):
        self.assertEqual(list(rrule(MINUTELY,
                                    count=3,
                                    byhour=(6, 18),
                                    dtstart=datetime(1997, 9, 2, 9, 0))),
                         [datetime(1997, 9, 2, 18, 0),
                          datetime(1997, 9, 2, 18, 1),
                          datetime(1997, 9, 2, 18, 2)])

    def testMinutelyByMinute(self):
        self.assertEqual(list(rrule(MINUTELY,
                                    count=3,
                                    byminute=(6, 18),
                                    dtstart=datetime(1997, 9, 2, 9, 0))),
                         [datetime(1997, 9, 2, 9, 6),
                          datetime(1997, 9, 2, 9, 18),
                          datetime(1997, 9, 2, 10, 6)])

    def testMinutelyBySecond(self):
        self.assertEqual(list(rrule(MINUTELY,
                                    count=3,
                                    bysecond=(6, 18),
                                    dtstart=datetime(1997, 9, 2, 9, 0))),
                         [datetime(1997, 9, 2, 9, 0, 6),
                          datetime(1997, 9, 2, 9, 0, 18),
                          datetime(1997, 9, 2, 9, 1, 6)])

    def testMinutelyByHourAndMinute(self):
        self.assertEqual(list(rrule(MINUTELY,
                                    count=3,
                                    byhour=(6, 18),
                                    byminute=(6, 18),
                                    dtstart=datetime(1997, 9, 2, 9, 0))),
                         [datetime(1997, 9, 2, 18, 6),
                          datetime(1997, 9, 2, 18, 18),
                          datetime(1997, 9, 3, 6, 6)])

    def testMinutelyByHourAndSecond(self):
        self.assertEqual(list(rrule(MINUTELY,
                                    count=3,
                                    byhour=(6, 18),
                                    bysecond=(6, 18),
                                    dtstart=datetime(1997, 9, 2, 9, 0))),
                         [datetime(1997, 9, 2, 18, 0, 6),
                          datetime(1997, 9, 2, 18, 0, 18),
                          datetime(1997, 9, 2, 18, 1, 6)])

    def testMinutelyByMinuteAndSecond(self):
        self.assertEqual(list(rrule(MINUTELY,
                                    count=3,
                                    byminute=(6, 18),
                                    bysecond=(6, 18),
                                    dtstart=datetime(1997, 9, 2, 9, 0))),
                         [datetime(1997, 9, 2, 9, 6, 6),
                          datetime(1997, 9, 2, 9, 6, 18),
                          datetime(1997, 9, 2, 9, 18, 6)])

    def testMinutelyByHourAndMinuteAndSecond(self):
        self.assertEqual(list(rrule(MINUTELY,
                                    count=3,
                                    byhour=(6, 18),
                                    byminute=(6, 18),
                                    bysecond=(6, 18),
                                    dtstart=datetime(1997, 9, 2, 9, 0))),
                         [datetime(1997, 9, 2, 18, 6, 6),
                          datetime(1997, 9, 2, 18, 6, 18),
                          datetime(1997, 9, 2, 18, 18, 6)])

    def testMinutelyBySetPos(self):
        # bysetpos over each minute's three bysecond candidates: positions
        # 3 and -3 overlap on the middle value when only 3 candidates exist.
        self.assertEqual(list(rrule(MINUTELY,
                                    count=3,
                                    bysecond=(15, 30, 45),
                                    bysetpos=(3, -3),
                                    dtstart=datetime(1997, 9, 2, 9, 0))),
                         [datetime(1997, 9, 2, 9, 0, 15),
                          datetime(1997, 9, 2, 9, 0, 45),
                          datetime(1997, 9, 2, 9, 1, 15)])
    # --- SECONDLY frequency ---

    def testSecondly(self):
        self.assertEqual(list(rrule(SECONDLY,
                                    count=3,
                                    dtstart=datetime(1997, 9, 2, 9, 0))),
                         [datetime(1997, 9, 2, 9, 0, 0),
                          datetime(1997, 9, 2, 9, 0, 1),
                          datetime(1997, 9, 2, 9, 0, 2)])

    def testSecondlyInterval(self):
        self.assertEqual(list(rrule(SECONDLY,
                                    count=3,
                                    interval=2,
                                    dtstart=datetime(1997, 9, 2, 9, 0))),
                         [datetime(1997, 9, 2, 9, 0, 0),
                          datetime(1997, 9, 2, 9, 0, 2),
                          datetime(1997, 9, 2, 9, 0, 4)])

    def testSecondlyIntervalLarge(self):
        # 90061 seconds = 1 day, 1 hour, 1 minute, 1 second per step.
        self.assertEqual(list(rrule(SECONDLY,
                                    count=3,
                                    interval=90061,
                                    dtstart=datetime(1997, 9, 2, 9, 0))),
                         [datetime(1997, 9, 2, 9, 0, 0),
                          datetime(1997, 9, 3, 10, 1, 1),
                          datetime(1997, 9, 4, 11, 2, 2)])

    def testSecondlyByMonth(self):
        self.assertEqual(list(rrule(SECONDLY,
                                    count=3,
                                    bymonth=(1, 3),
                                    dtstart=datetime(1997, 9, 2, 9, 0))),
                         [datetime(1998, 1, 1, 0, 0, 0),
                          datetime(1998, 1, 1, 0, 0, 1),
                          datetime(1998, 1, 1, 0, 0, 2)])

    def testSecondlyByMonthDay(self):
        self.assertEqual(list(rrule(SECONDLY,
                                    count=3,
                                    bymonthday=(1, 3),
                                    dtstart=datetime(1997, 9, 2, 9, 0))),
                         [datetime(1997, 9, 3, 0, 0, 0),
                          datetime(1997, 9, 3, 0, 0, 1),
                          datetime(1997, 9, 3, 0, 0, 2)])

    def testSecondlyByMonthAndMonthDay(self):
        self.assertEqual(list(rrule(SECONDLY,
                                    count=3,
                                    bymonth=(1, 3),
                                    bymonthday=(5, 7),
                                    dtstart=datetime(1997, 9, 2, 9, 0))),
                         [datetime(1998, 1, 5, 0, 0, 0),
                          datetime(1998, 1, 5, 0, 0, 1),
                          datetime(1998, 1, 5, 0, 0, 2)])

    def testSecondlyByWeekDay(self):
        self.assertEqual(list(rrule(SECONDLY,
                                    count=3,
                                    byweekday=(TU, TH),
                                    dtstart=datetime(1997, 9, 2, 9, 0))),
                         [datetime(1997, 9, 2, 9, 0, 0),
                          datetime(1997, 9, 2, 9, 0, 1),
                          datetime(1997, 9, 2, 9, 0, 2)])

    def testSecondlyByNWeekDay(self):
        self.assertEqual(list(rrule(SECONDLY,
                                    count=3,
                                    byweekday=(TU(1), TH(-1)),
                                    dtstart=datetime(1997, 9, 2, 9, 0))),
                         [datetime(1997, 9, 2, 9, 0, 0),
                          datetime(1997, 9, 2, 9, 0, 1),
                          datetime(1997, 9, 2, 9, 0, 2)])

    def testSecondlyByMonthAndWeekDay(self):
        self.assertEqual(list(rrule(SECONDLY,
                                    count=3,
                                    bymonth=(1, 3),
                                    byweekday=(TU, TH),
                                    dtstart=datetime(1997, 9, 2, 9, 0))),
                         [datetime(1998, 1, 1, 0, 0, 0),
                          datetime(1998, 1, 1, 0, 0, 1),
                          datetime(1998, 1, 1, 0, 0, 2)])
def testSecondlyByMonthAndNWeekDay(self):
self.assertEqual(list(rrule(SECONDLY,
count=3,
bymonth=(1, 3),
byweekday=(TU(1), TH(-1)),
dtstart=datetime(1997, 9, 2, 9, 0))),
[datetime(1998, 1, 1, 0, 0, 0),
datetime(1998, 1, 1, 0, 0, 1),
datetime(1998, 1, 1, 0, 0, 2)])
def testSecondlyByMonthDayAndWeekDay(self):
self.assertEqual(list(rrule(SECONDLY,
count=3,
bymonthday=(1, 3),
byweekday=(TU, TH),
dtstart=datetime(1997, 9, 2, 9, 0))),
[datetime(1998, 1, 1, 0, 0, 0),
datetime(1998, 1, 1, 0, 0, 1),
datetime(1998, 1, 1, 0, 0, 2)])
def testSecondlyByMonthAndMonthDayAndWeekDay(self):
self.assertEqual(list(rrule(SECONDLY,
count=3,
bymonth=(1, 3),
bymonthday=(1, 3),
byweekday=(TU, TH),
dtstart=datetime(1997, 9, 2, 9, 0))),
[datetime(1998, 1, 1, 0, 0, 0),
datetime(1998, 1, 1, 0, 0, 1),
datetime(1998, 1, 1, 0, 0, 2)])
def testSecondlyByYearDay(self):
self.assertEqual(list(rrule(SECONDLY,
count=4,
byyearday=(1, 100, 200, 365),
dtstart=datetime(1997, 9, 2, 9, 0))),
[datetime(1997, 12, 31, 0, 0, 0),
datetime(1997, 12, 31, 0, 0, 1),
datetime(1997, 12, 31, 0, 0, 2),
datetime(1997, 12, 31, 0, 0, 3)])
def testSecondlyByYearDayNeg(self):
self.assertEqual(list(rrule(SECONDLY,
count=4,
byyearday=(-365, -266, -166, -1),
dtstart=datetime(1997, 9, 2, 9, 0))),
[datetime(1997, 12, 31, 0, 0, 0),
datetime(1997, 12, 31, 0, 0, 1),
datetime(1997, 12, 31, 0, 0, 2),
datetime(1997, 12, 31, 0, 0, 3)])
def testSecondlyByMonthAndYearDay(self):
self.assertEqual(list(rrule(SECONDLY,
count=4,
bymonth=(4, 7),
byyearday=(1, 100, 200, 365),
dtstart=datetime(1997, 9, 2, 9, 0))),
[datetime(1998, 4, 10, 0, 0, 0),
datetime(1998, 4, 10, 0, 0, 1),
datetime(1998, 4, 10, 0, 0, 2),
datetime(1998, 4, 10, 0, 0, 3)])
def testSecondlyByMonthAndYearDayNeg(self):
self.assertEqual(list(rrule(SECONDLY,
count=4,
bymonth=(4, 7),
byyearday=(-365, -266, -166, -1),
dtstart=datetime(1997, 9, 2, 9, 0))),
[datetime(1998, 4, 10, 0, 0, 0),
datetime(1998, 4, 10, 0, 0, 1),
datetime(1998, 4, 10, 0, 0, 2),
datetime(1998, 4, 10, 0, 0, 3)])
    # --- SECONDLY frequency: ISO week-number and Easter-offset filters.
    def testSecondlyByWeekNo(self):
        self.assertEqual(list(rrule(SECONDLY,
                                    count=3,
                                    byweekno=20,
                                    dtstart=datetime(1997, 9, 2, 9, 0))),
                         [datetime(1998, 5, 11, 0, 0, 0),
                          datetime(1998, 5, 11, 0, 0, 1),
                          datetime(1998, 5, 11, 0, 0, 2)])
    def testSecondlyByWeekNoAndWeekDay(self):
        self.assertEqual(list(rrule(SECONDLY,
                                    count=3,
                                    byweekno=1,
                                    byweekday=MO,
                                    dtstart=datetime(1997, 9, 2, 9, 0))),
                         [datetime(1997, 12, 29, 0, 0, 0),
                          datetime(1997, 12, 29, 0, 0, 1),
                          datetime(1997, 12, 29, 0, 0, 2)])
    def testSecondlyByWeekNoAndWeekDayLarge(self):
        self.assertEqual(list(rrule(SECONDLY,
                                    count=3,
                                    byweekno=52,
                                    byweekday=SU,
                                    dtstart=datetime(1997, 9, 2, 9, 0))),
                         [datetime(1997, 12, 28, 0, 0, 0),
                          datetime(1997, 12, 28, 0, 0, 1),
                          datetime(1997, 12, 28, 0, 0, 2)])
    def testSecondlyByWeekNoAndWeekDayLast(self):
        # byweekno=-1 is the last ISO week of the year.
        self.assertEqual(list(rrule(SECONDLY,
                                    count=3,
                                    byweekno=-1,
                                    byweekday=SU,
                                    dtstart=datetime(1997, 9, 2, 9, 0))),
                         [datetime(1997, 12, 28, 0, 0, 0),
                          datetime(1997, 12, 28, 0, 0, 1),
                          datetime(1997, 12, 28, 0, 0, 2)])
    def testSecondlyByWeekNoAndWeekDay53(self):
        # Week 53 only exists in some ISO years (1998 here).
        self.assertEqual(list(rrule(SECONDLY,
                                    count=3,
                                    byweekno=53,
                                    byweekday=MO,
                                    dtstart=datetime(1997, 9, 2, 9, 0))),
                         [datetime(1998, 12, 28, 0, 0, 0),
                          datetime(1998, 12, 28, 0, 0, 1),
                          datetime(1998, 12, 28, 0, 0, 2)])
    def testSecondlyByEaster(self):
        # byeaster=0 is Easter Sunday itself (1998-04-12).
        self.assertEqual(list(rrule(SECONDLY,
                                    count=3,
                                    byeaster=0,
                                    dtstart=datetime(1997, 9, 2, 9, 0))),
                         [datetime(1998, 4, 12, 0, 0, 0),
                          datetime(1998, 4, 12, 0, 0, 1),
                          datetime(1998, 4, 12, 0, 0, 2)])
    def testSecondlyByEasterPos(self):
        self.assertEqual(list(rrule(SECONDLY,
                                    count=3,
                                    byeaster=1,
                                    dtstart=datetime(1997, 9, 2, 9, 0))),
                         [datetime(1998, 4, 13, 0, 0, 0),
                          datetime(1998, 4, 13, 0, 0, 1),
                          datetime(1998, 4, 13, 0, 0, 2)])
    def testSecondlyByEasterNeg(self):
        self.assertEqual(list(rrule(SECONDLY,
                                    count=3,
                                    byeaster=-1,
                                    dtstart=datetime(1997, 9, 2, 9, 0))),
                         [datetime(1998, 4, 11, 0, 0, 0),
                          datetime(1998, 4, 11, 0, 0, 1),
                          datetime(1998, 4, 11, 0, 0, 2)])
    # --- SECONDLY frequency: time-of-day filters, plus a regression test.
    def testSecondlyByHour(self):
        self.assertEqual(list(rrule(SECONDLY,
                                    count=3,
                                    byhour=(6, 18),
                                    dtstart=datetime(1997, 9, 2, 9, 0))),
                         [datetime(1997, 9, 2, 18, 0, 0),
                          datetime(1997, 9, 2, 18, 0, 1),
                          datetime(1997, 9, 2, 18, 0, 2)])
    def testSecondlyByMinute(self):
        self.assertEqual(list(rrule(SECONDLY,
                                    count=3,
                                    byminute=(6, 18),
                                    dtstart=datetime(1997, 9, 2, 9, 0))),
                         [datetime(1997, 9, 2, 9, 6, 0),
                          datetime(1997, 9, 2, 9, 6, 1),
                          datetime(1997, 9, 2, 9, 6, 2)])
    def testSecondlyBySecond(self):
        self.assertEqual(list(rrule(SECONDLY,
                                    count=3,
                                    bysecond=(6, 18),
                                    dtstart=datetime(1997, 9, 2, 9, 0))),
                         [datetime(1997, 9, 2, 9, 0, 6),
                          datetime(1997, 9, 2, 9, 0, 18),
                          datetime(1997, 9, 2, 9, 1, 6)])
    def testSecondlyByHourAndMinute(self):
        self.assertEqual(list(rrule(SECONDLY,
                                    count=3,
                                    byhour=(6, 18),
                                    byminute=(6, 18),
                                    dtstart=datetime(1997, 9, 2, 9, 0))),
                         [datetime(1997, 9, 2, 18, 6, 0),
                          datetime(1997, 9, 2, 18, 6, 1),
                          datetime(1997, 9, 2, 18, 6, 2)])
    def testSecondlyByHourAndSecond(self):
        self.assertEqual(list(rrule(SECONDLY,
                                    count=3,
                                    byhour=(6, 18),
                                    bysecond=(6, 18),
                                    dtstart=datetime(1997, 9, 2, 9, 0))),
                         [datetime(1997, 9, 2, 18, 0, 6),
                          datetime(1997, 9, 2, 18, 0, 18),
                          datetime(1997, 9, 2, 18, 1, 6)])
    def testSecondlyByMinuteAndSecond(self):
        self.assertEqual(list(rrule(SECONDLY,
                                    count=3,
                                    byminute=(6, 18),
                                    bysecond=(6, 18),
                                    dtstart=datetime(1997, 9, 2, 9, 0))),
                         [datetime(1997, 9, 2, 9, 6, 6),
                          datetime(1997, 9, 2, 9, 6, 18),
                          datetime(1997, 9, 2, 9, 18, 6)])
    def testSecondlyByHourAndMinuteAndSecond(self):
        self.assertEqual(list(rrule(SECONDLY,
                                    count=3,
                                    byhour=(6, 18),
                                    byminute=(6, 18),
                                    bysecond=(6, 18),
                                    dtstart=datetime(1997, 9, 2, 9, 0))),
                         [datetime(1997, 9, 2, 18, 6, 6),
                          datetime(1997, 9, 2, 18, 6, 18),
                          datetime(1997, 9, 2, 18, 18, 6)])
    def testSecondlyByHourAndMinuteAndSecondBug(self):
        # This explores a bug found by Mathieu Bridon.
        # Regression: with bysecond/byminute matching dtstart exactly, the
        # iterator must advance by the hour rather than stalling.
        self.assertEqual(list(rrule(SECONDLY,
                                    count=3,
                                    bysecond=(0,),
                                    byminute=(1,),
                                    dtstart=datetime(2010, 3, 22, 12, 1))),
                         [datetime(2010, 3, 22, 12, 1),
                          datetime(2010, 3, 22, 13, 1),
                          datetime(2010, 3, 22, 14, 1)])
    def testLongIntegers(self):
        # All by* arguments must accept Python 2 ``long`` values.
        # NOTE(review): ``PY2`` and ``long`` are presumably provided by the
        # module's compat imports (not visible in this chunk) -- confirm.
        if PY2:  # There are no longs in python3
            self.assertEqual(list(rrule(MINUTELY,
                                        count=long(2),
                                        interval=long(2),
                                        bymonth=long(2),
                                        byweekday=long(3),
                                        byhour=long(6),
                                        byminute=long(6),
                                        bysecond=long(6),
                                        dtstart=datetime(1997, 9, 2, 9, 0))),
                             [datetime(1998, 2, 5, 6, 6, 6),
                              datetime(1998, 2, 12, 6, 6, 6)])
            self.assertEqual(list(rrule(YEARLY,
                                        count=long(2),
                                        bymonthday=long(5),
                                        byweekno=long(2),
                                        dtstart=datetime(1997, 9, 2, 9, 0))),
                             [datetime(1998, 1, 5, 9, 0),
                              datetime(2004, 1, 5, 9, 0)])
    # --- Rules whose interval/by* combination can never produce a value
    # must be rejected at construction time.
    def testHourlyBadRRule(self):
        """
        When `byhour` is specified with `freq=HOURLY`, there are certain
        combinations of `dtstart` and `byhour` which result in an rrule with no
        valid values.

        See https://github.com/dateutil/dateutil/issues/4
        """
        self.assertRaises(ValueError, rrule, HOURLY,
                          **dict(interval=4, byhour=(7, 11, 15, 19),
                                 dtstart=datetime(1997, 9, 2, 9, 0)))
    def testMinutelyBadRRule(self):
        """
        See :func:`testHourlyBadRRule` for details.
        """
        self.assertRaises(ValueError, rrule, MINUTELY,
                          **dict(interval=12, byminute=(10, 11, 25, 39, 50),
                                 dtstart=datetime(1997, 9, 2, 9, 0)))
    def testSecondlyBadRRule(self):
        """
        See :func:`testHourlyBadRRule` for details.
        """
        self.assertRaises(ValueError, rrule, SECONDLY,
                          **dict(interval=10, bysecond=(2, 15, 37, 42, 59),
                                 dtstart=datetime(1997, 9, 2, 9, 0)))
    def testMinutelyBadComboRRule(self):
        """
        Certain values of ``interval`` in :class:`rrule`, when combined
        with certain values of ``byhour`` create rules which apply to no
        valid dates. The library should detect this case in the iterator and
        raise a :exc:`ValueError`.
        """
        # In Python 2.7 you can use a context manager for this.
        def make_bad_rrule():
            list(rrule(MINUTELY, interval=120, byhour=(10, 12, 14, 16),
                       count=2, dtstart=datetime(1997, 9, 2, 9, 0)))
        self.assertRaises(ValueError, make_bad_rrule)
    def testSecondlyBadComboRRule(self):
        """
        See :func:`testMinutelyBadComboRRule` for details.
        """
        # In Python 2.7 you can use a context manager for this.
        def make_bad_minute_rrule():
            list(rrule(SECONDLY, interval=360, byminute=(10, 28, 49),
                       count=4, dtstart=datetime(1997, 9, 2, 9, 0)))
        def make_bad_hour_rrule():
            list(rrule(SECONDLY, interval=43200, byhour=(2, 10, 18, 23),
                       count=4, dtstart=datetime(1997, 9, 2, 9, 0)))
        self.assertRaises(ValueError, make_bad_minute_rrule)
        self.assertRaises(ValueError, make_bad_hour_rrule)
    def testBadUntilCountRRule(self):
        """
        See rfc-5545 3.3.10 - This checks for the deprecation warning, and will
        eventually check for an error.
        """
        # COUNT and UNTIL are mutually exclusive per RFC 5545.
        with pytest.warns(DeprecationWarning):
            rrule(DAILY, dtstart=datetime(1997, 9, 2, 9, 0),
                  count=3, until=datetime(1997, 9, 4, 9, 0))
    # --- UNTIL boundary handling (UNTIL is inclusive).
    def testUntilNotMatching(self):
        self.assertEqual(list(rrule(DAILY,
                                    dtstart=datetime(1997, 9, 2, 9, 0),
                                    until=datetime(1997, 9, 5, 8, 0))),
                         [datetime(1997, 9, 2, 9, 0),
                          datetime(1997, 9, 3, 9, 0),
                          datetime(1997, 9, 4, 9, 0)])
    def testUntilMatching(self):
        self.assertEqual(list(rrule(DAILY,
                                    dtstart=datetime(1997, 9, 2, 9, 0),
                                    until=datetime(1997, 9, 4, 9, 0))),
                         [datetime(1997, 9, 2, 9, 0),
                          datetime(1997, 9, 3, 9, 0),
                          datetime(1997, 9, 4, 9, 0)])
    def testUntilSingle(self):
        self.assertEqual(list(rrule(DAILY,
                                    dtstart=datetime(1997, 9, 2, 9, 0),
                                    until=datetime(1997, 9, 2, 9, 0))),
                         [datetime(1997, 9, 2, 9, 0)])
    def testUntilEmpty(self):
        # until earlier than dtstart yields no occurrences.
        self.assertEqual(list(rrule(DAILY,
                                    dtstart=datetime(1997, 9, 2, 9, 0),
                                    until=datetime(1997, 9, 1, 9, 0))),
                         [])
    def testUntilWithDate(self):
        # A bare date is accepted for until (interpreted as midnight).
        self.assertEqual(list(rrule(DAILY,
                                    dtstart=datetime(1997, 9, 2, 9, 0),
                                    until=date(1997, 9, 5))),
                         [datetime(1997, 9, 2, 9, 0),
                          datetime(1997, 9, 3, 9, 0),
                          datetime(1997, 9, 4, 9, 0)])
    # --- Week-start (wkst) affects which week an interval boundary falls in.
    def testWkStIntervalMO(self):
        self.assertEqual(list(rrule(WEEKLY,
                                    count=3,
                                    interval=2,
                                    byweekday=(TU, SU),
                                    wkst=MO,
                                    dtstart=datetime(1997, 9, 2, 9, 0))),
                         [datetime(1997, 9, 2, 9, 0),
                          datetime(1997, 9, 7, 9, 0),
                          datetime(1997, 9, 16, 9, 0)])
    def testWkStIntervalSU(self):
        self.assertEqual(list(rrule(WEEKLY,
                                    count=3,
                                    interval=2,
                                    byweekday=(TU, SU),
                                    wkst=SU,
                                    dtstart=datetime(1997, 9, 2, 9, 0))),
                         [datetime(1997, 9, 2, 9, 0),
                          datetime(1997, 9, 14, 9, 0),
                          datetime(1997, 9, 16, 9, 0)])
    def testDTStartIsDate(self):
        # A plain date dtstart is promoted to a midnight datetime.
        self.assertEqual(list(rrule(DAILY,
                                    count=3,
                                    dtstart=date(1997, 9, 2))),
                         [datetime(1997, 9, 2, 0, 0),
                          datetime(1997, 9, 3, 0, 0),
                          datetime(1997, 9, 4, 0, 0)])
    def testDTStartWithMicroseconds(self):
        # Microseconds on dtstart are dropped from generated occurrences.
        self.assertEqual(list(rrule(DAILY,
                                    count=3,
                                    dtstart=datetime(1997, 9, 2, 9, 0, 0, 500000))),
                         [datetime(1997, 9, 2, 9, 0),
                          datetime(1997, 9, 3, 9, 0),
                          datetime(1997, 9, 4, 9, 0)])
    def testMaxYear(self):
        # Feb 31 never exists; iteration must terminate (at MAXYEAR) with
        # an empty result instead of looping forever.
        self.assertEqual(list(rrule(YEARLY,
                                    count=3,
                                    bymonth=2,
                                    bymonthday=31,
                                    dtstart=datetime(9997, 9, 2, 9, 0, 0))),
                         [])
    # --- Sequence-style access: __getitem__, slicing, and count().
    def testGetItem(self):
        self.assertEqual(rrule(DAILY,
                               count=3,
                               dtstart=datetime(1997, 9, 2, 9, 0))[0],
                         datetime(1997, 9, 2, 9, 0))
    def testGetItemNeg(self):
        self.assertEqual(rrule(DAILY,
                               count=3,
                               dtstart=datetime(1997, 9, 2, 9, 0))[-1],
                         datetime(1997, 9, 4, 9, 0))
    def testGetItemSlice(self):
        # No count: slicing must work on an unbounded rule without
        # materializing it.
        self.assertEqual(rrule(DAILY,
                               # count=3,
                               dtstart=datetime(1997, 9, 2, 9, 0))[1:2],
                         [datetime(1997, 9, 3, 9, 0)])
    def testGetItemSliceEmpty(self):
        # NOTE(review): despite the name, this tests a full ``[:]`` slice,
        # not an empty one; renaming would change test IDs, so left as-is.
        self.assertEqual(rrule(DAILY,
                               count=3,
                               dtstart=datetime(1997, 9, 2, 9, 0))[:],
                         [datetime(1997, 9, 2, 9, 0),
                          datetime(1997, 9, 3, 9, 0),
                          datetime(1997, 9, 4, 9, 0)])
    def testGetItemSliceStep(self):
        # Negative step reverses the (finite) occurrence list.
        self.assertEqual(rrule(DAILY,
                               count=3,
                               dtstart=datetime(1997, 9, 2, 9, 0))[::-2],
                         [datetime(1997, 9, 4, 9, 0),
                          datetime(1997, 9, 2, 9, 0)])
    def testCount(self):
        self.assertEqual(rrule(DAILY,
                               count=3,
                               dtstart=datetime(1997, 9, 2, 9, 0)).count(),
                         3)
    def testCountZero(self):
        self.assertEqual(rrule(YEARLY,
                               count=0,
                               dtstart=datetime(1997, 9, 2, 9, 0)).count(),
                         0)
def testContains(self):
rr = rrule(DAILY, count=3, dtstart=datetime(1997, 9, 2, 9, 0))
self.assertEqual(datetime(1997, 9, 3, 9, 0) in rr, True)
def testContainsNot(self):
rr = rrule(DAILY, count=3, dtstart=datetime(1997, 9, 2, 9, 0))
self.assertEqual(datetime(1997, 9, 3, 9, 0) not in rr, False)
    # --- Positional queries: before/after/xafter/between (inc toggles
    # whether the boundary datetime itself may be returned).
    def testBefore(self):
        self.assertEqual(rrule(DAILY,  # count=5
                               dtstart=datetime(1997, 9, 2, 9, 0)).before(datetime(1997, 9, 5, 9, 0)),
                         datetime(1997, 9, 4, 9, 0))
    def testBeforeInc(self):
        self.assertEqual(rrule(DAILY,
                               #count=5,
                               dtstart=datetime(1997, 9, 2, 9, 0))
                         .before(datetime(1997, 9, 5, 9, 0), inc=True),
                         datetime(1997, 9, 5, 9, 0))
    def testAfter(self):
        self.assertEqual(rrule(DAILY,
                               #count=5,
                               dtstart=datetime(1997, 9, 2, 9, 0))
                         .after(datetime(1997, 9, 4, 9, 0)),
                         datetime(1997, 9, 5, 9, 0))
    def testAfterInc(self):
        self.assertEqual(rrule(DAILY,
                               #count=5,
                               dtstart=datetime(1997, 9, 2, 9, 0))
                         .after(datetime(1997, 9, 4, 9, 0), inc=True),
                         datetime(1997, 9, 4, 9, 0))
    def testXAfter(self):
        self.assertEqual(list(rrule(DAILY,
                                    dtstart=datetime(1997, 9, 2, 9, 0))
                              .xafter(datetime(1997, 9, 8, 9, 0), count=12)),
                         [datetime(1997, 9, 9, 9, 0),
                          datetime(1997, 9, 10, 9, 0),
                          datetime(1997, 9, 11, 9, 0),
                          datetime(1997, 9, 12, 9, 0),
                          datetime(1997, 9, 13, 9, 0),
                          datetime(1997, 9, 14, 9, 0),
                          datetime(1997, 9, 15, 9, 0),
                          datetime(1997, 9, 16, 9, 0),
                          datetime(1997, 9, 17, 9, 0),
                          datetime(1997, 9, 18, 9, 0),
                          datetime(1997, 9, 19, 9, 0),
                          datetime(1997, 9, 20, 9, 0)])
    def testXAfterInc(self):
        self.assertEqual(list(rrule(DAILY,
                                    dtstart=datetime(1997, 9, 2, 9, 0))
                              .xafter(datetime(1997, 9, 8, 9, 0), count=12, inc=True)),
                         [datetime(1997, 9, 8, 9, 0),
                          datetime(1997, 9, 9, 9, 0),
                          datetime(1997, 9, 10, 9, 0),
                          datetime(1997, 9, 11, 9, 0),
                          datetime(1997, 9, 12, 9, 0),
                          datetime(1997, 9, 13, 9, 0),
                          datetime(1997, 9, 14, 9, 0),
                          datetime(1997, 9, 15, 9, 0),
                          datetime(1997, 9, 16, 9, 0),
                          datetime(1997, 9, 17, 9, 0),
                          datetime(1997, 9, 18, 9, 0),
                          datetime(1997, 9, 19, 9, 0)])
    def testBetween(self):
        self.assertEqual(rrule(DAILY,
                               #count=5,
                               dtstart=datetime(1997, 9, 2, 9, 0))
                         .between(datetime(1997, 9, 2, 9, 0),
                                  datetime(1997, 9, 6, 9, 0)),
                         [datetime(1997, 9, 3, 9, 0),
                          datetime(1997, 9, 4, 9, 0),
                          datetime(1997, 9, 5, 9, 0)])
    def testBetweenInc(self):
        self.assertEqual(rrule(DAILY,
                               #count=5,
                               dtstart=datetime(1997, 9, 2, 9, 0))
                         .between(datetime(1997, 9, 2, 9, 0),
                                  datetime(1997, 9, 6, 9, 0), inc=True),
                         [datetime(1997, 9, 2, 9, 0),
                          datetime(1997, 9, 3, 9, 0),
                          datetime(1997, 9, 4, 9, 0),
                          datetime(1997, 9, 5, 9, 0),
                          datetime(1997, 9, 6, 9, 0)])
    # --- cache=True: results must be identical before and after the cache
    # is populated by a full iteration.
    def testCachePre(self):
        rr = rrule(DAILY, count=15, cache=True,
                   dtstart=datetime(1997, 9, 2, 9, 0))
        self.assertEqual(list(rr),
                         [datetime(1997, 9, 2, 9, 0),
                          datetime(1997, 9, 3, 9, 0),
                          datetime(1997, 9, 4, 9, 0),
                          datetime(1997, 9, 5, 9, 0),
                          datetime(1997, 9, 6, 9, 0),
                          datetime(1997, 9, 7, 9, 0),
                          datetime(1997, 9, 8, 9, 0),
                          datetime(1997, 9, 9, 9, 0),
                          datetime(1997, 9, 10, 9, 0),
                          datetime(1997, 9, 11, 9, 0),
                          datetime(1997, 9, 12, 9, 0),
                          datetime(1997, 9, 13, 9, 0),
                          datetime(1997, 9, 14, 9, 0),
                          datetime(1997, 9, 15, 9, 0),
                          datetime(1997, 9, 16, 9, 0)])
    def testCachePost(self):
        rr = rrule(DAILY, count=15, cache=True,
                   dtstart=datetime(1997, 9, 2, 9, 0))
        for x in rr: pass  # exhaust once so list() below reads the cache
        self.assertEqual(list(rr),
                         [datetime(1997, 9, 2, 9, 0),
                          datetime(1997, 9, 3, 9, 0),
                          datetime(1997, 9, 4, 9, 0),
                          datetime(1997, 9, 5, 9, 0),
                          datetime(1997, 9, 6, 9, 0),
                          datetime(1997, 9, 7, 9, 0),
                          datetime(1997, 9, 8, 9, 0),
                          datetime(1997, 9, 9, 9, 0),
                          datetime(1997, 9, 10, 9, 0),
                          datetime(1997, 9, 11, 9, 0),
                          datetime(1997, 9, 12, 9, 0),
                          datetime(1997, 9, 13, 9, 0),
                          datetime(1997, 9, 14, 9, 0),
                          datetime(1997, 9, 15, 9, 0),
                          datetime(1997, 9, 16, 9, 0)])
    def testCachePostInternal(self):
        # White-box check of the private _cache list contents.
        rr = rrule(DAILY, count=15, cache=True,
                   dtstart=datetime(1997, 9, 2, 9, 0))
        for x in rr: pass
        self.assertEqual(rr._cache,
                         [datetime(1997, 9, 2, 9, 0),
                          datetime(1997, 9, 3, 9, 0),
                          datetime(1997, 9, 4, 9, 0),
                          datetime(1997, 9, 5, 9, 0),
                          datetime(1997, 9, 6, 9, 0),
                          datetime(1997, 9, 7, 9, 0),
                          datetime(1997, 9, 8, 9, 0),
                          datetime(1997, 9, 9, 9, 0),
                          datetime(1997, 9, 10, 9, 0),
                          datetime(1997, 9, 11, 9, 0),
                          datetime(1997, 9, 12, 9, 0),
                          datetime(1997, 9, 13, 9, 0),
                          datetime(1997, 9, 14, 9, 0),
                          datetime(1997, 9, 15, 9, 0),
                          datetime(1997, 9, 16, 9, 0)])
    def testCachePreContains(self):
        rr = rrule(DAILY, count=3, cache=True,
                   dtstart=datetime(1997, 9, 2, 9, 0))
        self.assertEqual(datetime(1997, 9, 3, 9, 0) in rr, True)
    def testCachePostContains(self):
        rr = rrule(DAILY, count=3, cache=True,
                   dtstart=datetime(1997, 9, 2, 9, 0))
        for x in rr: pass
        self.assertEqual(datetime(1997, 9, 3, 9, 0) in rr, True)
    # --- rrulestr(): parsing iCalendar RRULE strings.
    def testStr(self):
        self.assertEqual(list(rrulestr(
                              "DTSTART:19970902T090000\n"
                              "RRULE:FREQ=YEARLY;COUNT=3\n"
                              )),
                         [datetime(1997, 9, 2, 9, 0),
                          datetime(1998, 9, 2, 9, 0),
                          datetime(1999, 9, 2, 9, 0)])
    def testStrWithTZID(self):
        NYC = tz.gettz('America/New_York')
        self.assertEqual(list(rrulestr(
                              "DTSTART;TZID=America/New_York:19970902T090000\n"
                              "RRULE:FREQ=YEARLY;COUNT=3\n"
                              )),
                         [datetime(1997, 9, 2, 9, 0, tzinfo=NYC),
                          datetime(1998, 9, 2, 9, 0, tzinfo=NYC),
                          datetime(1999, 9, 2, 9, 0, tzinfo=NYC)])
    def testStrWithTZIDMapping(self):
        # tzids may be a dict mapping TZID labels to tzinfo objects.
        rrstr = ("DTSTART;TZID=Eastern:19970902T090000\n" +
                 "RRULE:FREQ=YEARLY;COUNT=3")

        NYC = tz.gettz('America/New_York')
        rr = rrulestr(rrstr, tzids={'Eastern': NYC})
        exp = [datetime(1997, 9, 2, 9, 0, tzinfo=NYC),
               datetime(1998, 9, 2, 9, 0, tzinfo=NYC),
               datetime(1999, 9, 2, 9, 0, tzinfo=NYC)]

        self.assertEqual(list(rr), exp)
    def testStrWithTZIDCallable(self):
        # tzids may also be a callable TZID -> tzinfo.
        rrstr = ('DTSTART;TZID=UTC+04:19970902T090000\n' +
                 'RRULE:FREQ=YEARLY;COUNT=3')

        TZ = tz.tzstr('UTC+04')
        def parse_tzstr(tzstr):
            if tzstr is None:
                raise ValueError('Invalid tzstr')

            return tz.tzstr(tzstr)

        rr = rrulestr(rrstr, tzids=parse_tzstr)

        exp = [datetime(1997, 9, 2, 9, 0, tzinfo=TZ),
               datetime(1998, 9, 2, 9, 0, tzinfo=TZ),
               datetime(1999, 9, 2, 9, 0, tzinfo=TZ),]

        self.assertEqual(list(rr), exp)
    def testStrWithTZIDCallableFailure(self):
        # Exceptions raised by the tzids callable must propagate.
        rrstr = ('DTSTART;TZID=America/New_York:19970902T090000\n' +
                 'RRULE:FREQ=YEARLY;COUNT=3')

        class TzInfoError(Exception):
            pass

        def tzinfos(tzstr):
            if tzstr == 'America/New_York':
                raise TzInfoError('Invalid!')

            return None

        with self.assertRaises(TzInfoError):
            rrulestr(rrstr, tzids=tzinfos)
    def testStrWithConflictingTZID(self):
        # RFC 5545 Section 3.3.5, FORM #2: DATE WITH UTC TIME
        # https://tools.ietf.org/html/rfc5545#section-3.3.5
        # The "TZID" property parameter MUST NOT be applied to DATE-TIME
        with self.assertRaises(ValueError):
            rrulestr("DTSTART;TZID=America/New_York:19970902T090000Z\n"+
                     "RRULE:FREQ=YEARLY;COUNT=3\n")
    def testStrType(self):
        # A single RRULE parses to an rrule, not an rruleset.
        self.assertEqual(isinstance(rrulestr(
                              "DTSTART:19970902T090000\n"
                              "RRULE:FREQ=YEARLY;COUNT=3\n"
                              ), rrule), True)
    def testStrForceSetType(self):
        # forceset=True wraps even a single rule in an rruleset.
        self.assertEqual(isinstance(rrulestr(
                              "DTSTART:19970902T090000\n"
                              "RRULE:FREQ=YEARLY;COUNT=3\n"
                              , forceset=True), rruleset), True)
    def testStrSetType(self):
        # Multiple RRULE lines automatically produce an rruleset.
        self.assertEqual(isinstance(rrulestr(
                              "DTSTART:19970902T090000\n"
                              "RRULE:FREQ=YEARLY;COUNT=2;BYDAY=TU\n"
                              "RRULE:FREQ=YEARLY;COUNT=1;BYDAY=TH\n"
                              ), rruleset), True)
    def testStrCase(self):
        # Property names and values are case-insensitive.
        self.assertEqual(list(rrulestr(
                              "dtstart:19970902T090000\n"
                              "rrule:freq=yearly;count=3\n"
                              )),
                         [datetime(1997, 9, 2, 9, 0),
                          datetime(1998, 9, 2, 9, 0),
                          datetime(1999, 9, 2, 9, 0)])
    def testStrSpaces(self):
        # Surrounding whitespace is tolerated.
        self.assertEqual(list(rrulestr(
                              " DTSTART:19970902T090000 "
                              " RRULE:FREQ=YEARLY;COUNT=3 "
                              )),
                         [datetime(1997, 9, 2, 9, 0),
                          datetime(1998, 9, 2, 9, 0),
                          datetime(1999, 9, 2, 9, 0)])
    def testStrSpacesAndLines(self):
        # Blank lines between properties are tolerated.
        self.assertEqual(list(rrulestr(
                              " DTSTART:19970902T090000 \n"
                              " \n"
                              " RRULE:FREQ=YEARLY;COUNT=3 \n"
                              )),
                         [datetime(1997, 9, 2, 9, 0),
                          datetime(1998, 9, 2, 9, 0),
                          datetime(1999, 9, 2, 9, 0)])
    def testStrNoDTStart(self):
        # dtstart may be supplied as a keyword instead of a DTSTART line.
        self.assertEqual(list(rrulestr(
                              "RRULE:FREQ=YEARLY;COUNT=3\n"
                              , dtstart=datetime(1997, 9, 2, 9, 0))),
                         [datetime(1997, 9, 2, 9, 0),
                          datetime(1998, 9, 2, 9, 0),
                          datetime(1999, 9, 2, 9, 0)])
    def testStrValueOnly(self):
        # The "RRULE:" property name prefix is optional.
        self.assertEqual(list(rrulestr(
                              "FREQ=YEARLY;COUNT=3\n"
                              , dtstart=datetime(1997, 9, 2, 9, 0))),
                         [datetime(1997, 9, 2, 9, 0),
                          datetime(1998, 9, 2, 9, 0),
                          datetime(1999, 9, 2, 9, 0)])
    def testStrUnfold(self):
        # unfold=True re-joins RFC 5545 folded (continuation) lines.
        self.assertEqual(list(rrulestr(
                              "FREQ=YEA\n RLY;COUNT=3\n", unfold=True,
                              dtstart=datetime(1997, 9, 2, 9, 0))),
                         [datetime(1997, 9, 2, 9, 0),
                          datetime(1998, 9, 2, 9, 0),
                          datetime(1999, 9, 2, 9, 0)])
    # --- rrulestr(): RDATE/EXRULE/EXDATE combinations building rrulesets.
    def testStrSet(self):
        self.assertEqual(list(rrulestr(
                              "DTSTART:19970902T090000\n"
                              "RRULE:FREQ=YEARLY;COUNT=2;BYDAY=TU\n"
                              "RRULE:FREQ=YEARLY;COUNT=1;BYDAY=TH\n"
                              )),
                         [datetime(1997, 9, 2, 9, 0),
                          datetime(1997, 9, 4, 9, 0),
                          datetime(1997, 9, 9, 9, 0)])
    def testStrSetDate(self):
        self.assertEqual(list(rrulestr(
                              "DTSTART:19970902T090000\n"
                              "RRULE:FREQ=YEARLY;COUNT=1;BYDAY=TU\n"
                              "RDATE:19970904T090000\n"
                              "RDATE:19970909T090000\n"
                              )),
                         [datetime(1997, 9, 2, 9, 0),
                          datetime(1997, 9, 4, 9, 0),
                          datetime(1997, 9, 9, 9, 0)])
    def testStrSetExRule(self):
        # EXRULE removes its occurrences from the RRULE's.
        self.assertEqual(list(rrulestr(
                              "DTSTART:19970902T090000\n"
                              "RRULE:FREQ=YEARLY;COUNT=6;BYDAY=TU,TH\n"
                              "EXRULE:FREQ=YEARLY;COUNT=3;BYDAY=TH\n"
                              )),
                         [datetime(1997, 9, 2, 9, 0),
                          datetime(1997, 9, 9, 9, 0),
                          datetime(1997, 9, 16, 9, 0)])
    def testStrSetExDate(self):
        self.assertEqual(list(rrulestr(
                              "DTSTART:19970902T090000\n"
                              "RRULE:FREQ=YEARLY;COUNT=6;BYDAY=TU,TH\n"
                              "EXDATE:19970904T090000\n"
                              "EXDATE:19970911T090000\n"
                              "EXDATE:19970918T090000\n"
                              )),
                         [datetime(1997, 9, 2, 9, 0),
                          datetime(1997, 9, 9, 9, 0),
                          datetime(1997, 9, 16, 9, 0)])
    def testStrSetExDateMultiple(self):
        # A single EXDATE line may carry several comma-separated values.
        rrstr = ("DTSTART:19970902T090000\n"
                 "RRULE:FREQ=YEARLY;COUNT=6;BYDAY=TU,TH\n"
                 "EXDATE:19970904T090000,19970911T090000,19970918T090000\n")

        rr = rrulestr(rrstr)
        assert list(rr) == [datetime(1997, 9, 2, 9, 0),
                            datetime(1997, 9, 9, 9, 0),
                            datetime(1997, 9, 16, 9, 0)]
    def testStrSetExDateWithTZID(self):
        BXL = tz.gettz('Europe/Brussels')
        rr = rrulestr("DTSTART;TZID=Europe/Brussels:19970902T090000\n"
                      "RRULE:FREQ=YEARLY;COUNT=6;BYDAY=TU,TH\n"
                      "EXDATE;TZID=Europe/Brussels:19970904T090000\n"
                      "EXDATE;TZID=Europe/Brussels:19970911T090000\n"
                      "EXDATE;TZID=Europe/Brussels:19970918T090000\n")

        assert list(rr) == [datetime(1997, 9, 2, 9, 0, tzinfo=BXL),
                            datetime(1997, 9, 9, 9, 0, tzinfo=BXL),
                            datetime(1997, 9, 16, 9, 0, tzinfo=BXL)]
    def testStrSetExDateValueDateTimeNoTZID(self):
        rrstr = '\n'.join([
            "DTSTART:19970902T090000",
            "RRULE:FREQ=YEARLY;COUNT=4;BYDAY=TU,TH",
            "EXDATE;VALUE=DATE-TIME:19970902T090000",
            "EXDATE;VALUE=DATE-TIME:19970909T090000",
        ])

        rr = rrulestr(rrstr)
        assert list(rr) == [datetime(1997, 9, 4, 9), datetime(1997, 9, 11, 9)]
    def testStrSetExDateValueMixDateTimeNoTZID(self):
        # VALUE=DATE-TIME and bare EXDATE lines can be mixed.
        rrstr = '\n'.join([
            "DTSTART:19970902T090000",
            "RRULE:FREQ=YEARLY;COUNT=4;BYDAY=TU,TH",
            "EXDATE;VALUE=DATE-TIME:19970902T090000",
            "EXDATE:19970909T090000",
        ])

        rr = rrulestr(rrstr)
        assert list(rr) == [datetime(1997, 9, 4, 9), datetime(1997, 9, 11, 9)]
    def testStrSetExDateValueDateTimeWithTZID(self):
        BXL = tz.gettz('Europe/Brussels')
        rrstr = '\n'.join([
            "DTSTART;VALUE=DATE-TIME;TZID=Europe/Brussels:19970902T090000",
            "RRULE:FREQ=YEARLY;COUNT=4;BYDAY=TU,TH",
            "EXDATE;VALUE=DATE-TIME;TZID=Europe/Brussels:19970902T090000",
            "EXDATE;VALUE=DATE-TIME;TZID=Europe/Brussels:19970909T090000",
        ])

        rr = rrulestr(rrstr)
        assert list(rr) == [datetime(1997, 9, 4, 9, tzinfo=BXL),
                            datetime(1997, 9, 11, 9, tzinfo=BXL)]
    def testStrSetExDateValueDate(self):
        rrstr = '\n'.join([
            "DTSTART;VALUE=DATE:19970902",
            "RRULE:FREQ=YEARLY;COUNT=4;BYDAY=TU,TH",
            "EXDATE;VALUE=DATE:19970902",
            "EXDATE;VALUE=DATE:19970909",
        ])

        rr = rrulestr(rrstr)
        assert list(rr) == [datetime(1997, 9, 4), datetime(1997, 9, 11)]
    def testStrSetDateAndExDate(self):
        self.assertEqual(list(rrulestr(
                              "DTSTART:19970902T090000\n"
                              "RDATE:19970902T090000\n"
                              "RDATE:19970904T090000\n"
                              "RDATE:19970909T090000\n"
                              "RDATE:19970911T090000\n"
                              "RDATE:19970916T090000\n"
                              "RDATE:19970918T090000\n"
                              "EXDATE:19970904T090000\n"
                              "EXDATE:19970911T090000\n"
                              "EXDATE:19970918T090000\n"
                              )),
                         [datetime(1997, 9, 2, 9, 0),
                          datetime(1997, 9, 9, 9, 0),
                          datetime(1997, 9, 16, 9, 0)])
    def testStrSetDateAndExRule(self):
        self.assertEqual(list(rrulestr(
                              "DTSTART:19970902T090000\n"
                              "RDATE:19970902T090000\n"
                              "RDATE:19970904T090000\n"
                              "RDATE:19970909T090000\n"
                              "RDATE:19970911T090000\n"
                              "RDATE:19970916T090000\n"
                              "RDATE:19970918T090000\n"
                              "EXRULE:FREQ=YEARLY;COUNT=3;BYDAY=TH\n"
                              )),
                         [datetime(1997, 9, 2, 9, 0),
                          datetime(1997, 9, 9, 9, 0),
                          datetime(1997, 9, 16, 9, 0)])
    def testStrKeywords(self):
        # Every BY* keyword can be parsed from a single RRULE line.
        self.assertEqual(list(rrulestr(
                              "DTSTART:19970902T090000\n"
                              "RRULE:FREQ=YEARLY;COUNT=3;INTERVAL=3;"
                              "BYMONTH=3;BYWEEKDAY=TH;BYMONTHDAY=3;"
                              "BYHOUR=3;BYMINUTE=3;BYSECOND=3\n"
                              )),
                         [datetime(2033, 3, 3, 3, 3, 3),
                          datetime(2039, 3, 3, 3, 3, 3),
                          datetime(2072, 3, 3, 3, 3, 3)])
    def testStrNWeekDay(self):
        # BYDAY with ordinals, e.g. 1TU (first Tuesday), -1TH (last Thursday).
        self.assertEqual(list(rrulestr(
                              "DTSTART:19970902T090000\n"
                              "RRULE:FREQ=YEARLY;COUNT=3;BYDAY=1TU,-1TH\n"
                              )),
                         [datetime(1997, 12, 25, 9, 0),
                          datetime(1998, 1, 6, 9, 0),
                          datetime(1998, 12, 31, 9, 0)])
    def testStrUntil(self):
        self.assertEqual(list(rrulestr(
                              "DTSTART:19970902T090000\n"
                              "RRULE:FREQ=YEARLY;"
                              "UNTIL=19990101T000000;BYDAY=1TU,-1TH\n"
                              )),
                         [datetime(1997, 12, 25, 9, 0),
                          datetime(1998, 1, 6, 9, 0),
                          datetime(1998, 12, 31, 9, 0)])
    def testStrValueDatetime(self):
        rr = rrulestr("DTSTART;VALUE=DATE-TIME:19970902T090000\n"
                      "RRULE:FREQ=YEARLY;COUNT=2")

        self.assertEqual(list(rr), [datetime(1997, 9, 2, 9, 0, 0),
                                    datetime(1998, 9, 2, 9, 0, 0)])
    def testStrValueDate(self):
        # VALUE=DATE dtstart expands to midnight.
        rr = rrulestr("DTSTART;VALUE=DATE:19970902\n"
                      "RRULE:FREQ=YEARLY;COUNT=2")

        self.assertEqual(list(rr), [datetime(1997, 9, 2, 0, 0, 0),
                                    datetime(1998, 9, 2, 0, 0, 0)])
def testStrMultipleDTStartComma(self):
with pytest.raises(ValueError):
rr = rrulestr("DTSTART:19970101T000000,19970202T000000\n"
"RRULE:FREQ=YEARLY;COUNT=1")
    # --- Invalid string input and invalid bysetpos values must raise.
    def testStrInvalidUntil(self):
        with self.assertRaises(ValueError):
            list(rrulestr("DTSTART:19970902T090000\n"
                          "RRULE:FREQ=YEARLY;"
                          "UNTIL=TheCowsComeHome;BYDAY=1TU,-1TH\n"))
    def testStrUntilMustBeUTC(self):
        # With an aware DTSTART, UNTIL must be specified in UTC (trailing Z).
        with self.assertRaises(ValueError):
            list(rrulestr("DTSTART;TZID=America/New_York:19970902T090000\n"
                          "RRULE:FREQ=YEARLY;"
                          "UNTIL=19990101T000000;BYDAY=1TU,-1TH\n"))
    def testStrUntilWithTZ(self):
        NYC = tz.gettz('America/New_York')
        rr = list(rrulestr("DTSTART;TZID=America/New_York:19970101T000000\n"
                           "RRULE:FREQ=YEARLY;"
                           "UNTIL=19990101T000000Z\n"))
        self.assertEqual(list(rr), [datetime(1997, 1, 1, 0, 0, 0, tzinfo=NYC),
                                    datetime(1998, 1, 1, 0, 0, 0, tzinfo=NYC)])
    def testStrEmptyByDay(self):
        with self.assertRaises(ValueError):
            list(rrulestr("DTSTART:19970902T090000\n"
                          "FREQ=WEEKLY;"
                          "BYDAY=;" # This part is invalid
                          "WKST=SU"))
    def testStrInvalidByDay(self):
        with self.assertRaises(ValueError):
            list(rrulestr("DTSTART:19970902T090000\n"
                          "FREQ=WEEKLY;"
                          "BYDAY=-1OK;" # This part is invalid
                          "WKST=SU"))
    def testBadBySetPos(self):
        # bysetpos=0 is meaningless (positions are 1-based or negative).
        self.assertRaises(ValueError,
                          rrule, MONTHLY,
                          count=1,
                          bysetpos=0,
                          dtstart=datetime(1997, 9, 2, 9, 0))
    def testBadBySetPosMany(self):
        # A single invalid entry (0) poisons the whole bysetpos tuple.
        self.assertRaises(ValueError,
                          rrule, MONTHLY,
                          count=1,
                          bysetpos=(-1, 0, 1),
                          dtstart=datetime(1997, 9, 2, 9, 0))
# Tests to ensure that str(rrule) works
def testToStrYearly(self):
rule = rrule(YEARLY, count=3, dtstart=datetime(1997, 9, 2, 9, 0))
self._rrulestr_reverse_test(rule)
def testToStrYearlyInterval(self):
rule = rrule(YEARLY, count=3, interval=2,
dtstart=datetime(1997, 9, 2, 9, 0))
self._rrulestr_reverse_test(rule)
def testToStrYearlyByMonth(self):
self._rrulestr_reverse_test(rrule(YEARLY,
count=3,
bymonth=(1, 3),
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrYearlyByMonthDay(self):
self._rrulestr_reverse_test(rrule(YEARLY,
count=3,
bymonthday=(1, 3),
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrYearlyByMonthAndMonthDay(self):
self._rrulestr_reverse_test(rrule(YEARLY,
count=3,
bymonth=(1, 3),
bymonthday=(5, 7),
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrYearlyByWeekDay(self):
self._rrulestr_reverse_test(rrule(YEARLY,
count=3,
byweekday=(TU, TH),
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrYearlyByNWeekDay(self):
self._rrulestr_reverse_test(rrule(YEARLY,
count=3,
byweekday=(TU(1), TH(-1)),
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrYearlyByNWeekDayLarge(self):
self._rrulestr_reverse_test(rrule(YEARLY,
count=3,
byweekday=(TU(3), TH(-3)),
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrYearlyByMonthAndWeekDay(self):
self._rrulestr_reverse_test(rrule(YEARLY,
count=3,
bymonth=(1, 3),
byweekday=(TU, TH),
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrYearlyByMonthAndNWeekDay(self):
self._rrulestr_reverse_test(rrule(YEARLY,
count=3,
bymonth=(1, 3),
byweekday=(TU(1), TH(-1)),
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrYearlyByMonthAndNWeekDayLarge(self):
# This is interesting because the TH(-3) ends up before
# the TU(3).
self._rrulestr_reverse_test(rrule(YEARLY,
count=3,
bymonth=(1, 3),
byweekday=(TU(3), TH(-3)),
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrYearlyByMonthDayAndWeekDay(self):
self._rrulestr_reverse_test(rrule(YEARLY,
count=3,
bymonthday=(1, 3),
byweekday=(TU, TH),
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrYearlyByMonthAndMonthDayAndWeekDay(self):
self._rrulestr_reverse_test(rrule(YEARLY,
count=3,
bymonth=(1, 3),
bymonthday=(1, 3),
byweekday=(TU, TH),
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrYearlyByYearDay(self):
self._rrulestr_reverse_test(rrule(YEARLY,
count=4,
byyearday=(1, 100, 200, 365),
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrYearlyByYearDayNeg(self):
self._rrulestr_reverse_test(rrule(YEARLY,
count=4,
byyearday=(-365, -266, -166, -1),
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrYearlyByMonthAndYearDay(self):
self._rrulestr_reverse_test(rrule(YEARLY,
count=4,
bymonth=(4, 7),
byyearday=(1, 100, 200, 365),
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrYearlyByMonthAndYearDayNeg(self):
self._rrulestr_reverse_test(rrule(YEARLY,
count=4,
bymonth=(4, 7),
byyearday=(-365, -266, -166, -1),
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrYearlyByWeekNo(self):
self._rrulestr_reverse_test(rrule(YEARLY,
count=3,
byweekno=20,
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrYearlyByWeekNoAndWeekDay(self):
# That's a nice one. The first days of week number one
# may be in the last year.
self._rrulestr_reverse_test(rrule(YEARLY,
count=3,
byweekno=1,
byweekday=MO,
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrYearlyByWeekNoAndWeekDayLarge(self):
# Another nice test. The last days of week number 52/53
# may be in the next year.
self._rrulestr_reverse_test(rrule(YEARLY,
count=3,
byweekno=52,
byweekday=SU,
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrYearlyByWeekNoAndWeekDayLast(self):
    """A YEARLY rule with BYWEEKNO=-1 (last ISO week) + BYDAY=SU
    survives the str() -> rrulestr() round trip."""
    options = dict(
        count=3,
        byweekno=-1,
        byweekday=SU,
        dtstart=datetime(1997, 9, 2, 9, 0),
    )
    self._rrulestr_reverse_test(rrule(YEARLY, **options))
def testToStrYearlyByEaster(self):
self._rrulestr_reverse_test(rrule(YEARLY,
count=3,
byeaster=0,
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrYearlyByEasterPos(self):
self._rrulestr_reverse_test(rrule(YEARLY,
count=3,
byeaster=1,
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrYearlyByEasterNeg(self):
self._rrulestr_reverse_test(rrule(YEARLY,
count=3,
byeaster=-1,
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrYearlyByWeekNoAndWeekDay53(self):
self._rrulestr_reverse_test(rrule(YEARLY,
count=3,
byweekno=53,
byweekday=MO,
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrYearlyByHour(self):
self._rrulestr_reverse_test(rrule(YEARLY,
count=3,
byhour=(6, 18),
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrYearlyByMinute(self):
self._rrulestr_reverse_test(rrule(YEARLY,
count=3,
byminute=(6, 18),
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrYearlyBySecond(self):
self._rrulestr_reverse_test(rrule(YEARLY,
count=3,
bysecond=(6, 18),
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrYearlyByHourAndMinute(self):
self._rrulestr_reverse_test(rrule(YEARLY,
count=3,
byhour=(6, 18),
byminute=(6, 18),
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrYearlyByHourAndSecond(self):
self._rrulestr_reverse_test(rrule(YEARLY,
count=3,
byhour=(6, 18),
bysecond=(6, 18),
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrYearlyByMinuteAndSecond(self):
self._rrulestr_reverse_test(rrule(YEARLY,
count=3,
byminute=(6, 18),
bysecond=(6, 18),
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrYearlyByHourAndMinuteAndSecond(self):
self._rrulestr_reverse_test(rrule(YEARLY,
count=3,
byhour=(6, 18),
byminute=(6, 18),
bysecond=(6, 18),
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrYearlyBySetPos(self):
self._rrulestr_reverse_test(rrule(YEARLY,
count=3,
bymonthday=15,
byhour=(6, 18),
bysetpos=(3, -3),
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrMonthly(self):
self._rrulestr_reverse_test(rrule(MONTHLY,
count=3,
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrMonthlyInterval(self):
self._rrulestr_reverse_test(rrule(MONTHLY,
count=3,
interval=2,
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrMonthlyIntervalLarge(self):
self._rrulestr_reverse_test(rrule(MONTHLY,
count=3,
interval=18,
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrMonthlyByMonth(self):
self._rrulestr_reverse_test(rrule(MONTHLY,
count=3,
bymonth=(1, 3),
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrMonthlyByMonthDay(self):
self._rrulestr_reverse_test(rrule(MONTHLY,
count=3,
bymonthday=(1, 3),
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrMonthlyByMonthAndMonthDay(self):
self._rrulestr_reverse_test(rrule(MONTHLY,
count=3,
bymonth=(1, 3),
bymonthday=(5, 7),
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrMonthlyByWeekDay(self):
    """Round-trip a MONTHLY BYDAY rule through str() -> rrulestr(), then
    directly check occurrence expansion for a single-ordinal MO(+3) rule."""
    self._rrulestr_reverse_test(rrule(MONTHLY,
                                      count=3,
                                      byweekday=(TU, TH),
                                      dtstart=datetime(1997, 9, 2, 9, 0)))
    # Third Monday of the month
    # NOTE(review): this between() assertion checks recurrence expansion,
    # not string round-tripping; it appears copy-pasted from the plain
    # recurrence tests -- confirm it belongs in the ToStr suite.
    self.assertEqual(rrule(MONTHLY,
                           byweekday=(MO(+3)),
                           dtstart=datetime(1997, 9, 1)).between(datetime(1997,
                                                                          9,
                                                                          1),
                                                                 datetime(1997,
                                                                          12,
                                                                          1)),
                     [datetime(1997, 9, 15, 0, 0),
                      datetime(1997, 10, 20, 0, 0),
                      datetime(1997, 11, 17, 0, 0)])
def testToStrMonthlyByNWeekDay(self):
self._rrulestr_reverse_test(rrule(MONTHLY,
count=3,
byweekday=(TU(1), TH(-1)),
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrMonthlyByNWeekDayLarge(self):
self._rrulestr_reverse_test(rrule(MONTHLY,
count=3,
byweekday=(TU(3), TH(-3)),
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrMonthlyByMonthAndWeekDay(self):
self._rrulestr_reverse_test(rrule(MONTHLY,
count=3,
bymonth=(1, 3),
byweekday=(TU, TH),
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrMonthlyByMonthAndNWeekDay(self):
self._rrulestr_reverse_test(rrule(MONTHLY,
count=3,
bymonth=(1, 3),
byweekday=(TU(1), TH(-1)),
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrMonthlyByMonthAndNWeekDayLarge(self):
self._rrulestr_reverse_test(rrule(MONTHLY,
count=3,
bymonth=(1, 3),
byweekday=(TU(3), TH(-3)),
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrMonthlyByMonthDayAndWeekDay(self):
self._rrulestr_reverse_test(rrule(MONTHLY,
count=3,
bymonthday=(1, 3),
byweekday=(TU, TH),
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrMonthlyByMonthAndMonthDayAndWeekDay(self):
self._rrulestr_reverse_test(rrule(MONTHLY,
count=3,
bymonth=(1, 3),
bymonthday=(1, 3),
byweekday=(TU, TH),
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrMonthlyByYearDay(self):
self._rrulestr_reverse_test(rrule(MONTHLY,
count=4,
byyearday=(1, 100, 200, 365),
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrMonthlyByYearDayNeg(self):
self._rrulestr_reverse_test(rrule(MONTHLY,
count=4,
byyearday=(-365, -266, -166, -1),
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrMonthlyByMonthAndYearDay(self):
self._rrulestr_reverse_test(rrule(MONTHLY,
count=4,
bymonth=(4, 7),
byyearday=(1, 100, 200, 365),
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrMonthlyByMonthAndYearDayNeg(self):
self._rrulestr_reverse_test(rrule(MONTHLY,
count=4,
bymonth=(4, 7),
byyearday=(-365, -266, -166, -1),
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrMonthlyByWeekNo(self):
self._rrulestr_reverse_test(rrule(MONTHLY,
count=3,
byweekno=20,
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrMonthlyByWeekNoAndWeekDay(self):
# That's a nice one. The first days of week number one
# may be in the last year.
self._rrulestr_reverse_test(rrule(MONTHLY,
count=3,
byweekno=1,
byweekday=MO,
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrMonthlyByWeekNoAndWeekDayLarge(self):
# Another nice test. The last days of week number 52/53
# may be in the next year.
self._rrulestr_reverse_test(rrule(MONTHLY,
count=3,
byweekno=52,
byweekday=SU,
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrMonthlyByWeekNoAndWeekDayLast(self):
self._rrulestr_reverse_test(rrule(MONTHLY,
count=3,
byweekno=-1,
byweekday=SU,
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrMonthlyByWeekNoAndWeekDay53(self):
self._rrulestr_reverse_test(rrule(MONTHLY,
count=3,
byweekno=53,
byweekday=MO,
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrMonthlyByEaster(self):
self._rrulestr_reverse_test(rrule(MONTHLY,
count=3,
byeaster=0,
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrMonthlyByEasterPos(self):
self._rrulestr_reverse_test(rrule(MONTHLY,
count=3,
byeaster=1,
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrMonthlyByEasterNeg(self):
self._rrulestr_reverse_test(rrule(MONTHLY,
count=3,
byeaster=-1,
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrMonthlyByHour(self):
self._rrulestr_reverse_test(rrule(MONTHLY,
count=3,
byhour=(6, 18),
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrMonthlyByMinute(self):
self._rrulestr_reverse_test(rrule(MONTHLY,
count=3,
byminute=(6, 18),
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrMonthlyBySecond(self):
self._rrulestr_reverse_test(rrule(MONTHLY,
count=3,
bysecond=(6, 18),
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrMonthlyByHourAndMinute(self):
self._rrulestr_reverse_test(rrule(MONTHLY,
count=3,
byhour=(6, 18),
byminute=(6, 18),
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrMonthlyByHourAndSecond(self):
self._rrulestr_reverse_test(rrule(MONTHLY,
count=3,
byhour=(6, 18),
bysecond=(6, 18),
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrMonthlyByMinuteAndSecond(self):
self._rrulestr_reverse_test(rrule(MONTHLY,
count=3,
byminute=(6, 18),
bysecond=(6, 18),
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrMonthlyByHourAndMinuteAndSecond(self):
self._rrulestr_reverse_test(rrule(MONTHLY,
count=3,
byhour=(6, 18),
byminute=(6, 18),
bysecond=(6, 18),
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrMonthlyBySetPos(self):
self._rrulestr_reverse_test(rrule(MONTHLY,
count=3,
bymonthday=(13, 17),
byhour=(6, 18),
bysetpos=(3, -3),
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrWeekly(self):
    """A plain WEEKLY rule survives the str() -> rrulestr() round trip."""
    rule = rrule(WEEKLY, count=3, dtstart=datetime(1997, 9, 2, 9, 0))
    self._rrulestr_reverse_test(rule)
def testToStrWeeklyInterval(self):
self._rrulestr_reverse_test(rrule(WEEKLY,
count=3,
interval=2,
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrWeeklyIntervalLarge(self):
self._rrulestr_reverse_test(rrule(WEEKLY,
count=3,
interval=20,
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrWeeklyByMonth(self):
self._rrulestr_reverse_test(rrule(WEEKLY,
count=3,
bymonth=(1, 3),
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrWeeklyByMonthDay(self):
self._rrulestr_reverse_test(rrule(WEEKLY,
count=3,
bymonthday=(1, 3),
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrWeeklyByMonthAndMonthDay(self):
self._rrulestr_reverse_test(rrule(WEEKLY,
count=3,
bymonth=(1, 3),
bymonthday=(5, 7),
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrWeeklyByWeekDay(self):
self._rrulestr_reverse_test(rrule(WEEKLY,
count=3,
byweekday=(TU, TH),
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrWeeklyByNWeekDay(self):
self._rrulestr_reverse_test(rrule(WEEKLY,
count=3,
byweekday=(TU(1), TH(-1)),
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrWeeklyByMonthAndWeekDay(self):
    """Round-trip a WEEKLY rule combining BYMONTH and BYDAY through
    str() -> rrulestr()."""
    # This test is interesting, because it crosses the year
    # boundary in a weekly period to find day '1' as a
    # valid recurrence.
    self._rrulestr_reverse_test(rrule(WEEKLY,
                                      count=3,
                                      bymonth=(1, 3),
                                      byweekday=(TU, TH),
                                      dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrWeeklyByMonthAndNWeekDay(self):
self._rrulestr_reverse_test(rrule(WEEKLY,
count=3,
bymonth=(1, 3),
byweekday=(TU(1), TH(-1)),
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrWeeklyByMonthDayAndWeekDay(self):
self._rrulestr_reverse_test(rrule(WEEKLY,
count=3,
bymonthday=(1, 3),
byweekday=(TU, TH),
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrWeeklyByMonthAndMonthDayAndWeekDay(self):
self._rrulestr_reverse_test(rrule(WEEKLY,
count=3,
bymonth=(1, 3),
bymonthday=(1, 3),
byweekday=(TU, TH),
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrWeeklyByYearDay(self):
self._rrulestr_reverse_test(rrule(WEEKLY,
count=4,
byyearday=(1, 100, 200, 365),
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrWeeklyByYearDayNeg(self):
self._rrulestr_reverse_test(rrule(WEEKLY,
count=4,
byyearday=(-365, -266, -166, -1),
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrWeeklyByMonthAndYearDay(self):
self._rrulestr_reverse_test(rrule(WEEKLY,
count=4,
bymonth=(1, 7),
byyearday=(1, 100, 200, 365),
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrWeeklyByMonthAndYearDayNeg(self):
self._rrulestr_reverse_test(rrule(WEEKLY,
count=4,
bymonth=(1, 7),
byyearday=(-365, -266, -166, -1),
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrWeeklyByWeekNo(self):
self._rrulestr_reverse_test(rrule(WEEKLY,
count=3,
byweekno=20,
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrWeeklyByWeekNoAndWeekDay(self):
# That's a nice one. The first days of week number one
# may be in the last year.
self._rrulestr_reverse_test(rrule(WEEKLY,
count=3,
byweekno=1,
byweekday=MO,
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrWeeklyByWeekNoAndWeekDayLarge(self):
# Another nice test. The last days of week number 52/53
# may be in the next year.
self._rrulestr_reverse_test(rrule(WEEKLY,
count=3,
byweekno=52,
byweekday=SU,
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrWeeklyByWeekNoAndWeekDayLast(self):
self._rrulestr_reverse_test(rrule(WEEKLY,
count=3,
byweekno=-1,
byweekday=SU,
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrWeeklyByWeekNoAndWeekDay53(self):
self._rrulestr_reverse_test(rrule(WEEKLY,
count=3,
byweekno=53,
byweekday=MO,
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrWeeklyByEaster(self):
self._rrulestr_reverse_test(rrule(WEEKLY,
count=3,
byeaster=0,
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrWeeklyByEasterPos(self):
self._rrulestr_reverse_test(rrule(WEEKLY,
count=3,
byeaster=1,
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrWeeklyByEasterNeg(self):
self._rrulestr_reverse_test(rrule(WEEKLY,
count=3,
byeaster=-1,
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrWeeklyByHour(self):
self._rrulestr_reverse_test(rrule(WEEKLY,
count=3,
byhour=(6, 18),
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrWeeklyByMinute(self):
self._rrulestr_reverse_test(rrule(WEEKLY,
count=3,
byminute=(6, 18),
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrWeeklyBySecond(self):
self._rrulestr_reverse_test(rrule(WEEKLY,
count=3,
bysecond=(6, 18),
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrWeeklyByHourAndMinute(self):
self._rrulestr_reverse_test(rrule(WEEKLY,
count=3,
byhour=(6, 18),
byminute=(6, 18),
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrWeeklyByHourAndSecond(self):
self._rrulestr_reverse_test(rrule(WEEKLY,
count=3,
byhour=(6, 18),
bysecond=(6, 18),
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrWeeklyByMinuteAndSecond(self):
self._rrulestr_reverse_test(rrule(WEEKLY,
count=3,
byminute=(6, 18),
bysecond=(6, 18),
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrWeeklyByHourAndMinuteAndSecond(self):
self._rrulestr_reverse_test(rrule(WEEKLY,
count=3,
byhour=(6, 18),
byminute=(6, 18),
bysecond=(6, 18),
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrWeeklyBySetPos(self):
    """A WEEKLY rule with BYDAY, BYHOUR and BYSETPOS survives the
    str() -> rrulestr() round trip."""
    rule = rrule(
        WEEKLY,
        count=3,
        byweekday=(TU, TH),
        byhour=(6, 18),
        bysetpos=(3, -3),
        dtstart=datetime(1997, 9, 2, 9, 0),
    )
    self._rrulestr_reverse_test(rule)
def testToStrDaily(self):
self._rrulestr_reverse_test(rrule(DAILY,
count=3,
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrDailyInterval(self):
self._rrulestr_reverse_test(rrule(DAILY,
count=3,
interval=2,
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrDailyIntervalLarge(self):
self._rrulestr_reverse_test(rrule(DAILY,
count=3,
interval=92,
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrDailyByMonth(self):
self._rrulestr_reverse_test(rrule(DAILY,
count=3,
bymonth=(1, 3),
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrDailyByMonthDay(self):
self._rrulestr_reverse_test(rrule(DAILY,
count=3,
bymonthday=(1, 3),
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrDailyByMonthAndMonthDay(self):
self._rrulestr_reverse_test(rrule(DAILY,
count=3,
bymonth=(1, 3),
bymonthday=(5, 7),
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrDailyByWeekDay(self):
self._rrulestr_reverse_test(rrule(DAILY,
count=3,
byweekday=(TU, TH),
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrDailyByNWeekDay(self):
self._rrulestr_reverse_test(rrule(DAILY,
count=3,
byweekday=(TU(1), TH(-1)),
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrDailyByMonthAndWeekDay(self):
self._rrulestr_reverse_test(rrule(DAILY,
count=3,
bymonth=(1, 3),
byweekday=(TU, TH),
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrDailyByMonthAndNWeekDay(self):
self._rrulestr_reverse_test(rrule(DAILY,
count=3,
bymonth=(1, 3),
byweekday=(TU(1), TH(-1)),
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrDailyByMonthDayAndWeekDay(self):
self._rrulestr_reverse_test(rrule(DAILY,
count=3,
bymonthday=(1, 3),
byweekday=(TU, TH),
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrDailyByMonthAndMonthDayAndWeekDay(self):
self._rrulestr_reverse_test(rrule(DAILY,
count=3,
bymonth=(1, 3),
bymonthday=(1, 3),
byweekday=(TU, TH),
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrDailyByYearDay(self):
self._rrulestr_reverse_test(rrule(DAILY,
count=4,
byyearday=(1, 100, 200, 365),
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrDailyByYearDayNeg(self):
self._rrulestr_reverse_test(rrule(DAILY,
count=4,
byyearday=(-365, -266, -166, -1),
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrDailyByMonthAndYearDay(self):
self._rrulestr_reverse_test(rrule(DAILY,
count=4,
bymonth=(1, 7),
byyearday=(1, 100, 200, 365),
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrDailyByMonthAndYearDayNeg(self):
self._rrulestr_reverse_test(rrule(DAILY,
count=4,
bymonth=(1, 7),
byyearday=(-365, -266, -166, -1),
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrDailyByWeekNo(self):
self._rrulestr_reverse_test(rrule(DAILY,
count=3,
byweekno=20,
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrDailyByWeekNoAndWeekDay(self):
# That's a nice one. The first days of week number one
# may be in the last year.
self._rrulestr_reverse_test(rrule(DAILY,
count=3,
byweekno=1,
byweekday=MO,
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrDailyByWeekNoAndWeekDayLarge(self):
# Another nice test. The last days of week number 52/53
# may be in the next year.
self._rrulestr_reverse_test(rrule(DAILY,
count=3,
byweekno=52,
byweekday=SU,
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrDailyByWeekNoAndWeekDayLast(self):
self._rrulestr_reverse_test(rrule(DAILY,
count=3,
byweekno=-1,
byweekday=SU,
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrDailyByWeekNoAndWeekDay53(self):
self._rrulestr_reverse_test(rrule(DAILY,
count=3,
byweekno=53,
byweekday=MO,
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrDailyByEaster(self):
self._rrulestr_reverse_test(rrule(DAILY,
count=3,
byeaster=0,
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrDailyByEasterPos(self):
self._rrulestr_reverse_test(rrule(DAILY,
count=3,
byeaster=1,
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrDailyByEasterNeg(self):
self._rrulestr_reverse_test(rrule(DAILY,
count=3,
byeaster=-1,
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrDailyByHour(self):
self._rrulestr_reverse_test(rrule(DAILY,
count=3,
byhour=(6, 18),
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrDailyByMinute(self):
self._rrulestr_reverse_test(rrule(DAILY,
count=3,
byminute=(6, 18),
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrDailyBySecond(self):
self._rrulestr_reverse_test(rrule(DAILY,
count=3,
bysecond=(6, 18),
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrDailyByHourAndMinute(self):
self._rrulestr_reverse_test(rrule(DAILY,
count=3,
byhour=(6, 18),
byminute=(6, 18),
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrDailyByHourAndSecond(self):
self._rrulestr_reverse_test(rrule(DAILY,
count=3,
byhour=(6, 18),
bysecond=(6, 18),
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrDailyByMinuteAndSecond(self):
self._rrulestr_reverse_test(rrule(DAILY,
count=3,
byminute=(6, 18),
bysecond=(6, 18),
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrDailyByHourAndMinuteAndSecond(self):
self._rrulestr_reverse_test(rrule(DAILY,
count=3,
byhour=(6, 18),
byminute=(6, 18),
bysecond=(6, 18),
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrDailyBySetPos(self):
    """A DAILY rule with BYHOUR, BYMINUTE and BYSETPOS survives the
    str() -> rrulestr() round trip."""
    rule = rrule(
        DAILY,
        count=3,
        byhour=(6, 18),
        byminute=(15, 45),
        bysetpos=(3, -3),
        dtstart=datetime(1997, 9, 2, 9, 0),
    )
    self._rrulestr_reverse_test(rule)
def testToStrHourly(self):
self._rrulestr_reverse_test(rrule(HOURLY,
count=3,
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrHourlyInterval(self):
self._rrulestr_reverse_test(rrule(HOURLY,
count=3,
interval=2,
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrHourlyIntervalLarge(self):
self._rrulestr_reverse_test(rrule(HOURLY,
count=3,
interval=769,
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrHourlyByMonth(self):
self._rrulestr_reverse_test(rrule(HOURLY,
count=3,
bymonth=(1, 3),
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrHourlyByMonthDay(self):
self._rrulestr_reverse_test(rrule(HOURLY,
count=3,
bymonthday=(1, 3),
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrHourlyByMonthAndMonthDay(self):
self._rrulestr_reverse_test(rrule(HOURLY,
count=3,
bymonth=(1, 3),
bymonthday=(5, 7),
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrHourlyByWeekDay(self):
self._rrulestr_reverse_test(rrule(HOURLY,
count=3,
byweekday=(TU, TH),
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrHourlyByNWeekDay(self):
self._rrulestr_reverse_test(rrule(HOURLY,
count=3,
byweekday=(TU(1), TH(-1)),
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrHourlyByMonthAndWeekDay(self):
self._rrulestr_reverse_test(rrule(HOURLY,
count=3,
bymonth=(1, 3),
byweekday=(TU, TH),
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrHourlyByMonthAndNWeekDay(self):
self._rrulestr_reverse_test(rrule(HOURLY,
count=3,
bymonth=(1, 3),
byweekday=(TU(1), TH(-1)),
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrHourlyByMonthDayAndWeekDay(self):
self._rrulestr_reverse_test(rrule(HOURLY,
count=3,
bymonthday=(1, 3),
byweekday=(TU, TH),
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrHourlyByMonthAndMonthDayAndWeekDay(self):
self._rrulestr_reverse_test(rrule(HOURLY,
count=3,
bymonth=(1, 3),
bymonthday=(1, 3),
byweekday=(TU, TH),
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrHourlyByYearDay(self):
self._rrulestr_reverse_test(rrule(HOURLY,
count=4,
byyearday=(1, 100, 200, 365),
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrHourlyByYearDayNeg(self):
self._rrulestr_reverse_test(rrule(HOURLY,
count=4,
byyearday=(-365, -266, -166, -1),
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrHourlyByMonthAndYearDay(self):
self._rrulestr_reverse_test(rrule(HOURLY,
count=4,
bymonth=(4, 7),
byyearday=(1, 100, 200, 365),
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrHourlyByMonthAndYearDayNeg(self):
self._rrulestr_reverse_test(rrule(HOURLY,
count=4,
bymonth=(4, 7),
byyearday=(-365, -266, -166, -1),
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrHourlyByWeekNo(self):
self._rrulestr_reverse_test(rrule(HOURLY,
count=3,
byweekno=20,
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrHourlyByWeekNoAndWeekDay(self):
self._rrulestr_reverse_test(rrule(HOURLY,
count=3,
byweekno=1,
byweekday=MO,
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrHourlyByWeekNoAndWeekDayLarge(self):
self._rrulestr_reverse_test(rrule(HOURLY,
count=3,
byweekno=52,
byweekday=SU,
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrHourlyByWeekNoAndWeekDayLast(self):
self._rrulestr_reverse_test(rrule(HOURLY,
count=3,
byweekno=-1,
byweekday=SU,
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrHourlyByWeekNoAndWeekDay53(self):
self._rrulestr_reverse_test(rrule(HOURLY,
count=3,
byweekno=53,
byweekday=MO,
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrHourlyByEaster(self):
self._rrulestr_reverse_test(rrule(HOURLY,
count=3,
byeaster=0,
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrHourlyByEasterPos(self):
self._rrulestr_reverse_test(rrule(HOURLY,
count=3,
byeaster=1,
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrHourlyByEasterNeg(self):
self._rrulestr_reverse_test(rrule(HOURLY,
count=3,
byeaster=-1,
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrHourlyByHour(self):
self._rrulestr_reverse_test(rrule(HOURLY,
count=3,
byhour=(6, 18),
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrHourlyByMinute(self):
self._rrulestr_reverse_test(rrule(HOURLY,
count=3,
byminute=(6, 18),
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrHourlyBySecond(self):
self._rrulestr_reverse_test(rrule(HOURLY,
count=3,
bysecond=(6, 18),
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrHourlyByHourAndMinute(self):
self._rrulestr_reverse_test(rrule(HOURLY,
count=3,
byhour=(6, 18),
byminute=(6, 18),
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrHourlyByHourAndSecond(self):
self._rrulestr_reverse_test(rrule(HOURLY,
count=3,
byhour=(6, 18),
bysecond=(6, 18),
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrHourlyByMinuteAndSecond(self):
self._rrulestr_reverse_test(rrule(HOURLY,
count=3,
byminute=(6, 18),
bysecond=(6, 18),
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrHourlyByHourAndMinuteAndSecond(self):
self._rrulestr_reverse_test(rrule(HOURLY,
count=3,
byhour=(6, 18),
byminute=(6, 18),
bysecond=(6, 18),
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrHourlyBySetPos(self):
self._rrulestr_reverse_test(rrule(HOURLY,
count=3,
byminute=(15, 45),
bysecond=(15, 45),
bysetpos=(3, -3),
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrMinutely(self):
self._rrulestr_reverse_test(rrule(MINUTELY,
count=3,
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrMinutelyInterval(self):
self._rrulestr_reverse_test(rrule(MINUTELY,
count=3,
interval=2,
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrMinutelyIntervalLarge(self):
self._rrulestr_reverse_test(rrule(MINUTELY,
count=3,
interval=1501,
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrMinutelyByMonth(self):
self._rrulestr_reverse_test(rrule(MINUTELY,
count=3,
bymonth=(1, 3),
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrMinutelyByMonthDay(self):
self._rrulestr_reverse_test(rrule(MINUTELY,
count=3,
bymonthday=(1, 3),
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrMinutelyByMonthAndMonthDay(self):
self._rrulestr_reverse_test(rrule(MINUTELY,
count=3,
bymonth=(1, 3),
bymonthday=(5, 7),
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrMinutelyByWeekDay(self):
self._rrulestr_reverse_test(rrule(MINUTELY,
count=3,
byweekday=(TU, TH),
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrMinutelyByNWeekDay(self):
self._rrulestr_reverse_test(rrule(MINUTELY,
count=3,
byweekday=(TU(1), TH(-1)),
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrMinutelyByMonthAndWeekDay(self):
self._rrulestr_reverse_test(rrule(MINUTELY,
count=3,
bymonth=(1, 3),
byweekday=(TU, TH),
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrMinutelyByMonthAndNWeekDay(self):
self._rrulestr_reverse_test(rrule(MINUTELY,
count=3,
bymonth=(1, 3),
byweekday=(TU(1), TH(-1)),
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrMinutelyByMonthDayAndWeekDay(self):
self._rrulestr_reverse_test(rrule(MINUTELY,
count=3,
bymonthday=(1, 3),
byweekday=(TU, TH),
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrMinutelyByMonthAndMonthDayAndWeekDay(self):
self._rrulestr_reverse_test(rrule(MINUTELY,
count=3,
bymonth=(1, 3),
bymonthday=(1, 3),
byweekday=(TU, TH),
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrMinutelyByYearDay(self):
self._rrulestr_reverse_test(rrule(MINUTELY,
count=4,
byyearday=(1, 100, 200, 365),
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrMinutelyByYearDayNeg(self):
self._rrulestr_reverse_test(rrule(MINUTELY,
count=4,
byyearday=(-365, -266, -166, -1),
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrMinutelyByMonthAndYearDay(self):
self._rrulestr_reverse_test(rrule(MINUTELY,
count=4,
bymonth=(4, 7),
byyearday=(1, 100, 200, 365),
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrMinutelyByMonthAndYearDayNeg(self):
self._rrulestr_reverse_test(rrule(MINUTELY,
count=4,
bymonth=(4, 7),
byyearday=(-365, -266, -166, -1),
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrMinutelyByWeekNo(self):
self._rrulestr_reverse_test(rrule(MINUTELY,
count=3,
byweekno=20,
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrMinutelyByWeekNoAndWeekDay(self):
self._rrulestr_reverse_test(rrule(MINUTELY,
count=3,
byweekno=1,
byweekday=MO,
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrMinutelyByWeekNoAndWeekDayLarge(self):
self._rrulestr_reverse_test(rrule(MINUTELY,
count=3,
byweekno=52,
byweekday=SU,
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrMinutelyByWeekNoAndWeekDayLast(self):
self._rrulestr_reverse_test(rrule(MINUTELY,
count=3,
byweekno=-1,
byweekday=SU,
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrMinutelyByWeekNoAndWeekDay53(self):
self._rrulestr_reverse_test(rrule(MINUTELY,
count=3,
byweekno=53,
byweekday=MO,
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrMinutelyByEaster(self):
self._rrulestr_reverse_test(rrule(MINUTELY,
count=3,
byeaster=0,
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrMinutelyByEasterPos(self):
self._rrulestr_reverse_test(rrule(MINUTELY,
count=3,
byeaster=1,
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrMinutelyByEasterNeg(self):
self._rrulestr_reverse_test(rrule(MINUTELY,
count=3,
byeaster=-1,
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrMinutelyByHour(self):
self._rrulestr_reverse_test(rrule(MINUTELY,
count=3,
byhour=(6, 18),
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrMinutelyByMinute(self):
self._rrulestr_reverse_test(rrule(MINUTELY,
count=3,
byminute=(6, 18),
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrMinutelyBySecond(self):
self._rrulestr_reverse_test(rrule(MINUTELY,
count=3,
bysecond=(6, 18),
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrMinutelyByHourAndMinute(self):
self._rrulestr_reverse_test(rrule(MINUTELY,
count=3,
byhour=(6, 18),
byminute=(6, 18),
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrMinutelyByHourAndSecond(self):
self._rrulestr_reverse_test(rrule(MINUTELY,
count=3,
byhour=(6, 18),
bysecond=(6, 18),
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrMinutelyByMinuteAndSecond(self):
self._rrulestr_reverse_test(rrule(MINUTELY,
count=3,
byminute=(6, 18),
bysecond=(6, 18),
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrMinutelyByHourAndMinuteAndSecond(self):
self._rrulestr_reverse_test(rrule(MINUTELY,
count=3,
byhour=(6, 18),
byminute=(6, 18),
bysecond=(6, 18),
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrMinutelyBySetPos(self):
    """A MINUTELY rule with BYSECOND and BYSETPOS survives the
    str() -> rrulestr() round trip."""
    rule = rrule(
        MINUTELY,
        count=3,
        bysecond=(15, 30, 45),
        bysetpos=(3, -3),
        dtstart=datetime(1997, 9, 2, 9, 0),
    )
    self._rrulestr_reverse_test(rule)
def testToStrSecondly(self):
self._rrulestr_reverse_test(rrule(SECONDLY,
count=3,
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrSecondlyInterval(self):
self._rrulestr_reverse_test(rrule(SECONDLY,
count=3,
interval=2,
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrSecondlyIntervalLarge(self):
self._rrulestr_reverse_test(rrule(SECONDLY,
count=3,
interval=90061,
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrSecondlyByMonth(self):
self._rrulestr_reverse_test(rrule(SECONDLY,
count=3,
bymonth=(1, 3),
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrSecondlyByMonthDay(self):
self._rrulestr_reverse_test(rrule(SECONDLY,
count=3,
bymonthday=(1, 3),
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrSecondlyByMonthAndMonthDay(self):
self._rrulestr_reverse_test(rrule(SECONDLY,
count=3,
bymonth=(1, 3),
bymonthday=(5, 7),
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrSecondlyByWeekDay(self):
self._rrulestr_reverse_test(rrule(SECONDLY,
count=3,
byweekday=(TU, TH),
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrSecondlyByNWeekDay(self):
self._rrulestr_reverse_test(rrule(SECONDLY,
count=3,
byweekday=(TU(1), TH(-1)),
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrSecondlyByMonthAndWeekDay(self):
self._rrulestr_reverse_test(rrule(SECONDLY,
count=3,
bymonth=(1, 3),
byweekday=(TU, TH),
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrSecondlyByMonthAndNWeekDay(self):
self._rrulestr_reverse_test(rrule(SECONDLY,
count=3,
bymonth=(1, 3),
byweekday=(TU(1), TH(-1)),
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrSecondlyByMonthDayAndWeekDay(self):
self._rrulestr_reverse_test(rrule(SECONDLY,
count=3,
bymonthday=(1, 3),
byweekday=(TU, TH),
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrSecondlyByMonthAndMonthDayAndWeekDay(self):
self._rrulestr_reverse_test(rrule(SECONDLY,
count=3,
bymonth=(1, 3),
bymonthday=(1, 3),
byweekday=(TU, TH),
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrSecondlyByYearDay(self):
self._rrulestr_reverse_test(rrule(SECONDLY,
count=4,
byyearday=(1, 100, 200, 365),
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrSecondlyByYearDayNeg(self):
self._rrulestr_reverse_test(rrule(SECONDLY,
count=4,
byyearday=(-365, -266, -166, -1),
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrSecondlyByMonthAndYearDay(self):
self._rrulestr_reverse_test(rrule(SECONDLY,
count=4,
bymonth=(4, 7),
byyearday=(1, 100, 200, 365),
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrSecondlyByMonthAndYearDayNeg(self):
self._rrulestr_reverse_test(rrule(SECONDLY,
count=4,
bymonth=(4, 7),
byyearday=(-365, -266, -166, -1),
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrSecondlyByWeekNo(self):
self._rrulestr_reverse_test(rrule(SECONDLY,
count=3,
byweekno=20,
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrSecondlyByWeekNoAndWeekDay(self):
self._rrulestr_reverse_test(rrule(SECONDLY,
count=3,
byweekno=1,
byweekday=MO,
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrSecondlyByWeekNoAndWeekDayLarge(self):
self._rrulestr_reverse_test(rrule(SECONDLY,
count=3,
byweekno=52,
byweekday=SU,
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrSecondlyByWeekNoAndWeekDayLast(self):
self._rrulestr_reverse_test(rrule(SECONDLY,
count=3,
byweekno=-1,
byweekday=SU,
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrSecondlyByWeekNoAndWeekDay53(self):
self._rrulestr_reverse_test(rrule(SECONDLY,
count=3,
byweekno=53,
byweekday=MO,
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrSecondlyByEaster(self):
self._rrulestr_reverse_test(rrule(SECONDLY,
count=3,
byeaster=0,
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrSecondlyByEasterPos(self):
self._rrulestr_reverse_test(rrule(SECONDLY,
count=3,
byeaster=1,
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrSecondlyByEasterNeg(self):
self._rrulestr_reverse_test(rrule(SECONDLY,
count=3,
byeaster=-1,
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrSecondlyByHour(self):
self._rrulestr_reverse_test(rrule(SECONDLY,
count=3,
byhour=(6, 18),
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrSecondlyByMinute(self):
self._rrulestr_reverse_test(rrule(SECONDLY,
count=3,
byminute=(6, 18),
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrSecondlyBySecond(self):
self._rrulestr_reverse_test(rrule(SECONDLY,
count=3,
bysecond=(6, 18),
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrSecondlyByHourAndMinute(self):
self._rrulestr_reverse_test(rrule(SECONDLY,
count=3,
byhour=(6, 18),
byminute=(6, 18),
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrSecondlyByHourAndSecond(self):
self._rrulestr_reverse_test(rrule(SECONDLY,
count=3,
byhour=(6, 18),
bysecond=(6, 18),
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrSecondlyByMinuteAndSecond(self):
self._rrulestr_reverse_test(rrule(SECONDLY,
count=3,
byminute=(6, 18),
bysecond=(6, 18),
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrSecondlyByHourAndMinuteAndSecond(self):
self._rrulestr_reverse_test(rrule(SECONDLY,
count=3,
byhour=(6, 18),
byminute=(6, 18),
bysecond=(6, 18),
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrSecondlyByHourAndMinuteAndSecondBug(self):
# This explores a bug found by Mathieu Bridon.
self._rrulestr_reverse_test(rrule(SECONDLY,
count=3,
bysecond=(0,),
byminute=(1,),
dtstart=datetime(2010, 3, 22, 12, 1)))
def testToStrWithWkSt(self):
self._rrulestr_reverse_test(rrule(WEEKLY,
count=3,
wkst=SU,
dtstart=datetime(1997, 9, 2, 9, 0)))
def testToStrLongIntegers(self):
if PY2: # There are no longs in python3
self._rrulestr_reverse_test(rrule(MINUTELY,
count=long(2),
interval=long(2),
bymonth=long(2),
byweekday=long(3),
byhour=long(6),
byminute=long(6),
bysecond=long(6),
dtstart=datetime(1997, 9, 2, 9, 0)))
self._rrulestr_reverse_test(rrule(YEARLY,
count=long(2),
bymonthday=long(5),
byweekno=long(2),
dtstart=datetime(1997, 9, 2, 9, 0)))
def testReplaceIfSet(self):
rr = rrule(YEARLY,
count=1,
bymonthday=5,
dtstart=datetime(1997, 1, 1))
newrr = rr.replace(bymonthday=6)
self.assertEqual(list(rr), [datetime(1997, 1, 5)])
self.assertEqual(list(newrr),
[datetime(1997, 1, 6)])
def testReplaceIfNotSet(self):
rr = rrule(YEARLY,
count=1,
dtstart=datetime(1997, 1, 1))
newrr = rr.replace(bymonthday=6)
self.assertEqual(list(rr), [datetime(1997, 1, 1)])
self.assertEqual(list(newrr),
[datetime(1997, 1, 6)])
@pytest.mark.rrule
@freeze_time(datetime(2018, 3, 6, 5, 36, tzinfo=tz.UTC))
def test_generated_aware_dtstart():
dtstart_exp = datetime(2018, 3, 6, 5, 36, tzinfo=tz.UTC)
UNTIL = datetime(2018, 3, 6, 8, 0, tzinfo=tz.UTC)
rule_without_dtstart = rrule(freq=HOURLY, until=UNTIL)
rule_with_dtstart = rrule(freq=HOURLY, dtstart=dtstart_exp, until=UNTIL)
assert list(rule_without_dtstart) == list(rule_with_dtstart)
@pytest.mark.rrule
@pytest.mark.rrulestr
@pytest.mark.xfail(reason="rrulestr loses time zone, gh issue #637")
@freeze_time(datetime(2018, 3, 6, 5, 36, tzinfo=tz.UTC))
def test_generated_aware_dtstart_rrulestr():
rrule_without_dtstart = rrule(freq=HOURLY,
until=datetime(2018, 3, 6, 8, 0,
tzinfo=tz.UTC))
rrule_r = rrulestr(str(rrule_without_dtstart))
assert list(rrule_r) == list(rrule_without_dtstart)
@pytest.mark.rruleset
| RRuleTest |
python | pydantic__pydantic | tests/test_pickle.py | {
"start": 8451,
"end": 9277
} | class ____(BaseModel):
model_config = ConfigDict(title='MyTitle')
def model_with_config_factory() -> type:
class NonImportableModelWithConfig(BaseModel):
model_config = ConfigDict(title='MyTitle')
return NonImportableModelWithConfig
@pytest.mark.parametrize(
'model_type,use_cloudpickle',
[
(ImportableModelWithConfig, False),
(ImportableModelWithConfig, True),
pytest.param(model_with_config_factory(), True, marks=cloudpickle_pypy_xfail),
],
)
def test_pickle_model_with_config(model_type: type, use_cloudpickle: bool):
if use_cloudpickle:
model_type = cloudpickle.loads(cloudpickle.dumps(model_type))
else:
model_type = pickle.loads(pickle.dumps(model_type))
assert model_type.model_config['title'] == 'MyTitle'
| ImportableModelWithConfig |
python | sympy__sympy | sympy/utilities/matchpy_connector.py | {
"start": 5942,
"end": 6222
} | class ____(_WildAbstract):
min_length = 0
fixed_size = False
def _get_srepr(expr):
s = srepr(expr)
s = re.sub(r"WildDot\('(\w+)'\)", r"\1", s)
s = re.sub(r"WildPlus\('(\w+)'\)", r"*\1", s)
s = re.sub(r"WildStar\('(\w+)'\)", r"*\1", s)
return s
| WildStar |
python | geekcomputers__Python | Python Programs/Python Program to Reverse a linked list.py | {
"start": 249,
"end": 1305
} | class ____:
# Function to initialize head
def __init__(self):
self.head = None
# Function to reverse the linked list
def reverse(self):
prev = None
current = self.head
while current is not None:
next = current.next
current.next = prev
prev = current
current = next
self.head = prev
# Function to insert a new node at the beginning
def push(self, new_data):
new_node = Node(new_data)
new_node.next = self.head
self.head = new_node
# Utility function to print the linked LinkedList
def printList(self):
temp = self.head
while temp:
print(temp.data)
temp = temp.next
# Driver program to test above functions
llist = LinkedList()
llist.push(20)
llist.push(4)
llist.push(15)
llist.push(85)
print("Given Linked List")
llist.printList()
llist.reverse()
print("\nReversed Linked List")
llist.printList()
# This code is contributed by Nikhil Kumar Singh(nickzuck_007)
| LinkedList |
python | ansible__ansible | test/lib/ansible_test/_internal/commands/integration/cloud/gcp.py | {
"start": 967,
"end": 1591
} | class ____(CloudEnvironment):
"""GCP cloud environment plugin. Updates integration test environment after delegation."""
def get_environment_config(self) -> CloudEnvironmentConfig:
"""Return environment configuration for use in the test environment after delegation."""
parser = configparser.ConfigParser()
parser.read(self.config_path)
ansible_vars = dict(
resource_prefix=self.resource_prefix,
)
ansible_vars.update(dict(parser.items('default')))
return CloudEnvironmentConfig(
ansible_vars=ansible_vars,
)
| GcpCloudEnvironment |
python | mlflow__mlflow | mlflow/genai/scorers/builtin_scorers.py | {
"start": 2440,
"end": 2553
} | class ____:
messages: list["ChatMessage"]
schema: type[pydantic.BaseModel]
@dataclass
| FieldExtractionConfig |
python | doocs__leetcode | solution/1300-1399/1326.Minimum Number of Taps to Open to Water a Garden/Solution.py | {
"start": 0,
"end": 452
} | class ____:
def minTaps(self, n: int, ranges: List[int]) -> int:
last = [0] * (n + 1)
for i, x in enumerate(ranges):
l, r = max(0, i - x), i + x
last[l] = max(last[l], r)
ans = mx = pre = 0
for i in range(n):
mx = max(mx, last[i])
if mx <= i:
return -1
if pre == i:
ans += 1
pre = mx
return ans
| Solution |
python | PyCQA__pylint | pylint/extensions/private_import.py | {
"start": 567,
"end": 11194
} | class ____(BaseChecker):
name = "import-private-name"
msgs = {
"C2701": (
"Imported private %s (%s)",
"import-private-name",
"Used when a private module or object prefixed with _ is imported. "
"PEP8 guidance on Naming Conventions states that public attributes with "
"leading underscores should be considered private.",
),
}
def __init__(self, linter: PyLinter) -> None:
BaseChecker.__init__(self, linter)
# A mapping of private names used as a type annotation to whether it is an acceptable import
self.all_used_type_annotations: dict[str, bool] = {}
self.populated_annotations = False
@utils.only_required_for_messages("import-private-name")
def visit_import(self, node: nodes.Import) -> None:
if utils.in_type_checking_block(node):
return
names = [name[0] for name in node.names]
private_names = self._get_private_imports(names)
private_names = self._get_type_annotation_names(node, private_names)
if private_names:
imported_identifier = "modules" if len(private_names) > 1 else "module"
private_name_string = ", ".join(private_names)
self.add_message(
"import-private-name",
node=node,
args=(imported_identifier, private_name_string),
confidence=HIGH,
)
@utils.only_required_for_messages("import-private-name")
def visit_importfrom(self, node: nodes.ImportFrom) -> None:
if utils.in_type_checking_block(node):
return
# Only check imported names if the module is external
if self.same_root_dir(node, node.modname):
return
names = [n[0] for n in node.names]
# Check the imported objects first. If they are all valid type annotations,
# the package can be private
private_names = self._get_type_annotation_names(node, names)
if not private_names:
return
# There are invalid imported objects, so check the name of the package
private_module_imports = self._get_private_imports([node.modname])
private_module_imports = self._get_type_annotation_names(
node, private_module_imports
)
if private_module_imports:
self.add_message(
"import-private-name",
node=node,
args=("module", private_module_imports[0]),
confidence=HIGH,
)
return # Do not emit messages on the objects if the package is private
private_names = self._get_private_imports(private_names)
if private_names:
imported_identifier = "objects" if len(private_names) > 1 else "object"
private_name_string = ", ".join(private_names)
self.add_message(
"import-private-name",
node=node,
args=(imported_identifier, private_name_string),
confidence=HIGH,
)
def _get_private_imports(self, names: list[str]) -> list[str]:
"""Returns the private names from input names by a simple string check."""
return [name for name in names if self._name_is_private(name)]
@staticmethod
def _name_is_private(name: str) -> bool:
"""Returns true if the name exists, starts with `_`, and if len(name) > 4
it is not a dunder, i.e. it does not begin and end with two underscores.
"""
return (
bool(name)
and name[0] == "_"
and not (len(name) > 4 and name[1] == "_" and name[-2:] == "__")
)
def _get_type_annotation_names(
self, node: nodes.Import | nodes.ImportFrom, names: list[str]
) -> list[str]:
"""Removes from names any names that are used as type annotations with no other
illegal usages.
"""
if names and not self.populated_annotations:
self._populate_type_annotations(node.root(), self.all_used_type_annotations)
self.populated_annotations = True
return [
n
for n in names
if n not in self.all_used_type_annotations
or (
n in self.all_used_type_annotations
and not self.all_used_type_annotations[n]
)
]
def _populate_type_annotations(
self, node: nodes.LocalsDictNodeNG, all_used_type_annotations: dict[str, bool]
) -> None:
"""Adds to `all_used_type_annotations` all names ever used as a type annotation
in the node's (nested) scopes and whether they are only used as annotation.
"""
for name in node.locals:
# If we find a private type annotation, make sure we do not mask illegal usages
private_name = None
# All the assignments using this variable that we might have to check for
# illegal usages later
name_assignments = []
for usage_node in node.locals[name]:
if isinstance(usage_node, nodes.AssignName) and isinstance(
usage_node.parent, (nodes.AnnAssign, nodes.Assign)
):
match assign_parent := usage_node.parent:
case nodes.AnnAssign():
name_assignments.append(assign_parent)
private_name = self._populate_type_annotations_annotation(
assign_parent.annotation,
all_used_type_annotations,
)
case nodes.Assign():
name_assignments.append(assign_parent)
if isinstance(usage_node, nodes.FunctionDef):
self._populate_type_annotations_function(
usage_node, all_used_type_annotations
)
if isinstance(usage_node, nodes.LocalsDictNodeNG):
self._populate_type_annotations(
usage_node, all_used_type_annotations
)
if private_name is not None:
# Found a new private annotation, make sure we are not accessing it elsewhere
all_used_type_annotations[private_name] = (
self._assignments_call_private_name(name_assignments, private_name)
)
def _populate_type_annotations_function(
self, node: nodes.FunctionDef, all_used_type_annotations: dict[str, bool]
) -> None:
"""Adds all names used as type annotation in the arguments and return type of
the function node into the dict `all_used_type_annotations`.
"""
if node.args and node.args.annotations:
for annotation in node.args.annotations:
self._populate_type_annotations_annotation(
annotation, all_used_type_annotations
)
if node.returns:
self._populate_type_annotations_annotation(
node.returns, all_used_type_annotations
)
def _populate_type_annotations_annotation(
self,
node: nodes.Attribute | nodes.Subscript | nodes.Name | None,
all_used_type_annotations: dict[str, bool],
) -> str | None:
"""Handles the possibility of an annotation either being a Name, i.e. just type,
or a Subscript e.g. `Optional[type]` or an Attribute, e.g. `pylint.lint.linter`.
"""
match node:
case nodes.Name(name=name) if name not in all_used_type_annotations:
all_used_type_annotations[name] = True
return name # type: ignore[no-any-return]
case nodes.Subscript(): # e.g. Optional[List[str]]
# slice is the next nested type
self._populate_type_annotations_annotation(
node.slice, all_used_type_annotations
)
# value is the current type name: could be a Name or Attribute
return self._populate_type_annotations_annotation(
node.value, all_used_type_annotations
)
case nodes.Attribute():
# An attribute is a type like `pylint.lint.pylinter`. node.expr is the next level
# up, could be another attribute
return self._populate_type_annotations_annotation(
node.expr, all_used_type_annotations
)
return None
@staticmethod
def _assignments_call_private_name(
assignments: list[nodes.AnnAssign | nodes.Assign], private_name: str
) -> bool:
"""Returns True if no assignments involve accessing `private_name`."""
if all(not assignment.value for assignment in assignments):
# Variable annotated but unassigned is not allowed because there may be
# possible illegal access elsewhere
return False
for assignment in assignments:
match assignment.value:
case (
nodes.Call(func=current_attribute)
| (nodes.Attribute() as current_attribute)
| nodes.Name(name=current_attribute)
):
pass
case _:
continue
while isinstance(current_attribute, (nodes.Attribute, nodes.Call)):
if isinstance(current_attribute, nodes.Call):
current_attribute = current_attribute.func
if not isinstance(current_attribute, nodes.Name):
current_attribute = current_attribute.expr
if (
isinstance(current_attribute, nodes.Name)
and current_attribute.name == private_name
):
return False
return True
@staticmethod
def same_root_dir(
node: nodes.Import | nodes.ImportFrom, import_mod_name: str
) -> bool:
"""Does the node's file's path contain the base name of `import_mod_name`?"""
if not import_mod_name: # from . import ...
return True
if node.level: # from .foo import ..., from ..bar import ...
return True
base_import_package = import_mod_name.split(".")[0]
return base_import_package in Path(node.root().file).parent.parts
def register(linter: PyLinter) -> None:
linter.register_checker(PrivateImportChecker(linter))
| PrivateImportChecker |
python | encode__django-rest-framework | rest_framework/mixins.py | {
"start": 982,
"end": 1458
} | class ____:
"""
List a queryset.
"""
def list(self, request, *args, **kwargs):
queryset = self.filter_queryset(self.get_queryset())
page = self.paginate_queryset(queryset)
if page is not None:
serializer = self.get_serializer(page, many=True)
return self.get_paginated_response(serializer.data)
serializer = self.get_serializer(queryset, many=True)
return Response(serializer.data)
| ListModelMixin |
python | altair-viz__altair | altair/vegalite/v6/schema/core.py | {
"start": 53717,
"end": 55310
} | class ____(VegaLiteSchema):
"""
AutoSizeParams schema wrapper.
Parameters
----------
contains : Literal['content', 'padding']
Determines how size calculation should be performed, one of ``"content"`` or
``"padding"``. The default setting (``"content"``) interprets the width and height
settings as the data rectangle (plotting) dimensions, to which padding is then
added. In contrast, the ``"padding"`` setting includes the padding within the view
size calculations, such that the width and height settings indicate the **total**
intended size of the view.
**Default value**: ``"content"``
resize : bool
A boolean flag indicating if autosize layout should be re-calculated on every view
update.
**Default value**: ``false``
type : :class:`AutosizeType`, Literal['pad', 'none', 'fit', 'fit-x', 'fit-y']
The sizing format type. One of ``"pad"``, ``"fit"``, ``"fit-x"``, ``"fit-y"``, or
``"none"``. See the `autosize type
<https://vega.github.io/vega-lite/docs/size.html#autosize>`__ documentation for
descriptions of each.
**Default value**: ``"pad"``
"""
_schema = {"$ref": "#/definitions/AutoSizeParams"}
def __init__(
self,
contains: Optional[Literal["content", "padding"]] = Undefined,
resize: Optional[bool] = Undefined,
type: Optional[SchemaBase | AutosizeType_T] = Undefined,
**kwds,
):
super().__init__(contains=contains, resize=resize, type=type, **kwds)
| AutoSizeParams |
python | dask__dask | dask/dataframe/dask_expr/_expr.py | {
"start": 39135,
"end": 40130
} | class ____(Blockwise):
_parameters = ["frame", "other"]
operation = M.combine_first
@functools.cached_property
def _meta(self):
return make_meta(
self.operation(
meta_nonempty(self.frame._meta),
meta_nonempty(self.other._meta),
),
)
def _simplify_up(self, parent, dependents):
if isinstance(parent, Projection):
columns = determine_column_projection(self, parent, dependents)
frame_columns = [col for col in self.frame.columns if col in columns]
other_columns = [col for col in self.other.columns if col in columns]
if (
self.frame.columns == frame_columns
and self.other.columns == other_columns
):
return
return type(parent)(
type(self)(self.frame[frame_columns], self.other[other_columns]),
*parent.operands[1:],
)
| CombineFirst |
python | viewflow__viewflow | tests/fsm/test_fsm__inheritance.py | {
"start": 173,
"end": 1491
} | class ____(Publication):
@Publication.state.transition(
source=ReviewState.NEW, target=ReviewState.APPROVED, permission=this.is_approver
)
def approve(self):
pass
@Publication.state.transition(
source=ReviewState.NEW, target=ReviewState.REJECTED, permission=this.is_approver
)
def reject(self):
pass
@Publication.state.transition(
source=ReviewState.HIDDEN,
target=ReviewState.PUBLISHED,
permission=this.is_superuser,
)
@Publication.state.transition(
source=ReviewState.APPROVED, target=ReviewState.PUBLISHED
)
def publish(self):
pass
@Publication.state.transition(
source=fsm.State.ANY, target=ReviewState.HIDDEN, conditions=[this.is_short]
)
def hide(self):
pass
@Publication.state.super()
def remove(self):
super().remove.original()
def is_superuser(self, user):
return user.is_superuser
def is_approver(self, user):
return user.is_staff
is_approver.unmet_message = _("You have no staff rights")
def is_short(self):
text_length = len(self.text)
return fsm.State.UNMET(
text_length < 1000,
_("Review is too shot, add %d symbols") % 1000 - text_length,
)
| GuestPublication |
python | bokeh__bokeh | src/bokeh/models/glyphs.py | {
"start": 14373,
"end": 15559
} | class ____(XYGlyph, LineGlyph, FillGlyph, HatchGlyph):
''' Render ellipses.
'''
# explicit __init__ to support Init signatures
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
__example__ = "examples/reference/models/Ellipse.py"
_args = ('x', 'y', 'width', 'height', 'angle')
x = NumberSpec(default=field("x"), help="""
The x-coordinates of the centers of the ellipses.
""")
y = NumberSpec(default=field("y"), help="""
The y-coordinates of the centers of the ellipses.
""")
width = DistanceSpec(default=field("width"), help="""
The widths of each ellipse.
""")
height = DistanceSpec(default=field("height"), help="""
The heights of each ellipse.
""")
angle = AngleSpec(default=0.0, help="""
The angle the ellipses are rotated from horizontal. [rad]
""")
line_props = Include(LineProps, help="""
The {prop} values for the ellipses.
""")
fill_props = Include(FillProps, help="""
The {prop} values for the ellipses.
""")
hatch_props = Include(HatchProps, help="""
The {prop} values for the ellipses.
""")
| Ellipse |
python | run-llama__llama_index | llama-index-packs/llama-index-packs-auto-merging-retriever/llama_index/packs/auto_merging_retriever/base.py | {
"start": 577,
"end": 2148
} | class ____(BaseLlamaPack):
"""
Auto-merging Retriever pack.
Build a hierarchical node graph from a set of documents, and
run our auto-merging retriever.
"""
def __init__(
self,
docs: List[Document] = None,
**kwargs: Any,
) -> None:
"""Init params."""
# create the sentence window node parser w/ default settings
self.node_parser = HierarchicalNodeParser.from_defaults()
nodes = self.node_parser.get_nodes_from_documents(docs)
leaf_nodes = get_leaf_nodes(nodes)
docstore = SimpleDocumentStore()
# insert nodes into docstore
docstore.add_documents(nodes)
# define storage context (will include vector store by default too)
storage_context = StorageContext.from_defaults(docstore=docstore)
self.base_index = VectorStoreIndex(leaf_nodes, storage_context=storage_context)
base_retriever = self.base_index.as_retriever(similarity_top_k=6)
self.retriever = AutoMergingRetriever(
base_retriever, storage_context, verbose=True
)
self.query_engine = RetrieverQueryEngine.from_args(self.retriever)
def get_modules(self) -> Dict[str, Any]:
"""Get modules."""
return {
"node_parser": self.node_parser,
"retriever": self.retriever,
"query_engine": self.query_engine,
}
def run(self, *args: Any, **kwargs: Any) -> Any:
"""Run the pipeline."""
return self.query_engine.query(*args, **kwargs)
| AutoMergingRetrieverPack |
python | numpy__numpy | benchmarks/benchmarks/bench_ma.py | {
"start": 6022,
"end": 6696
} | class ____(Benchmark):
param_names = ['margs', 'msize']
params = [[0, (0, 0), [0, -1]],
['small', 'big']]
def setup(self, margs, msize):
xs = np.random.uniform(-1, 1, 6).reshape(2, 3)
m1 = [[True, False, False], [False, False, True]]
xl = np.random.uniform(-1, 1, 100 * 100).reshape(100, 100)
maskx = xl > 0.8
self.nmxs = np.ma.array(xs, mask=m1)
self.nmxl = np.ma.array(xl, mask=maskx)
def time_methods_getitem(self, margs, msize):
if msize == 'small':
mdat = self.nmxs
elif msize == 'big':
mdat = self.nmxl
mdat.__getitem__(margs)
| MAMethodGetItem |
python | spyder-ide__spyder | external-deps/qtconsole/qtconsole/rich_jupyter_widget.py | {
"start": 1174,
"end": 1279
} | class ____(JupyterWidget):
"""Dummy class for config inheritance. Destroyed below."""
| RichIPythonWidget |
python | bokeh__bokeh | tests/unit/bokeh/plotting/test__legends.py | {
"start": 3897,
"end": 5353
} | class ____:
@pytest.mark.parametrize('arg', [1, 2.7, None, False, [], {}])
def test_bad_arg(self, arg: Any) -> None:
with pytest.raises(ValueError):
bpl._handle_legend_group(arg, Legend(), GlyphRenderer())
def test_bad_source(self) -> None:
with pytest.raises(ValueError):
bpl._handle_legend_group("foo", Legend(), GlyphRenderer())
with pytest.raises(ValueError):
bpl._handle_legend_group("foo", Legend(), GlyphRenderer(data_source=ColumnDataSource(data=dict(bar=[]))))
def test_items(self) -> None:
source = ColumnDataSource(data=dict(foo=[10,10,20,30,20,30,40]))
renderer = GlyphRenderer(data_source=source)
legend = Legend(items=[])
bpl._handle_legend_group("foo", legend, renderer)
assert len(legend.items) == 4
assert legend.items[0].label == value("10")
assert legend.items[0].renderers == [renderer]
assert legend.items[0].index == 0
assert legend.items[1].label == value("20")
assert legend.items[1].renderers == [renderer]
assert legend.items[1].index == 2
assert legend.items[2].label == value("30")
assert legend.items[2].renderers == [renderer]
assert legend.items[2].index == 3
assert legend.items[3].label == value("40")
assert legend.items[3].renderers == [renderer]
assert legend.items[3].index == 6
| Test__handle_legend_group |
python | sympy__sympy | sympy/core/function.py | {
"start": 68563,
"end": 73564
} | class ____(Expr):
"""
Lambda(x, expr) represents a lambda function similar to Python's
'lambda x: expr'. A function of several variables is written as
Lambda((x, y, ...), expr).
Examples
========
A simple example:
>>> from sympy import Lambda
>>> from sympy.abc import x
>>> f = Lambda(x, x**2)
>>> f(4)
16
For multivariate functions, use:
>>> from sympy.abc import y, z, t
>>> f2 = Lambda((x, y, z, t), x + y**z + t**z)
>>> f2(1, 2, 3, 4)
73
It is also possible to unpack tuple arguments:
>>> f = Lambda(((x, y), z), x + y + z)
>>> f((1, 2), 3)
6
A handy shortcut for lots of arguments:
>>> p = x, y, z
>>> f = Lambda(p, x + y*z)
>>> f(*p)
x + y*z
"""
is_Function = True
def __new__(cls, signature, expr) -> Lambda:
if iterable(signature) and not isinstance(signature, (tuple, Tuple)):
sympy_deprecation_warning(
"""
Using a non-tuple iterable as the first argument to Lambda
is deprecated. Use Lambda(tuple(args), expr) instead.
""",
deprecated_since_version="1.5",
active_deprecations_target="deprecated-non-tuple-lambda",
)
signature = tuple(signature)
_sig = signature if iterable(signature) else (signature,)
sig: Tuple = sympify(_sig) # type: ignore
cls._check_signature(sig)
if len(sig) == 1 and sig[0] == expr:
return S.IdentityFunction
return Expr.__new__(cls, sig, sympify(expr))
@classmethod
def _check_signature(cls, sig):
syms = set()
def rcheck(args):
for a in args:
if a.is_symbol:
if a in syms:
raise BadSignatureError("Duplicate symbol %s" % a)
syms.add(a)
elif isinstance(a, Tuple):
rcheck(a)
else:
raise BadSignatureError("Lambda signature should be only tuples"
" and symbols, not %s" % a)
if not isinstance(sig, Tuple):
raise BadSignatureError("Lambda signature should be a tuple not %s" % sig)
# Recurse through the signature:
rcheck(sig)
@property
def signature(self):
"""The expected form of the arguments to be unpacked into variables"""
return self._args[0]
@property
def expr(self):
"""The return value of the function"""
return self._args[1]
@property
def variables(self):
"""The variables used in the internal representation of the function"""
def _variables(args):
if isinstance(args, Tuple):
for arg in args:
yield from _variables(arg)
else:
yield args
return tuple(_variables(self.signature))
@property
def nargs(self):
from sympy.sets.sets import FiniteSet
return FiniteSet(len(self.signature))
bound_symbols = variables
@property
def free_symbols(self):
return self.expr.free_symbols - set(self.variables)
def __call__(self, *args):
n = len(args)
if n not in self.nargs: # Lambda only ever has 1 value in nargs
# XXX: exception message must be in exactly this format to
# make it work with NumPy's functions like vectorize(). See,
# for example, https://github.com/numpy/numpy/issues/1697.
# The ideal solution would be just to attach metadata to
# the exception and change NumPy to take advantage of this.
## XXX does this apply to Lambda? If not, remove this comment.
temp = ('%(name)s takes exactly %(args)s '
'argument%(plural)s (%(given)s given)')
raise BadArgumentsError(temp % {
'name': self,
'args': list(self.nargs)[0],
'plural': 's'*(list(self.nargs)[0] != 1),
'given': n})
d = self._match_signature(self.signature, args)
return self.expr.xreplace(d)
def _match_signature(self, sig, args):
symargmap = {}
def rmatch(pars, args):
for par, arg in zip(pars, args):
if par.is_symbol:
symargmap[par] = arg
elif isinstance(par, Tuple):
if not isinstance(arg, (tuple, Tuple)) or len(args) != len(pars):
raise BadArgumentsError("Can't match %s and %s" % (args, pars))
rmatch(par, arg)
rmatch(sig, args)
return symargmap
@property
def is_identity(self):
"""Return ``True`` if this ``Lambda`` is an identity function. """
return self.signature == self.expr
def _eval_evalf(self, prec):
return self.func(self.args[0], self.args[1].evalf(n=prec_to_dps(prec)))
| Lambda |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/data_structures/priority_queue_test.py | {
"start": 1323,
"end": 12841
} | class ____(test.TestCase):
def testRoundTripInsertReadOnceSorts(self):
with self.cached_session() as sess:
q = data_flow_ops.PriorityQueue(2000, (dtypes.string, dtypes.string), (
(), ()))
elem = np.random.randint(-5, 5, size=100).astype(np.int64)
side_value_0 = np.random.rand(100).astype(bytes)
side_value_1 = np.random.rand(100).astype(bytes)
enq_list = [
q.enqueue((e, constant_op.constant(v0), constant_op.constant(v1)))
for e, v0, v1 in zip(elem, side_value_0, side_value_1)
]
for enq in enq_list:
enq.run()
deq = q.dequeue_many(100)
deq_elem, deq_value_0, deq_value_1 = self.evaluate(deq)
allowed = {}
missed = set()
for e, v0, v1 in zip(elem, side_value_0, side_value_1):
if e not in allowed:
allowed[e] = set()
allowed[e].add((v0, v1))
missed.add((v0, v1))
self.assertAllEqual(deq_elem, sorted(elem))
for e, dv0, dv1 in zip(deq_elem, deq_value_0, deq_value_1):
self.assertTrue((dv0, dv1) in allowed[e])
missed.remove((dv0, dv1))
self.assertEqual(missed, set())
def testRoundTripInsertMultiThreadedReadOnceSorts(self):
# We need each thread to keep its own device stack or the device scopes
# won't be properly nested.
ops.get_default_graph().switch_to_thread_local()
with self.cached_session() as sess:
q = data_flow_ops.PriorityQueue(2000, (dtypes.string, dtypes.string), (
(), ()))
elem = np.random.randint(-5, 5, size=100).astype(np.int64)
side_value_0 = np.random.rand(100).astype(bytes)
side_value_1 = np.random.rand(100).astype(bytes)
enqueue_ops = [
q.enqueue((e, constant_op.constant(v0), constant_op.constant(v1)))
for e, v0, v1 in zip(elem, side_value_0, side_value_1)
]
# Run one producer thread for each element in elems.
def enqueue(enqueue_op):
self.evaluate(enqueue_op)
dequeue_op = q.dequeue_many(100)
enqueue_threads = [
self.checkedThread(
target=enqueue, args=(op,)) for op in enqueue_ops
]
for t in enqueue_threads:
t.start()
deq_elem, deq_value_0, deq_value_1 = self.evaluate(dequeue_op)
for t in enqueue_threads:
t.join()
allowed = {}
missed = set()
for e, v0, v1 in zip(elem, side_value_0, side_value_1):
if e not in allowed:
allowed[e] = set()
allowed[e].add((v0, v1))
missed.add((v0, v1))
self.assertAllEqual(deq_elem, sorted(elem))
for e, dv0, dv1 in zip(deq_elem, deq_value_0, deq_value_1):
self.assertTrue((dv0, dv1) in allowed[e])
missed.remove((dv0, dv1))
self.assertEqual(missed, set())
def testRoundTripFillsCapacityMultiThreadedEnqueueAndDequeue(self):
# We need each thread to keep its own device stack or the device scopes
# won't be properly nested.
ops.get_default_graph().switch_to_thread_local()
with self.cached_session() as sess:
q = data_flow_ops.PriorityQueue(10, (dtypes.int64), (()))
num_threads = 40
enqueue_counts = np.random.randint(10, size=num_threads)
enqueue_values = [
np.random.randint(
5, size=count) for count in enqueue_counts
]
enqueue_ops = [
q.enqueue_many((values, values)) for values in enqueue_values
]
shuffled_counts = copy.deepcopy(enqueue_counts)
random.shuffle(shuffled_counts)
dequeue_ops = [q.dequeue_many(count) for count in shuffled_counts]
all_enqueued_values = np.hstack(enqueue_values)
# Run one producer thread for each element in elems.
def enqueue(enqueue_op):
self.evaluate(enqueue_op)
dequeued = []
def dequeue(dequeue_op):
(dequeue_indices, dequeue_values) = self.evaluate(dequeue_op)
self.assertAllEqual(dequeue_indices, dequeue_values)
dequeued.extend(dequeue_indices)
enqueue_threads = [
self.checkedThread(
target=enqueue, args=(op,)) for op in enqueue_ops
]
dequeue_threads = [
self.checkedThread(
target=dequeue, args=(op,)) for op in dequeue_ops
]
# Dequeue and check
for t in dequeue_threads:
t.start()
for t in enqueue_threads:
t.start()
for t in enqueue_threads:
t.join()
for t in dequeue_threads:
t.join()
self.assertAllEqual(sorted(dequeued), sorted(all_enqueued_values))
def testRoundTripInsertManyMultiThreadedReadManyMultithreadedSorts(self):
# We need each thread to keep its own device stack or the device scopes
# won't be properly nested.
ops.get_default_graph().switch_to_thread_local()
with self.cached_session() as sess:
q = data_flow_ops.PriorityQueue(2000, (dtypes.int64), (()))
num_threads = 40
enqueue_counts = np.random.randint(10, size=num_threads)
enqueue_values = [
np.random.randint(
5, size=count) for count in enqueue_counts
]
enqueue_ops = [
q.enqueue_many((values, values)) for values in enqueue_values
]
shuffled_counts = copy.deepcopy(enqueue_counts)
random.shuffle(shuffled_counts)
dequeue_ops = [q.dequeue_many(count) for count in shuffled_counts]
all_enqueued_values = np.hstack(enqueue_values)
dequeue_wait = threading.Condition()
# Run one producer thread for each element in elems.
def enqueue(enqueue_op):
self.evaluate(enqueue_op)
def dequeue(dequeue_op, dequeued):
(dequeue_indices, dequeue_values) = self.evaluate(dequeue_op)
self.assertAllEqual(dequeue_indices, dequeue_values)
dequeue_wait.acquire()
dequeued.extend(dequeue_indices)
dequeue_wait.release()
dequeued = []
enqueue_threads = [
self.checkedThread(
target=enqueue, args=(op,)) for op in enqueue_ops
]
dequeue_threads = [
self.checkedThread(
target=dequeue, args=(op, dequeued)) for op in dequeue_ops
]
for t in enqueue_threads:
t.start()
for t in enqueue_threads:
t.join()
# Dequeue and check
for t in dequeue_threads:
t.start()
for t in dequeue_threads:
t.join()
# We can't guarantee full sorting because we can't guarantee
# that the dequeued.extend() call runs immediately after the
# self.evaluate() call. Here we're just happy everything came out.
self.assertAllEqual(set(dequeued), set(all_enqueued_values))
def testRoundTripInsertManyMultiThreadedReadOnceSorts(self):
# We need each thread to keep its own device stack or the device scopes
# won't be properly nested.
ops.get_default_graph().switch_to_thread_local()
with self.cached_session() as sess:
q = data_flow_ops.PriorityQueue(2000, (dtypes.string, dtypes.string), (
(), ()))
elem = np.random.randint(-5, 5, size=100).astype(np.int64)
side_value_0 = np.random.rand(100).astype(bytes)
side_value_1 = np.random.rand(100).astype(bytes)
batch = 5
enqueue_ops = [
q.enqueue_many((elem[i * batch:(i + 1) * batch],
side_value_0[i * batch:(i + 1) * batch],
side_value_1[i * batch:(i + 1) * batch]))
for i in range(20)
]
# Run one producer thread for each element in elems.
def enqueue(enqueue_op):
self.evaluate(enqueue_op)
dequeue_op = q.dequeue_many(100)
enqueue_threads = [
self.checkedThread(
target=enqueue, args=(op,)) for op in enqueue_ops
]
for t in enqueue_threads:
t.start()
deq_elem, deq_value_0, deq_value_1 = self.evaluate(dequeue_op)
for t in enqueue_threads:
t.join()
allowed = {}
missed = set()
for e, v0, v1 in zip(elem, side_value_0, side_value_1):
if e not in allowed:
allowed[e] = set()
allowed[e].add((v0, v1))
missed.add((v0, v1))
self.assertAllEqual(deq_elem, sorted(elem))
for e, dv0, dv1 in zip(deq_elem, deq_value_0, deq_value_1):
self.assertTrue((dv0, dv1) in allowed[e])
missed.remove((dv0, dv1))
self.assertEqual(missed, set())
def testRoundTripInsertOnceReadOnceSorts(self):
with self.cached_session() as sess:
q = data_flow_ops.PriorityQueue(2000, (dtypes.string, dtypes.string), (
(), ()))
elem = np.random.randint(-100, 100, size=1000).astype(np.int64)
side_value_0 = np.random.rand(1000).astype(bytes)
side_value_1 = np.random.rand(1000).astype(bytes)
q.enqueue_many((elem, side_value_0, side_value_1)).run()
deq = q.dequeue_many(1000)
deq_elem, deq_value_0, deq_value_1 = self.evaluate(deq)
allowed = {}
for e, v0, v1 in zip(elem, side_value_0, side_value_1):
if e not in allowed:
allowed[e] = set()
allowed[e].add((v0, v1))
self.assertAllEqual(deq_elem, sorted(elem))
for e, dv0, dv1 in zip(deq_elem, deq_value_0, deq_value_1):
self.assertTrue((dv0, dv1) in allowed[e])
def testRoundTripInsertOnceReadManySorts(self):
with self.cached_session():
q = data_flow_ops.PriorityQueue(2000, (dtypes.int64), (()))
elem = np.random.randint(-100, 100, size=1000).astype(np.int64)
q.enqueue_many((elem, elem)).run()
deq_values = np.hstack([q.dequeue_many(100)[0].eval() for _ in range(10)])
self.assertAllEqual(deq_values, sorted(elem))
def testRoundTripInsertOnceReadOnceLotsSorts(self):
with self.cached_session():
q = data_flow_ops.PriorityQueue(2000, (dtypes.int64), (()))
elem = np.random.randint(-100, 100, size=1000).astype(np.int64)
q.enqueue_many((elem, elem)).run()
dequeue_op = q.dequeue()
deq_values = np.hstack([dequeue_op[0].eval() for _ in range(1000)])
self.assertAllEqual(deq_values, sorted(elem))
def testInsertingNonInt64Fails(self):
with self.cached_session():
q = data_flow_ops.PriorityQueue(2000, (dtypes.string), (()))
with self.assertRaises(TypeError):
q.enqueue_many((["a", "b", "c"], ["a", "b", "c"])).run()
def testInsertingNonScalarFails(self):
with self.cached_session() as sess:
input_priority = array_ops.placeholder(dtypes.int64)
input_other = array_ops.placeholder(dtypes.string)
q = data_flow_ops.PriorityQueue(2000, (dtypes.string,), (()))
with self.assertRaisesRegex(
errors_impl.InvalidArgumentError,
r"Shape mismatch in tuple component 0. Expected \[\], got \[2\]"):
sess.run([q.enqueue((input_priority, input_other))],
feed_dict={
input_priority: np.array(
[0, 2], dtype=np.int64),
input_other: np.random.rand(3, 5).astype(bytes)
})
with self.assertRaisesRegex(
errors_impl.InvalidArgumentError,
r"Shape mismatch in tuple component 0. Expected \[2\], got \[2,2\]"):
sess.run(
[q.enqueue_many((input_priority, input_other))],
feed_dict={
input_priority: np.array(
[[0, 2], [3, 4]], dtype=np.int64),
input_other: np.random.rand(2, 3).astype(bytes)
})
if __name__ == "__main__":
test.main()
| PriorityQueueTest |
python | pytorch__pytorch | test/test_jit_fuser_te.py | {
"start": 101847,
"end": 106964
} | class ____(TestNNCOpInfoParent):
def setUp(self):
super(TestNNCOpInfoParent, self).setUp()
self.tensorexpr_options = TensorExprTestOptions()
def tearDown(self):
self.tensorexpr_options.restore()
super(TestNNCOpInfoParent, self).tearDown()
def te_compile(self, device, dtype, op):
if op.name in skip_ops:
return
sample_inputs_itr = op.sample_inputs(device, dtype, requires_grad=False)
for sample_input in sample_inputs_itr:
arg_values = [sample_input.input] + list(sample_input.args)
kwarg_values = sample_input.kwargs
param_names = []
param_values = []
fx_args = []
for idx, v in enumerate(arg_values):
if isinstance(v, torch.Tensor):
param_names.append(f"arg_{idx}")
param_values.append(v)
fx_args.append(param_names[-1])
else:
fx_args.append(f"{repr(v)}")
for k, v in kwarg_values.items():
if isinstance(v, torch.Tensor):
param_names.append(k)
param_values.append(v)
fx_args.append(f"{k} = {k}")
else:
fx_args.append(f"{k} = {repr(v)}")
code = f"""
def f({", ".join(param_names)}):
return op.op({", ".join(fx_args)})"""
g = {"torch": torch, "inf": math.inf, "op": op}
exec(code, g)
f = g["f"]
f.__module__ = "test"
out = f(*param_values)
ts_g = torch.jit.trace(f, param_values)
kernel = torch._C._te.TensorExprKernel(ts_g.graph)
correct_val = f(*param_values)
self.assertEqual(kernel.run(tuple(param_values)), correct_val)
self.assertEqual(kernel.fallback(tuple(param_values)), correct_val)
@onlyCPU
@unittest.skipIf(not LLVM_ENABLED, "Compiles with TensorExprKernel")
@ops(
[op for op in op_db if get_name(op) in works_list],
allowed_dtypes=(torch.float,),
)
def test_working(self, device, dtype, op):
self.te_compile(device, dtype, op)
@onlyCPU
@unittest.skipIf(not LLVM_ENABLED, "Compiles with TensorExprKernel")
@ops(
[op for op in op_db if get_name(op) in known_failures],
allowed_dtypes=(torch.float,),
)
def test_failures(self, device, dtype, op):
try:
self.te_compile(device, dtype, op)
except Exception as e:
pass
else:
raise RuntimeError(
"Expected test to fail. If it now works, move op into works_list"
)
@onlyCPU
@unittest.skipIf(not LLVM_ENABLED, "Compiles with TensorExprKernel")
@ops(
[op for op in op_db if get_name(op) not in works_list + known_failures],
allowed_dtypes=(torch.float,),
)
def test_unsupported(self, device, dtype, op):
if get_name(op) in skip_ops:
return
try:
with warnings.catch_warnings():
warnings.simplefilter("ignore", TracerWarning) # noqa: F821
self.te_compile(device, dtype, op)
except Exception as e:
pass
else:
raise RuntimeError(
"Expected test to fail. If it now works, move op into works_list"
)
@slowTest
@onlyCPU
@ops(
[op for op in op_db if get_name(op) not in known_failures],
dtypes=OpDTypes.supported,
)
def test_nnc_correctness(self, device, dtype, op):
if not op.supports_tracing:
self.skipTest("Requires tracing support")
with NoTracerWarnContextManager() as no_warn:
variant_sample_pairs = get_traced_sample_variant_pairs(device, dtype, op)
for variant, sample in variant_sample_pairs:
trace = create_traced_fn(self, variant, cache_traced_fn=True)
ref = variant(
*clone_inputs((sample.input, *sample.args)), **sample.kwargs
)
trace(*clone_inputs((sample.input, *sample.args)), **sample.kwargs)
val = trace(
*clone_inputs((sample.input, *sample.args)), **sample.kwargs
)
atol = 2e-1 if dtype == torch.bfloat16 else 1e-5
rtol = 2e-1 if dtype == torch.bfloat16 else 1e-5
self.assertEqual(ref, val, atol=atol, rtol=rtol)
# https://github.com/pytorch/pytorch/issues/35600
# each torch.jit.trace adds state to the _python_cu compilation unit
# since this test traces a lot of functions, out-of-memory can occur
# if the CU is not cleared.
torch.jit._state._python_cu.drop_all_functions()
# CPU fuser not currently used in fbcode
only_for = ("cuda") if IS_FBCODE else ("cpu", "cuda")
instantiate_device_type_tests(TestNNCOpInfo, globals(), only_for=only_for)
# Purpose of this class is to allow super() calls. (See TestNNCOpInfoParent)
| TestNNCOpInfo |
python | PrefectHQ__prefect | src/integrations/prefect-azure/prefect_azure/workers/container_instance.py | {
"start": 7404,
"end": 14106
} | class ____(BaseJobConfiguration):
"""
Configuration for an Azure Container Instance flow run.
"""
image: str = Field(default_factory=get_prefect_image_name)
resource_group_name: str = Field(default=...)
subscription_id: SecretStr = Field(default=...)
identities: Optional[List[str]] = Field(default=None)
entrypoint: Optional[str] = Field(default=DEFAULT_CONTAINER_ENTRYPOINT)
image_registry: DockerRegistry = Field(default=None)
cpu: float = Field(default=ACI_DEFAULT_CPU)
gpu_count: Optional[int] = Field(default=None)
gpu_sku: Optional[str] = Field(default=None)
memory: float = Field(default=ACI_DEFAULT_MEMORY)
subnet_ids: Optional[List[str]] = Field(default=None)
dns_servers: Optional[List[str]] = Field(default=None)
stream_output: bool = Field(default=False)
aci_credentials: AzureContainerInstanceCredentials = Field(
# default to an empty credentials object that will use
# `DefaultAzureCredential` to authenticate.
default_factory=AzureContainerInstanceCredentials
)
# Execution settings
task_start_timeout_seconds: int = Field(default=240)
task_watch_poll_interval: float = Field(default=5.0)
arm_template: Dict[str, Any] = Field(
json_schema_extra=dict(template=_get_default_arm_template())
)
keep_container_group: bool = Field(default=False)
def prepare_for_flow_run(
self,
flow_run: "FlowRun",
deployment: Optional["DeploymentResponse"] = None,
flow: Optional["Flow"] = None,
work_pool: Optional["WorkPool"] = None,
worker_name: Optional[str] = None,
):
"""
Prepares the job configuration for a flow run.
"""
super().prepare_for_flow_run(flow_run, deployment, flow, work_pool, worker_name)
# expectations:
# - the first resource in the template is the container group
# - the container group has a single container
container_group = self.arm_template["resources"][0]
container = container_group["properties"]["containers"][0]
# set the container's environment variables
container["properties"]["environmentVariables"] = self._get_arm_environment()
# convert the command from a string to a list, because that's what ACI expects
if self.command:
container["properties"]["command"] = self.command.split(" ")
self._add_image()
# Add the entrypoint if provided. Creating an ACI container with a
# command overrides the container's built-in entrypoint. Prefect base images
# use entrypoint.sh as the entrypoint, so we need to add to the beginning of
# the command list to avoid breaking EXTRA_PIP_PACKAGES installation on
# container startup.
if self.entrypoint:
container["properties"]["command"].insert(0, self.entrypoint)
if self.image_registry:
self._add_image_registry_credentials(self.image_registry)
if self.identities:
self._add_identities(self.identities)
if self.subnet_ids:
self._add_subnets(self.subnet_ids)
if self.dns_servers:
self._add_dns_servers(self.dns_servers)
def _add_image(self):
"""
Add the image to the arm template.
"""
try:
self.arm_template["resources"][0]["properties"]["containers"][0][
"properties"
]["image"] = self.image
except KeyError:
raise ValueError("Unable to add image due to invalid job ARM template.")
def _add_image_registry_credentials(self, image_registry: DockerRegistry):
"""
Create image registry credentials based on the type of image_registry provided.
Args:
image_registry: An instance of a DockerRegistry or
ACRManagedIdentity object.
"""
if not image_registry:
return
if isinstance(image_registry, ACRManagedIdentity):
self.arm_template["resources"][0]["properties"][
"imageRegistryCredentials"
] = [
{
"server": image_registry.registry_url,
"identity": image_registry.identity,
}
]
elif isinstance(image_registry, DockerRegistryCredentials):
self.arm_template["resources"][0]["properties"][
"imageRegistryCredentials"
] = [
{
"server": image_registry.registry_url,
"username": image_registry.username,
"password": image_registry.password.get_secret_value(),
}
]
def _add_identities(self, identities: List[str]):
"""
Add identities to the container group.
Args:
identities: A list of user-assigned identities to add to
the container group.
"""
self.arm_template["resources"][0]["identity"] = {
"type": "UserAssigned",
"userAssignedIdentities": {
# note: For user-assigned identities, the key is the resource ID
# of the identity and the value is an empty object. See:
# https://docs.microsoft.com/en-us/azure/templates/microsoft.containerinstance/containergroups?tabs=bicep#identity-object # noqa
identity: {}
for identity in identities
},
}
def _add_subnets(self, subnet_ids: List[str]):
"""
Add subnets to the container group.
Args:
subnet_ids: A list of subnet ids to add to the container group.
"""
self.arm_template["resources"][0]["properties"]["subnetIds"] = [
{"id": subnet_id} for subnet_id in subnet_ids
]
def _add_dns_servers(self, dns_servers: List[str]):
"""
Add dns servers to the container group.
Args:
dns_servers: A list of dns servers to add to the container group.
"""
self.arm_template["resources"][0]["properties"]["dnsConfig"] = {
"nameServers": dns_servers
}
def _get_arm_environment(self):
"""
Returns the environment variables to pass to the ARM template.
"""
env = {**self._base_environment(), **self.env}
azure_env = [
(
{"name": key, "secureValue": value}
if key in ENV_SECRETS
else {"name": key, "value": value}
)
for key, value in env.items()
]
return azure_env
| AzureContainerJobConfiguration |
python | apache__airflow | providers/openlineage/tests/unit/openlineage/extractors/test_base.py | {
"start": 5601,
"end": 5955
} | class ____(BaseOperator):
def execute(self, context) -> Any:
pass
def get_openlineage_facets_on_complete(self, task_instance) -> OperatorLineage:
return OperatorLineage(
inputs=INPUTS,
outputs=OUTPUTS,
run_facets=RUN_FACETS,
job_facets=FINISHED_FACETS,
)
| OperatorWithoutStart |
python | apache__airflow | providers/amazon/src/airflow/providers/amazon/aws/hooks/sagemaker_unified_studio.py | {
"start": 1221,
"end": 7985
} | class ____(BaseHook):
"""
Interact with Sagemaker Unified Studio Workflows.
This hook provides a wrapper around the Sagemaker Workflows Notebook Execution API.
Examples:
.. code-block:: python
from airflow.providers.amazon.aws.hooks.sagemaker_unified_studio import SageMakerNotebookHook
notebook_hook = SageMakerNotebookHook(
input_config={"input_path": "path/to/notebook.ipynb", "input_params": {"param1": "value1"}},
output_config={"output_uri": "folder/output/location/prefix", "output_formats": "NOTEBOOK"},
execution_name="notebook_execution",
waiter_delay=10,
waiter_max_attempts=1440,
)
:param execution_name: The name of the notebook job to be executed, this is same as task_id.
:param input_config: Configuration for the input file.
Example: {'input_path': 'folder/input/notebook.ipynb', 'input_params': {'param1': 'value1'}}
:param output_config: Configuration for the output format. It should include an output_formats parameter to specify the output format.
Example: {'output_formats': ['NOTEBOOK']}
:param compute: compute configuration to use for the notebook execution. This is a required attribute
if the execution is on a remote compute.
Example: { "instance_type": "ml.m5.large", "volume_size_in_gb": 30, "volume_kms_key_id": "", "image_uri": "string", "container_entrypoint": [ "string" ]}
:param termination_condition: conditions to match to terminate the remote execution.
Example: { "MaxRuntimeInSeconds": 3600 }
:param tags: tags to be associated with the remote execution runs.
Example: { "md_analytics": "logs" }
:param waiter_delay: Interval in seconds to check the task execution status.
:param waiter_max_attempts: Number of attempts to wait before returning FAILED.
"""
def __init__(
self,
execution_name: str,
input_config: dict | None = None,
output_config: dict | None = None,
compute: dict | None = None,
termination_condition: dict | None = None,
tags: dict | None = None,
waiter_delay: int = 10,
waiter_max_attempts: int = 1440,
*args,
**kwargs,
):
super().__init__(*args, **kwargs)
self._sagemaker_studio = SageMakerStudioAPI(self._get_sagemaker_studio_config())
self.execution_name = execution_name
self.input_config = input_config or {}
self.output_config = output_config or {"output_formats": ["NOTEBOOK"]}
self.compute = compute
self.termination_condition = termination_condition or {}
self.tags = tags or {}
self.waiter_delay = waiter_delay
self.waiter_max_attempts = waiter_max_attempts
def _get_sagemaker_studio_config(self):
config = ClientConfig()
config.overrides["execution"] = {"local": is_local_runner()}
return config
def _format_start_execution_input_config(self):
config = {
"notebook_config": {
"input_path": self.input_config.get("input_path"),
"input_parameters": self.input_config.get("input_params"),
},
}
return config
def _format_start_execution_output_config(self):
output_formats = self.output_config.get("output_formats")
config = {
"notebook_config": {
"output_formats": output_formats,
}
}
return config
def start_notebook_execution(self):
start_execution_params = {
"execution_name": self.execution_name,
"execution_type": "NOTEBOOK",
"input_config": self._format_start_execution_input_config(),
"output_config": self._format_start_execution_output_config(),
"termination_condition": self.termination_condition,
"tags": self.tags,
}
if self.compute:
start_execution_params["compute"] = self.compute
else:
start_execution_params["compute"] = {"instance_type": "ml.m4.xlarge"}
print(start_execution_params)
return self._sagemaker_studio.execution_client.start_execution(**start_execution_params)
def wait_for_execution_completion(self, execution_id, context):
wait_attempts = 0
while wait_attempts < self.waiter_max_attempts:
wait_attempts += 1
time.sleep(self.waiter_delay)
response = self._sagemaker_studio.execution_client.get_execution(execution_id=execution_id)
error_message = response.get("error_details", {}).get("error_message")
status = response["status"]
if "files" in response:
self._set_xcom_files(response["files"], context)
if "s3_path" in response:
self._set_xcom_s3_path(response["s3_path"], context)
ret = self._handle_state(execution_id, status, error_message)
if ret:
return ret
# If timeout, handle state FAILED with timeout message
return self._handle_state(execution_id, "FAILED", "Execution timed out")
def _set_xcom_files(self, files, context):
if not context:
error_message = "context is required"
raise AirflowException(error_message)
for file in files:
context["ti"].xcom_push(
key=f"{file['display_name']}.{file['file_format']}",
value=file["file_path"],
)
def _set_xcom_s3_path(self, s3_path, context):
if not context:
error_message = "context is required"
raise AirflowException(error_message)
context["ti"].xcom_push(
key="s3_path",
value=s3_path,
)
def _handle_state(self, execution_id, status, error_message):
finished_states = ["COMPLETED"]
in_progress_states = ["IN_PROGRESS", "STOPPING"]
if status in in_progress_states:
info_message = f"Execution {execution_id} is still in progress with state:{status}, will check for a terminal status again in {self.waiter_delay}"
self.log.info(info_message)
return None
execution_message = f"Exiting Execution {execution_id} State: {status}"
if status in finished_states:
self.log.info(execution_message)
return {"Status": status, "ExecutionId": execution_id}
log_error_message = f"Execution {execution_id} failed with error: {error_message}"
self.log.error(log_error_message)
if error_message == "":
error_message = execution_message
raise AirflowException(error_message)
| SageMakerNotebookHook |
python | django__django | tests/generic_views/views.py | {
"start": 7599,
"end": 7682
} | class ____(BookSigningConfig, generic.DayArchiveView):
pass
| BookSigningDayArchive |
python | ethereum__web3.py | web3/types.py | {
"start": 14043,
"end": 14206
} | class ____(TypedDict):
returnData: HexBytes
logs: Sequence[LogReceipt]
gasUsed: int
status: int
error: NotRequired[RPCError]
| SimulateV1CallResult |
python | tensorflow__tensorflow | tensorflow/python/client/session.py | {
"start": 23335,
"end": 59936
} | class ____(SessionInterface):
"""A class for interacting with a TensorFlow computation.
The BaseSession enables incremental graph building with inline
execution of Operations and evaluation of Tensors.
"""
def __init__(self, target='', graph=None, config=None):
"""Constructs a new TensorFlow session.
Args:
target: (Optional) The TensorFlow execution engine to connect to.
graph: (Optional) The graph to be used. If this argument is None, the
default graph will be used.
config: (Optional) ConfigProto proto used to configure the session. If no
config is specified, the global default will be used. The global default
can be configured via the tf.config APIs.
Raises:
tf.errors.OpError: Or one of its subclasses if an error occurs while
creating the TensorFlow session.
TypeError: If one of the arguments has the wrong type.
"""
_python_session_create_counter.get_cell().increase_by(1)
if graph is None:
self._graph = ops.get_default_graph()
else:
if not isinstance(graph, ops.Graph):
raise TypeError('Argument `graph` must be a tf.Graph, but got '
f'"{type(graph).__name__}"')
self._graph = graph
self._closed = False
if target is not None:
try:
self._target = compat.as_bytes(target)
except TypeError:
if isinstance(target, config_pb2.ConfigProto):
raise TypeError('Argument `target` must be a string, but got '
f'"{type(target).__name__}". Did you do '
'"Session(config)" instead of '
'"Session(config=config)"?')
raise TypeError('Argument `target` must be a string, but got '
f'"{type(target).__name__}"')
else:
self._target = None
self._delete_lock = threading.Lock()
self._dead_handles = []
if config is None:
config = context.context().config
if not isinstance(config, config_pb2.ConfigProto):
raise TypeError('Argument `config` must be a tf.ConfigProto, but got '
f'"{type(config).__name__}"')
if (mixed_precision_global_state.is_mixed_precision_graph_rewrite_enabled()
and config.graph_options.rewrite_options.auto_mixed_precision !=
rewriter_config_pb2.RewriterConfig.OFF):
new_config = config_pb2.ConfigProto()
new_config.CopyFrom(config)
new_config.graph_options.rewrite_options.auto_mixed_precision = (
rewriter_config_pb2.RewriterConfig.ON)
config = new_config
elif (config.graph_options.rewrite_options.auto_mixed_precision !=
rewriter_config_pb2.RewriterConfig.ON):
mixed_precision_global_state.set_non_mixed_precision_session_created(True)
self._config = config
self._add_shapes = config.graph_options.infer_shapes
self._session = None
opts = tf_session.TF_NewSessionOptions(target=self._target, config=config)
try:
# pylint: disable=protected-access
with self._graph._c_graph.get() as c_graph:
self._session = tf_session.TF_NewSessionRef(c_graph, opts)
# pylint: enable=protected-access
finally:
tf_session.TF_DeleteSessionOptions(opts)
def list_devices(self):
"""Lists available devices in this session.
```python
devices = sess.list_devices()
for d in devices:
print(d.name)
```
Where:
Each element in the list has the following properties
name: A string with the full name of the device. ex:
`/job:worker/replica:0/task:3/device:CPU:0`
device_type: The type of the device (e.g. `CPU`, `GPU`, `TPU`.)
memory_limit: The maximum amount of memory available on the device.
Note: depending on the device, it is possible the usable memory could
be substantially less.
Raises:
tf.errors.OpError: If it encounters an error (e.g. session is in an
invalid state, or network errors occur).
Returns:
A list of devices in the session.
"""
raw_device_list = tf_session.TF_SessionListDevices(self._session)
device_list = []
size = tf_session.TF_DeviceListCount(raw_device_list)
for i in range(size):
name = tf_session.TF_DeviceListName(raw_device_list, i)
device_type = tf_session.TF_DeviceListType(raw_device_list, i)
memory = tf_session.TF_DeviceListMemoryBytes(raw_device_list, i)
incarnation = tf_session.TF_DeviceListIncarnation(raw_device_list, i)
device_list.append(
_DeviceAttributes(name, device_type, memory, incarnation))
tf_session.TF_DeleteDeviceList(raw_device_list)
return device_list
def close(self):
"""Closes this session.
Calling this method frees all resources associated with the session.
Raises:
tf.errors.OpError: Or one of its subclasses if an error occurs while
closing the TensorFlow session.
"""
if self._session and not self._closed:
self._closed = True
tf_session.TF_CloseSession(self._session)
def __del__(self):
# cleanly ignore all exceptions
try:
self.close()
except Exception: # pylint: disable=broad-except
pass
if self._session is not None:
try:
tf_session.TF_DeleteSession(self._session)
except (AttributeError, TypeError):
# At shutdown, `c_api_util`, `tf_session`, or
# `tf_session.TF_DeleteSession` may have been garbage collected, causing
# the above method calls to fail. In this case, silently leak since the
# program is about to terminate anyway.
pass
self._session = None
@property
def graph(self):
"""The graph that was launched in this session."""
return self._graph
@property
def graph_def(self):
"""A serializable version of the underlying TensorFlow graph.
Returns:
A graph_pb2.GraphDef proto containing nodes for all of the Operations in
the underlying TensorFlow graph.
"""
return self._graph.as_graph_def(add_shapes=self._add_shapes)
@property
def sess_str(self):
return self._target
def as_default(self):
"""Returns a context manager that makes this object the default session.
Use with the `with` keyword to specify that calls to
`tf.Operation.run` or `tf.Tensor.eval` should be executed in
this session.
```python
c = tf.constant(..)
sess = tf.compat.v1.Session()
with sess.as_default():
assert tf.compat.v1.get_default_session() is sess
print(c.eval())
```
To get the current default session, use `tf.compat.v1.get_default_session`.
*N.B.* The `as_default` context manager *does not* close the
session when you exit the context, and you must close the session
explicitly.
```python
c = tf.constant(...)
sess = tf.compat.v1.Session()
with sess.as_default():
print(c.eval())
# ...
with sess.as_default():
print(c.eval())
sess.close()
```
Alternatively, you can use `with tf.compat.v1.Session():` to create a
session that is automatically closed on exiting the context,
including when an uncaught exception is raised.
*N.B.* The default session is a property of the current thread. If you
create a new thread, and wish to use the default session in that
thread, you must explicitly add a `with sess.as_default():` in that
thread's function.
*N.B.* Entering a `with sess.as_default():` block does not affect
the current default graph. If you are using multiple graphs, and
`sess.graph` is different from the value of
`tf.compat.v1.get_default_graph`, you must explicitly enter a
`with sess.graph.as_default():` block to make `sess.graph` the default
graph.
Returns:
A context manager using this session as the default session.
"""
return stack.default_session(self)
def run(self, fetches, feed_dict=None, options=None, run_metadata=None):
"""Runs operations and evaluates tensors in `fetches`.
This method runs one "step" of TensorFlow computation, by
running the necessary graph fragment to execute every `Operation`
and evaluate every `Tensor` in `fetches`, substituting the values in
`feed_dict` for the corresponding input values.
The `fetches` argument may be a single graph element, or an arbitrarily
nested list, tuple, namedtuple, dict, or OrderedDict containing graph
elements at its leaves. A graph element can be one of the following types:
* A `tf.Operation`.
The corresponding fetched value will be `None`.
* A `tf.Tensor`.
The corresponding fetched value will be a numpy ndarray containing the
value of that tensor.
* A `tf.sparse.SparseTensor`.
The corresponding fetched value will be a
`tf.compat.v1.SparseTensorValue`
containing the value of that sparse tensor.
* A `get_tensor_handle` op. The corresponding fetched value will be a
numpy ndarray containing the handle of that tensor.
* A `string` which is the name of a tensor or operation in the graph.
The value returned by `run()` has the same shape as the `fetches` argument,
where the leaves are replaced by the corresponding values returned by
TensorFlow.
Example:
```python
a = tf.constant([10, 20])
b = tf.constant([1.0, 2.0])
# 'fetches' can be a singleton
v = session.run(a)
# v is the numpy array [10, 20]
# 'fetches' can be a list.
v = session.run([a, b])
# v is a Python list with 2 numpy arrays: the 1-D array [10, 20] and the
# 1-D array [1.0, 2.0]
# 'fetches' can be arbitrary lists, tuples, namedtuple, dicts:
MyData = collections.namedtuple('MyData', ['a', 'b'])
v = session.run({'k1': MyData(a, b), 'k2': [b, a]})
# v is a dict with
# v['k1'] is a MyData namedtuple with 'a' (the numpy array [10, 20]) and
# 'b' (the numpy array [1.0, 2.0])
# v['k2'] is a list with the numpy array [1.0, 2.0] and the numpy array
# [10, 20].
```
The optional `feed_dict` argument allows the caller to override
the value of tensors in the graph. Each key in `feed_dict` can be
one of the following types:
* If the key is a `tf.Tensor`, the
value may be a Python scalar, string, list, or numpy ndarray
that can be converted to the same `dtype` as that
tensor. Additionally, if the key is a
`tf.compat.v1.placeholder`, the shape of
the value will be checked for compatibility with the placeholder.
* If the key is a
`tf.sparse.SparseTensor`,
the value should be a
`tf.compat.v1.SparseTensorValue`.
* If the key is a nested tuple of `Tensor`s or `SparseTensor`s, the value
should be a nested tuple with the same structure that maps to their
corresponding values as above.
Each value in `feed_dict` must be convertible to a numpy array of the dtype
of the corresponding key.
The optional `options` argument expects a [`RunOptions`] proto. The options
allow controlling the behavior of this particular step (e.g. turning tracing
on).
The optional `run_metadata` argument expects a [`RunMetadata`] proto. When
appropriate, the non-Tensor output of this step will be collected there. For
example, when users turn on tracing in `options`, the profiled info will be
collected into this argument and passed back.
Args:
fetches: A single graph element, a list of graph elements, or a dictionary
whose values are graph elements or lists of graph elements (described
above).
feed_dict: A dictionary that maps graph elements to values (described
above).
options: A [`RunOptions`] protocol buffer
run_metadata: A [`RunMetadata`] protocol buffer
Returns:
Either a single value if `fetches` is a single graph element, or
a list of values if `fetches` is a list, or a dictionary with the
same keys as `fetches` if that is a dictionary (described above).
Order in which `fetches` operations are evaluated inside the call
is undefined.
Raises:
RuntimeError: If this `Session` is in an invalid state (e.g. has been
closed).
TypeError: If `fetches` or `feed_dict` keys are of an inappropriate type.
ValueError: If `fetches` or `feed_dict` keys are invalid or refer to a
`Tensor` that doesn't exist.
"""
options_ptr = tf_session.TF_NewBufferFromString(
compat.as_bytes(options.SerializeToString())) if options else None
run_metadata_ptr = tf_session.TF_NewBuffer() if run_metadata else None
try:
result = self._run(None, fetches, feed_dict, options_ptr,
run_metadata_ptr)
if run_metadata:
proto_data = tf_session.TF_GetBuffer(run_metadata_ptr)
run_metadata.ParseFromString(compat.as_bytes(proto_data))
finally:
if run_metadata_ptr:
tf_session.TF_DeleteBuffer(run_metadata_ptr)
if options:
tf_session.TF_DeleteBuffer(options_ptr)
return result
@deprecation.deprecated(
    '2023-06-01',
    # NOTE: each fragment ends with a space; Python concatenates adjacent
    # string literals verbatim, and the previous text rendered as
    # "adding newfunctionality" / "dependingon this function".
    'This function is deprecated and we do not expect adding new '
    'functionality to it. Please do not have your code depending '
    'on this function.',
)
def partial_run(self, handle, fetches, feed_dict=None):
  """Continues the execution with more feeds and fetches.

  NOTE: This function is deprecated and we do not expect adding new
  functionality to it. Please do not have your code depending on this
  function.

  This is EXPERIMENTAL and subject to change.

  To use partial execution, a user first calls `partial_run_setup()` and
  then a sequence of `partial_run()`. `partial_run_setup` specifies the
  list of feeds and fetches that will be used in the subsequent
  `partial_run` calls.

  The optional `feed_dict` argument allows the caller to override
  the value of tensors in the graph. See run() for more information.

  Below is a simple example:

  ```python
  a = array_ops.placeholder(dtypes.float32, shape=[])
  b = array_ops.placeholder(dtypes.float32, shape=[])
  c = array_ops.placeholder(dtypes.float32, shape=[])
  r1 = math_ops.add(a, b)
  r2 = math_ops.multiply(r1, c)

  h = sess.partial_run_setup([r1, r2], [a, b, c])
  res = sess.partial_run(h, r1, feed_dict={a: 1, b: 2})
  res = sess.partial_run(h, r2, feed_dict={c: res})
  ```

  Args:
    handle: A handle for a sequence of partial runs.
    fetches: A single graph element, a list of graph elements, or a dictionary
      whose values are graph elements or lists of graph elements (see
      documentation for `run`).
    feed_dict: A dictionary that maps graph elements to values (described
      above).

  Returns:
    Either a single value if `fetches` is a single graph element, or
    a list of values if `fetches` is a list, or a dictionary with the
    same keys as `fetches` if that is a dictionary
    (see documentation for `run`).

  Raises:
    tf.errors.OpError: Or one of its subclasses on error.
  """
  # TODO(touts): Support feeding and fetching the same tensor.
  return self._run(handle, fetches, feed_dict, None, None)
@deprecation.deprecated(
    '2023-06-01',
    # NOTE: trailing spaces added so the concatenated message reads
    # "adding new functionality" / "depending on", not "newfunctionality".
    'This function is deprecated and we do not expect adding new '
    'functionality to it. Please do not have your code depending '
    'on this function.',
)
def partial_run_setup(self, fetches, feeds=None):
  """Sets up a graph with feeds and fetches for partial run.

  NOTE: This function is deprecated and we do not expect adding new
  functionality to it. Please do not have your code depending on this
  function.

  This is EXPERIMENTAL and subject to change.

  Note that contrary to `run`, `feeds` only specifies the graph elements.
  The tensors will be supplied by the subsequent `partial_run` calls.

  Args:
    fetches: A single graph element, or a list of graph elements.
    feeds: A single graph element, or a list of graph elements.

  Returns:
    A handle for partial run.

  Raises:
    RuntimeError: If this `Session` is in an invalid state (e.g. has been
      closed).
    TypeError: If `fetches` or `feed_dict` keys are of an inappropriate type.
    tf.errors.OpError: Or one of its subclasses if a TensorFlow error happens.
  """

  def _feed_fn(feed):
    # Expand a feed key via the registered type-specific expansion
    # (e.g. a composite tensor expands to its component tensors).
    for tensor_type, _, _, feed_fn in _REGISTERED_EXPANSIONS:
      if isinstance(feed, tensor_type):
        return feed_fn(feed)
    raise TypeError(f'Feed argument {feed} has invalid type '
                    f'"{type(feed).__name__}"')

  # Check session.
  if self._closed:
    raise RuntimeError('Attempted to use a closed Session.')
  if self.graph.version == 0:
    raise RuntimeError('The Session graph is empty. Add operations to the '
                       'graph before calling run().')

  if feeds is None:
    feeds = []
  # Create request.
  feed_list = []

  # Validate and process feed_list.
  is_list_feed = isinstance(feeds, (list, tuple))
  if not is_list_feed:
    feeds = [feeds]
  for feed in feeds:
    for subfeed in _feed_fn(feed):
      try:
        subfeed_t = self.graph.as_graph_element(
            subfeed, allow_tensor=True, allow_operation=False)
        # pylint: disable=protected-access
        feed_list.append(subfeed_t._as_tf_output())
        # pylint: enable=protected-access
      except Exception as e:
        e.message = ('Cannot interpret argument `feed` key as Tensor: '
                     f'{e.message}')
        e.args = (e.message,)
        raise e

  # Validate and process fetches.
  # TODO(touts): Support feeding and fetching the same tensor.
  fetch_handler = _FetchHandler(self._graph, fetches, {})

  # Set up a graph with feeds and fetches for partial run.
  def _setup_fn(session, feed_list, fetch_list, target_list):
    self._extend_graph()
    return tf_session.TF_SessionPRunSetup_wrapper(session, feed_list,
                                                  fetch_list, target_list)

  # pylint: disable=protected-access
  final_fetches = [t._as_tf_output() for t in fetch_handler.fetches()]
  final_targets = [op._c_op for op in fetch_handler.targets()]
  # pylint: enable=protected-access

  return self._do_call(_setup_fn, self._session, feed_list, final_fetches,
                       final_targets)
def _run(self, handle, fetches, feed_dict, options, run_metadata):
  """Perform either run or partial_run, depending the presence of `handle`."""

  def _feed_fn(feed, feed_val):
    # Expand a (feed, value) pair via the registered expansion matching the
    # feed's type (e.g. a composite tensor expands into component feeds).
    for tensor_type, _, feed_fn, _ in _REGISTERED_EXPANSIONS:
      if isinstance(feed, tensor_type):
        return feed_fn(feed, feed_val)
    raise TypeError(f'{feed} in argument `feed_dict` has invalid type '
                    f'"{type(feed).__name__}"')

  # Check session.
  if self._closed:
    raise RuntimeError('Attempted to use a closed Session.')
  if self.graph.version == 0:
    raise RuntimeError('The Session graph is empty. Add operations to the '
                       'graph before calling run().')

  # Create request.
  feed_dict_tensor = {}
  feed_map = {}

  # Validate and process feed_dict.
  feed_handles = {}
  if feed_dict:
    # Flatten nested-structure keys so each key maps to a single tensor.
    feed_dict = nest.flatten_dict_items(feed_dict)
    for feed, feed_val in feed_dict.items():
      for subfeed, subfeed_val in _feed_fn(feed, feed_val):
        try:
          subfeed_t = self.graph.as_graph_element(
              subfeed, allow_tensor=True, allow_operation=False)
        except Exception as e:
          raise TypeError(
              f'Cannot interpret feed_dict key as Tensor: {e.args[0]}')
        if isinstance(subfeed_val, tensor.Tensor):
          raise TypeError(
              'The value of a feed cannot be a tf.Tensor object. Acceptable '
              'feed values include Python scalars, strings, lists, numpy '
              'ndarrays, or TensorHandles. For reference, the tensor object '
              f'was {str(feed_val)} which was passed to the argument '
              f'`feed_dict` with key {str(feed)}.')
        subfeed_dtype = subfeed_t.dtype.as_numpy_dtype
        # Reject int feeds that would silently change value when cast to the
        # tensor's numpy dtype (overflow/truncation).
        if isinstance(subfeed_val, int) and _convert_to_numpy_obj(
            subfeed_dtype, subfeed_val) != subfeed_val:
          raise TypeError(
              f'Type of feed value {str(subfeed_val)} with type ' +
              f'{str(type(subfeed_val))} is not compatible with Tensor type '
              f'{str(subfeed_dtype)}. Try explicitly setting the type of the '
              'feed tensor to a larger type (e.g. int64).')

        is_tensor_handle_feed = isinstance(subfeed_val,
                                           session_ops.TensorHandle)
        if is_tensor_handle_feed:
          np_val = subfeed_val.to_numpy_array()
          feed_handles[subfeed_t.ref()] = subfeed_val
        else:
          np_val = numpy_compat.np_asarray(subfeed_val, subfeed_dtype)

        # Handle feeds skip the shape check: the numpy representation of a
        # TensorHandle does not carry the remote tensor's shape.
        if (not is_tensor_handle_feed and
            not subfeed_t.get_shape().is_compatible_with(np_val.shape)):
          raise ValueError(
              f'Cannot feed value of shape {str(np_val.shape)} for Tensor '
              f'{subfeed_t.name}, which has shape '
              f'{str(subfeed_t.get_shape())}')
        if not self.graph.is_feedable(subfeed_t):
          raise ValueError(f'Tensor {subfeed_t.name} may not be fed.')

        feed_dict_tensor[subfeed_t.ref()] = np_val
        feed_map[compat.as_bytes(subfeed_t.name)] = (subfeed_t, subfeed_val)

  # Create a fetch handler to take care of the structure of fetches.
  fetch_handler = _FetchHandler(
      self._graph, fetches, feed_dict_tensor, feed_handles=feed_handles)

  # Run request and get response.
  # We need to keep the returned movers alive for the following _do_run().
  # These movers are no longer needed when _do_run() completes, and
  # are deleted when `movers` goes out of scope when this _run() ends.
  # TODO(yuanbyu, keveman): Revisit whether we should just treat feeding
  # of a handle from a different device as an error.
  _ = self._update_with_movers(feed_dict_tensor, feed_map)
  final_fetches = fetch_handler.fetches()
  final_targets = fetch_handler.targets()
  # We only want to really perform the run if fetches or targets are provided,
  # or if the call is a partial run that specifies feeds.
  if final_fetches or final_targets or (handle and feed_dict_tensor):
    results = self._do_run(handle, final_targets, final_fetches,
                           feed_dict_tensor, options, run_metadata)
  else:
    results = []
  return fetch_handler.build_results(self, results)
def make_callable(self, fetches, feed_list=None, accept_options=False):
  """Returns a Python callable that runs a particular step.

  The returned callable will take `len(feed_list)` arguments whose types
  must be compatible feed values for the respective elements of `feed_list`.
  For example, if element `i` of `feed_list` is a `tf.Tensor`, the `i`th
  argument to the returned callable must be a numpy ndarray (or something
  convertible to an ndarray) with matching element type and shape. See
  `tf.Session.run` for details of the allowable feed key and value types.

  The returned callable will have the same return type as
  `tf.Session.run(fetches, ...)`. For example, if `fetches` is a `tf.Tensor`,
  the callable will return a numpy ndarray; if `fetches` is a `tf.Operation`,
  it will return `None`.

  Args:
    fetches: A value or list of values to fetch. See `tf.Session.run` for
      details of the allowable fetch types.
    feed_list: (Optional.) A list of `feed_dict` keys. See `tf.Session.run`
      for details of the allowable feed key types.
    accept_options: (Optional.) If `True`, the returned `Callable` will be
      able to accept `tf.compat.v1.RunOptions` and `tf.compat.v1.RunMetadata`
      as optional keyword arguments `options` and `run_metadata`,
      respectively, with the same syntax and semantics as `tf.Session.run`,
      which is useful for certain use cases (profiling and debugging) but will
      result in measurable slowdown of the `Callable`'s
      performance. Default: `False`.

  Returns:
    A function that when called will execute the step defined by
    `feed_list` and `fetches` in this session.

  Raises:
    TypeError: If `fetches` or `feed_list` cannot be interpreted
      as arguments to `tf.Session.run`.
  """
  # NOTE(review): an empty feed_list ([]) also takes this delegating path,
  # not the optimized paths below — presumably intentional; confirm.
  if feed_list is not None:
    if not isinstance(feed_list, (list, tuple)):
      raise TypeError('Argument `feed_list` must be a list or tuple. '
                      f'Received: feed_list={feed_list}')
    # Delegate any non-empty feed lists to the existing `run()` logic.
    # TODO(mrry): Refactor the feed handling logic from
    # `Session._run()` so that we can convert the feeds to a list of
    # strings here.
    def _generic_run(*feed_args, **kwargs):
      feed_dict = {
          feed: feed_val for feed, feed_val in zip(feed_list, feed_args)
      }
      return self.run(fetches, feed_dict=feed_dict, **kwargs)

    return _generic_run

  # Ensure any changes to the graph are reflected in the runtime.
  # Note that we don't need to do this on subsequent calls to the
  # returned object, because the arguments to `fetches` must already be
  # in the graph.
  self._extend_graph()

  # Create a fetch handler to take care of the structure of fetches.
  fetch_handler = _FetchHandler(self._graph, fetches, {})

  # Pre-resolve the C API handles once so the returned callable does no
  # per-call graph-element lookup.
  # pylint: disable=protected-access
  fetch_list = [t._as_tf_output() for t in fetch_handler.fetches()]
  target_list = [op._c_op for op in fetch_handler.targets()]
  # pylint: enable=protected-access

  def _callable_template_with_options_and_metadata(fetch_list,
                                                   target_list,
                                                   fetch_handler,
                                                   options=None,
                                                   run_metadata=None):
    """Template callable that accepts RunOptions and RunMetadata."""
    options_ptr = tf_session.TF_NewBufferFromString(
        compat.as_bytes(options.SerializeToString())) if options else None
    run_metadata_ptr = tf_session.TF_NewBuffer() if run_metadata else None
    try:
      results = self._call_tf_sessionrun(options_ptr, {}, fetch_list,
                                         target_list, run_metadata_ptr)
      if fetch_handler:
        results = fetch_handler.build_results(self, results)
      else:
        results = results[0] if results else None
      if run_metadata:
        proto_data = tf_session.TF_GetBuffer(run_metadata_ptr)
        run_metadata.ParseFromString(compat.as_bytes(proto_data))
    finally:
      if run_metadata_ptr:
        tf_session.TF_DeleteBuffer(run_metadata_ptr)
      if options:
        tf_session.TF_DeleteBuffer(options_ptr)
    return results

  if accept_options:
    return functools.partial(_callable_template_with_options_and_metadata,
                             fetch_list, target_list, fetch_handler)
  elif isinstance(fetches, ops.Operation):
    # Special case for fetching a single operation, because the
    # function will have no return value.
    assert not fetch_list
    assert len(target_list) == 1

    def _single_operation_run():
      self._call_tf_sessionrun(None, {}, [], target_list, None)

    return _single_operation_run
  elif isinstance(fetches, tensor.Tensor):
    # Special case for fetching a single tensor, because the
    # function can return the result of `TF_Run()` directly.
    assert len(fetch_list) == 1
    assert not target_list

    def _single_tensor_run():
      results = self._call_tf_sessionrun(None, {}, fetch_list, [], None)
      return results[0]

    return _single_tensor_run
  else:
    # In all other cases, we must use `fetch_handler` to build the
    # results for us.
    def _fetch_handler_run():
      results = self._call_tf_sessionrun(None, {}, fetch_list, target_list,
                                         None)
      return fetch_handler.build_results(self, results)

    return _fetch_handler_run
# Captures the name of a node in an error status. The regex below matches
# both the old and the new formats:
#   Old format: [[Node: <node_name> = ...]]
#   New format: [[{{node <node_name>}} = ...]]
# Group 3 captures the node name itself (used by `_do_call`).
_NODEDEF_NAME_RE = re.compile(
    r'\[\[(Node: )?(\{\{node )?([^\} ]*)(\}\})?\s*=*')
def _do_run(self, handle, target_list, fetch_list, feed_dict, options,
            run_metadata):
  """Runs a step based on the given fetches and feeds.

  Args:
    handle: a handle for partial_run. None if this is just a call to run().
    target_list: A list of operations to be run, but not fetched.
    fetch_list: A list of tensors to be fetched.
    feed_dict: A dictionary that maps tensors to numpy ndarrays.
    options: A (pointer to a) [`RunOptions`] protocol buffer, or None
    run_metadata: A (pointer to a) [`RunMetadata`] protocol buffer, or None

  Returns:
    A list of numpy ndarrays, corresponding to the elements of
    `fetch_list`. If the ith element of `fetch_list` contains the
    name of an operation, the first Tensor output of that operation
    will be returned for that element.

  Raises:
    tf.errors.OpError: Or one of its subclasses on error.
  """
  # Translate graph-level objects into the C API handles the wrappers take.
  # feed_dict keys are tensor references (see `_run`), hence the deref().
  # pylint: disable=protected-access
  feeds = dict((t.deref()._as_tf_output(), v) for t, v in feed_dict.items())
  fetches = [t._as_tf_output() for t in fetch_list]
  targets = [op._c_op for op in target_list]
  # pylint: enable=protected-access

  def _run_fn(feed_dict, fetch_list, target_list, options, run_metadata):
    # Ensure any changes to the graph are reflected in the runtime.
    self._extend_graph()
    return self._call_tf_sessionrun(options, feed_dict, fetch_list,
                                    target_list, run_metadata)

  def _prun_fn(handle, feed_dict, fetch_list):
    # Partial runs fixed their targets at setup time; none may be added now.
    if target_list:
      raise RuntimeError('partial_run() requires empty `target_list`. '
                         f'Received: target_list={target_list} (non-empty)')
    return self._call_tf_sessionprun(handle, feed_dict, fetch_list)

  if handle is None:
    return self._do_call(_run_fn, feeds, fetches, targets, options,
                         run_metadata)
  else:
    return self._do_call(_prun_fn, handle, feeds, fetches)
def _do_call(self, fn, *args):
  """Invokes `fn(*args)`, re-raising any `OpError` with an interpolated,
  node-annotated message for better diagnostics."""
  try:
    return fn(*args)
  except errors.OpError as e:
    message = compat.as_text(e.message)
    # Try to recover the failing node from the raw error text so the
    # re-raised error can carry the corresponding op and NodeDef.
    m = BaseSession._NODEDEF_NAME_RE.search(message)
    node_def = None
    op = None
    if m is not None:
      node_name = m.group(3)
      try:
        op = self._graph.get_operation_by_name(node_name)
        node_def = op.node_def
      except KeyError:
        # Node no longer exists in the Python graph; keep the bare message.
        pass
    message = error_interpolation.interpolate_graph(message, self._graph)
    if 'only supports NHWC tensor format' in message:
      message += ('\nA possible workaround: Try disabling Grappler optimizer'
                  '\nby modifying the config for creating the session eg.'
                  '\nsession_config.graph_options.rewrite_options.'
                  'disable_meta_optimizer = True')
    raise type(e)(node_def, op, message)  # pylint: disable=no-value-for-parameter
def _extend_graph(self):
  """Pushes any pending Python-side graph additions into the C++ session,
  serialized against concurrent run() calls via the graph's run lock."""
  with self._graph._session_run_lock():  # pylint: disable=protected-access
    tf_session.ExtendSession(self._session)
# The threshold to run garbage collection to delete dead tensors: once this
# many dead handles accumulate, `_register_dead_handle` issues one batched
# deletion run instead of one run per handle.
_DEAD_HANDLES_THRESHOLD = 10
def _register_dead_handle(self, handle):
  """Records a dead tensor handle; once enough accumulate, deletes them
  server-side in a single batched run()."""
  pending = None
  with self._delete_lock:
    self._dead_handles.append(handle)
    if len(self._dead_handles) == BaseSession._DEAD_HANDLES_THRESHOLD:
      # Take ownership of the batch and reset the shared list while still
      # holding the lock; the deletion run happens outside the lock.
      pending = self._dead_handles
      self._dead_handles = []
  if not pending:
    return
  # Build one feed/fetch pair per dead handle and delete them all at once.
  feeds = {}
  fetches = []
  for deleter_key, tensor_handle in enumerate(pending):
    holder, deleter = session_ops._get_handle_deleter(
        self.graph, deleter_key, tensor_handle)
    feeds[holder] = tensor_handle
    fetches.append(deleter)
  self.run(fetches, feed_dict=feeds)
def _update_with_movers(self, feed_dict, feed_map):
  """Moves tensor-handle feeds onto compatible devices, updating `feed_dict`.

  If a tensor handle that is fed to a device incompatible placeholder,
  we move the tensor to the right device, generate a new tensor handle,
  and update `feed_dict` to use the new handle.

  Returns the list of new handles (empty if nothing needed moving); the
  caller keeps them alive for the duration of the subsequent run.
  """
  handle_movers = []
  for feed_name, val in feed_map.items():
    # val is a (tensor, fed value) pair; a mover is only returned when the
    # fed handle lives on the wrong device for this placeholder.
    mover = session_ops._get_handle_mover(self.graph, *val)
    if mover:
      handle_movers.append((feed_name, val[1], mover))
  # Transfer a tensor to the right device if needed.
  if not handle_movers:
    return []
  else:
    feeds = {}
    fetches = []
    for _, handle, mover in handle_movers:
      feeds[mover[0]] = handle
      fetches.append(mover[1])
    handles = self.run(fetches, feed_dict=feeds)
    # Rewrite the original feed entries to point at the moved handles.
    for handle_mover, handle in zip(handle_movers, handles):
      np_val = np.array(handle.handle, dtype=np.object_)
      feed_name = handle_mover[0]
      feed_tensor = feed_map[feed_name][0]
      feed_dict[feed_tensor.ref()] = np_val
    return handles
def _call_tf_sessionrun(self, options, feed_dict, fetch_list, target_list,
                        run_metadata):
  """Thin wrapper over the C API session-run entry point."""
  return tf_session.TF_SessionRun_wrapper(self._session, options, feed_dict,
                                          fetch_list, target_list,
                                          run_metadata)
def _call_tf_sessionprun(self, handle, feed_dict, fetch_list):
  """Thin wrapper over the C API partial-run entry point."""
  return tf_session.TF_SessionPRun_wrapper(self._session, handle, feed_dict,
                                           fetch_list)
# pylint: disable=protected-access
class _Callable(object):
  """Experimental wrapper for the C++ `Session::MakeCallable()` API."""

  def __init__(self, session, callable_options):
    self._session = session
    self._handle = None
    options_ptr = tf_session.TF_NewBufferFromString(
        compat.as_bytes(callable_options.SerializeToString()))
    try:
      self._handle = tf_session.TF_SessionMakeCallable(
          session._session, options_ptr)
    finally:
      tf_session.TF_DeleteBuffer(options_ptr)

  def __call__(self, *args, **kwargs):
    run_metadata = kwargs.get('run_metadata', None)
    # Allocate the metadata buffer *before* entering the try block: if the
    # allocation were inside and raised, the `finally` clause would hit an
    # unbound `run_metadata_ptr` (NameError) and mask the real error. This
    # also matches the allocation pattern used by `BaseSession.run`.
    run_metadata_ptr = tf_session.TF_NewBuffer() if run_metadata else None
    try:
      ret = tf_session.TF_SessionRunCallable(self._session._session,
                                             self._handle, args,
                                             run_metadata_ptr)
      if run_metadata:
        proto_data = tf_session.TF_GetBuffer(run_metadata_ptr)
        run_metadata.ParseFromString(compat.as_bytes(proto_data))
    finally:
      if run_metadata_ptr:
        tf_session.TF_DeleteBuffer(run_metadata_ptr)
    return ret

  def __del__(self):
    # NOTE(mrry): It is possible that `self._session.__del__()` could be
    # called before this destructor, in which case `self._session._session`
    # will be `None`.
    if (self._handle is not None and self._session._session is not None and
        not self._session._closed):
      tf_session.TF_SessionReleaseCallable(self._session._session,
                                           self._handle)
# pylint: enable=protected-access
def _make_callable_from_options(self, callable_options):
  """Returns a handle to a "callable" with the given options.

  Args:
    callable_options: A `CallableOptions` protocol buffer message describing
      the computation that will be performed by the callable.

  Returns:
    A handle to the new callable.
  """
  # Sync the C++ session with any graph additions before baking the callable.
  self._extend_graph()
  return BaseSession._Callable(self, callable_options)
@tf_export(v1=['Session'])
| BaseSession |
python | doocs__leetcode | solution/2500-2599/2579.Count Total Number of Colored Cells/Solution.py | {
"start": 0,
"end": 94
class ____:
    def coloredCells(self, n: int) -> int:
        """Return the number of colored cells after n minutes.

        The shape at minute n is a diamond containing n^2 + (n - 1)^2
        unit cells, which equals the closed form 2*n*(n-1) + 1.
        """
        return n * n + (n - 1) * (n - 1)
| Solution |
python | django__django | tests/admin_views/tests.py | {
"start": 69035,
"end": 77144
class ____(AdminViewBasicTestCase):
    """Tests for admin views rendered with custom/overridden templates.

    Covers per-model custom templates (change list/form, delete, history,
    popup response), template blocks that must call ``block.super``
    (``bodyclass``, ``extrabody``), the password-change form markup, and a
    custom list-filter template.
    """

    def test_custom_model_admin_templates(self):
        # Test custom change list template with custom extra context
        response = self.client.get(
            reverse("admin:admin_views_customarticle_changelist")
        )
        self.assertContains(response, "var hello = 'Hello!';")
        self.assertTemplateUsed(response, "custom_admin/change_list.html")

        # Test custom add form template
        response = self.client.get(reverse("admin:admin_views_customarticle_add"))
        self.assertTemplateUsed(response, "custom_admin/add_form.html")

        # Add an article so we can test delete, change, and history views
        post = self.client.post(
            reverse("admin:admin_views_customarticle_add"),
            {
                "content": "<p>great article</p>",
                "date_0": "2008-03-18",
                "date_1": "10:54:39",
            },
        )
        self.assertRedirects(
            post, reverse("admin:admin_views_customarticle_changelist")
        )
        self.assertEqual(CustomArticle.objects.count(), 1)
        article_pk = CustomArticle.objects.all()[0].pk

        # Test custom delete, change, and object history templates
        # Test custom change form template
        response = self.client.get(
            reverse("admin:admin_views_customarticle_change", args=(article_pk,))
        )
        self.assertTemplateUsed(response, "custom_admin/change_form.html")
        response = self.client.get(
            reverse("admin:admin_views_customarticle_delete", args=(article_pk,))
        )
        self.assertTemplateUsed(response, "custom_admin/delete_confirmation.html")
        response = self.client.post(
            reverse("admin:admin_views_customarticle_changelist"),
            data={
                "index": 0,
                "action": ["delete_selected"],
                "_selected_action": ["1"],
            },
        )
        self.assertTemplateUsed(
            response, "custom_admin/delete_selected_confirmation.html"
        )
        response = self.client.get(
            reverse("admin:admin_views_customarticle_history", args=(article_pk,))
        )
        self.assertTemplateUsed(response, "custom_admin/object_history.html")

        # A custom popup response template may be specified by
        # ModelAdmin.popup_response_template.
        response = self.client.post(
            reverse("admin:admin_views_customarticle_add") + "?%s=1" % IS_POPUP_VAR,
            {
                "content": "<p>great article</p>",
                "date_0": "2008-03-18",
                "date_1": "10:54:39",
                IS_POPUP_VAR: "1",
            },
        )
        self.assertEqual(response.template_name, "custom_admin/popup_response.html")

    def test_extended_bodyclass_template_change_form(self):
        """
        The admin/change_form.html template uses block.super in the
        bodyclass block.
        """
        response = self.client.get(reverse("admin:admin_views_section_add"))
        self.assertContains(response, "bodyclass_consistency_check ")

    def test_extended_extrabody(self):
        # The custom extrabody block content must end up just before </body>.
        response = self.client.get(reverse("admin:admin_views_section_add"))
        self.assertContains(response, "extrabody_check\n</body>")

    def test_change_password_template(self):
        user = User.objects.get(username="super")
        response = self.client.get(
            reverse("admin:auth_user_password_change", args=(user.id,))
        )
        # The auth/user/change_password.html template uses super in the
        # bodyclass block.
        self.assertContains(response, "bodyclass_consistency_check ")
        # When a site has multiple passwords in the browser's password manager,
        # a browser pop up asks which user the new password is for. To prevent
        # this, the username is added to the change password form.
        self.assertContains(
            response, '<input type="text" name="username" value="super" class="hidden">'
        )
        # help text for passwords has an id.
        self.assertContains(
            response,
            '<div class="help" id="id_password1_helptext"><ul><li>'
            "Your password can’t be too similar to your other personal information."
            "</li><li>Your password can’t be entirely numeric.</li></ul></div>",
        )
        self.assertContains(
            response,
            '<div class="help" id="id_password2_helptext">'
            "Enter the same password as before, for verification.</div>",
        )

    def test_change_password_template_helptext_no_id(self):
        # Widgets whose id_for_label() returns None must still render help
        # text, just without an id attribute on the help div.
        user = User.objects.get(username="super")

        class EmptyIdForLabelTextInput(forms.TextInput):
            def id_for_label(self, id):
                return None

        class EmptyIdForLabelHelpTextPasswordChangeForm(AdminPasswordChangeForm):
            password1 = forms.CharField(
                help_text="Your new password", widget=EmptyIdForLabelTextInput()
            )

        class CustomUserAdmin(UserAdmin):
            change_password_form = EmptyIdForLabelHelpTextPasswordChangeForm

        request = RequestFactory().get(
            reverse("admin:auth_user_password_change", args=(user.id,))
        )
        request.user = user
        user_admin = CustomUserAdmin(User, site)
        response = user_admin.user_change_password(request, str(user.pk))
        self.assertContains(response, '<div class="help">')

    def test_custom_password_change_form(self):
        self.client.force_login(self.superuser)
        response = self.client.get(reverse("admin4:password_change"))
        self.assertContains(response, "Custom old password label")

    def test_extended_bodyclass_template_index(self):
        """
        The admin/index.html template uses block.super in the bodyclass block.
        """
        response = self.client.get(reverse("admin:index"))
        self.assertContains(response, "bodyclass_consistency_check ")

    def test_extended_bodyclass_change_list(self):
        """
        The admin/change_list.html' template uses block.super
        in the bodyclass block.
        """
        response = self.client.get(reverse("admin:admin_views_article_changelist"))
        self.assertContains(response, "bodyclass_consistency_check ")

    def test_extended_bodyclass_template_login(self):
        """
        The admin/login.html template uses block.super in the
        bodyclass block.
        """
        self.client.logout()
        response = self.client.get(reverse("admin:login"))
        self.assertContains(response, "bodyclass_consistency_check ")

    def test_extended_bodyclass_template_delete_confirmation(self):
        """
        The admin/delete_confirmation.html template uses
        block.super in the bodyclass block.
        """
        group = Group.objects.create(name="foogroup")
        response = self.client.get(reverse("admin:auth_group_delete", args=(group.id,)))
        self.assertContains(response, "bodyclass_consistency_check ")

    def test_extended_bodyclass_template_delete_selected_confirmation(self):
        """
        The admin/delete_selected_confirmation.html template uses
        block.super in bodyclass block.
        """
        group = Group.objects.create(name="foogroup")
        post_data = {
            "action": "delete_selected",
            "selected_across": "0",
            "index": "0",
            "_selected_action": group.id,
        }
        response = self.client.post(reverse("admin:auth_group_changelist"), post_data)
        self.assertEqual(response.context["site_header"], "Django administration")
        self.assertContains(response, "bodyclass_consistency_check ")

    def test_filter_with_custom_template(self):
        """
        A custom template can be used to render an admin filter.
        """
        response = self.client.get(reverse("admin:admin_views_color2_changelist"))
        self.assertTemplateUsed(response, "custom_filter_template.html")
@override_settings(ROOT_URLCONF="admin_views.urls")
| AdminCustomTemplateTests |
python | ansible__ansible | lib/ansible/plugins/action/unarchive.py | {
"start": 983,
"end": 4422
class ____(ActionBase):
    """Action plugin for ``unarchive``: validates arguments, optionally
    copies the local archive to the target host, then delegates the actual
    extraction to the remote ``unarchive`` module."""

    # This action may upload the archive file to the remote host.
    TRANSFERS_FILES = True

    def run(self, tmp=None, task_vars=None):
        """ handler for unarchive operations """
        if task_vars is None:
            task_vars = dict()

        super(ActionModule, self).run(tmp, task_vars)
        del tmp  # tmp no longer has any effect

        source = self._task.args.get('src', None)
        dest = self._task.args.get('dest', None)
        remote_src = boolean(self._task.args.get('remote_src', False), strict=False)
        creates = self._task.args.get('creates', None)
        decrypt = self._task.args.get('decrypt', True)

        try:
            # "copy" is deprecated in favor of "remote_src".
            if 'copy' in self._task.args:
                # They are mutually exclusive.
                if 'remote_src' in self._task.args:
                    raise AnsibleActionFail("parameters are mutually exclusive: ('copy', 'remote_src')")
                # We will take the information from copy and store it in
                # the remote_src var to use later in this file.
                # copy=True means the archive is local, i.e. remote_src=False.
                self._task.args['remote_src'] = remote_src = not boolean(self._task.args.pop('copy'), strict=False)

            if source is None or dest is None:
                raise AnsibleActionFail("src (or content) and dest are required")

            if creates:
                # do not run the command if the line contains creates=filename
                # and the filename already exists. This allows idempotence
                # of command executions.
                creates = self._remote_expand_user(creates)
                if self._remote_file_exists(creates):
                    raise AnsibleActionSkip("skipped, since %s exists" % creates)

            dest = self._remote_expand_user(dest)  # CCTODO: Fix path for Windows hosts.
            source = os.path.expanduser(source)

            if not remote_src:
                # Resolve src against the role/play 'files' search path and
                # decrypt vault-encrypted archives when requested.
                source = self._loader.get_real_file(self._find_needle('files', source), decrypt=decrypt)

            # Destination must already exist and be a directory on the target.
            remote_stat = self._execute_remote_stat(dest, all_vars=task_vars, follow=True)
            if not remote_stat['exists'] or not remote_stat['isdir']:
                raise AnsibleActionFail("dest '%s' must be an existing dir" % dest)

            if not remote_src:
                # transfer the file to a remote tmp location
                tmp_src = self._connection._shell.join_path(self._connection._shell.tmpdir, 'source')
                self._transfer_file(source, tmp_src)

            # handle diff mode client side
            # handle check mode client side

            # remove action plugin only keys
            new_module_args = self._task.args.copy()
            for key in ('decrypt',):
                if key in new_module_args:
                    del new_module_args[key]

            if not remote_src:
                # fix file permissions when the copy is done as a different user
                self._fixup_perms2((self._connection._shell.tmpdir, tmp_src))
                new_module_args['src'] = tmp_src

            # execute the unarchive module now, with the updated args (using ansible.legacy prefix to eliminate collections
            # collisions with local override
            return self._execute_module(module_name='ansible.legacy.unarchive', module_args=new_module_args, task_vars=task_vars)
        finally:
            # Always clean up the remote temporary directory.
            self._remove_tmp_path(self._connection._shell.tmpdir)
| ActionModule |
python | zarr-developers__zarr-python | tests/test_indexing.py | {
"start": 1402,
"end": 67199
class ____(MemoryStore):
    """In-memory store that counts get/set calls, keyed by the array-relative
    portion of the storage key (the leading path segment is stripped)."""

    # Maps (method name, key suffix) -> number of calls observed.
    counter: Counter[tuple[str, str]]

    @classmethod
    async def open(cls) -> CountingDict:
        store = await super().open()
        store.counter = Counter()
        return store

    async def get(
        self,
        key: str,
        prototype: BufferPrototype,
        byte_range: tuple[int | None, int | None] | None = None,
    ) -> Buffer | None:
        # Strip the first path segment so counts are keyed by the
        # array-relative key (e.g. "c/0/0"), not the store root.
        key_suffix = "/".join(key.split("/")[1:])
        self.counter["__getitem__", key_suffix] += 1
        return await super().get(key, prototype, byte_range)

    async def set(self, key: str, value: Buffer, byte_range: tuple[int, int] | None = None) -> None:
        key_suffix = "/".join(key.split("/")[1:])
        self.counter["__setitem__", key_suffix] += 1
        return await super().set(key, value, byte_range)
def test_normalize_integer_selection() -> None:
    """In-bounds indices pass through (negatives wrap); out-of-bounds raise."""
    assert normalize_integer_selection(1, 100) == 1
    assert normalize_integer_selection(-1, 100) == 99
    for out_of_bounds in (100, 1000, -1000):
        with pytest.raises(IndexError):
            normalize_integer_selection(out_of_bounds, 100)
def test_replace_ellipsis() -> None:
    """replace_ellipsis expands `...` / missing axes into full slices for the
    given array shape, leaving explicit indices untouched."""
    # 1D, single item
    assert (0,) == replace_ellipsis(0, (100,))

    # 1D
    assert (slice(None),) == replace_ellipsis(Ellipsis, (100,))
    assert (slice(None),) == replace_ellipsis(slice(None), (100,))
    assert (slice(None, 100),) == replace_ellipsis(slice(None, 100), (100,))
    assert (slice(0, None),) == replace_ellipsis(slice(0, None), (100,))
    assert (slice(None),) == replace_ellipsis((slice(None), Ellipsis), (100,))
    assert (slice(None),) == replace_ellipsis((Ellipsis, slice(None)), (100,))

    # 2D, single item
    assert (0, 0) == replace_ellipsis((0, 0), (100, 100))
    assert (-1, 1) == replace_ellipsis((-1, 1), (100, 100))

    # 2D, single col/row
    assert (0, slice(None)) == replace_ellipsis((0, slice(None)), (100, 100))
    assert (0, slice(None)) == replace_ellipsis((0,), (100, 100))
    assert (slice(None), 0) == replace_ellipsis((slice(None), 0), (100, 100))

    # 2D slice
    assert (slice(None), slice(None)) == replace_ellipsis(Ellipsis, (100, 100))
    assert (slice(None), slice(None)) == replace_ellipsis(slice(None), (100, 100))
    assert (slice(None), slice(None)) == replace_ellipsis((slice(None), slice(None)), (100, 100))
    assert (slice(None), slice(None)) == replace_ellipsis((Ellipsis, slice(None)), (100, 100))
    assert (slice(None), slice(None)) == replace_ellipsis((slice(None), Ellipsis), (100, 100))
    # An Ellipsis that matches zero remaining axes is simply dropped.
    assert (slice(None), slice(None)) == replace_ellipsis(
        (slice(None), Ellipsis, slice(None)), (100, 100)
    )
    assert (slice(None), slice(None)) == replace_ellipsis(
        (Ellipsis, slice(None), slice(None)), (100, 100)
    )
    assert (slice(None), slice(None)) == replace_ellipsis(
        (slice(None), slice(None), Ellipsis), (100, 100)
    )
@pytest.mark.parametrize(
    ("value", "dtype"),
    [
        (42, "uint8"),
        pytest.param(
            (b"aaa", 1, 4.2), [("foo", "S3"), ("bar", "i4"), ("baz", "f8")], marks=pytest.mark.xfail
        ),
    ],
)
@pytest.mark.parametrize("use_out", [True, False])
def test_get_basic_selection_0d(store: StorePath, use_out: bool, value: Any, dtype: Any) -> None:
    """Basic selection on a zero-dimensional array returns the stored scalar."""
    arr_np = np.array(value, dtype=dtype)
    arr_z = zarr_array_from_numpy_array(store, arr_np)

    # full selections (Ellipsis / [...]) return the 0-d array
    assert_array_equal(arr_np, arr_z.get_basic_selection(Ellipsis))
    assert_array_equal(arr_np, arr_z[...])
    # the empty-tuple selection returns the scalar itself
    assert value == arr_z.get_basic_selection(())
    assert value == arr_z[()]

    if use_out:
        # results can also be written into a caller-provided buffer
        out_buf = default_buffer_prototype().nd_buffer.from_numpy_array(np.zeros_like(arr_np))
        arr_z.get_basic_selection(Ellipsis, out=out_buf)
        assert_array_equal(arr_np, out_buf.as_ndarray_like())

    # todo: uncomment the structured array tests when we can make them pass,
    # or delete them if we formally decide not to support structured dtypes.

    # test structured array
    # value = (b"aaa", 1, 4.2)
    # a = np.array(value, dtype=[("foo", "S3"), ("bar", "i4"), ("baz", "f8")])
    # z = zarr_array_from_numpy_array(store, a)
    # z[()] = value
    # assert_array_equal(a, z.get_basic_selection(Ellipsis))
    # assert_array_equal(a, z[...])
    # assert a[()] == z.get_basic_selection(())
    # assert a[()] == z[()]
    # assert b"aaa" == z.get_basic_selection((), fields="foo")
    # assert b"aaa" == z["foo"]
    # assert a[["foo", "bar"]] == z.get_basic_selection((), fields=["foo", "bar"])
    # assert a[["foo", "bar"]] == z["foo", "bar"]
    # # test out param
    # b = NDBuffer.from_numpy_array(np.zeros_like(a))
    # z.get_basic_selection(Ellipsis, out=b)
    # assert_array_equal(a, b)
    # c = NDBuffer.from_numpy_array(np.zeros_like(a[["foo", "bar"]]))
    # z.get_basic_selection(Ellipsis, out=c, fields=["foo", "bar"])
    # assert_array_equal(a[["foo", "bar"]], c)
# Valid basic selections for a 1-D array of length 1050; shared by the 1-D
# basic- and orthogonal-selection tests below.
basic_selections_1d: list[BasicSelection] = [
    # single value
    42,
    -1,
    # slices
    slice(0, 1050),
    slice(50, 150),
    slice(0, 2000),  # stop beyond the array length
    slice(-150, -50),
    slice(-2000, 2000),
    slice(0, 0),  # empty result
    slice(-1, 0),  # empty result
    # total selections
    slice(None),
    Ellipsis,
    (),
    (Ellipsis, slice(None)),
    # slices with step (a few entries repeat earlier ones; duplicates are harmless)
    slice(None),
    slice(None, None),
    slice(None, None, 1),
    slice(None, None, 10),
    slice(None, None, 100),
    slice(None, None, 1000),
    slice(None, None, 10000),
    slice(0, 1050),
    slice(0, 1050, 1),
    slice(0, 1050, 10),
    slice(0, 1050, 100),
    slice(0, 1050, 1000),
    slice(0, 1050, 10000),
    slice(1, 31, 3),
    slice(1, 31, 30),
    slice(1, 31, 300),
    slice(81, 121, 3),
    slice(81, 121, 30),
    slice(81, 121, 300),
    slice(50, 150),
    slice(50, 150, 1),
    slice(50, 150, 10),
]
# Selections that must raise IndexError for basic 1-D indexing.
basic_selections_1d_bad = [
    # only positive step supported
    slice(None, None, -1),
    slice(None, None, -10),
    slice(None, None, -100),
    slice(None, None, -1000),
    slice(None, None, -10000),
    slice(1050, -1, -1),
    slice(1050, -1, -10),
    slice(1050, -1, -100),
    slice(1050, -1, -1000),
    slice(1050, -1, -10000),
    slice(1050, 0, -1),
    slice(1050, 0, -10),
    slice(1050, 0, -100),
    slice(1050, 0, -1000),
    slice(1050, 0, -10000),
    slice(150, 50, -1),
    slice(150, 50, -10),
    slice(31, 1, -3),
    slice(121, 81, -3),
    slice(-1, 0, -1),
    # bad stuff
    2.3,
    "foo",
    b"xxx",
    None,
    (0, 0),  # too many dimensions for a 1-D array
    (slice(None), slice(None)),  # too many dimensions for a 1-D array
]
def _test_get_basic_selection(
    a: npt.NDArray[Any] | Array, z: Array, selection: BasicSelection
) -> None:
    """Check ``z``'s basic selection against the reference array ``a``."""
    expected = a[selection]

    # long-form API
    assert_array_equal(expected, z.get_basic_selection(selection))
    # __getitem__ shorthand
    assert_array_equal(expected, z[selection])

    # selection into a caller-provided output buffer
    out = default_buffer_prototype().nd_buffer.from_numpy_array(
        np.empty(shape=expected.shape, dtype=expected.dtype)
    )
    z.get_basic_selection(selection, out=out)
    assert_array_equal(expected, out.as_numpy_array())
# noinspection PyStatementEffect
def test_get_basic_selection_1d(store: StorePath) -> None:
    """Exercise basic (integer/slice) selections on a chunked 1-D array."""
    a = np.arange(1050, dtype=int)
    z = zarr_array_from_numpy_array(store, a, chunk_shape=(100,))

    for valid in basic_selections_1d:
        _test_get_basic_selection(a, z, valid)

    for bad in basic_selections_1d_bad:
        with pytest.raises(IndexError):
            z.get_basic_selection(bad)  # type: ignore[arg-type]
        with pytest.raises(IndexError):
            z[bad]  # type: ignore[index]

    # integer arrays are not valid basic selections
    with pytest.raises(IndexError):
        z.get_basic_selection([1, 0])  # type: ignore[arg-type]
# Valid basic selections for a (1000, 10) 2-D array; shared by the 2-D
# basic- and orthogonal-selection tests below.
basic_selections_2d: list[BasicSelection] = [
    # single row
    42,
    -1,
    (42, slice(None)),
    (-1, slice(None)),
    # single col
    (slice(None), 4),
    (slice(None), -1),
    # row slices
    slice(None),
    slice(0, 1000),
    slice(250, 350),
    slice(0, 2000),
    slice(-350, -250),
    slice(0, 0),  # empty result
    slice(-1, 0),  # empty result
    slice(-2000, 0),
    slice(-2000, 2000),
    # 2D slices
    (slice(None), slice(1, 5)),
    (slice(250, 350), slice(None)),
    (slice(250, 350), slice(1, 5)),
    (slice(250, 350), slice(-5, -1)),
    (slice(250, 350), slice(-50, 50)),
    (slice(250, 350, 10), slice(1, 5)),
    (slice(250, 350), slice(1, 5, 2)),
    (slice(250, 350, 33), slice(1, 5, 3)),
    # total selections
    (slice(None), slice(None)),
    Ellipsis,
    (),
    (Ellipsis, slice(None)),
    (Ellipsis, slice(None), slice(None)),
]
# Selections that must raise IndexError for basic 2-D indexing.
basic_selections_2d_bad = [
    # bad stuff
    2.3,
    "foo",
    b"xxx",
    None,
    (2.3, slice(None)),
    # only positive step supported
    slice(None, None, -1),
    (slice(None, None, -1), slice(None)),
    (0, 0, 0),  # too many dimensions for a 2-D array
    (slice(None), slice(None), slice(None)),  # too many dimensions for a 2-D array
]
# noinspection PyStatementEffect
def test_get_basic_selection_2d(store: StorePath) -> None:
    """Exercise basic selections on a chunked 2-D array."""
    a = np.arange(10000, dtype=int).reshape(1000, 10)
    z = zarr_array_from_numpy_array(store, a, chunk_shape=(300, 3))

    for valid in basic_selections_2d:
        _test_get_basic_selection(a, z, valid)

    # integer arrays are rejected by the basic-selection API
    for bad in basic_selections_2d_bad + [[0, 1], (slice(None), [0, 1])]:
        with pytest.raises(IndexError):
            z.get_basic_selection(bad)  # type: ignore[arg-type]

    # __getitem__, by contrast, falls back to fancy indexing
    np.testing.assert_array_equal(z[([0, 1], [0, 1])], [0, 11])
def test_fancy_indexing_fallback_on_get_setitem(store: StorePath) -> None:
    """__getitem__/__setitem__ fall back to numpy-style fancy (pointwise) indexing."""
    z = zarr_array_from_numpy_array(store, np.zeros((20, 20)))

    # pointwise assignment along the diagonal
    z[[1, 2, 3], [1, 2, 3]] = 1
    expected = np.zeros((4, 4))
    expected[[1, 2, 3], [1, 2, 3]] = 1
    np.testing.assert_array_equal(z[:4, :4], expected)
    np.testing.assert_array_equal(z[[1, 2, 3], [1, 2, 3]], 1)

    # broadcasting: scalar row index with an index list
    np.testing.assert_array_equal(z[1, [1, 2, 3]], [1, 0, 0])

    # 1-D fancy indexing
    z1d = zarr_array_from_numpy_array(store, np.zeros(5))
    z1d[[1, 2, 3]] = 1
    np.testing.assert_array_equal(z1d[:], [0, 1, 1, 1, 0])
@pytest.mark.parametrize(
    ("index", "expected_result"),
    [
        # Single iterable of integers
        ([0, 1], [[0, 1, 2], [3, 4, 5]]),
        # List first, then slice
        (([0, 1], slice(None)), [[0, 1, 2], [3, 4, 5]]),
        # List first, then slice
        (([0, 1], slice(1, None)), [[1, 2], [4, 5]]),
        # Slice first, then list
        ((slice(0, 2), [0, 2]), [[0, 2], [3, 5]]),
        # Slices only
        ((slice(0, 2), slice(0, 2)), [[0, 1], [3, 4]]),
        # List with repeated index
        (([1, 0, 1], slice(1, None)), [[4, 5], [1, 2], [4, 5]]),
        # 1D indexing
        (([1, 0, 1]), [[3, 4, 5], [0, 1, 2], [3, 4, 5]]),
    ],
)
def test_orthogonal_indexing_fallback_on_getitem_2d(
    store: StorePath, index: Selection, expected_result: npt.ArrayLike
) -> None:
    """
    Tests the orthogonal indexing fallback on __getitem__ for a 2D matrix.

    Every selection is checked against the hand-written expected result as
    well as against numpy's behavior on the same array.
    """
    a = np.arange(9).reshape(3, 3)  # rows: [0,1,2], [3,4,5], [6,7,8]
    z = zarr_array_from_numpy_array(store, a)
    np.testing.assert_array_equal(z[index], expected_result)
    np.testing.assert_array_equal(z[index], a[index], err_msg="Indexing disagrees with numpy")
@pytest.mark.skip(reason="fails on ubuntu, windows; numpy=2.2; in CI")
def test_setitem_repeated_index() -> None:
    """With repeated indices in an orthogonal setitem, the last write wins."""
    array = zarr.array(data=np.zeros((4,)), chunks=(1,))
    indexer = np.array([-1, -1, 0, 0])
    array.oindex[(indexer,)] = [0, 1, 2, 3]
    # index 0 received 2 then 3; index -1 received 0 then 1
    np.testing.assert_array_equal(array[:], np.array([3, 0, 0, 1]))
    indexer = np.array([-1, 0, 0, -1])
    array.oindex[(indexer,)] = [0, 1, 2, 3]
    # index 0 received 1 then 2; index -1 received 0 then 3
    np.testing.assert_array_equal(array[:], np.array([2, 0, 0, 3]))
# Type alias for hand-written index fixtures: either a flat list of ints or a
# tuple mixing slices, ints, and integer lists.
Index = list[int] | tuple[slice | int | list[int], ...]
@pytest.mark.parametrize(
    ("index", "expected_result"),
    [
        # Single iterable of integers
        ([0, 1], [[[0, 1, 2], [3, 4, 5], [6, 7, 8]], [[9, 10, 11], [12, 13, 14], [15, 16, 17]]]),
        # One slice, two integers
        ((slice(0, 2), 1, 1), [4, 13]),
        # One integer, two slices
        ((slice(0, 2), 1, slice(0, 2)), [[3, 4], [12, 13]]),
        # Two slices and a list
        ((slice(0, 2), [1, 2], slice(0, 2)), [[[3, 4], [6, 7]], [[12, 13], [15, 16]]]),
    ],
)
def test_orthogonal_indexing_fallback_on_getitem_3d(
    store: StorePath, index: Selection, expected_result: npt.ArrayLike
) -> None:
    """
    Tests the orthogonal indexing fallback on __getitem__ for a 3D matrix.

    Every selection is checked against the hand-written expected result as
    well as against numpy's behavior on the same array.
    """
    a = np.arange(27).reshape(3, 3, 3)  # values 0..26, C-contiguous layout
    z = zarr_array_from_numpy_array(store, a)
    np.testing.assert_array_equal(z[index], expected_result)
    np.testing.assert_array_equal(z[index], a[index], err_msg="Indexing disagrees with numpy")
@pytest.mark.parametrize(
    ("index", "expected_result"),
    [
        # Single iterable of integers
        ([0, 1], [[1, 1, 1], [1, 1, 1], [0, 0, 0]]),
        # List and slice combined
        (([0, 1], slice(1, 3)), [[0, 1, 1], [0, 1, 1], [0, 0, 0]]),
        # Index repetition is ignored on setitem
        (([0, 1, 1, 1, 1, 1, 1], slice(1, 3)), [[0, 1, 1], [0, 1, 1], [0, 0, 0]]),
        # Slice with step
        (([0, 2], slice(None, None, 2)), [[1, 0, 1], [0, 0, 0], [1, 0, 1]]),
    ],
)
def test_orthogonal_indexing_fallback_on_setitem_2d(
    store: StorePath, index: Selection, expected_result: npt.ArrayLike
) -> None:
    """
    Tests the orthogonal indexing fallback on __setitem__ for a 2D matrix.
    In addition to checking expected behavior, all indexing
    is also checked against numpy.
    """
    # write 1s through both zarr and numpy, then compare both results
    a = np.zeros((3, 3))
    z = zarr_array_from_numpy_array(store, a)
    z[index] = 1
    a[index] = 1
    np.testing.assert_array_equal(z[:], expected_result)
    np.testing.assert_array_equal(z[:], a, err_msg="Indexing disagrees with numpy")
def test_fancy_indexing_doesnt_mix_with_implicit_slicing(store: StorePath) -> None:
    """Coordinate (fancy) indexing must cover every dimension explicitly."""
    z2 = zarr_array_from_numpy_array(store, np.zeros((5, 5, 5)))

    # two index arrays on a 3-D array leave one dimension implicit -> error
    with pytest.raises(IndexError):
        z2[[1, 2, 3], [1, 2, 3]] = 2
    with pytest.raises(IndexError):
        np.testing.assert_array_equal(z2[[1, 2, 3], [1, 2, 3]], 0)

    # an Ellipsis combined with an index array is likewise rejected
    with pytest.raises(IndexError):
        z2[..., [1, 2, 3]] = 2  # type: ignore[index]
    with pytest.raises(IndexError):
        np.testing.assert_array_equal(z2[..., [1, 2, 3]], 0)  # type: ignore[index]
@pytest.mark.parametrize(
    ("value", "dtype"),
    [
        (42, "uint8"),
        pytest.param(
            (b"aaa", 1, 4.2), [("foo", "S3"), ("bar", "i4"), ("baz", "f8")], marks=pytest.mark.xfail
        ),
    ],
)
def test_set_basic_selection_0d(
    store: StorePath, value: Any, dtype: str | list[tuple[str, str]]
) -> None:
    """Writing to a 0-d array via set_basic_selection and __setitem__."""
    arr_np = np.array(value, dtype=dtype)
    arr_np_zeros = np.zeros_like(arr_np, dtype=dtype)
    arr_z = zarr_array_from_numpy_array(store, arr_np_zeros)
    assert_array_equal(arr_np_zeros, arr_z)

    # long-form API
    arr_z.set_basic_selection(Ellipsis, value)
    assert_array_equal(value, arr_z)

    # __setitem__ round-trip: reset to zeros, then write the value again
    arr_z[...] = 0
    assert_array_equal(arr_np_zeros, arr_z)
    arr_z[...] = value
    assert_array_equal(value, arr_z)

    # todo: uncomment the structured array tests when we can make them pass,
    # or delete them if we formally decide not to support structured dtypes.
    # arr_z.set_basic_selection(Ellipsis, v["foo"], fields="foo")
    # assert v["foo"] == arr_z["foo"]
    # assert arr_np_zeros["bar"] == arr_z["bar"]
    # assert arr_np_zeros["baz"] == arr_z["baz"]
    # arr_z["bar"] = v["bar"]
    # assert v["foo"] == arr_z["foo"]
    # assert v["bar"] == arr_z["bar"]
    # assert arr_np_zeros["baz"] == arr_z["baz"]
    # # multiple field assignment not supported
    # with pytest.raises(IndexError):
    #     arr_z.set_basic_selection(Ellipsis, v[["foo", "bar"]], fields=["foo", "bar"])
    # with pytest.raises(IndexError):
    #     arr_z[..., "foo", "bar"] = v[["foo", "bar"]]
def _test_get_orthogonal_selection(
    a: npt.NDArray[Any], z: Array, selection: OrthogonalSelection
) -> None:
    """Compare ``z``'s orthogonal selection against numpy ``oindex`` on ``a``."""
    expected = oindex(a, selection)
    # long-form API
    assert_array_equal(expected, z.get_orthogonal_selection(selection))
    # .oindex shorthand
    assert_array_equal(expected, z.oindex[selection])
# noinspection PyStatementEffect
def test_get_orthogonal_selection_1d_bool(store: StorePath) -> None:
    """Orthogonal selection of a 1-D array with boolean masks."""
    a = np.arange(1050, dtype=int)
    z = zarr_array_from_numpy_array(store, a, chunk_shape=(100,))
    np.random.seed(42)

    # masks of decreasing density
    for density in (0.5, 0.1, 0.01):
        mask = np.random.binomial(1, density, size=a.shape[0]).astype(bool)
        _test_get_orthogonal_selection(a, z, mask)

    # the mask length must match the dimension exactly ...
    with pytest.raises(IndexError):
        z.oindex[np.zeros(50, dtype=bool)]  # too short
    with pytest.raises(IndexError):
        z.oindex[np.zeros(2000, dtype=bool)]  # too long
    # ... and the mask must be one-dimensional
    with pytest.raises(IndexError):
        z.oindex[[[True, False], [False, True]]]  # type: ignore[index]
# noinspection PyStatementEffect
def test_get_orthogonal_selection_1d_int(store: StorePath) -> None:
    """Orthogonal selection of a 1-D array with integer index arrays."""
    a = np.arange(550, dtype=int)
    z = zarr_array_from_numpy_array(store, a, chunk_shape=(100,))
    np.random.seed(42)

    # sorted integer index arrays at two densities
    for density in (0.5, 0.01):
        ix = np.random.choice(a.shape[0], size=int(a.shape[0] * density), replace=True)
        ix.sort()
        _test_get_orthogonal_selection(a, z, ix)

    # every basic selection is also a valid orthogonal selection, plus
    # wraparound and explicitly unsorted integer lists
    extras = [
        [0, 3, 10, -23, -12, -1],  # wraparound
        [3, 105, 23, 127],  # not monotonically increasing
    ]
    for valid in basic_selections_1d + extras:
        _test_get_orthogonal_selection(a, z, valid)

    # out-of-bounds values or multi-dimensional lists raise IndexError
    bad_extras = [
        [a.shape[0] + 1],
        [-(a.shape[0] + 1)],
        [[2, 4], [6, 8]],
    ]
    for bad in basic_selections_1d_bad + bad_extras:
        with pytest.raises(IndexError):
            z.get_orthogonal_selection(bad)  # type: ignore[arg-type]
        with pytest.raises(IndexError):
            z.oindex[bad]  # type: ignore[index]
def _test_get_orthogonal_selection_2d(
    a: npt.NDArray[Any], z: Array, ix0: npt.NDArray[np.bool], ix1: npt.NDArray[np.bool]
) -> None:
    """Run a battery of 2-D orthogonal selections that combine the per-axis
    index arrays ``ix0``/``ix1`` with slices and ints."""
    selections = [
        # index both axes with array
        (ix0, ix1),
        # mixed indexing with array / slice
        (ix0, slice(1, 5)),
        (ix0, slice(1, 5, 2)),
        (slice(250, 350), ix1),
        (slice(250, 350, 10), ix1),
        # mixed indexing with array / int
        (ix0, 4),
        (42, ix1),
    ]
    for selection in selections:
        _test_get_orthogonal_selection(a, z, selection)
# noinspection PyStatementEffect
def test_get_orthogonal_selection_2d(store: StorePath) -> None:
    """Orthogonal selection on a 2-D array with bool and int index arrays.

    NOTE: the drawn selections depend on the exact order of the RNG calls
    below; do not reorder them.
    """
    # setup
    a = np.arange(5400, dtype=int).reshape(600, 9)
    z = zarr_array_from_numpy_array(store, a, chunk_shape=(300, 3))
    np.random.seed(42)
    # test with different degrees of sparseness
    for p in 0.5, 0.01:
        # boolean arrays
        ix0 = np.random.binomial(1, p, size=a.shape[0]).astype(bool)
        ix1 = np.random.binomial(1, 0.5, size=a.shape[1]).astype(bool)
        _test_get_orthogonal_selection_2d(a, z, ix0, ix1)
        # mixed int array / bool array
        selections = (
            (ix0, np.nonzero(ix1)[0]),
            (np.nonzero(ix0)[0], ix1),
        )
        for selection in selections:
            _test_get_orthogonal_selection(a, z, selection)
        # sorted integer arrays
        ix0 = np.random.choice(a.shape[0], size=int(a.shape[0] * p), replace=True)
        ix1 = np.random.choice(a.shape[1], size=int(a.shape[1] * 0.5), replace=True)
        ix0.sort()
        ix1.sort()
        _test_get_orthogonal_selection_2d(a, z, ix0, ix1)
    # basic selections are also valid orthogonal selections
    for selection_2d in basic_selections_2d:
        _test_get_orthogonal_selection(a, z, selection_2d)
    for selection_2d_bad in basic_selections_2d_bad:
        with pytest.raises(IndexError):
            z.get_orthogonal_selection(selection_2d_bad)  # type: ignore[arg-type]
        with pytest.raises(IndexError):
            z.oindex[selection_2d_bad]  # type: ignore[index]
def _test_get_orthogonal_selection_3d(
    a: npt.NDArray,
    z: Array,
    ix0: npt.NDArray[np.bool],
    ix1: npt.NDArray[np.bool],
    ix2: npt.NDArray[np.bool],
) -> None:
    """Run a battery of 3-D orthogonal selections that combine the per-axis
    index arrays ``ix0``/``ix1``/``ix2`` with slices and ints."""
    selections = [
        # single value
        (60, 15, 4),
        (-1, -1, -1),
        # index all axes with array
        (ix0, ix1, ix2),
        # mixed indexing with single array / slices
        (ix0, slice(10, 20), slice(1, 5)),
        (slice(30, 50), ix1, slice(1, 5)),
        (slice(30, 50), slice(10, 20), ix2),
        (ix0, slice(10, 20, 5), slice(1, 5, 2)),
        (slice(30, 50, 3), ix1, slice(1, 5, 2)),
        (slice(30, 50, 3), slice(10, 20, 5), ix2),
        # mixed indexing with single array / ints
        (ix0, 15, 4),
        (60, ix1, 4),
        (60, 15, ix2),
        # mixed indexing with single array / slice / int
        (ix0, slice(10, 20), 4),
        (15, ix1, slice(1, 5)),
        (slice(30, 50), 15, ix2),
        # mixed indexing with two array / slice
        (ix0, ix1, slice(1, 5)),
        (slice(30, 50), ix1, ix2),
        (ix0, slice(10, 20), ix2),
        # mixed indexing with two array / integer
        (ix0, ix1, 4),
        (15, ix1, ix2),
        (ix0, 15, ix2),
    ]
    for selection in selections:
        _test_get_orthogonal_selection(a, z, selection)
def test_get_orthogonal_selection_3d(store: StorePath) -> None:
    """Orthogonal selection on a 3-D array with bool and int index arrays."""
    a = np.arange(32400, dtype=int).reshape(120, 30, 9)
    z = zarr_array_from_numpy_array(store, a, chunk_shape=(60, 20, 3))
    np.random.seed(42)

    for density in (0.5, 0.01):
        # boolean masks, one per axis
        ix0 = np.random.binomial(1, density, size=a.shape[0]).astype(bool)
        ix1 = np.random.binomial(1, 0.5, size=a.shape[1]).astype(bool)
        ix2 = np.random.binomial(1, 0.5, size=a.shape[2]).astype(bool)
        _test_get_orthogonal_selection_3d(a, z, ix0, ix1, ix2)

        # sorted integer index arrays, one per axis
        ix0 = np.random.choice(a.shape[0], size=int(a.shape[0] * density), replace=True)
        ix1 = np.random.choice(a.shape[1], size=int(a.shape[1] * 0.5), replace=True)
        ix2 = np.random.choice(a.shape[2], size=int(a.shape[2] * 0.5), replace=True)
        for ix in (ix0, ix1, ix2):
            ix.sort()
        _test_get_orthogonal_selection_3d(a, z, ix0, ix1, ix2)
def test_orthogonal_indexing_edge_cases(store: StorePath) -> None:
    """Mixing an int, a full slice, and a full-length index list along one axis."""
    a = np.arange(6).reshape(1, 2, 3)
    z = zarr_array_from_numpy_array(store, a, chunk_shape=(1, 2, 3))

    # integer-array selection covering the whole last axis
    assert_array_equal(oindex(a, (0, slice(None), [0, 1, 2])), z.oindex[0, :, [0, 1, 2]])
    # all-True boolean selection over the last axis
    assert_array_equal(
        oindex(a, (0, slice(None), [True, True, True])), z.oindex[0, :, [True, True, True]]
    )
def _test_set_orthogonal_selection(
    v: npt.NDArray[np.int_], a: npt.NDArray[Any], z: Array, selection: OrthogonalSelection
) -> None:
    """Write a scalar, array, and list value into ``z[selection]`` via both
    setter APIs, comparing against numpy ``oindex_set`` on ``a``."""
    for value in 42, oindex(v, selection), oindex(v, selection).tolist():
        if isinstance(value, list) and value == []:
            # an empty list cannot preserve dimensionality; skip it
            continue
        # numpy reference result
        a[:] = 0
        oindex_set(a, selection, value)
        # long-form API
        z[:] = 0
        z.set_orthogonal_selection(selection, value)
        assert_array_equal(a, z[:])
        # .oindex shorthand
        z[:] = 0
        z.oindex[selection] = value
        assert_array_equal(a, z[:])
def test_set_orthogonal_selection_1d(store: StorePath) -> None:
    """Orthogonal __setitem__ on a 1-D array with bool/int arrays and slices."""
    v = np.arange(550, dtype=int)
    a = np.empty(v.shape, dtype=int)
    z = zarr_array_from_numpy_array(store, a, chunk_shape=(100,))
    np.random.seed(42)

    for density in (0.5, 0.01):
        # boolean mask
        mask = np.random.binomial(1, density, size=a.shape[0]).astype(bool)
        _test_set_orthogonal_selection(v, a, z, mask)
        # sorted integer index array
        ix = np.random.choice(a.shape[0], size=int(a.shape[0] * density), replace=True)
        ix.sort()
        _test_set_orthogonal_selection(v, a, z, ix)

    # every basic selection is also a valid orthogonal selection
    for selection in basic_selections_1d:
        _test_set_orthogonal_selection(v, a, z, selection)
def test_set_item_1d_last_two_chunks(store: StorePath) -> None:
    """Regression test for GH2849: writing a slice spanning the last two chunks."""
    g = zarr.open_group(store=store, zarr_format=3, mode="w")
    a = g.create_array("bar", shape=(10,), chunks=(3,), dtype=int)
    data = np.array([7, 8, 9])
    # slice(7, 10) covers the tail of chunk 2 and all of chunk 3
    a[slice(7, 10)] = data
    np.testing.assert_array_equal(a[slice(7, 10)], data)
    # NOTE(review): the checks below exercise writes to a 0-d array and look
    # unrelated to the chunk regression above; possibly they belong in a
    # separate test.
    z = zarr.open_group(store=store, mode="w")
    z.create_array("zoo", dtype=float, shape=())
    z["zoo"][...] = np.array(1)  # why doesn't [:] work?
    np.testing.assert_equal(z["zoo"][()], np.array(1))
    z = zarr.open_group(store=store, mode="w")
    z.create_array("zoo", dtype=float, shape=())
    z["zoo"][...] = 1  # why doesn't [:] work?
    np.testing.assert_equal(z["zoo"][()], np.array(1))
def _test_set_orthogonal_selection_2d(
    v: npt.NDArray[np.int_],
    a: npt.NDArray[np.int_],
    z: Array,
    ix0: npt.NDArray[np.bool],
    ix1: npt.NDArray[np.bool],
) -> None:
    """Run a battery of 2-D orthogonal __setitem__ selections combining the
    per-axis index arrays ``ix0``/``ix1`` with slices and ints."""
    selections = [
        # index both axes with array
        (ix0, ix1),
        # mixed indexing with array / slice or int
        (ix0, slice(1, 5)),
        (slice(250, 350), ix1),
        (ix0, 4),
        (42, ix1),
    ]
    for selection in selections:
        _test_set_orthogonal_selection(v, a, z, selection)
def test_set_orthogonal_selection_2d(store: StorePath) -> None:
    """Orthogonal __setitem__ on a 2-D array with bool/int index arrays.

    NOTE: the drawn selections depend on the exact order of the RNG calls
    below; do not reorder them.
    """
    # setup
    v = np.arange(5400, dtype=int).reshape(600, 9)
    a = np.empty_like(v)
    z = zarr_array_from_numpy_array(store, a, chunk_shape=(300, 3))
    np.random.seed(42)
    # test with different degrees of sparseness
    for p in 0.5, 0.01:
        # boolean arrays
        ix0 = np.random.binomial(1, p, size=a.shape[0]).astype(bool)
        ix1 = np.random.binomial(1, 0.5, size=a.shape[1]).astype(bool)
        _test_set_orthogonal_selection_2d(v, a, z, ix0, ix1)
        # sorted integer arrays
        ix0 = np.random.choice(a.shape[0], size=int(a.shape[0] * p), replace=True)
        ix1 = np.random.choice(a.shape[1], size=int(a.shape[1] * 0.5), replace=True)
        ix0.sort()
        ix1.sort()
        _test_set_orthogonal_selection_2d(v, a, z, ix0, ix1)
    # basic selections are also valid orthogonal selections
    for selection in basic_selections_2d:
        _test_set_orthogonal_selection(v, a, z, selection)
def _test_set_orthogonal_selection_3d(
    v: npt.NDArray[np.int_],
    a: npt.NDArray[np.int_],
    z: Array,
    ix0: npt.NDArray[np.bool],
    ix1: npt.NDArray[np.bool],
    ix2: npt.NDArray[np.bool],
) -> None:
    """Run a battery of 3-D orthogonal __setitem__ selections combining the
    per-axis index arrays ``ix0``/``ix1``/``ix2`` with slices and ints."""
    selections = (
        # single value
        (60, 15, 4),
        (-1, -1, -1),
        # index all axes with bool array
        (ix0, ix1, ix2),
        # mixed indexing with single bool array / slice or int
        (ix0, slice(10, 20), slice(1, 5)),
        (slice(30, 50), ix1, slice(1, 5)),
        (slice(30, 50), slice(10, 20), ix2),
        (ix0, 15, 4),
        (60, ix1, 4),
        (60, 15, ix2),
        (ix0, slice(10, 20), 4),
        (slice(30, 50), ix1, 4),
        (slice(30, 50), 15, ix2),
        # indexing with two arrays / slice
        (ix0, ix1, slice(1, 5)),
        # indexing with two arrays / integer
        (ix0, ix1, 4),
    )
    for selection in selections:
        _test_set_orthogonal_selection(v, a, z, selection)
def test_set_orthogonal_selection_3d(store: StorePath) -> None:
    """Orthogonal __setitem__ on a 3-D array with bool/int index arrays.

    NOTE: the drawn selections depend on the exact order of the RNG calls
    below; do not reorder them.
    """
    # setup
    v = np.arange(32400, dtype=int).reshape(120, 30, 9)
    a = np.empty_like(v)
    z = zarr_array_from_numpy_array(store, a, chunk_shape=(60, 20, 3))
    np.random.seed(42)
    # test with different degrees of sparseness
    for p in 0.5, 0.01:
        # boolean arrays
        ix0 = np.random.binomial(1, p, size=a.shape[0]).astype(bool)
        ix1 = np.random.binomial(1, 0.5, size=a.shape[1]).astype(bool)
        ix2 = np.random.binomial(1, 0.5, size=a.shape[2]).astype(bool)
        _test_set_orthogonal_selection_3d(v, a, z, ix0, ix1, ix2)
        # sorted integer arrays
        ix0 = np.random.choice(a.shape[0], size=int(a.shape[0] * p), replace=True)
        ix1 = np.random.choice(a.shape[1], size=int(a.shape[1] * 0.5), replace=True)
        ix2 = np.random.choice(a.shape[2], size=int(a.shape[2] * 0.5), replace=True)
        ix0.sort()
        ix1.sort()
        ix2.sort()
        _test_set_orthogonal_selection_3d(v, a, z, ix0, ix1, ix2)
def test_orthogonal_indexing_fallback_on_get_setitem(store: StorePath) -> None:
    """Fancy get/set via __getitem__/__setitem__ falls back to pointwise indexing.

    NOTE(review): this is an exact duplicate of
    ``test_fancy_indexing_fallback_on_get_setitem`` earlier in this module;
    consider removing one of the two.
    """
    z = zarr_array_from_numpy_array(store, np.zeros((20, 20)))
    z[[1, 2, 3], [1, 2, 3]] = 1
    np.testing.assert_array_equal(
        z[:4, :4],
        [
            [0, 0, 0, 0],
            [0, 1, 0, 0],
            [0, 0, 1, 0],
            [0, 0, 0, 1],
        ],
    )
    np.testing.assert_array_equal(z[[1, 2, 3], [1, 2, 3]], 1)
    # test broadcasting
    np.testing.assert_array_equal(z[1, [1, 2, 3]], [1, 0, 0])
    # test 1D fancy indexing
    z2 = zarr_array_from_numpy_array(store, np.zeros(5))
    z2[[1, 2, 3]] = 1
    np.testing.assert_array_equal(z2[:], [0, 1, 1, 1, 0])
def _test_get_coordinate_selection(
    a: npt.NDArray, z: Array, selection: CoordinateSelection
) -> None:
    """Compare coordinate selection on ``z`` against numpy fancy indexing on ``a``."""
    expected = a[selection]
    # long-form API
    assert_array_equal(expected, z.get_coordinate_selection(selection))
    # .vindex shorthand
    assert_array_equal(expected, z.vindex[selection])
# Selections that must raise IndexError for coordinate (pointwise) indexing:
# slices and Ellipsis are not valid coordinate selections.
coordinate_selections_1d_bad = [
    # slice not supported
    slice(5, 15),
    slice(None),
    Ellipsis,
    # bad stuff
    2.3,
    "foo",
    b"xxx",
    None,
    (0, 0),
    (slice(None), slice(None)),
]
# noinspection PyStatementEffect
def test_get_coordinate_selection_1d(store: StorePath) -> None:
    """Coordinate (pointwise) selection on a 1-D array.

    NOTE: the drawn selections depend on the exact order of the RNG calls
    below; do not reorder them.
    """
    # setup
    a = np.arange(1050, dtype=int)
    z = zarr_array_from_numpy_array(store, a, chunk_shape=(100,))
    np.random.seed(42)
    # test with different degrees of sparseness (p > 1 forces duplicates)
    for p in 2, 0.5, 0.1, 0.01:
        n = int(a.size * p)
        ix = np.random.choice(a.shape[0], size=n, replace=True)
        _test_get_coordinate_selection(a, z, ix)
        ix.sort()
        _test_get_coordinate_selection(a, z, ix)
        ix = ix[::-1]
        _test_get_coordinate_selection(a, z, ix)
    selections = [
        # test single item
        42,
        -1,
        # test wraparound
        [0, 3, 10, -23, -12, -1],
        # test out of order
        [3, 105, 23, 127],  # not monotonically increasing
        # test multi-dimensional selection
        np.array([[2, 4], [6, 8]]),
    ]
    for selection in selections:
        _test_get_coordinate_selection(a, z, selection)
    # test errors
    bad_selections = coordinate_selections_1d_bad + [
        [a.shape[0] + 1],  # out of bounds
        [-(a.shape[0] + 1)],  # out of bounds
    ]
    for selection in bad_selections:
        with pytest.raises(IndexError):
            z.get_coordinate_selection(selection)  # type: ignore[arg-type]
        with pytest.raises(IndexError):
            z.vindex[selection]  # type: ignore[index]
def test_get_coordinate_selection_2d(store: StorePath) -> None:
    """Coordinate (pointwise) selection on a 2-D array.

    Also verifies that slices and Ellipsis are rejected as coordinate
    selections.
    """
    # setup
    a = np.arange(10000, dtype=int).reshape(1000, 10)
    z = zarr_array_from_numpy_array(store, a, chunk_shape=(300, 3))
    np.random.seed(42)
    ix0: npt.ArrayLike
    ix1: npt.ArrayLike
    # test with different degrees of sparseness (p > 1 forces duplicates)
    for p in 2, 0.5, 0.1, 0.01:
        n = int(a.size * p)
        ix0 = np.random.choice(a.shape[0], size=n, replace=True)
        ix1 = np.random.choice(a.shape[1], size=n, replace=True)
        selections = [
            # single value
            (42, 4),
            (-1, -1),
            # index both axes with array
            (ix0, ix1),
            # mixed indexing with array / int
            (ix0, 4),
            (42, ix1),
            (42, 4),
        ]
        for selection in selections:
            _test_get_coordinate_selection(a, z, selection)
    # not monotonically increasing (first dim)
    ix0 = [3, 3, 4, 2, 5]
    ix1 = [1, 3, 5, 7, 9]
    _test_get_coordinate_selection(a, z, (ix0, ix1))
    # not monotonically increasing (second dim)
    ix0 = [1, 1, 2, 2, 5]
    ix1 = [1, 3, 2, 1, 0]
    _test_get_coordinate_selection(a, z, (ix0, ix1))
    # multi-dimensional selection
    ix0 = np.array([[1, 1, 2], [2, 2, 5]])
    ix1 = np.array([[1, 3, 2], [1, 0, 0]])
    _test_get_coordinate_selection(a, z, (ix0, ix1))
    # slices and Ellipsis are not valid coordinate selections
    selection = slice(5, 15), [1, 2, 3]
    with pytest.raises(IndexError):
        z.get_coordinate_selection(selection)  # type:ignore[arg-type]
    selection = [1, 2, 3], slice(5, 15)
    with pytest.raises(IndexError):
        z.get_coordinate_selection(selection)  # type:ignore[arg-type]
    selection = Ellipsis, [1, 2, 3]
    with pytest.raises(IndexError):
        z.get_coordinate_selection(selection)  # type:ignore[arg-type]
    selection = Ellipsis
    with pytest.raises(IndexError):
        z.get_coordinate_selection(selection)  # type:ignore[arg-type]
def _test_set_coordinate_selection(
    v: npt.NDArray, a: npt.NDArray, z: Array, selection: CoordinateSelection
) -> None:
    """Write a scalar, array, and list value through both coordinate-setter
    APIs, comparing against numpy fancy assignment on ``a``."""
    for value in (42, v[selection], v[selection].tolist()):
        # numpy reference result
        a[:] = 0
        a[selection] = value
        # long-form API
        z[:] = 0
        z.set_coordinate_selection(selection, value)
        assert_array_equal(a, z[:])
        # .vindex shorthand
        z[:] = 0
        z.vindex[selection] = value
        assert_array_equal(a, z[:])
def test_set_coordinate_selection_1d(store: StorePath) -> None:
    """Coordinate __setitem__ on a 1-D array, incl. multi-dimensional index arrays."""
    v = np.arange(550, dtype=int)
    a = np.empty(v.shape, dtype=v.dtype)
    z = zarr_array_from_numpy_array(store, a, chunk_shape=(100,))
    np.random.seed(42)

    for density in (0.5, 0.01):
        ix = np.random.choice(a.shape[0], size=int(a.size * density), replace=True)
        _test_set_coordinate_selection(v, a, z, ix)

    # a multi-dimensional index array selects pointwise, keeping its shape
    ix = np.array([[2, 4], [6, 8]])
    _test_set_coordinate_selection(v, a, z, ix)

    # invalid selections raise through both setter APIs
    for bad in coordinate_selections_1d_bad:
        with pytest.raises(IndexError):
            z.set_coordinate_selection(bad, 42)  # type:ignore[arg-type]
        with pytest.raises(IndexError):
            z.vindex[bad] = 42  # type:ignore[index]
def test_set_coordinate_selection_2d(store: StorePath) -> None:
    """Coordinate (pointwise) __setitem__ on a 2-D array.

    NOTE: the drawn selections depend on the exact order of the RNG calls
    below; do not reorder them.
    """
    # setup
    v = np.arange(5400, dtype=int).reshape(600, 9)
    a = np.empty_like(v)
    z = zarr_array_from_numpy_array(store, a, chunk_shape=(300, 3))
    np.random.seed(42)
    # test with different degrees of sparseness
    for p in 0.5, 0.01:
        n = int(a.size * p)
        ix0 = np.random.choice(a.shape[0], size=n, replace=True)
        ix1 = np.random.choice(a.shape[1], size=n, replace=True)
        selections = (
            (42, 4),
            (-1, -1),
            # index both axes with array
            (ix0, ix1),
            # mixed indexing with array / int
            (ix0, 4),
            (42, ix1),
        )
        for selection in selections:
            _test_set_coordinate_selection(v, a, z, selection)
    # multi-dimensional selection
    ix0 = np.array([[1, 2, 3], [4, 5, 6]])
    ix1 = np.array([[1, 3, 2], [2, 0, 5]])
    _test_set_coordinate_selection(v, a, z, (ix0, ix1))
def _test_get_block_selection(
    a: npt.NDArray[Any],
    z: Array,
    selection: BasicSelection,
    expected_idx: slice | tuple[slice, ...],
) -> None:
    """Check that block selection ``selection`` equals ``a[expected_idx]``."""
    expected = a[expected_idx]
    # long-form API
    assert_array_equal(expected, z.get_block_selection(selection))
    # .blocks shorthand
    assert_array_equal(expected, z.blocks[selection])
# Valid 1-D block (chunk) selections for an array of 1050 elements with
# chunk shape (100,), i.e. 11 chunks indexed 0..10.
block_selections_1d: list[BasicSelection] = [
    # test single item
    0,
    5,
    # test wraparound
    -1,
    -4,
    # test slice
    slice(5),
    slice(None, 3),
    slice(5, 6),
    slice(-3, -1),
    slice(None),  # Full slice
]
# Array slices corresponding positionally (1:1) to ``block_selections_1d``.
block_selections_1d_array_projection: list[slice] = [
    # test single item
    slice(100),
    slice(500, 600),
    # test wraparound
    slice(1000, None),
    slice(700, 800),
    # test slice
    slice(500),
    slice(None, 300),
    slice(500, 600),
    slice(800, 1000),
    slice(None),
]
# Block selections that must raise IndexError.
block_selections_1d_bad = [
    # slice with step not supported
    slice(3, 8, 2),
    # bad stuff
    2.3,
    # "foo", # TODO
    b"xxx",
    None,
    (0, 0),  # too many dimensions for a 1-D array
    (slice(None), slice(None)),  # too many dimensions for a 1-D array
    [0, 5, 3],  # integer arrays not supported
]
def test_get_block_selection_1d(store: StorePath) -> None:
    """Block (chunk-wise) selection on a 1-D array."""
    a = np.arange(1050, dtype=int)
    z = zarr_array_from_numpy_array(store, a, chunk_shape=(100,))

    # each block selection maps onto a known slice of the underlying array
    for selection, expected_idx in zip(
        block_selections_1d, block_selections_1d_array_projection, strict=True
    ):
        _test_get_block_selection(a, z, selection, expected_idx)

    # invalid selections, including out-of-bounds chunk indices
    nchunks = z.metadata.chunk_grid.get_nchunks(z.shape)
    for bad in block_selections_1d_bad + [nchunks + 1, -(nchunks + 1)]:
        with pytest.raises(IndexError):
            z.get_block_selection(bad)  # type:ignore[arg-type]
        with pytest.raises(IndexError):
            z.blocks[bad]  # type:ignore[index]
# Valid 2-D block (chunk) selections for a (1000, 10) array with chunk shape
# (300, 3), i.e. a 4 x 4 chunk grid.
block_selections_2d: list[BasicSelection] = [
    # test single item
    (0, 0),
    (1, 2),
    # test wraparound
    (-1, -1),
    (-3, -2),
    # test slice
    (slice(1), slice(2)),
    (slice(None, 2), slice(-2, -1)),
    (slice(2, 3), slice(-2, None)),
    (slice(-3, -1), slice(-3, -2)),
    (slice(None), slice(None)),  # Full slice
]
# Array slices corresponding positionally (1:1) to ``block_selections_2d``.
block_selections_2d_array_projection: list[tuple[slice, slice]] = [
    # test single item
    (slice(300), slice(3)),
    (slice(300, 600), slice(6, 9)),
    # test wraparound
    (slice(900, None), slice(9, None)),
    (slice(300, 600), slice(6, 9)),
    # test slice
    (slice(300), slice(6)),
    (slice(None, 600), slice(6, 9)),
    (slice(600, 900), slice(6, None)),
    (slice(300, 900), slice(3, 6)),
    (slice(None), slice(None)),  # Full slice
]
def test_get_block_selection_2d(store: StorePath) -> None:
    """Block (chunk-wise) selection on a 2-D array."""
    a = np.arange(10000, dtype=int).reshape(1000, 10)
    z = zarr_array_from_numpy_array(store, a, chunk_shape=(300, 3))

    # each block selection maps onto a known slice of the underlying array
    for selection, expected_idx in zip(
        block_selections_2d, block_selections_2d_array_projection, strict=True
    ):
        _test_get_block_selection(a, z, selection, expected_idx)

    # integer arrays, Ellipsis, and out-of-bounds slices are all rejected
    for bad in (
        (slice(5, 15), [1, 2, 3]),
        (Ellipsis, [1, 2, 3]),
        (slice(15, 20), slice(None)),  # out of bounds
    ):
        with pytest.raises(IndexError):
            z.get_block_selection(bad)
def _test_set_block_selection(
    v: npt.NDArray[Any],
    a: npt.NDArray[Any],
    z: zarr.Array,
    selection: BasicSelection,
    # widened from ``slice``: the 2-D tests pass tuples of slices, matching
    # the annotation on _test_get_block_selection
    expected_idx: slice | tuple[slice, ...],
) -> None:
    """Write a scalar, array, and list value into block ``selection`` of ``z``
    via both setter APIs, comparing against ``a[expected_idx] = value``."""
    for value in 42, v[expected_idx], v[expected_idx].tolist():
        # setup expectation
        a[:] = 0
        a[expected_idx] = value
        # test long-form API
        z[:] = 0
        z.set_block_selection(selection, value)
        assert_array_equal(a, z[:])
        # test short-form API
        z[:] = 0
        z.blocks[selection] = value
        assert_array_equal(a, z[:])
def test_set_block_selection_1d(store: StorePath) -> None:
    """Block-wise writes on a 1D array, including rejection of bad selections."""
    source = np.arange(1050, dtype=int)
    scratch = np.empty(source.shape, dtype=source.dtype)
    arr = zarr_array_from_numpy_array(store, scratch, chunk_shape=(100,))

    pairs = zip(block_selections_1d, block_selections_1d_array_projection, strict=True)
    for block_sel, projected in pairs:
        _test_set_block_selection(source, scratch, arr, block_sel, projected)

    # invalid selections must raise through both APIs
    for bad in block_selections_1d_bad:
        with pytest.raises(IndexError):
            arr.set_block_selection(bad, 42)  # type:ignore[arg-type]
        with pytest.raises(IndexError):
            arr.blocks[bad] = 42  # type:ignore[index]
def test_set_block_selection_2d(store: StorePath) -> None:
    """Block-wise writes on a 2D array, including rejection of bad selections."""
    source = np.arange(10000, dtype=int).reshape(1000, 10)
    scratch = np.empty(source.shape, dtype=source.dtype)
    arr = zarr_array_from_numpy_array(store, scratch, chunk_shape=(300, 3))

    pairs = zip(block_selections_2d, block_selections_2d_array_projection, strict=True)
    for block_sel, projected in pairs:
        _test_set_block_selection(source, scratch, arr, block_sel, projected)

    bad_selections = [
        (slice(5, 15), [1, 2, 3]),  # integer arrays are not valid block selections
        (Ellipsis, [1, 2, 3]),
        (slice(15, 20), slice(None)),  # out of bounds
    ]
    for bad in bad_selections:
        with pytest.raises(IndexError):
            arr.set_block_selection(bad, 42)
def _test_get_mask_selection(a: npt.NDArray[Any], z: Array, selection: npt.NDArray) -> None:
    """Read via a boolean mask through all three APIs and compare with numpy."""
    expect = a[selection]
    accessors = (
        z.get_mask_selection,       # long-form API
        lambda sel: z.vindex[sel],  # vindex short-form
        lambda sel: z[sel],         # plain __getitem__
    )
    for fetch in accessors:
        assert_array_equal(expect, fetch(selection))
# Selections that mask-selection APIs must reject with IndexError in 1D.
mask_selections_1d_bad = [
    # slice not supported
    slice(5, 15),
    slice(None),
    Ellipsis,
    # bad stuff
    2.3,
    "foo",
    b"xxx",
    None,
    (0, 0),
    (slice(None), slice(None)),
]
# noinspection PyStatementEffect
def test_get_mask_selection_1d(store: StorePath) -> None:
    """Boolean-mask reads on a 1D array at several sparsity levels."""
    base = np.arange(1050, dtype=int)
    arr = zarr_array_from_numpy_array(store, base, chunk_shape=(100,))
    np.random.seed(42)

    for density in (0.5, 0.1, 0.01):
        mask = np.random.binomial(1, density, size=base.shape[0]).astype(bool)
        _test_get_mask_selection(base, arr, mask)

    # invalid selections must raise through both APIs
    bad_selections = [
        *mask_selections_1d_bad,
        np.zeros(50, dtype=bool),  # too short
        np.zeros(2000, dtype=bool),  # too long
        [[True, False], [False, True]],  # too many dimensions
    ]
    for bad in bad_selections:
        with pytest.raises(IndexError):
            arr.get_mask_selection(bad)  # type: ignore[arg-type]
        with pytest.raises(IndexError):
            arr.vindex[bad]  # type:ignore[index]
# noinspection PyStatementEffect
def test_get_mask_selection_2d(store: StorePath) -> None:
    """Boolean-mask reads on a 2D array, plus shape-mismatch errors."""
    base = np.arange(10000, dtype=int).reshape(1000, 10)
    arr = zarr_array_from_numpy_array(store, base, chunk_shape=(300, 3))
    np.random.seed(42)

    for density in (0.5, 0.1, 0.01):
        mask = np.random.binomial(1, density, size=base.size).astype(bool).reshape(base.shape)
        _test_get_mask_selection(base, arr, mask)

    # masks whose shape does not match the array must be rejected
    with pytest.raises(IndexError):
        arr.vindex[np.zeros((1000, 5), dtype=bool)]  # too short
    with pytest.raises(IndexError):
        arr.vindex[np.zeros((2000, 10), dtype=bool)]  # too long
    with pytest.raises(IndexError):
        arr.vindex[[True, False]]  # wrong no. dimensions
def _test_set_mask_selection(
    v: npt.NDArray, a: npt.NDArray, z: Array, selection: npt.NDArray
) -> None:
    """Write via a boolean mask through all three APIs and compare with numpy."""
    # numpy reference result
    a[:] = 0
    a[selection] = v[selection]

    writers = (
        lambda: z.set_mask_selection(selection, v[selection]),  # long-form API
        lambda: z.vindex.__setitem__(selection, v[selection]),  # vindex short-form
        lambda: z.__setitem__(selection, v[selection]),         # plain __setitem__
    )
    for write in writers:
        z[:] = 0
        write()
        assert_array_equal(a, z[:])
def test_set_mask_selection_1d(store: StorePath) -> None:
    """Boolean-mask writes on a 1D array, including bad-selection errors."""
    source = np.arange(1050, dtype=int)
    scratch = np.empty_like(source)
    arr = zarr_array_from_numpy_array(store, scratch, chunk_shape=(100,))
    np.random.seed(42)

    for density in (0.5, 0.1, 0.01):
        mask = np.random.binomial(1, density, size=scratch.shape[0]).astype(bool)
        _test_set_mask_selection(source, scratch, arr, mask)

    # invalid selections must raise through both APIs
    for bad in mask_selections_1d_bad:
        with pytest.raises(IndexError):
            arr.set_mask_selection(bad, 42)  # type: ignore[arg-type]
        with pytest.raises(IndexError):
            arr.vindex[bad] = 42  # type: ignore[index]
def test_set_mask_selection_2d(store: StorePath) -> None:
    """Boolean-mask writes on a 2D array at several sparsity levels."""
    source = np.arange(10000, dtype=int).reshape(1000, 10)
    scratch = np.empty_like(source)
    arr = zarr_array_from_numpy_array(store, scratch, chunk_shape=(300, 3))
    np.random.seed(42)

    for density in (0.5, 0.1, 0.01):
        mask = np.random.binomial(1, density, size=scratch.size).astype(bool).reshape(scratch.shape)
        _test_set_mask_selection(source, scratch, arr, mask)
def test_get_selection_out(store: StorePath) -> None:
    """Reads into a preallocated ``out`` buffer match plain reads, for basic,
    orthogonal, and coordinate selections; a non-buffer ``out`` raises TypeError.
    """
    # basic selections
    a = np.arange(1050)
    z = zarr_array_from_numpy_array(store, a, chunk_shape=(100,))
    selections = [
        slice(50, 150),
        slice(0, 1050),
        slice(1, 2),
    ]
    for selection in selections:
        expect = a[selection]
        out = get_ndbuffer_class().from_numpy_array(np.empty(expect.shape))
        z.get_basic_selection(selection, out=out)
        assert_array_equal(expect, out.as_numpy_array()[:])
    # ``out`` must be an NDBuffer, not a plain list
    with pytest.raises(TypeError):
        z.get_basic_selection(Ellipsis, out=[])  # type: ignore[arg-type]
    # orthogonal selections
    a = np.arange(10000, dtype=int).reshape(1000, 10)
    z = zarr_array_from_numpy_array(store, a, chunk_shape=(300, 3))
    np.random.seed(42)
    # test with different degrees of sparseness
    for p in 0.5, 0.1, 0.01:
        ix0 = np.random.binomial(1, p, size=a.shape[0]).astype(bool)
        ix1 = np.random.binomial(1, 0.5, size=a.shape[1]).astype(bool)
        selections = [
            # index both axes with array
            (ix0, ix1),
            # mixed indexing with array / slice
            (ix0, slice(1, 5)),
            (slice(250, 350), ix1),
            # mixed indexing with array / int
            (ix0, 4),
            (42, ix1),
            # mixed int array / bool array
            (ix0, np.nonzero(ix1)[0]),
            (np.nonzero(ix0)[0], ix1),
        ]
        for selection in selections:
            expect = oindex(a, selection)
            out = get_ndbuffer_class().from_numpy_array(np.zeros(expect.shape, dtype=expect.dtype))
            z.get_orthogonal_selection(selection, out=out)
            assert_array_equal(expect, out.as_numpy_array()[:])
    # coordinate selections
    a = np.arange(10000, dtype=int).reshape(1000, 10)
    z = zarr_array_from_numpy_array(store, a, chunk_shape=(300, 3))
    np.random.seed(42)
    # test with different degrees of sparseness
    for p in 0.5, 0.1, 0.01:
        n = int(a.size * p)
        ix0 = np.random.choice(a.shape[0], size=n, replace=True)
        ix1 = np.random.choice(a.shape[1], size=n, replace=True)
        selections = [
            # index both axes with array
            (ix0, ix1),
            # mixed indexing with array / int
            (ix0, 4),
            (42, ix1),
        ]
        for selection in selections:
            expect = a[selection]
            out = get_ndbuffer_class().from_numpy_array(np.zeros(expect.shape, dtype=expect.dtype))
            z.get_coordinate_selection(selection, out=out)
            assert_array_equal(expect, out.as_numpy_array()[:])
@pytest.mark.xfail(reason="fields are not supported in v3")
def test_get_selections_with_fields(store: StorePath) -> None:
    """Reads restricted to structured-dtype fields match numpy across the
    basic/orthogonal/coordinate/mask selection APIs; unknown or non-string
    fields raise IndexError. Marked xfail: fields are not supported in v3.
    """
    a = np.array(
        [("aaa", 1, 4.2), ("bbb", 2, 8.4), ("ccc", 3, 12.6)],
        dtype=[("foo", "S3"), ("bar", "i4"), ("baz", "f8")],
    )
    z = zarr_array_from_numpy_array(store, a, chunk_shape=(2,))
    # single field names and every ordering/subset of field lists
    fields_fixture: list[str | list[str]] = [
        "foo",
        ["foo"],
        ["foo", "bar"],
        ["foo", "baz"],
        ["bar", "baz"],
        ["foo", "bar", "baz"],
        ["bar", "foo"],
        ["baz", "bar", "foo"],
    ]
    for fields in fields_fixture:
        # total selection
        expect = a[fields]
        actual = z.get_basic_selection(Ellipsis, fields=fields)
        assert_array_equal(expect, actual)
        # alternative API (short-form indexing only covers 1- and 2-field cases)
        if isinstance(fields, str):
            actual = z[fields]
            assert_array_equal(expect, actual)
        elif len(fields) == 2:
            actual = z[fields[0], fields[1]]
            assert_array_equal(expect, actual)
        if isinstance(fields, str):
            actual = z[..., fields]
            assert_array_equal(expect, actual)
        elif len(fields) == 2:
            actual = z[..., fields[0], fields[1]]
            assert_array_equal(expect, actual)
        # basic selection with slice
        expect = a[fields][0:2]
        actual = z.get_basic_selection(slice(0, 2), fields=fields)
        assert_array_equal(expect, actual)
        # alternative API
        if isinstance(fields, str):
            actual = z[0:2, fields]
            assert_array_equal(expect, actual)
        elif len(fields) == 2:
            actual = z[0:2, fields[0], fields[1]]
            assert_array_equal(expect, actual)
        # basic selection with single item
        expect = a[fields][1]
        actual = z.get_basic_selection(1, fields=fields)
        assert_array_equal(expect, actual)
        # alternative API
        if isinstance(fields, str):
            actual = z[1, fields]
            assert_array_equal(expect, actual)
        elif len(fields) == 2:
            actual = z[1, fields[0], fields[1]]
            assert_array_equal(expect, actual)
        # orthogonal selection
        ix = [0, 2]
        expect = a[fields][ix]
        actual = z.get_orthogonal_selection(ix, fields=fields)
        assert_array_equal(expect, actual)
        # alternative API
        if isinstance(fields, str):
            actual = z.oindex[ix, fields]
            assert_array_equal(expect, actual)
        elif len(fields) == 2:
            actual = z.oindex[ix, fields[0], fields[1]]
            assert_array_equal(expect, actual)
        # coordinate selection
        ix = [0, 2]
        expect = a[fields][ix]
        actual = z.get_coordinate_selection(ix, fields=fields)
        assert_array_equal(expect, actual)
        # alternative API
        if isinstance(fields, str):
            actual = z.vindex[ix, fields]
            assert_array_equal(expect, actual)
        elif len(fields) == 2:
            actual = z.vindex[ix, fields[0], fields[1]]
            assert_array_equal(expect, actual)
        # mask selection
        ix = [True, False, True]
        expect = a[fields][ix]
        actual = z.get_mask_selection(ix, fields=fields)
        assert_array_equal(expect, actual)
        # alternative API
        if isinstance(fields, str):
            actual = z.vindex[ix, fields]
            assert_array_equal(expect, actual)
        elif len(fields) == 2:
            actual = z.vindex[ix, fields[0], fields[1]]
            assert_array_equal(expect, actual)
    # missing/bad fields
    with pytest.raises(IndexError):
        z.get_basic_selection(Ellipsis, fields=["notafield"])
    with pytest.raises(IndexError):
        z.get_basic_selection(Ellipsis, fields=slice(None))  # type: ignore[arg-type]
@pytest.mark.xfail(reason="fields are not supported in v3")
def test_set_selections_with_fields(store: StorePath) -> None:
    """Writes restricted to structured-dtype fields match numpy; multi-field
    assignment is rejected with IndexError (mirroring numpy). Marked xfail:
    fields are not supported in v3.
    """
    v = np.array(
        [("aaa", 1, 4.2), ("bbb", 2, 8.4), ("ccc", 3, 12.6)],
        dtype=[("foo", "S3"), ("bar", "i4"), ("baz", "f8")],
    )
    a = np.empty_like(v)
    z = zarr_array_from_numpy_array(store, v, chunk_shape=(2,))
    fields_fixture: list[str | list[str]] = [
        "foo",
        [],
        ["foo"],
        ["foo", "bar"],
        ["foo", "baz"],
        ["bar", "baz"],
        ["foo", "bar", "baz"],
        ["bar", "foo"],
        ["baz", "bar", "foo"],
    ]
    for fields in fields_fixture:
        # currently multi-field assignment is not supported in numpy, so we won't support
        # it either
        if isinstance(fields, list) and len(fields) > 1:
            with pytest.raises(IndexError):
                z.set_basic_selection(Ellipsis, v, fields=fields)
            with pytest.raises(IndexError):
                z.set_orthogonal_selection([0, 2], v, fields=fields)  # type: ignore[arg-type]
            with pytest.raises(IndexError):
                z.set_coordinate_selection([0, 2], v, fields=fields)
            with pytest.raises(IndexError):
                z.set_mask_selection([True, False, True], v, fields=fields)  # type: ignore[arg-type]
        else:
            # ``key`` is the numpy-side equivalent of the ``fields`` argument
            if isinstance(fields, list) and len(fields) == 1:
                # work around numpy does not support multi-field assignment even if there
                # is only one field
                key = fields[0]
            elif isinstance(fields, list) and len(fields) == 0:
                # work around numpy ambiguity about what is a field selection
                key = Ellipsis
            else:
                key = fields
            # setup expectation
            a[:] = ("", 0, 0)
            z[:] = ("", 0, 0)
            assert_array_equal(a, z[:])
            a[key] = v[key]
            # total selection
            z.set_basic_selection(Ellipsis, v[key], fields=fields)
            assert_array_equal(a, z[:])
            # basic selection with slice
            a[:] = ("", 0, 0)
            z[:] = ("", 0, 0)
            a[key][0:2] = v[key][0:2]
            z.set_basic_selection(slice(0, 2), v[key][0:2], fields=fields)
            assert_array_equal(a, z[:])
            # orthogonal selection
            a[:] = ("", 0, 0)
            z[:] = ("", 0, 0)
            ix = [0, 2]
            a[key][ix] = v[key][ix]
            z.set_orthogonal_selection(ix, v[key][ix], fields=fields)
            assert_array_equal(a, z[:])
            # coordinate selection
            a[:] = ("", 0, 0)
            z[:] = ("", 0, 0)
            ix = [0, 2]
            a[key][ix] = v[key][ix]
            z.set_coordinate_selection(ix, v[key][ix], fields=fields)
            assert_array_equal(a, z[:])
            # mask selection
            a[:] = ("", 0, 0)
            z[:] = ("", 0, 0)
            ix = [True, False, True]
            a[key][ix] = v[key][ix]
            z.set_mask_selection(ix, v[key][ix], fields=fields)
            assert_array_equal(a, z[:])
def test_slice_selection_uints() -> None:
    """A numpy unsigned-integer index is accepted by make_slice_selection and
    selects a single row."""
    data = np.arange(24).reshape((4, 6))
    selection = make_slice_selection((np.uint64(3),))
    assert data[tuple(selection)].shape == (1, 6)
def test_numpy_int_indexing(store: StorePath) -> None:
    """Indexing with a numpy integer scalar behaves like a plain-int index."""
    base = np.arange(1050)
    arr = zarr_array_from_numpy_array(store, base, chunk_shape=(100,))
    for idx in (42, np.int64(42)):
        assert base[idx] == arr[idx]
@pytest.mark.parametrize(
    ("shape", "chunks", "ops"),
    [
        # 1D test cases
        ((1070,), (50,), [("__getitem__", (slice(200, 400),))]),
        ((1070,), (50,), [("__getitem__", (slice(200, 400, 100),))]),
        (
            (1070,),
            (50,),
            [
                ("__getitem__", (slice(200, 400),)),
                ("__setitem__", (slice(200, 400, 100),)),
            ],
        ),
        # 2D test cases
        (
            (40, 50),
            (5, 8),
            [
                ("__getitem__", (slice(6, 37, 13), (slice(4, 10)))),
                ("__setitem__", (slice(None), (slice(None)))),
            ],
        ),
    ],
)
async def test_accessed_chunks(
    shape: tuple[int, ...], chunks: tuple[int, ...], ops: list[tuple[str, tuple[slice, ...]]]
) -> None:
    """Only the chunks overlapped by a selection are accessed by basic
    selection operations.

    Parameters:
        shape: array shape.
        chunks: chunk size per dimension.
        ops: list of ``(optype, slices)`` where ``optype`` is ``"__getitem__"``
            or ``"__setitem__"`` and ``slices`` has one entry per dimension.
    """
    # Use a counting dict as the backing store so we can track the items access
    store = await CountingDict.open()
    z = zarr_array_from_numpy_array(StorePath(store), np.zeros(shape), chunk_shape=chunks)
    for ii, (optype, slices) in enumerate(ops):
        # Resolve the slices into the accessed chunks for each dimension
        chunks_per_dim: list[np.ndarray] = []
        for N, C, sl in zip(shape, chunks, slices, strict=True):
            chunk_ind = np.arange(N, dtype=int)[sl] // C
            chunks_per_dim.append(np.unique(chunk_ind))
        # Combine and generate the cartesian product to determine the chunks keys that
        # will be accessed
        chunks_accessed = [".".join(map(str, comb)) for comb in itertools.product(*chunks_per_dim)]
        counts_before = store.counter.copy()
        # Perform the operation
        if optype == "__getitem__":
            z[slices]
        else:
            z[slices] = ii
        # Get the change in counts
        delta_counts = store.counter - counts_before
        # Check that the access counts for the operation have increased by one for all
        # the chunks we expect to be included
        for ci in chunks_accessed:
            assert delta_counts.pop((optype, ci)) == 1
            # If the chunk was partially written to it will also have been read once. We
            # don't determine if the chunk was actually partial here, just that the
            # counts are consistent that this might have happened
            if optype == "__setitem__":
                assert ("__getitem__", ci) not in delta_counts or delta_counts.pop(
                    ("__getitem__", ci)
                ) == 1
        # Check that no other chunks were accessed
        assert len(delta_counts) == 0
@pytest.mark.parametrize(
    "selection",
    [
        # basic selection
        [...],
        [1, ...],
        [slice(None)],
        [1, 3],
        [[1, 2, 3], 9],
        [np.arange(1000)],
        [slice(5, 15)],
        [slice(2, 4), 4],
        [[1, 3]],
        # mask selection
        [np.tile([True, False], (1000, 5))],
        [np.full((1000, 10), False)],
        # coordinate selection
        [[1, 2, 3, 4], [5, 6, 7, 8]],
        [[100, 200, 300], [4, 5, 6]],
    ],
)
def test_indexing_equals_numpy(store: StorePath, selection: Selection) -> None:
    """``z[selection]`` matches ``a[selection]`` for basic, mask, and
    coordinate selections on a 2D array."""
    a = np.arange(10000, dtype=int).reshape(1000, 10)
    z = zarr_array_from_numpy_array(store, a, chunk_shape=(300, 3))
    # note: in python 3.10 a[*selection] is not valid unpacking syntax
    expected = a[*selection,]
    actual = z[*selection,]
    assert_array_equal(expected, actual, err_msg=f"selection: {selection}")
@pytest.mark.parametrize(
    "selection",
    [
        [np.tile([True, False], 500), np.tile([True, False], 5)],
        [np.full(1000, False), np.tile([True, False], 5)],
        [np.full(1000, True), np.full(10, True)],
        [np.full(1000, True), [True, False] * 5],
    ],
)
def test_orthogonal_bool_indexing_like_numpy_ix(
    store: StorePath, selection: list[npt.ArrayLike]
) -> None:
    """One boolean mask per axis selects the outer product of the masked
    coordinates, like ``np.ix_`` does in numpy."""
    a = np.arange(10000, dtype=int).reshape(1000, 10)
    z = zarr_array_from_numpy_array(store, a, chunk_shape=(300, 3))
    expected = a[np.ix_(*selection)]
    # note: in python 3.10 z[*selection] is not valid unpacking syntax
    actual = z[*selection,]
    assert_array_equal(expected, actual, err_msg=f"{selection=}")
@pytest.mark.parametrize("ndim", [1, 2, 3])
@pytest.mark.parametrize("origin_0d", [None, (0,), (1,)])
@pytest.mark.parametrize("selection_shape_0d", [None, (2,), (3,)])
def test_iter_grid(
    ndim: int, origin_0d: tuple[int] | None, selection_shape_0d: tuple[int] | None
) -> None:
    """
    Test that iter_grid works as expected for 1, 2, and 3 dimensions.

    The 1-element ``origin_0d`` / ``selection_shape_0d`` parameters are
    broadcast across all dimensions; ``None`` exercises the keyword defaults.
    """
    grid_shape = (10, 5, 7)[:ndim]
    # broadcast the origin across every dimension; None means "start at 0"
    if origin_0d is not None:
        origin_kwarg = origin_0d * ndim
        origin = origin_kwarg
    else:
        origin_kwarg = None
        origin = (0,) * ndim
    # broadcast the selection shape; None means "to the end of the grid"
    if selection_shape_0d is not None:
        selection_shape_kwarg = selection_shape_0d * ndim
        selection_shape = selection_shape_kwarg
    else:
        selection_shape_kwarg = None
        selection_shape = tuple(gs - o for gs, o in zip(grid_shape, origin, strict=False))
    observed = tuple(
        _iter_grid(grid_shape, origin=origin_kwarg, selection_shape=selection_shape_kwarg)
    )
    # generate a numpy array of indices, and index it
    coord_array = np.array(list(itertools.product(*[range(s) for s in grid_shape]))).reshape(
        (*grid_shape, ndim)
    )
    coord_array_indexed = coord_array[
        tuple(slice(o, o + s, 1) for o, s in zip(origin, selection_shape, strict=False))
        + (range(ndim),)
    ]
    expected = tuple(map(tuple, coord_array_indexed.reshape(-1, ndim).tolist()))
    assert observed == expected
def test_iter_grid_invalid() -> None:
    """A selection_shape extending past grid_shape + origin must raise IndexError."""
    grid_shape, origin, selection_shape = (5,), (0,), (10,)
    with pytest.raises(IndexError):
        list(_iter_grid(grid_shape, origin=origin, selection_shape=selection_shape))
def test_indexing_with_zarr_array(store: StorePath) -> None:
    """Zarr arrays themselves are accepted as index arrays.

    Regression test for https://github.com/zarr-developers/zarr-python/issues/2133
    """
    base = np.arange(10)
    data = zarr.array(base, chunks=2, store=store, path="a")
    bool_idx = [False, True, False, True, False, True, False, True, False, True]
    int_idx = [0, 2, 4, 5]
    z_bool = zarr.array(bool_idx, chunks=2, store=store, dtype="bool", path="ix")
    z_int = zarr.array(int_idx, chunks=2, store=store, dtype="i4", path="ii")

    # boolean mask through all three access paths
    assert_array_equal(base[bool_idx], data[z_bool])
    assert_array_equal(base[bool_idx], data.oindex[z_bool])
    assert_array_equal(base[bool_idx], data.vindex[z_bool])

    # integer fancy indexing
    assert_array_equal(base[int_idx], data[z_int])
    assert_array_equal(base[int_idx], data.oindex[z_int])
@pytest.mark.parametrize("store", ["local", "memory"], indirect=["store"])
@pytest.mark.parametrize("shape", [(0, 2, 3), (0), (3, 0)])
def test_zero_sized_chunks(store: StorePath, shape: list[int]) -> None:
    """An array with a zero-length dimension holds no elements: writing to it
    stores nothing and reading back yields an empty array.

    NOTE(review): ``(0)`` in the parametrize list is the int ``0``, not the
    1-tuple ``(0,)`` — presumably intended as a 1D empty shape; confirm zarr
    normalizes an integer shape the same way.
    """
    z = zarr.create_array(store=store, shape=shape, chunks=shape, zarr_format=3, dtype="f8")
    z[...] = 42  # no elements exist, so this write is a no-op
    assert_array_equal(z[...], np.zeros(shape, dtype="f8"))
@pytest.mark.parametrize("store", ["memory"], indirect=["store"])
def test_vectorized_indexing_incompatible_shape(store: StorePath) -> None:
    """Assigning a value whose shape does not match a coordinate (vectorized)
    selection raises ValueError.

    Regression test for GH2469.
    """
    shape = (4, 4)
    chunks = (2, 2)
    fill_value = 32767
    arr = zarr.create(
        shape,
        store=store,
        chunks=chunks,
        dtype=np.int16,
        fill_value=fill_value,
        codecs=[zarr.codecs.BytesCodec(), zarr.codecs.BloscCodec()],
    )
    # the coordinate selection picks 2 elements, but the value has shape (2, 2)
    with pytest.raises(ValueError, match="Attempting to set"):
        arr[np.array([1, 2]), np.array([1, 2])] = np.array([[-1, -2], [-3, -4]])
def test_iter_chunk_regions() -> None:
    """Each region yielded by ``_iter_chunk_regions`` is a valid selection:
    reading it back reflects prior writes, and writing through it is observable
    through the same selection.
    """
    chunks = (2, 3)
    a = zarr.create((10, 10), chunks=chunks)
    a[:] = 1
    for region in a._iter_chunk_regions():
        # regions already zeroed by earlier iterations would fail here,
        # so this also checks that the yielded regions do not overlap
        assert_array_equal(a[region], np.ones_like(a[region]))
        a[region] = 0
        assert_array_equal(a[region], np.zeros_like(a[region]))
@pytest.mark.parametrize(
    ("domain_shape", "region_shape", "origin", "selection_shape"),
    [
        ((9,), (1,), None, (9,)),
        ((9,), (1,), (0,), (9,)),
        ((3,), (2,), (0,), (1,)),
        ((9,), (2,), (2,), (2,)),
        ((9, 9), (2, 1), None, None),
        ((9, 9), (4, 1), None, None),
    ],
)
@pytest.mark.parametrize("order", ["lexicographic"])
@pytest.mark.parametrize("trim_excess", [True, False])
def test_iter_regions(
    domain_shape: tuple[int, ...],
    region_shape: tuple[int, ...],
    origin: tuple[int, ...] | None,
    selection_shape: tuple[int, ...] | None,
    order: _ArrayIndexingOrder,
    trim_excess: bool,
) -> None:
    """
    Test that iter_regions properly iterates over contiguous regions of a gridded domain.

    The expected result is constructed independently: one list of slices per
    dimension, combined via a cartesian product.
    """
    expected_slices_by_dim: list[list[slice]] = []
    origin_parsed: tuple[int, ...]
    selection_shape_parsed: tuple[int, ...]
    # a missing origin defaults to the zero vector
    if origin is None:
        origin_parsed = (0,) * len(domain_shape)
    else:
        origin_parsed = origin
    # a missing selection_shape defaults to "all regions from the origin onward"
    if selection_shape is None:
        selection_shape_parsed = tuple(
            ceildiv(ds, rs) - o
            for ds, o, rs in zip(domain_shape, origin_parsed, region_shape, strict=True)
        )
    else:
        selection_shape_parsed = selection_shape
    for d_s, r_s, o, ss in zip(
        domain_shape, region_shape, origin_parsed, selection_shape_parsed, strict=True
    ):
        _expected_slices: list[slice] = []
        start = o * r_s
        for incr in range(start, start + ss * r_s, r_s):
            # when trimming, the final region is clipped to the domain boundary
            if trim_excess:
                term = min(incr + r_s, d_s)
            else:
                term = incr + r_s
            _expected_slices.append(slice(incr, term, 1))
        expected_slices_by_dim.append(_expected_slices)
    expected = tuple(itertools.product(*expected_slices_by_dim))
    observed = tuple(
        _iter_regions(
            domain_shape,
            region_shape,
            origin=origin,
            selection_shape=selection_shape,
            order=order,
            trim_excess=trim_excess,
        )
    )
    assert observed == expected
| CountingDict |
python | jina-ai__jina | jina/importer.py | {
"start": 253,
"end": 5543
} | class ____:
"""
A context manager for wrapping extension import and fallback. It guides the user to pip install correct package by looking up extra-requirements.txt.
:param required: set to True if you want to raise the ModuleNotFound error
:param logger: when not given, built-in warnings.warn will be used
:param help_text: the help text followed after
:param pkg_name: the package name to find in extra_requirements.txt, when not given the ModuleNotFound exec_val will be used as the best guess
"""
def __init__(
self,
required: bool,
logger=None,
help_text: Optional[str] = None,
pkg_name: Optional[str] = None,
verbose: bool = True,
):
self._required = required
self._tags = []
self._help_text = help_text
self._logger = logger
self._pkg_name = pkg_name
self._verbose = verbose
def __enter__(self):
return self
def _check_v(self, v, missing_module):
if (
v.strip()
and not v.startswith('#')
and v.startswith(missing_module)
and ':' in v
):
return True
def _find_missing_module_in_extra_req(self, missing_module):
with open(
os.path.join(__resources_path__, 'extra-requirements.txt'), encoding='utf-8'
) as fp:
for v in fp:
if self._check_v(v, missing_module):
missing_module, install_tags = v.split(':')
self._tags.append(missing_module)
self._tags.extend(vv.strip() for vv in install_tags.split(','))
break
def _find_missing_module(self, exc_val):
missing_module = self._pkg_name or exc_val.name
missing_module = self._find_missing_module_in_extra_req(missing_module)
return missing_module
def _err_msg(self, exc_val, missing_module):
if self._tags:
from jina.helper import colored
req_msg = colored('fallback to default behavior', color='yellow')
if self._required:
req_msg = colored('and it is required', color='red')
err_msg = f'''Python package "{colored(missing_module, attrs='bold')}" is not installed, {req_msg}.
You are trying to use a feature not enabled by your current Jina installation.'''
avail_tags = ' '.join(
colored(f'[{tag}]', attrs='bold') for tag in self._tags
)
err_msg += (
f'\n\nTo enable this feature, use {colored("pip install jina[TAG]", attrs="bold")}, '
f'where {colored("[TAG]", attrs="bold")} is one of {avail_tags}.\n'
)
else:
err_msg = f'{exc_val.msg}'
return err_msg
def _log_critical(self, err_msg):
if self._verbose and self._logger:
self._logger.critical(err_msg)
if self._help_text:
self._logger.error(self._help_text)
def _log_warning(self, err_msg):
if self._verbose and self._logger:
self._logger.warning(err_msg)
if self._help_text:
self._logger.info(self._help_text)
def _raise_or_supress(self, err_msg, exc_val):
if self._verbose and not self._logger:
warnings.warn(err_msg, RuntimeWarning, stacklevel=2)
if self._required:
self._log_critical(err_msg)
raise exc_val
else:
self._log_warning(err_msg)
return True # suppress the error
def __exit__(self, exc_type, exc_val, traceback):
if exc_type != ModuleNotFoundError:
return
missing_module = self._find_missing_module(exc_val)
err_msg = self._err_msg(exc_val, missing_module)
return self._raise_or_supress(err_msg, exc_val)
def _path_import(absolute_path: str):
import importlib.util
try:
# I dont want to trust user path based on directory structure, "user_module", period
default_spec_name = 'user_module'
user_module_name = os.path.splitext(os.path.basename(absolute_path))[0]
if user_module_name == '__init__':
# __init__ can not be used as a module name
spec_name = default_spec_name
elif user_module_name not in sys.modules:
spec_name = user_module_name
else:
warnings.warn(
f'''
{user_module_name} shadows one of built-in Python module name.
It is imported as `{default_spec_name}.{user_module_name}`
Affects:
- Either, change your code from using `from {user_module_name} import ...`
to `from {default_spec_name}.{user_module_name} import ...`
- Or, rename {user_module_name} to another name
'''
)
spec_name = f'{default_spec_name}.{user_module_name}'
spec = importlib.util.spec_from_file_location(spec_name, absolute_path)
module = importlib.util.module_from_spec(spec)
sys.modules[spec_name] = module
spec.loader.exec_module(module)
except Exception as ex:
raise ImportError(f'can not import module from {absolute_path}') from ex
| ImportExtensions |
python | kamyu104__LeetCode-Solutions | Python/time-needed-to-inform-all-employees.py | {
"start": 895,
"end": 1598
} | class ____(object):
def numOfMinutes(self, n, headID, manager, informTime):
"""
:type n: int
:type headID: int
:type manager: List[int]
:type informTime: List[int]
:rtype: int
"""
def dfs(informTime, children, node):
return (max(dfs(informTime, children, c)
for c in children[node])
if node in children
else 0) + informTime[node]
children = collections.defaultdict(list)
for child, parent in enumerate(manager):
if parent != -1:
children[parent].append(child)
return dfs(informTime, children, headID)
| Solution2 |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/orm/clsregistry.py | {
"start": 10222,
"end": 10998
} | class ____:
__slots__ = ("__parent",)
__parent: _ModuleMarker
def __init__(self, parent: _ModuleMarker):
self.__parent = parent
def __getattr__(self, key: str) -> Union[_ModNS, Type[Any]]:
try:
value = self.__parent.contents[key]
except KeyError:
pass
else:
if value is not None:
if isinstance(value, _ModuleMarker):
return value.mod_ns
else:
assert isinstance(value, _MultipleClassMarker)
return value.attempt_get(self.__parent.path, key)
raise NameError(
"Module %r has no mapped classes "
"registered under the name %r" % (self.__parent.name, key)
)
| _ModNS |
python | scipy__scipy | scipy/fftpack/tests/test_real_transforms.py | {
"start": 18796,
"end": 20726
} | class ____:
"""Check input overwrite behavior."""
real_dtypes = [np.float32, np.float64]
def _check(self, x, routine, type, fftsize, axis, norm, overwrite_x, **kw):
x2 = x.copy()
routine(x2, type, fftsize, axis, norm, overwrite_x=overwrite_x)
sig = (f"{routine.__name__}({x.dtype}{x.shape!r}, {fftsize!r}, "
f"axis={axis!r}, overwrite_x={overwrite_x!r})")
if not overwrite_x:
assert_equal(x2, x, err_msg=f"spurious overwrite in {sig}")
def _check_1d(self, routine, dtype, shape, axis):
rng = np.random.RandomState(1234)
if np.issubdtype(dtype, np.complexfloating):
data = rng.randn(*shape) + 1j*rng.randn(*shape)
else:
data = rng.randn(*shape)
data = data.astype(dtype)
for type in [1, 2, 3, 4]:
for overwrite_x in [True, False]:
for norm in [None, 'ortho']:
self._check(data, routine, type, None, axis, norm,
overwrite_x)
def test_dct(self):
for dtype in self.real_dtypes:
self._check_1d(dct, dtype, (16,), -1)
self._check_1d(dct, dtype, (16, 2), 0)
self._check_1d(dct, dtype, (2, 16), 1)
def test_idct(self):
for dtype in self.real_dtypes:
self._check_1d(idct, dtype, (16,), -1)
self._check_1d(idct, dtype, (16, 2), 0)
self._check_1d(idct, dtype, (2, 16), 1)
def test_dst(self):
for dtype in self.real_dtypes:
self._check_1d(dst, dtype, (16,), -1)
self._check_1d(dst, dtype, (16, 2), 0)
self._check_1d(dst, dtype, (2, 16), 1)
def test_idst(self):
for dtype in self.real_dtypes:
self._check_1d(idst, dtype, (16,), -1)
self._check_1d(idst, dtype, (16, 2), 0)
self._check_1d(idst, dtype, (2, 16), 1)
| TestOverwrite |
python | scrapy__scrapy | scrapy/core/scraper.py | {
"start": 1721,
"end": 3297
} | class ____:
"""Scraper slot (one per running spider)"""
MIN_RESPONSE_SIZE = 1024
def __init__(self, max_active_size: int = 5000000):
self.max_active_size: int = max_active_size
self.queue: deque[QueueTuple] = deque()
self.active: set[Request] = set()
self.active_size: int = 0
self.itemproc_size: int = 0
self.closing: Deferred[Spider] | None = None
def add_response_request(
self, result: Response | Failure, request: Request
) -> Deferred[None]:
# this Deferred will be awaited in enqueue_scrape()
deferred: Deferred[None] = Deferred()
self.queue.append((result, request, deferred))
if isinstance(result, Response):
self.active_size += max(len(result.body), self.MIN_RESPONSE_SIZE)
else:
self.active_size += self.MIN_RESPONSE_SIZE
return deferred
def next_response_request_deferred(self) -> QueueTuple:
result, request, deferred = self.queue.popleft()
self.active.add(request)
return result, request, deferred
def finish_response(self, result: Response | Failure, request: Request) -> None:
self.active.remove(request)
if isinstance(result, Response):
self.active_size -= max(len(result.body), self.MIN_RESPONSE_SIZE)
else:
self.active_size -= self.MIN_RESPONSE_SIZE
def is_idle(self) -> bool:
return not (self.queue or self.active)
def needs_backout(self) -> bool:
return self.active_size > self.max_active_size
| Slot |
python | getsentry__sentry | src/sentry/search/events/fields.py | {
"start": 28236,
"end": 28830
} | class ____:
"""Parent class to function arguments, including both column references and values"""
def __init__(self, name: str):
self.name = name
self.has_default = False
def get_default(self, _) -> object:
raise InvalidFunctionArgument(f"{self.name} has no defaults")
def normalize(
self, value: str, params: ParamsType, combinator: Combinator | None
) -> str | float | datetime | list[Any] | None:
return value
def get_type(self, _) -> str:
raise InvalidFunctionArgument(f"{self.name} has no type defined")
| FunctionArg |
python | facebookresearch__faiss | tests/test_rabitq.py | {
"start": 59137,
"end": 61126
} | class ____(unittest.TestCase):
"""Test serialization/deserialization preserves behavior."""
def do_test_serialization(self, metric, nb_bits, qb):
"""Test that serialize/deserialize preserves search results."""
ds = create_test_dataset(d=64, nb=200, nq=10, nt=150)
k = 5
# Create and populate index
index1 = create_index_rabitq_with_rotation(
ds.d, metric, nb_bits, qb=qb
)
index1.train(ds.get_train())
index1.add(ds.get_database())
# Search before serialization
D1, I1 = index1.search(ds.get_queries(), k)
# Serialize and deserialize
index_bytes = faiss.serialize_index(index1)
index2 = faiss.deserialize_index(index_bytes)
# Assert: Parameters preserved
self.assertEqual(index2.d, ds.d)
self.assertEqual(index2.ntotal, ds.nb)
self.assertTrue(index2.is_trained)
# Search after deserialization using search parameters
params = faiss.RaBitQSearchParameters()
params.qb = qb
params.centered = False
D2, I2 = index2.search(ds.get_queries(), k, params=params)
# Assert: Results are identical
np.testing.assert_array_equal(
I1, I2, err_msg=f"Indices mismatch for nb_bits={nb_bits}, qb={qb}"
)
np.testing.assert_allclose(
D1,
D2,
rtol=1e-5,
err_msg=f"Distances mismatch for nb_bits={nb_bits}, qb={qb}",
)
def test_serialization_all_nb_bits(self):
"""Test serialization for all nb_bits values."""
# Test all nb_bits including edge cases (1, 9)
for metric in [faiss.METRIC_L2, faiss.METRIC_INNER_PRODUCT]:
for nb_bits in [1, 2, 4, 8, 9]:
for qb in [0, 4, 8]:
with self.subTest(metric=metric, nb_bits=nb_bits, qb=qb):
self.do_test_serialization(metric, nb_bits, qb)
| TestMultiBitRaBitQSerialization |
python | great-expectations__great_expectations | great_expectations/render/renderer/column_section_renderer.py | {
"start": 17936,
"end": 20435
} | class ____(ColumnSectionRenderer):
def __init__(self, bullet_list_renderer=None) -> None:
super().__init__()
if bullet_list_renderer is None:
bullet_list_renderer = {"class_name": "ExpectationSuiteBulletListContentBlockRenderer"}
module_name = bullet_list_renderer.get(
"module_name", "great_expectations.render.renderer.content_block"
)
verify_dynamic_loading_support(module_name=module_name)
class_name = bullet_list_renderer.get("class_name")
self._bullet_list_renderer = load_class(class_name=class_name, module_name=module_name)
@classmethod
def _render_header(cls, expectations):
column = cls._get_column_name(expectations)
new_block = RenderedHeaderContent(
**{
"header": RenderedStringTemplateContent(
**{
"content_block_type": "string_template",
"string_template": {
"template": convert_to_string_and_escape(column),
"tag": "h5",
"styling": {"classes": ["m-0"]},
},
}
),
"styling": {
"classes": ["col-12"],
"header": {"classes": ["alert", "alert-secondary"]},
},
}
)
return expectations, new_block
def _render_bullet_list(self, expectations):
new_block = self._bullet_list_renderer.render(
expectations,
include_column_name=False,
)
return [], new_block
def render(self, expectations): # type: ignore[explicit-override] # FIXME
column = self._get_column_name(expectations)
content_blocks = []
remaining_expectations, header_block = self._render_header(expectations)
content_blocks.append(header_block)
# remaining_expectations, content_blocks = cls._render_column_type(
# remaining_expectations, content_blocks)
remaining_expectations, bullet_block = self._render_bullet_list(remaining_expectations)
content_blocks.append(bullet_block)
# NOTE : Some render* functions return None so we filter them out
populated_content_blocks = list(filter(None, content_blocks))
return RenderedSectionContent(section_name=column, content_blocks=populated_content_blocks)
| ExpectationSuiteColumnSectionRenderer |
python | PrefectHQ__prefect | src/prefect/events/schemas/automations.py | {
"start": 652,
"end": 755
} | class ____(AutoEnum):
Reactive = "Reactive"
Proactive = "Proactive"
Metric = "Metric"
| Posture |
python | Netflix__metaflow | metaflow/plugins/aws/step_functions/step_functions_client.py | {
"start": 125,
"end": 4754
} | class ____(object):
def __init__(self):
from ..aws_client import get_aws_client
self._client = get_aws_client("stepfunctions")
def search(self, name):
paginator = self._client.get_paginator("list_state_machines")
return next(
(
state_machine
for page in paginator.paginate()
for state_machine in page["stateMachines"]
if state_machine["name"] == name
),
None,
)
def push(self, name, definition, role_arn, log_execution_history):
try:
response = self._client.create_state_machine(
name=name,
definition=definition,
roleArn=role_arn,
loggingConfiguration=self._default_logging_configuration(
log_execution_history
),
)
state_machine_arn = response["stateMachineArn"]
except self._client.exceptions.StateMachineAlreadyExists as e:
# State Machine already exists, update it instead of creating it.
state_machine_arn = e.response["Error"]["Message"].split("'")[1]
self._client.update_state_machine(
stateMachineArn=state_machine_arn,
definition=definition,
roleArn=role_arn,
loggingConfiguration=self._default_logging_configuration(
log_execution_history
),
)
return state_machine_arn
def get(self, name):
state_machine_arn = self.get_state_machine_arn(name)
if state_machine_arn is None:
return None
try:
return self._client.describe_state_machine(
stateMachineArn=state_machine_arn,
)
except self._client.exceptions.StateMachineDoesNotExist:
return None
def trigger(self, state_machine_arn, input):
return self._client.start_execution(
stateMachineArn=state_machine_arn, input=input
)
def list_executions(self, state_machine_arn, states):
if len(states) > 0:
return (
execution
for state in states
for page in self._client.get_paginator("list_executions").paginate(
stateMachineArn=state_machine_arn, statusFilter=state
)
for execution in page["executions"]
)
return (
execution
for page in self._client.get_paginator("list_executions").paginate(
stateMachineArn=state_machine_arn
)
for execution in page["executions"]
)
def terminate_execution(self, execution_arn):
try:
response = self._client.stop_execution(executionArn=execution_arn)
return response
except self._client.exceptions.ExecutionDoesNotExist:
raise ValueError("The execution ARN %s does not exist." % execution_arn)
except Exception as e:
raise e
def _default_logging_configuration(self, log_execution_history):
if log_execution_history:
return {
"level": "ALL",
"includeExecutionData": True,
"destinations": [
{
"cloudWatchLogsLogGroup": {
"logGroupArn": SFN_EXECUTION_LOG_GROUP_ARN
}
}
],
}
else:
return {"level": "OFF"}
def get_state_machine_arn(self, name):
if AWS_SANDBOX_ENABLED:
# We can't execute list_state_machines within the sandbox,
# but we can construct the statemachine arn since we have
# explicit access to the region.
from ..aws_client import get_aws_client
account_id = get_aws_client("sts").get_caller_identity().get("Account")
region = AWS_SANDBOX_REGION
# Sandboxes are in aws partition
return "arn:aws:states:%s:%s:stateMachine:%s" % (region, account_id, name)
else:
state_machine = self.search(name)
if state_machine:
return state_machine["stateMachineArn"]
return None
def delete(self, name):
state_machine_arn = self.get_state_machine_arn(name)
if state_machine_arn is None:
return None
return self._client.delete_state_machine(
stateMachineArn=state_machine_arn,
)
| StepFunctionsClient |
python | getsentry__sentry | src/sentry/monitors/endpoints/project_monitor_checkin_index.py | {
"start": 786,
"end": 1720
} | class ____(ProjectMonitorEndpoint, MonitorCheckInMixin):
publish_status = {
"GET": ApiPublishStatus.PUBLIC,
}
owner = ApiOwner.CRONS
@extend_schema(
operation_id="Retrieve Check-Ins for a Monitor by Project",
parameters=[
GlobalParams.ORG_ID_OR_SLUG,
GlobalParams.PROJECT_ID_OR_SLUG,
MonitorParams.MONITOR_ID_OR_SLUG,
],
responses={
200: inline_sentry_response_serializer(
"CheckInList", list[MonitorCheckInSerializerResponse]
),
401: RESPONSE_UNAUTHORIZED,
403: RESPONSE_FORBIDDEN,
404: RESPONSE_NOT_FOUND,
},
)
def get(self, request: Request, project, monitor) -> Response:
"""
Retrieve a list of check-ins for a monitor
"""
return self.get_monitor_checkins(request, project, monitor)
| ProjectMonitorCheckInIndexEndpoint |
python | ray-project__ray | rllib/offline/io_context.py | {
"start": 330,
"end": 2543
} | class ____:
"""Class containing attributes to pass to input/output class constructors.
RLlib auto-sets these attributes when constructing input/output classes,
such as InputReaders and OutputWriters.
"""
@PublicAPI
def __init__(
self,
log_dir: Optional[str] = None,
config: Optional["AlgorithmConfig"] = None,
worker_index: int = 0,
worker: Optional["RolloutWorker"] = None,
):
"""Initializes a IOContext object.
Args:
log_dir: The logging directory to read from/write to.
config: The (main) AlgorithmConfig object.
worker_index: When there are multiple workers created, this
uniquely identifies the current worker. 0 for the local
worker, >0 for any of the remote workers.
worker: The RolloutWorker object reference.
"""
from ray.rllib.algorithms.algorithm_config import AlgorithmConfig
self.log_dir = log_dir or os.getcwd()
# In case no config is provided, use the default one, but set
# `actions_in_input_normalized=True` if we don't have a worker.
# Not having a worker and/or a config should only be the case in some test
# cases, though.
self.config = config or AlgorithmConfig().offline_data(
actions_in_input_normalized=worker is None
).training(train_batch_size=1)
self.worker_index = worker_index
self.worker = worker
@PublicAPI
def default_sampler_input(self) -> Optional["SamplerInput"]:
"""Returns the RolloutWorker's SamplerInput object, if any.
Returns None if the RolloutWorker has no SamplerInput. Note that local
workers in case there are also one or more remote workers by default
do not create a SamplerInput object.
Returns:
The RolloutWorkers' SamplerInput object or None if none exists.
"""
return self.worker.sampler
@property
@PublicAPI
def input_config(self):
return self.config.get("input_config", {})
@property
@PublicAPI
def output_config(self):
return self.config.get("output_config", {})
| IOContext |
python | PyCQA__pylint | tests/functional/u/undefined/undefined_variable.py | {
"start": 3312,
"end": 3347
} | class ____:
""" No op """
| Ancestor |
python | readthedocs__readthedocs.org | readthedocs/projects/tests/test_views.py | {
"start": 12455,
"end": 14338
} | class ____(TestCase):
def setUp(self):
self.user = get(User)
self.project = get(Project, slug="project", users=[self.user], repo="https://github.com/user/repo")
self.url = reverse("projects_edit", args=[self.project.slug])
self.client.force_login(self.user)
@mock.patch("readthedocs.projects.forms.trigger_build")
@mock.patch("readthedocs.projects.forms.index_project")
def test_search_indexing_enabled(self, index_project, trigger_build):
resp = self.client.get(self.url)
assert resp.status_code == 200
form = resp.context["form"]
assert "search_indexing_enabled" not in form.fields
self.project.search_indexing_enabled = False
self.project.save()
resp = self.client.get(self.url)
assert resp.status_code == 200
form = resp.context["form"]
assert "search_indexing_enabled" in form.fields
data = {
"name": self.project.name,
"repo": self.project.repo,
"language": self.project.language,
"default_version": self.project.default_version,
"versioning_scheme": self.project.versioning_scheme,
}
data["search_indexing_enabled"] = False
resp = self.client.post(
self.url,
data=data,
)
assert resp.status_code == 302
self.project.refresh_from_db()
assert not self.project.search_indexing_enabled
index_project.delay.assert_not_called()
data["search_indexing_enabled"] = True
resp = self.client.post(
self.url,
data=data,
)
assert resp.status_code == 302
self.project.refresh_from_db()
assert self.project.search_indexing_enabled
index_project.delay.assert_called_once_with(project_slug=self.project.slug)
| TestProjectEditView |
python | HypothesisWorks__hypothesis | hypothesis-python/src/hypothesis/errors.py | {
"start": 6570,
"end": 6679
} | class ____(_Trimmable):
"""Raised when a test fails a health check. See |HealthCheck|."""
| FailedHealthCheck |
python | huggingface__transformers | src/transformers/modeling_outputs.py | {
"start": 62286,
"end": 63928
} | class ____(ModelOutput):
"""
Base class for outputs of sentence classification models.
Args:
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
Classification (or regression if config.num_labels==1) loss.
logits (`torch.FloatTensor` of shape `(batch_size, config.num_labels)`):
Classification (or regression if config.num_labels==1) scores (before SoftMax).
hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
"""
loss: Optional[torch.FloatTensor] = None
logits: Optional[torch.FloatTensor] = None
hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None
attentions: Optional[tuple[torch.FloatTensor, ...]] = None
@dataclass
| SequenceClassifierOutput |
python | euske__pdfminer | pdfminer/rijndael.py | {
"start": 44715,
"end": 45443
} | class ____:
"""
>>> key = bytes.fromhex('00010203050607080a0b0c0d0f101112')
>>> ciphertext = bytes.fromhex('d8f532538289ef7d06b506a4fd5be9c9')
>>> RijndaelDecryptor(key, 128).decrypt(ciphertext).hex()
'506812a45f08c889b97f5980038b8359'
"""
def __init__(self, key, keybits=256):
assert len(key) == KEYLENGTH(keybits)
(self.rk, self.nrounds) = rijndaelSetupDecrypt(key, keybits)
assert len(self.rk) == RKLENGTH(keybits)
assert self.nrounds == NROUNDS(keybits)
return
def decrypt(self, ciphertext):
assert len(ciphertext) == 16
return rijndaelDecrypt(self.rk, self.nrounds, ciphertext)
# encrypt(key, fin, fout, keybits=256)
| RijndaelDecryptor |
python | getsentry__sentry | src/sentry/dashboards/endpoints/organization_dashboard_details.py | {
"start": 2725,
"end": 7887
} | class ____(OrganizationDashboardBase):
publish_status = {
"DELETE": ApiPublishStatus.PUBLIC,
"GET": ApiPublishStatus.PUBLIC,
"PUT": ApiPublishStatus.PUBLIC,
}
@extend_schema(
operation_id="Retrieve an Organization's Custom Dashboard",
parameters=[GlobalParams.ORG_ID_OR_SLUG, DashboardParams.DASHBOARD_ID],
responses={
200: DashboardDetailsModelSerializer,
403: RESPONSE_FORBIDDEN,
404: RESPONSE_NOT_FOUND,
},
examples=DashboardExamples.DASHBOARD_GET_RESPONSE,
)
def get(
self, request: Request, organization: Organization, dashboard: Dashboard | dict[Any, Any]
) -> Response:
"""
Return details about an organization's custom dashboard.
"""
if not features.has(READ_FEATURE, organization, actor=request.user):
return Response(status=404)
if isinstance(dashboard, dict):
return self.respond(dashboard)
return self.respond(serialize(dashboard, request.user))
@extend_schema(
operation_id="Delete an Organization's Custom Dashboard",
parameters=[GlobalParams.ORG_ID_OR_SLUG, DashboardParams.DASHBOARD_ID],
responses={
204: RESPONSE_NO_CONTENT,
403: RESPONSE_FORBIDDEN,
404: RESPONSE_NOT_FOUND,
},
)
def delete(
self, request: Request, organization: Organization, dashboard: Dashboard | dict[Any, Any]
) -> Response:
"""
Delete an organization's custom dashboard, or tombstone
a pre-built dashboard which effectively deletes it.
"""
if not features.has(EDIT_FEATURE, organization, actor=request.user):
return Response(status=404)
self.check_object_permissions(request, dashboard)
num_dashboards = Dashboard.objects.filter(organization=organization).count()
num_tombstones = DashboardTombstone.objects.filter(organization=organization).count()
if isinstance(dashboard, Dashboard) and dashboard.prebuilt_id is not None:
return self.respond({"Cannot delete prebuilt Dashboards."}, status=409)
if isinstance(dashboard, dict):
if num_dashboards > 0:
DashboardTombstone.objects.get_or_create(
organization=organization, slug=dashboard["id"]
)
else:
return self.respond({"Cannot delete last Dashboard."}, status=409)
elif (num_dashboards > 1) or (num_tombstones == 0):
dashboard.delete()
else:
return self.respond({"Cannot delete last Dashboard."}, status=409)
return self.respond(status=204)
@extend_schema(
operation_id="Edit an Organization's Custom Dashboard",
parameters=[GlobalParams.ORG_ID_OR_SLUG, DashboardParams.DASHBOARD_ID],
request=DashboardDetailsSerializer,
responses={
200: DashboardDetailsModelSerializer,
400: RESPONSE_BAD_REQUEST,
403: RESPONSE_FORBIDDEN,
404: RESPONSE_NOT_FOUND,
},
examples=DashboardExamples.DASHBOARD_PUT_RESPONSE,
)
def put(
self,
request: Request,
organization: Organization,
dashboard: Dashboard | dict[Any, Any] | None,
) -> Response:
"""
Edit an organization's custom dashboard as well as any bulk
edits on widgets that may have been made. (For example, widgets
that have been rearranged, updated queries and fields, specific
display types, and so on.)
"""
if not features.has(EDIT_FEATURE, organization, actor=request.user):
return Response(status=404)
self.check_object_permissions(request, dashboard)
if isinstance(dashboard, Dashboard) and dashboard.prebuilt_id is not None:
return self.respond({"Cannot edit prebuilt Dashboards."}, status=409)
tombstone = None
if isinstance(dashboard, dict):
tombstone = dashboard["id"]
dashboard = None
serializer = DashboardDetailsSerializer(
data=request.data,
instance=dashboard,
context={
"organization": organization,
"request": request,
"projects": self.get_projects(request, organization),
"environment": self.request.GET.getlist("environment"),
},
)
if not serializer.is_valid():
return Response(serializer.errors, status=400)
try:
with transaction.atomic(router.db_for_write(DashboardTombstone)):
serializer.save()
if tombstone:
DashboardTombstone.objects.get_or_create(
organization=organization, slug=tombstone
)
except IntegrityError:
return self.respond({"Dashboard with that title already exists."}, status=409)
return self.respond(serialize(serializer.instance, request.user), status=200)
@region_silo_endpoint
| OrganizationDashboardDetailsEndpoint |
python | PrefectHQ__prefect | src/prefect/server/events/services/actions.py | {
"start": 642,
"end": 1850
} | class ____(RunInEphemeralServers, Service):
"""Runs the actions triggered by automations"""
consumer_task: asyncio.Task[None] | None = None
@classmethod
def service_settings(cls) -> ServicesBaseSetting:
return get_current_settings().server.services.triggers
async def start(self) -> NoReturn:
assert self.consumer_task is None, "Actions already started"
self.consumer: Consumer = create_consumer(
"actions", name=generate_unique_consumer_name("actions")
)
async with actions.consumer() as handler:
self.consumer_task = asyncio.create_task(self.consumer.run(handler))
logger.debug("Actions started")
try:
await self.consumer_task
except asyncio.CancelledError:
pass
async def stop(self) -> None:
assert self.consumer_task is not None, "Actions not started"
self.consumer_task.cancel()
try:
await self.consumer_task
except asyncio.CancelledError:
pass
finally:
await self.consumer.cleanup()
self.consumer_task = None
logger.debug("Actions stopped")
| Actions |
python | ray-project__ray | rllib/models/tf/layers/noisy_layer.py | {
"start": 305,
"end": 3961
} | class ____(tf.keras.layers.Layer if tf else object):
r"""A Layer that adds learnable Noise to some previous layer's outputs.
Consists of:
- a common dense layer: y = w^{T}x + b
- a noisy layer: y = (w + \epsilon_w*\sigma_w)^{T}x +
(b+\epsilon_b*\sigma_b)
, where \epsilon are random variables sampled from factorized normal
distributions and \sigma are trainable variables which are expected to
vanish along the training procedure.
"""
def __init__(
self, prefix: str, out_size: int, sigma0: float, activation: str = "relu"
):
"""Initializes a NoisyLayer object.
Args:
prefix:
out_size: Output size for Noisy Layer
sigma0: Initialization value for sigma_b (bias noise)
non_linear: Non-linear activation for Noisy Layer
"""
super().__init__()
self.prefix = prefix
self.out_size = out_size
# TF noise generation can be unreliable on GPU
# If generating the noise on the CPU,
# lowering sigma0 to 0.1 may be helpful
self.sigma0 = sigma0 # 0.5~GPU, 0.1~CPU
self.activation = activation
# Variables.
self.w = None # Weight matrix.
self.b = None # Biases.
self.sigma_w = None # Noise for weight matrix
self.sigma_b = None # Noise for biases.
if log_once("noisy_layer"):
deprecation_warning(
old="rllib.models.tf.layers.NoisyLayer",
)
def build(self, input_shape: TensorShape):
in_size = int(input_shape[1])
self.sigma_w = get_variable(
value=tf.keras.initializers.RandomUniform(
minval=-1.0 / np.sqrt(float(in_size)),
maxval=1.0 / np.sqrt(float(in_size)),
),
trainable=True,
tf_name=self.prefix + "_sigma_w",
shape=[in_size, self.out_size],
dtype=tf.float32,
)
self.sigma_b = get_variable(
value=tf.keras.initializers.Constant(self.sigma0 / np.sqrt(float(in_size))),
trainable=True,
tf_name=self.prefix + "_sigma_b",
shape=[self.out_size],
dtype=tf.float32,
)
self.w = get_variable(
value=tf.keras.initializers.GlorotUniform(),
tf_name=self.prefix + "_fc_w",
trainable=True,
shape=[in_size, self.out_size],
dtype=tf.float32,
)
self.b = get_variable(
value=tf.keras.initializers.Zeros(),
tf_name=self.prefix + "_fc_b",
trainable=True,
shape=[self.out_size],
dtype=tf.float32,
)
def call(self, inputs: TensorType) -> TensorType:
in_size = int(inputs.shape[1])
epsilon_in = tf.random.normal(shape=[in_size])
epsilon_out = tf.random.normal(shape=[self.out_size])
epsilon_in = self._f_epsilon(epsilon_in)
epsilon_out = self._f_epsilon(epsilon_out)
epsilon_w = tf.matmul(
a=tf.expand_dims(epsilon_in, -1), b=tf.expand_dims(epsilon_out, 0)
)
epsilon_b = epsilon_out
action_activation = (
tf.matmul(inputs, self.w + self.sigma_w * epsilon_w)
+ self.b
+ self.sigma_b * epsilon_b
)
fn = get_activation_fn(self.activation, framework="tf")
if fn is not None:
action_activation = fn(action_activation)
return action_activation
def _f_epsilon(self, x: TensorType) -> TensorType:
return tf.math.sign(x) * tf.math.sqrt(tf.math.abs(x))
| NoisyLayer |
python | huggingface__transformers | examples/modular-transformers/modeling_test_detr.py | {
"start": 54271,
"end": 72604
} | class ____(TestDetrPreTrainedModel):
def __init__(self, config: TestDetrConfig):
super().__init__(config)
# Create backbone + positional encoding
backbone = TestDetrConvEncoder(config)
position_embeddings = build_position_encoding(config)
self.backbone = TestDetrConvModel(backbone, position_embeddings)
# Create input projection layers
if config.num_feature_levels > 1:
num_backbone_outs = len(backbone.intermediate_channel_sizes)
input_proj_list = []
for _ in range(num_backbone_outs):
in_channels = backbone.intermediate_channel_sizes[_]
input_proj_list.append(
nn.Sequential(
nn.Conv2d(in_channels, config.d_model, kernel_size=1),
nn.GroupNorm(32, config.d_model),
)
)
for _ in range(config.num_feature_levels - num_backbone_outs):
input_proj_list.append(
nn.Sequential(
nn.Conv2d(
in_channels,
config.d_model,
kernel_size=3,
stride=2,
padding=1,
),
nn.GroupNorm(32, config.d_model),
)
)
in_channels = config.d_model
self.input_proj = nn.ModuleList(input_proj_list)
else:
self.input_proj = nn.ModuleList(
[
nn.Sequential(
nn.Conv2d(
backbone.intermediate_channel_sizes[-1],
config.d_model,
kernel_size=1,
),
nn.GroupNorm(32, config.d_model),
)
]
)
if not config.two_stage:
self.query_position_embeddings = nn.Embedding(config.num_queries, config.d_model * 2)
self.encoder = TestDetrEncoder(config)
self.decoder = TestDetrDecoder(config)
self.level_embed = nn.Parameter(torch.Tensor(config.num_feature_levels, config.d_model))
if config.two_stage:
self.enc_output = nn.Linear(config.d_model, config.d_model)
self.enc_output_norm = nn.LayerNorm(config.d_model)
self.pos_trans = nn.Linear(config.d_model * 2, config.d_model * 2)
self.pos_trans_norm = nn.LayerNorm(config.d_model * 2)
else:
self.reference_points = nn.Linear(config.d_model, 2)
self.post_init()
def get_encoder(self):
return self.encoder
def freeze_backbone(self):
for name, param in self.backbone.conv_encoder.model.named_parameters():
param.requires_grad_(False)
def unfreeze_backbone(self):
for name, param in self.backbone.conv_encoder.model.named_parameters():
param.requires_grad_(True)
def get_valid_ratio(self, mask, dtype=torch.float32):
"""Get the valid ratio of all feature maps."""
_, height, width = mask.shape
valid_height = torch.sum(mask[:, :, 0], 1)
valid_width = torch.sum(mask[:, 0, :], 1)
valid_ratio_height = valid_height.to(dtype) / height
valid_ratio_width = valid_width.to(dtype) / width
valid_ratio = torch.stack([valid_ratio_width, valid_ratio_height], -1)
return valid_ratio
def get_proposal_pos_embed(self, proposals):
"""Get the position embedding of the proposals."""
num_pos_feats = self.config.d_model // 2
temperature = 10000
scale = 2 * math.pi
dim_t = torch.arange(num_pos_feats, dtype=proposals.dtype, device=proposals.device)
dim_t = temperature ** (2 * torch.div(dim_t, 2, rounding_mode="floor") / num_pos_feats)
# batch_size, num_queries, 4
proposals = proposals.sigmoid() * scale
# batch_size, num_queries, 4, 128
pos = proposals[:, :, :, None] / dim_t
# batch_size, num_queries, 4, 64, 2 -> batch_size, num_queries, 512
pos = torch.stack((pos[:, :, :, 0::2].sin(), pos[:, :, :, 1::2].cos()), dim=4).flatten(2)
return pos
def gen_encoder_output_proposals(self, enc_output, padding_mask, spatial_shapes):
"""Generate the encoder output proposals from encoded enc_output.
Args:
enc_output (Tensor[batch_size, sequence_length, hidden_size]): Output of the encoder.
padding_mask (Tensor[batch_size, sequence_length]): Padding mask for `enc_output`.
spatial_shapes (list[tuple[int, int]]): Spatial shapes of the feature maps.
Returns:
`tuple(torch.FloatTensor)`: A tuple of feature map and bbox prediction.
- object_query (Tensor[batch_size, sequence_length, hidden_size]): Object query features. Later used to
directly predict a bounding box. (without the need of a decoder)
- output_proposals (Tensor[batch_size, sequence_length, 4]): Normalized proposals, after an inverse
sigmoid.
"""
batch_size = enc_output.shape[0]
proposals = []
_cur = 0
for level, (height, width) in enumerate(spatial_shapes):
mask_flatten_ = padding_mask[:, _cur : (_cur + height * width)].view(batch_size, height, width, 1)
valid_height = torch.sum(~mask_flatten_[:, :, 0, 0], 1)
valid_width = torch.sum(~mask_flatten_[:, 0, :, 0], 1)
grid_y, grid_x = meshgrid(
torch.linspace(
0,
height - 1,
height,
dtype=enc_output.dtype,
device=enc_output.device,
),
torch.linspace(
0,
width - 1,
width,
dtype=enc_output.dtype,
device=enc_output.device,
),
indexing="ij",
)
grid = torch.cat([grid_x.unsqueeze(-1), grid_y.unsqueeze(-1)], -1)
scale = torch.cat([valid_width.unsqueeze(-1), valid_height.unsqueeze(-1)], 1).view(batch_size, 1, 1, 2)
grid = (grid.unsqueeze(0).expand(batch_size, -1, -1, -1) + 0.5) / scale
width_height = torch.ones_like(grid) * 0.05 * (2.0**level)
proposal = torch.cat((grid, width_height), -1).view(batch_size, -1, 4)
proposals.append(proposal)
_cur += height * width
output_proposals = torch.cat(proposals, 1)
output_proposals_valid = ((output_proposals > 0.01) & (output_proposals < 0.99)).all(-1, keepdim=True)
output_proposals = torch.log(output_proposals / (1 - output_proposals)) # inverse sigmoid
output_proposals = output_proposals.masked_fill(padding_mask.unsqueeze(-1), float("inf"))
output_proposals = output_proposals.masked_fill(~output_proposals_valid, float("inf"))
# assign each pixel as an object query
object_query = enc_output
object_query = object_query.masked_fill(padding_mask.unsqueeze(-1), float(0))
object_query = object_query.masked_fill(~output_proposals_valid, float(0))
object_query = self.enc_output_norm(self.enc_output(object_query))
return object_query, output_proposals
@auto_docstring
def forward(
self,
pixel_values: torch.FloatTensor,
pixel_mask: Optional[torch.LongTensor] = None,
decoder_attention_mask: Optional[torch.FloatTensor] = None,
encoder_outputs: Optional[torch.FloatTensor] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
decoder_inputs_embeds: Optional[torch.FloatTensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[tuple[torch.FloatTensor], TestDetrModelOutput]:
r"""
decoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, num_queries)`, *optional*):
Not used by default. Can be used to mask object queries.
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
Optionally, instead of passing the flattened feature map (output of the backbone + projection layer), you
can choose to directly pass a flattened representation of an image.
decoder_inputs_embeds (`torch.FloatTensor` of shape `(batch_size, num_queries, hidden_size)`, *optional*):
Optionally, instead of initializing the queries with a tensor of zeros, you can choose to directly pass an
embedded representation.
Examples:
```python
>>> from transformers import AutoImageProcessor, TestDetrModel
>>> from PIL import Image
>>> import requests
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> image_processor = AutoImageProcessor.from_pretrained("SenseTime/deformable-detr")
>>> model = TestDetrModel.from_pretrained("SenseTime/deformable-detr")
>>> inputs = image_processor(images=image, return_tensors="pt")
>>> outputs = model(**inputs)
>>> last_hidden_states = outputs.last_hidden_state
>>> list(last_hidden_states.shape)
[1, 300, 256]
```"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
batch_size, num_channels, height, width = pixel_values.shape
device = pixel_values.device
if pixel_mask is None:
pixel_mask = torch.ones(((batch_size, height, width)), dtype=torch.long, device=device)
# Extract multi-scale feature maps of same resolution `config.d_model` (cf Figure 4 in paper)
# First, sent pixel_values + pixel_mask through Backbone to obtain the features
# which is a list of tuples
features, position_embeddings_list = self.backbone(pixel_values, pixel_mask)
# Then, apply 1x1 convolution to reduce the channel dimension to d_model (256 by default)
sources = []
masks = []
for level, (source, mask) in enumerate(features):
sources.append(self.input_proj[level](source))
masks.append(mask)
if mask is None:
raise ValueError("No attention mask was provided")
# Lowest resolution feature maps are obtained via 3x3 stride 2 convolutions on the final stage
if self.config.num_feature_levels > len(sources):
_len_sources = len(sources)
for level in range(_len_sources, self.config.num_feature_levels):
if level == _len_sources:
source = self.input_proj[level](features[-1][0])
else:
source = self.input_proj[level](sources[-1])
mask = nn.functional.interpolate(pixel_mask[None].to(pixel_values.dtype), size=source.shape[-2:]).to(
torch.bool
)[0]
pos_l = self.backbone.position_embedding(source, mask).to(source.dtype)
sources.append(source)
masks.append(mask)
position_embeddings_list.append(pos_l)
# Create queries
query_embeds = None
if not self.config.two_stage:
query_embeds = self.query_position_embeddings.weight
# Prepare encoder inputs (by flattening)
source_flatten = []
mask_flatten = []
lvl_pos_embed_flatten = []
spatial_shapes_list = []
for level, (source, mask, pos_embed) in enumerate(zip(sources, masks, position_embeddings_list)):
batch_size, num_channels, height, width = source.shape
spatial_shape = (height, width)
spatial_shapes_list.append(spatial_shape)
source = source.flatten(2).transpose(1, 2)
mask = mask.flatten(1)
pos_embed = pos_embed.flatten(2).transpose(1, 2)
lvl_pos_embed = pos_embed + self.level_embed[level].view(1, 1, -1)
lvl_pos_embed_flatten.append(lvl_pos_embed)
source_flatten.append(source)
mask_flatten.append(mask)
source_flatten = torch.cat(source_flatten, 1)
mask_flatten = torch.cat(mask_flatten, 1)
lvl_pos_embed_flatten = torch.cat(lvl_pos_embed_flatten, 1)
spatial_shapes = torch.as_tensor(spatial_shapes_list, dtype=torch.long, device=source_flatten.device)
level_start_index = torch.cat((spatial_shapes.new_zeros((1,)), spatial_shapes.prod(1).cumsum(0)[:-1]))
valid_ratios = torch.stack([self.get_valid_ratio(m, dtype=source_flatten.dtype) for m in masks], 1)
# Fourth, sent source_flatten + mask_flatten + lvl_pos_embed_flatten (backbone + proj layer output) through encoder
# Also provide spatial_shapes, level_start_index and valid_ratios
if encoder_outputs is None:
encoder_outputs = self.encoder(
inputs_embeds=source_flatten,
attention_mask=mask_flatten,
position_embeddings=lvl_pos_embed_flatten,
spatial_shapes=spatial_shapes,
spatial_shapes_list=spatial_shapes_list,
level_start_index=level_start_index,
valid_ratios=valid_ratios,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
# If the user passed a tuple for encoder_outputs, we wrap it in a BaseModelOutput when return_dict=True
elif return_dict and not isinstance(encoder_outputs, BaseModelOutput):
encoder_outputs = BaseModelOutput(
last_hidden_state=encoder_outputs[0],
hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None,
attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None,
)
# Fifth, prepare decoder inputs
batch_size, _, num_channels = encoder_outputs[0].shape
enc_outputs_class = None
enc_outputs_coord_logits = None
if self.config.two_stage:
object_query_embedding, output_proposals = self.gen_encoder_output_proposals(
encoder_outputs[0], ~mask_flatten, spatial_shapes_list
)
# hack implementation for two-stage Deformable DETR
# apply a detection head to each pixel (A.4 in paper)
# linear projection for bounding box binary classification (i.e. foreground and background)
enc_outputs_class = self.decoder.class_embed[-1](object_query_embedding)
# 3-layer FFN to predict bounding boxes coordinates (bbox regression branch)
delta_bbox = self.decoder.bbox_embed[-1](object_query_embedding)
enc_outputs_coord_logits = delta_bbox + output_proposals
# only keep top scoring `config.two_stage_num_proposals` proposals
topk = self.config.two_stage_num_proposals
topk_proposals = torch.topk(enc_outputs_class[..., 0], topk, dim=1)[1]
topk_coords_logits = torch.gather(
enc_outputs_coord_logits,
1,
topk_proposals.unsqueeze(-1).repeat(1, 1, 4),
)
topk_coords_logits = topk_coords_logits.detach()
reference_points = topk_coords_logits.sigmoid()
init_reference_points = reference_points
pos_trans_out = self.pos_trans_norm(self.pos_trans(self.get_proposal_pos_embed(topk_coords_logits)))
query_embed, target = torch.split(pos_trans_out, num_channels, dim=2)
else:
query_embed, target = torch.split(query_embeds, num_channels, dim=1)
query_embed = query_embed.unsqueeze(0).expand(batch_size, -1, -1)
target = target.unsqueeze(0).expand(batch_size, -1, -1)
reference_points = self.reference_points(query_embed).sigmoid()
init_reference_points = reference_points
decoder_outputs = self.decoder(
inputs_embeds=target,
position_embeddings=query_embed,
encoder_hidden_states=encoder_outputs[0],
encoder_attention_mask=mask_flatten,
reference_points=reference_points,
spatial_shapes=spatial_shapes,
spatial_shapes_list=spatial_shapes_list,
level_start_index=level_start_index,
valid_ratios=valid_ratios,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
if not return_dict:
enc_outputs = tuple(value for value in [enc_outputs_class, enc_outputs_coord_logits] if value is not None)
tuple_outputs = (init_reference_points,) + decoder_outputs + encoder_outputs + enc_outputs
return tuple_outputs
return TestDetrModelOutput(
init_reference_points=init_reference_points,
last_hidden_state=decoder_outputs.last_hidden_state,
intermediate_hidden_states=decoder_outputs.intermediate_hidden_states,
intermediate_reference_points=decoder_outputs.intermediate_reference_points,
decoder_hidden_states=decoder_outputs.hidden_states,
decoder_attentions=decoder_outputs.attentions,
cross_attentions=decoder_outputs.cross_attentions,
encoder_last_hidden_state=encoder_outputs.last_hidden_state,
encoder_hidden_states=encoder_outputs.hidden_states,
encoder_attentions=encoder_outputs.attentions,
enc_outputs_class=enc_outputs_class,
enc_outputs_coord_logits=enc_outputs_coord_logits,
)
| TestDetrModel |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-hubspot/components.py | {
"start": 4445,
"end": 5427
} | class ____(StateMigration):
cursor_field: str
config: Config
cursor_format: Optional[str] = None
def __init__(self, cursor_field, config: Config, cursor_format: Optional[str] = None):
self.cursor_field = cursor_field
self.cursor_format = cursor_format
self.config = config
def migrate(self, stream_state: Mapping[str, Any]) -> Mapping[str, Any]:
# if start date wasn't provided in the config default date will be used
start_date = self.config.get("start_date", "2006-06-01T00:00:00.000Z")
if self.cursor_format:
dt = ab_datetime_parse(start_date)
formatted_start_date = DatetimeParser().format(dt, self.cursor_format)
return {self.cursor_field: formatted_start_date}
return {self.cursor_field: start_date}
def should_migrate(self, stream_state: Mapping[str, Any]) -> bool:
return stream_state.get(self.cursor_field) == ""
@dataclass
| MigrateEmptyStringState |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/ruff/RUF009_attrs.py | {
"start": 2393,
"end": 2447
} | class ____:
f: F = F()
g: G = G()
@attr.frozen
| I |
python | dagster-io__dagster | python_modules/dagster/dagster/_grpc/server.py | {
"start": 13744,
"end": 53923
} | class ____(DagsterApiServicer):
# The loadable_target_origin is currently Noneable to support instaniating a server.
# This helps us test the ping methods, and incrementally migrate each method to
# the target passed in here instead of passing in a target in the argument.
def __init__(
self,
server_termination_event: ThreadingEventType,
logger: logging.Logger,
server_threadpool_executor: FuturesAwareThreadPoolExecutor,
loadable_target_origin: Optional[LoadableTargetOrigin] = None,
heartbeat: bool = False,
heartbeat_timeout: int = 30,
lazy_load_user_code: bool = False,
fixed_server_id: Optional[str] = None,
entry_point: Optional[Sequence[str]] = None,
container_image: Optional[str] = None,
container_context: Optional[dict] = None,
inject_env_vars_from_instance: Optional[bool] = False,
instance_ref: Optional[InstanceRef] = None,
location_name: Optional[str] = None,
enable_metrics: bool = False,
defs_state_info: Optional[DefsStateInfo] = None,
):
super().__init__()
check.bool_param(heartbeat, "heartbeat")
check.int_param(heartbeat_timeout, "heartbeat_timeout")
check.invariant(heartbeat_timeout > 0, "heartbeat_timeout must be greater than 0")
self._server_termination_event = check.inst_param(
server_termination_event, "server_termination_event", ThreadingEventType
)
self._loadable_target_origin = check.opt_inst_param(
loadable_target_origin, "loadable_target_origin", LoadableTargetOrigin
)
self._logger = logger
self._mp_ctx = multiprocessing.get_context("spawn")
# Each server is initialized with a unique UUID. This UUID is used by clients to track when
# servers are replaced and is used for cache invalidation and reloading.
self._server_id = check.opt_str_param(fixed_server_id, "fixed_server_id", str(uuid.uuid4()))
# Client tells the server to shutdown by calling ShutdownServer (or by failing to send a
# hearbeat, at which point this event is set. The cleanup thread will then set the server
# termination event once all current executions have finished, which will stop the server)
self._shutdown_once_executions_finish_event = threading.Event()
self._executions: dict[str, tuple[multiprocessing.Process, InstanceRef]] = {}
self._termination_events: dict[str, MPEvent] = {}
self._termination_times: dict[str, float] = {}
self._execution_lock = threading.Lock()
self._serializable_load_error = None
self._entry_point = (
check.sequence_param(entry_point, "entry_point", of_type=str)
if entry_point is not None
else DEFAULT_DAGSTER_ENTRY_POINT
)
self._container_image = check.opt_str_param(container_image, "container_image")
self._container_context = check.opt_dict_param(container_context, "container_context")
# When will this be set in a gRPC server?
# - When running `dagster dev` (or `dagster-webserver`) in the gRPC server subprocesses that are spun up
# - When running code in Dagster Cloud on 1.1 or later
# When will it not be set?
# - When running your own grpc server with `dagster api grpc`
# - When using an integration that spins up gRPC servers (for example, the Dagster Helm
# chart or the deploy_docker example)
self._instance_ref = check.opt_inst_param(instance_ref, "instance_ref", InstanceRef)
self._exit_stack = ExitStack()
self._enable_metrics = check.bool_param(enable_metrics, "enable_metrics")
self._defs_state_info = check.opt_inst_param(
defs_state_info, "defs_state_info", DefsStateInfo
)
self._server_threadpool_executor = server_threadpool_executor
try:
from dagster._cli.utils import get_instance_for_cli
instance_required = inject_env_vars_from_instance or defs_state_info is not None
try:
# we only require the instance if we need it to inject env vars or to load state,
# so in other cases we can swallow the error, but we should try to get it if possible
self._instance = self._exit_stack.enter_context(
get_instance_for_cli(instance_ref=instance_ref)
)
if inject_env_vars_from_instance:
self._instance.inject_env_vars(location_name)
except DagsterError as e:
if instance_required:
raise e
self._instance = None
self._loaded_repositories: Optional[LoadedRepositories] = LoadedRepositories(
loadable_target_origin,
entry_point=self._entry_point,
container_image=self._container_image,
container_context=self._container_context,
# state info threaded through via CLI arguments
defs_state_info=self._defs_state_info,
)
except Exception:
if not lazy_load_user_code:
raise
self._loaded_repositories = None
self._serializable_load_error = serializable_error_info_from_exc_info(sys.exc_info())
if using_dagster_dev() and not use_verbose():
removed_system_frame_hint = (
lambda is_first_hidden_frame,
i: f" [{i} dagster system frames hidden, run with --verbose to see the full stack trace]\n"
if is_first_hidden_frame
else f" [{i} dagster system frames hidden]\n"
)
logger.error(
remove_system_frames_from_error(
unwrap_user_code_error(self._serializable_load_error),
build_system_frame_removed_hint=removed_system_frame_hint,
)
)
else:
self._logger.exception("Error while importing code")
self.__last_heartbeat_time = time.time()
if heartbeat:
self.__heartbeat_thread: Optional[threading.Thread] = threading.Thread(
target=self._heartbeat_thread,
args=(heartbeat_timeout,),
name="grpc-server-heartbeat",
daemon=True,
)
self.__heartbeat_thread.start()
else:
self.__heartbeat_thread = None
self.__cleanup_thread = threading.Thread(
target=self._cleanup_thread,
args=(),
name="grpc-server-cleanup",
daemon=True,
)
self.__cleanup_thread.start()
def cleanup(self) -> None:
# In case ShutdownServer was not called
self._shutdown_once_executions_finish_event.set()
if self.__heartbeat_thread:
self.__heartbeat_thread.join()
self.__cleanup_thread.join()
self._exit_stack.close()
def _heartbeat_thread(self, heartbeat_timeout: float) -> None:
while True:
self._shutdown_once_executions_finish_event.wait(heartbeat_timeout)
if self._shutdown_once_executions_finish_event.is_set():
break
if self.__last_heartbeat_time < time.time() - heartbeat_timeout:
self._logger.warning(
f"No heartbeat received in {heartbeat_timeout} seconds, shutting down"
)
self._shutdown_once_executions_finish_event.set()
def _cleanup_thread(self) -> None:
while True:
self._server_termination_event.wait(CLEANUP_TICK)
if self._server_termination_event.is_set():
break
self._check_for_orphaned_runs()
def _check_for_orphaned_runs(self) -> None:
with self._execution_lock:
runs_to_clear = []
for run_id, (process, instance_ref) in self._executions.items():
if not process.is_alive():
with DagsterInstance.from_ref(instance_ref) as instance:
runs_to_clear.append(run_id)
run = instance.get_run_by_id(run_id)
if not run or run.is_finished:
continue
message = get_run_crash_explanation(
prefix=f"Run execution process for {run.run_id}",
exit_code=check.not_none(process.exitcode),
)
instance.report_engine_event(message, run, cls=self.__class__)
instance.report_run_failed(run)
for run_id in runs_to_clear:
self._clear_run(run_id)
# Once there are no more running executions after we have received a request to
# shut down, terminate the server
if self._shutdown_once_executions_finish_event.is_set():
if len(self._executions) == 0:
self._server_termination_event.set()
# Assumes execution lock is being held
def _clear_run(self, run_id: str) -> None:
del self._executions[run_id]
del self._termination_events[run_id]
if run_id in self._termination_times:
del self._termination_times[run_id]
def _get_repo_for_origin(
self,
remote_repo_origin: RemoteRepositoryOrigin,
) -> RepositoryDefinition:
if not self._loaded_repositories:
raise Exception(
f"Could not load definitions since the code server is in an error state: {check.not_none(self._serializable_load_error)}"
)
loaded_repos = check.not_none(self._loaded_repositories)
if remote_repo_origin.repository_name not in loaded_repos.definitions_by_name:
raise Exception(
f'Could not find a repository called "{remote_repo_origin.repository_name}"'
)
return loaded_repos.definitions_by_name[remote_repo_origin.repository_name]
def _get_reconstructable_repo_for_origin(
self,
remote_repo_origin: RemoteRepositoryOrigin,
) -> ReconstructableRepository:
if not self._loaded_repositories:
raise Exception(
f"Could not load definitions since the code server is in an error state: {check.not_none(self._serializable_load_error)}"
)
loaded_repos = check.not_none(self._loaded_repositories)
if remote_repo_origin.repository_name not in loaded_repos.definitions_by_name:
raise Exception(
f'Could not find a repository called "{remote_repo_origin.repository_name}"'
)
return loaded_repos.reconstructables_by_name[remote_repo_origin.repository_name]
def ReloadCode( # pyright: ignore[reportIncompatibleMethodOverride]
self, _request: dagster_api_pb2.ReloadCodeRequest, _context: grpc.ServicerContext
) -> dagster_api_pb2.ReloadCodeReply:
self._logger.warn(
"Reloading definitions from a code server launched via `dagster api grpc` "
"without restarting the process is not currently supported. To enable this functionality, "
"launch the code server with the `dagster code-server start` command instead."
)
return dagster_api_pb2.ReloadCodeReply()
@retrieve_metrics()
def Ping(self, request, _context: grpc.ServicerContext) -> dagster_api_pb2.PingReply:
echo = request.echo
return dagster_api_pb2.PingReply(
echo=echo,
serialized_server_utilization_metrics=json.dumps(_UTILIZATION_METRICS)
if self._enable_metrics
else "",
)
def StreamingPing( # pyright: ignore[reportIncompatibleMethodOverride]
self, request: dagster_api_pb2.StreamingPingRequest, _context: grpc.ServicerContext
) -> Iterator[dagster_api_pb2.StreamingPingEvent]:
sequence_length = request.sequence_length
echo = request.echo
for sequence_number in range(sequence_length):
yield dagster_api_pb2.StreamingPingEvent(sequence_number=sequence_number, echo=echo)
def Heartbeat( # pyright: ignore[reportIncompatibleMethodOverride]
self, request: dagster_api_pb2.StreamingPingRequest, _context: grpc.ServicerContext
) -> dagster_api_pb2.PingReply:
self.__last_heartbeat_time = time.time()
echo = request.echo
return dagster_api_pb2.PingReply(echo=echo)
def GetServerId( # pyright: ignore[reportIncompatibleMethodOverride]
self, _request: dagster_api_pb2.Empty, _context: grpc.ServicerContext
) -> dagster_api_pb2.GetServerIdReply:
return dagster_api_pb2.GetServerIdReply(server_id=self._server_id)
def ExecutionPlanSnapshot( # pyright: ignore[reportIncompatibleMethodOverride]
self, request: dagster_api_pb2.ExecutionPlanSnapshotRequest, _context: grpc.ServicerContext
) -> dagster_api_pb2.ExecutionPlanSnapshotReply:
execution_plan_args = deserialize_value(
request.serialized_execution_plan_snapshot_args,
ExecutionPlanSnapshotArgs,
)
try:
execution_plan_snapshot_or_error = get_external_execution_plan_snapshot(
self._get_repo_for_origin(execution_plan_args.job_origin.repository_origin),
execution_plan_args.job_origin.job_name,
execution_plan_args,
)
except Exception:
_maybe_log_exception(self._logger, "ExecutionPlanSnapshot")
execution_plan_snapshot_or_error = ExecutionPlanSnapshotErrorData(
error=serializable_error_info_from_exc_info(sys.exc_info())
)
return dagster_api_pb2.ExecutionPlanSnapshotReply(
serialized_execution_plan_snapshot=serialize_value(execution_plan_snapshot_or_error)
)
def ListRepositories( # pyright: ignore[reportIncompatibleMethodOverride]
self, request: dagster_api_pb2.ListRepositoriesRequest, _context: grpc.ServicerContext
) -> dagster_api_pb2.ListRepositoriesReply:
if self._serializable_load_error:
return dagster_api_pb2.ListRepositoriesReply(
serialized_list_repositories_response_or_error=serialize_value(
self._serializable_load_error
)
)
try:
loaded_repositories = check.not_none(self._loaded_repositories)
serialized_response = serialize_value(
ListRepositoriesResponse(
loaded_repositories.loadable_repository_symbols,
executable_path=(
self._loadable_target_origin.executable_path
if self._loadable_target_origin
else None
),
repository_code_pointer_dict=loaded_repositories.code_pointers_by_repo_name,
entry_point=self._entry_point,
container_image=self._container_image,
container_context=self._container_context,
dagster_library_versions=DagsterLibraryRegistry.get(),
defs_state_info=self._defs_state_info,
)
)
except Exception:
_maybe_log_exception(self._logger, "ListRepositories")
serialized_response = serialize_value(
serializable_error_info_from_exc_info(sys.exc_info())
)
return dagster_api_pb2.ListRepositoriesReply(
serialized_list_repositories_response_or_error=serialized_response
)
def ExternalPartitionNames( # pyright: ignore[reportIncompatibleMethodOverride]
self, request: dagster_api_pb2.ExternalPartitionNamesRequest, _context: grpc.ServicerContext
) -> dagster_api_pb2.ExternalPartitionNamesReply:
try:
partition_names_args = deserialize_value(
request.serialized_partition_names_args, PartitionNamesArgs
)
serialized_response = serialize_value(
get_partition_names(
self._get_repo_for_origin(partition_names_args.repository_origin),
job_name=partition_names_args.get_job_name(),
)
)
except Exception:
_maybe_log_exception(self._logger, "PartitionNames")
serialized_response = serialize_value(
PartitionExecutionErrorSnap(
error=serializable_error_info_from_exc_info(sys.exc_info())
)
)
return dagster_api_pb2.ExternalPartitionNamesReply(
serialized_external_partition_names_or_external_partition_execution_error=serialized_response
)
def ExternalNotebookData( # pyright: ignore[reportIncompatibleMethodOverride]
self, request: dagster_api_pb2.ExternalNotebookDataRequest, _context: grpc.ServicerContext
) -> dagster_api_pb2.ExternalNotebookDataReply:
notebook_path = request.notebook_path
check.str_param(notebook_path, "notebook_path")
return dagster_api_pb2.ExternalNotebookDataReply(content=get_notebook_data(notebook_path))
def ExternalPartitionSetExecutionParams( # pyright: ignore[reportIncompatibleMethodOverride]
self,
request: dagster_api_pb2.ExternalPartitionSetExecutionParamsRequest,
_context: grpc.ServicerContext,
) -> Iterable[dagster_api_pb2.StreamingChunkEvent]:
try:
args = deserialize_value(
request.serialized_partition_set_execution_param_args,
PartitionSetExecutionParamArgs,
)
instance_ref = args.instance_ref if args.instance_ref else self._instance_ref
serialized_data = serialize_value(
get_partition_set_execution_param_data(
self._get_repo_for_origin(args.repository_origin),
partition_set_name=args.partition_set_name,
partition_names=args.partition_names,
instance_ref=instance_ref,
)
)
except Exception:
_maybe_log_exception(self._logger, "PartitionSetExecutionParams")
serialized_data = serialize_value(
PartitionExecutionErrorSnap(
error=serializable_error_info_from_exc_info(sys.exc_info())
)
)
yield from self._split_serialized_data_into_chunk_events(serialized_data)
def ExternalPartitionConfig( # pyright: ignore[reportIncompatibleMethodOverride]
self,
request: dagster_api_pb2.ExternalPartitionConfigRequest,
_context: grpc.ServicerContext,
) -> dagster_api_pb2.ExternalPartitionConfigReply:
try:
args = deserialize_value(request.serialized_partition_args, PartitionArgs)
instance_ref = args.instance_ref if args.instance_ref else self._instance_ref
serialized_data = serialize_value(
get_partition_config(
self._get_repo_for_origin(args.repository_origin),
job_name=args.get_job_name(),
partition_key=args.partition_name,
instance_ref=instance_ref,
)
)
except Exception:
_maybe_log_exception(self._logger, "ExternalPartitionConfig")
serialized_data = serialize_value(
PartitionExecutionErrorSnap(
error=serializable_error_info_from_exc_info(sys.exc_info())
)
)
return dagster_api_pb2.ExternalPartitionConfigReply(
serialized_external_partition_config_or_external_partition_execution_error=serialized_data
)
def ExternalPartitionTags( # pyright: ignore[reportIncompatibleMethodOverride]
self, request: dagster_api_pb2.ExternalPartitionTagsRequest, _context: grpc.ServicerContext
) -> dagster_api_pb2.ExternalPartitionTagsReply:
try:
partition_args = deserialize_value(request.serialized_partition_args, PartitionArgs)
instance_ref = (
partition_args.instance_ref if partition_args.instance_ref else self._instance_ref
)
serialized_data = serialize_value(
get_partition_tags(
self._get_repo_for_origin(partition_args.repository_origin),
job_name=partition_args.get_job_name(),
partition_name=partition_args.partition_name,
instance_ref=instance_ref,
)
)
except Exception:
_maybe_log_exception(self._logger, "ExternalPartitionTags")
serialized_data = serialize_value(
PartitionExecutionErrorSnap(
error=serializable_error_info_from_exc_info(sys.exc_info())
)
)
return dagster_api_pb2.ExternalPartitionTagsReply(
serialized_external_partition_tags_or_external_partition_execution_error=serialized_data
)
def ExternalPipelineSubsetSnapshot( # pyright: ignore[reportIncompatibleMethodOverride]
self,
request: dagster_api_pb2.ExternalPipelineSubsetSnapshotRequest,
_context: grpc.ServicerContext,
) -> dagster_api_pb2.ExternalPipelineSubsetSnapshotReply:
try:
job_subset_snapshot_args = deserialize_value(
request.serialized_pipeline_subset_snapshot_args,
JobSubsetSnapshotArgs,
)
serialized_external_pipeline_subset_result = serialize_value(
get_external_pipeline_subset_result(
self._get_repo_for_origin(
job_subset_snapshot_args.job_origin.repository_origin
),
self._get_reconstructable_repo_for_origin(
job_subset_snapshot_args.job_origin.repository_origin
),
job_subset_snapshot_args.job_origin.job_name,
job_subset_snapshot_args.op_selection,
job_subset_snapshot_args.asset_selection,
job_subset_snapshot_args.asset_check_selection,
job_subset_snapshot_args.include_parent_snapshot,
)
)
except Exception:
_maybe_log_exception(self._logger, "JobSubset")
serialized_external_pipeline_subset_result = serialize_value(
RemoteJobSubsetResult(
success=False, error=serializable_error_info_from_exc_info(sys.exc_info())
)
)
return dagster_api_pb2.ExternalPipelineSubsetSnapshotReply(
serialized_external_pipeline_subset_result=serialized_external_pipeline_subset_result
)
def _get_serialized_external_repository_data(
self, request: dagster_api_pb2.ExternalRepositoryRequest
) -> str:
try:
repository_origin = deserialize_value(
request.serialized_repository_python_origin,
RemoteRepositoryOrigin,
)
return serialize_value(
RepositorySnap.from_def(
self._get_repo_for_origin(repository_origin),
defer_snapshots=request.defer_snapshots,
)
)
except Exception:
_maybe_log_exception(self._logger, "Repository")
return serialize_value(
RepositoryErrorSnap(error=serializable_error_info_from_exc_info(sys.exc_info()))
)
def ExternalRepository( # pyright: ignore[reportIncompatibleMethodOverride]
self, request: dagster_api_pb2.ExternalRepositoryRequest, _context: grpc.ServicerContext
) -> dagster_api_pb2.ExternalRepositoryReply:
serialized_external_repository_data = self._get_serialized_external_repository_data(request)
return dagster_api_pb2.ExternalRepositoryReply(
serialized_external_repository_data=serialized_external_repository_data,
)
def ExternalJob( # pyright: ignore[reportIncompatibleMethodOverride]
self, request: dagster_api_pb2.ExternalJobRequest, _context: grpc.ServicerContext
) -> dagster_api_pb2.ExternalJobReply:
try:
repository_origin = deserialize_value(
request.serialized_repository_origin,
RemoteRepositoryOrigin,
)
job_def = self._get_repo_for_origin(repository_origin).get_job(request.job_name)
ser_job_data = serialize_value(
JobDataSnap.from_job_def(job_def, include_parent_snapshot=True)
)
return dagster_api_pb2.ExternalJobReply(serialized_job_data=ser_job_data)
except Exception:
_maybe_log_exception(self._logger, "Job")
return dagster_api_pb2.ExternalJobReply(
serialized_error=serialize_value(
serializable_error_info_from_exc_info(sys.exc_info())
)
)
def StreamingExternalRepository( # pyright: ignore[reportIncompatibleMethodOverride]
self, request: dagster_api_pb2.ExternalRepositoryRequest, _context: grpc.ServicerContext
) -> Iterable[dagster_api_pb2.StreamingExternalRepositoryEvent]:
serialized_external_repository_data = self._get_serialized_external_repository_data(request)
num_chunks = math.ceil(
float(len(serialized_external_repository_data)) / STREAMING_CHUNK_SIZE
)
for i in range(num_chunks):
start_index = i * STREAMING_CHUNK_SIZE
end_index = min(
(i + 1) * STREAMING_CHUNK_SIZE,
len(serialized_external_repository_data),
)
yield dagster_api_pb2.StreamingExternalRepositoryEvent(
sequence_number=i,
serialized_external_repository_chunk=serialized_external_repository_data[
start_index:end_index
],
)
def _split_serialized_data_into_chunk_events(
self, serialized_data: str
) -> Iterable[dagster_api_pb2.StreamingChunkEvent]:
num_chunks = math.ceil(float(len(serialized_data)) / STREAMING_CHUNK_SIZE)
for i in range(num_chunks):
start_index = i * STREAMING_CHUNK_SIZE
end_index = min(
(i + 1) * STREAMING_CHUNK_SIZE,
len(serialized_data),
)
yield dagster_api_pb2.StreamingChunkEvent(
sequence_number=i,
serialized_chunk=serialized_data[start_index:end_index],
)
def ExternalScheduleExecution( # pyright: ignore[reportIncompatibleMethodOverride]
self,
request: dagster_api_pb2.ExternalScheduleExecutionRequest,
_context: grpc.ServicerContext,
) -> Iterable[dagster_api_pb2.StreamingChunkEvent]:
yield from self._split_serialized_data_into_chunk_events(
self._external_schedule_execution(request)
)
def SyncExternalScheduleExecution(self, request, _context: grpc.ServicerContext): # pyright: ignore[reportIncompatibleMethodOverride]
return dagster_api_pb2.ExternalScheduleExecutionReply(
serialized_schedule_result=self._external_schedule_execution(request)
)
def _external_schedule_execution(
self, request: dagster_api_pb2.ExternalScheduleExecutionRequest
) -> str:
try:
args = deserialize_value(
request.serialized_external_schedule_execution_args,
ExternalScheduleExecutionArgs,
)
return serialize_value(
get_external_schedule_execution(
self._get_repo_for_origin(args.repository_origin),
args.instance_ref,
args.schedule_name,
args.scheduled_execution_timestamp,
args.scheduled_execution_timezone,
args.log_key,
)
)
except Exception:
_maybe_log_exception(self._logger, "ScheduleExecution")
return serialize_value(
ScheduleExecutionErrorSnap(
error=serializable_error_info_from_exc_info(sys.exc_info())
)
)
def _external_sensor_execution(
self, request: dagster_api_pb2.ExternalSensorExecutionRequest
) -> str:
try:
args = deserialize_value(
request.serialized_external_sensor_execution_args,
SensorExecutionArgs,
)
return serialize_value(
get_external_sensor_execution(
self._get_repo_for_origin(args.repository_origin),
args.repository_origin.code_location_origin,
args.instance_ref,
args.sensor_name,
args.last_tick_completion_time,
args.last_run_key,
args.cursor,
args.log_key,
args.last_sensor_start_time,
)
)
except Exception:
_maybe_log_exception(self._logger, "SensorExecution")
return serialize_value(
SensorExecutionErrorSnap(
error=serializable_error_info_from_exc_info(sys.exc_info())
)
)
@retrieve_metrics()
def SyncExternalSensorExecution(
self,
request: dagster_api_pb2.ExternalSensorExecutionRequest,
_context: grpc.ServicerContext,
) -> dagster_api_pb2.ExternalSensorExecutionReply:
return dagster_api_pb2.ExternalSensorExecutionReply(
serialized_sensor_result=self._external_sensor_execution(request)
)
@retrieve_metrics()
def ExternalSensorExecution(
self,
request: dagster_api_pb2.ExternalSensorExecutionRequest,
_context: grpc.ServicerContext,
) -> Iterable[dagster_api_pb2.StreamingChunkEvent]:
yield from self._split_serialized_data_into_chunk_events(
self._external_sensor_execution(request)
)
def ShutdownServer( # pyright: ignore[reportIncompatibleMethodOverride]
self, request: dagster_api_pb2.Empty, _context: grpc.ServicerContext
) -> dagster_api_pb2.ShutdownServerReply:
try:
self._shutdown_once_executions_finish_event.set()
return dagster_api_pb2.ShutdownServerReply(
serialized_shutdown_server_result=serialize_value(
ShutdownServerResult(success=True, serializable_error_info=None)
)
)
except:
self._logger.exception("Failed to shut down server")
return dagster_api_pb2.ShutdownServerReply(
serialized_shutdown_server_result=serialize_value(
ShutdownServerResult(
success=False,
serializable_error_info=serializable_error_info_from_exc_info(
sys.exc_info()
),
)
)
)
def CancelExecution( # pyright: ignore[reportIncompatibleMethodOverride]
self, request: dagster_api_pb2.CancelExecutionRequest, _context: grpc.ServicerContext
) -> dagster_api_pb2.CancelExecutionReply:
success = False
message = None
serializable_error_info = None
try:
cancel_execution_request = deserialize_value(
request.serialized_cancel_execution_request,
CancelExecutionRequest,
)
with self._execution_lock:
if cancel_execution_request.run_id in self._executions:
self._termination_events[cancel_execution_request.run_id].set()
self._termination_times[cancel_execution_request.run_id] = time.time()
success = True
except:
serializable_error_info = serializable_error_info_from_exc_info(sys.exc_info())
return dagster_api_pb2.CancelExecutionReply(
serialized_cancel_execution_result=serialize_value(
CancelExecutionResult(
success=success,
message=message,
serializable_error_info=serializable_error_info,
)
)
)
def CanCancelExecution( # pyright: ignore[reportIncompatibleMethodOverride]
self, request: dagster_api_pb2.CanCancelExecutionRequest, _context: grpc.ServicerContext
) -> dagster_api_pb2.CanCancelExecutionReply:
can_cancel_execution_request = deserialize_value(
request.serialized_can_cancel_execution_request,
CanCancelExecutionRequest,
)
with self._execution_lock:
run_id = can_cancel_execution_request.run_id
can_cancel = (
run_id in self._executions and not self._termination_events[run_id].is_set()
)
return dagster_api_pb2.CanCancelExecutionReply(
serialized_can_cancel_execution_result=serialize_value(
CanCancelExecutionResult(can_cancel=can_cancel)
)
)
def StartRun( # pyright: ignore[reportIncompatibleMethodOverride]
self, request: dagster_api_pb2.StartRunRequest, _context: grpc.ServicerContext
) -> dagster_api_pb2.StartRunReply:
if self._shutdown_once_executions_finish_event.is_set():
return dagster_api_pb2.StartRunReply(
serialized_start_run_result=serialize_value(
StartRunResult(
success=False,
message="Tried to start a run on a server after telling it to shut down",
serializable_error_info=None,
)
)
)
try:
execute_external_job_args = deserialize_value(
request.serialized_execute_run_args,
ExecuteExternalJobArgs,
)
run_id = execute_external_job_args.run_id
# reconstructable required for handing execution off to subprocess
recon_repo = check.not_none(self._loaded_repositories).reconstructables_by_name[
execute_external_job_args.job_origin.repository_origin.repository_name
]
recon_job = recon_repo.get_reconstructable_job(
execute_external_job_args.job_origin.job_name
)
except:
return dagster_api_pb2.StartRunReply(
serialized_start_run_result=serialize_value(
StartRunResult(
success=False,
message=None,
serializable_error_info=serializable_error_info_from_exc_info(
sys.exc_info()
),
)
)
)
event_queue = self._mp_ctx.Queue()
termination_event = self._mp_ctx.Event()
execution_process = self._mp_ctx.Process(
target=start_run_in_subprocess,
args=[
request.serialized_execute_run_args,
recon_job,
event_queue,
termination_event,
],
)
with self._execution_lock:
execution_process.start()
self._executions[run_id] = (
# Cast here to convert `SpawnProcess` from event into regular `Process`-- not sure
# why not recognized as subclass, multiprocessing typing is a little rough.
cast("multiprocessing.Process", execution_process),
check.not_none(execute_external_job_args.instance_ref),
)
self._termination_events[run_id] = termination_event
success = None
message = None
serializable_error_info = None
while success is None:
sleep(EVENT_QUEUE_POLL_INTERVAL)
# We use `get_nowait()` instead of `get()` so that we can handle the case where the
# execution process has died unexpectedly -- `get()` would hang forever in that case
try:
dagster_event_or_ipc_error_message_or_done = event_queue.get_nowait()
except queue.Empty:
if not execution_process.is_alive():
# subprocess died unexpectedly
success = False
message = (
f"GRPC server: Subprocess for {run_id} terminated unexpectedly with "
f"exit code {execution_process.exitcode}"
)
serializable_error_info = serializable_error_info_from_exc_info(sys.exc_info())
else:
if isinstance(
dagster_event_or_ipc_error_message_or_done, StartRunInSubprocessSuccessful
):
success = True
elif isinstance(
dagster_event_or_ipc_error_message_or_done, RunInSubprocessComplete
):
continue
if isinstance(dagster_event_or_ipc_error_message_or_done, IPCErrorMessage):
success = False
message = dagster_event_or_ipc_error_message_or_done.message
serializable_error_info = (
dagster_event_or_ipc_error_message_or_done.serializable_error_info
)
# Ensure that if the run failed, we remove it from the executions map before
# returning so that CanCancel will never return True
if not success:
with self._execution_lock:
self._clear_run(run_id)
return dagster_api_pb2.StartRunReply(
serialized_start_run_result=serialize_value(
StartRunResult(
success=success,
message=message,
serializable_error_info=serializable_error_info,
)
)
)
def GetCurrentImage( # pyright: ignore[reportIncompatibleMethodOverride]
self, request: dagster_api_pb2.Empty, _context: grpc.ServicerContext
) -> dagster_api_pb2.GetCurrentImageReply:
return dagster_api_pb2.GetCurrentImageReply(
serialized_current_image=serialize_value(
GetCurrentImageResult(
current_image=self._container_image, serializable_error_info=None
)
)
)
def GetCurrentRuns( # pyright: ignore[reportIncompatibleMethodOverride]
self, request: dagster_api_pb2.Empty, _context: grpc.ServicerContext
) -> dagster_api_pb2.GetCurrentRunsReply:
with self._execution_lock:
return dagster_api_pb2.GetCurrentRunsReply(
serialized_current_runs=serialize_value(
GetCurrentRunsResult(
current_runs=list(self._executions.keys()), serializable_error_info=None
)
)
)
def server_termination_target(termination_event, server, logger):
termination_event.wait()
shutdown_grace_period = default_grpc_server_shutdown_grace_period()
logger.info(
f"Stopping server once all current RPC calls terminate or {shutdown_grace_period} seconds"
" pass"
)
finished_shutting_down_rpcs_event = server.stop(grace=shutdown_grace_period)
finished_shutting_down_rpcs_event.wait(shutdown_grace_period + 5)
if not finished_shutting_down_rpcs_event.is_set():
logger.warning("Server did not shut down cleanly")
| DagsterApiServer |
python | facebook__pyre-check | client/error.py | {
"start": 4902,
"end": 9626
} | class ____:
path: Optional[Path]
description: str
code: int
start_line: Optional[int]
start_column: Optional[int]
stop_line: Optional[int]
stop_column: Optional[int]
@staticmethod
def from_json(error_json: Dict[str, Any]) -> "TaintConfigurationError":
try:
error_location = error_json["location"]
if error_location is not None:
start_line = error_location["start"]["line"]
start_column = error_location["start"]["column"]
stop_line = error_location["stop"]["line"]
stop_column = error_location["stop"]["column"]
else:
start_line = None
start_column = None
stop_line = None
stop_column = None
return TaintConfigurationError(
path=(
Path(error_json["path"]) if error_json["path"] is not None else None
),
description=error_json["description"],
code=error_json["code"],
start_line=start_line,
start_column=start_column,
stop_line=stop_line,
stop_column=stop_column,
)
except KeyError as key_error:
message = f"Missing field from error json: {key_error}"
raise ErrorParsingFailure(message) from key_error
except TypeError as type_error:
message = f"Field type mismatch: {type_error}"
raise ErrorParsingFailure(message) from type_error
@staticmethod
def from_string(error_string: str) -> "TaintConfigurationError":
try:
return TaintConfigurationError.from_json(json.loads(error_string))
except json.JSONDecodeError as decode_error:
message = f"Cannot parse JSON: {decode_error}"
raise ErrorParsingFailure(message) from decode_error
def to_json(self) -> Dict[str, Any]:
return {
"path": str(self.path) if self.path is not None else None,
"description": self.description,
"code": self.code,
"start_line": self.start_line,
"start_column": self.start_column,
"stop_line": self.stop_line,
"stop_column": self.stop_column,
}
def to_text(self) -> str:
path = click.style(str(self.path or "?"), fg="red")
location = click.style(
(
f":{self.start_line}:{self.start_column}"
if (self.start_line is not None) and (self.start_column is not None)
else ""
),
fg="red",
)
return f"{path}{location} {self.description}"
def to_sarif(self) -> Dict[str, Any]:
return {
"ruleId": (
"PYRE-TAINT-CONFIGURATION-ERROR-" + str(self.code)
if self.code is not None
else "PYRE-TAINT-CONFIGURATION-ERROR-MDL"
),
"level": "error",
"message": {"text": self.description},
"locations": [
{
"physicalLocation": {
"artifactLocation": {
"uri": str(self.path) if self.path is not None else None,
},
"region": {
"startLine": (
self.start_line if self.start_line is not None else 0
),
"startColumn": (
self.start_column
if self.start_column is not None
else 0
),
"endLine": (
self.stop_line if self.stop_line is not None else 0
),
"endColumn": (
self.stop_column if self.stop_column is not None else 1
),
},
},
},
],
}
def get_sarif_rule(self) -> Dict[str, Any]:
return {
"id": (
"PYRE-TAINT-CONFIGURATION-ERROR-" + str(self.code)
if self.code is not None
else "PYRE-TAINT-CONFIGURATION-ERROR-MDL"
),
"name": "TaintConfigurationError",
"shortDescription": {"text": "Taint configuration error"},
"helpUri": "https://www.pyre-check.org",
"help": {"text": "Taint Configuration error"},
}
@dataclasses.dataclass(frozen=True)
| TaintConfigurationError |
python | huggingface__transformers | src/transformers/models/falcon_h1/modular_falcon_h1.py | {
"start": 36414,
"end": 36783
} | class ____(LlamaMLP):
def __init__(self, config: FalconH1Config):
super().__init__(config)
self.gate_multiplier, self.down_multiplier = config.mlp_multipliers
def forward(self, x):
y = self.up_proj(x) * self.act_fn(self.gate_proj(x) * self.gate_multiplier)
y = self.down_proj(y) * self.down_multiplier
return y
| FalconH1MLP |
python | more-itertools__more-itertools | tests/test_recipes.py | {
"start": 18033,
"end": 19493
} | class ____(TestCase):
"""Tests for ``random_permutation()``"""
def test_full_permutation(self):
"""ensure every item from the iterable is returned in a new ordering
15 elements have a 1 in 1.3 * 10e12 of appearing in sorted order, so
we fix a seed value just to be sure.
"""
i = range(15)
r = mi.random_permutation(i)
self.assertEqual(set(i), set(r))
if i == r:
raise AssertionError("Values were not permuted")
def test_partial_permutation(self):
"""ensure all returned items are from the iterable, that the returned
permutation is of the desired length, and that all items eventually
get returned.
Sampling 100 permutations of length 5 from a set of 15 leaves a
(2/3)^100 chance that an item will not be chosen. Multiplied by 15
items, there is a 1 in 2.6e16 chance that at least 1 item will not
show up in the resulting output. Using a random seed will fix that.
"""
items = range(15)
item_set = set(items)
all_items = set()
for _ in range(100):
permutation = mi.random_permutation(items, 5)
self.assertEqual(len(permutation), 5)
permutation_set = set(permutation)
self.assertLessEqual(permutation_set, item_set)
all_items |= permutation_set
self.assertEqual(all_items, item_set)
| RandomPermutationTests |
python | mlflow__mlflow | mlflow/utils/autologging_utils/events.py | {
"start": 413,
"end": 2390
} | class ____:
"""
A wrapper around AutologgingEventLogger for DRY:
- Store common arguments to avoid passing them to each logger method
- Catches exceptions thrown by the logger and logs them
NB: We could not modify the AutologgingEventLogger class directly because
it is used in Databricks code base as well.
"""
def __init__(self, session, destination: Any, function_name: str):
self._session = session
self._destination = destination
self._function_name = function_name
self._logger = AutologgingEventLogger.get_logger()
@_catch_exception
def log_patch_function_start(self, args, kwargs):
self._logger.log_patch_function_start(
self._session, self._destination, self._function_name, args, kwargs
)
@_catch_exception
def log_patch_function_success(self, args, kwargs):
self._logger.log_patch_function_success(
self._session, self._destination, self._function_name, args, kwargs
)
@_catch_exception
def log_patch_function_error(self, args, kwargs, exception):
self._logger.log_patch_function_error(
self._session, self._destination, self._function_name, args, kwargs, exception
)
@_catch_exception
def log_original_function_start(self, args, kwargs):
self._logger.log_original_function_start(
self._session, self._destination, self._function_name, args, kwargs
)
@_catch_exception
def log_original_function_success(self, args, kwargs):
self._logger.log_original_function_success(
self._session, self._destination, self._function_name, args, kwargs
)
@_catch_exception
def log_original_function_error(self, args, kwargs, exception):
self._logger.log_original_function_error(
self._session, self._destination, self._function_name, args, kwargs, exception
)
| AutologgingEventLoggerWrapper |
python | cython__cython | Cython/Compiler/PyrexTypes.py | {
"start": 107108,
"end": 111457
} | class ____(CPointerBaseType):
# base_type CType Reference type
is_ptr = 1
is_unowned_view = True
default_value = "0"
exception_value = "NULL"
def __hash__(self):
return hash(self.base_type) + 27 # arbitrarily chosen offset
def __eq__(self, other):
if isinstance(other, CType) and other.is_ptr:
return self.base_type.same_as(other.base_type)
return False
def __ne__(self, other):
return not (self == other)
def __repr__(self):
return "<CPtrType %s>" % repr(self.base_type)
def same_as_resolved_type(self, other_type):
return ((other_type.is_ptr and
self.base_type.same_as(other_type.base_type))
or other_type is error_type)
def is_simple_buffer_dtype(self):
return True
def declaration_code(self, entity_code,
for_display = 0, dll_linkage = None, pyrex = 0):
#print "CPtrType.declaration_code: pointer to", self.base_type ###
return self.base_type.declaration_code(
"*%s" % entity_code,
for_display, dll_linkage, pyrex)
def assignable_from_resolved_type(self, other_type):
if other_type is error_type:
return True
if other_type.is_null_ptr:
return True
ptr_base_type = self.base_type
if ptr_base_type.is_cv_qualified:
ptr_base_type = ptr_base_type.cv_base_type
if ptr_base_type.is_cfunction:
if other_type.is_ptr:
other_type = other_type.base_type.resolve()
if other_type.is_cfunction:
return ptr_base_type.pointer_assignable_from_resolved_type(other_type)
else:
return False
if (ptr_base_type.is_cpp_class and other_type.is_ptr
and other_type.base_type.is_cpp_class and other_type.base_type.is_subclass(ptr_base_type)):
return True
if other_type.is_array or other_type.is_ptr:
return ptr_base_type.is_void or ptr_base_type.same_as(other_type.base_type)
return False
def assignment_failure_extra_info(self, src_type, src_name):
if self.base_type.is_cfunction and src_type.is_ptr:
src_type = src_type.base_type.resolve()
if self.base_type.is_cfunction and src_type.is_cfunction:
copied_src_type = copy.copy(src_type)
# make the exception values the same as us
copied_src_type.exception_check = self.base_type.exception_check
copied_src_type.exception_value = self.base_type.exception_value
if self.base_type.pointer_assignable_from_resolved_type(copied_src_type):
# the only reason we can't assign is because of exception incompatibility
msg = " Exception values are incompatible."
if not self.base_type.exception_check and self.base_type.exception_value is None:
if src_name is None:
src_name = "the value being assigned"
else:
src_name = "'{}'".format(src_name)
msg += f" Suggest adding 'noexcept' to the type of {src_name}."
return msg
return super().assignment_failure_extra_info(src_type, src_name)
def specialize(self, values):
base_type = self.base_type.specialize(values)
if base_type == self.base_type:
return self
else:
return CPtrType(base_type)
def deduce_template_params(self, actual):
if isinstance(actual, CPtrType):
return self.base_type.deduce_template_params(actual.base_type)
else:
return {}
def find_cpp_operation_type(self, operator, operand_type=None):
if self.base_type.is_cpp_class:
return self.base_type.find_cpp_operation_type(operator, operand_type)
return None
def get_fused_types(self, result=None, seen=None, include_function_return_type=False):
# For function pointers, include the return type - unlike for fused functions themselves,
# where the return type cannot be an independent fused type (i.e. is derived or non-fused).
return super(CPointerBaseType, self).get_fused_types(result, seen, include_function_return_type=True)
| CPtrType |
python | tensorflow__tensorflow | tensorflow/python/keras/layers/pooling.py | {
"start": 40104,
"end": 42054
} | class ____(GlobalPooling2D):
"""Global average pooling operation for spatial data.
Examples:
>>> input_shape = (2, 4, 5, 3)
>>> x = tf.random.normal(input_shape)
>>> y = tf.keras.layers.GlobalAveragePooling2D()(x)
>>> print(y.shape)
(2, 3)
Args:
data_format: A string,
one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, height, width, channels)` while `channels_first`
corresponds to inputs with shape
`(batch, channels, height, width)`.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "channels_last".
keepdims: A boolean, whether to keep the spatial dimensions or not.
If `keepdims` is `False` (default), the rank of the tensor is reduced
for spatial dimensions.
If `keepdims` is `True`, the spatial dimensions are retained with
length 1.
The behavior is the same as for `tf.reduce_mean` or `np.mean`.
Input shape:
- If `data_format='channels_last'`:
4D tensor with shape `(batch_size, rows, cols, channels)`.
- If `data_format='channels_first'`:
4D tensor with shape `(batch_size, channels, rows, cols)`.
Output shape:
- If `keepdims`=False:
2D tensor with shape `(batch_size, channels)`.
- If `keepdims`=True:
- If `data_format='channels_last'`:
4D tensor with shape `(batch_size, 1, 1, channels)`
- If `data_format='channels_first'`:
4D tensor with shape `(batch_size, channels, 1, 1)`
"""
def call(self, inputs):
if self.data_format == 'channels_last':
return backend.mean(inputs, axis=[1, 2], keepdims=self.keepdims)
else:
return backend.mean(inputs, axis=[2, 3], keepdims=self.keepdims)
| GlobalAveragePooling2D |
python | marshmallow-code__marshmallow | performance/benchmark.py | {
"start": 455,
"end": 787
} | class ____(Schema):
id = fields.Int(dump_only=True)
first = fields.Str()
last = fields.Str()
book_count = fields.Float()
age = fields.Float()
address = fields.Str()
full_name = fields.Method("get_full_name")
def get_full_name(self, author):
return f"{author.last}, {author.first}"
| AuthorSchema |
python | pandas-dev__pandas | pandas/tests/arrays/sparse/test_reductions.py | {
"start": 7922,
"end": 9641
} | class ____:
@pytest.mark.parametrize(
"arr,argmax_expected,argmin_expected",
[
(SparseArray([1, 2, 0, 1, 2]), 1, 2),
(SparseArray([-1, -2, 0, -1, -2]), 2, 1),
(SparseArray([np.nan, 1, 0, 0, np.nan, -1]), 1, 5),
(SparseArray([np.nan, 1, 0, 0, np.nan, 2]), 5, 2),
(SparseArray([np.nan, 1, 0, 0, np.nan, 2], fill_value=-1), 5, 2),
(SparseArray([np.nan, 1, 0, 0, np.nan, 2], fill_value=0), 5, 2),
(SparseArray([np.nan, 1, 0, 0, np.nan, 2], fill_value=1), 5, 2),
(SparseArray([np.nan, 1, 0, 0, np.nan, 2], fill_value=2), 5, 2),
(SparseArray([np.nan, 1, 0, 0, np.nan, 2], fill_value=3), 5, 2),
(SparseArray([0] * 10 + [-1], fill_value=0), 0, 10),
(SparseArray([0] * 10 + [-1], fill_value=-1), 0, 10),
(SparseArray([0] * 10 + [-1], fill_value=1), 0, 10),
(SparseArray([-1] + [0] * 10, fill_value=0), 1, 0),
(SparseArray([1] + [0] * 10, fill_value=0), 0, 1),
(SparseArray([-1] + [0] * 10, fill_value=-1), 1, 0),
(SparseArray([1] + [0] * 10, fill_value=1), 0, 1),
],
)
def test_argmax_argmin(self, arr, argmax_expected, argmin_expected):
argmax_result = arr.argmax()
argmin_result = arr.argmin()
assert argmax_result == argmax_expected
assert argmin_result == argmin_expected
@pytest.mark.parametrize("method", ["argmax", "argmin"])
def test_empty_array(self, method):
msg = f"attempt to get {method} of an empty sequence"
arr = SparseArray([])
with pytest.raises(ValueError, match=msg):
getattr(arr, method)()
| TestArgmaxArgmin |
python | redis__redis-py | redis/connection.py | {
"start": 70692,
"end": 71595
} | class ____(ABC):
@abstractmethod
def get_protocol(self):
pass
@abstractmethod
def reset(self):
pass
@abstractmethod
@deprecated_args(
args_to_warn=["*"],
reason="Use get_connection() without args instead",
version="5.3.0",
)
def get_connection(
self, command_name: Optional[str], *keys, **options
) -> ConnectionInterface:
pass
@abstractmethod
def get_encoder(self):
pass
@abstractmethod
def release(self, connection: ConnectionInterface):
pass
@abstractmethod
def disconnect(self, inuse_connections: bool = True):
pass
@abstractmethod
def close(self):
pass
@abstractmethod
def set_retry(self, retry: Retry):
pass
@abstractmethod
def re_auth_callback(self, token: TokenInterface):
pass
| ConnectionPoolInterface |
python | getsentry__sentry | tests/sentry/users/api/endpoints/test_userroles_details.py | {
"start": 170,
"end": 1165
} | class ____(APITestCase):
endpoint = "sentry-api-0-userroles-details"
def setUp(self) -> None:
super().setUp()
self.user = self.create_user(is_superuser=True)
self.login_as(user=self.user, superuser=True)
self.add_user_permission(self.user, "users.admin")
def test_fails_without_superuser(self) -> None:
self.user = self.create_user(is_superuser=False)
self.login_as(self.user)
self.create_user_role(name="test-role")
resp = self.get_response("test-role")
assert resp.status_code == 403
self.user.update(is_superuser=True)
resp = self.get_response("test-role")
assert resp.status_code == 403
def test_fails_without_users_admin_permission(self) -> None:
self.user = self.create_user(is_superuser=True)
self.login_as(self.user, superuser=True)
resp = self.get_response("test-role")
assert resp.status_code == 403
@control_silo_test
| UserRolesDetailsTest |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.