language
stringclasses 1
value | repo
stringclasses 346
values | path
stringlengths 6
201
| class_span
dict | source
stringlengths 21
2.38M
| target
stringlengths 1
96
|
|---|---|---|---|---|---|
python
|
pytorch__pytorch
|
torch/autograd/variable.py
|
{
"start": 256,
"end": 391
}
|
class ____(torch._C._LegacyVariableBase, metaclass=VariableMeta): # type: ignore[misc]
_execution_engine = ImperativeEngine()
|
Variable
|
python
|
pytorch__pytorch
|
torch/nn/attention/experimental/_paged_attention.py
|
{
"start": 468,
"end": 13026
}
|
class ____:
"""
PagedAttention supports flex attention inference with a large batch size.
With PagedAttention, a batch of key/value tensors with varying kv length
is split into tensor blocks of fixed length and cached in a compact way.
Thus we can avoid redundant memory consumption due to varying kv length and
support a larger batch size.
"""
def __init__(
self,
n_pages: int,
page_size: int,
max_batch_size: int,
device: str = "cuda",
) -> None:
# number of pages
self.n_pages = n_pages
# number of tokens per page
self.page_size = page_size
# page table: [batch, logical_block_idx] -> physical_page_idx
self.page_table = -torch.ones(
(max_batch_size, self.n_pages), dtype=torch.int64, device=device
)
# capacity: batch_idx -> allocated sequence length
self.capacity = torch.zeros(max_batch_size, dtype=torch.int64, device=device)
# index of empty pages that is available for allocation
self.empty_pages = list(range(n_pages - 1, -1, -1))
# mapping from physical page index to logical page index
self.physical_to_logical = -torch.ones(
(max_batch_size, n_pages), dtype=torch.int64, device=device
)
def reserve(self, batch_idx: torch.Tensor, seq_len: torch.Tensor) -> None:
"""
Requests the capacity of a given batch to be at least enough to
hold `seq_len` elements.
Args:
batch_idx (Tensor): batch index to be reserved; shape :math:`(1)`.
seq_len (Tensor): minimum capacity for the given batch; shape :math:`(1)`.
"""
if seq_len <= self.capacity[batch_idx]:
return
num_pages_to_allocate = _cdiv(
seq_len - self.capacity[batch_idx], self.page_size
)
assert len(self.empty_pages) >= num_pages_to_allocate, (
f"requested {num_pages_to_allocate.item()} pages "
f"but there are only {len(self.empty_pages)} empty pages"
)
start_page_idx = self.capacity[batch_idx] // self.page_size
end_page_idx = start_page_idx + num_pages_to_allocate
# find empty physical pages
allocated_pages = torch.tensor(
self.empty_pages[-num_pages_to_allocate:],
device=num_pages_to_allocate.device,
)
self.empty_pages = self.empty_pages[:-num_pages_to_allocate]
# update page table
self.page_table[
batch_idx,
start_page_idx:end_page_idx,
] = allocated_pages
# update metadata
self.physical_to_logical[batch_idx, allocated_pages] = torch.arange(
start_page_idx.item(),
end_page_idx.item(),
device=num_pages_to_allocate.device,
)
self.capacity[batch_idx] += num_pages_to_allocate * self.page_size
def erase(self, batch_idx: torch.Tensor) -> None:
"""
Removes a single batch from paged attention.
Args:
batch_idx (Tensor): batch index to be removed; shape :math:`(1)`.
"""
# find allocated pages
allocated_page_idx = self.page_table[batch_idx] != -1
allocated_pages = self.page_table[batch_idx][allocated_page_idx]
# clean metadata
self.capacity[batch_idx] = 0
self.empty_pages += allocated_pages.tolist()
self.physical_to_logical[batch_idx][:, allocated_pages] = -1
self.page_table[batch_idx] = -1
def assign(
self,
batch_idx: torch.Tensor,
input_pos: torch.Tensor,
k_val: torch.Tensor,
v_val: torch.Tensor,
k_cache: torch.Tensor,
v_cache: torch.Tensor,
) -> None:
"""
Assigns new contents `val` to the storage `cache` at the location
`batch_idx` and `input_pos`.
Args:
batch_idx (Tensor): batch index; shape :math:`(B)`.
input_pos (Tensor): input positions to be assigned for the given batch; shape :math:`(B, S)`.
val (Tensor): value to be assigned; shape :math:`(B, H, S, D)`
cache (Tensor): the cache to store the values; shape:`(1, H, MAX_S, D)`
"""
if k_val.requires_grad:
raise RuntimeError("val must not require gradient")
B, H, S, K_D = k_val.shape
V_D = v_val.shape[3]
if B != batch_idx.shape[0]:
raise RuntimeError(
f"Expect val and batch_idx have the same batch size "
f"but got B={B} and B={batch_idx.shape[0]}."
)
if H != k_cache.shape[1]:
raise RuntimeError(
f"Expect val and cache has the same number of heads "
f"but got H={H} and H={k_cache.shape[1]}."
)
if S != input_pos.shape[1]:
raise RuntimeError(
f"Expect val and input_pos has the same length "
f"but got S={S} and S={input_pos.shape[0]}."
)
if K_D != k_cache.shape[3]:
raise RuntimeError(
f"Expect k_val and k_cache has the same hidden dim "
f"but got D={K_D} and D={k_cache.shape[3]}."
)
if V_D != v_cache.shape[3]:
raise RuntimeError(
f"Expect v_val and v_cache has the same hidden dim "
f"but got D={V_D} and D={v_cache.shape[3]}."
)
# find address
logical_block_idx = input_pos // self.page_size # [B, S]
logical_block_offset = input_pos % self.page_size # [B, S]
physical_block_idx = torch.gather(
self.page_table[batch_idx], 1, logical_block_idx.to(torch.int64)
).to(torch.int32) # [B, S]
addr = (physical_block_idx * self.page_size + logical_block_offset).view(
-1
) # [B*S]
k_val = k_val.permute(1, 0, 2, 3).contiguous().view(1, H, B * S, K_D)
v_val = v_val.permute(1, 0, 2, 3).contiguous().view(1, H, B * S, V_D)
k_cache[:, :, addr, :] = k_val
v_cache[:, :, addr, :] = v_val
def convert_logical_block_mask(
self,
block_mask: BlockMask,
batch_idx: torch.Tensor | None = None,
kv_len: torch.Tensor | None = None,
) -> BlockMask:
"""
Converts a logical block mask by mapping its logical kv indices to the corresponding
physical kv indices.
Args:
block_mask (BlockMask): logical block mask;
kv_indices shape :math:`(B, H, ROWS, MAX_BLOCKS_IN_COL)`.
batch_idx (Tensor): batch index corresponding to the block_mask
batch dimension. This provides flexibility to convert a
block mask with smaller batch size than the page table;
shape :math:`(B)`.
kv_len (Optional[Tensor]): actual KV sequence length for upper bound check;
shape :math:`(B,)` to handle multiple batches.
"""
B, H, ROWS, MAX_BLOCKS_IN_COL = block_mask.kv_indices.shape
if block_mask.BLOCK_SIZE[1] != self.page_size:
raise RuntimeError(
f"Expect block_mask has the same column block size as page_size"
f"but got size={block_mask.BLOCK_SIZE[1]} and size={self.page_size}"
)
# Increase the num columns of converted block mask from logical block mask's
# num columns to n_pages, since a) the converted block mask
# may have larger indices values; and b) `_ordered_to_dense` realizes
# a dense tensor with these converted indices. There would be an IndexError
# if using the logical block mask's num columns.
device = block_mask.kv_num_blocks.device
if batch_idx is None:
batch_idx = torch.arange(B, device=device)
page_table = self.page_table[batch_idx]
new_kv_num_blocks = block_mask.kv_num_blocks.clone()
new_kv_indices = torch.zeros(
(B, H, ROWS, self.n_pages), dtype=torch.int32, device=device
)
new_kv_indices[:, :, :, :MAX_BLOCKS_IN_COL] = (
torch.gather(
page_table, 1, block_mask.kv_indices.view(B, -1).to(torch.int64)
)
.view(block_mask.kv_indices.shape)
.to(torch.int32)
)
new_full_kv_indices, new_full_kv_num_blocks = None, None
if block_mask.full_kv_num_blocks is not None:
assert block_mask.full_kv_indices is not None
new_full_kv_num_blocks = block_mask.full_kv_num_blocks.clone()
new_full_kv_indices = torch.zeros(
(B, H, ROWS, self.n_pages), dtype=torch.int32, device=device
)
new_full_kv_indices[:, :, :, :MAX_BLOCKS_IN_COL] = (
torch.gather(
page_table,
1,
block_mask.full_kv_indices.view(B, -1).to(torch.int64),
)
.view(block_mask.full_kv_indices.shape)
.to(torch.int32)
)
new_mask_mod = self.get_mask_mod(block_mask.mask_mod, kv_len)
seq_lengths = (block_mask.seq_lengths[0], self.n_pages * self.page_size)
return BlockMask.from_kv_blocks(
new_kv_num_blocks,
new_kv_indices,
new_full_kv_num_blocks,
new_full_kv_indices,
block_mask.BLOCK_SIZE,
new_mask_mod,
seq_lengths=seq_lengths,
)
def get_mask_mod(
self,
mask_mod: _mask_mod_signature | None,
kv_len: torch.Tensor | None = None,
) -> _mask_mod_signature:
"""
Converts a mask_mod based on mapping from the physical block index to the logical
block index.
Args:
mask_mod (_mask_mod_signature): mask_mod based on the logical block index.
kv_len (Optional[torch.Tensor]): actual KV sequence length for upper bound check.
"""
if mask_mod is None:
mask_mod = noop_mask
def new_mask_mod(
b: torch.Tensor,
h: torch.Tensor,
q_idx: torch.Tensor,
physical_kv_idx: torch.Tensor,
):
physical_kv_block = physical_kv_idx // self.page_size
physical_kv_offset = physical_kv_idx % self.page_size
logical_block_idx = self.physical_to_logical[b, physical_kv_block]
logical_kv_idx = logical_block_idx * self.page_size + physical_kv_offset
live_block = logical_block_idx >= 0
within_upper_bound = (
logical_kv_idx < kv_len[b] if kv_len is not None else True
)
within_lower_bound = logical_kv_idx >= 0
is_valid = live_block & within_upper_bound & within_lower_bound
return torch.where(is_valid, mask_mod(b, h, q_idx, logical_kv_idx), False)
return new_mask_mod
def get_score_mod(
self,
score_mod: _score_mod_signature | None,
kv_len: torch.Tensor | None = None,
) -> _score_mod_signature:
"""
Converts a score_mod based on mapping from the physical block index to the logical
block index.
Args:
score_mod (_score_mod_signature): score_mod based on the logical block index.
`kv_len (Optional[torch.Tensor]): actual KV sequence length for upper bound check.
"""
if score_mod is None:
score_mod = _identity
def new_score_mod(
score: torch.Tensor,
b: torch.Tensor,
h: torch.Tensor,
q_idx: torch.Tensor,
physical_kv_idx: torch.Tensor,
):
physical_kv_block = physical_kv_idx // self.page_size
physical_kv_offset = physical_kv_idx % self.page_size
logical_block_idx = self.physical_to_logical[b, physical_kv_block]
logical_kv_idx = logical_block_idx * self.page_size + physical_kv_offset
live_block = logical_block_idx >= 0
within_upper_bound = (
logical_kv_idx < kv_len[b] if kv_len is not None else True
)
within_lower_bound = logical_kv_idx >= 0
is_valid = live_block & within_upper_bound & within_lower_bound
return torch.where(
is_valid,
score_mod(score, b, h, q_idx, logical_kv_idx),
float("-inf"),
)
return new_score_mod
|
PagedAttention
|
python
|
pytorch__pytorch
|
test/test_serialization.py
|
{
"start": 3664,
"end": 3772
}
|
class ____:
class Nested:
pass
def up_size(size):
return (*size[:-1], size[-1] * 2)
|
ClassBMock
|
python
|
huggingface__transformers
|
src/transformers/models/bert_generation/tokenization_bert_generation.py
|
{
"start": 985,
"end": 4370
}
|
class ____(SentencePieceBackend):
"""
Construct a BertGeneration tokenizer. Based on [SentencePiece](https://github.com/google/sentencepiece).
This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
this superclass for more information regarding those methods.
Args:
vocab_file (`str`):
[SentencePiece](https://github.com/google/sentencepiece) file (generally has a *.spm* extension) that
contains the vocabulary necessary to instantiate a tokenizer.
bos_token (`str`, *optional*, defaults to `"<s>"`):
The begin of sequence token.
eos_token (`str`, *optional*, defaults to `"</s>"`):
The end of sequence token.
unk_token (`str`, *optional*, defaults to `"<unk>"`):
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
token instead.
pad_token (`str`, *optional*, defaults to `"<pad>"`):
The token used for padding, for example when batching sequences of different lengths.
sep_token (`str`, *optional*, defaults to `"<::::>"`):
The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
sequence classification or for a text and a question for question answering. It is also used as the last
token of a sequence built with special tokens.
sp_model_kwargs (`dict`, *optional*):
Will be passed to the `SentencePieceProcessor.__init__()` method. The [Python wrapper for
SentencePiece](https://github.com/google/sentencepiece/tree/master/python) can be used, among other things,
to set:
- `enable_sampling`: Enable subword regularization.
- `nbest_size`: Sampling parameters for unigram. Invalid for BPE-Dropout.
- `nbest_size = {0,1}`: No sampling is performed.
- `nbest_size > 1`: samples from the nbest_size results.
- `nbest_size < 0`: assuming that nbest_size is infinite and samples from the all hypothesis (lattice)
using forward-filtering-and-backward-sampling algorithm.
- `alpha`: Smoothing parameter for unigram sampling, and dropout probability of merge operations for
BPE-dropout.
"""
vocab_files_names = VOCAB_FILES_NAMES
prefix_tokens: list[int] = []
model_input_names = ["input_ids", "attention_mask"]
is_fast = False
def __init__(
self,
vocab_file,
bos_token="<s>",
eos_token="</s>",
unk_token="<unk>",
pad_token="<pad>",
sep_token="<::::>",
sp_model_kwargs: Optional[dict[str, Any]] = None,
**kwargs,
) -> None:
self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
# Call parent init (which will load sp_model)
super().__init__(
vocab_file=vocab_file,
bos_token=bos_token,
eos_token=eos_token,
unk_token=unk_token,
pad_token=pad_token,
sep_token=sep_token,
sp_model_kwargs=self.sp_model_kwargs,
special_tokens_pattern="none",
**kwargs,
)
__all__ = ["BertGenerationTokenizer"]
|
BertGenerationTokenizer
|
python
|
langchain-ai__langchain
|
libs/langchain_v1/tests/unit_tests/agents/middleware/implementations/test_summarization.py
|
{
"start": 547,
"end": 982
}
|
class ____(BaseChatModel):
"""Mock chat model for testing."""
def invoke(self, prompt): # type: ignore[no-untyped-def]
return AIMessage(content="Generated summary")
def _generate(self, messages, **kwargs): # type: ignore[no-untyped-def]
return ChatResult(generations=[ChatGeneration(message=AIMessage(content="Summary"))])
@property
def _llm_type(self) -> str:
return "mock"
|
MockChatModel
|
python
|
facebook__pyre-check
|
source/interprocedural_analyses/taint/test/integration/model_query_type_annotation.py
|
{
"start": 436,
"end": 885
}
|
class ____:
def test2_f1(self, taint_1: Test2_T1, taint_2: Test2_T2, no_taint_1: Test2_Foo):
# self should not be tainted
pass
def test2_f2(self, taint_1: Dict[int, Test2_T1]):
# self should not be tainted
pass
def test2_f3(self, no_taint_1: int, no_taint_2: str):
# self should not be tainted
pass
def test4_taint_1(x) -> str:
pass
def test4_no_taint_1(x) -> int:
pass
|
Test2_C
|
python
|
huggingface__transformers
|
src/transformers/utils/hub.py
|
{
"start": 38898,
"end": 39554
}
|
class ____:
"""
Internal class to keep track of a push in progress (which might contain multiple `Future` jobs).
"""
def __init__(self, jobs: futures.Future | None = None) -> None:
self.jobs = [] if jobs is None else jobs
def is_done(self):
return all(job.done() for job in self.jobs)
def wait_until_done(self):
futures.wait(self.jobs)
def cancel(self) -> None:
self.jobs = [
job
for job in self.jobs
# Cancel the job if it wasn't started yet and remove cancelled/done jobs from the list
if not (job.cancel() or job.done())
]
|
PushInProgress
|
python
|
django__django
|
tests/admin_utils/models.py
|
{
"start": 2167,
"end": 2201
}
|
class ____(VehicleMixin):
pass
|
Car
|
python
|
pytorch__pytorch
|
torch/export/_wrapper_utils.py
|
{
"start": 15,
"end": 271
}
|
class ____(torch.nn.Module):
def __init__(self, f): # type: ignore[no-untyped-def]
super().__init__()
self.f = f
def forward(self, *args, **kwargs): # type: ignore[no-untyped-def]
return self.f(*args, **kwargs)
|
_WrapperModule
|
python
|
pyqtgraph__pyqtgraph
|
pyqtgraph/flowchart/library/Operators.py
|
{
"start": 2032,
"end": 2216
}
|
class ____(UniOpNode):
"""Returns abs(Inp). Does not check input types."""
nodeName = 'Abs'
def __init__(self, name):
UniOpNode.__init__(self, name, '__abs__')
|
AbsNode
|
python
|
tiangolo__fastapi
|
tests/test_multi_body_errors.py
|
{
"start": 226,
"end": 8267
}
|
class ____(BaseModel):
name: str
age: condecimal(gt=Decimal(0.0)) # type: ignore
@app.post("/items/")
def save_item_no_body(item: List[Item]):
return {"item": item}
client = TestClient(app)
def test_put_correct_body():
response = client.post("/items/", json=[{"name": "Foo", "age": 5}])
assert response.status_code == 200, response.text
assert response.json() == {
"item": [
{
"name": "Foo",
"age": IsOneOf(
5,
# TODO: remove when deprecating Pydantic v1
"5",
),
}
]
}
def test_jsonable_encoder_requiring_error():
response = client.post("/items/", json=[{"name": "Foo", "age": -1.0}])
assert response.status_code == 422, response.text
assert response.json() == IsDict(
{
"detail": [
{
"type": "greater_than",
"loc": ["body", 0, "age"],
"msg": "Input should be greater than 0",
"input": -1.0,
"ctx": {"gt": 0},
}
]
}
) | IsDict(
# TODO: remove when deprecating Pydantic v1
{
"detail": [
{
"ctx": {"limit_value": 0.0},
"loc": ["body", 0, "age"],
"msg": "ensure this value is greater than 0",
"type": "value_error.number.not_gt",
}
]
}
)
def test_put_incorrect_body_multiple():
response = client.post("/items/", json=[{"age": "five"}, {"age": "six"}])
assert response.status_code == 422, response.text
assert response.json() == IsDict(
{
"detail": [
{
"type": "missing",
"loc": ["body", 0, "name"],
"msg": "Field required",
"input": {"age": "five"},
},
{
"type": "decimal_parsing",
"loc": ["body", 0, "age"],
"msg": "Input should be a valid decimal",
"input": "five",
},
{
"type": "missing",
"loc": ["body", 1, "name"],
"msg": "Field required",
"input": {"age": "six"},
},
{
"type": "decimal_parsing",
"loc": ["body", 1, "age"],
"msg": "Input should be a valid decimal",
"input": "six",
},
]
}
) | IsDict(
# TODO: remove when deprecating Pydantic v1
{
"detail": [
{
"loc": ["body", 0, "name"],
"msg": "field required",
"type": "value_error.missing",
},
{
"loc": ["body", 0, "age"],
"msg": "value is not a valid decimal",
"type": "type_error.decimal",
},
{
"loc": ["body", 1, "name"],
"msg": "field required",
"type": "value_error.missing",
},
{
"loc": ["body", 1, "age"],
"msg": "value is not a valid decimal",
"type": "type_error.decimal",
},
]
}
)
def test_openapi_schema():
response = client.get("/openapi.json")
assert response.status_code == 200, response.text
assert response.json() == {
"openapi": "3.1.0",
"info": {"title": "FastAPI", "version": "0.1.0"},
"paths": {
"/items/": {
"post": {
"responses": {
"200": {
"description": "Successful Response",
"content": {"application/json": {"schema": {}}},
},
"422": {
"description": "Validation Error",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/HTTPValidationError"
}
}
},
},
},
"summary": "Save Item No Body",
"operationId": "save_item_no_body_items__post",
"requestBody": {
"content": {
"application/json": {
"schema": {
"title": "Item",
"type": "array",
"items": {"$ref": "#/components/schemas/Item"},
}
}
},
"required": True,
},
}
}
},
"components": {
"schemas": {
"Item": {
"title": "Item",
"required": ["name", "age"],
"type": "object",
"properties": {
"name": {"title": "Name", "type": "string"},
"age": IsDict(
{
"title": "Age",
"anyOf": [
{"exclusiveMinimum": 0.0, "type": "number"},
IsOneOf(
# pydantic < 2.12.0
{"type": "string"},
# pydantic >= 2.12.0
{
"type": "string",
"pattern": r"^(?!^[-+.]*$)[+-]?0*\d*\.?\d*$",
},
),
],
}
)
| IsDict(
# TODO: remove when deprecating Pydantic v1
{
"title": "Age",
"exclusiveMinimum": 0.0,
"type": "number",
}
),
},
},
"ValidationError": {
"title": "ValidationError",
"required": ["loc", "msg", "type"],
"type": "object",
"properties": {
"loc": {
"title": "Location",
"type": "array",
"items": {
"anyOf": [{"type": "string"}, {"type": "integer"}]
},
},
"msg": {"title": "Message", "type": "string"},
"type": {"title": "Error Type", "type": "string"},
},
},
"HTTPValidationError": {
"title": "HTTPValidationError",
"type": "object",
"properties": {
"detail": {
"title": "Detail",
"type": "array",
"items": {"$ref": "#/components/schemas/ValidationError"},
}
},
},
}
},
}
|
Item
|
python
|
sqlalchemy__sqlalchemy
|
test/sql/test_insert.py
|
{
"start": 2385,
"end": 38526
}
|
class ____(_InsertTestBase, fixtures.TablesTest, AssertsCompiledSQL):
__dialect__ = "default_enhanced"
@testing.combinations(
((), ("z",), ()),
(("x",), (), ()),
(("x",), ("y",), ("x", "y")),
(("x", "y"), ("y",), ("x", "y")),
)
def test_return_defaults_generative(
self,
initial_keys: Tuple[str, ...],
second_keys: Tuple[str, ...],
expected_keys: Tuple[str, ...],
):
t = table("foo", column("x"), column("y"), column("z"))
initial_cols = tuple(t.c[initial_keys])
second_cols = tuple(t.c[second_keys])
expected = set(t.c[expected_keys])
stmt = t.insert().return_defaults(*initial_cols)
eq_(stmt._return_defaults_columns, initial_cols)
stmt = stmt.return_defaults(*second_cols)
assert isinstance(stmt._return_defaults_columns, tuple)
eq_(set(stmt._return_defaults_columns), expected)
@testing.variation("add_values", ["before", "after"])
@testing.variation("multi_values", [True, False])
@testing.variation("sort_by_parameter_order", [True, False])
def test_sort_by_parameter_ordering_parameter_no_multi_values(
self, add_values, multi_values, sort_by_parameter_order
):
t = table("foo", column("x"), column("y"), column("z"))
stmt = insert(t)
if add_values.before:
if multi_values:
stmt = stmt.values([{"y": 6}, {"y": 7}])
else:
stmt = stmt.values(y=6)
stmt = stmt.returning(
t.c.x, sort_by_parameter_order=bool(sort_by_parameter_order)
)
if add_values.after:
if multi_values:
stmt = stmt.values([{"y": 6}, {"y": 7}])
else:
stmt = stmt.values(y=6)
if multi_values:
if sort_by_parameter_order:
with expect_raises_message(
exc.CompileError,
"RETURNING cannot be determinstically sorted "
"when using an INSERT",
):
stmt.compile()
else:
self.assert_compile(
stmt,
"INSERT INTO foo (y) VALUES (:y_m0), (:y_m1) "
"RETURNING foo.x",
)
else:
self.assert_compile(
stmt,
"INSERT INTO foo (y) VALUES (:y) RETURNING foo.x",
)
def test_binds_that_match_columns(self):
"""test bind params named after column names
replace the normal SET/VALUES generation.
See also test_compiler.py::CrudParamOverlapTest
"""
t = table("foo", column("x"), column("y"))
i = t.insert().values(x=3 + bindparam("x"))
self.assert_compile(i, "INSERT INTO foo (x) VALUES ((:param_1 + :x))")
self.assert_compile(
i,
"INSERT INTO foo (x, y) VALUES ((:param_1 + :x), :y)",
params={"x": 1, "y": 2},
)
i = t.insert().values(x=bindparam("y"))
self.assert_compile(i, "INSERT INTO foo (x) VALUES (:y)")
i = t.insert().values(x=bindparam("y"), y=5)
assert_raises(exc.CompileError, i.compile)
i = t.insert().values(x=3 + bindparam("y"), y=5)
assert_raises(exc.CompileError, i.compile)
i = t.insert().values(x=3 + bindparam("x2"))
self.assert_compile(i, "INSERT INTO foo (x) VALUES ((:param_1 + :x2))")
self.assert_compile(
i, "INSERT INTO foo (x) VALUES ((:param_1 + :x2))", params={}
)
self.assert_compile(
i,
"INSERT INTO foo (x, y) VALUES ((:param_1 + :x2), :y)",
params={"x": 1, "y": 2},
)
self.assert_compile(
i,
"INSERT INTO foo (x, y) VALUES ((:param_1 + :x2), :y)",
params={"x2": 1, "y": 2},
)
def test_insert_literal_binds(self):
table1 = self.tables.mytable
stmt = table1.insert().values(myid=3, name="jack")
self.assert_compile(
stmt,
"INSERT INTO mytable (myid, name) VALUES (3, 'jack')",
literal_binds=True,
)
def test_insert_literal_binds_sequence_notimplemented(self):
table = Table(
"x",
MetaData(),
Column(
"y", Integer, normalize_sequence(config, Sequence("y_seq"))
),
)
dialect = default.DefaultDialect()
dialect.supports_sequences = True
stmt = table.insert().values(myid=3, name="jack")
assert_raises(
NotImplementedError,
stmt.compile,
compile_kwargs=dict(literal_binds=True),
dialect=dialect,
)
def test_inline_defaults(self):
m = MetaData()
foo = Table("foo", m, Column("id", Integer))
t = Table(
"test",
m,
Column("col1", Integer, default=func.foo(1)),
Column(
"col2",
Integer,
default=select(func.coalesce(func.max(foo.c.id))),
),
)
self.assert_compile(
t.insert().values({}),
"INSERT INTO test (col1, col2) VALUES (foo(:foo_1), "
"(SELECT coalesce(max(foo.id)) AS coalesce_1 FROM "
"foo))",
)
self.assert_compile(
t.insert().inline().values({}),
"INSERT INTO test (col1, col2) VALUES (foo(:foo_1), "
"(SELECT coalesce(max(foo.id)) AS coalesce_1 FROM "
"foo))",
)
def test_generic_insert_bind_params_all_columns(self):
table1 = self.tables.mytable
self.assert_compile(
insert(table1),
"INSERT INTO mytable (myid, name, description) "
"VALUES (:myid, :name, :description)",
)
def test_insert_with_values_dict(self):
table1 = self.tables.mytable
checkparams = {"myid": 3, "name": "jack"}
self.assert_compile(
insert(table1).values(myid=3, name="jack"),
"INSERT INTO mytable (myid, name) VALUES (:myid, :name)",
checkparams=checkparams,
)
def test_unconsumed_names_kwargs(self):
t = table("t", column("x"), column("y"))
assert_raises_message(
exc.CompileError,
"Unconsumed column names: z",
t.insert().values(x=5, z=5).compile,
)
def test_bindparam_name_no_consume_error(self):
t = table("t", column("x"), column("y"))
# bindparam names don't get counted
i = t.insert().values(x=3 + bindparam("x2"))
self.assert_compile(i, "INSERT INTO t (x) VALUES ((:param_1 + :x2))")
# even if in the params list
i = t.insert().values(x=3 + bindparam("x2"))
self.assert_compile(
i, "INSERT INTO t (x) VALUES ((:param_1 + :x2))", params={"x2": 1}
)
def test_unconsumed_names_values_dict(self):
table1 = self.tables.mytable
checkparams = {"myid": 3, "name": "jack", "unknowncol": "oops"}
stmt = insert(table1).values(checkparams)
assert_raises_message(
exc.CompileError,
"Unconsumed column names: unknowncol",
stmt.compile,
dialect=postgresql.dialect(),
)
def test_unconsumed_names_multi_values_dict(self):
table1 = self.tables.mytable
checkparams = [
{"myid": 3, "name": "jack", "unknowncol": "oops"},
{"myid": 4, "name": "someone", "unknowncol": "oops"},
]
stmt = insert(table1).values(checkparams)
assert_raises_message(
exc.CompileError,
"Unconsumed column names: unknowncol",
stmt.compile,
dialect=postgresql.dialect(),
)
def test_insert_with_values_tuple(self):
table1 = self.tables.mytable
checkparams = {
"myid": 3,
"name": "jack",
"description": "mydescription",
}
self.assert_compile(
insert(table1).values([3, "jack", "mydescription"]),
"INSERT INTO mytable (myid, name, description) "
"VALUES (:myid, :name, :description)",
checkparams=checkparams,
)
def test_insert_with_values_func(self):
table1 = self.tables.mytable
self.assert_compile(
insert(table1).values(myid=func.lala()),
"INSERT INTO mytable (myid) VALUES (lala())",
)
def test_insert_with_user_supplied_bind_params(self):
table1 = self.tables.mytable
values = {
table1.c.myid: bindparam("userid"),
table1.c.name: bindparam("username"),
}
self.assert_compile(
insert(table1).values(values),
"INSERT INTO mytable (myid, name) VALUES (:userid, :username)",
)
def test_insert_values_multiple(self):
table1 = self.tables.mytable
values1 = {table1.c.myid: bindparam("userid")}
values2 = {table1.c.name: bindparam("username")}
self.assert_compile(
insert(table1).values(values1).values(values2),
"INSERT INTO mytable (myid, name) VALUES (:userid, :username)",
)
def test_prefix_with(self):
table1 = self.tables.mytable
stmt = (
table1.insert()
.prefix_with("A", "B", dialect="mysql")
.prefix_with("C", "D")
)
self.assert_compile(
stmt,
"INSERT C D INTO mytable (myid, name, description) "
"VALUES (:myid, :name, :description)",
)
self.assert_compile(
stmt,
"INSERT A B C D INTO mytable (myid, name, description) "
"VALUES (%s, %s, %s)",
dialect=mysql.dialect(),
)
def test_inline_default(self):
metadata = MetaData()
table = Table(
"sometable",
metadata,
Column("id", Integer, primary_key=True),
Column("foo", Integer, default=func.foobar()),
)
self.assert_compile(
table.insert().values(),
"INSERT INTO sometable (foo) VALUES (foobar())",
)
self.assert_compile(
table.insert(),
"INSERT INTO sometable (foo) VALUES (foobar())",
params={},
)
self.assert_compile(
table.insert().values().inline(),
"INSERT INTO sometable (foo) VALUES (foobar())",
)
self.assert_compile(
table.insert().inline(),
"INSERT INTO sometable (foo) VALUES (foobar())",
params={},
)
def test_insert_returning_not_in_default(self):
table1 = self.tables.mytable
stmt = table1.insert().returning(table1.c.myid)
self.assert_compile(
stmt,
"INSERT INTO mytable (myid, name, description) "
"VALUES (:myid, :name, :description) RETURNING mytable.myid",
dialect=default.DefaultDialect(),
)
def test_insert_from_select_returning(self):
table1 = self.tables.mytable
sel = select(table1.c.myid, table1.c.name).where(
table1.c.name == "foo"
)
ins = (
self.tables.myothertable.insert()
.from_select(("otherid", "othername"), sel)
.returning(self.tables.myothertable.c.otherid)
)
self.assert_compile(
ins,
"INSERT INTO myothertable (otherid, othername) "
"SELECT mytable.myid, mytable.name FROM mytable "
"WHERE mytable.name = %(name_1)s RETURNING myothertable.otherid",
checkparams={"name_1": "foo"},
dialect="postgresql",
)
def test_insert_from_select_select(self):
table1 = self.tables.mytable
sel = select(table1.c.myid, table1.c.name).where(
table1.c.name == "foo"
)
ins = self.tables.myothertable.insert().from_select(
("otherid", "othername"), sel
)
self.assert_compile(
ins,
"INSERT INTO myothertable (otherid, othername) "
"SELECT mytable.myid, mytable.name FROM mytable "
"WHERE mytable.name = :name_1",
checkparams={"name_1": "foo"},
)
def test_insert_from_select_seq(self):
m = MetaData()
t1 = Table(
"t",
m,
Column(
"id",
Integer,
normalize_sequence(config, Sequence("id_seq")),
primary_key=True,
),
Column("data", String),
)
stmt = t1.insert().from_select(("data",), select(t1.c.data))
self.assert_compile(
stmt,
"INSERT INTO t (data, id) SELECT t.data, "
"nextval('id_seq') AS next_value_1 FROM t",
dialect=postgresql.dialect(),
)
def test_insert_seq_pk_multi_values(self):
"""test #6361"""
m = MetaData()
t1 = Table(
"t",
m,
Column(
"id",
Integer,
normalize_sequence(config, Sequence("id_seq")),
primary_key=True,
),
Column("data", String),
)
stmt = t1.insert().values(
[{"data": "d1"}, {"data": "d2"}, {"data": "d3"}]
)
self.assert_compile(
stmt,
"INSERT INTO t (id, data) VALUES (nextval('id_seq'), "
"%(data_m0)s), (nextval('id_seq'), %(data_m1)s), "
"(nextval('id_seq'), %(data_m2)s)",
dialect=postgresql.dialect(),
)
def test_insert_seq_non_pk_multi_values(self):
"""test #6361"""
m = MetaData()
t1 = Table(
"t",
m,
Column("id", Integer, primary_key=True),
Column(
"counter", normalize_sequence(config, Sequence("counter_seq"))
),
Column("data", String),
)
stmt = t1.insert().values(
[{"data": "d1"}, {"data": "d2"}, {"data": "d3"}]
)
self.assert_compile(
stmt,
"INSERT INTO t (counter, data) VALUES (nextval('counter_seq'), "
"%(data_m0)s), (nextval('counter_seq'), %(data_m1)s), "
"(nextval('counter_seq'), %(data_m2)s)",
dialect=postgresql.dialect(),
)
@testing.variation("paramstyle", ["pg", "qmark", "numeric", "dollar"])
def test_heterogeneous_multi_values(self, paramstyle):
"""for #6047, originally I thought we'd take any insert().values()
and be able to convert it to a "many" style execution that we can
cache.
however, this test shows that we cannot, at least not in the
general case, because SQL expressions are not guaranteed to be in
the same position each time, therefore each ``VALUES`` clause is not
of the same structure.
"""
m = MetaData()
t1 = Table(
"t",
m,
Column("id", Integer, primary_key=True),
Column("x", Integer),
Column("y", Integer),
Column("z", Integer),
)
stmt = t1.insert().values(
[
{"x": 1, "y": func.sum(1, 2), "z": 2},
{"x": func.sum(1, 2), "y": 2, "z": 3},
{"x": func.sum(1, 2), "y": 2, "z": func.foo(10)},
]
)
pos_par = (
1,
1,
2,
2,
1,
2,
2,
3,
1,
2,
2,
10,
)
# SQL expressions in the params at arbitrary locations means
# we have to scan them at compile time, and the shape of the bound
# parameters is not predictable. so for #6047 where I originally
# thought all of values() could be rewritten, this makes it not
# really worth it.
if paramstyle.pg:
self.assert_compile(
stmt,
"INSERT INTO t (x, y, z) VALUES "
"(%(x_m0)s, sum(%(sum_1)s, %(sum_2)s), %(z_m0)s), "
"(sum(%(sum_3)s, %(sum_4)s), %(y_m1)s, %(z_m1)s), "
"(sum(%(sum_5)s, %(sum_6)s), %(y_m2)s, foo(%(foo_1)s))",
checkparams={
"x_m0": 1,
"sum_1": 1,
"sum_2": 2,
"z_m0": 2,
"sum_3": 1,
"sum_4": 2,
"y_m1": 2,
"z_m1": 3,
"sum_5": 1,
"sum_6": 2,
"y_m2": 2,
"foo_1": 10,
},
dialect=postgresql.dialect(),
)
elif paramstyle.qmark:
self.assert_compile(
stmt,
"INSERT INTO t (x, y, z) VALUES "
"(?, sum(?, ?), ?), "
"(sum(?, ?), ?, ?), "
"(sum(?, ?), ?, foo(?))",
checkpositional=pos_par,
dialect=sqlite.dialect(),
)
elif paramstyle.numeric:
self.assert_compile(
stmt,
"INSERT INTO t (x, y, z) VALUES "
"(:1, sum(:2, :3), :4), "
"(sum(:5, :6), :7, :8), "
"(sum(:9, :10), :11, foo(:12))",
checkpositional=pos_par,
dialect=sqlite.dialect(paramstyle="numeric"),
)
elif paramstyle.dollar:
self.assert_compile(
stmt,
"INSERT INTO t (x, y, z) VALUES "
"($1, sum($2, $3), $4), "
"(sum($5, $6), $7, $8), "
"(sum($9, $10), $11, foo($12))",
checkpositional=pos_par,
dialect=sqlite.dialect(paramstyle="numeric_dollar"),
)
else:
paramstyle.fail()
def test_insert_seq_pk_multi_values_seq_not_supported(self):
m = MetaData()
t1 = Table(
"t",
m,
Column(
"id",
Integer,
normalize_sequence(config, Sequence("id_seq")),
primary_key=True,
),
Column("data", String),
)
stmt = t1.insert().values(
[{"data": "d1"}, {"data": "d2"}, {"data": "d3"}]
)
self.assert_compile(
stmt,
"INSERT INTO t (data) VALUES (?), (?), (?)",
dialect=sqlite.dialect(),
)
def test_insert_from_select_cte_one(self):
table1 = self.tables.mytable
cte = select(table1.c.name).where(table1.c.name == "bar").cte()
sel = select(table1.c.myid, table1.c.name).where(
table1.c.name == cte.c.name
)
ins = self.tables.myothertable.insert().from_select(
("otherid", "othername"), sel
)
self.assert_compile(
ins,
"WITH anon_1 AS "
"(SELECT mytable.name AS name FROM mytable "
"WHERE mytable.name = :name_1) "
"INSERT INTO myothertable (otherid, othername) "
"SELECT mytable.myid, mytable.name FROM mytable, anon_1 "
"WHERE mytable.name = anon_1.name",
checkparams={"name_1": "bar"},
)
def test_insert_from_select_cte_follows_insert_one(self):
dialect = default.DefaultDialect()
dialect.cte_follows_insert = True
table1 = self.tables.mytable
cte = select(table1.c.name).where(table1.c.name == "bar").cte()
sel = select(table1.c.myid, table1.c.name).where(
table1.c.name == cte.c.name
)
ins = self.tables.myothertable.insert().from_select(
("otherid", "othername"), sel
)
self.assert_compile(
ins,
"INSERT INTO myothertable (otherid, othername) "
"WITH anon_1 AS "
"(SELECT mytable.name AS name FROM mytable "
"WHERE mytable.name = :name_1) "
"SELECT mytable.myid, mytable.name FROM mytable, anon_1 "
"WHERE mytable.name = anon_1.name",
checkparams={"name_1": "bar"},
dialect=dialect,
)
def test_insert_from_select_cte_two(self):
table1 = self.tables.mytable
cte = table1.select().cte("c")
stmt = cte.select()
ins = table1.insert().from_select(table1.c, stmt)
self.assert_compile(
ins,
"WITH c AS (SELECT mytable.myid AS myid, mytable.name AS name, "
"mytable.description AS description FROM mytable) "
"INSERT INTO mytable (myid, name, description) "
"SELECT c.myid, c.name, c.description FROM c",
)
def test_insert_from_select_cte_follows_insert_two(self):
dialect = default.DefaultDialect()
dialect.cte_follows_insert = True
table1 = self.tables.mytable
cte = table1.select().cte("c")
stmt = cte.select()
ins = table1.insert().from_select(table1.c, stmt)
self.assert_compile(
ins,
"INSERT INTO mytable (myid, name, description) "
"WITH c AS (SELECT mytable.myid AS myid, mytable.name AS name, "
"mytable.description AS description FROM mytable) "
"SELECT c.myid, c.name, c.description FROM c",
dialect=dialect,
)
def test_insert_from_select_select_alt_ordering(self):
table1 = self.tables.mytable
sel = select(table1.c.name, table1.c.myid).where(
table1.c.name == "foo"
)
ins = self.tables.myothertable.insert().from_select(
("othername", "otherid"), sel
)
self.assert_compile(
ins,
"INSERT INTO myothertable (othername, otherid) "
"SELECT mytable.name, mytable.myid FROM mytable "
"WHERE mytable.name = :name_1",
checkparams={"name_1": "foo"},
)
def test_insert_from_select_no_defaults(self):
metadata = MetaData()
table = Table(
"sometable",
metadata,
Column("id", Integer, primary_key=True),
Column("foo", Integer, default=func.foobar()),
)
table1 = self.tables.mytable
sel = select(table1.c.myid).where(table1.c.name == "foo")
ins = table.insert().from_select(["id"], sel, include_defaults=False)
self.assert_compile(
ins,
"INSERT INTO sometable (id) SELECT mytable.myid "
"FROM mytable WHERE mytable.name = :name_1",
checkparams={"name_1": "foo"},
)
def test_insert_from_select_with_sql_defaults(self):
metadata = MetaData()
table = Table(
"sometable",
metadata,
Column("id", Integer, primary_key=True),
Column("foo", Integer, default=func.foobar()),
)
table1 = self.tables.mytable
sel = select(table1.c.myid).where(table1.c.name == "foo")
ins = table.insert().from_select(["id"], sel)
self.assert_compile(
ins,
"INSERT INTO sometable (id, foo) SELECT "
"mytable.myid, foobar() AS foobar_1 "
"FROM mytable WHERE mytable.name = :name_1",
checkparams={"name_1": "foo"},
)
def test_insert_from_select_with_python_defaults(self):
metadata = MetaData()
table = Table(
"sometable",
metadata,
Column("id", Integer, primary_key=True),
Column("foo", Integer, default=12),
)
table1 = self.tables.mytable
sel = select(table1.c.myid).where(table1.c.name == "foo")
ins = table.insert().from_select(["id"], sel)
self.assert_compile(
ins,
"INSERT INTO sometable (id, foo) SELECT "
"mytable.myid, :foo AS anon_1 "
"FROM mytable WHERE mytable.name = :name_1",
# value filled in at execution time
checkparams={"name_1": "foo", "foo": None},
)
def test_insert_from_select_override_defaults(self):
metadata = MetaData()
table = Table(
"sometable",
metadata,
Column("id", Integer, primary_key=True),
Column("foo", Integer, default=12),
)
table1 = self.tables.mytable
sel = select(table1.c.myid, table1.c.myid.label("q")).where(
table1.c.name == "foo"
)
ins = table.insert().from_select(["id", "foo"], sel)
self.assert_compile(
ins,
"INSERT INTO sometable (id, foo) SELECT "
"mytable.myid, mytable.myid AS q "
"FROM mytable WHERE mytable.name = :name_1",
checkparams={"name_1": "foo"},
)
def test_insert_from_select_fn_defaults(self):
metadata = MetaData()
def foo(ctx):
return 12
table = Table(
"sometable",
metadata,
Column("id", Integer, primary_key=True),
Column("foo", Integer, default=foo),
)
table1 = self.tables.mytable
sel = select(table1.c.myid).where(table1.c.name == "foo")
ins = table.insert().from_select(["id"], sel)
self.assert_compile(
ins,
"INSERT INTO sometable (id, foo) SELECT "
"mytable.myid, :foo AS anon_1 "
"FROM mytable WHERE mytable.name = :name_1",
# value filled in at execution time
checkparams={"name_1": "foo", "foo": None},
)
def test_insert_from_select_fn_defaults_compound(self):
"""test #8073"""
metadata = MetaData()
table = Table(
"sometable",
metadata,
Column("id", Integer, primary_key=True),
Column("foo", Integer, default="foo"),
Column("bar", Integer, default="bar"),
)
table1 = self.tables.mytable
sel = (
select(table1.c.myid)
.where(table1.c.name == "foo")
.union(select(table1.c.myid).where(table1.c.name == "foo"))
)
ins = table.insert().from_select(["id"], sel)
with expect_raises_message(
exc.CompileError,
r"Can't extend statement for INSERT..FROM SELECT to include "
r"additional default-holding column\(s\) 'foo', 'bar'. "
r"Convert the selectable to a subquery\(\) first, or pass "
r"include_defaults=False to Insert.from_select\(\) to skip these "
r"columns.",
):
ins.compile()
def test_insert_from_select_fn_defaults_compound_subquery(self):
"""test #8073"""
metadata = MetaData()
def foo(ctx):
return 12
table = Table(
"sometable",
metadata,
Column("id", Integer, primary_key=True),
Column("foo", Integer, default="foo"),
Column("bar", Integer, default="bar"),
)
table1 = self.tables.mytable
sel = (
select(table1.c.myid)
.where(table1.c.name == "foo")
.union(select(table1.c.myid).where(table1.c.name == "foo"))
.subquery()
)
ins = table.insert().from_select(["id"], sel)
self.assert_compile(
ins,
"INSERT INTO sometable (id, foo, bar) SELECT anon_1.myid, "
":foo AS anon_2, :bar AS anon_3 FROM "
"(SELECT mytable.myid AS myid FROM mytable "
"WHERE mytable.name = :name_1 UNION "
"SELECT mytable.myid AS myid FROM mytable "
"WHERE mytable.name = :name_2) AS anon_1",
checkparams={
"foo": None,
"bar": None,
"name_1": "foo",
"name_2": "foo",
},
)
def test_insert_from_select_dont_mutate_raw_columns(self):
# test [ticket:3603]
from sqlalchemy import table
table_ = table(
"mytable",
Column("foo", String),
Column("bar", String, default="baz"),
)
stmt = select(table_.c.foo)
insert = table_.insert().from_select(["foo"], stmt)
self.assert_compile(stmt, "SELECT mytable.foo FROM mytable")
self.assert_compile(
insert,
"INSERT INTO mytable (foo, bar) "
"SELECT mytable.foo, :bar AS anon_1 FROM mytable",
)
self.assert_compile(stmt, "SELECT mytable.foo FROM mytable")
self.assert_compile(
insert,
"INSERT INTO mytable (foo, bar) "
"SELECT mytable.foo, :bar AS anon_1 FROM mytable",
)
def test_insert_mix_select_values_exception(self):
table1 = self.tables.mytable
sel = select(table1.c.myid, table1.c.name).where(
table1.c.name == "foo"
)
ins = self.tables.myothertable.insert().from_select(
("otherid", "othername"), sel
)
assert_raises_message(
exc.InvalidRequestError,
"This construct already inserts from a SELECT",
ins.values,
othername="5",
)
def test_insert_mix_values_select_exception(self):
table1 = self.tables.mytable
sel = select(table1.c.myid, table1.c.name).where(
table1.c.name == "foo"
)
ins = self.tables.myothertable.insert().values(othername="5")
assert_raises_message(
exc.InvalidRequestError,
"This construct already inserts value expressions",
ins.from_select,
("otherid", "othername"),
sel,
)
def test_insert_from_select_table(self):
table1 = self.tables.mytable
ins = self.tables.myothertable.insert().from_select(
("otherid", "othername"), table1
)
# note we aren't checking the number of columns right now
self.assert_compile(
ins,
"INSERT INTO myothertable (otherid, othername) "
"SELECT mytable.myid, mytable.name, mytable.description "
"FROM mytable",
checkparams={},
)
def test_insert_from_select_union(self):
mytable = self.tables.mytable
name = column("name")
description = column("desc")
sel = select(name, mytable.c.description).union(
select(name, description)
)
ins = mytable.insert().from_select(
[mytable.c.name, mytable.c.description], sel
)
self.assert_compile(
ins,
"INSERT INTO mytable (name, description) "
"SELECT name, mytable.description FROM mytable "
'UNION SELECT name, "desc"',
)
def test_insert_from_select_col_values(self):
table1 = self.tables.mytable
table2 = self.tables.myothertable
sel = select(table1.c.myid, table1.c.name).where(
table1.c.name == "foo"
)
ins = table2.insert().from_select(
(table2.c.otherid, table2.c.othername), sel
)
self.assert_compile(
ins,
"INSERT INTO myothertable (otherid, othername) "
"SELECT mytable.myid, mytable.name FROM mytable "
"WHERE mytable.name = :name_1",
checkparams={"name_1": "foo"},
)
def test_anticipate_no_pk_composite_pk(self):
t = Table(
"t",
MetaData(),
Column("x", Integer, primary_key=True),
Column("y", Integer, primary_key=True),
)
with expect_warnings(
"Column 't.y' is marked as a member.*"
"Note that as of SQLAlchemy 1.1,"
):
self.assert_compile(
t.insert(), "INSERT INTO t (x) VALUES (:x)", params={"x": 5}
)
def test_anticipate_no_pk_composite_pk_implicit_returning(self):
t = Table(
"t",
MetaData(),
Column("x", Integer, primary_key=True),
Column("y", Integer, primary_key=True),
)
d = postgresql.dialect()
d.implicit_returning = True
with expect_warnings(
"Column 't.y' is marked as a member.*"
"Note that as of SQLAlchemy 1.1,"
):
self.assert_compile(
t.insert(),
"INSERT INTO t (x) VALUES (%(x)s)",
params={"x": 5},
dialect=d,
)
def test_anticipate_no_pk_composite_pk_prefetch(self):
t = Table(
"t",
MetaData(),
Column("x", Integer, primary_key=True),
Column("y", Integer, primary_key=True),
)
d = postgresql.dialect()
d.implicit_returning = False
with expect_warnings(
"Column 't.y' is marked as a member.*"
"Note that as of SQLAlchemy 1.1,"
):
self.assert_compile(
t.insert(),
"INSERT INTO t (x) VALUES (%(x)s)",
params={"x": 5},
dialect=d,
)
def test_anticipate_nullable_composite_pk(self):
t = Table(
"t",
MetaData(),
Column("x", Integer, primary_key=True),
Column("y", Integer, primary_key=True, nullable=True),
)
self.assert_compile(
t.insert(), "INSERT INTO t (x) VALUES (:x)", params={"x": 5}
)
def test_anticipate_no_pk_non_composite_pk(self):
t = Table(
"t",
MetaData(),
Column("x", Integer, primary_key=True, autoincrement=False),
Column("q", Integer),
)
with expect_warnings(
"Column 't.x' is marked as a member.*may not store NULL.$"
):
self.assert_compile(
t.insert(), "INSERT INTO t (q) VALUES (:q)", params={"q": 5}
)
def test_anticipate_no_pk_non_composite_pk_implicit_returning(self):
t = Table(
"t",
MetaData(),
Column("x", Integer, primary_key=True, autoincrement=False),
Column("q", Integer),
)
d = postgresql.dialect()
d.implicit_returning = True
with expect_warnings(
"Column 't.x' is marked as a member.*may not store NULL.$"
):
self.assert_compile(
t.insert(),
"INSERT INTO t (q) VALUES (%(q)s)",
params={"q": 5},
dialect=d,
)
def test_anticipate_no_pk_non_composite_pk_prefetch(self):
t = Table(
"t",
MetaData(),
Column("x", Integer, primary_key=True, autoincrement=False),
Column("q", Integer),
)
d = postgresql.dialect()
d.implicit_returning = False
with expect_warnings(
"Column 't.x' is marked as a member.*may not store NULL.$"
):
self.assert_compile(
t.insert(),
"INSERT INTO t (q) VALUES (%(q)s)",
params={"q": 5},
dialect=d,
)
def test_anticipate_no_pk_lower_case_table(self):
t = table(
"t",
Column("id", Integer, primary_key=True, autoincrement=False),
Column("notpk", String(10), nullable=True),
)
with expect_warnings(
"Column 't.id' is marked as a member.*may not store NULL.$"
):
self.assert_compile(
t.insert(),
"INSERT INTO t () VALUES ()",
params={},
supports_default_values=False,
)
|
InsertTest
|
python
|
pennersr__django-allauth
|
allauth/socialaccount/providers/discogs/provider.py
|
{
"start": 218,
"end": 433
}
|
class ____(ProviderAccount):
def get_username(self):
return self.account.extra_data.get("username")
def get_profile_url(self):
return self.account.extra_data.get("resource_url")
|
DiscogsAccount
|
python
|
pytorch__pytorch
|
torch/testing/_internal/common_utils.py
|
{
"start": 233134,
"end": 243287
}
|
class ____(TestCase):
def assertEqualIgnoringNestedInts(self, a, b):
# unbinding NJTs allows us to compare them as essentially equal without
# caring about exact nested int comparison
def _unbind_njts(x):
if isinstance(x, torch.Tensor) and x.is_nested and x.layout == torch.jagged:
return x.unbind()
else:
return x
self.assertEqual(pytree.tree_map(_unbind_njts, a), pytree.tree_map(_unbind_njts, b))
def assertEqualNoncontigAware(self, a, b):
# assertEqual() doesn't take into account lengths, so hack around this
# by comparing unbound components and shapes
self.assertEqualIgnoringNestedInts(a, b)
def _get_njt_shapes(x):
return (
x.shape
if isinstance(x, torch.Tensor) and x.is_nested
else None
)
a_shapes = pytree.tree_map(_get_njt_shapes, a)
b_shapes = pytree.tree_map(_get_njt_shapes, b)
self.assertEqual(a_shapes, b_shapes)
@contextlib.contextmanager
def branch_nested_state(self):
"""Context manager to branch and restore the nested tensor state."""
nested_tensor_module = torch.nested._internal.nested_tensor
original_tensor_symint_registry = nested_tensor_module._tensor_symint_registry.copy()
original_tensor_id_counter = nested_tensor_module._tensor_id_counter
try:
yield
finally:
nested_tensor_module._tensor_id_counter = original_tensor_id_counter
nested_tensor_module._tensor_symint_registry = original_tensor_symint_registry
def munge_exc(e, *, suppress_suffix=True, suppress_prefix=True, file=None, skip=0):
from torch._dynamo.trace_rules import _as_posix_path
if file is None:
file = inspect.stack()[1 + skip].filename # skip one frame
file = _as_posix_path(file)
s = _as_posix_path(str(e))
# Remove everything that looks like stack frames in NOT this file
def repl_frame(m):
if m.group(1) != file:
return ""
# Don't accept top-level, even for this script, these will wobble
# depending on how the testing script was invoked
if m.group(2) == "<module>":
return ""
return m.group(0)
s = re.sub(r' File "([^"]+)", line \d+, in (.+)\n( .+\n( +[~^]+ *\n)?)+', repl_frame, s)
s = re.sub(r"line \d+", "line N", s)
s = re.sub(r".py:\d+", ".py:N", s)
s = re.sub(r'https:/([a-zA-Z0-9_.-]+)', r'https://\1', s)
s = re.sub(file, _as_posix_path(os.path.basename(file)), s)
s = re.sub(_as_posix_path(os.path.join(os.path.dirname(torch.__file__), "")), "", s)
if suppress_suffix:
s = re.sub(r"\n*Set TORCH_LOGS.+", "", s, flags=re.DOTALL)
s = re.sub(r"\n*You can suppress this exception.+", "", s, flags=re.DOTALL)
s = re.sub(r"\n*Set TORCHDYNAMO_VERBOSE=1.+", "", s, flags=re.DOTALL)
if suppress_prefix:
s = re.sub(r"Cannot export model.+\n\n", "", s)
s = re.sub(r" +$", "", s, flags=re.MULTILINE)
return s
@contextmanager
def check_leaked_tensors(limit=1, matched_type=torch.Tensor):
"""Wrap around operations you want to ensure are not leaking tensor memory.
This code intentionally ignores other reference cycles, which can be benign and which we have plenty
of in pytorch code. It focuses on any reference cycles that directly or indirectly result holding a Tensor alive,
since this is likely a more serious leak than typical python refcycles.
limit specifies how many tensors to dump debug graphs for (default=1)
"""
def match_obj(obj):
return isinstance(obj, matched_type)
try:
gc.collect()
gc.set_debug(gc.DEBUG_SAVEALL)
garbage_objs = [] # type: ignore[var-annotated]
# run the user code, after cleaning any existing refcycles, and then check for new ones
# also allow usercode to check the garbage objs (e.g. for assertion) after exiting ctxmgr
yield garbage_objs
gc.collect()
garbage_objs.extend(filter(match_obj, gc.garbage))
num_garbage_objs = len(garbage_objs)
if num_garbage_objs > 0:
warnings.warn(
f"{num_garbage_objs} tensors were found in the garbage. Did you introduce a reference cycle?", stacklevel=2
)
try:
import objgraph # type: ignore[import-not-found,import-untyped]
warnings.warn(
f"Dumping first {limit} objgraphs of leaked {matched_type}s rendered to png", stacklevel=2
)
for g in garbage_objs[:limit]:
objgraph.show_backrefs([g], max_depth=10)
except ImportError:
warnings.warn("`pip install objgraph` to enable memory leak debugging", stacklevel=2)
finally:
gc.set_debug(0)
def remove_cpp_extensions_build_root():
"""
Removes the default root folder under which extensions are built.
"""
default_build_root = cpp_extension.get_default_build_root()
if os.path.exists(default_build_root):
if IS_WINDOWS:
# rmtree returns permission error: [WinError 5] Access is denied
# on Windows, this is a workaround
subprocess.run(["rm", "-rf", default_build_root], stdout=subprocess.PIPE)
else:
shutil.rmtree(default_build_root, ignore_errors=True)
def install_cpp_extension(extension_root):
# Wipe the build / install dirs if they exist
build_dir = os.path.join(extension_root, "build")
install_dir = os.path.join(extension_root, "install")
for d in (build_dir, install_dir):
if os.path.exists(d):
shutil.rmtree(d)
# Build the extension
cmd = [sys.executable, "-m", "pip", "install", extension_root, "-v", "--no-build-isolation", "--root", install_dir]
return_code = shell(cmd, cwd=extension_root, env=os.environ)
if return_code != 0:
raise RuntimeError(f"build failed for cpp extension at {extension_root}")
mod_install_dir = None
# install directory is the one that is named site-packages
for root, directories, _ in os.walk(install_dir):
for directory in directories:
if "-packages" in directory:
mod_install_dir = os.path.join(root, directory)
if mod_install_dir is None:
raise RuntimeError(f"installation failed for cpp extension at {extension_root}")
if mod_install_dir not in sys.path:
sys.path.insert(0, mod_install_dir)
# Decorator to provide a helper to load inline extensions to a temp directory
def scoped_load_inline(func):
@wraps(func)
def wrapper(*args, **kwargs):
def load_inline(*args, **kwargs):
if IS_WINDOWS:
# TODO(xmfan): even using TemporaryDirectoryName will result in permission error
return cpp_extension.load_inline(*args, **kwargs)
assert "build_directory" not in kwargs
with TemporaryDirectoryName() as temp_dir_name:
if kwargs.get("verbose", False):
print(f'Using temporary extension directory {temp_dir_name}...', file=sys.stderr)
kwargs["build_directory"] = temp_dir_name
return cpp_extension.load_inline(*args, **kwargs)
return func(*args, load_inline=load_inline, **kwargs)
return wrapper
def recover_orig_fp32_precision(fn):
@contextlib.contextmanager
def recover():
old_mkldnn_conv_p = torch.backends.mkldnn.conv.fp32_precision # type: ignore[attr-defined]
old_mkldnn_rnn_p = torch.backends.mkldnn.rnn.fp32_precision # type: ignore[attr-defined]
old_mkldnn_matmul_p = torch.backends.mkldnn.matmul.fp32_precision # type: ignore[attr-defined]
old_cudnn_conv_p = torch.backends.cudnn.conv.fp32_precision # type: ignore[attr-defined]
old_cudnn_rnn_p = torch.backends.cudnn.rnn.fp32_precision # type: ignore[attr-defined]
old_cuda_matmul_p = torch.backends.cuda.matmul.fp32_precision
try:
yield
finally:
torch.backends.mkldnn.conv.fp32_precision = old_mkldnn_conv_p # type: ignore[attr-defined]
torch.backends.mkldnn.rnn.fp32_precision = old_mkldnn_rnn_p # type: ignore[attr-defined]
torch.backends.mkldnn.matmul.fp32_precision = old_mkldnn_matmul_p # type: ignore[attr-defined]
torch.backends.cudnn.conv.fp32_precision = old_cudnn_conv_p # type: ignore[attr-defined]
torch.backends.cudnn.rnn.fp32_precision = old_cudnn_rnn_p # type: ignore[attr-defined]
torch.backends.cuda.matmul.fp32_precision = old_cuda_matmul_p
return recover()(fn)
def skipIfPythonVersionMismatch(predicate):
vi = sys.version_info
def dec_fn(fn):
@wraps(fn)
def wrap_fn(self, *args, **kwargs):
if predicate(vi.major, vi.minor, vi.micro):
return fn(self, *args, **kwargs)
else:
raise unittest.SkipTest("Python version mismatch")
return wrap_fn
return dec_fn
# Decorator to patch multiple test class members for the duration of the subtest
def patch_test_members(updates: dict[str, Any]):
def decorator(test_func):
@wraps(test_func)
def wrapper(self, *args, **kwargs):
# Store the original values of the specified members
original_values = {member: getattr(self, member) for member in updates}
# Update the members before running the subtest
for member, value in updates.items():
setattr(self, member, value)
# Run the test function, allowing subtests to run
try:
return test_func(self, *args, **kwargs)
finally:
# Restore the original values of the specified members after the subtest finishes
for member, original_value in original_values.items():
setattr(self, member, original_value)
return wrapper
return decorator
|
NestedTensorTestCase
|
python
|
django__django
|
django/db/backends/base/client.py
|
{
"start": 30,
"end": 989
}
|
class ____:
"""Encapsulate backend-specific methods for opening a client shell."""
# This should be a string representing the name of the executable
# (e.g., "psql"). Subclasses must override this.
executable_name = None
def __init__(self, connection):
# connection is an instance of BaseDatabaseWrapper.
self.connection = connection
def __del__(self):
del self.connection
@classmethod
def settings_to_cmd_args_env(cls, settings_dict, parameters):
raise NotImplementedError(
"subclasses of BaseDatabaseClient must provide a "
"settings_to_cmd_args_env() method or override a runshell()."
)
def runshell(self, parameters):
args, env = self.settings_to_cmd_args_env(
self.connection.settings_dict, parameters
)
env = {**os.environ, **env} if env else None
subprocess.run(args, env=env, check=True)
|
BaseDatabaseClient
|
python
|
sympy__sympy
|
sympy/codegen/ast.py
|
{
"start": 52015,
"end": 52702
}
|
class ____(Token):
r""" Represents print command in the code.
Parameters
==========
formatstring : str
*args : Basic instances (or convertible to such through sympify)
Examples
========
>>> from sympy.codegen.ast import Print
>>> from sympy import pycode
>>> print(pycode(Print('x y'.split(), "coordinate: %12.5g %12.5g\\n")))
print("coordinate: %12.5g %12.5g\n" % (x, y), end="")
"""
__slots__ = _fields = ('print_args', 'format_string', 'file')
defaults = {'format_string': none, 'file': none}
_construct_print_args = staticmethod(_mk_Tuple)
_construct_format_string = QuotedString
_construct_file = Stream
|
Print
|
python
|
matplotlib__matplotlib
|
tools/run_examples.py
|
{
"start": 438,
"end": 2985
}
|
class ____:
def __init__(self, backend, elapsed, failed):
self.backend = backend
self.elapsed = elapsed
self.failed = failed
def __str__(self):
s = ""
if self.backend:
s += f"{self.backend}: "
s += f"{self.elapsed}ms"
if self.failed:
s += " (failed!)"
return s
def main():
parser = ArgumentParser(description=__doc__)
parser.add_argument(
"--backend", action="append",
help=("backend to test; can be passed multiple times; defaults to the "
"default backend"))
parser.add_argument(
"--include-sgskip", action="store_true",
help="do not filter out *_sgskip.py examples")
parser.add_argument(
"--rundir", type=Path,
help=("directory from where the tests are run; defaults to a "
"temporary directory"))
parser.add_argument(
"paths", nargs="*", type=Path,
help="examples to run; defaults to all examples (except *_sgskip.py)")
args = parser.parse_args()
root = Path(__file__).resolve().parent.parent / "examples"
paths = args.paths if args.paths else sorted(root.glob("**/*.py"))
if not args.include_sgskip:
paths = [path for path in paths if not path.stem.endswith("sgskip")]
relpaths = [path.resolve().relative_to(root) for path in paths]
width = max(len(str(relpath)) for relpath in relpaths)
for relpath in relpaths:
print(str(relpath).ljust(width + 1), end="", flush=True)
runinfos = []
with ExitStack() as stack:
if args.rundir:
cwd = args.rundir / relpath.with_suffix("")
cwd.mkdir(parents=True)
else:
cwd = stack.enter_context(TemporaryDirectory())
with tokenize.open(root / relpath) as src:
Path(cwd, relpath.name).write_text(
_preamble + src.read(), encoding="utf-8")
for backend in args.backend or [None]:
env = {**os.environ}
if backend is not None:
env["MPLBACKEND"] = backend
start = time.perf_counter()
proc = subprocess.run([sys.executable, relpath.name],
cwd=cwd, env=env)
elapsed = round(1000 * (time.perf_counter() - start))
runinfos.append(RunInfo(backend, elapsed, proc.returncode))
print("\t".join(map(str, runinfos)))
if __name__ == "__main__":
main()
|
RunInfo
|
python
|
spack__spack
|
lib/spack/spack/llnl/util/tty/color.py
|
{
"start": 13960,
"end": 14599
}
|
class ____:
def __init__(self, stream, color=None):
self._stream = stream
self._color = color
def write(self, string, **kwargs):
raw = kwargs.get("raw", False)
raw_write = getattr(self._stream, "write")
color = self._color
if self._color is None:
if raw:
color = True
else:
color = get_color_when()
raw_write(colorize(string, color=color))
def writelines(self, sequence, **kwargs):
raw = kwargs.get("raw", False)
for string in sequence:
self.write(string, self.color, raw=raw)
|
ColorStream
|
python
|
xlwings__xlwings
|
xlwings/main.py
|
{
"start": 744,
"end": 2594
}
|
class ____:
def __init__(self, impl):
self.impl = impl
@property
def api(self):
"""
Returns the native object (``pywin32`` or ``appscript`` obj)
of the engine being used.
"""
return self.impl.api
def __call__(self, name_or_index):
return self._wrap(impl=self.impl(name_or_index))
def __len__(self):
return len(self.impl)
@property
def count(self):
"""
Returns the number of objects in the collection.
"""
return len(self)
def __iter__(self):
for impl in self.impl:
yield self._wrap(impl=impl)
def __getitem__(self, key):
if isinstance(key, numbers.Number):
length = len(self)
if key >= length:
raise IndexError("Index %s out of range (%s elements)" % (key, length))
if key < 0:
if key < -length:
raise IndexError(
"Index %s out of range (%s elements)" % (key, length)
)
key += length
return self(key + 1)
elif isinstance(key, slice):
raise ValueError(
self.impl.__class__.__name__ + " object does not support slicing"
)
else:
return self(key)
def __contains__(self, key):
return key in self.impl
# used by repr - by default the name of the collection class, but can be overridden
@property
def _name(self):
return self.__class__.__name__
def __repr__(self):
r = []
for i, x in enumerate(self):
if i == 3:
r.append("...")
break
else:
r.append(repr(x))
return "{}({})".format(self._name, "[" + ", ".join(r) + "]")
|
Collection
|
python
|
kamyu104__LeetCode-Solutions
|
Python/find-a-value-of-a-mysterious-function-closest-to-target.py
|
{
"start": 1492,
"end": 1890
}
|
class ____(object):
def closestToTarget(self, arr, target):
"""
:type arr: List[int]
:type target: int
:rtype: int
"""
result, dp = float("inf"), set() # at most O(logm) dp states
for x in arr:
dp = {x}|{f&x for f in dp}
for f in dp:
result = min(result, abs(f-target))
return result
|
Solution2
|
python
|
huggingface__transformers
|
tests/models/idefics3/test_modeling_idefics3.py
|
{
"start": 1421,
"end": 5267
}
|
class ____:
def __init__(
self,
parent,
is_training=True,
batch_size=2,
scale_factor=2,
num_images=2,
vision_config={
"image_size": 16,
"patch_size": 4,
"hidden_size": 32,
"num_hidden_layers": 2,
"num_attention_heads": 4,
"intermediate_size": 32,
"dropout": 0.1,
"attention_dropout": 0.1,
"initializer_range": 0.02,
},
text_config={
"vocab_size": 100,
"hidden_size": 64,
"intermediate_size": 56,
"num_hidden_layers": 2,
"num_attention_heads": 2,
"num_key_value_heads": 2,
"hidden_act": "silu",
"max_position_embeddings": 256,
"initializer_range": 0.02,
"rms_norm_eps": 1e-6,
"pad_token_id": 2,
"bos_token_id": 0,
"eos_token_id": 1,
"image_token_id": 57,
"tie_word_embeddings": False,
"rope_theta": 10000.0,
"sliding_window": 32,
"attention_dropout": 0.0,
},
use_cache=False,
tie_word_embeddings=False,
image_token_id=57,
):
self.parent = parent
self.pad_token_id = text_config["pad_token_id"]
self.is_training = is_training
self.batch_size = batch_size
self.num_images = num_images
self.scale_factor = scale_factor
self.seq_length = (
int(((vision_config["image_size"] // vision_config["patch_size"]) ** 2) / (self.scale_factor**2))
* self.num_images
)
self.use_cache = use_cache
self.image_token_id = image_token_id
self.tie_word_embeddings = tie_word_embeddings
# Hack - add properties here so use common tests
self.vocab_size = text_config["vocab_size"]
self.num_hidden_layers = text_config["num_hidden_layers"]
self.num_attention_heads = text_config["num_attention_heads"]
self.hidden_size = text_config["hidden_size"]
self.vision_config = vision_config
self.text_config = text_config
def get_config(self):
return Idefics3Config(
use_cache=self.use_cache,
image_token_id=self.image_token_id,
tie_word_embeddings=self.tie_word_embeddings,
vision_config=self.vision_config,
text_config=self.text_config,
vocab_size=self.vocab_size,
scale_factor=self.scale_factor,
)
def prepare_config_and_inputs(self):
pixel_values = floats_tensor(
[
self.batch_size,
self.num_images,
3, # Idefics3ImageProcessor always generates RGB pixel values
self.vision_config["image_size"],
self.vision_config["image_size"],
]
)
config = self.get_config()
return config, pixel_values
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
config, pixel_values = config_and_inputs
input_ids = ids_tensor([self.batch_size, self.seq_length], config.text_config.vocab_size - 2) + 1
# For simplicity just set the last n tokens to the image token
n_image_tokens_per_batch = self.seq_length
input_ids[input_ids == self.image_token_id] = self.pad_token_id
input_ids[:, -n_image_tokens_per_batch:] = self.image_token_id
attention_mask = input_ids.ne(1).to(torch_device)
inputs_dict = {
"pixel_values": pixel_values,
"input_ids": input_ids,
"attention_mask": attention_mask,
}
return config, inputs_dict
@require_torch
|
Idefics3VisionText2TextModelTester
|
python
|
airbytehq__airbyte
|
airbyte-integrations/bases/connector-acceptance-test/connector_acceptance_test/config.py
|
{
"start": 9496,
"end": 10260
}
|
class ____(BaseConfig):
"""Full refresh test config
Attributes:
ignored_fields for each stream, list of fields path. Path should be in format "object_key/object_key2"
"""
config_path: str = config_path
configured_catalog_path: Optional[str] = configured_catalog_path
timeout_seconds: int = timeout_seconds
deployment_mode: Optional[str] = deployment_mode
ignored_fields: Optional[Mapping[str, List[IgnoredFieldsConfiguration]]] = Field(
description="For each stream, list of fields path ignoring in sequential reads test"
)
client_container_config: Optional[ClientContainerConfig] = Field(
description="Information required to run a client Docker container before each test.",
)
|
FullRefreshConfig
|
python
|
prabhupant__python-ds
|
data_structures/circular_linked_list/traversal.py
|
{
"start": 0,
"end": 706
}
|
class ____():
def __init__(self, val):
self.val = val
self.next = None
def push(head, val):
if not head:
head = Node(val)
head.next = head
return
curr = head
while curr:
if curr.next == head:
break
curr = curr.next
curr.next = Node(val)
curr.next.next = head
def print_list(head):
if not head:
return
curr = head
while curr:
print(curr.val, end=" ")
curr = curr.next
if curr == head:
break
first = Node(1)
second = Node(2)
third = Node(3)
first.next = second
second.next = third
third.next = first
print_list(first)
push(first, 4)
print_list(first)
|
Node
|
python
|
numba__numba
|
numba/tests/test_parallel_backend.py
|
{
"start": 2869,
"end": 3109
}
|
class ____(runnable):
def __call__(self):
cfunc = jit(**self._options)(linalg)
a = 4
b = 10
expected = linalg(a, b)
got = cfunc(a, b)
np.testing.assert_allclose(expected, got)
|
linalg_runner
|
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/source-github/source_github/github_schema.py
|
{
"start": 362929,
"end": 363861
}
|
class ____(sgqlc.types.Input):
"""Autogenerated input type of UpdateProjectV2ItemFieldValue"""
__schema__ = github_schema
__field_names__ = ("project_id", "item_id", "field_id", "value", "client_mutation_id")
project_id = sgqlc.types.Field(sgqlc.types.non_null(ID), graphql_name="projectId")
"""The ID of the Project."""
item_id = sgqlc.types.Field(sgqlc.types.non_null(ID), graphql_name="itemId")
"""The ID of the item to be updated."""
field_id = sgqlc.types.Field(sgqlc.types.non_null(ID), graphql_name="fieldId")
"""The ID of the field to be updated."""
value = sgqlc.types.Field(sgqlc.types.non_null(ProjectV2FieldValue), graphql_name="value")
"""The value which will be set on the field."""
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
"""A unique identifier for the client performing the mutation."""
|
UpdateProjectV2ItemFieldValueInput
|
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/source-amazon-ads/unit_tests/integrations/test_sponsored_streams.py
|
{
"start": 1639,
"end": 17115
}
|
class ____(TestCase):
@property
def _config(self):
return ConfigBuilder().build()
def _given_oauth_and_profiles(self, http_mocker: HttpMocker, config: dict) -> None:
"""
Authenticate and get profiles
"""
http_mocker.post(
OAuthRequestBuilder.oauth_endpoint(
client_id=config["client_id"], client_secred=config["client_secret"], refresh_token=config["refresh_token"]
).build(),
OAuthResponseBuilder.token_response().build(),
)
http_mocker.get(
ProfilesRequestBuilder.profiles_endpoint(client_id=config["client_id"], client_access_token=config["access_token"]).build(),
ProfilesResponseBuilder.profiles_response().with_record(ProfilesRecordBuilder.profiles_record()).build(),
)
@HttpMocker()
def test_given_non_breaking_error_when_read_ad_groups_then_stream_is_ignored(self, http_mocker: HttpMocker):
"""
Check ad groups stream: non-breaking errors are ignored
When error of this kind happen, we warn and then keep syncing another streams
"""
self._given_oauth_and_profiles(http_mocker, self._config)
non_breaking_error = ErrorRecordBuilder.non_breaking_error()
http_mocker.post(
SponsoredBrandsRequestBuilder.ad_groups_endpoint(
self._config["client_id"], self._config["access_token"], self._config["profiles"][0]
)
.with_request_body(_DEFAULT_REQUEST_BODY)
.build(),
ErrorResponseBuilder.non_breaking_error_response().with_record(non_breaking_error).with_status_code(400).build(),
)
output = read_stream("sponsored_brands_ad_groups", SyncMode.full_refresh, self._config)
assert len(output.records) == 0
info_logs = get_log_messages_by_log_level(output.logs, LogLevel.INFO)
assert any([non_breaking_error.build().get("details") in info for info in info_logs])
@HttpMocker()
def test_given_breaking_error_when_read_ad_groups_then_stream_stop_syncing(self, http_mocker: HttpMocker):
"""
Check ad groups stream: when unknown error happen we stop syncing with raising the error
"""
self._given_oauth_and_profiles(http_mocker, self._config)
breaking_error = ErrorRecordBuilder.breaking_error()
http_mocker.post(
SponsoredBrandsRequestBuilder.ad_groups_endpoint(
self._config["client_id"], self._config["access_token"], self._config["profiles"][0]
)
.with_request_body(_DEFAULT_REQUEST_BODY)
.build(),
ErrorResponseBuilder.breaking_error_response().with_record(breaking_error).with_status_code(500).build(),
)
with patch("time.sleep", return_value=None):
output = read_stream("sponsored_brands_ad_groups", SyncMode.full_refresh, self._config)
assert len(output.records) == 0
error_logs = get_log_messages_by_log_level(output.logs, LogLevel.ERROR)
assert any([breaking_error.build().get("message") in error for error in error_logs])
@HttpMocker()
def test_given_one_page_when_read_ad_groups_then_return_records(self, http_mocker: HttpMocker):
"""
Check ad groups stream: normal full refresh sync without pagination
"""
stream_name = "sponsored_brands_ad_groups"
data_field = "adGroups"
record_id_path = "adGroupId"
self._given_oauth_and_profiles(http_mocker, self._config)
http_mocker.post(
SponsoredBrandsRequestBuilder.ad_groups_endpoint(
self._config["client_id"], self._config["access_token"], self._config["profiles"][0]
)
.with_request_body(_DEFAULT_REQUEST_BODY)
.build(),
_a_response(stream_name, data_field, None).with_record(_a_record(stream_name, data_field, record_id_path)).build(),
)
output = read_stream("sponsored_brands_ad_groups", SyncMode.full_refresh, self._config)
print(output.records)
assert len(output.records) == 1
@HttpMocker()
def test_given_many_pages_when_read_ad_groups_then_return_records(self, http_mocker: HttpMocker):
"""
Check ad groups stream: normal full refresh sync with pagination
"""
stream_name = "sponsored_brands_ad_groups"
data_field = "adGroups"
record_id_path = "adGroupId"
pagination_strategy = SponsoredCursorBasedPaginationStrategy()
paginated_request_body = json.dumps({"nextToken": "next-page-token", "maxResults": 100})
self._given_oauth_and_profiles(http_mocker, self._config)
http_mocker.post(
SponsoredBrandsRequestBuilder.ad_groups_endpoint(
self._config["client_id"], self._config["access_token"], self._config["profiles"][0]
)
.with_request_body(_DEFAULT_REQUEST_BODY)
.build(),
_a_response(stream_name, data_field, pagination_strategy)
.with_record(_a_record(stream_name, data_field, record_id_path))
.with_pagination()
.build(),
)
http_mocker.post(
SponsoredBrandsRequestBuilder.ad_groups_endpoint(
self._config["client_id"], self._config["access_token"], self._config["profiles"][0]
)
.with_request_body(paginated_request_body)
.build(),
_a_response(stream_name, data_field, pagination_strategy)
.with_record(_a_record(stream_name, data_field, record_id_path))
.build(),
)
output = read_stream("sponsored_brands_ad_groups", SyncMode.full_refresh, self._config)
assert len(output.records) == 2
@HttpMocker()
def test_given_non_breaking_error_when_read_campaigns_then_stream_is_ignored(self, http_mocker: HttpMocker):
"""
Check campaigns stream: non-breaking errors are ignored
When error of this kind happen, we warn and then keep syncing another streams
"""
self._given_oauth_and_profiles(http_mocker, self._config)
non_breaking_error = ErrorRecordBuilder.non_breaking_error()
http_mocker.post(
SponsoredBrandsRequestBuilder.campaigns_endpoint(
self._config["client_id"], self._config["access_token"], self._config["profiles"][0]
)
.with_request_body(_DEFAULT_REQUEST_BODY)
.build(),
ErrorResponseBuilder.non_breaking_error_response().with_record(non_breaking_error).with_status_code(400).build(),
)
output = read_stream("sponsored_brands_campaigns", SyncMode.full_refresh, self._config)
assert len(output.records) == 0
info_logs = get_log_messages_by_log_level(output.logs, LogLevel.INFO)
assert any([non_breaking_error.build().get("details") in info for info in info_logs])
@HttpMocker()
def test_given_breaking_error_when_read_campaigns_then_stream_stop_syncing(self, http_mocker: HttpMocker):
"""
Check campaigns stream: when unknown error happen we stop syncing with raising the error
"""
self._given_oauth_and_profiles(http_mocker, self._config)
breaking_error = ErrorRecordBuilder.breaking_error()
http_mocker.post(
SponsoredBrandsRequestBuilder.campaigns_endpoint(
self._config["client_id"], self._config["access_token"], self._config["profiles"][0]
)
.with_request_body(_DEFAULT_REQUEST_BODY)
.build(),
ErrorResponseBuilder.breaking_error_response().with_record(breaking_error).with_status_code(500).build(),
)
with patch("time.sleep", return_value=None):
output = read_stream("sponsored_brands_campaigns", SyncMode.full_refresh, self._config)
assert len(output.records) == 0
error_logs = get_log_messages_by_log_level(output.logs, LogLevel.ERROR)
assert any([breaking_error.build().get("message") in error for error in error_logs])
@HttpMocker()
def test_given_one_page_when_read_campaigns_then_return_records(self, http_mocker: HttpMocker):
"""
Check campaigns stream: normal full refresh sync without pagination
"""
self._given_oauth_and_profiles(http_mocker, self._config)
stream_name = "sponsored_brands_campaigns"
data_field = "campaigns"
record_id_path = "campaignId"
http_mocker.post(
SponsoredBrandsRequestBuilder.campaigns_endpoint(
self._config["client_id"], self._config["access_token"], self._config["profiles"][0]
)
.with_request_body(_DEFAULT_REQUEST_BODY)
.build(),
_a_response(stream_name, data_field, None).with_record(_a_record(stream_name, data_field, record_id_path)).build(),
)
output = read_stream("sponsored_brands_campaigns", SyncMode.full_refresh, self._config)
assert len(output.records) == 1
@HttpMocker()
def test_given_many_pages_when_read_campaigns_then_return_records(self, http_mocker: HttpMocker):
"""
Check campaigns stream: normal full refresh sync with pagination
"""
stream_name = "sponsored_brands_campaigns"
data_field = "campaigns"
record_id_path = "campaignId"
pagination_strategy = SponsoredCursorBasedPaginationStrategy()
paginated_request_body = json.dumps({"nextToken": "next-page-token", "maxResults": 100})
self._given_oauth_and_profiles(http_mocker, self._config)
http_mocker.post(
SponsoredBrandsRequestBuilder.campaigns_endpoint(
self._config["client_id"], self._config["access_token"], self._config["profiles"][0]
)
.with_request_body(_DEFAULT_REQUEST_BODY)
.build(),
_a_response(stream_name, data_field, pagination_strategy)
.with_record(_a_record(stream_name, data_field, record_id_path))
.with_pagination()
.build(),
)
http_mocker.post(
SponsoredBrandsRequestBuilder.campaigns_endpoint(
self._config["client_id"], self._config["access_token"], self._config["profiles"][0]
)
.with_request_body(paginated_request_body)
.build(),
_a_response(stream_name, data_field, pagination_strategy)
.with_record(_a_record(stream_name, data_field, record_id_path))
.build(),
)
output = read_stream("sponsored_brands_campaigns", SyncMode.full_refresh, self._config)
assert len(output.records) == 2
@HttpMocker()
def test_given_non_breaking_error_when_read_keywords_then_stream_is_ignored(self, http_mocker: HttpMocker):
"""
Check keywords stream: non-breaking errors are ignored
When error of this kind happen, we warn and then keep syncing another streams
"""
self._given_oauth_and_profiles(http_mocker, self._config)
non_breaking_error = ErrorRecordBuilder.non_breaking_error()
http_mocker.get(
SponsoredBrandsRequestBuilder.keywords_endpoint(
self._config["client_id"], self._config["access_token"], self._config["profiles"][0], limit=100
).build(),
ErrorResponseBuilder.non_breaking_error_response().with_record(non_breaking_error).with_status_code(400).build(),
)
output = read_stream("sponsored_brands_keywords", SyncMode.full_refresh, self._config)
assert len(output.records) == 0
info_logs = get_log_messages_by_log_level(output.logs, LogLevel.INFO)
assert any([non_breaking_error.build().get("details") in info for info in info_logs])
@HttpMocker()
def test_given_breaking_error_when_read_keywords_then_stream_stop_syncing(self, http_mocker: HttpMocker):
"""
Check keywords stream: when unknown error happen we stop syncing with raising the error
"""
self._given_oauth_and_profiles(http_mocker, self._config)
breaking_error = ErrorRecordBuilder.breaking_error()
http_mocker.get(
SponsoredBrandsRequestBuilder.keywords_endpoint(
self._config["client_id"], self._config["access_token"], self._config["profiles"][0], limit=100
).build(),
ErrorResponseBuilder.breaking_error_response().with_record(breaking_error).with_status_code(500).build(),
)
with patch("time.sleep", return_value=None):
output = read_stream("sponsored_brands_keywords", SyncMode.full_refresh, self._config)
assert len(output.records) == 0
error_logs = get_log_messages_by_log_level(output.logs, LogLevel.ERROR)
assert any([breaking_error.build().get("message") in error for error in error_logs])
@HttpMocker()
def test_given_one_page_when_read_keywords_then_return_records(self, http_mocker: HttpMocker):
"""
Check keywords stream: normal full refresh sync without pagination
"""
self._given_oauth_and_profiles(http_mocker, self._config)
http_mocker.get(
SponsoredBrandsRequestBuilder.keywords_endpoint(
self._config["client_id"], self._config["access_token"], self._config["profiles"][0], limit=100
).build(),
SponsoredBrandsResponseBuilder.keywords_response().with_record(SponsoredBrandsRecordBuilder.keywords_record()).build(),
)
output = read_stream("sponsored_brands_keywords", SyncMode.full_refresh, self._config)
assert len(output.records) == 1
@HttpMocker()
def test_given_many_pages_when_read_keywords_then_return_records(self, http_mocker: HttpMocker):
"""
Check keywords stream: normal full refresh sync with pagination
"""
self._given_oauth_and_profiles(http_mocker, self._config)
http_mocker.get(
SponsoredBrandsRequestBuilder.keywords_endpoint(
self._config["client_id"], self._config["access_token"], self._config["profiles"][0], limit=100
).build(),
SponsoredBrandsResponseBuilder.keywords_response(CountBasedPaginationStrategy())
.with_record(SponsoredBrandsRecordBuilder.keywords_record())
.with_pagination()
.build(),
)
http_mocker.get(
SponsoredBrandsRequestBuilder.keywords_endpoint(
self._config["client_id"], self._config["access_token"], self._config["profiles"][0], limit=100, start_index=100
).build(),
SponsoredBrandsResponseBuilder.keywords_response(CountBasedPaginationStrategy())
.with_record(SponsoredBrandsRecordBuilder.keywords_record())
.with_pagination()
.build(),
)
http_mocker.get(
SponsoredBrandsRequestBuilder.keywords_endpoint(
self._config["client_id"], self._config["access_token"], self._config["profiles"][0], limit=100, start_index=200
).build(),
SponsoredBrandsResponseBuilder.keywords_response().with_record(SponsoredBrandsRecordBuilder.keywords_record()).build(),
)
output = read_stream("sponsored_brands_keywords", SyncMode.full_refresh, self._config)
assert len(output.records) == 201
|
TestSponsoredBrandsStreamsFullRefresh
|
python
|
apache__airflow
|
providers/snowflake/src/airflow/providers/snowflake/operators/snowflake.py
|
{
"start": 8468,
"end": 11878
}
|
class ____(SQLIntervalCheckOperator):
"""
Checks that the metrics given as SQL expressions are within tolerance of the ones from days_back before.
This method constructs a query like so ::
SELECT {metrics_threshold_dict_key} FROM {table}
WHERE {date_filter_column}=<date>
:param table: the table name
:param days_back: number of days between ds and the ds we want to check
against. Defaults to 7 days
:param metrics_thresholds: a dictionary of ratios indexed by metrics, for
example 'COUNT(*)': 1.5 would require a 50 percent or less difference
between the current day, and the prior days_back.
:param snowflake_conn_id: Reference to
:ref:`Snowflake connection id<howto/connection:snowflake>`
:param autocommit: if True, each command is automatically committed.
(default value: True)
:param parameters: (optional) the parameters to render the SQL query with.
:param warehouse: name of warehouse (will overwrite any warehouse
defined in the connection's extra JSON)
:param database: name of database (will overwrite database defined
in connection)
:param schema: name of schema (will overwrite schema defined in
connection)
:param role: name of role (will overwrite any role defined in
connection's extra JSON)
:param authenticator: authenticator for Snowflake.
'snowflake' (default) to use the internal Snowflake authenticator
'externalbrowser' to authenticate using your web browser and
Okta, ADFS or any other SAML 2.0-compliant identify provider
(IdP) that has been defined for your account
'https://<your_okta_account_name>.okta.com' to authenticate
through native Okta.
:param session_parameters: You can set session-level parameters at
the time you connect to Snowflake
"""
template_fields: Sequence[str] = tuple(
set(SQLIntervalCheckOperator.template_fields) | {"snowflake_conn_id"}
)
conn_id_field = "snowflake_conn_id"
def __init__(
self,
*,
table: str,
metrics_thresholds: dict,
date_filter_column: str = "ds",
days_back: SupportsAbs[int] = -7,
snowflake_conn_id: str = "snowflake_default",
warehouse: str | None = None,
database: str | None = None,
role: str | None = None,
schema: str | None = None,
authenticator: str | None = None,
session_parameters: dict | None = None,
**kwargs,
) -> None:
self.snowflake_conn_id = snowflake_conn_id
if any([warehouse, database, role, schema, authenticator, session_parameters]):
hook_params = kwargs.pop("hook_params", {})
kwargs["hook_params"] = {
"warehouse": warehouse,
"database": database,
"role": role,
"schema": schema,
"authenticator": authenticator,
"session_parameters": session_parameters,
**hook_params,
}
super().__init__(
table=table,
metrics_thresholds=metrics_thresholds,
date_filter_column=date_filter_column,
days_back=days_back,
conn_id=snowflake_conn_id,
**kwargs,
)
self.query_ids: list[str] = []
|
SnowflakeIntervalCheckOperator
|
python
|
tensorflow__tensorflow
|
tensorflow/python/data/experimental/kernel_tests/group_by_reducer_test.py
|
{
"start": 1485,
"end": 7693
}
|
class ____(test_base.DatasetTestBase, parameterized.TestCase):
@combinations.generate(test_base.default_test_combinations())
def testSum(self):
reducer = grouping.Reducer(
init_func=lambda _: np.int64(0),
reduce_func=lambda x, y: x + y,
finalize_func=lambda x: x)
for i in range(1, 11):
dataset = dataset_ops.Dataset.range(2 * i).apply(
grouping.group_by_reducer(lambda x: x % 2, reducer))
self.assertDatasetProduces(
dataset,
expected_shapes=tensor_shape.TensorShape([]),
expected_output=[(i - 1) * i, i * i])
@combinations.generate(test_base.default_test_combinations())
def testAverage(self):
def reduce_fn(x, y):
return (x[0] * x[1] + math_ops.cast(y, dtypes.float32)) / (
x[1] + 1), x[1] + 1
reducer = grouping.Reducer(
init_func=lambda _: (0.0, 0.0),
reduce_func=reduce_fn,
finalize_func=lambda x, _: x)
for i in range(1, 11):
dataset = dataset_ops.Dataset.range(2 * i).apply(
grouping.group_by_reducer(
lambda x: math_ops.cast(x, dtypes.int64) % 2, reducer))
self.assertDatasetProduces(
dataset,
expected_shapes=tensor_shape.TensorShape([]),
expected_output=[i - 1, i])
@combinations.generate(test_base.default_test_combinations())
def testConcat(self):
components = np.array(list("abcdefghijklmnopqrst")).view(np.char.chararray)
reducer = grouping.Reducer(
init_func=lambda x: "",
reduce_func=lambda x, y: x + y[0],
finalize_func=lambda x: x)
for i in range(1, 11):
dataset = dataset_ops.Dataset.zip(
(dataset_ops.Dataset.from_tensor_slices(components),
dataset_ops.Dataset.range(2 * i))).apply(
grouping.group_by_reducer(lambda x, y: y % 2, reducer))
self.assertDatasetProduces(
dataset,
expected_shapes=tensor_shape.TensorShape([]),
expected_output=[b"acegikmoqs"[:i], b"bdfhjlnprt"[:i]])
@combinations.generate(test_base.default_test_combinations())
def testSparseSum(self):
def _sparse(i):
return sparse_tensor.SparseTensorValue(
indices=np.array([[0, 0]]),
values=(i * np.array([1], dtype=np.int64)),
dense_shape=np.array([1, 1]))
reducer = grouping.Reducer(
init_func=lambda _: _sparse(np.int64(0)),
reduce_func=lambda x, y: _sparse(x.values[0] + y.values[0]),
finalize_func=lambda x: x.values[0])
for i in range(1, 11):
dataset = dataset_ops.Dataset.range(2 * i).map(_sparse).apply(
grouping.group_by_reducer(lambda x: x.values[0] % 2, reducer))
self.assertDatasetProduces(
dataset,
expected_shapes=tensor_shape.TensorShape([]),
expected_output=[(i - 1) * i, i * i])
@combinations.generate(test_base.default_test_combinations())
def testChangingStateShape(self):
def reduce_fn(x, _):
# Statically known rank, but dynamic length.
larger_dim = array_ops.concat([x[0], x[0]], 0)
# Statically unknown rank.
larger_rank = array_ops.expand_dims(x[1], 0)
return larger_dim, larger_rank
reducer = grouping.Reducer(
init_func=lambda x: ([0], 1),
reduce_func=reduce_fn,
finalize_func=lambda x, y: (x, y))
for i in range(1, 11):
dataset = dataset_ops.Dataset.from_tensors(np.int64(0)).repeat(i).apply(
grouping.group_by_reducer(lambda x: x, reducer))
dataset_output_shapes = dataset_ops.get_legacy_output_shapes(dataset)
self.assertEqual([None], dataset_output_shapes[0].as_list())
self.assertIs(None, dataset_output_shapes[1].ndims)
get_next = self.getNext(dataset)
x, y = self.evaluate(get_next())
self.assertAllEqual([0] * (2**i), x)
self.assertAllEqual(np.array(1, ndmin=i), y)
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
@combinations.generate(test_base.default_test_combinations())
def testTypeMismatch(self):
reducer = grouping.Reducer(
init_func=lambda x: constant_op.constant(1, dtype=dtypes.int32),
reduce_func=lambda x, y: constant_op.constant(1, dtype=dtypes.int64),
finalize_func=lambda x: x)
dataset = dataset_ops.Dataset.range(10)
with self.assertRaises(TypeError):
dataset.apply(
grouping.group_by_reducer(lambda _: np.int64(0), reducer))
# TODO(b/78665031): Remove once non-scalar keys are supported.
@combinations.generate(test_base.default_test_combinations())
def testInvalidKeyShape(self):
reducer = grouping.Reducer(
init_func=lambda x: np.int64(0),
reduce_func=lambda x, y: x + y,
finalize_func=lambda x: x)
dataset = dataset_ops.Dataset.range(10)
with self.assertRaises(ValueError):
dataset.apply(
grouping.group_by_reducer(lambda _: np.int64((0, 0)), reducer))
# TODO(b/78665031): Remove once non-int64 keys are supported.
@combinations.generate(test_base.default_test_combinations())
def testInvalidKeyType(self):
reducer = grouping.Reducer(
init_func=lambda x: np.int64(0),
reduce_func=lambda x, y: x + y,
finalize_func=lambda x: x)
dataset = dataset_ops.Dataset.range(10)
with self.assertRaises(ValueError):
dataset.apply(
grouping.group_by_reducer(lambda _: "wrong", reducer))
@combinations.generate(test_base.default_test_combinations())
def testTuple(self):
def init_fn(_):
return np.array([], dtype=np.int64), np.int64(0)
def reduce_fn(state, value):
s1, s2 = state
v1, v2 = value
return array_ops.concat([s1, [v1]], 0), s2 + v2
def finalize_fn(s1, s2):
return s1, s2
reducer = grouping.Reducer(init_fn, reduce_fn, finalize_fn)
dataset = dataset_ops.Dataset.zip(
(dataset_ops.Dataset.range(10), dataset_ops.Dataset.range(10))).apply(
grouping.group_by_reducer(lambda x, y: np.int64(0), reducer))
get_next = self.getNext(dataset)
x, y = self.evaluate(get_next())
self.assertAllEqual(x, np.asarray([x for x in range(10)]))
self.assertEqual(y, 45)
|
GroupByReducerTest
|
python
|
rapidsai__cudf
|
python/cudf_polars/cudf_polars/dsl/expressions/base.py
|
{
"start": 7043,
"end": 7976
}
|
class ____(Expr):
__slots__ = ("index", "table_ref")
_non_child = ("dtype", "index", "table_ref")
index: int
table_ref: plc.expressions.TableReference
def __init__(
self,
dtype: DataType,
index: int,
table_ref: plc.expressions.TableReference,
column: Expr,
) -> None:
if not isinstance(column, Col):
raise TypeError("Column reference should only apply to columns")
self.dtype = dtype
self.index = index
self.table_ref = table_ref
self.is_pointwise = True
self.children = (column,)
def do_evaluate(
self, df: DataFrame, *, context: ExecutionContext = ExecutionContext.FRAME
) -> Column:
"""Evaluate this expression given a dataframe for context."""
raise NotImplementedError(
"Only expect this node as part of an expression translated to libcudf AST."
)
|
ColRef
|
python
|
python__mypy
|
mypyc/irbuild/for_helpers.py
|
{
"start": 42567,
"end": 43680
}
|
class ____(ForGenerator):
"""Generate optimized IR for a for loop counting from 0 to infinity."""
def init(self) -> None:
builder = self.builder
# Create a register to store the state of the loop index and
# initialize this register along with the loop index to 0.
zero = Integer(0)
self.index_reg = builder.maybe_spill_assignable(zero)
self.index_target: Register | AssignmentTarget = builder.get_assignment_target(self.index)
def gen_step(self) -> None:
builder = self.builder
line = self.line
# We can safely assume that the integer is short, since we are not going to wrap
# around a 63-bit integer.
# NOTE: This would be questionable if short ints could be 32 bits.
new_val = builder.int_op(
short_int_rprimitive, builder.read(self.index_reg, line), Integer(1), IntOp.ADD, line
)
builder.assign(self.index_reg, new_val, line)
def begin_body(self) -> None:
self.builder.assign(self.index_target, self.builder.read(self.index_reg), self.line)
|
ForInfiniteCounter
|
python
|
dagster-io__dagster
|
examples/docs_projects/project_atproto_dashboard/src/project_atproto_dashboard/defs/modeling.py
|
{
"start": 474,
"end": 1542
}
|
class ____(DagsterDbtTranslator):
def get_asset_spec(
self,
manifest: Mapping[str, Any],
unique_id: str,
project: Optional[DbtProject],
) -> dg.AssetSpec:
dbt_resource_props = get_node(manifest, unique_id)
asset_path = dbt_resource_props["fqn"][1:-1]
if asset_path:
group_name = "_".join(asset_path)
else:
group_name = "default"
if dbt_resource_props["resource_type"] == "source":
asset_key = dg.AssetKey(dbt_resource_props["name"])
else:
asset_key = super().get_asset_key(dbt_resource_props)
return dg.AssetSpec(
key=asset_key,
group_name=group_name,
)
# start_dbt_assets
@dbt_assets(
manifest=dbt_project.manifest_path,
dagster_dbt_translator=CustomizedDagsterDbtTranslator(),
)
def dbt_bluesky(context: dg.AssetExecutionContext, dbt: DbtCliResource):
yield from (dbt.cli(["build"], context=context).stream().fetch_row_counts())
# end_dbt_assets
|
CustomizedDagsterDbtTranslator
|
python
|
huggingface__transformers
|
tests/quantization/bnb/test_mixed_int8.py
|
{
"start": 24839,
"end": 26256
}
|
class ____(BaseMixedInt8Test):
def setUp(self):
super().setUp()
def tearDown(self):
r"""
TearDown function needs to be called at the end of each test to free the GPU memory and cache, also to
avoid unexpected behaviors. Please see: https://discuss.pytorch.org/t/how-can-we-release-gpu-memory-cache/14530/27
"""
if hasattr(self, "pipe"):
del self.pipe
gc.collect()
backend_empty_cache(torch_device)
def test_pipeline(self):
r"""
The aim of this test is to verify that the mixed int8 is compatible with `pipeline` from transformers. Since
we used pipeline for inference speed benchmarking we want to make sure that this feature does not break anything
on pipeline.
"""
# self._clear_cuda_cache()
self.pipe = pipeline(
"text-generation",
model=self.model_name,
model_kwargs={"device_map": "auto", "quantization_config": BitsAndBytesConfig(load_in_8bit=True)},
max_new_tokens=self.MAX_NEW_TOKENS,
)
# Avoid sampling different outputs
set_seed(42)
# Real second forward pass
pipeline_output = self.pipe(self.input_text)
self.assertIn(pipeline_output[0]["generated_text"], self.EXPECTED_OUTPUTS)
@require_torch_multi_accelerator
@apply_skip_if_not_implemented
|
MixedInt8TestPipeline
|
python
|
huggingface__transformers
|
src/transformers/models/gpt_bigcode/modeling_gpt_bigcode.py
|
{
"start": 10183,
"end": 10962
}
|
class ____(nn.Module):
def __init__(self, intermediate_size, config):
super().__init__()
embed_dim = config.hidden_size
self.c_fc = nn.Linear(embed_dim, intermediate_size)
self.c_proj = nn.Linear(intermediate_size, embed_dim)
self.act = ACT2FN[config.activation_function]
self.dropout = nn.Dropout(config.resid_pdrop)
# Copied from transformers.models.gpt2.modeling_gpt2.GPT2MLP.forward
def forward(self, hidden_states: Optional[tuple[torch.FloatTensor]]) -> torch.FloatTensor:
hidden_states = self.c_fc(hidden_states)
hidden_states = self.act(hidden_states)
hidden_states = self.c_proj(hidden_states)
hidden_states = self.dropout(hidden_states)
return hidden_states
|
GPTBigCodeMLP
|
python
|
apache__airflow
|
providers/cncf/kubernetes/tests/unit/cncf/kubernetes/operators/test_resource.py
|
{
"start": 2062,
"end": 10172
}
|
class ____:
@pytest.fixture(autouse=True)
def setup_tests(self, dag_maker):
self._default_client_patch = patch(f"{HOOK_CLASS}._get_default_client")
self._default_client_mock = self._default_client_patch.start()
yield
patch.stopall()
def setup_method(self):
args = {"owner": "airflow", "start_date": timezone.datetime(2020, 2, 1)}
self.dag = DAG("test_dag_id", schedule=None, default_args=args)
@patch("kubernetes.config.load_kube_config")
@patch("kubernetes.client.api.CoreV1Api.create_namespaced_persistent_volume_claim")
def test_create_application_from_yaml(
self, mock_create_namespaced_persistent_volume_claim, mock_load_kube_config, context
):
op = KubernetesCreateResourceOperator(
yaml_conf=TEST_VALID_RESOURCE_YAML,
dag=self.dag,
kubernetes_conn_id="kubernetes_default",
task_id="test_task_id",
config_file="/foo/bar",
)
op.execute(context)
mock_create_namespaced_persistent_volume_claim.assert_called_once_with(
body=yaml.safe_load(TEST_VALID_RESOURCE_YAML), namespace="default"
)
@patch("kubernetes.client.api.CoreV1Api.create_namespaced_persistent_volume_claim")
def test_create_application_from_yaml_list(self, mock_create_namespaced_persistent_volume_claim, context):
op = KubernetesCreateResourceOperator(
yaml_conf=TEST_VALID_LIST_RESOURCE_YAML,
dag=self.dag,
kubernetes_conn_id="kubernetes_default",
task_id="test_task_id",
)
op.execute(context)
assert mock_create_namespaced_persistent_volume_claim.call_count == 2
@patch("kubernetes.config.load_kube_config")
@patch("kubernetes.client.api.CoreV1Api.delete_namespaced_persistent_volume_claim")
def test_single_delete_application_from_yaml(
self, mock_delete_namespaced_persistent_volume_claim, mock_load_kube_config, context
):
op = KubernetesDeleteResourceOperator(
yaml_conf=TEST_VALID_RESOURCE_YAML,
dag=self.dag,
kubernetes_conn_id="kubernetes_default",
task_id="test_task_id",
config_file="/foo/bar",
)
op.execute(context)
mock_delete_namespaced_persistent_volume_claim.assert_called()
@patch("kubernetes.config.load_kube_config")
@patch("kubernetes.client.api.CoreV1Api.delete_namespaced_persistent_volume_claim")
def test_multi_delete_application_from_yaml(
self, mock_delete_namespaced_persistent_volume_claim, mock_load_kube_config, context
):
op = KubernetesDeleteResourceOperator(
yaml_conf=TEST_VALID_LIST_RESOURCE_YAML,
dag=self.dag,
kubernetes_conn_id="kubernetes_default",
task_id="test_task_id",
config_file="/foo/bar",
)
op.execute(context)
mock_delete_namespaced_persistent_volume_claim.assert_called()
@patch("kubernetes.client.api.CustomObjectsApi.create_namespaced_custom_object")
def test_create_custom_application_from_yaml(self, mock_create_namespaced_custom_object, context):
op = KubernetesCreateResourceOperator(
yaml_conf=TEST_VALID_CRD_YAML,
dag=self.dag,
kubernetes_conn_id="kubernetes_default",
task_id="test_task_id",
custom_resource_definition=True,
)
op.execute(context)
mock_create_namespaced_custom_object.assert_called_once_with(
"ray.io",
"v1",
"default",
"rayjobs",
yaml.safe_load(TEST_VALID_CRD_YAML),
)
@patch("kubernetes.client.api.CustomObjectsApi.delete_namespaced_custom_object")
def test_delete_custom_application_from_yaml(self, mock_delete_namespaced_custom_object, context):
op = KubernetesDeleteResourceOperator(
yaml_conf=TEST_VALID_CRD_YAML,
dag=self.dag,
kubernetes_conn_id="kubernetes_default",
task_id="test_task_id",
custom_resource_definition=True,
)
op.execute(context)
mock_delete_namespaced_custom_object.assert_called_once_with(
"ray.io",
"v1",
"default",
"rayjobs",
"rayjob-sample",
)
@patch("kubernetes.client.api.CustomObjectsApi.create_cluster_custom_object")
def test_create_not_namespaced_custom_app_from_yaml(self, mock_create_cluster_custom_object, context):
op = KubernetesCreateResourceOperator(
yaml_conf=TEST_NOT_NAMESPACED_CRD_YAML,
dag=self.dag,
kubernetes_conn_id="kubernetes_default",
task_id="test_task_id",
custom_resource_definition=True,
namespaced=False,
)
op.execute(context)
mock_create_cluster_custom_object.assert_called_once_with(
"kueue.x-k8s.io",
"v1beta1",
"resourceflavors",
yaml.safe_load(TEST_NOT_NAMESPACED_CRD_YAML),
)
@patch("kubernetes.client.api.CustomObjectsApi.delete_cluster_custom_object")
def test_delete_not_namespaced_custom_app_from_yaml(self, mock_delete_cluster_custom_object, context):
op = KubernetesDeleteResourceOperator(
yaml_conf=TEST_NOT_NAMESPACED_CRD_YAML,
dag=self.dag,
kubernetes_conn_id="kubernetes_default",
task_id="test_task_id",
custom_resource_definition=True,
namespaced=False,
)
op.execute(context)
mock_delete_cluster_custom_object.assert_called_once_with(
"kueue.x-k8s.io",
"v1beta1",
"resourceflavors",
"default-flavor-test",
)
@patch("kubernetes.config.load_kube_config")
@patch("airflow.providers.cncf.kubernetes.operators.resource.create_from_yaml")
def test_create_objects_retries_on_500_error(self, mock_create_from_yaml, mock_load_kube_config, context):
mock_create_from_yaml.side_effect = [
ApiException(status=500),
MagicMock(),
]
op = KubernetesCreateResourceOperator(
yaml_conf=TEST_VALID_RESOURCE_YAML,
dag=self.dag,
kubernetes_conn_id="kubernetes_default",
task_id="test_task_id",
config_file="/foo/bar",
)
op.execute(context)
assert mock_create_from_yaml.call_count == 2
@patch("kubernetes.config.load_kube_config")
@patch("airflow.providers.cncf.kubernetes.operators.resource.create_from_yaml")
def test_create_objects_fails_on_other_exception(
self, mock_create_from_yaml, mock_load_kube_config, context
):
mock_create_from_yaml.side_effect = [ApiException(status=404)]
op = KubernetesCreateResourceOperator(
yaml_conf=TEST_VALID_RESOURCE_YAML,
dag=self.dag,
kubernetes_conn_id="kubernetes_default",
task_id="test_task_id",
config_file="/foo/bar",
)
with pytest.raises(ApiException):
op.execute(context)
@patch("kubernetes.config.load_kube_config")
@patch("airflow.providers.cncf.kubernetes.operators.resource.create_from_yaml")
def test_create_objects_retries_five_times(self, mock_create_from_yaml, mock_load_kube_config, context):
mock_create_from_yaml.side_effect = [
ApiException(status=500),
ApiException(status=500),
ApiException(status=500),
ApiException(status=500),
ApiException(status=500),
]
op = KubernetesCreateResourceOperator(
yaml_conf=TEST_VALID_RESOURCE_YAML,
dag=self.dag,
kubernetes_conn_id="kubernetes_default",
task_id="test_task_id",
config_file="/foo/bar",
)
with pytest.raises(ApiException):
op.execute(context)
assert mock_create_from_yaml.call_count == 5
|
TestKubernetesXResourceOperator
|
python
|
openai__openai-python
|
tests/api_resources/beta/test_assistants.py
|
{
"start": 9435,
"end": 19106
}
|
class ____:
parametrize = pytest.mark.parametrize(
"async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
)
@parametrize
async def test_method_create(self, async_client: AsyncOpenAI) -> None:
assistant = await async_client.beta.assistants.create(
model="gpt-4o",
)
assert_matches_type(Assistant, assistant, path=["response"])
@parametrize
async def test_method_create_with_all_params(self, async_client: AsyncOpenAI) -> None:
assistant = await async_client.beta.assistants.create(
model="gpt-4o",
description="description",
instructions="instructions",
metadata={"foo": "string"},
name="name",
reasoning_effort="none",
response_format="auto",
temperature=1,
tool_resources={
"code_interpreter": {"file_ids": ["string"]},
"file_search": {
"vector_store_ids": ["string"],
"vector_stores": [
{
"chunking_strategy": {"type": "auto"},
"file_ids": ["string"],
"metadata": {"foo": "string"},
}
],
},
},
tools=[{"type": "code_interpreter"}],
top_p=1,
)
assert_matches_type(Assistant, assistant, path=["response"])
@parametrize
async def test_raw_response_create(self, async_client: AsyncOpenAI) -> None:
response = await async_client.beta.assistants.with_raw_response.create(
model="gpt-4o",
)
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
assistant = response.parse()
assert_matches_type(Assistant, assistant, path=["response"])
@parametrize
async def test_streaming_response_create(self, async_client: AsyncOpenAI) -> None:
async with async_client.beta.assistants.with_streaming_response.create(
model="gpt-4o",
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
assistant = await response.parse()
assert_matches_type(Assistant, assistant, path=["response"])
assert cast(Any, response.is_closed) is True
@parametrize
async def test_method_retrieve(self, async_client: AsyncOpenAI) -> None:
assistant = await async_client.beta.assistants.retrieve(
"assistant_id",
)
assert_matches_type(Assistant, assistant, path=["response"])
@parametrize
async def test_raw_response_retrieve(self, async_client: AsyncOpenAI) -> None:
response = await async_client.beta.assistants.with_raw_response.retrieve(
"assistant_id",
)
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
assistant = response.parse()
assert_matches_type(Assistant, assistant, path=["response"])
@parametrize
async def test_streaming_response_retrieve(self, async_client: AsyncOpenAI) -> None:
async with async_client.beta.assistants.with_streaming_response.retrieve(
"assistant_id",
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
assistant = await response.parse()
assert_matches_type(Assistant, assistant, path=["response"])
assert cast(Any, response.is_closed) is True
@parametrize
async def test_path_params_retrieve(self, async_client: AsyncOpenAI) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `assistant_id` but received ''"):
await async_client.beta.assistants.with_raw_response.retrieve(
"",
)
@parametrize
async def test_method_update(self, async_client: AsyncOpenAI) -> None:
assistant = await async_client.beta.assistants.update(
assistant_id="assistant_id",
)
assert_matches_type(Assistant, assistant, path=["response"])
@parametrize
async def test_method_update_with_all_params(self, async_client: AsyncOpenAI) -> None:
assistant = await async_client.beta.assistants.update(
assistant_id="assistant_id",
description="description",
instructions="instructions",
metadata={"foo": "string"},
model="string",
name="name",
reasoning_effort="none",
response_format="auto",
temperature=1,
tool_resources={
"code_interpreter": {"file_ids": ["string"]},
"file_search": {"vector_store_ids": ["string"]},
},
tools=[{"type": "code_interpreter"}],
top_p=1,
)
assert_matches_type(Assistant, assistant, path=["response"])
@parametrize
async def test_raw_response_update(self, async_client: AsyncOpenAI) -> None:
response = await async_client.beta.assistants.with_raw_response.update(
assistant_id="assistant_id",
)
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
assistant = response.parse()
assert_matches_type(Assistant, assistant, path=["response"])
@parametrize
async def test_streaming_response_update(self, async_client: AsyncOpenAI) -> None:
async with async_client.beta.assistants.with_streaming_response.update(
assistant_id="assistant_id",
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
assistant = await response.parse()
assert_matches_type(Assistant, assistant, path=["response"])
assert cast(Any, response.is_closed) is True
@parametrize
async def test_path_params_update(self, async_client: AsyncOpenAI) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `assistant_id` but received ''"):
await async_client.beta.assistants.with_raw_response.update(
assistant_id="",
)
@parametrize
async def test_method_list(self, async_client: AsyncOpenAI) -> None:
assistant = await async_client.beta.assistants.list()
assert_matches_type(AsyncCursorPage[Assistant], assistant, path=["response"])
@parametrize
async def test_method_list_with_all_params(self, async_client: AsyncOpenAI) -> None:
assistant = await async_client.beta.assistants.list(
after="after",
before="before",
limit=0,
order="asc",
)
assert_matches_type(AsyncCursorPage[Assistant], assistant, path=["response"])
@parametrize
async def test_raw_response_list(self, async_client: AsyncOpenAI) -> None:
response = await async_client.beta.assistants.with_raw_response.list()
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
assistant = response.parse()
assert_matches_type(AsyncCursorPage[Assistant], assistant, path=["response"])
@parametrize
async def test_streaming_response_list(self, async_client: AsyncOpenAI) -> None:
async with async_client.beta.assistants.with_streaming_response.list() as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
assistant = await response.parse()
assert_matches_type(AsyncCursorPage[Assistant], assistant, path=["response"])
assert cast(Any, response.is_closed) is True
@parametrize
async def test_method_delete(self, async_client: AsyncOpenAI) -> None:
assistant = await async_client.beta.assistants.delete(
"assistant_id",
)
assert_matches_type(AssistantDeleted, assistant, path=["response"])
@parametrize
async def test_raw_response_delete(self, async_client: AsyncOpenAI) -> None:
response = await async_client.beta.assistants.with_raw_response.delete(
"assistant_id",
)
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
assistant = response.parse()
assert_matches_type(AssistantDeleted, assistant, path=["response"])
@parametrize
async def test_streaming_response_delete(self, async_client: AsyncOpenAI) -> None:
async with async_client.beta.assistants.with_streaming_response.delete(
"assistant_id",
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
assistant = await response.parse()
assert_matches_type(AssistantDeleted, assistant, path=["response"])
assert cast(Any, response.is_closed) is True
@parametrize
async def test_path_params_delete(self, async_client: AsyncOpenAI) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `assistant_id` but received ''"):
await async_client.beta.assistants.with_raw_response.delete(
"",
)
|
TestAsyncAssistants
|
python
|
bokeh__bokeh
|
src/bokeh/models/textures.py
|
{
"start": 1889,
"end": 2197
}
|
class ____(Texture):
'''
'''
# explicit __init__ to support Init signatures
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
code = Required(String, help="""
A snippet of JavaScript code to execute in the browser.
""")
|
CanvasTexture
|
python
|
apache__airflow
|
providers/google/src/airflow/providers/google/cloud/operators/dataproc.py
|
{
"start": 125862,
"end": 129188
}
|
class ____(GoogleCloudBaseOperator):
"""
Get the batch workload resource representation.
:param batch_id: Required. The ID to use for the batch, which will become the final component
of the batch's resource name.
This value must be 4-63 characters. Valid characters are /[a-z][0-9]-/.
:param region: Required. The Cloud Dataproc region in which to handle the request.
:param project_id: Optional. The ID of the Google Cloud project that the cluster belongs to.
:param retry: A retry object used to retry requests. If ``None`` is specified, requests will not be
retried.
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
``retry`` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
:param gcp_conn_id: The connection ID to use connecting to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
"batch_id",
"region",
"project_id",
"gcp_conn_id",
"impersonation_chain",
)
operator_extra_links = (DataprocBatchLink(),)
def __init__(
self,
*,
batch_id: str,
region: str,
project_id: str = PROVIDE_PROJECT_ID,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
):
super().__init__(**kwargs)
self.batch_id = batch_id
self.region = region
self.project_id = project_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context):
hook = DataprocHook(gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain)
self.log.info("Getting batch: %s", self.batch_id)
batch = hook.get_batch(
batch_id=self.batch_id,
region=self.region,
project_id=self.project_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
project_id = self.project_id or hook.project_id
if project_id:
DataprocBatchLink.persist(
context=context,
project_id=project_id,
region=self.region,
batch_id=self.batch_id,
)
return Batch.to_dict(batch)
|
DataprocGetBatchOperator
|
python
|
gevent__gevent
|
src/greentest/3.14/test_socket.py
|
{
"start": 200041,
"end": 200693
}
|
class ____(ThreadedTCPSocketTest):
def testClose(self):
conn, _ = self.serv.accept()
read, _, _ = select.select([conn], [], [], support.SHORT_TIMEOUT)
self.assertEqual(read, [conn])
self.assertEqual(conn.recv(1), b'x')
conn.close()
# Calling close() many times should be safe.
conn.close()
conn.close()
def _testClose(self):
self.cli.connect((HOST, self.port))
self.cli.send(b'x')
read, _, _ = select.select([self.cli], [], [], support.SHORT_TIMEOUT)
self.assertEqual(read, [self.cli])
self.assertEqual(self.cli.recv(1), b'')
|
TCPCloserTest
|
python
|
pytorch__pytorch
|
torch/nn/modules/fold.py
|
{
"start": 6671,
"end": 13227
}
|
class ____(Module):
(
r"""Extracts sliding local blocks from a batched input tensor.
Consider a batched :attr:`input` tensor of shape :math:`(N, C, *)`,
where :math:`N` is the batch dimension, :math:`C` is the channel dimension,
and :math:`*` represent arbitrary spatial dimensions. This operation flattens
each sliding :attr:`kernel_size`-sized block within the spatial dimensions
of :attr:`input` into a column (i.e., last dimension) of a 3-D :attr:`output`
tensor of shape :math:`(N, C \times \prod(\text{kernel\_size}), L)`, where
:math:`C \times \prod(\text{kernel\_size})` is the total number of values
within each block (a block has :math:`\prod(\text{kernel\_size})` spatial
locations each containing a :math:`C`-channeled vector), and :math:`L` is
the total number of such blocks:
.. math::
L = \prod_d \left\lfloor\frac{\text{spatial\_size}[d] + 2 \times \text{padding}[d] %
- \text{dilation}[d] \times (\text{kernel\_size}[d] - 1) - 1}{\text{stride}[d]} + 1\right\rfloor,
where :math:`\text{spatial\_size}` is formed by the spatial dimensions
of :attr:`input` (:math:`*` above), and :math:`d` is over all spatial
dimensions.
Therefore, indexing :attr:`output` at the last dimension (column dimension)
gives all values within a certain block.
The :attr:`padding`, :attr:`stride` and :attr:`dilation` arguments specify
how the sliding blocks are retrieved.
* :attr:`stride` controls the stride for the sliding blocks.
* :attr:`padding` controls the amount of implicit zero-paddings on both
sides for :attr:`padding` number of points for each dimension before
reshaping.
"""
"""
* :attr:`dilation` controls the spacing between the kernel points; also known as the \u00e0 trous algorithm.
It is harder to describe, but this `link`_ has a nice visualization of what :attr:`dilation` does.
"""
r"""
Args:
kernel_size (int or tuple): the size of the sliding blocks
dilation (int or tuple, optional): a parameter that controls the
stride of elements within the
neighborhood. Default: 1
padding (int or tuple, optional): implicit zero padding to be added on
both sides of input. Default: 0
stride (int or tuple, optional): the stride of the sliding blocks in the input
spatial dimensions. Default: 1
* If :attr:`kernel_size`, :attr:`dilation`, :attr:`padding` or
:attr:`stride` is an int or a tuple of length 1, their values will be
replicated across all spatial dimensions.
* For the case of two input spatial dimensions this operation is sometimes
called ``im2col``.
.. note::
:class:`~torch.nn.Fold` calculates each combined value in the resulting
large tensor by summing all values from all containing blocks.
:class:`~torch.nn.Unfold` extracts the values in the local blocks by
copying from the large tensor. So, if the blocks overlap, they are not
inverses of each other.
In general, folding and unfolding operations are related as
follows. Consider :class:`~torch.nn.Fold` and
:class:`~torch.nn.Unfold` instances created with the same
parameters:
>>> fold_params = dict(kernel_size=..., dilation=..., padding=..., stride=...)
>>> fold = nn.Fold(output_size=..., **fold_params)
>>> unfold = nn.Unfold(**fold_params)
Then for any (supported) ``input`` tensor the following
equality holds:
::
fold(unfold(input)) == divisor * input
where ``divisor`` is a tensor that depends only on the shape
and dtype of the ``input``:
>>> # xdoctest: +SKIP
>>> input_ones = torch.ones(input.shape, dtype=input.dtype)
>>> divisor = fold(unfold(input_ones))
When the ``divisor`` tensor contains no zero elements, then
``fold`` and ``unfold`` operations are inverses of each
other (up to constant divisor).
.. warning::
Currently, only 4-D input tensors (batched image-like tensors) are
supported.
Shape:
- Input: :math:`(N, C, *)`
- Output: :math:`(N, C \times \prod(\text{kernel\_size}), L)` as described above
Examples::
>>> unfold = nn.Unfold(kernel_size=(2, 3))
>>> input = torch.randn(2, 5, 3, 4)
>>> output = unfold(input)
>>> # each patch contains 30 values (2x3=6 vectors, each of 5 channels)
>>> # 4 blocks (2x3 kernels) in total in the 3x4 input
>>> output.size()
torch.Size([2, 30, 4])
>>> # xdoctest: +IGNORE_WANT
>>> # Convolution is equivalent with Unfold + Matrix Multiplication + Fold (or view to output shape)
>>> inp = torch.randn(1, 3, 10, 12)
>>> w = torch.randn(2, 3, 4, 5)
>>> inp_unf = torch.nn.functional.unfold(inp, (4, 5))
>>> out_unf = inp_unf.transpose(1, 2).matmul(w.view(w.size(0), -1).t()).transpose(1, 2)
>>> out = torch.nn.functional.fold(out_unf, (7, 8), (1, 1))
>>> # or equivalently (and avoiding a copy),
>>> # out = out_unf.view(1, 2, 7, 8)
>>> (torch.nn.functional.conv2d(inp, w) - out).abs().max()
tensor(1.9073e-06)
.. _link:
https://github.com/vdumoulin/conv_arithmetic/blob/master/README.md
"""
)
__constants__ = ["kernel_size", "dilation", "padding", "stride"]
kernel_size: _size_any_t
dilation: _size_any_t
padding: _size_any_t
stride: _size_any_t
def __init__(
self,
kernel_size: _size_any_t,
dilation: _size_any_t = 1,
padding: _size_any_t = 0,
stride: _size_any_t = 1,
) -> None:
super().__init__()
self.kernel_size = kernel_size
self.dilation = dilation
self.padding = padding
self.stride = stride
def forward(self, input: Tensor) -> Tensor:
"""
Runs the forward pass.
"""
return F.unfold(
input, self.kernel_size, self.dilation, self.padding, self.stride
)
def extra_repr(self) -> str:
"""
Return the extra representation of the module.
"""
return (
"kernel_size={kernel_size}, dilation={dilation}, padding={padding},"
" stride={stride}".format(**self.__dict__)
)
|
Unfold
|
python
|
apache__airflow
|
providers/standard/tests/unit/standard/operators/test_weekday.py
|
{
"start": 2611,
"end": 14134
}
|
class ____:
"""
Tests for BranchDayOfWeekOperator
"""
@classmethod
def setup_class(cls):
with create_session() as session:
session.query(DagRun).delete()
session.query(TI).delete()
session.query(XCom).delete()
def _assert_task_ids_match_states(self, dr, task_ids_to_states):
"""Helper that asserts task instances with a given id are in a given state"""
tis = dr.get_task_instances()
for ti in tis:
try:
expected_state = task_ids_to_states[ti.task_id]
except KeyError:
raise ValueError(f"Invalid task id {ti.task_id} found!")
else:
assert_msg = f"Task {ti.task_id} has state {ti.state} instead of expected {expected_state}"
assert ti.state == expected_state, assert_msg
@pytest.mark.parametrize(
"weekday", TEST_CASE_BRANCH_FOLLOW_TRUE.values(), ids=TEST_CASE_BRANCH_FOLLOW_TRUE.keys()
)
@time_machine.travel("2021-01-25") # Monday
def test_branch_follow_true(self, weekday, dag_maker):
"""Checks if BranchDayOfWeekOperator follows true branch"""
with dag_maker(
"branch_day_of_week_operator_test", start_date=DEFAULT_DATE, schedule=INTERVAL, serialized=True
):
branch_op = BranchDayOfWeekOperator(
task_id="make_choice",
follow_task_ids_if_true=["branch_1", "branch_2"],
follow_task_ids_if_false="branch_3",
week_day=weekday,
)
branch_1 = EmptyOperator(task_id="branch_1")
branch_2 = EmptyOperator(task_id="branch_2")
branch_3 = EmptyOperator(task_id="branch_3")
branch_1.set_upstream(branch_op)
branch_2.set_upstream(branch_op)
branch_3.set_upstream(branch_op)
dr = dag_maker.create_dagrun(
run_id="manual__",
start_date=timezone.utcnow(),
logical_date=DEFAULT_DATE,
state=State.RUNNING,
data_interval=DataInterval(DEFAULT_DATE, DEFAULT_DATE),
)
if AIRFLOW_V_3_0_1:
from airflow.exceptions import DownstreamTasksSkipped
with pytest.raises(DownstreamTasksSkipped) as exc_info:
dag_maker.run_ti("make_choice", dr)
assert exc_info.value.tasks == [("branch_3", -1)]
else:
dag_maker.run_ti("make_choice", dr)
self._assert_task_ids_match_states(
dr,
{
"make_choice": State.SUCCESS,
"branch_1": State.NONE,
"branch_2": State.NONE,
"branch_3": State.SKIPPED,
},
)
@time_machine.travel("2021-01-25") # Monday
def test_branch_follow_true_with_logical_date(self, dag_maker):
"""Checks if BranchDayOfWeekOperator follows true branch when set use_task_logical_date"""
with dag_maker(
"branch_day_of_week_operator_test", start_date=DEFAULT_DATE, schedule=INTERVAL, serialized=True
):
branch_op = BranchDayOfWeekOperator(
task_id="make_choice",
follow_task_ids_if_true="branch_1",
follow_task_ids_if_false="branch_2",
week_day="Wednesday",
use_task_logical_date=True, # We compare to DEFAULT_DATE which is Wednesday
)
branch_1 = EmptyOperator(task_id="branch_1")
branch_2 = EmptyOperator(task_id="branch_2")
branch_1.set_upstream(branch_op)
branch_2.set_upstream(branch_op)
dr = dag_maker.create_dagrun(
run_id="manual__",
start_date=timezone.utcnow(),
logical_date=DEFAULT_DATE,
state=State.RUNNING,
data_interval=DataInterval(DEFAULT_DATE, DEFAULT_DATE),
)
if AIRFLOW_V_3_0_1:
from airflow.exceptions import DownstreamTasksSkipped
with pytest.raises(DownstreamTasksSkipped) as exc_info:
dag_maker.run_ti("make_choice", dr)
assert exc_info.value.tasks == [("branch_2", -1)]
else:
dag_maker.run_ti("make_choice", dr)
self._assert_task_ids_match_states(
dr,
{
"make_choice": State.SUCCESS,
"branch_1": State.NONE,
"branch_2": State.SKIPPED,
},
)
@pytest.mark.skipif(not AIRFLOW_V_3_0_PLUS, reason="Skip on Airflow < 3.0")
@time_machine.travel("2021-01-25") # Monday
def test_choose_branch_should_use_run_after_when_logical_date_none(self, dag_maker):
with dag_maker(
"branch_day_of_week_operator_test", start_date=DEFAULT_DATE, schedule=INTERVAL, serialized=True
):
branch_op = BranchDayOfWeekOperator(
task_id="make_choice",
follow_task_ids_if_true="branch_1",
follow_task_ids_if_false="branch_2",
week_day="Wednesday",
use_task_logical_date=True, # We compare to DEFAULT_DATE which is Wednesday
)
branch_1 = EmptyOperator(task_id="branch_1")
branch_2 = EmptyOperator(task_id="branch_2")
branch_1.set_upstream(branch_op)
branch_2.set_upstream(branch_op)
dr = dag_maker.create_dagrun(
run_id="manual__",
start_date=timezone.utcnow(),
state=State.RUNNING,
**{"run_after": DEFAULT_DATE},
)
assert branch_op.choose_branch(context={"dag_run": dr}) == "branch_1"
@time_machine.travel("2021-01-25") # Monday
def test_branch_follow_false(self, dag_maker):
"""Checks if BranchDayOfWeekOperator follow false branch"""
with dag_maker(
"branch_day_of_week_operator_test", start_date=DEFAULT_DATE, schedule=INTERVAL, serialized=True
):
branch_op = BranchDayOfWeekOperator(
task_id="make_choice",
follow_task_ids_if_true="branch_1",
follow_task_ids_if_false="branch_2",
week_day="Sunday",
)
branch_1 = EmptyOperator(task_id="branch_1")
branch_2 = EmptyOperator(task_id="branch_2")
branch_1.set_upstream(branch_op)
branch_2.set_upstream(branch_op)
dr = dag_maker.create_dagrun(
run_id="manual__",
start_date=timezone.utcnow(),
logical_date=DEFAULT_DATE,
state=State.RUNNING,
data_interval=DataInterval(DEFAULT_DATE, DEFAULT_DATE),
)
if AIRFLOW_V_3_0_1:
from airflow.exceptions import DownstreamTasksSkipped
with pytest.raises(DownstreamTasksSkipped) as exc_info:
dag_maker.run_ti("make_choice", dr)
assert exc_info.value.tasks == [("branch_1", -1)]
else:
dag_maker.run_ti("make_choice", dr)
self._assert_task_ids_match_states(
dr,
{
"make_choice": State.SUCCESS,
"branch_1": State.SKIPPED,
"branch_2": State.NONE,
},
)
def test_branch_with_no_weekday(self, dag_maker):
"""Check if BranchDayOfWeekOperator raises exception on missing weekday"""
with pytest.raises((TypeError, AirflowException), match="missing keyword argument 'week_day'"):
with dag_maker(
"branch_day_of_week_operator_test",
start_date=DEFAULT_DATE,
schedule=INTERVAL,
serialized=True,
):
BranchDayOfWeekOperator(
task_id="make_choice",
follow_task_ids_if_true="branch_1",
follow_task_ids_if_false="branch_2",
)
def test_branch_with_invalid_type(self, dag_maker):
"""Check if BranchDayOfWeekOperator raises exception on unsupported weekday type"""
invalid_week_day = 5
with pytest.raises(
TypeError,
match=f"Unsupported Type for week_day parameter: {type(invalid_week_day)}."
"Input should be iterable type:"
"str, set, list, dict or Weekday enum type",
):
with dag_maker(
"branch_day_of_week_operator_test",
start_date=DEFAULT_DATE,
schedule=INTERVAL,
serialized=True,
):
BranchDayOfWeekOperator(
task_id="make_choice",
follow_task_ids_if_true="branch_1",
follow_task_ids_if_false="branch_2",
week_day=invalid_week_day,
)
@pytest.mark.parametrize(
("_", "week_day", "fail_msg"),
[
("string", "Thsday", "Thsday"),
("list", ["Monday", "Thsday"], "Thsday"),
("set", {WeekDay.MONDAY, "Thsday"}, "Thsday"),
],
)
def test_weekday_branch_invalid_weekday_value(self, _, week_day, fail_msg, dag_maker):
"""Check if BranchDayOfWeekOperator raises exception on wrong value of weekday"""
with pytest.raises(AttributeError, match=f'Invalid Week Day passed: "{fail_msg}"'):
with dag_maker(
"branch_day_of_week_operator_test",
start_date=DEFAULT_DATE,
schedule=INTERVAL,
serialized=True,
):
BranchDayOfWeekOperator(
task_id="make_choice",
follow_task_ids_if_true="branch_1",
follow_task_ids_if_false="branch_2",
week_day=week_day,
)
@time_machine.travel("2021-01-25") # Monday
def test_branch_xcom_push_true_branch(self, dag_maker):
"""Check if BranchDayOfWeekOperator push to xcom value of follow_task_ids_if_true"""
with dag_maker(
"branch_day_of_week_operator_test", start_date=DEFAULT_DATE, schedule=INTERVAL, serialized=True
):
branch_op = BranchDayOfWeekOperator(
task_id="make_choice",
follow_task_ids_if_true="branch_1",
follow_task_ids_if_false="branch_2",
week_day="Monday",
)
branch_1 = EmptyOperator(task_id="branch_1")
branch_2 = EmptyOperator(task_id="branch_2")
branch_1.set_upstream(branch_op)
branch_2.set_upstream(branch_op)
dr = dag_maker.create_dagrun(
run_id="manual__",
start_date=timezone.utcnow(),
logical_date=DEFAULT_DATE,
state=State.RUNNING,
data_interval=DataInterval(DEFAULT_DATE, DEFAULT_DATE),
)
branch_op_ti = dr.get_task_instance(branch_op.task_id)
if AIRFLOW_V_3_0_1:
from airflow.exceptions import DownstreamTasksSkipped
with pytest.raises(DownstreamTasksSkipped) as exc_info:
branch_op_ti.run()
assert exc_info.value.tasks == [("branch_2", -1)]
else:
dag_maker.run_ti("make_choice", dr)
assert branch_op_ti.xcom_pull(task_ids="make_choice", key=XCOM_SKIPMIXIN_KEY) == {
XCOM_SKIPMIXIN_FOLLOWED: ["branch_1"]
}
|
TestBranchDayOfWeekOperator
|
python
|
pytorch__pytorch
|
test/jit/test_hooks_modules.py
|
{
"start": 1201,
"end": 1551
}
|
class ____(torch.nn.Module):
def __init__(self, name: str, submodule_name: str):
super().__init__()
self.name = name
self.submodule = SubmoduleForwardSingleInput(submodule_name)
def forward(self, input: str):
input = input + "_outermod"
return self.submodule.forward(input)
|
ModuleDirectforwardSubmodCall
|
python
|
pytorch__pytorch
|
torch/_numpy/_funcs.py
|
{
"start": 1587,
"end": 2097
}
|
class ____:
"""
Written by Konrad Hinsen <hinsen@cnrs-orleans.fr>
last revision: 1999-7-23
Cosmetic changes by T. Oliphant 2001
"""
def __init__(self, maketuple):
self.maketuple = maketuple
def __getitem__(self, item):
if self.maketuple and not isinstance(item, tuple):
return (item,)
else:
return item
index_exp = IndexExpression(maketuple=True)
s_ = IndexExpression(maketuple=False)
__all__ += ["index_exp", "s_"]
|
IndexExpression
|
python
|
matplotlib__matplotlib
|
lib/matplotlib/cbook.py
|
{
"start": 69842,
"end": 82788
}
|
class ____(collections.abc.MutableSet):
def __init__(self):
self._od = collections.OrderedDict()
def __contains__(self, key):
return key in self._od
def __iter__(self):
return iter(self._od)
def __len__(self):
return len(self._od)
def add(self, key):
self._od.pop(key, None)
self._od[key] = None
def discard(self, key):
self._od.pop(key, None)
# Agg's buffers are unmultiplied RGBA8888, which neither PyQt<=5.1 nor cairo
# support; however, both do support premultiplied ARGB32.
def _premultiplied_argb32_to_unmultiplied_rgba8888(buf):
"""
Convert a premultiplied ARGB32 buffer to an unmultiplied RGBA8888 buffer.
"""
rgba = np.take( # .take() ensures C-contiguity of the result.
buf,
[2, 1, 0, 3] if sys.byteorder == "little" else [1, 2, 3, 0], axis=2)
rgb = rgba[..., :-1]
alpha = rgba[..., -1]
# Un-premultiply alpha. The formula is the same as in cairo-png.c.
mask = alpha != 0
for channel in np.rollaxis(rgb, -1):
channel[mask] = (
(channel[mask].astype(int) * 255 + alpha[mask] // 2)
// alpha[mask])
return rgba
def _unmultiplied_rgba8888_to_premultiplied_argb32(rgba8888):
"""
Convert an unmultiplied RGBA8888 buffer to a premultiplied ARGB32 buffer.
"""
if sys.byteorder == "little":
argb32 = np.take(rgba8888, [2, 1, 0, 3], axis=2)
rgb24 = argb32[..., :-1]
alpha8 = argb32[..., -1:]
else:
argb32 = np.take(rgba8888, [3, 0, 1, 2], axis=2)
alpha8 = argb32[..., :1]
rgb24 = argb32[..., 1:]
# Only bother premultiplying when the alpha channel is not fully opaque,
# as the cost is not negligible. The unsafe cast is needed to do the
# multiplication in-place in an integer buffer.
if alpha8.min() != 0xff:
np.multiply(rgb24, alpha8 / 0xff, out=rgb24, casting="unsafe")
return argb32
def _get_nonzero_slices(buf):
"""
Return the bounds of the nonzero region of a 2D array as a pair of slices.
``buf[_get_nonzero_slices(buf)]`` is the smallest sub-rectangle in *buf*
that encloses all non-zero entries in *buf*. If *buf* is fully zero, then
``(slice(0, 0), slice(0, 0))`` is returned.
"""
x_nz, = buf.any(axis=0).nonzero()
y_nz, = buf.any(axis=1).nonzero()
if len(x_nz) and len(y_nz):
l, r = x_nz[[0, -1]]
b, t = y_nz[[0, -1]]
return slice(b, t + 1), slice(l, r + 1)
else:
return slice(0, 0), slice(0, 0)
def _pformat_subprocess(command):
"""Pretty-format a subprocess command for printing/logging purposes."""
return (command if isinstance(command, str)
else " ".join(shlex.quote(os.fspath(arg)) for arg in command))
def _check_and_log_subprocess(command, logger, **kwargs):
    """
    Run *command*, returning its stdout output if it succeeds.

    If it fails (exits with nonzero return code), raise an exception whose
    text includes the failed command and captured stdout and stderr output.

    Regardless of the return code, the command is logged at DEBUG level on
    *logger*.  In case of success, the output is likewise logged.
    """
    logger.debug('%s', _pformat_subprocess(command))
    proc = subprocess.run(command, capture_output=True, **kwargs)
    if proc.returncode:
        def _as_text(stream):
            # Captured output is bytes unless *kwargs* requested text mode.
            return stream.decode() if isinstance(stream, bytes) else stream
        raise RuntimeError(
            f"The command\n"
            f"    {_pformat_subprocess(command)}\n"
            f"failed and generated the following output:\n"
            f"{_as_text(proc.stdout)}\n"
            f"and the following error:\n"
            f"{_as_text(proc.stderr)}")
    if proc.stdout:
        logger.debug("stdout:\n%s", proc.stdout)
    if proc.stderr:
        logger.debug("stderr:\n%s", proc.stderr)
    return proc.stdout
def _setup_new_guiapp():
    """
    Perform OS-dependent setup when Matplotlib creates a new GUI application.
    """
    # Windows: If no explicit app user model id has been set yet (so we're not
    # already embedded), then set it to "matplotlib", so that taskbar icons are
    # correct.
    # NOTE(review): relies on the Get call raising OSError when no id is set
    # (and presumably on non-Windows builds as well) -- verify against
    # _c_internal_utils.
    try:
        _c_internal_utils.Win32_GetCurrentProcessExplicitAppUserModelID()
    except OSError:
        _c_internal_utils.Win32_SetCurrentProcessExplicitAppUserModelID(
            "matplotlib")
def _format_approx(number, precision):
"""
Format the number with at most the number of decimals given as precision.
Remove trailing zeros and possibly the decimal point.
"""
return f'{number:.{precision}f}'.rstrip('0').rstrip('.') or '0'
def _g_sig_digits(value, delta):
"""
Return the number of significant digits to %g-format *value*, assuming that
it is known with an error of *delta*.
"""
# For inf or nan, the precision doesn't matter.
if not math.isfinite(value):
return 0
if delta == 0:
if value == 0:
# if both value and delta are 0, np.spacing below returns 5e-324
# which results in rather silly results
return 3
# delta = 0 may occur when trying to format values over a tiny range;
# in that case, replace it by the distance to the closest float.
delta = abs(np.spacing(value))
# If e.g. value = 45.67 and delta = 0.02, then we want to round to 2 digits
# after the decimal point (floor(log10(0.02)) = -2); 45.67 contributes 2
# digits before the decimal point (floor(log10(45.67)) + 1 = 2): the total
# is 4 significant digits. A value of 0 contributes 1 "digit" before the
# decimal point.
return max(
0,
(math.floor(math.log10(abs(value))) + 1 if value else 1)
- math.floor(math.log10(delta)))
def _unikey_or_keysym_to_mplkey(unikey, keysym):
"""
Convert a Unicode key or X keysym to a Matplotlib key name.
The Unicode key is checked first; this avoids having to list most printable
keysyms such as ``EuroSign``.
"""
# For non-printable characters, gtk3 passes "\0" whereas tk passes an "".
if unikey and unikey.isprintable():
return unikey
key = keysym.lower()
if key.startswith("kp_"): # keypad_x (including kp_enter).
key = key[3:]
if key.startswith("page_"): # page_{up,down}
key = key.replace("page_", "page")
if key.endswith(("_l", "_r")): # alt_l, ctrl_l, shift_l.
key = key[:-2]
if sys.platform == "darwin" and key == "meta":
# meta should be reported as command on mac
key = "cmd"
key = {
"return": "enter",
"prior": "pageup", # Used by tk.
"next": "pagedown", # Used by tk.
}.get(key, key)
return key
@functools.cache
def _make_class_factory(mixin_class, fmt, attr_name=None):
    """
    Return a function that creates picklable classes inheriting from a mixin.

    After ::

        factory = _make_class_factory(FooMixin, fmt, attr_name)
        FooAxes = factory(Axes)

    ``Foo`` is a class that inherits from ``FooMixin`` and ``Axes`` and **is
    picklable** (picklability is what differentiates this from a plain call to
    `type`).  Its ``__name__`` is set to ``fmt.format(Axes.__name__)`` and the
    base class is stored in the ``attr_name`` attribute, if not None.

    Moreover, the return value of ``factory`` is memoized: calls with the same
    ``Axes`` class always return the same subclass.
    """

    @functools.cache
    def class_factory(axes_class):
        # if we have already wrapped this class, declare victory!
        if issubclass(axes_class, mixin_class):
            return axes_class
        # The parameter is named "axes_class" for backcompat but is really just
        # a base class; no axes semantics are used.
        base_class = axes_class

        class subcls(mixin_class, base_class):
            # Better approximation than __module__ = "matplotlib.cbook".
            __module__ = mixin_class.__module__

            def __reduce__(self):
                # Pickle by reconstructing through the module-level helper, so
                # unpickling does not need to import the dynamically-created
                # class by name (it has no importable module attribute).
                return (_picklable_class_constructor,
                        (mixin_class, fmt, attr_name, base_class),
                        self.__getstate__())

        # Give the generated class a readable name derived from the base.
        subcls.__name__ = subcls.__qualname__ = fmt.format(base_class.__name__)
        if attr_name is not None:
            setattr(subcls, attr_name, base_class)
        return subcls

    # Make the factory itself appear to belong to the mixin's module.
    class_factory.__module__ = mixin_class.__module__
    return class_factory
def _picklable_class_constructor(mixin_class, fmt, attr_name, base_class):
    """Internal helper for _make_class_factory."""
    # Rebuild the dynamically-generated class, then allocate an empty
    # instance; pickle fills in the state afterwards.
    cls = _make_class_factory(mixin_class, fmt, attr_name)(base_class)
    return cls.__new__(cls)
def _is_torch_array(x):
"""Return whether *x* is a PyTorch Tensor."""
try:
# We're intentionally not attempting to import torch. If somebody
# has created a torch array, torch should already be in sys.modules.
tp = sys.modules.get("torch").Tensor
except AttributeError:
return False # Module not imported or a nonstandard module with no Tensor attr.
return (isinstance(tp, type) # Just in case it's a very nonstandard module.
and isinstance(x, tp))
def _is_jax_array(x):
"""Return whether *x* is a JAX Array."""
try:
# We're intentionally not attempting to import jax. If somebody
# has created a jax array, jax should already be in sys.modules.
tp = sys.modules.get("jax").Array
except AttributeError:
return False # Module not imported or a nonstandard module with no Array attr.
return (isinstance(tp, type) # Just in case it's a very nonstandard module.
and isinstance(x, tp))
def _is_pandas_dataframe(x):
"""Check if *x* is a Pandas DataFrame."""
try:
# We're intentionally not attempting to import Pandas. If somebody
# has created a Pandas DataFrame, Pandas should already be in sys.modules.
tp = sys.modules.get("pandas").DataFrame
except AttributeError:
return False # Module not imported or a nonstandard module with no Array attr.
return (isinstance(tp, type) # Just in case it's a very nonstandard module.
and isinstance(x, tp))
def _is_tensorflow_array(x):
"""Return whether *x* is a TensorFlow Tensor or Variable."""
try:
# We're intentionally not attempting to import TensorFlow. If somebody
# has created a TensorFlow array, TensorFlow should already be in
# sys.modules we use `is_tensor` to not depend on the class structure
# of TensorFlow arrays, as `tf.Variables` are not instances of
# `tf.Tensor` (they both convert the same way).
is_tensor = sys.modules.get("tensorflow").is_tensor
except AttributeError:
return False
try:
return is_tensor(x)
except Exception:
return False # Just in case it's a very nonstandard module.
def _unpack_to_numpy(x):
    """Internal helper to extract data from e.g. pandas and xarray objects."""
    if isinstance(x, np.ndarray):
        # Already numpy; return unchanged.
        return x
    if hasattr(x, "to_numpy"):
        # Assume that any to_numpy() method actually returns a numpy array.
        return x.to_numpy()
    values = getattr(x, "values", None)
    # A dict, for example, also has a 'values' attribute, but it is a method,
    # not a property holding data -- only accept actual arrays here.
    if isinstance(values, np.ndarray):
        return values
    if _is_torch_array(x) or _is_jax_array(x) or _is_tensorflow_array(x):
        # Use np.asarray() rather than calling __array__() directly: the
        # latter is only one of many conversion hooks and the last resort
        # (see numpy's interoperability docs), so let arrays do better if
        # they can.
        converted = np.asarray(x)
        # In case np.asarray does not return a numpy array in the future.
        if isinstance(converted, np.ndarray):
            return converted
    return x
def _auto_format_str(fmt, value):
"""
Apply *value* to the format string *fmt*.
This works both with unnamed %-style formatting and
unnamed {}-style formatting. %-style formatting has priority.
If *fmt* is %-style formattable that will be used. Otherwise,
{}-formatting is applied. Strings without formatting placeholders
are passed through as is.
Examples
--------
>>> _auto_format_str('%.2f m', 0.2)
'0.20 m'
>>> _auto_format_str('{} m', 0.2)
'0.2 m'
>>> _auto_format_str('const', 0.2)
'const'
>>> _auto_format_str('%d or {}', 0.2)
'0 or {}'
"""
try:
return fmt % (value,)
except (TypeError, ValueError):
return fmt.format(value)
|
_OrderedSet
|
python
|
PrefectHQ__prefect
|
src/prefect/server/api/clients.py
|
{
"start": 2388,
"end": 8190
}
|
class ____(BaseClient):
async def read_deployment_raw(self, deployment_id: UUID) -> Response:
return await self._http_client.get(f"/deployments/{deployment_id}")
async def read_deployment(
self, deployment_id: UUID
) -> Optional[DeploymentResponse]:
try:
response = await self.read_deployment_raw(deployment_id)
response.raise_for_status()
except httpx.HTTPStatusError as e:
if e.response.status_code == status.HTTP_404_NOT_FOUND:
return None
raise
return DeploymentResponse.model_validate(response.json())
async def read_flow_raw(self, flow_id: UUID) -> Response:
return await self._http_client.get(f"/flows/{flow_id}")
async def create_flow_run(
self, deployment_id: UUID, flow_run_create: DeploymentFlowRunCreate
) -> Response:
return await self._http_client.post(
f"/deployments/{deployment_id}/create_flow_run",
json=flow_run_create.model_dump(mode="json"),
)
async def read_flow_run_raw(self, flow_run_id: UUID) -> Response:
return await self._http_client.get(f"/flow_runs/{flow_run_id}")
async def read_task_run_raw(self, task_run_id: UUID) -> Response:
return await self._http_client.get(f"/task_runs/{task_run_id}")
async def resume_flow_run(self, flow_run_id: UUID) -> OrchestrationResult:
response = await self._http_client.post(
f"/flow_runs/{flow_run_id}/resume",
)
response.raise_for_status()
return OrchestrationResult.model_validate(response.json())
async def pause_deployment(self, deployment_id: UUID) -> Response:
return await self._http_client.post(
f"/deployments/{deployment_id}/pause_deployment",
)
async def resume_deployment(self, deployment_id: UUID) -> Response:
return await self._http_client.post(
f"/deployments/{deployment_id}/resume_deployment",
)
async def set_flow_run_state(
self, flow_run_id: UUID, state: StateCreate
) -> Response:
return await self._http_client.post(
f"/flow_runs/{flow_run_id}/set_state",
json={
"state": state.model_dump(mode="json"),
"force": False,
},
)
async def pause_work_pool(self, work_pool_name: str) -> Response:
return await self._http_client.patch(
f"/work_pools/{quote(work_pool_name)}", json={"is_paused": True}
)
async def resume_work_pool(self, work_pool_name: str) -> Response:
return await self._http_client.patch(
f"/work_pools/{quote(work_pool_name)}", json={"is_paused": False}
)
async def read_work_pool_raw(self, work_pool_id: UUID) -> Response:
return await self._http_client.post(
"/work_pools/filter",
json={"work_pools": {"id": {"any_": [str(work_pool_id)]}}},
)
async def read_work_pool(self, work_pool_id: UUID) -> Optional[WorkPool]:
response = await self.read_work_pool_raw(work_pool_id)
response.raise_for_status()
pools = pydantic.TypeAdapter(List[WorkPool]).validate_python(response.json())
return pools[0] if pools else None
async def read_work_queue_raw(self, work_queue_id: UUID) -> Response:
return await self._http_client.get(f"/work_queues/{work_queue_id}")
async def read_work_queue_status_raw(self, work_queue_id: UUID) -> Response:
return await self._http_client.get(f"/work_queues/{work_queue_id}/status")
async def pause_work_queue(self, work_queue_id: UUID) -> Response:
return await self._http_client.patch(
f"/work_queues/{work_queue_id}",
json={"is_paused": True},
)
async def resume_work_queue(self, work_queue_id: UUID) -> Response:
return await self._http_client.patch(
f"/work_queues/{work_queue_id}",
json={"is_paused": False},
)
async def read_block_document_raw(
self,
block_document_id: UUID,
include_secrets: bool = True,
) -> Response:
return await self._http_client.get(
f"/block_documents/{block_document_id}",
params=dict(include_secrets=include_secrets),
)
VARIABLE_PAGE_SIZE = 200
MAX_VARIABLES_PER_WORKSPACE = 1000
async def read_workspace_variables(
self, names: Optional[List[str]] = None
) -> Dict[str, StrictVariableValue]:
variables: Dict[str, StrictVariableValue] = {}
offset = 0
filter = VariableFilter()
if names is not None and not names:
return variables
elif names is not None:
filter.name = VariableFilterName(any_=list(set(names)))
for offset in range(
0, self.MAX_VARIABLES_PER_WORKSPACE, self.VARIABLE_PAGE_SIZE
):
response = await self._http_client.post(
"/variables/filter",
json={
"variables": filter.model_dump(),
"limit": self.VARIABLE_PAGE_SIZE,
"offset": offset,
},
)
if response.status_code >= 300:
response.raise_for_status()
results = response.json()
for variable in results:
variables[variable["name"]] = variable["value"]
if len(results) < self.VARIABLE_PAGE_SIZE:
break
return variables
async def read_concurrency_limit_v2_raw(
self, concurrency_limit_id: UUID
) -> Response:
return await self._http_client.get(
f"/v2/concurrency_limits/{concurrency_limit_id}"
)
|
OrchestrationClient
|
python
|
ray-project__ray
|
python/ray/serve/_private/test_utils.py
|
{
"start": 21023,
"end": 27145
}
|
class ____:
def __init__(self, target: int):
self.count = 0
self.target = target
self.ready_event = asyncio.Event()
def inc(self):
self.count += 1
if self.count == self.target:
self.ready_event.set()
async def wait(self):
await self.ready_event.wait()
def tlog(s: str, level: str = "INFO"):
"""Convenient logging method for testing."""
now = datetime.datetime.now().strftime("%H:%M:%S.%f")[:-3]
print(f"[{level}] {now} {s}")
def check_target_groups_ready(
client: ServeControllerClient,
app_name: str,
protocol: Union[str, RequestProtocol] = RequestProtocol.HTTP,
):
"""Wait for target groups to be ready for the given app and protocol.
Target groups are ready when there are at least one target for the given protocol. And it's
possible that target groups are not ready immediately. An example is when the controller
is recovering from a crash.
"""
target_groups = ray.get(client._controller.get_target_groups.remote(app_name))
target_groups = [
target_group
for target_group in target_groups
if target_group.protocol == protocol
]
all_targets = [
target for target_group in target_groups for target in target_group.targets
]
return len(all_targets) > 0
def get_application_urls(
protocol: Union[str, RequestProtocol] = RequestProtocol.HTTP,
app_name: str = SERVE_DEFAULT_APP_NAME,
use_localhost: bool = True,
is_websocket: bool = False,
exclude_route_prefix: bool = False,
from_proxy_manager: bool = False,
) -> List[str]:
"""Get the URL of the application.
Args:
protocol: The protocol to use for the application.
app_name: The name of the application.
use_localhost: Whether to use localhost instead of the IP address.
Set to True if Serve deployments are not exposed publicly or
for low latency benchmarking.
is_websocket: Whether the url should be served as a websocket.
exclude_route_prefix: The route prefix to exclude from the application.
from_proxy_manager: Whether the caller is a proxy manager.
Returns:
The URLs of the application.
"""
client = _get_global_client(_health_check_controller=True)
serve_details = client.get_serve_details()
assert (
app_name in serve_details["applications"]
), f"App {app_name} not found in serve details. Use this method only when the app is known to be running."
route_prefix = serve_details["applications"][app_name]["route_prefix"]
# route_prefix is set to None when route_prefix value is specifically set to None
# in the config used to deploy the app.
if exclude_route_prefix or route_prefix is None:
route_prefix = ""
if isinstance(protocol, str):
protocol = RequestProtocol(protocol)
target_groups: List[TargetGroup] = ray.get(
client._controller.get_target_groups.remote(app_name, from_proxy_manager)
)
target_groups = [
target_group
for target_group in target_groups
if target_group.protocol == protocol
]
if len(target_groups) == 0:
raise ValueError(
f"No target group found for app {app_name} with protocol {protocol} and route prefix {route_prefix}"
)
urls = []
for target_group in target_groups:
for target in target_group.targets:
ip = "localhost" if use_localhost else target.ip
if protocol == RequestProtocol.HTTP:
scheme = "ws" if is_websocket else "http"
url = f"{scheme}://{build_address(ip, target.port)}{route_prefix}"
elif protocol == RequestProtocol.GRPC:
if is_websocket:
raise ValueError(
"is_websocket=True is not supported with gRPC protocol."
)
url = build_address(ip, target.port)
else:
raise ValueError(f"Unsupported protocol: {protocol}")
url = url.rstrip("/")
urls.append(url)
return urls
def get_application_url(
protocol: Union[str, RequestProtocol] = RequestProtocol.HTTP,
app_name: str = SERVE_DEFAULT_APP_NAME,
use_localhost: bool = True,
is_websocket: bool = False,
exclude_route_prefix: bool = False,
from_proxy_manager: bool = False,
) -> str:
"""Get the URL of the application.
Args:
protocol: The protocol to use for the application.
app_name: The name of the application.
use_localhost: Whether to use localhost instead of the IP address.
Set to True if Serve deployments are not exposed publicly or
for low latency benchmarking.
is_websocket: Whether the url should be served as a websocket.
exclude_route_prefix: The route prefix to exclude from the application.
from_proxy_manager: Whether the caller is a proxy manager.
Returns:
The URL of the application. If there are multiple URLs, a random one is returned.
"""
return random.choice(
get_application_urls(
protocol,
app_name,
use_localhost,
is_websocket,
exclude_route_prefix,
from_proxy_manager,
)
)
def check_running(app_name: str = SERVE_DEFAULT_APP_NAME):
assert serve.status().applications[app_name].status == ApplicationStatus.RUNNING
return True
def request_with_retries(timeout=30, app_name=SERVE_DEFAULT_APP_NAME):
result_holder = {"resp": None}
def _attempt() -> bool:
try:
url = get_application_url("HTTP", app_name=app_name)
result_holder["resp"] = httpx.get(url, timeout=timeout)
return True
except (httpx.RequestError, IndexError):
return False
try:
wait_for_condition(_attempt, timeout=timeout)
return result_holder["resp"]
except RuntimeError as e:
# Preserve previous API by raising TimeoutError on expiry
raise TimeoutError from e
|
Counter
|
python
|
Textualize__textual
|
docs/examples/styles/grid_size_columns.py
|
{
"start": 100,
"end": 393
}
|
class ____(App):
CSS_PATH = "grid_size_columns.tcss"
def compose(self):
yield Grid(
Label("1"),
Label("2"),
Label("3"),
Label("4"),
Label("5"),
)
if __name__ == "__main__":
app = MyApp()
app.run()
|
MyApp
|
python
|
google__pytype
|
pytype/tools/xref/parse_args.py
|
{
"start": 209,
"end": 2642
}
|
class ____(arg_parser.Parser):
"""Subclass the tool parser to retain the raw input field."""
def process(self, tool_args, pytype_args):
# Needed for the debug indexer
tool_args.raw_input = pytype_args.input[0]
def make_parser():
"""Make parser for command line args.
Returns:
A Parser object.
"""
def add_kythe_field(parser, field):
parser.add_argument(
"--" + field, dest=field, type=str, action="store", default="",
help="Part of kythe's file-level vname proto.")
parser = argparse.ArgumentParser(usage="%(prog)s [options] input")
add_kythe_field(parser, "kythe_corpus")
add_kythe_field(parser, "kythe_root")
add_kythe_field(parser, "kythe_path")
# For the debug indexer
parser.add_argument("--show-types", action="store_true",
dest="show_types", default=None,
help="Display inferred types.")
parser.add_argument("--show-kythe", action="store_true",
dest="show_kythe", default=None,
help="Display kythe facts.")
parser.add_argument("--show-spans", action="store_true",
dest="show_spans", default=None,
help="Display kythe spans.")
# Don't index builtins and stdlib.
parser.add_argument("--skip-stdlib", action="store_true",
dest="skip_stdlib", default=None,
help="Display inferred types.")
# Add options from pytype-single.
wrapper = datatypes.ParserWrapper(parser)
pytype_config.add_basic_options(wrapper)
with wrapper.add_only(["--imports_info", "--debug"]):
pytype_config.add_infrastructure_options(wrapper)
pytype_config.add_debug_options(wrapper)
wrapper.add_argument("input", metavar="input", nargs=1,
help="A .py file to index")
return XrefParser(parser, pytype_single_args=wrapper.actions)
def parse_args(argv):
"""Parse command line args.
Arguments:
argv: Raw command line args, typically sys.argv[1:]
Returns:
A tuple of (
parsed_args: argparse.Namespace,
kythe_args: kythe.Args,
pytype_options: pytype.config.Options)
"""
parser = make_parser()
args = parser.parse_args(argv)
t = args.tool_args
kythe_args = kythe.Args(
corpus=t.kythe_corpus, root=t.kythe_root, path=t.kythe_path,
skip_stdlib=t.skip_stdlib
)
return (args.all_args, kythe_args, args.pytype_opts)
|
XrefParser
|
python
|
openai__openai-python
|
src/openai/_module_client.py
|
{
"start": 2790,
"end": 2929
}
|
class ____(LazyProxy["Responses"]):
@override
def __load__(self) -> Responses:
return _load_client().responses
|
ResponsesProxy
|
python
|
getsentry__sentry
|
src/sentry/search/events/fields.py
|
{
"start": 44940,
"end": 45267
}
|
class ____(StringArg):
def normalize(self, value: str, params: ParamsType, combinator: Combinator | None) -> str:
value = super().normalize(value, params, combinator)
# SnQL interprets string types as string, so strip the
# quotes added in StringArg.normalize.
return value[1:-1]
|
SnQLStringArg
|
python
|
urllib3__urllib3
|
test/with_dummyserver/test_https.py
|
{
"start": 46525,
"end": 46672
}
|
class ____(BaseTestHTTPS):
tls_protocol_name = "TLSv1.1"
certs = TLSv1_1_CERTS
@pytest.mark.usefixtures("requires_tlsv1_2")
|
TestHTTPS_TLSv1_1
|
python
|
great-expectations__great_expectations
|
great_expectations/data_context/data_context/cloud_data_context.py
|
{
"start": 3816,
"end": 3906
}
|
class ____:
user_id: uuid.UUID
workspaces: list[Workspace]
@public_api
|
CloudUserInfo
|
python
|
doocs__leetcode
|
solution/3000-3099/3066.Minimum Operations to Exceed Threshold Value II/Solution.py
|
{
"start": 0,
"end": 286
}
|
class ____:
def minOperations(self, nums: List[int], k: int) -> int:
heapify(nums)
ans = 0
while len(nums) > 1 and nums[0] < k:
x, y = heappop(nums), heappop(nums)
heappush(nums, x * 2 + y)
ans += 1
return ans
|
Solution
|
python
|
catalyst-team__catalyst
|
catalyst/callbacks/misc.py
|
{
"start": 12699,
"end": 15039
}
|
class ____(Callback):
"""Executes only a pipeline part from the run.
Args:
num_batch_steps: number of batches to iterate in epoch
num_epoch_steps: number of epoch to perform in an experiment
Minimal working example (Notebook API):
.. code-block:: python
import torch
from torch.utils.data import DataLoader, TensorDataset
from catalyst import dl
# data
num_samples, num_features = int(1e4), int(1e1)
X, y = torch.rand(num_samples, num_features), torch.rand(num_samples)
dataset = TensorDataset(X, y)
loader = DataLoader(dataset, batch_size=32, num_workers=1)
loaders = {"train": loader, "valid": loader}
# model, criterion, optimizer, scheduler
model = torch.nn.Linear(num_features, 1)
criterion = torch.nn.MSELoss()
optimizer = torch.optim.Adam(model.parameters())
scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, [3, 6])
# model training
runner = dl.SupervisedRunner()
runner.train(
model=model,
criterion=criterion,
optimizer=optimizer,
scheduler=scheduler,
loaders=loaders,
logdir="./logdir",
num_epochs=8,
verbose=True,
callbacks=[
dl.CheckRunCallback(num_batch_steps=3, num_epoch_steps=3)
]
)
"""
def __init__(self, num_batch_steps: int = 3, num_epoch_steps: int = 3):
"""Init."""
super().__init__(order=CallbackOrder.external)
self.num_batch_steps = num_batch_steps
self.num_epoch_steps = num_epoch_steps
def on_batch_end(self, runner: "IRunner"):
"""Check if iterated specified number of batches.
Args:
runner: current runner
"""
if runner.loader_batch_step >= self.num_batch_steps:
runner.need_early_stop = True
def on_epoch_end(self, runner: "IRunner"):
"""Check if iterated specified number of epochs.
Args:
runner: current runner
"""
if runner.epoch_step >= self.num_epoch_steps:
runner.need_early_stop = True
__all__ = [
"CheckRunCallback",
"EarlyStoppingCallback",
"TimerCallback",
"TqdmCallback",
]
|
CheckRunCallback
|
python
|
microsoft__pyright
|
packages/pyright-internal/src/tests/samples/constructor26.py
|
{
"start": 525,
"end": 1186
}
|
class ____(Generic[T, U]):
def __init__(self):
pass
def test2(self) -> None:
x1: Test2[U, T]
x2: Test2[T, T]
x3: Test2[T, U]
x1 = Test2[U, T]()
# This should generate an error.
x2 = Test2[U, T]()
# This should generate an error.
x3 = Test2[U, T]()
# This should generate an error.
x1 = Test2[T, T]()
x2 = Test2[T, T]()
# This should generate an error.
x3 = Test2[T, T]()
# This should generate an error.
x1 = Test2[T, U]()
# This should generate an error.
x2 = Test2[T, U]()
x3 = Test2[T, U]()
|
Test2
|
python
|
scipy__scipy
|
scipy/optimize/_nonlin.py
|
{
"start": 37726,
"end": 38816
}
|
class ____(GenericBroyden):
"""
Find a root of a function, using a scalar Jacobian approximation.
.. warning::
This algorithm may be useful for specific problems, but whether
it will work may depend strongly on the problem.
Parameters
----------
%(params_basic)s
alpha : float, optional
The Jacobian approximation is (-1/alpha).
%(params_extra)s
See Also
--------
root : Interface to root finding algorithms for multivariate
functions. See ``method='linearmixing'`` in particular.
"""
def __init__(self, alpha=None):
GenericBroyden.__init__(self)
self.alpha = alpha
def solve(self, f, tol=0):
return -f*self.alpha
def matvec(self, f):
return -f/self.alpha
def rsolve(self, f, tol=0):
return -f*np.conj(self.alpha)
def rmatvec(self, f):
return -f/np.conj(self.alpha)
def todense(self):
return np.diag(np.full(self.shape[0], -1/self.alpha))
def _update(self, x, f, dx, df, dx_norm, df_norm):
pass
|
LinearMixing
|
python
|
kubernetes-client__python
|
kubernetes/client/models/v1_validating_admission_policy_status.py
|
{
"start": 383,
"end": 5781
}
|
class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'conditions': 'list[V1Condition]',
'observed_generation': 'int',
'type_checking': 'V1TypeChecking'
}
attribute_map = {
'conditions': 'conditions',
'observed_generation': 'observedGeneration',
'type_checking': 'typeChecking'
}
def __init__(self, conditions=None, observed_generation=None, type_checking=None, local_vars_configuration=None): # noqa: E501
"""V1ValidatingAdmissionPolicyStatus - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._conditions = None
self._observed_generation = None
self._type_checking = None
self.discriminator = None
if conditions is not None:
self.conditions = conditions
if observed_generation is not None:
self.observed_generation = observed_generation
if type_checking is not None:
self.type_checking = type_checking
@property
def conditions(self):
"""Gets the conditions of this V1ValidatingAdmissionPolicyStatus. # noqa: E501
The conditions represent the latest available observations of a policy's current state. # noqa: E501
:return: The conditions of this V1ValidatingAdmissionPolicyStatus. # noqa: E501
:rtype: list[V1Condition]
"""
return self._conditions
@conditions.setter
def conditions(self, conditions):
"""Sets the conditions of this V1ValidatingAdmissionPolicyStatus.
The conditions represent the latest available observations of a policy's current state. # noqa: E501
:param conditions: The conditions of this V1ValidatingAdmissionPolicyStatus. # noqa: E501
:type: list[V1Condition]
"""
self._conditions = conditions
@property
def observed_generation(self):
"""Gets the observed_generation of this V1ValidatingAdmissionPolicyStatus. # noqa: E501
The generation observed by the controller. # noqa: E501
:return: The observed_generation of this V1ValidatingAdmissionPolicyStatus. # noqa: E501
:rtype: int
"""
return self._observed_generation
@observed_generation.setter
def observed_generation(self, observed_generation):
"""Sets the observed_generation of this V1ValidatingAdmissionPolicyStatus.
The generation observed by the controller. # noqa: E501
:param observed_generation: The observed_generation of this V1ValidatingAdmissionPolicyStatus. # noqa: E501
:type: int
"""
self._observed_generation = observed_generation
@property
def type_checking(self):
"""Gets the type_checking of this V1ValidatingAdmissionPolicyStatus. # noqa: E501
:return: The type_checking of this V1ValidatingAdmissionPolicyStatus. # noqa: E501
:rtype: V1TypeChecking
"""
return self._type_checking
@type_checking.setter
def type_checking(self, type_checking):
"""Sets the type_checking of this V1ValidatingAdmissionPolicyStatus.
:param type_checking: The type_checking of this V1ValidatingAdmissionPolicyStatus. # noqa: E501
:type: V1TypeChecking
"""
self._type_checking = type_checking
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1ValidatingAdmissionPolicyStatus):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1ValidatingAdmissionPolicyStatus):
return True
return self.to_dict() != other.to_dict()
|
V1ValidatingAdmissionPolicyStatus
|
python
|
realpython__materials
|
build-a-gui-with-wxpython/mp3_tag_editor.py
|
{
"start": 38,
"end": 1531
}
|
class ____(wx.Dialog):
def __init__(self, mp3):
title = 'Editing "{title}"'.format(title=mp3.tag.title)
super().__init__(parent=None, title=title)
self.mp3 = mp3
self.main_sizer = wx.BoxSizer(wx.VERTICAL)
self.artist = wx.TextCtrl(self, value=self.mp3.tag.artist)
self.add_widgets("Artist", self.artist)
self.album = wx.TextCtrl(self, value=self.mp3.tag.album)
self.add_widgets("Album", self.album)
self.title = wx.TextCtrl(self, value=self.mp3.tag.title)
self.add_widgets("Title", self.title)
btn_sizer = wx.BoxSizer()
save_btn = wx.Button(self, label="Save")
save_btn.Bind(wx.EVT_BUTTON, self.on_save)
btn_sizer.Add(save_btn, 0, wx.ALL, 5)
btn_sizer.Add(wx.Button(self, id=wx.ID_CANCEL), 0, wx.ALL, 5)
self.main_sizer.Add(btn_sizer, 0, wx.CENTER)
self.SetSizer(self.main_sizer)
def add_widgets(self, label_text, text_ctrl):
row_sizer = wx.BoxSizer(wx.HORIZONTAL)
label = wx.StaticText(self, label=label_text, size=(50, -1))
row_sizer.Add(label, 0, wx.ALL, 5)
row_sizer.Add(text_ctrl, 1, wx.ALL | wx.EXPAND, 5)
self.main_sizer.Add(row_sizer, 0, wx.EXPAND)
def on_save(self, event):
self.mp3.tag.artist = self.artist.GetValue()
self.mp3.tag.album = self.album.GetValue()
self.mp3.tag.title = self.title.GetValue()
self.mp3.tag.save()
self.Close()
|
EditDialog
|
python
|
apache__airflow
|
providers/google/src/airflow/providers/google/cloud/operators/dataflow.py
|
{
"start": 50078,
"end": 52942
}
|
class ____(GoogleCloudBaseOperator):
"""
Deletes a Dataflow Data Pipeline.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:DataflowDeletePipelineOperator`
:param pipeline_name: The display name of the pipeline. In example
projects/PROJECT_ID/locations/LOCATION_ID/pipelines/PIPELINE_ID it would be the PIPELINE_ID.
:param project_id: The ID of the GCP project that owns the job.
:param location: The location to direct the Data Pipelines instance to (for example us-central1).
:param gcp_conn_id: The connection ID to connect to the Google Cloud Platform.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
def __init__(
self,
pipeline_name: str,
project_id: str = PROVIDE_PROJECT_ID,
location: str = DEFAULT_DATAFLOW_LOCATION,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.pipeline_name = pipeline_name
self.project_id = project_id
self.location = location
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
self.dataflow_hook: DataflowHook | None = None
self.response: dict | None = None
def execute(self, context: Context):
self.dataflow_hook = DataflowHook(
gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain
)
if self.pipeline_name is None:
raise AirflowException("Data Pipeline name not given; cannot run unspecified pipeline.")
if self.project_id is None:
raise AirflowException("Data Pipeline Project ID not given; cannot run pipeline.")
if self.location is None:
raise AirflowException("Data Pipeline location not given; cannot run pipeline.")
self.response = self.dataflow_hook.delete_data_pipeline(
pipeline_name=self.pipeline_name,
project_id=self.project_id,
location=self.location,
)
if self.response:
raise AirflowException(self.response)
return None
|
DataflowDeletePipelineOperator
|
python
|
dagster-io__dagster
|
python_modules/dagster-graphql/dagster_graphql_tests/conftest.py
|
{
"start": 493,
"end": 1275
}
|
class ____(AmberSnapshotExtension):
@classmethod
def get_snapshot_name( # pyright: ignore[reportIncompatibleMethodOverride]
cls,
*,
index: "SnapshotIndex",
test_location: "PyTestLocation",
) -> str:
snapshot_name = test_location.snapshot_name
# Exclude any of the GraphQLContextVariant suffixes from the snapshot name
# so that we don't have to re-generate an identical snapshot for each one
variant_start_index = snapshot_name.find("[")
if variant_start_index != -1:
snapshot_name = snapshot_name[:variant_start_index]
return f"{snapshot_name}[{index}]"
@pytest.fixture
def snapshot(snapshot):
return snapshot.use_extension(SharedSnapshotExtension)
|
SharedSnapshotExtension
|
python
|
keras-team__keras
|
keras/src/ops/nn.py
|
{
"start": 14896,
"end": 16125
}
|
class ____(Operation):
def __init__(self, approximate=True, *, name=None):
super().__init__(name=name)
self.approximate = approximate
def call(self, x):
return backend.nn.gelu(x, self.approximate)
def compute_output_spec(self, x):
return KerasTensor(x.shape, dtype=x.dtype)
@keras_export(["keras.ops.gelu", "keras.ops.nn.gelu"])
def gelu(x, approximate=True):
"""Gaussian Error Linear Unit (GELU) activation function.
If `approximate` is `True`, it is defined as:
`f(x) = 0.5 * x * (1 + tanh(sqrt(2 / pi) * (x + 0.044715 * x^3)))`
Or if `approximate` is `False`, it is defined as:
`f(x) = x * P(X <= x) = 0.5 * x * (1 + erf(x / sqrt(2)))`,
where `P(X) ~ N(0, 1)`.
Args:
x: Input tensor.
approximate: Approximate version of GELU activation. Defaults to `True`.
Returns:
A tensor with the same shape as `x`.
Example:
>>> x = np.array([-1., 0., 1.])
>>> x_gelu = keras.ops.gelu(x)
>>> print(x_gelu)
array([-0.15865525, 0., 0.84134475], shape=(3,), dtype=float64)
"""
if any_symbolic_tensors((x,)):
return Gelu(approximate).symbolic_call(x)
return backend.nn.gelu(x, approximate)
|
Gelu
|
python
|
ray-project__ray
|
python/ray/serve/tests/unit/test_proxy_state.py
|
{
"start": 1159,
"end": 25335
}
|
class ____(ProxyWrapper):
def __init__(self, *args, **kwargs):
self.actor_handle = FakeProxyActor(*args, **kwargs)
self.is_ready_response = None
self.is_healthy_response = None
self.is_drained_response = False
self.worker_id = "mock_worker_id"
self.log_file_path = "mock_log_file_path"
self.shutdown = False
self.num_health_checks = 0
self.num_drain_checks = 0
@property
def actor_id(self) -> str:
pass
def is_ready(self, timeout_s: float) -> Optional[bool]:
return self.is_ready_response
def is_healthy(self, timeout_s: float) -> Optional[bool]:
self.num_health_checks += 1
return self.is_healthy_response
def is_drained(self, timeout_s: float) -> Optional[bool]:
self.num_drain_checks += 1
return self.is_drained_response
def is_shutdown(self):
return self.shutdown
def update_draining(self, draining: bool):
pass
def kill(self):
self.shutdown = True
def get_num_health_checks(self):
return self.num_health_checks
def get_num_drain_checks(self):
return self.num_health_checks
def _create_proxy_state_manager(
http_options: HTTPOptions = HTTPOptions(),
head_node_id: str = HEAD_NODE_ID,
cluster_node_info_cache=MockClusterNodeInfoCache(),
actor_proxy_wrapper_class=FakeProxyWrapper,
timer=Timer(),
) -> (ProxyStateManager, ClusterNodeInfoCache):
return (
ProxyStateManager(
http_options=http_options,
head_node_id=head_node_id,
cluster_node_info_cache=cluster_node_info_cache,
logging_config=LoggingConfig(),
actor_proxy_wrapper_class=actor_proxy_wrapper_class,
timer=timer,
),
cluster_node_info_cache,
)
def _create_proxy_state(
actor_proxy_wrapper_class=FakeProxyWrapper,
status: ProxyStatus = ProxyStatus.STARTING,
node_id: str = "mock_node_id",
timer=Timer(),
**kwargs,
) -> ProxyState:
state = ProxyState(
actor_proxy_wrapper=actor_proxy_wrapper_class(),
actor_name="alice",
node_id=node_id,
node_ip="mock_node_ip",
node_instance_id="mock_instance_id",
timer=timer,
)
state._set_status(status=status)
return state
@pytest.fixture
def number_of_worker_nodes() -> int:
return 100
@pytest.fixture
def all_nodes(number_of_worker_nodes) -> List[Tuple[str, str, str]]:
return [(HEAD_NODE_ID, "fake-head-ip", "fake-head-instance-id")] + [
(f"worker-node-id-{i}", f"fake-worker-ip-{i}", f"fake-instance-id-{i}")
for i in range(number_of_worker_nodes)
]
def _reconcile_and_check_proxy_status(state: ProxyState, status: ProxyStatus):
state.reconcile()
assert state.status == status
return True
def _update_and_check_proxy_state_manager(
proxy_state_manager: ProxyStateManager,
node_ids: List[str],
statuses: List[ProxyStatus],
**kwargs,
):
proxy_state_manager.update(**kwargs)
proxy_states = proxy_state_manager._proxy_states
assert all(
[
proxy_states[node_ids[idx]].status == statuses[idx]
for idx in range(len(node_ids))
]
), [proxy_state.status for proxy_state in proxy_states.values()]
return True
def test_node_selection(all_nodes):
all_node_ids = {node_id for node_id, _, _ in all_nodes}
# Test NoServer
proxy_state_manager, cluster_node_info_cache = _create_proxy_state_manager(
HTTPOptions(location=DeploymentMode.NoServer)
)
cluster_node_info_cache.alive_nodes = all_nodes
assert proxy_state_manager._get_target_nodes(all_node_ids) == []
# Test HeadOnly
proxy_state_manager, cluster_node_info_cache = _create_proxy_state_manager(
HTTPOptions(location=DeploymentMode.HeadOnly)
)
cluster_node_info_cache.alive_nodes = all_nodes
assert proxy_state_manager._get_target_nodes(all_node_ids) == all_nodes[:1]
# Test EveryNode
proxy_state_manager, cluster_node_info_cache = _create_proxy_state_manager(
HTTPOptions(location=DeploymentMode.EveryNode)
)
cluster_node_info_cache.alive_nodes = all_nodes
assert proxy_state_manager._get_target_nodes(all_node_ids) == all_nodes
# Test specific nodes
proxy_state_manager, cluster_node_info_cache = _create_proxy_state_manager(
HTTPOptions(location=DeploymentMode.EveryNode)
)
cluster_node_info_cache.alive_nodes = all_nodes
assert proxy_state_manager._get_target_nodes({HEAD_NODE_ID}) == [
(HEAD_NODE_ID, "fake-head-ip", "fake-head-instance-id")
]
@patch("ray.serve._private.proxy_state.PROXY_HEALTH_CHECK_PERIOD_S", 5)
def test_proxy_state_manager_restarts_unhealthy_proxies(all_nodes):
"""Test the update method in ProxyStateManager would
kill and restart unhealthy proxies.
"""
timer = MockTimer()
proxy_state_manager, cluster_node_info_cache = _create_proxy_state_manager(
timer=timer
)
cluster_node_info_cache.alive_nodes = all_nodes
# First iteration, refresh state
proxy_state_manager.update()
prev_proxy_state = proxy_state_manager._proxy_states[HEAD_NODE_ID]
# Mark existing head-node proxy UNHEALTHY
prev_proxy_state._set_status(ProxyStatus.UNHEALTHY)
old_proxy = prev_proxy_state.actor_handle
# Continuously trigger update and wait for status to be changed to HEALTHY.
for _ in range(1):
proxy_state_manager.update(proxy_nodes=set(HEAD_NODE_ID))
# Advance timer by 5 (to perform a health-check)
timer.advance(5)
new_proxy_state = proxy_state_manager._proxy_states[HEAD_NODE_ID]
# Previous proxy's state stays UNHEALTHY
assert prev_proxy_state.status == ProxyStatus.UNHEALTHY
# Ensure the old proxy is getting shutdown.
assert prev_proxy_state._shutting_down
# New proxy's state should be STARTING
assert new_proxy_state.status == ProxyStatus.STARTING
assert new_proxy_state.proxy_restart_count == 1
new_proxy = new_proxy_state.actor_handle
# Ensure the new proxy is completely different object than old proxy.
assert new_proxy != old_proxy
def test_proxy_state_reconcile_shutting_down():
proxy_state = _create_proxy_state()
previous_status = proxy_state.status
proxy_state.shutdown()
# This should be no-op. The status of the http proxy state will not be changed.
proxy_state.reconcile()
current_status = proxy_state.status
# Ensure the proxy state is in the shutting down state.
assert proxy_state._shutting_down
# Ensure the status didn't change.
assert previous_status == current_status
def test_proxy_state_reconcile_readiness_check_succeed():
proxy_state = _create_proxy_state()
# Configure is_ready to be true
proxy_state._actor_proxy_wrapper.is_ready_response = True
# Ensure the proxy status before update is STARTING.
assert proxy_state.status == ProxyStatus.STARTING
# Ensure actor_details are set to the initial state when the proxy_state is created.
assert proxy_state.actor_details.worker_id is None
assert proxy_state.actor_details.log_file_path is None
assert proxy_state.actor_details.status == ProxyStatus.STARTING.value
# Continuously trigger update and wait for status to be changed.
proxy_state.reconcile()
assert proxy_state.status == ProxyStatus.HEALTHY
# Ensure actor_details are updated.
assert proxy_state.actor_details.worker_id == "mock_worker_id"
assert proxy_state.actor_details.log_file_path == "mock_log_file_path"
assert proxy_state.actor_details.status == ProxyStatus.HEALTHY.value
def test_proxy_state_reconcile_readiness_check_pending():
proxy_state = _create_proxy_state()
# Ensure the proxy status before update is STARTING.
assert proxy_state.status == ProxyStatus.STARTING
# When the proxy readiness check is pending, the proxy wrapper is_ready
# will return None
proxy_state._actor_proxy_wrapper.is_ready_response = None
# Trigger update. The status do not change, while readiness check is pending
for _ in range(10):
proxy_state.reconcile()
assert proxy_state.status == ProxyStatus.STARTING
# Unblock is_ready call, trigger update, and wait for status change to HEALTHY.
proxy_state._actor_proxy_wrapper.is_ready_response = True
proxy_state.reconcile()
assert proxy_state.status == ProxyStatus.HEALTHY
def test_proxy_state_reconcile_readiness_check_fails():
proxy_state = _create_proxy_state()
# Emulate readiness check failure
proxy_state._actor_proxy_wrapper.is_ready_response = False
# Ensure the proxy status before update is STARTING.
assert proxy_state.status == ProxyStatus.STARTING
# First failure shouldn't trigger state-transition to UNHEALTHY
proxy_state.reconcile()
assert proxy_state.status == ProxyStatus.STARTING
# After PROXY_HEALTH_CHECK_UNHEALTHY_THRESHOLD failures, state should
# transition to UNHEALTHY
for _ in range(PROXY_HEALTH_CHECK_UNHEALTHY_THRESHOLD - 1):
proxy_state.reconcile()
assert proxy_state.status == ProxyStatus.UNHEALTHY
@patch("ray.serve._private.proxy_state.PROXY_READY_CHECK_TIMEOUT_S", 5)
def test_proxy_state_reconcile_health_check_succeed():
timer = MockTimer()
proxy_state = _create_proxy_state(time=timer)
# Emulate readiness check succeeding
proxy_state._actor_proxy_wrapper.is_ready_response = True
# Should transition to HEALTHY
proxy_state.reconcile()
assert proxy_state.status == ProxyStatus.HEALTHY
# Trigger update few more times and the status continue to be HEALTHY.
for _ in range(10):
_reconcile_and_check_proxy_status(proxy_state, ProxyStatus.HEALTHY)
# Advance timer by 5s
timer.advance(5)
# Health-checks should have been performed on every iteration
assert proxy_state._actor_proxy_wrapper.num_health_checks == 10
@patch("ray.serve._private.proxy_state.PROXY_HEALTH_CHECK_PERIOD_S", 5)
def test_proxy_state_reconcile_health_check_transient_failures():
timer = MockTimer()
# Start with HEALTHY state
proxy_state = _create_proxy_state(status=ProxyStatus.HEALTHY, timer=timer)
# Simulate health-checks failing
proxy_state._actor_proxy_wrapper.is_healthy_response = False
# Reconcile PROXY_HEALTH_CHECK_UNHEALTHY_THRESHOLD - 1 times, state should
# continue to stay HEALTHY
for _ in range(PROXY_HEALTH_CHECK_UNHEALTHY_THRESHOLD - 1):
_reconcile_and_check_proxy_status(proxy_state, ProxyStatus.HEALTHY)
# Advance timer by 5 (to trigger new health-check)
timer.advance(5)
assert (
proxy_state._actor_proxy_wrapper.get_num_health_checks()
== PROXY_HEALTH_CHECK_UNHEALTHY_THRESHOLD - 1
)
# Simulate health-checks passing
proxy_state._actor_proxy_wrapper.is_healthy_response = True
wait_for_condition(
condition_predictor=_reconcile_and_check_proxy_status,
state=proxy_state,
status=ProxyStatus.HEALTHY,
)
# Ensure _consecutive_health_check_failures is reset
assert proxy_state._consecutive_health_check_failures == 0
@patch("ray.serve._private.proxy_state.PROXY_HEALTH_CHECK_PERIOD_S", 5)
def test_proxy_state_reconcile_health_check_persistent_failures():
timer = MockTimer()
# Start with HEALTHY state
proxy_state = _create_proxy_state(status=ProxyStatus.HEALTHY, timer=timer)
# Simulate health-checks failing
proxy_state._actor_proxy_wrapper.is_healthy_response = False
# Reconcile PROXY_HEALTH_CHECK_UNHEALTHY_THRESHOLD - 1 times, state should
# continue to stay HEALTHY
for _ in range(PROXY_HEALTH_CHECK_UNHEALTHY_THRESHOLD - 1):
_reconcile_and_check_proxy_status(proxy_state, ProxyStatus.HEALTHY)
# Advance timer by 5 (to trigger new health-check)
timer.advance(5)
assert (
proxy_state._actor_proxy_wrapper.get_num_health_checks()
== PROXY_HEALTH_CHECK_UNHEALTHY_THRESHOLD - 1
)
# On PROXY_HEALTH_CHECK_UNHEALTHY_THRESHOLD iteration, state transitions to
# UNHEALTHY
_reconcile_and_check_proxy_status(proxy_state, ProxyStatus.UNHEALTHY)
# Ensure _consecutive_health_check_failures is correct
assert proxy_state._consecutive_health_check_failures == 3
@patch("ray.serve._private.proxy_state.PROXY_HEALTH_CHECK_PERIOD_S", 0)
@pytest.mark.parametrize("number_of_worker_nodes", [0, 1, 2, 3])
def test_proxy_manager_update_proxies_states(all_nodes, number_of_worker_nodes):
"""Test update draining logics.
When update nodes to inactive, head node http proxy should never be draining while
worker node http proxy should change to draining. When update nodes to active, head
node http proxy should continue to be healthy while worker node http proxy should
be healthy.
"""
manager, cluster_node_info_cache = _create_proxy_state_manager(
HTTPOptions(location=DeploymentMode.EveryNode)
)
cluster_node_info_cache.alive_nodes = all_nodes
for node_id, _, _ in all_nodes:
manager._proxy_states[node_id] = _create_proxy_state(
status=ProxyStatus.HEALTHY,
node_id=node_id,
)
node_ids = [node_id for node_id, _, _ in all_nodes]
# No target proxy nodes
proxy_nodes = set()
# Head node proxy should continue to be HEALTHY.
# Worker node proxy should turn DRAINING.
wait_for_condition(
condition_predictor=_update_and_check_proxy_state_manager,
proxy_state_manager=manager,
node_ids=node_ids,
statuses=[ProxyStatus.HEALTHY]
+ [ProxyStatus.DRAINING] * number_of_worker_nodes,
proxy_nodes=proxy_nodes,
)
# All nodes are target proxy nodes
proxy_nodes = set(node_ids)
# Head node proxy should continue to be HEALTHY.
# Worker node proxy should turn HEALTHY.
wait_for_condition(
condition_predictor=_update_and_check_proxy_state_manager,
proxy_state_manager=manager,
node_ids=node_ids,
statuses=[ProxyStatus.HEALTHY] * (number_of_worker_nodes + 1),
proxy_nodes=proxy_nodes,
)
@patch("ray.serve._private.proxy_state.PROXY_HEALTH_CHECK_PERIOD_S", 5)
def test_proxy_state_reconcile_draining_success():
"""Test that the proxy will remain DRAINING even if health check succeeds."""
timer = MockTimer(start_time=0)
# Start with HEALTHY state
proxy_state = _create_proxy_state(status=ProxyStatus.HEALTHY, timer=timer)
# Simulate health-checks passing
proxy_state._actor_proxy_wrapper.is_healthy_response = True
# Simulate is_drained returning false
proxy_state._actor_proxy_wrapper.is_drained_response = False
for _ in range(10):
proxy_state.reconcile(draining=True)
assert proxy_state.status == ProxyStatus.DRAINING
# Advance timer by 5 (to trigger new health-check, drain-check)
timer.advance(5)
assert proxy_state._actor_proxy_wrapper.get_num_health_checks() == 10
assert proxy_state._actor_proxy_wrapper.get_num_drain_checks() == 10
# Make sure the status is still DRAINING
assert proxy_state.status == ProxyStatus.DRAINING
# Simulate is_drained request to ProxyActor pending (for 5 iterations)
proxy_state._actor_proxy_wrapper.is_drained_response = None
for _ in range(5):
proxy_state.reconcile(draining=True)
assert proxy_state.status == ProxyStatus.DRAINING
# Advance timer by 5 (to trigger new health-check, drain-check)
timer.advance(5)
assert proxy_state._actor_proxy_wrapper.get_num_health_checks() == 15
# No new drain checks will occur, since there's a pending one (not completed yet)
assert proxy_state._actor_proxy_wrapper.get_num_drain_checks() == 15
# Simulate draining completed
proxy_state._actor_proxy_wrapper.is_drained_response = True
# Advance timer by 5 (to trigger new health-check, drain-check on next iteration)
timer.advance(5)
proxy_state.reconcile(draining=True)
# State should transition to DRAINED
assert proxy_state.status == ProxyStatus.DRAINED
@patch("ray.serve._private.proxy_state.PROXY_DRAIN_CHECK_PERIOD_S", 5)
@pytest.mark.parametrize("number_of_worker_nodes", [1])
def test_proxy_actor_manager_removing_proxies(all_nodes, number_of_worker_nodes):
"""Test the state transition from DRAINING to UNHEALTHY for the proxy actor."""
assert len(all_nodes) == 2, "There should be 2 nodes in this test"
timer = MockTimer(start_time=0)
manager, cluster_node_info_cache = _create_proxy_state_manager(
HTTPOptions(location=DeploymentMode.EveryNode),
timer=timer,
)
cluster_node_info_cache.alive_nodes = all_nodes
for node_id, _, _ in all_nodes:
manager._proxy_states[node_id] = _create_proxy_state(
status=ProxyStatus.STARTING,
node_id=node_id,
timer=timer,
)
manager._proxy_states[node_id]._actor_proxy_wrapper.is_ready_response = True
# All nodes are target proxy nodes
node_ids = [node_id for node_id, _, _ in all_nodes]
worker_node_id = node_ids[1]
worker_proxy_state = manager._proxy_states[worker_node_id]
# Reconcile all proxies states
manager.update(
proxy_nodes=(set(node_ids)),
)
# Assert all proxies are HEALTHY
proxy_statuses = [manager._proxy_states[node_id].status for node_id in node_ids]
assert [ProxyStatus.HEALTHY, ProxyStatus.HEALTHY] == proxy_statuses
# Reconcile proxies with empty set of target nodes (ie only proxy on the head-node
# should be preserved all the other should be drained)
worker_proxy_state._actor_proxy_wrapper.is_drained_response = False
N = 10
for _ in range(N):
manager.update(
proxy_nodes=set(),
)
timer.advance(5)
# Assert that
# - Head-node proxy is HEALTHY4
# - Worker node proxy is DRAINING
proxy_statuses = [manager._proxy_states[node_id].status for node_id in node_ids]
assert [ProxyStatus.HEALTHY, ProxyStatus.DRAINING] == proxy_statuses
assert worker_proxy_state._actor_proxy_wrapper.get_num_drain_checks() == N
# Mark target proxy as fully drained
worker_proxy_state._actor_proxy_wrapper.is_drained_response = True
# Reconcile proxies with empty set of target nodes (worker node proxy
# will be shutdown by now)
manager.update(
proxy_nodes=set(),
)
assert len(manager._proxy_states) == 1
assert manager._proxy_states[HEAD_NODE_ID].status == ProxyStatus.HEALTHY
def test_is_ready_for_shutdown(all_nodes):
"""Test `is_ready_for_shutdown()` returns True the correct state.
Before `shutdown()` is called, `is_ready_for_shutdown()` should return false. After
`shutdown()` is called and all proxy actor are killed, `is_ready_for_shutdown()`
should return true.
"""
manager, cluster_node_info_cache = _create_proxy_state_manager(
HTTPOptions(location=DeploymentMode.EveryNode)
)
cluster_node_info_cache.alive_nodes = all_nodes
for node_id, _, _ in all_nodes:
manager._proxy_states[node_id] = _create_proxy_state(
status=ProxyStatus.HEALTHY,
node_id=node_id,
)
# Ensure before shutdown, manager is not shutdown
assert not manager.is_ready_for_shutdown()
manager.shutdown()
# Ensure after shutdown, manager is shutdown and all proxy states are shutdown
def check_is_ready_for_shutdown():
return manager.is_ready_for_shutdown()
wait_for_condition(check_is_ready_for_shutdown)
@patch("ray.serve._private.proxy_state.PROXY_HEALTH_CHECK_UNHEALTHY_THRESHOLD", 1)
@pytest.mark.parametrize("number_of_worker_nodes", [1])
def test_proxy_state_manager_timing_out_on_start(number_of_worker_nodes, all_nodes):
"""Test update method on ProxyStateManager when the proxy state is STARTING and
when the ready call takes longer than PROXY_READY_CHECK_TIMEOUT_S.
The proxy state started with STARTING. After update is called, ready calls takes
some time to finish. The proxy state manager will restart the proxy state after
PROXY_READY_CHECK_TIMEOUT_S. After the next period of check_health call,
the proxy state manager will check on backoff timeout, not immediately
restarting the proxy states, and eventually set the proxy state to HEALTHY.
"""
fake_time = MockTimer()
proxy_state_manager, cluster_node_info_cache = _create_proxy_state_manager(
http_options=HTTPOptions(location=DeploymentMode.EveryNode),
timer=fake_time,
)
cluster_node_info_cache.alive_nodes = all_nodes
node_ids = {node[0] for node in all_nodes}
# Run update to create proxy states.
proxy_state_manager.update(proxy_nodes=node_ids)
# Ensure 2 proxies are created, one for the head node and another for the worker.
assert len(proxy_state_manager._proxy_states) == len(node_ids)
# Ensure the proxy state statuses before update are STARTING, set the
# readiness check to failing
for node_id in node_ids:
proxy_state = proxy_state_manager._proxy_states[node_id]
assert proxy_state.status == ProxyStatus.STARTING
proxy_state._actor_proxy_wrapper.is_ready_response = False
# Capture current proxy states (prior to updating)
prev_proxy_states = dict(proxy_state_manager._proxy_states)
# Trigger PSM to reconcile
proxy_state_manager.update(proxy_nodes=node_ids)
# Ensure the proxy state statuses before update are STARTING, set the
# readiness check to failing
for node_id in node_ids:
proxy_state = proxy_state_manager._proxy_states[node_id]
prev_proxy_state = prev_proxy_states[node_id]
# Assert
# - All proxies are restarted
# - Previous proxy states are UNHEALTHY
# - New proxy states are STARTING
assert proxy_state != prev_proxy_state
assert prev_proxy_state.status == ProxyStatus.UNHEALTHY
assert proxy_state.status == ProxyStatus.STARTING
# Mark new proxy readiness checks as passing
proxy_state._actor_proxy_wrapper.is_ready_response = True
# Capture current proxy states again (prior to updating)
prev_proxy_states = dict(proxy_state_manager._proxy_states)
# Trigger PSM to reconcile
proxy_state_manager.update(proxy_nodes=node_ids)
# Ensure the proxy state statuses before update are STARTING, set the
# readiness check to failing
for node_id in node_ids:
proxy_state = proxy_state_manager._proxy_states[node_id]
prev_proxy_state = prev_proxy_states[node_id]
# Assert
# - All proxies are restarted
# - Previous proxy states are UNHEALTHY
# - New proxy states are STARTING
assert proxy_state == prev_proxy_state
assert prev_proxy_state.status == ProxyStatus.HEALTHY
assert proxy_state.status == ProxyStatus.HEALTHY
def test_proxy_state_manager_get_targets(all_nodes):
"""Test the get_targets method on ProxyStateManager."""
manager, cluster_node_info_cache = _create_proxy_state_manager(
HTTPOptions(location=DeploymentMode.EveryNode)
)
cluster_node_info_cache.alive_nodes = all_nodes
for node_id, _, _ in all_nodes:
manager._proxy_states[node_id] = _create_proxy_state(
status=ProxyStatus.HEALTHY,
node_id=node_id,
)
manager._proxy_states[all_nodes[-1][0]].try_update_status(ProxyStatus.DRAINED)
targets = manager.get_targets(RequestProtocol.HTTP)
assert len(targets) == len(all_nodes) - 1
assert targets[0].ip == "mock_node_ip"
assert targets[0].port == 8000
assert targets[0].instance_id == "mock_instance_id"
assert targets[0].name == "alice"
targets = manager.get_targets(RequestProtocol.GRPC)
assert len(targets) == 0
with pytest.raises(ValueError):
manager.get_targets("invalid_protocol")
if __name__ == "__main__":
import sys
sys.exit(pytest.main(["-v", "-s", __file__]))
|
FakeProxyWrapper
|
python
|
pexpect__pexpect
|
tests/test_popen_spawn.py
|
{
"start": 1087,
"end": 5018
}
|
class ____ (PexpectTestCase.PexpectTestCase):
def test_expect_basic(self):
p = PopenSpawn('cat', timeout=5)
p.sendline(b'Hello')
p.sendline(b'there')
p.sendline(b'Mr. Python')
p.expect(b'Hello')
p.expect(b'there')
p.expect(b'Mr. Python')
p.sendeof()
p.expect(pexpect.EOF)
def test_expect_exact_basic(self):
p = PopenSpawn('cat', timeout=5)
p.sendline(b'Hello')
p.sendline(b'there')
p.sendline(b'Mr. Python')
p.expect_exact(b'Hello')
p.expect_exact(b'there')
p.expect_exact(b'Mr. Python')
p.sendeof()
p.expect_exact(pexpect.EOF)
def test_expect(self):
the_old_way = subprocess.Popen(args=['ls', '-l', '/bin'],
stdout=subprocess.PIPE).communicate()[0].rstrip()
p = PopenSpawn('ls -l /bin')
the_new_way = b''
while 1:
i = p.expect([b'\n', pexpect.EOF])
the_new_way = the_new_way + p.before
if i == 1:
break
the_new_way += b'\n'
the_new_way = the_new_way.rstrip()
assert the_old_way == the_new_way, len(the_old_way) - len(the_new_way)
def test_expect_exact(self):
the_old_way = subprocess.Popen(args=['ls', '-l', '/bin'],
stdout=subprocess.PIPE).communicate()[0].rstrip()
p = PopenSpawn('ls -l /bin')
the_new_way = b''
while 1:
i = p.expect_exact([b'\n', pexpect.EOF])
the_new_way = the_new_way + p.before
if i == 1:
break
the_new_way += b'\n'
the_new_way = the_new_way.rstrip()
assert the_old_way == the_new_way, len(the_old_way) - len(the_new_way)
p = PopenSpawn('echo hello.?world')
i = p.expect_exact(b'.?')
self.assertEqual(p.before, b'hello')
self.assertEqual(p.after, b'.?')
def test_expect_eof(self):
the_old_way = subprocess.Popen(args=['ls', '-l', '/bin'],
stdout=subprocess.PIPE).communicate()[0].rstrip()
p = PopenSpawn('ls -l /bin')
# This basically tells it to read everything. Same as pexpect.run()
# function.
p.expect(pexpect.EOF)
the_new_way = p.before.rstrip()
assert the_old_way == the_new_way, len(the_old_way) - len(the_new_way)
def test_expect_timeout(self):
p = PopenSpawn('cat', timeout=5)
p.expect(pexpect.TIMEOUT) # This tells it to wait for timeout.
self.assertEqual(p.after, pexpect.TIMEOUT)
def test_unexpected_eof(self):
p = PopenSpawn('ls -l /bin')
try:
p.expect('_Z_XY_XZ') # Probably never see this in ls output.
except pexpect.EOF:
pass
else:
self.fail('Expected an EOF exception.')
def test_bad_arg(self):
p = PopenSpawn('cat')
with self.assertRaisesRegex(TypeError, '.*must be one of'):
p.expect(1)
with self.assertRaisesRegex(TypeError, '.*must be one of'):
p.expect([1, b'2'])
with self.assertRaisesRegex(TypeError, '.*must be one of'):
p.expect_exact(1)
with self.assertRaisesRegex(TypeError, '.*must be one of'):
p.expect_exact([1, b'2'])
def test_timeout_none(self):
p = PopenSpawn('echo abcdef', timeout=None)
p.expect('abc')
p.expect_exact('def')
p.expect(pexpect.EOF)
def test_crlf(self):
p = PopenSpawn('echo alpha beta')
assert p.read() == b'alpha beta' + p.crlf
def test_crlf_encoding(self):
p = PopenSpawn('echo alpha beta', encoding='utf-8')
assert p.read() == 'alpha beta' + p.crlf
if __name__ == '__main__':
unittest.main()
suite = unittest.TestLoader().loadTestsFromTestCase(ExpectTestCase)
|
ExpectTestCase
|
python
|
streamlit__streamlit
|
lib/tests/streamlit/elements/layouts_test.py
|
{
"start": 11222,
"end": 14358
}
|
class ____(DeltaGeneratorTestCase):
def test_label_required(self):
"""Test that label is required"""
with pytest.raises(TypeError):
st.expander()
def test_just_label(self):
"""Test that it can be called with no params"""
expander = st.expander("label")
with expander:
# Noop
pass
expander_block = self.get_delta_from_queue()
assert expander_block.add_block.expandable.label == "label"
assert not expander_block.add_block.expandable.expanded
def test_allow_empty(self):
"""Test that it correctly applies allow_empty param."""
st.expander("label")
expander_block = self.get_delta_from_queue()
assert expander_block.add_block.allow_empty
def test_width_config(self):
"""Test that width configuration works correctly"""
st.expander("label", width=200)
expander_block = self.get_delta_from_queue()
assert expander_block.add_block.width_config.pixel_width == 200
st.expander("label", width="stretch")
expander_block = self.get_delta_from_queue()
assert expander_block.add_block.width_config.use_stretch
@parameterized.expand(
[
(None,),
("invalid",),
(-100,),
(0,),
("content",),
]
)
def test_invalid_width(self, invalid_width):
"""Test that invalid width values raise an error"""
with pytest.raises(StreamlitAPIException):
st.expander("label", width=invalid_width)
def test_valid_emoji_icon(self):
"""Test that it can be called with an emoji icon"""
expander = st.expander("label", icon="🦄")
with expander:
# Noop
pass
expander_block = self.get_delta_from_queue()
assert expander_block.add_block.expandable.label == "label"
assert expander_block.add_block.expandable.icon == "🦄"
def test_valid_material_icon(self):
"""Test that it can be called with a material icon"""
expander = st.expander("label", icon=":material/download:")
with expander:
# Noop
pass
expander_block = self.get_delta_from_queue()
assert expander_block.add_block.expandable.label == "label"
assert expander_block.add_block.expandable.icon == ":material/download:"
def test_invalid_emoji_icon(self):
"""Test that it throws an error on invalid emoji icon"""
with pytest.raises(StreamlitAPIException) as e:
st.expander("label", icon="invalid")
assert (
str(e.value)
== 'The value "invalid" is not a valid emoji. Shortcodes are not allowed, '
"please use a single character instead."
)
def test_invalid_material_icon(self):
"""Test that it throws an error on invalid material icon"""
icon = ":material/invalid:"
with pytest.raises(StreamlitAPIException) as e:
st.expander("label", icon=icon)
assert "is not a valid Material icon" in str(e.value)
|
ExpanderTest
|
python
|
getsentry__sentry
|
tests/sentry/uptime/endpoints/test_project_uptime_alert_index.py
|
{
"start": 493,
"end": 628
}
|
class ____(UptimeAlertBaseEndpointTest):
endpoint = "sentry-api-0-project-uptime-alert-index"
|
ProjectUptimeAlertIndexBaseEndpointTest
|
python
|
django-haystack__django-haystack
|
test_haystack/test_query.py
|
{
"start": 14312,
"end": 14507
}
|
class ____(indexes.SearchIndex, indexes.Indexable):
text = indexes.CharField(document=True, model_attr="key")
def get_model(self):
return CharPKMockModel
|
CharPKMockModelSearchIndex
|
python
|
airbytehq__airbyte
|
airbyte-ci/connectors/pipelines/pipelines/airbyte_ci/connectors/migrate_to_inline_schemas/pipeline.py
|
{
"start": 8304,
"end": 14656
}
|
class ____:
ref: str
file_path: str
def copy_directory(src: Path, dest: Path) -> None:
if dest.exists():
shutil.rmtree(dest)
shutil.copytree(src, dest)
def _has_subdirectory(directory: Path) -> bool:
# Iterate through all items in the directory
for entry in directory.iterdir():
# Check if this entry is a directory
if entry.is_dir():
return True
return False
def _get_stream_name(yaml_stream: dict) -> str | None:
if "name" in yaml_stream:
return yaml_stream["name"]
if "$parameters" in yaml_stream and "name" in yaml_stream["$parameters"]:
return yaml_stream["$parameters"]["name"]
return None
def _update_json_loaders(
connector_path: Path,
data: dict,
streams: dict[str, JsonStream],
loaders: List[JsonLoaderNode],
) -> None:
logger = main_logger
for loader in loaders:
if "{{" in loader.file_path:
# remove templated paths and their references
(f" Removing reference: {loader.ref}")
_remove_reference(data, None, loader, [])
continue
else:
# direct pointer to a file. update.
file_path = Path(os.path.abspath(os.path.join(connector_path, loader.file_path)))
if not file_path.is_file():
logger.info(f" JsonFileSchemaLoader not found: {file_path}")
continue
schema_loader = _load_reference(data, loader.ref)
if not schema_loader:
logger.info(f" JsonFileSchemaLoader reference not found: {loader.ref}")
continue
_update_inline_schema(schema_loader, streams, file_path.stem)
def _update_inline_schema(schema_loader: dict, json_streams: dict[str, JsonStream], file_name: str) -> None:
logger = main_logger
if file_name not in json_streams:
logger.info(f" Stream {file_name} not found in JSON schemas.")
return
json_stream = json_streams[file_name]
schema_loader["type"] = "InlineSchemaLoader"
schema_loader["schema"] = json_stream.schema
json_stream.file_path.unlink()
json_streams.pop(file_name)
def _remove_reference(parent: Any, key: str | int | None, loader: JsonLoaderNode, path: List[str]) -> bool: # noqa: ANN401
logger = main_logger
if key is None:
data = parent
else:
data = parent[key]
if isinstance(data, dict):
ref = f"#/{'/'.join(path)}"
if ref == loader.ref:
logger.info(f" Removing reference: {ref}")
return True
elif "$ref" in data and data["$ref"] == loader.ref:
logger.info(f" Found reference: {ref}")
return True
else:
todelete = []
for key, value in data.items():
if _remove_reference(data, key, loader, path + [str(key)]):
todelete.append(key)
for key in todelete:
del data[key]
elif isinstance(data, list):
for i, value in enumerate(data):
ref = f"Array[{str(i)}]"
_remove_reference(data, i, loader, path + [ref])
return False
def _load_reference(data: dict, ref: str) -> dict | None:
yaml_stream = data
path = ref.split("/")
for p in path:
if p == "#":
continue
if p.startswith("Array["):
i = int(p[6:-1])
if not isinstance(yaml_stream, list) or len(yaml_stream) <= i:
return None
yaml_stream = yaml_stream[i]
continue
if p not in yaml_stream:
return None
yaml_stream = yaml_stream[p]
return yaml_stream
def _find_json_loaders(data: Any, path: List[str]) -> List[JsonLoaderNode]: # noqa: ANN401
logger = main_logger
loaders: List[JsonLoaderNode] = []
if isinstance(data, dict):
if "type" in data and data["type"] == "JsonFileSchemaLoader":
ref = f"#/{'/'.join(path)}"
if "file_path" in data:
loaders.append(JsonLoaderNode(ref, data["file_path"]))
else:
logger.info(f" !! JsonFileSchemaLoader missing file_path: {ref}")
else:
for key, value in data.items():
loaders += _find_json_loaders(value, path + [key])
elif isinstance(data, list):
for i, value in enumerate(data):
loaders += _find_json_loaders(value, path + [f"Array[{str(i)}]"])
return loaders
def _parse_json_streams(python_path: Path) -> dict[str, JsonStream]:
streams: dict[str, JsonStream] = {}
schemas_path = python_path / SCHEMAS_DIR_NAME
if not schemas_path.is_dir():
return streams
for schema_file in schemas_path.iterdir():
if schema_file.is_file() and schema_file.suffix == ".json":
stream_name = schema_file.stem
with schema_file.open("r") as file:
# read json
schema = json.load(file)
streams[stream_name] = JsonStream(
name=stream_name,
schema=schema,
file_path=schema_file,
)
return streams
async def run_connector_migrate_to_inline_schemas_pipeline(context: ConnectorContext, semaphore: "Semaphore") -> Report:
restore_original_state = RestoreInlineState(context)
context.targeted_platforms = [LOCAL_BUILD_PLATFORM]
steps_to_run: STEP_TREE = []
steps_to_run.append(
[
StepToRun(
id=CONNECTOR_TEST_STEP_ID.INLINE_CANDIDATE,
step=CheckIsInlineCandidate(context),
)
]
)
steps_to_run.append(
[
StepToRun(
id=CONNECTOR_TEST_STEP_ID.INLINE_MIGRATION,
step=InlineSchemas(context),
depends_on=[CONNECTOR_TEST_STEP_ID.INLINE_CANDIDATE],
)
]
)
steps_to_run.append(
[
StepToRun(
id=CONNECTOR_TEST_STEP_ID.INLINE_CLEANUP,
step=RemoveUnusedJsonSchamas(context),
depends_on=[CONNECTOR_TEST_STEP_ID.INLINE_MIGRATION],
)
]
)
return await run_connector_steps(context, semaphore, steps_to_run, restore_original_state=restore_original_state)
|
JsonLoaderNode
|
python
|
microsoft__pyright
|
packages/pyright-internal/src/tests/samples/overloadOverlap1.py
|
{
"start": 6555,
"end": 6604
}
|
class ____: ...
TE1 = TypeVar("TE1", bound=E1)
|
E1
|
python
|
sphinx-doc__sphinx
|
sphinx/util/template.py
|
{
"start": 2562,
"end": 3543
}
|
class ____(SphinxRenderer):
def __init__(
self,
template_path: Sequence[str | os.PathLike[str]] | None = None,
latex_engine: str | None = None,
) -> None:
if template_path is None:
template_path = (_LATEX_TEMPLATES_PATH,)
super().__init__(template_path)
# use texescape as escape filter
escape = partial(texescape.escape, latex_engine=latex_engine)
self.env.filters['e'] = escape
self.env.filters['escape'] = escape
self.env.filters['eabbr'] = texescape.escape_abbr
# use JSP/eRuby like tagging instead because curly bracket; the default
# tagging of jinja2 is not good for LaTeX sources.
self.env.variable_start_string = '<%='
self.env.variable_end_string = '%>'
self.env.block_start_string = '<%'
self.env.block_end_string = '%>'
self.env.comment_start_string = '<#'
self.env.comment_end_string = '#>'
|
LaTeXRenderer
|
python
|
langchain-ai__langchain
|
libs/langchain/langchain_classic/evaluation/scoring/eval_chain.py
|
{
"start": 13253,
"end": 15454
}
|
class ____(ScoreStringEvalChain):
"""A chain for scoring the output of a model on a scale of 1-10.
Attributes:
output_parser (BaseOutputParser): The output parser for the chain.
"""
@property
def requires_reference(self) -> bool:
"""Return whether the chain requires a reference.
Returns:
`True` if the chain requires a reference, `False` otherwise.
"""
return True
@classmethod
def from_llm(
cls,
llm: BaseLanguageModel,
*,
prompt: PromptTemplate | None = None,
criteria: CRITERIA_TYPE | str | None = None,
normalize_by: float | None = None,
**kwargs: Any,
) -> LabeledScoreStringEvalChain:
"""Initialize the LabeledScoreStringEvalChain from an LLM.
Args:
llm: The LLM to use.
prompt: The prompt to use.
criteria: The criteria to use.
normalize_by: The value to normalize the score by.
**kwargs: Additional keyword arguments.
Returns:
The initialized LabeledScoreStringEvalChain.
Raises:
ValueError: If the input variables are not as expected.
"""
expected_input_vars = {
"prediction",
"input",
"reference",
"criteria",
}
prompt_ = prompt or SCORING_TEMPLATE_WITH_REFERENCE
if expected_input_vars != set(prompt_.input_variables):
msg = (
f"Input variables should be {expected_input_vars}, "
f"but got {prompt_.input_variables}"
)
raise ValueError(msg)
criteria_ = resolve_criteria(criteria)
criteria_str = "\n".join(f"{k}: {v}" for k, v in criteria_.items()).strip()
criteria_str = (
CRITERIA_INSTRUCTIONS + f"{criteria_str}\n"
if criteria_str
else DEFAULT_CRITERIA
)
return cls(
llm=llm,
prompt=prompt_.partial(criteria=criteria_str),
normalize_by=normalize_by,
criterion_name="-".join(criteria_),
**kwargs,
)
|
LabeledScoreStringEvalChain
|
python
|
sqlalchemy__sqlalchemy
|
test/engine/test_reflection.py
|
{
"start": 76027,
"end": 80566
}
|
class ____(AssertsCompiledSQL, fixtures.TestBase):
__dialect__ = "default"
@testing.fixture
def tab_wo_fks(self, connection, metadata):
meta = metadata
foo = Table(
"foo",
meta,
*[
Column(n, sa.String(30))
for n in ["a", "b", "c", "d", "e", "f"]
],
)
meta.create_all(connection)
return foo
@testing.fixture
def tab_w_fks(self, connection, metadata):
Table(
"a",
metadata,
Column("x", Integer, primary_key=True),
test_needs_fk=True,
)
b = Table(
"b",
metadata,
Column("x", Integer, primary_key=True),
Column("q", Integer),
Column("p", Integer),
Column("r", Integer, ForeignKey("a.x")),
Column("s", Integer),
Column("t", Integer),
test_needs_fk=True,
)
metadata.create_all(connection)
return b
def test_include_columns(self, connection, tab_wo_fks):
foo = tab_wo_fks
meta2 = MetaData()
foo = Table(
"foo",
meta2,
autoload_with=connection,
include_columns=["b", "f", "e"],
)
# test that cols come back in original order
eq_([c.name for c in foo.c], ["b", "e", "f"])
for c in ("b", "f", "e"):
assert c in foo.c
for c in ("a", "c", "d"):
assert c not in foo.c
# test against a table which is already reflected
meta3 = MetaData()
foo = Table("foo", meta3, autoload_with=connection)
foo = Table(
"foo", meta3, include_columns=["b", "f", "e"], extend_existing=True
)
eq_([c.name for c in foo.c], ["b", "e", "f"])
for c in ("b", "f", "e"):
assert c in foo.c
for c in ("a", "c", "d"):
assert c not in foo.c
@testing.combinations(True, False, argnames="resolve_fks")
def test_include_cols_skip_fk_col(
self, connection, tab_w_fks, resolve_fks
):
"""test #8100"""
m2 = MetaData()
b2 = Table(
"b",
m2,
autoload_with=connection,
resolve_fks=resolve_fks,
include_columns=["x", "q", "p"],
)
eq_([c.name for c in b2.c], ["x", "q", "p"])
# no FK, whether or not resolve_fks was called
eq_(b2.constraints, {b2.primary_key})
b2a = b2.alias()
eq_([c.name for c in b2a.c], ["x", "q", "p"])
self.assert_compile(select(b2), "SELECT b.x, b.q, b.p FROM b")
self.assert_compile(
select(b2.alias()),
"SELECT b_1.x, b_1.q, b_1.p FROM b AS b_1",
)
def test_table_works_minus_fks(self, connection, tab_w_fks):
"""test #8101"""
m2 = MetaData()
b2 = Table(
"b",
m2,
autoload_with=connection,
resolve_fks=False,
)
eq_([c.name for c in b2.c], ["x", "q", "p", "r", "s", "t"])
b2a = b2.alias()
eq_([c.name for c in b2a.c], ["x", "q", "p", "r", "s", "t"])
self.assert_compile(
select(b2), "SELECT b.x, b.q, b.p, b.r, b.s, b.t FROM b"
)
b2a_1 = b2.alias()
self.assert_compile(
select(b2a_1),
"SELECT b_1.x, b_1.q, b_1.p, b_1.r, b_1.s, b_1.t FROM b AS b_1",
)
# reflecting the related table
a2 = Table("a", m2, autoload_with=connection)
# the existing alias doesn't know about it
with expect_raises_message(
sa.exc.InvalidRequestError,
"Foreign key associated with column 'Anonymous alias of b.r' "
"could not find "
"table 'a' with which to generate a foreign key to target "
"column 'x'",
):
select(b2a_1).join(a2).compile()
# can still join manually (needed to fix inside of util for this...)
self.assert_compile(
select(b2a_1).join(a2, b2a_1.c.r == a2.c.x),
"SELECT b_1.x, b_1.q, b_1.p, b_1.r, b_1.s, b_1.t "
"FROM b AS b_1 JOIN a ON b_1.r = a.x",
)
# a new alias does know about it however
self.assert_compile(
select(b2.alias()).join(a2),
"SELECT b_1.x, b_1.q, b_1.p, b_1.r, b_1.s, b_1.t "
"FROM b AS b_1 JOIN a ON a.x = b_1.r",
)
|
IncludeColsFksTest
|
python
|
kamyu104__LeetCode-Solutions
|
Python/count-distinct-integers-after-removing-zeros.py
|
{
"start": 48,
"end": 690
}
|
class ____(object):
def countDistinct(self, n):
"""
:type n: int
:rtype: int
"""
def reverse(n):
result, base = 0, 1
while n:
n, r = divmod(n, 10)
result = result*10+r
base *= 9
return result, base
m, base = reverse(n+1)
result = (base-9)//(9-1)
base //= 9
while base:
m, r = divmod(m, 10)
if r == 0:
break
result += (r-1)*base
base //= 9
return result
# Time: O(logn)
# Space: O(logn)
# combinatorics
|
Solution
|
python
|
tornadoweb__tornado
|
tornado/test/gen_test.py
|
{
"start": 20086,
"end": 22437
}
|
class ____(AsyncTestCase):
@gen_test
def test_timeout(self):
with self.assertRaises(gen.TimeoutError):
yield gen.with_timeout(datetime.timedelta(seconds=0.1), Future())
@gen_test
def test_completes_before_timeout(self):
future = Future() # type: Future[str]
self.io_loop.add_timeout(
datetime.timedelta(seconds=0.1), lambda: future.set_result("asdf")
)
result = yield gen.with_timeout(datetime.timedelta(seconds=3600), future)
self.assertEqual(result, "asdf")
@gen_test
def test_fails_before_timeout(self):
future = Future() # type: Future[str]
self.io_loop.add_timeout(
datetime.timedelta(seconds=0.1),
lambda: future.set_exception(ZeroDivisionError()),
)
with self.assertRaises(ZeroDivisionError):
yield gen.with_timeout(datetime.timedelta(seconds=3600), future)
@gen_test
def test_already_resolved(self):
future = Future() # type: Future[str]
future.set_result("asdf")
result = yield gen.with_timeout(datetime.timedelta(seconds=3600), future)
self.assertEqual(result, "asdf")
@gen_test
def test_timeout_concurrent_future(self):
# A concurrent future that does not resolve before the timeout.
with futures.ThreadPoolExecutor(1) as executor:
with self.assertRaises(gen.TimeoutError):
yield gen.with_timeout(
self.io_loop.time(), executor.submit(time.sleep, 0.1)
)
@gen_test
def test_completed_concurrent_future(self):
# A concurrent future that is resolved before we even submit it
# to with_timeout.
with futures.ThreadPoolExecutor(1) as executor:
def dummy():
pass
f = executor.submit(dummy)
f.result() # wait for completion
yield gen.with_timeout(datetime.timedelta(seconds=3600), f)
@gen_test
def test_normal_concurrent_future(self):
# A conccurrent future that resolves while waiting for the timeout.
with futures.ThreadPoolExecutor(1) as executor:
yield gen.with_timeout(
datetime.timedelta(seconds=3600),
executor.submit(lambda: time.sleep(0.01)),
)
|
WithTimeoutTest
|
python
|
PyCQA__pylint
|
tests/functional/i/invalid/invalid_index_returned.py
|
{
"start": 720,
"end": 852
}
|
class ____:
""" __index__ returns str """
def __index__(self): # [invalid-index-returned]
return "42"
|
SecondBadIndex
|
python
|
realpython__materials
|
arcade-platformer/arcade_platformer/14_enemies.py
|
{
"start": 7819,
"end": 21799
}
|
class ____(arcade.View):
def __init__(self) -> None:
super().__init__()
# These lists will hold different sets of sprites
self.coins = None
self.background = None
self.walls = None
self.ladders = None
self.goals = None
self.enemies = None
# One sprite for the player, no more is needed
self.player = None
# We need a physics engine as well
self.physics_engine = None
# Someplace to keep score
self.score = 0
# Which level are we on?
self.level = 1
# Load up our sounds here
self.coin_sound = arcade.load_sound(
str(ASSETS_PATH / "sounds" / "coin.wav")
)
self.jump_sound = arcade.load_sound(
str(ASSETS_PATH / "sounds" / "jump.wav")
)
self.victory_sound = arcade.load_sound(
str(ASSETS_PATH / "sounds" / "victory.wav")
)
# Check if a joystick is connected
joysticks = arcade.get_joysticks()
if joysticks:
# If so, get the first one
self.joystick = joysticks[0]
self.joystick.open()
else:
# If not, flag it so we won't use it
print("There are no Joysticks")
self.joystick = None
def setup(self) -> None:
"""Sets up the game for the current level"""
# Get the current map based on the level
map_name = f"platform_level_{self.level:02}.tmx"
map_path = ASSETS_PATH / map_name
# What are the names of the layers?
wall_layer = "ground"
coin_layer = "coins"
goal_layer = "goal"
background_layer = "background"
ladders_layer = "ladders"
# Load the current map
game_map = arcade.tilemap.read_tmx(str(map_path))
# Load the layers
self.background = arcade.tilemap.process_layer(
game_map, layer_name=background_layer, scaling=MAP_SCALING
)
self.goals = arcade.tilemap.process_layer(
game_map, layer_name=goal_layer, scaling=MAP_SCALING
)
self.walls = arcade.tilemap.process_layer(
game_map, layer_name=wall_layer, scaling=MAP_SCALING
)
self.ladders = arcade.tilemap.process_layer(
game_map, layer_name=ladders_layer, scaling=MAP_SCALING
)
self.coins = arcade.tilemap.process_layer(
game_map, layer_name=coin_layer, scaling=MAP_SCALING
)
# Set the background color
background_color = arcade.color.FRESH_AIR
if game_map.background_color:
background_color = game_map.background_color
arcade.set_background_color(background_color)
# Find the edge of the map to control viewport scrolling
self.map_width = (
game_map.map_size.width - 1
) * game_map.tile_size.width
# Create the player sprite, if they're not already setup
if not self.player:
self.player = self.create_player_sprite()
# Move the player sprite back to the beginning
self.player.center_x = PLAYER_START_X
self.player.center_y = PLAYER_START_Y
self.player.change_x = 0
self.player.change_y = 0
# Setup our enemies
self.enemies = self.create_enemy_sprites()
# Reset the viewport
self.view_left = 0
self.view_bottom = 0
# Load the physics engine for this map
self.physics_engine = arcade.PhysicsEnginePlatformer(
player_sprite=self.player,
platforms=self.walls,
gravity_constant=GRAVITY,
ladders=self.ladders,
)
def create_enemy_sprites(self) -> arcade.SpriteList:
"""Creates enemy sprites appropriate for the current level
Returns:
A Sprite List of enemies"""
enemies = arcade.SpriteList()
# Only enemies on level 2
if self.level == 2:
enemies.append(Enemy(1464, 320))
return enemies
def create_player_sprite(self) -> arcade.AnimatedWalkingSprite:
# Where are the player images stored?
texture_path = ASSETS_PATH / "images" / "player"
# Setup the appropriate textures
walking_paths = [
texture_path / f"alienGreen_walk{x}.png" for x in (1, 2)
]
climbing_paths = [
texture_path / f"alienGreen_climb{x}.png" for x in (1, 2)
]
standing_path = texture_path / "alienGreen_stand.png"
# Load them all now
walking_right_textures = [
arcade.load_texture(texture) for texture in walking_paths
]
walking_left_textures = [
arcade.load_texture(texture, mirrored=True)
for texture in walking_paths
]
walking_up_textures = [
arcade.load_texture(texture) for texture in climbing_paths
]
walking_down_textures = [
arcade.load_texture(texture) for texture in climbing_paths
]
standing_right_textures = [arcade.load_texture(standing_path)]
standing_left_textures = [
arcade.load_texture(standing_path, mirrored=True)
]
# Create the sprite
player = arcade.AnimatedWalkingSprite()
# Add the proper textures
player.stand_left_textures = standing_left_textures
player.stand_right_textures = standing_right_textures
player.walk_left_textures = walking_left_textures
player.walk_right_textures = walking_right_textures
player.walk_up_textures = walking_up_textures
player.walk_down_textures = walking_down_textures
# Set the player defaults
player.center_x = PLAYER_START_X
player.center_y = PLAYER_START_Y
player.state = arcade.FACE_RIGHT
# Set the initial texture
player.texture = player.stand_right_textures[0]
return player
def on_key_press(self, key: int, modifiers: int) -> None:
"""Arguments:
key -- Which key was pressed
modifiers -- Which modifiers were down at the time
"""
# Check for player left/right movement
if key in [arcade.key.LEFT, arcade.key.J]:
self.player.change_x = -PLAYER_MOVE_SPEED
elif key in [arcade.key.RIGHT, arcade.key.L]:
self.player.change_x = PLAYER_MOVE_SPEED
# Check if player can climb up or down
elif key in [arcade.key.UP, arcade.key.I]:
if self.physics_engine.is_on_ladder():
self.player.change_y = PLAYER_MOVE_SPEED
elif key in [arcade.key.DOWN, arcade.key.K]:
if self.physics_engine.is_on_ladder():
self.player.change_y = -PLAYER_MOVE_SPEED
# Check if we can jump
elif key == arcade.key.SPACE:
if self.physics_engine.can_jump():
self.player.change_y = PLAYER_JUMP_SPEED
# Play the jump sound
arcade.play_sound(self.jump_sound)
# Did the user want to pause?
elif key == arcade.key.ESCAPE:
# Pass the current view to preserve this view's state
pause = PauseView(self)
self.window.show_view(pause)
def on_key_release(self, key: int, modifiers: int) -> None:
"""Arguments:
key -- The key which was released
modifiers -- Which modifiers were down at the time
"""
# Check for player left/right movement
if key in [
arcade.key.LEFT,
arcade.key.J,
arcade.key.RIGHT,
arcade.key.L,
]:
self.player.change_x = 0
# Check if player can climb up or down
elif key in [
arcade.key.UP,
arcade.key.I,
arcade.key.DOWN,
arcade.key.K,
]:
if self.physics_engine.is_on_ladder():
self.player.change_y = 0
def on_update(self, delta_time: float) -> None:
"""Updates the position of all game objects
Arguments:
delta_time {float} -- How much time since the last call
"""
# First, check for joystick movement
if self.joystick:
# Check if we're in the dead zone
if abs(self.joystick.x) > DEAD_ZONE:
self.player.change_x = self.joystick.x * PLAYER_MOVE_SPEED
else:
self.player.change_x = 0
if abs(self.joystick.y) > DEAD_ZONE:
if self.physics_engine.is_on_ladder():
self.player.change_y = self.joystick.y * PLAYER_MOVE_SPEED
else:
self.player.change_y = 0
# Did the user press the jump button?
if self.joystick.buttons[0]:
if self.physics_engine.can_jump():
self.player.change_y = PLAYER_JUMP_SPEED
# Play the jump sound
arcade.play_sound(self.jump_sound)
# Update the player animation
self.player.update_animation(delta_time)
# Are there enemies? Update them as well
self.enemies.update_animation(delta_time)
for enemy in self.enemies:
enemy.center_x += enemy.change_x
walls_hit = arcade.check_for_collision_with_list(
sprite=enemy, sprite_list=self.walls
)
if walls_hit:
enemy.change_x *= -1
# Update player movement based on the physics engine
self.physics_engine.update()
# Restrict user movement so they can't walk off screen
if self.player.left < 0:
self.player.left = 0
# Check if we've picked up a coin
coins_hit = arcade.check_for_collision_with_list(
sprite=self.player, sprite_list=self.coins
)
for coin in coins_hit:
# Add the coin score to our score
self.score += int(coin.properties["point_value"])
# Play the coin sound
arcade.play_sound(self.coin_sound)
# Remove the coin
coin.remove_from_sprite_lists()
# Has Roz collided with an enemy?
enemies_hit = arcade.check_for_collision_with_list(
sprite=self.player, sprite_list=self.enemies
)
if enemies_hit:
self.setup()
title_view = TitleView()
window.show_view(title_view)
# Now check if we're at the ending goal
goals_hit = arcade.check_for_collision_with_list(
sprite=self.player, sprite_list=self.goals
)
if goals_hit:
# Play the victory sound
self.victory_sound.play()
# Setup the next level
self.level += 1
self.setup()
# Set the viewport, scrolling if necessary
self.scroll_viewport()
def scroll_viewport(self) -> None:
"""Scrolls the viewport when the player gets close to the edges"""
# Scroll left
# Find the current left boundary
left_boundary = self.view_left + LEFT_VIEWPORT_MARGIN
# Are we to the left of this boundary? Then we should scroll left
if self.player.left < left_boundary:
self.view_left -= left_boundary - self.player.left
# But don't scroll past the left edge of the map
if self.view_left < 0:
self.view_left = 0
# Scroll right
# Find the current right boundary
right_boundary = self.view_left + SCREEN_WIDTH - RIGHT_VIEWPORT_MARGIN
# Are we right of this boundary? Then we should scroll right
if self.player.right > right_boundary:
self.view_left += self.player.right - right_boundary
# Don't scroll past the right edge of the map
if self.view_left > self.map_width - SCREEN_WIDTH:
self.view_left = self.map_width - SCREEN_WIDTH
# Scroll up
top_boundary = self.view_bottom + SCREEN_HEIGHT - TOP_VIEWPORT_MARGIN
if self.player.top > top_boundary:
self.view_bottom += self.player.top - top_boundary
# Scroll down
bottom_boundary = self.view_bottom + BOTTOM_VIEWPORT_MARGIN
if self.player.bottom < bottom_boundary:
self.view_bottom -= bottom_boundary - self.player.bottom
# Only scroll to integers. Otherwise we end up with pixels that
# don't line up on the screen
self.view_bottom = int(self.view_bottom)
self.view_left = int(self.view_left)
# Do the scrolling
arcade.set_viewport(
left=self.view_left,
right=SCREEN_WIDTH + self.view_left,
bottom=self.view_bottom,
top=SCREEN_HEIGHT + self.view_bottom,
)
def on_draw(self) -> None:
arcade.start_render()
# Draw all the sprites
self.background.draw()
self.walls.draw()
self.coins.draw()
self.goals.draw()
self.ladders.draw()
self.enemies.draw()
self.player.draw()
# Draw the score in the lower left
score_text = f"Score: {self.score}"
# First a black background for a shadow effect
arcade.draw_text(
score_text,
start_x=10 + self.view_left,
start_y=10 + self.view_bottom,
color=arcade.csscolor.BLACK,
font_size=40,
)
# Now in white slightly shifted
arcade.draw_text(
score_text,
start_x=15 + self.view_left,
start_y=15 + self.view_bottom,
color=arcade.csscolor.WHITE,
font_size=40,
)
if __name__ == "__main__":
window = arcade.Window(
width=SCREEN_WIDTH, height=SCREEN_HEIGHT, title=SCREEN_TITLE
)
title_view = TitleView()
window.show_view(title_view)
arcade.run()
|
PlatformerView
|
python
|
keras-team__keras
|
keras/src/callbacks/backup_and_restore_test.py
|
{
"start": 206,
"end": 902
}
|
class ____(callbacks.Callback):
"""A callback to intentionally interrupt training."""
def __init__(self, steps_int, epoch_int):
self.batch_count = 0
self.epoch_count = 0
self.steps_int = steps_int
self.epoch_int = epoch_int
def on_epoch_end(self, epoch, log=None):
self.epoch_count += 1
if self.epoch_int is not None and self.epoch_count == self.epoch_int:
raise RuntimeError("EpochInterruption")
def on_batch_end(self, batch, logs=None):
self.batch_count += 1
if self.steps_int is not None and self.batch_count == self.steps_int:
raise RuntimeError("StepsInterruption")
|
InterruptingCallback
|
python
|
pypa__setuptools
|
setuptools/_vendor/jaraco/collections/__init__.py
|
{
"start": 22828,
"end": 23175
}
|
class ____:
"""
A collection "containing" every possible thing.
>>> 'foo' in Everything()
True
>>> import random
>>> random.randint(1, 999) in Everything()
True
>>> random.choice([None, 'foo', 42, ('a', 'b', 'c')]) in Everything()
True
"""
def __contains__(self, other):
return True
|
Everything
|
python
|
run-llama__llama_index
|
llama-index-core/tests/agent/workflow/test_agent_with_structured_output.py
|
{
"start": 663,
"end": 3554
}
|
class ____(LLM):
def __init__(self, responses: List[ChatMessage], structured_response: str):
super().__init__()
self._responses = responses
self._structured_response = structured_response
self._response_index = 0
@property
def metadata(self) -> LLMMetadata:
return LLMMetadata(is_function_calling_model=True)
async def astream_chat(
self, messages: List[ChatMessage], **kwargs: Any
) -> ChatResponseAsyncGen:
response_msg = None
if self._responses:
response_msg = self._responses[self._response_index]
self._response_index = (self._response_index + 1) % len(self._responses)
async def _gen():
if response_msg:
yield ChatResponse(
message=response_msg,
delta=response_msg.content,
raw={"content": response_msg.content},
)
return _gen()
async def astream_chat_with_tools(
self, tools: List[Any], chat_history: List[ChatMessage], **kwargs: Any
) -> ChatResponseAsyncGen:
response_msg = None
if self._responses:
response_msg = self._responses[self._response_index]
self._response_index = (self._response_index + 1) % len(self._responses)
async def _gen():
if response_msg:
yield ChatResponse(
message=response_msg,
delta=response_msg.content,
raw={"content": response_msg.content},
)
return _gen()
def get_tool_calls_from_response(
self, response: ChatResponse, **kwargs: Any
) -> List[ToolSelection]:
return response.message.additional_kwargs.get("tool_calls", [])
@override
async def astructured_predict(
self,
output_cls: Type[Model],
prompt: PromptTemplate,
llm_kwargs: Optional[Dict[str, Any]] = None,
**prompt_args: Any,
) -> Model:
return output_cls.model_validate_json(self._structured_response)
@override
async def structured_predict(
self,
output_cls: Type[Model],
prompt: PromptTemplate,
llm_kwargs: Optional[Dict[str, Any]] = None,
**prompt_args: Any,
) -> Model:
return output_cls.model_validate_json(self._structured_response)
async def achat(self, *args, **kwargs):
pass
def chat(self, *args, **kwargs):
pass
def stream_chat(self, *args, **kwargs):
pass
def complete(self, *args, **kwargs):
pass
async def acomplete(self, *args, **kwargs):
pass
def stream_complete(self, *args, **kwargs):
pass
async def astream_complete(self, *args, **kwargs):
pass
def _prepare_chat_with_tools(self, *args, **kwargs):
return {}
|
TestLLM
|
python
|
apache__airflow
|
airflow-core/tests/unit/api_fastapi/core_api/routes/public/test_connections.py
|
{
"start": 36589,
"end": 37712
}
|
class ____(TestConnectionEndpoint):
def test_should_respond_204(self, test_client, session):
response = test_client.post("/connections/defaults")
assert response.status_code == 204
assert response.content == b""
_check_last_log(session, dag_id=None, event="create_default_connections", logical_date=None)
def test_should_respond_401(self, unauthenticated_test_client):
response = unauthenticated_test_client.post("/connections/defaults")
assert response.status_code == 401
def test_should_respond_403(self, unauthorized_test_client):
response = unauthorized_test_client.post("/connections/defaults")
assert response.status_code == 403
@mock.patch("airflow.api_fastapi.core_api.routes.public.connections.db_create_default_connections")
def test_should_call_db_create_default_connections(self, mock_db_create_default_connections, test_client):
response = test_client.post("/connections/defaults")
assert response.status_code == 204
mock_db_create_default_connections.assert_called_once()
|
TestCreateDefaultConnections
|
python
|
huggingface__transformers
|
tests/models/opt/test_modeling_opt.py
|
{
"start": 7241,
"end": 13707
}
|
class ____(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
all_model_classes = (
(OPTModel, OPTForCausalLM, OPTForSequenceClassification, OPTForQuestionAnswering)
if is_torch_available()
else ()
)
pipeline_model_mapping = (
{
"feature-extraction": OPTModel,
"question-answering": OPTForQuestionAnswering,
"text-classification": OPTForSequenceClassification,
"text-generation": OPTForCausalLM,
"zero-shot": OPTForSequenceClassification,
}
if is_torch_available()
else {}
)
is_encoder_decoder = False
test_missing_keys = False
# TODO: Fix the failed tests
def is_pipeline_test_to_skip(
self,
pipeline_test_case_name,
config_class,
model_architecture,
tokenizer_name,
image_processor_name,
feature_extractor_name,
processor_name,
):
if (
pipeline_test_case_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith("Fast")
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def setUp(self):
self.model_tester = OPTModelTester(self)
self.config_tester = ConfigTester(self, config_class=OPTConfig)
def test_config(self):
self.config_tester.run_common_tests()
def test_save_load_strict(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
model = model_class(config)
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(tmpdirname)
model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True)
self.assertEqual(info["missing_keys"], set())
def test_decoder_model_past_with_large_inputs(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs)
def test_inputs_embeds(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in (OPTModel,):
model = model_class(config)
model.to(torch_device)
model.eval()
inputs = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class))
if not self.is_encoder_decoder:
input_ids = inputs["input_ids"]
del inputs["input_ids"]
else:
encoder_input_ids = inputs["input_ids"]
decoder_input_ids = inputs.get("decoder_input_ids", encoder_input_ids)
del inputs["input_ids"]
inputs.pop("decoder_input_ids", None)
wte = model.get_input_embeddings()
if not self.is_encoder_decoder:
inputs["inputs_embeds"] = wte(input_ids)
else:
inputs["inputs_embeds"] = wte(encoder_input_ids)
inputs["decoder_inputs_embeds"] = wte(decoder_input_ids)
with torch.no_grad():
model(**inputs)[0]
@require_torch_fp16
def test_generate_fp16(self):
config, input_dict = self.model_tester.prepare_config_and_inputs()
input_ids = input_dict["input_ids"]
attention_mask = input_ids.ne(1).to(torch_device)
model = OPTForCausalLM(config).eval().to(torch_device)
model.half()
model.generate(input_ids, attention_mask=attention_mask)
model.generate(num_beams=4, do_sample=True, early_stopping=False, num_return_sequences=3)
def test_opt_sequence_classification_model(self):
config, input_dict = self.model_tester.prepare_config_and_inputs()
config.num_labels = 3
input_ids = input_dict["input_ids"]
attention_mask = input_ids.ne(1).to(torch_device)
sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
model = OPTForSequenceClassification(config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
def test_opt_sequence_classification_model_for_multi_label(self):
config, input_dict = self.model_tester.prepare_config_and_inputs()
config.num_labels = 3
config.problem_type = "multi_label_classification"
input_ids = input_dict["input_ids"]
attention_mask = input_ids.ne(1).to(torch_device)
sequence_labels = ids_tensor(
[self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size
).to(torch.float)
model = OPTForSequenceClassification(config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
@unittest.skip(reason="Does not work on the tiny model as we keep hitting edge cases.")
def test_model_parallelism(self):
super().test_model_parallelism()
def assert_tensors_close(a, b, atol=1e-12, prefix=""):
"""If tensors have different shapes, different values or a and b are not both tensors, raise a nice Assertion error."""
if a is None and b is None:
return True
try:
if torch.allclose(a, b, atol=atol):
return True
raise Exception
except Exception:
pct_different = (torch.gt((a - b).abs(), atol)).float().mean().item()
if a.numel() > 100:
msg = f"tensor values are {pct_different:.1%} percent different."
else:
msg = f"{a} != {b}"
if prefix:
msg = prefix + ": " + msg
raise AssertionError(msg)
def _long_tensor(tok_lst):
return torch.tensor(tok_lst, dtype=torch.long, device=torch_device)
@require_torch
|
OPTModelTest
|
python
|
zarr-developers__zarr-python
|
src/zarr/core/dtype/npy/int.py
|
{
"start": 26052,
"end": 31915
}
|
class ____(BaseInt[np.dtypes.Int32DType, np.int32], HasEndianness):
"""
A Zarr data type for arrays containing 32-bit signed integers.
Wraps the [`np.dtypes.Int32DType`][numpy.dtypes.Int32DType] data type. Scalars for this data type are instances of
[`np.int32`][numpy.int32].
Attributes
----------
dtype_cls : np.dtypes.Int32DType
The class of the underlying NumPy dtype.
References
----------
This class implements the 32-bit signed integer data type defined in Zarr V2 and V3.
See the [Zarr V2](https://github.com/zarr-developers/zarr-specs/blob/main/docs/v2/v2.0.rst#data-type-encoding) and [Zarr V3](https://github.com/zarr-developers/zarr-specs/blob/main/docs/v3/data-types/index.rst) specification documents for details.
"""
dtype_cls = np.dtypes.Int32DType
_zarr_v3_name: ClassVar[Literal["int32"]] = "int32"
_zarr_v2_names: ClassVar[tuple[Literal[">i4"], Literal["<i4"]]] = (">i4", "<i4")
@classmethod
def _check_native_dtype(cls: type[Self], dtype: TBaseDType) -> TypeGuard[np.dtypes.Int32DType]:
"""
A type guard that checks if the input is assignable to the type of ``cls.dtype_class``
This method is overridden for this particular data type because of a Windows-specific issue
where np.dtype('i') creates an instance of ``np.dtypes.IntDType``, rather than an
instance of ``np.dtypes.Int32DType``, even though both represent 32-bit signed integers.
Parameters
----------
dtype : TDType
The dtype to check.
Returns
-------
Bool
True if the dtype matches, False otherwise.
"""
return super()._check_native_dtype(dtype) or dtype == np.dtypes.Int32DType()
@classmethod
def from_native_dtype(cls: type[Self], dtype: TBaseDType) -> Self:
"""
Create an Int32 from a np.dtype('int32') instance.
Parameters
----------
dtype : TBaseDType
The np.dtype('int32') instance.
Returns
-------
Self
An instance of this data type.
Raises
------
DataTypeValidationError
If the input JSON is not a valid representation of this class Int32.
"""
if cls._check_native_dtype(dtype):
return cls(endianness=get_endianness_from_numpy_dtype(dtype))
raise DataTypeValidationError(
f"Invalid data type: {dtype}. Expected an instance of {cls.dtype_cls}"
)
def to_native_dtype(self: Self) -> np.dtypes.Int32DType:
"""
Convert the Int32 instance to a np.dtype('int32') instance.
Returns
-------
np.dtypes.Int32DType
The np.dtype('int32') instance.
"""
byte_order = endianness_to_numpy_str(self.endianness)
return self.dtype_cls().newbyteorder(byte_order)
@classmethod
def _from_json_v2(cls, data: DTypeJSON) -> Self:
"""
Create an Int32 from Zarr V2-flavored JSON.
Parameters
----------
data : DTypeJSON
The JSON data.
Returns
-------
Self
An instance of this data type.
Raises
------
DataTypeValidationError
If the input JSON is not a valid representation of this class Int32.
"""
if cls._check_json_v2(data):
# Going via NumPy ensures that we get the endianness correct without
# annoying string parsing.
name = data["name"]
return cls.from_native_dtype(np.dtype(name))
msg = f"Invalid JSON representation of {cls.__name__}. Got {data!r}, expected one of the strings {cls._zarr_v2_names!r}."
raise DataTypeValidationError(msg)
@classmethod
def _from_json_v3(cls, data: DTypeJSON) -> Self:
"""
Create an Int32 from Zarr V3-flavored JSON.
Parameters
----------
data : DTypeJSON
The JSON data.
Returns
-------
Self
An instance of this data type.
Raises
------
DataTypeValidationError
If the input JSON is not a valid representation of this class Int32.
"""
if cls._check_json_v3(data):
return cls()
msg = f"Invalid JSON representation of {cls.__name__}. Got {data!r}, expected the string {cls._zarr_v3_name!r}"
raise DataTypeValidationError(msg)
@overload
def to_json(self, zarr_format: Literal[2]) -> DTypeConfig_V2[Literal[">i4", "<i4"], None]: ...
@overload
def to_json(self, zarr_format: Literal[3]) -> Literal["int32"]: ...
def to_json(
self, zarr_format: ZarrFormat
) -> DTypeConfig_V2[Literal[">i4", "<i4"], None] | Literal["int32"]:
"""
Serialize this ZDType to v2- or v3-flavored JSON
Parameters
----------
zarr_format : ZarrFormat
The Zarr format version (2 or 3).
Returns
-------
DTypeConfig_V2[Literal[">i4", "<i4"], None] or Literal["int32"]
The JSON representation of the Int32 instance.
Raises
------
ValueError
If the zarr_format is not 2 or 3.
"""
if zarr_format == 2:
name = self.to_native_dtype().str
return {"name": name, "object_codec_id": None}
elif zarr_format == 3:
return self._zarr_v3_name
raise ValueError(f"zarr_format must be 2 or 3, got {zarr_format}") # pragma: no cover
@property
def item_size(self) -> int:
"""
The size of a single scalar in bytes.
Returns
-------
int
The size of a single scalar in bytes.
"""
return 4
@dataclass(frozen=True, kw_only=True)
|
Int32
|
python
|
sqlalchemy__sqlalchemy
|
test/orm/test_options.py
|
{
"start": 2658,
"end": 3471
}
|
class ____:
def _make_path(self, path):
r = []
for i, item in enumerate(path):
if i % 2 == 0:
item = inspect(item)
else:
if isinstance(item, str):
item = inspect(r[-1]).mapper.attrs[item]
r.append(item)
return tuple(r)
def _make_path_registry(self, path):
return orm_util.PathRegistry.coerce(self._make_path(path))
def _assert_path_result(self, opt, q, paths):
attr = {}
compile_state = q._compile_state()
compile_state.attributes = attr = {}
opt.process_compile_state(compile_state)
assert_paths = [k[1] for k in attr]
eq_(
{p for p in assert_paths},
{self._make_path(p) for p in paths},
)
|
PathTest
|
python
|
openai__gym
|
gym/spaces/box.py
|
{
"start": 939,
"end": 12732
}
|
class ____(Space[np.ndarray]):
r"""A (possibly unbounded) box in :math:`\mathbb{R}^n`.
Specifically, a Box represents the Cartesian product of n closed intervals.
Each interval has the form of one of :math:`[a, b]`, :math:`(-\infty, b]`,
:math:`[a, \infty)`, or :math:`(-\infty, \infty)`.
There are two common use cases:
* Identical bound for each dimension::
>>> Box(low=-1.0, high=2.0, shape=(3, 4), dtype=np.float32)
Box(3, 4)
* Independent bound for each dimension::
>>> Box(low=np.array([-1.0, -2.0]), high=np.array([2.0, 4.0]), dtype=np.float32)
Box(2,)
"""
def __init__(
self,
low: Union[SupportsFloat, np.ndarray],
high: Union[SupportsFloat, np.ndarray],
shape: Optional[Sequence[int]] = None,
dtype: Type = np.float32,
seed: Optional[Union[int, np.random.Generator]] = None,
):
r"""Constructor of :class:`Box`.
The argument ``low`` specifies the lower bound of each dimension and ``high`` specifies the upper bounds.
I.e., the space that is constructed will be the product of the intervals :math:`[\text{low}[i], \text{high}[i]]`.
If ``low`` (or ``high``) is a scalar, the lower bound (or upper bound, respectively) will be assumed to be
this value across all dimensions.
Args:
low (Union[SupportsFloat, np.ndarray]): Lower bounds of the intervals.
high (Union[SupportsFloat, np.ndarray]): Upper bounds of the intervals.
shape (Optional[Sequence[int]]): The shape is inferred from the shape of `low` or `high` `np.ndarray`s with
`low` and `high` scalars defaulting to a shape of (1,)
dtype: The dtype of the elements of the space. If this is an integer type, the :class:`Box` is essentially a discrete space.
seed: Optionally, you can use this argument to seed the RNG that is used to sample from the space.
Raises:
ValueError: If no shape information is provided (shape is None, low is None and high is None) then a
value error is raised.
"""
assert (
dtype is not None
), "Box dtype must be explicitly provided, cannot be None."
self.dtype = np.dtype(dtype)
# determine shape if it isn't provided directly
if shape is not None:
assert all(
np.issubdtype(type(dim), np.integer) for dim in shape
), f"Expect all shape elements to be an integer, actual type: {tuple(type(dim) for dim in shape)}"
shape = tuple(int(dim) for dim in shape) # This changes any np types to int
elif isinstance(low, np.ndarray):
shape = low.shape
elif isinstance(high, np.ndarray):
shape = high.shape
elif is_float_integer(low) and is_float_integer(high):
shape = (1,)
else:
raise ValueError(
f"Box shape is inferred from low and high, expect their types to be np.ndarray, an integer or a float, actual type low: {type(low)}, high: {type(high)}"
)
# Capture the boundedness information before replacing np.inf with get_inf
_low = np.full(shape, low, dtype=float) if is_float_integer(low) else low
self.bounded_below = -np.inf < _low
_high = np.full(shape, high, dtype=float) if is_float_integer(high) else high
self.bounded_above = np.inf > _high
low = _broadcast(low, dtype, shape, inf_sign="-") # type: ignore
high = _broadcast(high, dtype, shape, inf_sign="+") # type: ignore
assert isinstance(low, np.ndarray)
assert (
low.shape == shape
), f"low.shape doesn't match provided shape, low.shape: {low.shape}, shape: {shape}"
assert isinstance(high, np.ndarray)
assert (
high.shape == shape
), f"high.shape doesn't match provided shape, high.shape: {high.shape}, shape: {shape}"
self._shape: Tuple[int, ...] = shape
low_precision = get_precision(low.dtype)
high_precision = get_precision(high.dtype)
dtype_precision = get_precision(self.dtype)
if min(low_precision, high_precision) > dtype_precision: # type: ignore
logger.warn(f"Box bound precision lowered by casting to {self.dtype}")
self.low = low.astype(self.dtype)
self.high = high.astype(self.dtype)
self.low_repr = _short_repr(self.low)
self.high_repr = _short_repr(self.high)
super().__init__(self.shape, self.dtype, seed)
@property
def shape(self) -> Tuple[int, ...]:
"""Has stricter type than gym.Space - never None."""
return self._shape
@property
def is_np_flattenable(self):
"""Checks whether this space can be flattened to a :class:`spaces.Box`."""
return True
def is_bounded(self, manner: str = "both") -> bool:
"""Checks whether the box is bounded in some sense.
Args:
manner (str): One of ``"both"``, ``"below"``, ``"above"``.
Returns:
If the space is bounded
Raises:
ValueError: If `manner` is neither ``"both"`` nor ``"below"`` or ``"above"``
"""
below = bool(np.all(self.bounded_below))
above = bool(np.all(self.bounded_above))
if manner == "both":
return below and above
elif manner == "below":
return below
elif manner == "above":
return above
else:
raise ValueError(
f"manner is not in {{'below', 'above', 'both'}}, actual value: {manner}"
)
def sample(self, mask: None = None) -> np.ndarray:
r"""Generates a single random sample inside the Box.
In creating a sample of the box, each coordinate is sampled (independently) from a distribution
that is chosen according to the form of the interval:
* :math:`[a, b]` : uniform distribution
* :math:`[a, \infty)` : shifted exponential distribution
* :math:`(-\infty, b]` : shifted negative exponential distribution
* :math:`(-\infty, \infty)` : normal distribution
Args:
mask: A mask for sampling values from the Box space, currently unsupported.
Returns:
A sampled value from the Box
"""
if mask is not None:
raise gym.error.Error(
f"Box.sample cannot be provided a mask, actual value: {mask}"
)
high = self.high if self.dtype.kind == "f" else self.high.astype("int64") + 1
sample = np.empty(self.shape)
# Masking arrays which classify the coordinates according to interval
# type
unbounded = ~self.bounded_below & ~self.bounded_above
upp_bounded = ~self.bounded_below & self.bounded_above
low_bounded = self.bounded_below & ~self.bounded_above
bounded = self.bounded_below & self.bounded_above
# Vectorized sampling by interval type
sample[unbounded] = self.np_random.normal(size=unbounded[unbounded].shape)
sample[low_bounded] = (
self.np_random.exponential(size=low_bounded[low_bounded].shape)
+ self.low[low_bounded]
)
sample[upp_bounded] = (
-self.np_random.exponential(size=upp_bounded[upp_bounded].shape)
+ self.high[upp_bounded]
)
sample[bounded] = self.np_random.uniform(
low=self.low[bounded], high=high[bounded], size=bounded[bounded].shape
)
if self.dtype.kind == "i":
sample = np.floor(sample)
return sample.astype(self.dtype)
def contains(self, x) -> bool:
"""Return boolean specifying if x is a valid member of this space."""
if not isinstance(x, np.ndarray):
logger.warn("Casting input x to numpy array.")
try:
x = np.asarray(x, dtype=self.dtype)
except (ValueError, TypeError):
return False
return bool(
np.can_cast(x.dtype, self.dtype)
and x.shape == self.shape
and np.all(x >= self.low)
and np.all(x <= self.high)
)
def to_jsonable(self, sample_n):
"""Convert a batch of samples from this space to a JSONable data type."""
return np.array(sample_n).tolist()
def from_jsonable(self, sample_n: Sequence[Union[float, int]]) -> List[np.ndarray]:
"""Convert a JSONable data type to a batch of samples from this space."""
return [np.asarray(sample) for sample in sample_n]
def __repr__(self) -> str:
"""A string representation of this space.
The representation will include bounds, shape and dtype.
If a bound is uniform, only the corresponding scalar will be given to avoid redundant and ugly strings.
Returns:
A representation of the space
"""
return f"Box({self.low_repr}, {self.high_repr}, {self.shape}, {self.dtype})"
def __eq__(self, other) -> bool:
"""Check whether `other` is equivalent to this instance. Doesn't check dtype equivalence."""
return (
isinstance(other, Box)
and (self.shape == other.shape)
# and (self.dtype == other.dtype)
and np.allclose(self.low, other.low)
and np.allclose(self.high, other.high)
)
def __setstate__(self, state: Dict):
"""Sets the state of the box for unpickling a box with legacy support."""
super().__setstate__(state)
# legacy support through re-adding "low_repr" and "high_repr" if missing from pickled state
if not hasattr(self, "low_repr"):
self.low_repr = _short_repr(self.low)
if not hasattr(self, "high_repr"):
self.high_repr = _short_repr(self.high)
def get_inf(dtype, sign: str) -> SupportsFloat:
"""Returns an infinite that doesn't break things.
Args:
dtype: An `np.dtype`
sign (str): must be either `"+"` or `"-"`
Returns:
Gets an infinite value with the sign and dtype
Raises:
TypeError: Unknown sign, use either '+' or '-'
ValueError: Unknown dtype for infinite bounds
"""
if np.dtype(dtype).kind == "f":
if sign == "+":
return np.inf
elif sign == "-":
return -np.inf
else:
raise TypeError(f"Unknown sign {sign}, use either '+' or '-'")
elif np.dtype(dtype).kind == "i":
if sign == "+":
return np.iinfo(dtype).max - 2
elif sign == "-":
return np.iinfo(dtype).min + 2
else:
raise TypeError(f"Unknown sign {sign}, use either '+' or '-'")
else:
raise ValueError(f"Unknown dtype {dtype} for infinite bounds")
def get_precision(dtype) -> SupportsFloat:
"""Get precision of a data type."""
if np.issubdtype(dtype, np.floating):
return np.finfo(dtype).precision
else:
return np.inf
def _broadcast(
value: Union[SupportsFloat, np.ndarray],
dtype,
shape: Tuple[int, ...],
inf_sign: str,
) -> np.ndarray:
"""Handle infinite bounds and broadcast at the same time if needed."""
if is_float_integer(value):
value = get_inf(dtype, inf_sign) if np.isinf(value) else value # type: ignore
value = np.full(shape, value, dtype=dtype)
else:
assert isinstance(value, np.ndarray)
if np.any(np.isinf(value)):
# create new array with dtype, but maintain old one to preserve np.inf
temp = value.astype(dtype)
temp[np.isinf(value)] = get_inf(dtype, inf_sign)
value = temp
return value
|
Box
|
python
|
pytorch__pytorch
|
test/distributed/fsdp/test_wrap.py
|
{
"start": 35587,
"end": 39531
}
|
class ____(TestCase):
def test_validate_frozen_params(self):
"""Tests the method ``_validate_frozen_params()``."""
for use_orig_params in [True, False]:
self._test_validate_frozen_params(use_orig_params)
def _test_validate_frozen_params(self, use_orig_params: bool):
model = LoraModel()
# Wrap only LoRA modules
modules_to_wrap = {
module
for module_name, module in model.named_modules()
if "lora_A" in module_name or "lora_B" in module_name
}
_validate_frozen_params(model, modules_to_wrap, set(), use_orig_params)
# Additionally wrap attention
for module in model.modules():
if isinstance(module, LoraAttention):
modules_to_wrap.add(module)
_validate_frozen_params(model, modules_to_wrap, set(), use_orig_params)
# Additionally wrap decoders
for module in model.modules():
if isinstance(module, LoraDecoder):
modules_to_wrap.add(module)
_validate_frozen_params(model, modules_to_wrap, set(), use_orig_params)
# Do not wrap the LoRA-A modules (meaning mixed frozen/non-frozen)
for module_name, module in model.named_modules():
if "lora_A" in module_name:
modules_to_wrap.remove(module)
regex = "layers.0.attn has both parameters with requires_grad=True and False."
if use_orig_params:
# Wrapping the attention manages all parameters except those from
# the LoRA-B module, which is separately wrapped and all nonfrozen
lorab_numel = sum(
p.numel() for p in model.layers[0].attn.lora_B.parameters()
)
attn_frozen_param_numel = sum(
p.numel()
for p in model.layers[0].attn.parameters()
if not p.requires_grad
)
attn_nonfrozen_param_numel = (
sum(
p.numel()
for p in model.layers[0].attn.parameters()
if p.requires_grad
)
- lorab_numel
)
attn_total_param_numel = (
attn_frozen_param_numel + attn_nonfrozen_param_numel
)
regex += (
" We do not recommend wrapping such modules since the "
r"gradient memory usage will be higher than expected \("
f"{attn_total_param_numel} numel instead of {attn_nonfrozen_param_numel} numel "
r"before sharding via reduce-scatter\). "
)
else:
regex += " FSDP does not support wrapping such modules when use_orig_params=False. "
regex += "If possible, wrap the frozen parameters with FSDP separately.\n"
regex += (
"The following parameters have requires_grad=True:\n"
r"\['layers.0.attn.lora_A.weight'\]\n"
"The following parameters have requires_grad=False:\n"
r"\['layers.0.attn.q_proj.weight', 'layers.0.attn.k_proj.weight', "
r"'layers.0.attn.v_proj.weight', 'layers.0.attn.o_proj.weight'\]"
)
if use_orig_params:
ctx = self.assertWarnsRegex(UserWarning, regex)
else:
ctx = self.assertRaisesRegex(ValueError, regex)
with ctx:
_validate_frozen_params(model, modules_to_wrap, set(), use_orig_params)
# Now ignore those LoRA-A modules' parameters
ignored_params = set()
for module_name, module in model.named_modules():
if "lora_A" in module_name:
ignored_params.update(module.parameters())
_validate_frozen_params(model, modules_to_wrap, ignored_params, use_orig_params)
instantiate_parametrized_tests(TestFSDPWrap)
instantiate_parametrized_tests(TestAutoWrap)
if __name__ == "__main__":
run_tests()
|
TestWrapUtils
|
python
|
PrefectHQ__prefect
|
tests/server/utilities/test_text_search_parser.py
|
{
"start": 19364,
"end": 21446
}
|
class ____:
"""Test realistic query parsing scenarios end-to-end"""
@pytest.mark.parametrize(
"query, expected",
[
# Simple cases
("error", TextSearchQuery(include=["error"], exclude=[], required=[])),
("-debug", TextSearchQuery(include=[], exclude=["debug"], required=[])),
(
"+required",
TextSearchQuery(include=[], exclude=[], required=["required"]),
),
# Query examples
(
"error warning timeout",
TextSearchQuery(
include=["error", "warning", "timeout"], exclude=[], required=[]
),
),
(
"error -debug -test",
TextSearchQuery(
include=["error"], exclude=["debug", "test"], required=[]
),
),
(
"error !debug !test",
TextSearchQuery(
include=["error"], exclude=["debug", "test"], required=[]
),
),
(
'"connection timeout"',
TextSearchQuery(
include=["connection timeout"], exclude=[], required=[]
),
),
(
'"connection timeout" error -debug !test',
TextSearchQuery(
include=["connection timeout", "error"],
exclude=["debug", "test"],
required=[],
),
),
# Future AND syntax
(
"+error +connection -debug",
TextSearchQuery(
include=[], exclude=["debug"], required=["error", "connection"]
),
),
],
)
def test_example_queries(self, query: str, expected: TextSearchQuery) -> None:
"""Test all query examples parse correctly"""
result = parse_text_search_query(query)
assert result == expected
|
TestIntegrationScenarios
|
python
|
pallets__quart
|
src/quart/sessions.py
|
{
"start": 4629,
"end": 8752
}
|
class ____(SessionInterface):
"""A Session interface that uses cookies as storage.
This will store the data on the cookie in plain text, but with a
signature to prevent modification.
"""
digest_method = staticmethod(hashlib.sha1)
key_derivation = "hmac"
salt = "cookie-session"
serializer = session_json_serializer
session_class = SecureCookieSession
def get_signing_serializer(self, app: Quart) -> URLSafeTimedSerializer | None:
"""Return a serializer for the session that also signs data.
This will return None if the app is not configured for secrets.
"""
if not app.secret_key:
return None
keys: list[str | bytes] = []
if fallbacks := app.config["SECRET_KEY_FALLBACKS"]:
keys.extend(fallbacks)
keys.append(app.secret_key) # itsdangerous expects current key at top
options = {
"key_derivation": self.key_derivation,
"digest_method": self.digest_method,
}
return URLSafeTimedSerializer(
keys, # type: ignore[arg-type]
salt=self.salt,
serializer=self.serializer,
signer_kwargs=options,
)
async def open_session(
self, app: Quart, request: BaseRequestWebsocket
) -> SecureCookieSession | None:
"""Open a secure cookie based session.
This will return None if a signing serializer is not available,
usually if the config SECRET_KEY is not set.
"""
signer = self.get_signing_serializer(app)
if signer is None:
return None
cookie = request.cookies.get(self.get_cookie_name(app))
if cookie is None:
return self.session_class()
max_age = int(app.permanent_session_lifetime.total_seconds())
try:
data = signer.loads(cookie, max_age=max_age)
return self.session_class(data)
except BadSignature:
return self.session_class()
async def save_session(
self,
app: Quart,
session: SessionMixin,
response: Response | WerkzeugResponse | None,
) -> None:
"""Saves the session to the response in a secure cookie."""
if response is None:
if session.modified:
app.logger.exception(
"Secure Cookie Session modified during websocket handling. "
"These modifications will be lost as a cookie cannot be set."
)
return
name = self.get_cookie_name(app)
domain = self.get_cookie_domain(app)
partitioned = self.get_cookie_partitioned(app)
path = self.get_cookie_path(app)
secure = self.get_cookie_secure(app)
samesite = self.get_cookie_samesite(app)
httponly = self.get_cookie_httponly(app)
# Add a "Vary: Cookie" header if the session was accessed at all.
if session.accessed:
response.vary.add("Cookie")
# If the session is modified to be empty, remove the cookie.
# If the session is empty, return without setting the cookie.
if not session:
if session.modified:
response.delete_cookie(
name,
domain=domain,
partitioned=partitioned,
path=path,
secure=secure,
samesite=samesite,
httponly=httponly,
)
response.vary.add("Cookie")
return
if not self.should_set_cookie(app, session):
return
expires = self.get_expiration_time(app, session)
val = self.get_signing_serializer(app).dumps(dict(session))
response.set_cookie(
name,
val,
expires=expires,
httponly=httponly,
domain=domain,
partitioned=partitioned,
path=path,
secure=secure,
samesite=samesite,
)
response.vary.add("Cookie")
|
SecureCookieSessionInterface
|
python
|
tensorflow__tensorflow
|
tensorflow/python/util/dispatch_test.py
|
{
"start": 16191,
"end": 16351
}
|
class ____(extension_type.ExtensionType):
"""Simple ExtensionType for testing v2 dispatch."""
values: tensor_lib.Tensor
mask: tensor_lib.Tensor
|
MaskedTensor
|
python
|
catalyst-team__catalyst
|
catalyst/callbacks/metrics/r2_squared.py
|
{
"start": 112,
"end": 2312
}
|
class ____(LoaderMetricCallback):
"""R2 Squared metric callback.
Args:
input_key: input key to use for r2squared calculation, specifies our ``y_true``
target_key: output key to use for r2squared calculation, specifies our ``y_pred``
prefix: metric prefix
suffix: metric suffix
Examples:
.. code-block:: python
import torch
from torch.utils.data import DataLoader, TensorDataset
from catalyst import dl
# data
num_samples, num_features = int(1e4), int(1e1)
X, y = torch.rand(num_samples, num_features), torch.rand(num_samples)
dataset = TensorDataset(X, y)
loader = DataLoader(dataset, batch_size=32, num_workers=1)
loaders = {"train": loader, "valid": loader}
# model, criterion, optimizer, scheduler
model = torch.nn.Linear(num_features, 1)
criterion = torch.nn.MSELoss()
optimizer = torch.optim.Adam(model.parameters())
scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, [3, 6])
# model training
runner = dl.SupervisedRunner()
runner.train(
model=model,
criterion=criterion,
optimizer=optimizer,
scheduler=scheduler,
loaders=loaders,
logdir="./logdir",
valid_loader="valid",
valid_metric="loss",
minimize_valid_metric=True,
num_epochs=8,
verbose=True,
callbacks=[
dl.R2SquaredCallback(input_key="logits", target_key="targets")
]
)
.. note::
Please follow the `minimal examples`_ sections for more use cases.
.. _`minimal examples`: https://github.com/catalyst-team/catalyst#minimal-examples # noqa: E501, W505
"""
def __init__(
self,
input_key: str,
target_key: str,
prefix: str = None,
suffix: str = None,
):
"""Init."""
super().__init__(
metric=R2Squared(prefix=prefix, suffix=suffix),
input_key=input_key,
target_key=target_key,
)
__all__ = ["R2SquaredCallback"]
|
R2SquaredCallback
|
python
|
astropy__astropy
|
astropy/io/ascii/html.py
|
{
"start": 1495,
"end": 3149
}
|
class ____(core.BaseInputter):
"""
Input lines of HTML in a valid form.
This requires `BeautifulSoup
<http://www.crummy.com/software/BeautifulSoup/>`_ to be installed.
"""
def process_lines(self, lines):
"""
Convert the given input into a list of SoupString rows
for further processing.
"""
if not HAS_BS4:
raise core.OptionalTableImportError(
"BeautifulSoup must be installed to read HTML tables"
)
from bs4 import BeautifulSoup
if "parser" not in self.html:
with warnings.catch_warnings():
# Ignore bs4 parser warning #4550.
warnings.filterwarnings(
"ignore", ".*no parser was explicitly specified.*"
)
soup = BeautifulSoup("\n".join(lines))
else: # use a custom backend parser
soup = BeautifulSoup("\n".join(lines), self.html["parser"])
tables = soup.find_all("table")
for i, possible_table in enumerate(tables):
if identify_table(possible_table, self.html, i + 1):
table = possible_table # Find the correct table
break
else:
if isinstance(self.html["table_id"], int):
err_descr = f"number {self.html['table_id']}"
else:
err_descr = f"id '{self.html['table_id']}'"
raise core.InconsistentTableError(
f"ERROR: HTML table {err_descr} not found"
)
# Get all table rows
return [SoupString(x) for x in table.find_all("tr")]
|
HTMLInputter
|
python
|
pytorch__pytorch
|
torchgen/selective_build/operator.py
|
{
"start": 434,
"end": 6521
}
|
class ____:
# The name of the operator. This includes the aten::, etc... prefix
# The operator name may or may not have the overload name. If this
# operator name does not specify an overload name, the way to determine
# if this entry refers to the family of operators with this base name
# or just the operator with this name is to look at the value of the
# 'include_all_overloads' flag in this class.
name: str
# True if this is a root operator (i.e. called directly from a
# TorchScript model, etc...). An operator is considered to be a
# root operator if it is called directly from any one of the models
# that this instance of the pytorch library was built for. Hence, it
# may not be a root operator in all of the models that are used in
# this instance of the pytorch library.
is_root_operator: bool
# Is this operator used for on-device training? If True, then we need to
# use the information to generate code in VariableType_N.cpp for registration
# of training related operators. Again, this is True if this operator
# is used for training in one or more models used by this instance of the
# pytorch library.
is_used_for_training: bool
# If True, it indicates that this operator instance (object) refers to an
# operator without the overload name and should apply to all overloads
# which have this operator name as the base name. This flag is applicable
# only for objects that have operator names without a DOT (period) character
# in them.
#
# Note: This flag is a temporary workaround to grandfather in the current
# static selective (custom) build mechanism, which largely ignores overload
# names when determining whether to select operators for registration
# purposes.
include_all_overloads: bool
# Debug Information at the operator level
_debug_info: tuple[str, ...] | None
@staticmethod
def from_yaml_dict(
op_name: str, op_info: dict[str, object]
) -> SelectiveBuildOperator:
allowed_keys = {
"name",
"is_root_operator",
"is_used_for_training",
"include_all_overloads",
"debug_info",
}
if len(set(op_info.keys()) - allowed_keys) > 0:
raise Exception( # noqa: TRY002
"Got unexpected top level keys: {}".format(
",".join(set(op_info.keys()) - allowed_keys),
)
)
if "name" in op_info:
assert op_name == op_info["name"]
is_root_operator = op_info.get("is_root_operator", True)
assert isinstance(is_root_operator, bool)
is_used_for_training = op_info.get("is_used_for_training", True)
assert isinstance(is_used_for_training, bool)
include_all_overloads = op_info.get("include_all_overloads", True)
assert isinstance(include_all_overloads, bool)
debug_info: tuple[str, ...] | None = None
if "debug_info" in op_info:
di_list = op_info["debug_info"]
assert isinstance(di_list, list)
debug_info = tuple(str(x) for x in di_list)
return SelectiveBuildOperator(
name=op_name,
is_root_operator=is_root_operator,
is_used_for_training=is_used_for_training,
include_all_overloads=include_all_overloads,
_debug_info=debug_info,
)
@staticmethod
def from_legacy_operator_name_without_overload(
name: str,
) -> SelectiveBuildOperator:
return SelectiveBuildOperator(
name=name,
is_root_operator=True,
is_used_for_training=True,
include_all_overloads=True,
_debug_info=None,
)
def to_dict(self) -> dict[str, object]:
ret: dict[str, object] = {
"is_root_operator": self.is_root_operator,
"is_used_for_training": self.is_used_for_training,
"include_all_overloads": self.include_all_overloads,
}
if self._debug_info is not None:
ret["debug_info"] = self._debug_info
return ret
def merge_debug_info(
lhs: tuple[str, ...] | None,
rhs: tuple[str, ...] | None,
) -> tuple[str, ...] | None:
# Ensure that when merging, each entry shows up just once.
if lhs is None and rhs is None:
return None
return tuple(set((lhs or ()) + (rhs or ())))
def combine_operators(
lhs: SelectiveBuildOperator, rhs: SelectiveBuildOperator
) -> SelectiveBuildOperator:
if str(lhs.name) != str(rhs.name):
raise Exception( # noqa: TRY002
f"Expected both arguments to have the same name, but got '{str(lhs.name)}' and '{str(rhs.name)}' instead"
)
return SelectiveBuildOperator(
name=lhs.name,
# Consider this operator to be a root operator if it is a
# root operator in any of the models used in this instance of
# the pytorch library.
is_root_operator=lhs.is_root_operator or rhs.is_root_operator,
# Consider this operator to be a training operator if it is
# an operator used for training in any of the models used
# in this instance of the pytorch library.
is_used_for_training=lhs.is_used_for_training or rhs.is_used_for_training,
include_all_overloads=lhs.include_all_overloads or rhs.include_all_overloads,
_debug_info=merge_debug_info(lhs._debug_info, rhs._debug_info),
)
def merge_operator_dicts(
lhs: dict[str, SelectiveBuildOperator],
rhs: dict[str, SelectiveBuildOperator],
) -> dict[str, SelectiveBuildOperator]:
operators: dict[str, SelectiveBuildOperator] = {}
for op_name, op in list(lhs.items()) + list(rhs.items()):
new_op = op
if op_name in operators:
new_op = combine_operators(operators[op_name], op)
operators[op_name] = new_op
return operators
def strip_operator_overload_name(op_name: str) -> str:
return op_name.split(".", maxsplit=1)[0]
|
SelectiveBuildOperator
|
python
|
facelessuser__pymdown-extensions
|
tests/test_extensions/test_superfences.py
|
{
"start": 10968,
"end": 12474
}
|
class ____(util.MdCase):
"""Test highlight line wraps."""
extension = ['pymdownx.highlight', 'pymdownx.superfences']
extension_configs = {
'pymdownx.highlight': {
'anchor_linenums': True,
'linenums_style': 'inline'
}
}
def test_linespans(self):
"""Test wrapping a line in line spans."""
self.check_markdown(
r'''
```python linenums="2"
import test
```
''',
r'''
<div class="highlight"><pre><span></span><code><a id="__codelineno-0-2" name="__codelineno-0-2"></a><a href="#__codelineno-0-2"><span class="linenos">2</span></a><span class="kn">import</span><span class="w"> </span><span class="nn">test</span>
</code></pre></div>
''', # noqa: E501
True
)
def test_linespans_id(self):
"""Test wrapping a line in line spans with an ID."""
self.check_markdown(
r'''
```{.python #id linenums="2"}
import test
```
''',
r'''
<div id="id" class="highlight"><pre><span></span><code><a id="__codelineno-id-2" name="__codelineno-id-2"></a><a href="#__codelineno-id-2"><span class="linenos">2</span></a><span class="kn">import</span><span class="w"> </span><span class="nn">test</span>
</code></pre></div>
''', # noqa: E501
True
)
|
TestHighlightAnchorLinenumInline
|
python
|
ray-project__ray
|
python/ray/_common/usage/usage_lib.py
|
{
"start": 5658,
"end": 6001
}
|
class ____:
"""Usage stats to write to `USAGE_STATS_FILE`
We are writing extra metadata such as the status of report
to this file.
"""
usage_stats: UsageStatsToReport
# Whether or not the last report succeeded.
success: bool
# The error message of the last report if it happens.
error: str
|
UsageStatsToWrite
|
python
|
django-extensions__django-extensions
|
tests/testapp/models.py
|
{
"start": 5457,
"end": 5621
}
|
class ____(PostWithUniqField):
new_field = models.CharField(max_length=10)
class Meta:
app_label = "django_extensions"
|
InheritedFromPostWithUniqField
|
python
|
PyCQA__pylint
|
tests/functional/i/invalid/invalid_bool_returned.py
|
{
"start": 415,
"end": 521
}
|
class ____(type):
def __bool__(cls):
return True
@six.add_metaclass(BoolMetaclass)
|
BoolMetaclass
|
python
|
patrick-kidger__equinox
|
equinox/nn/_conv.py
|
{
"start": 26651,
"end": 27741
}
|
class ____(ConvTranspose):
"""As [`equinox.nn.ConvTranspose`][] with `num_spatial_dims=3`."""
def __init__(
self,
in_channels: int,
out_channels: int,
kernel_size: int | Sequence[int],
stride: int | Sequence[int] = (1, 1, 1),
output_padding: int | Sequence[int] = (0, 0, 0),
padding: str | int | Sequence[int] | Sequence[tuple[int, int]] = (0, 0, 0),
dilation: int | Sequence[int] = (1, 1, 1),
groups: int = 1,
use_bias: bool = True,
padding_mode: str = "ZEROS",
dtype=None,
*,
key: PRNGKeyArray,
):
super().__init__(
num_spatial_dims=3,
in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
stride=stride,
output_padding=output_padding,
padding=padding,
dilation=dilation,
groups=groups,
use_bias=use_bias,
padding_mode=padding_mode,
dtype=dtype,
key=key,
)
|
ConvTranspose3d
|
python
|
ray-project__ray
|
python/ray/data/_internal/block_batching/iter_batches.py
|
{
"start": 973,
"end": 17930
}
|
class ____:
"""Defines an iterator pipeline to convert a stream of block object references
into a stream of formatted batches ready to be consumed by the user.
This takes a block iterator and creates batch_size batches, slicing,
unioning, shuffling, prefetching, and formatting blocks as needed.
This involves both pipeline parallelism (e.g. prefetching)
and data parallelism (e.g. threadpool operations):
If prefetch_batches=2, these are all the batches in flight:
[User thread] trains on Batch 0
- [Fetch thread] Batch 1 finalization + move to output queue
- [Worker thread 1] Batch 2 formatting + collating
- [Worker thread 2] Batch 3 formatting + collating
- [Raylet] Batches 4 + 5 fetched to local object store memory
At any point in time there are prefetch_batches+1 batches in local heap memory.
And the next set of prefetch_batches in local object store memory.
The actual steps are as follows:
In a single async thread, do the following:
1. Trigger Ray local prefetching of `prefetch_batches` worth of block object
references.
2. Resolve (i.e. call `ray.get()`) on the block references.
3. Perform the necessary batch slicing to construct full batches, possibly
shuffling if necessary.
4. Then, in a threadpool consisting of `prefetch_batches` threads:
a. Format the batches to the provided batch format.
b. Apply the collate function.
5. Finalize each of the collated batches
6. Fetch outputs from the threadpool, maintaining order of the batches.
Args:
ref_bundles: An iterator over RefBundles.
stats: DatasetStats object to record timing and other statistics.
dataset_tag: The tag of the dataset to record timing and other statistics.
clear_block_after_read: Whether to clear the block from object store
manually (i.e. without waiting for Python's automatic GC) after it
is read. Doing so will reclaim memory faster and hence reduce the
memory footprint. However, the caller has to ensure the safety, i.e.
the block will never be accessed again.
batch_size: Record batch size, or None to let the system pick.
batch_format: The format in which to return each batch.
Specify "default" to use the current block format (promoting
Arrow to pandas automatically), "pandas" to
select ``pandas.DataFrame`` or "pyarrow" to select
``pyarrow.Table``, or None to use entire blocks
as batches. Default is "default".
drop_last: Whether to drop the last batch if it's incomplete.
collate_fn: A function to apply to each data batch before returning it.
finalize_fn: A function to apply to each data batch after it has been collated.
This function is not run in a threadpool so it can be used for
memory-intensive operations such as GPU preloading.
shuffle_buffer_min_size: If non-None, the data will be randomly shuffled using a
local in-memory shuffle buffer, and this value will serve as the minimum
number of rows that must be in the local in-memory shuffle buffer in order
to yield a batch.
shuffle_seed: The seed to use for the local random shuffle.
ensure_copy: Whether batches are always copied from the underlying base
blocks (not zero-copy views).
prefetch_batches: The number of batches to fetch ahead of the current batch to
process. If set to greater than 0, a separate thread will be used to fetch
the specified amount of formatted batches from blocks. This improves
performance for non-CPU bound UDFs, allowing batch fetching compute and
formatting to be overlapped with the UDF. Defaults to 1.
"""
UPDATE_METRICS_INTERVAL_S: float = 5.0
def __init__(
self,
ref_bundles: Iterator[RefBundle],
*,
stats: Optional[DatasetStats] = None,
dataset_tag: Optional[str] = None,
clear_block_after_read: bool = False,
batch_size: Optional[int] = None,
batch_format: Optional[str] = "default",
drop_last: bool = False,
collate_fn: Optional[Callable[[DataBatch], Any]] = None,
finalize_fn: Optional[Callable[[Any], Any]] = None,
shuffle_buffer_min_size: Optional[int] = None,
shuffle_seed: Optional[int] = None,
ensure_copy: bool = False,
prefetch_batches: int = 1,
):
self._ref_bundles = ref_bundles
self._stats = stats
self._dataset_tag = dataset_tag
self._batch_size = batch_size
self._batch_format = batch_format
self._drop_last = drop_last
self._collate_fn = collate_fn
self._finalize_fn = finalize_fn
self._shuffle_buffer_min_size = shuffle_buffer_min_size
self._shuffle_seed = shuffle_seed
self._ensure_copy = ensure_copy
self._prefetch_batches = prefetch_batches
self._eager_free = (
clear_block_after_read and DataContext.get_current().eager_free
)
actor_prefetcher_enabled = (
prefetch_batches > 0
and DataContext.get_current().actor_prefetcher_enabled
and not ray.util.client.ray.is_connected()
)
self._prefetcher = (
ActorBlockPrefetcher()
if actor_prefetcher_enabled
else WaitBlockPrefetcher()
)
self._yielded_first_batch = False
# This stores the last time we updated the metrics.
# This allows us to update metrics on some interval,
# by comparing it with the current timestamp.
self._metrics_last_updated: float = 0.0
def _prefetch_blocks(
self, ref_bundles: Iterator[RefBundle]
) -> Iterator[ObjectRef[Block]]:
return prefetch_batches_locally(
ref_bundles=ref_bundles,
prefetcher=self._prefetcher,
num_batches_to_prefetch=self._prefetch_batches,
batch_size=self._batch_size,
eager_free=self._eager_free,
stats=self._stats,
)
def _resolve_block_refs(
self, block_refs: Iterator[ObjectRef[Block]]
) -> Iterator[Block]:
return resolve_block_refs(block_ref_iter=block_refs, stats=self._stats)
def _blocks_to_batches(self, blocks: Iterator[Block]) -> Iterator[Batch]:
return blocks_to_batches(
block_iter=blocks,
stats=self._stats,
batch_size=self._batch_size,
drop_last=self._drop_last,
shuffle_buffer_min_size=self._shuffle_buffer_min_size,
shuffle_seed=self._shuffle_seed,
ensure_copy=self._ensure_copy,
)
def _format_batches(self, batches: Iterator[Batch]) -> Iterator[Batch]:
num_threadpool_workers = min(
DEFAULT_FORMAT_THREADPOOL_NUM_WORKERS, self._prefetch_batches
)
return _format_in_threadpool(
batch_iter=batches,
stats=self._stats,
batch_format=self._batch_format,
collate_fn=self._collate_fn,
num_threadpool_workers=num_threadpool_workers,
)
def _finalize_batches(
self,
batch_iter: Iterator[Batch],
) -> Iterator[Batch]:
if self._finalize_fn is None:
return batch_iter
return finalize_batches(
batch_iter, finalize_fn=self._finalize_fn, stats=self._stats
)
def _restore_original_batch_order(
self, batches: Iterator[Batch]
) -> Iterator[Batch]:
return restore_original_order(batches)
def _pipeline(self, ref_bundles: Iterator[RefBundle]) -> Iterator[Batch]:
# Step 1: Prefetch logical batches locally.
block_iter = self._prefetch_blocks(ref_bundles)
# Step 2: Resolve the blocks.
block_iter = self._resolve_block_refs(block_iter)
# Step 3: Batch and shuffle the resolved blocks.
batch_iter = self._blocks_to_batches(block_iter)
# Step 4: Format and collate the batches in a threadpool.
batch_iter = self._format_batches(batch_iter)
# Step 5: Finalize the batches (e.g., move to GPU).
batch_iter = self._finalize_batches(batch_iter)
# Step 6: Restore the original order of the batches, as the prior
# threadpool operations may have reordered the batches non-deterministically.
batch_iter = self._restore_original_batch_order(batch_iter)
yield from batch_iter
def _iter_batches(self) -> Iterator[DataBatch]:
async_batch_iter = make_async_gen(
self._ref_bundles,
fn=self._pipeline,
num_workers=1,
preserve_ordering=False,
buffer_size=max(self._prefetch_batches, 1),
)
self.before_epoch_start()
while True:
with self.get_next_batch_context():
try:
batch = next(async_batch_iter)
except StopIteration:
break
with self.yield_batch_context(batch):
yield batch.data
self.after_epoch_end()
def __iter__(self) -> Iterator[DataBatch]:
return self._iter_batches()
def before_epoch_start(self):
self._yielded_first_batch = False
def after_epoch_end(self):
if self._stats is None:
return
_StatsManager.update_iteration_metrics(self._stats, self._dataset_tag)
@contextmanager
def get_next_batch_context(self):
try:
if self._stats:
# Always track total blocked time
total_timer = self._stats.iter_total_blocked_s.timer()
# Also track the time until the first batch is ready
first_batch_ready_timer = (
self._stats.iter_time_to_first_batch_s.timer()
if not self._yielded_first_batch
else nullcontext()
)
with total_timer, first_batch_ready_timer:
yield
else:
yield
finally:
self._yielded_first_batch = True
@contextmanager
def yield_batch_context(self, batch: Batch):
with self._stats.iter_user_s.timer() if self._stats else nullcontext():
yield
if self._stats is None:
return
now = time.time()
if (now - self._metrics_last_updated) > self.UPDATE_METRICS_INTERVAL_S:
_StatsManager.update_iteration_metrics(self._stats, self._dataset_tag)
self._metrics_last_updated = now
def _format_in_threadpool(
batch_iter: Iterator[Batch],
stats: DatasetStats,
batch_format: Optional[str],
collate_fn: Optional[Callable[[DataBatch], Any]],
num_threadpool_workers: int,
) -> Iterator[Batch]:
"""Executes the batching, formatting, and collation logic in a threadpool.
Args:
logical_batch_iterator: An iterator over logical batches.
stats: DatasetStats object to record timing and other statistics.
batch_format: The format in which to return each batch.
Specify "default" to use the current block format (promoting
Arrow to pandas automatically), "pandas" to
select ``pandas.DataFrame`` or "pyarrow" to select
``pyarrow.Table``, or None to use entire blocks
as batches.
collate_fn: A function to apply to each data batch before returning it.
num_threadpool_workers: The number of threads to use in the threadpool.
"""
def threadpool_computations_format_collate(
batch_iter: Iterator[Batch],
) -> Iterator[Batch]:
# Step 4a: Format the batches.
formatted_batch_iter = format_batches(
batch_iter, batch_format=batch_format, stats=stats
)
# Step 4b: Apply the collate function if applicable.
if collate_fn is not None:
formatted_batch_iter = collate(
formatted_batch_iter, collate_fn=collate_fn, stats=stats
)
yield from formatted_batch_iter
if num_threadpool_workers > 0:
collated_iter = make_async_gen(
base_iterator=batch_iter,
fn=threadpool_computations_format_collate,
preserve_ordering=False,
num_workers=num_threadpool_workers,
)
else:
collated_iter = threadpool_computations_format_collate(batch_iter)
return collated_iter
def prefetch_batches_locally(
ref_bundles: Iterator[RefBundle],
prefetcher: BlockPrefetcher,
num_batches_to_prefetch: int,
batch_size: Optional[int],
eager_free: bool = False,
stats: Optional[DatasetStats] = None,
) -> Iterator[ObjectRef[Block]]:
"""Given an iterator of batched RefBundles, returns an iterator over the
corresponding block references while prefetching `num_batches_to_prefetch`
batches in advance.
Args:
ref_bundles: An iterator over batched RefBundles.
prefetcher: The prefetcher to use.
num_batches_to_prefetch: The number of batches to prefetch ahead of the
current batch during the scan.
batch_size: User specified batch size, or None to let the system pick.
eager_free: Whether to eagerly free the object reference from the object store.
stats: Dataset stats object used to store ref bundle retrieval time.
"""
def get_next_ref_bundle() -> RefBundle:
with stats.iter_get_ref_bundles_s.timer() if stats else nullcontext():
return next(ref_bundles)
sliding_window = collections.deque()
current_window_size = 0
if num_batches_to_prefetch <= 0:
if stats:
stats.iter_prefetched_bytes = 0
for ref_bundle in ref_bundles:
for block_ref in ref_bundle.block_refs:
yield block_ref
return
if batch_size is not None:
num_rows_to_prefetch = num_batches_to_prefetch * batch_size
else:
num_rows_to_prefetch = None
# Create and fetch the initial window.
# Stop adding if the number of rows in this window is greater than requested
# batch size, or if the batch size is None and the number of blocks in this window
# is greater than requested batches to prefetch.
while (batch_size is not None and current_window_size < num_rows_to_prefetch) or (
batch_size is None and len(sliding_window) < num_batches_to_prefetch
):
try:
next_ref_bundle = get_next_ref_bundle()
sliding_window.extend(next_ref_bundle.blocks)
current_window_size += next_ref_bundle.num_rows()
except StopIteration:
break
prefetcher.prefetch_blocks([block_ref for block_ref, _ in list(sliding_window)])
if stats:
stats.iter_prefetched_bytes = sum(
metadata.size_bytes or 0 for _, metadata in sliding_window
)
while sliding_window:
block_ref, metadata = sliding_window.popleft()
current_window_size -= metadata.num_rows
if batch_size is None or current_window_size < num_rows_to_prefetch:
try:
next_ref_bundle = get_next_ref_bundle()
for block_ref_and_md in next_ref_bundle.blocks:
sliding_window.append(block_ref_and_md)
current_window_size += block_ref_and_md[1].num_rows
prefetcher.prefetch_blocks(
[block_ref for block_ref, _ in list(sliding_window)]
)
except StopIteration:
pass
if stats:
stats.iter_prefetched_bytes = sum(
metadata.size_bytes or 0 for _, metadata in sliding_window
)
yield block_ref
trace_deallocation(block_ref, loc="iter_batches", free=eager_free)
prefetcher.stop()
def restore_original_order(batch_iter: Iterator[Batch]) -> Iterator[Batch]:
"""Restores the original order of the provided `batch_iter`
This function will yield items from `base_iterator` in the correct order based on
each batch's batch_idx. All indexes are expected to be unique.
`batch_iter` is expected to not have any missing indexes. All indexes from 0 to len
(base_iterator) must be present.
"""
next_index_required = 0
buffer: Dict[int, Batch] = {}
for batch in batch_iter:
assert batch.metadata.batch_idx not in buffer
buffer[batch.metadata.batch_idx] = batch
while next_index_required in buffer:
yield buffer.pop(next_index_required)
next_index_required += 1
while next_index_required in buffer:
yield buffer.pop(next_index_required)
next_index_required += 1
|
BatchIterator
|
python
|
google__pytype
|
pytype/rewrite/frame_test.py
|
{
"start": 628,
"end": 2191
}
|
class ____(test_utils.ContextfulTestBase):
def _make_frame(self, src: str, name: str = '__main__') -> frame_lib.Frame:
code = test_utils.parse(src)
if name == '__main__':
module_globals = self.ctx.abstract_loader.get_module_globals()
initial_locals = initial_globals = {
name: value.to_variable() for name, value in module_globals.items()
}
else:
initial_locals = initial_globals = {}
return frame_lib.Frame(
self.ctx,
name,
code,
initial_locals=initial_locals,
initial_globals=initial_globals,
)
def _const_var(self, const, name=None):
return self.ctx.consts[const].to_variable(name)
def assertConstantVar(self, var, expected):
val = var.get_atomic_value()
self.assertIsInstance(val, abstract.PythonConstant)
self.assertEqual(val.constant, expected)
def run_block(self, block: str, *, consts=()) -> frame_lib.Frame:
"""Run a block of opcodes without checking frame exit conditions."""
code = test_utils.assemble_block(block, consts=consts)
blk = code.order[0].code
n = len(blk)
# Add a NOP at the end so there is always an opcode.next
blk.append(opcodes.NOP(n, blk[-1].line))
frame = frame_lib.Frame(self.ctx, 'test', code.Seal())
frame.stepn(n)
return frame
def run_frame_until(self, code: str, *, condition) -> frame_lib.Frame:
"""Run a block of opcodes until condition is met."""
frame = self._make_frame(code)
while not condition(frame):
frame.step()
return frame
|
FrameTestBase
|
python
|
django-haystack__django-haystack
|
test_haystack/core/models.py
|
{
"start": 1242,
"end": 1492
}
|
class ____(models.Model):
author = models.CharField(max_length=255)
editor = models.CharField(max_length=255)
pub_date = models.DateTimeField(default=datetime.datetime.now)
def __str__(self):
return self.author
|
AFourthMockModel
|
python
|
numba__numba
|
numba/tests/test_debug.py
|
{
"start": 3294,
"end": 3636
}
|
class ____(DebugTestBase):
func_name = 'simple_nopython'
def compile_simple_nopython(self):
with captured_stdout() as out:
cfunc = njit((types.int64,))(simple_nopython)
# Sanity check compiled function
self.assertPreciseEqual(cfunc(2), 3)
return out.getvalue()
|
FunctionDebugTestBase
|
python
|
skorch-dev__skorch
|
examples/nuclei_image_segmentation/dataset.py
|
{
"start": 663,
"end": 2821
}
|
class ____(Dataset):
"""Creates patches of cells.
Parameters
----------
base_dataset: CellsDataset
Dataset of cells
patch_size: tuple of ints (default=(256, 256))
The size of each patch
random_flips: bool (default=False)
If true, patches and masks will be randomly flipped horizontally and
vertically.
padding: int (default=16)
Amount of paddding around each image and mask
"""
def __init__(self,
base_dataset,
patch_size=(256, 256),
random_flips=False,
padding=16):
super().__init__()
self.base_dataset = base_dataset
self.patch_size = patch_size
self.patch_size_expanded = (patch_size[0] + 2 * padding,
patch_size[1] + 2 * padding)
self.padding = padding
self.random_flips = random_flips
coords = []
for idx, (_, mask) in enumerate(self.base_dataset):
w, h = mask.size
bboxes = calcuate_bboxes((h, w), self.patch_size)
idx_bboxes = list(zip_longest([], bboxes, fillvalue=idx))
coords.extend(idx_bboxes)
self.coords = coords
def __len__(self):
return len(self.coords)
def __getitem__(self, idx):
img_idx, (i, j) = self.coords[idx]
cell, mask = self.base_dataset[img_idx]
h, w = self.patch_size_expanded
cell = pad(cell, self.padding, padding_mode='reflect')
mask = pad(mask, self.padding, padding_mode='reflect')
cell = crop(cell, i, j, h, w)
mask = crop(mask, i, j, h, w)
if self.random_flips:
if random.random() < 0.5:
cell = hflip(cell)
mask = hflip(mask)
if random.random() < 0.5:
cell = vflip(cell)
mask = vflip(mask)
cell = to_tensor(cell)
mask = torch.as_tensor((np.array(mask) == 255).astype('float32'))
# mean and std of imagenet
cell = normalize(cell, [0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
return cell, mask
|
PatchedDataset
|
python
|
dagster-io__dagster
|
python_modules/dagster/dagster/_core/definitions/metadata/metadata_value.py
|
{
"start": 1271,
"end": 18648
}
|
class ____(ABC, Generic[T_Packable]):
"""Utility class to wrap metadata values passed into Dagster events so that they can be
displayed in the Dagster UI and other tooling.
.. code-block:: python
@op
def emit_metadata(context, df):
yield AssetMaterialization(
asset_key="my_dataset",
metadata={
"my_text_label": "hello",
"dashboard_url": MetadataValue.url("http://mycoolsite.com/my_dashboard"),
"num_rows": 0,
},
)
"""
@public
@property
@abstractmethod
def value(self) -> T_Packable:
"""The wrapped value."""
raise NotImplementedError()
@public
@staticmethod
def text(text: str) -> "TextMetadataValue":
"""Static constructor for a metadata value wrapping text as
:py:class:`TextMetadataValue`. Can be used as the value type for the `metadata`
parameter for supported events.
Example:
.. code-block:: python
@op
def emit_metadata(context, df):
yield AssetMaterialization(
asset_key="my_dataset",
metadata={
"my_text_label": MetadataValue.text("hello")
},
)
Args:
text (str): The text string for a metadata entry.
"""
return TextMetadataValue(text)
@public
@staticmethod
def url(url: str) -> "UrlMetadataValue":
"""Static constructor for a metadata value wrapping a URL as
:py:class:`UrlMetadataValue`. Can be used as the value type for the `metadata`
parameter for supported events.
Example:
.. code-block:: python
@op
def emit_metadata(context):
yield AssetMaterialization(
asset_key="my_dashboard",
metadata={
"dashboard_url": MetadataValue.url("http://mycoolsite.com/my_dashboard"),
}
)
Args:
url (str): The URL for a metadata entry.
"""
return UrlMetadataValue(url)
@public
@staticmethod
def path(path: Union[str, PathLike]) -> "PathMetadataValue":
"""Static constructor for a metadata value wrapping a path as
:py:class:`PathMetadataValue`.
Example:
.. code-block:: python
@op
def emit_metadata(context):
yield AssetMaterialization(
asset_key="my_dataset",
metadata={
"filepath": MetadataValue.path("path/to/file"),
}
)
Args:
path (str): The path for a metadata entry.
"""
return PathMetadataValue(path)
@public
@staticmethod
def notebook(path: Union[str, PathLike]) -> "NotebookMetadataValue":
"""Static constructor for a metadata value wrapping a notebook path as
:py:class:`NotebookMetadataValue`.
Example:
.. code-block:: python
@op
def emit_metadata(context):
yield AssetMaterialization(
asset_key="my_dataset",
metadata={
"notebook_path": MetadataValue.notebook("path/to/notebook.ipynb"),
}
)
Args:
path (str): The path to a notebook for a metadata entry.
"""
return NotebookMetadataValue(path)
@public
@staticmethod
def json(data: Union[Sequence[Any], Mapping[str, Any]]) -> "JsonMetadataValue":
"""Static constructor for a metadata value wrapping a json-serializable list or dict
as :py:class:`JsonMetadataValue`. Can be used as the value type for the `metadata`
parameter for supported events.
Example:
.. code-block:: python
@op
def emit_metadata(context):
yield ExpectationResult(
success=not missing_things,
label="is_present",
metadata={
"about my dataset": MetadataValue.json({"missing_columns": missing_things})
},
)
Args:
data (Union[Sequence[Any], Mapping[str, Any]]): The JSON data for a metadata entry.
"""
return JsonMetadataValue(data)
@public
@staticmethod
def md(data: str) -> "MarkdownMetadataValue":
"""Static constructor for a metadata value wrapping markdown data as
:py:class:`MarkdownMetadataValue`. Can be used as the value type for the `metadata`
parameter for supported events.
Example:
.. code-block:: python
@op
def emit_metadata(context, md_str):
yield AssetMaterialization(
asset_key="info",
metadata={
'Details': MetadataValue.md(md_str)
},
)
Args:
md_str (str): The markdown for a metadata entry.
"""
return MarkdownMetadataValue(data)
@public
@staticmethod
def python_artifact(python_artifact: Callable[..., Any]) -> "PythonArtifactMetadataValue":
"""Static constructor for a metadata value wrapping a python artifact as
:py:class:`PythonArtifactMetadataValue`. Can be used as the value type for the
`metadata` parameter for supported events.
Example:
.. code-block:: python
@op
def emit_metadata(context, df):
yield AssetMaterialization(
asset_key="my_dataset",
metadata={
"class": MetadataValue.python_artifact(MyClass),
"function": MetadataValue.python_artifact(my_function),
}
)
Args:
value (Callable): The python class or function for a metadata entry.
"""
check.callable_param(python_artifact, "python_artifact")
return PythonArtifactMetadataValue(python_artifact.__module__, python_artifact.__name__)
@public
@staticmethod
def float(value: float) -> "FloatMetadataValue":
"""Static constructor for a metadata value wrapping a float as
:py:class:`FloatMetadataValue`. Can be used as the value type for the `metadata`
parameter for supported events.
Example:
.. code-block:: python
@op
def emit_metadata(context, df):
yield AssetMaterialization(
asset_key="my_dataset",
metadata={
"size (bytes)": MetadataValue.float(calculate_bytes(df)),
}
)
Args:
value (float): The float value for a metadata entry.
"""
return FloatMetadataValue(value)
@public
@staticmethod
def int(value: int) -> "IntMetadataValue":
"""Static constructor for a metadata value wrapping an int as
:py:class:`IntMetadataValue`. Can be used as the value type for the `metadata`
parameter for supported events.
Example:
.. code-block:: python
@op
def emit_metadata(context, df):
yield AssetMaterialization(
asset_key="my_dataset",
metadata={
"number of rows": MetadataValue.int(len(df)),
},
)
Args:
value (int): The int value for a metadata entry.
"""
return IntMetadataValue(value)
@public
@staticmethod
def bool(value: bool) -> "BoolMetadataValue":
"""Static constructor for a metadata value wrapping a bool as
:py:class:`BoolMetadataValuye`. Can be used as the value type for the `metadata`
parameter for supported events.
Example:
.. code-block:: python
@op
def emit_metadata(context, df):
yield AssetMaterialization(
asset_key="my_dataset",
metadata={
"num rows > 1000": MetadataValue.bool(len(df) > 1000),
},
)
Args:
value (bool): The bool value for a metadata entry.
"""
return BoolMetadataValue(value)
@public
@staticmethod
def timestamp(value: Union["float", datetime]) -> "TimestampMetadataValue":
"""Static constructor for a metadata value wrapping a UNIX timestamp as a
:py:class:`TimestampMetadataValue`. Can be used as the value type for the `metadata`
parameter for supported events.
Args:
value (Union[float, datetime]): The unix timestamp value for a metadata entry. If a
datetime is provided, the timestamp will be extracted. datetimes without timezones
are not accepted, because their timestamps can be ambiguous.
"""
if isinstance(value, float):
return TimestampMetadataValue(value)
elif isinstance(value, datetime):
if value.tzinfo is None:
check.failed(
"Datetime values provided to MetadataValue.timestamp must have timezones, "
f"but {value.isoformat()} does not"
)
return TimestampMetadataValue(value.timestamp())
else:
check.failed(f"Expected either a float or a datetime, but received a {type(value)}")
@public
@staticmethod
def dagster_run(run_id: str) -> "DagsterRunMetadataValue":
"""Static constructor for a metadata value wrapping a reference to a Dagster run.
Args:
run_id (str): The ID of the run.
"""
return DagsterRunMetadataValue(run_id)
@public
@staticmethod
def asset(asset_key: AssetKey) -> "DagsterAssetMetadataValue":
    """Static constructor for a metadata value referencing a Dagster asset, by key.

    For example:

    .. code-block:: python

        @op
        def validate_table(context, df):
            yield AssetMaterialization(
                asset_key=AssetKey("my_table"),
                metadata={
                    "Related asset": MetadataValue.asset(AssetKey('my_other_table')),
                },
            )

    Args:
        asset_key (AssetKey): The asset key referencing the asset.
    """
    # Imported locally to avoid a circular import at module load time.
    from dagster._core.definitions.events import AssetKey

    # check.inst_param returns its argument after validating the type.
    validated_key = check.inst_param(asset_key, "asset_key", AssetKey)
    return DagsterAssetMetadataValue(validated_key)
@public
@staticmethod
def job(
    job_name: str,
    location_name: str,
    *,
    repository_name: Optional[str] = None,
) -> "DagsterJobMetadataValue":
    """Static constructor for a metadata value that references a Dagster job by name.

    For example:

    .. code-block:: python

        from dagster import AssetMaterialization, MetadataValue, op

        @op
        def emit_metadata(context, df):
            yield AssetMaterialization(
                asset_key="my_dataset",
                metadata={
                    "Producing job": MetadataValue.job('my_other_job', 'my_location'),
                },
            )

    Args:
        job_name (str): The name of the job.
        location_name (Optional[str]): The code location name for the job.
        repository_name (Optional[str]): The repository name of the job, if different from the
            default.
    """
    # Validate each argument up front so errors point at the offending parameter.
    checked_job_name = check.str_param(job_name, "job_name")
    checked_location_name = check.str_param(location_name, "location_name")
    checked_repository_name = check.opt_str_param(repository_name, "repository_name")
    return DagsterJobMetadataValue(
        job_name=checked_job_name,
        location_name=checked_location_name,
        repository_name=checked_repository_name,
    )
@public
@staticmethod
def table(
    records: Sequence[TableRecord], schema: Optional[TableSchema] = None
) -> "TableMetadataValue":
    """Static constructor for a metadata value wrapping arbitrary tabular data as
    :py:class:`TableMetadataValue`. Can be used as the value type for the `metadata`
    parameter for supported events.

    Example:

    .. code-block:: python

        @op
        def emit_metadata(context):
            yield ExpectationResult(
                success=not has_errors,
                label="is_valid",
                metadata={
                    "errors": MetadataValue.table(
                        records=[
                            TableRecord(data={"code": "invalid-data-type", "row": 2, "col": "name"})
                        ],
                        schema=TableSchema(
                            columns=[
                                TableColumn(name="code", type="string"),
                                TableColumn(name="row", type="int"),
                                TableColumn(name="col", type="string"),
                            ]
                        )
                    ),
                },
            )

    Args:
        records (Sequence[TableRecord]): The rows of the table.
        schema (Optional[TableSchema]): The schema describing the table's columns,
            if available.
    """
    # TableMetadataValue handles a missing schema itself, so pass it through as-is.
    return TableMetadataValue(records, schema)
@public
@staticmethod
def table_schema(
    schema: TableSchema,
) -> "TableSchemaMetadataValue":
    """Static constructor for a metadata value wrapping a table schema as
    :py:class:`TableSchemaMetadataValue`. Can be used as the value type
    for the `metadata` parameter for supported events.

    Example:

    .. code-block:: python

        schema = TableSchema(
            columns = [
                TableColumn(name="id", type="int"),
                TableColumn(name="status", type="bool"),
            ]
        )

        DagsterType(
            type_check_fn=some_validation_fn,
            name='MyTable',
            metadata={
                'my_table_schema': MetadataValue.table_schema(schema),
            }
        )

    Args:
        schema (TableSchema): The table schema for a metadata entry.
    """
    # Thin wrapper: the schema object is stored verbatim.
    return TableSchemaMetadataValue(schema)
@public
@staticmethod
def column_lineage(
    lineage: TableColumnLineage,
) -> "TableColumnLineageMetadataValue":
    """Static constructor for a metadata value wrapping column lineage information as
    :py:class:`TableColumnLineageMetadataValue`. Can be used as the value type
    for the `metadata` parameter for supported events.

    Args:
        lineage (TableColumnLineage): The column lineage for a metadata entry.
    """
    # The lineage object is wrapped without modification.
    return TableColumnLineageMetadataValue(lineage)
@public
@staticmethod
def null() -> "NullMetadataValue":
    """Static constructor for a metadata value representing null. Can be used as the value
    type for the `metadata` parameter for supported events.
    """
    # NullMetadataValue carries no payload; a fresh instance is returned each call.
    return NullMetadataValue()
# not public because rest of code location metadata API is not public
@staticmethod
def code_location_reconstruction(data: str) -> "CodeLocationReconstructionMetadataValue":
    """Static constructor for a metadata value wrapping arbitrary code location data useful
    during reconstruction as :py:class:`CodeLocationReconstructionMetadataValue`. Can be used
    as the value type for the `metadata` parameter for supported events.

    Args:
        data (str): The serialized code location state for a metadata entry.
    """
    # The serialized state string is stored verbatim; deserialization happens elsewhere.
    return CodeLocationReconstructionMetadataValue(data)
@public
@staticmethod
def pool(pool: str) -> "PoolMetadataValue":
    """Static constructor for a metadata value that references a concurrency pool.

    Args:
        pool (str): The identifier for the pool.
    """
    # Passed as a keyword to match PoolMetadataValue's constructor signature.
    return PoolMetadataValue(pool=pool)
# ########################
# ##### METADATA VALUE TYPES
# ########################
# NOTE: We have `type: ignore` in a few places below because mypy complains about an instance method
# (e.g. `text`) overriding a static method on the superclass of the same name. This is not a concern
# for us because these static methods should never be called on instances.
# NOTE: `XMetadataValue` classes are serialized with a storage name of `XMetadataEntryData` to
# maintain backward compatibility. See docstring of `whitelist_for_serdes` for more info.
@public
@whitelist_for_serdes(storage_name="TextMetadataEntryData")
@record(kw_only=False)
|
MetadataValue
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.